xref: /linux/tools/testing/selftests/bpf/test_verifier.c (revision 30da46b5dc3a9a14db11706d841440e28b12bb53)
1 /*
2  * Testsuite for eBPF verifier
3  *
4  * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
5  * Copyright (c) 2017 Facebook
6  * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
7  *
8  * This program is free software; you can redistribute it and/or
9  * modify it under the terms of version 2 of the GNU General Public
10  * License as published by the Free Software Foundation.
11  */
12 
13 #include <endian.h>
14 #include <asm/types.h>
15 #include <linux/types.h>
16 #include <stdint.h>
17 #include <stdio.h>
18 #include <stdlib.h>
19 #include <unistd.h>
20 #include <errno.h>
21 #include <string.h>
22 #include <stddef.h>
23 #include <stdbool.h>
24 #include <sched.h>
25 #include <limits.h>
26 
27 #include <sys/capability.h>
28 
29 #include <linux/unistd.h>
30 #include <linux/filter.h>
31 #include <linux/bpf_perf_event.h>
32 #include <linux/bpf.h>
33 #include <linux/if_ether.h>
34 
35 #include <bpf/bpf.h>
36 
37 #ifdef HAVE_GENHDR
38 # include "autoconf.h"
39 #else
40 # if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
41 #  define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
42 # endif
43 #endif
44 #include "bpf_rlimit.h"
45 #include "bpf_rand.h"
46 #include "bpf_util.h"
47 #include "../../../include/linux/filter.h"
48 
49 #define MAX_INSNS	BPF_MAXINSNS
50 #define MAX_FIXUPS	8
51 #define MAX_NR_MAPS	13
52 #define POINTER_VALUE	0xcafe4all
53 #define TEST_DATA_LEN	64
54 
55 #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS	(1 << 0)
56 #define F_LOAD_WITH_STRICT_ALIGNMENT		(1 << 1)
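/* Presumed behaviour, matching the autoconf fallback above: tests flagged
 * F_NEEDS_EFFICIENT_UNALIGNED_ACCESS only have their stated outcome on
 * architectures with CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, while
 * F_LOAD_WITH_STRICT_ALIGNMENT asks the loader to pass BPF_F_STRICT_ALIGNMENT
 * so that misaligned accesses are rejected even on such architectures.
 */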
57 
58 #define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
59 static bool unpriv_disabled = false;
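/* Read once at startup (in runner code outside this excerpt) so that test
 * cases which also carry an unprivileged expectation can be skipped, rather
 * than reported as failures, on systems where unprivileged BPF is disabled.
 */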
60 
61 struct bpf_test {
62 	const char *descr;
63 	struct bpf_insn	insns[MAX_INSNS];
64 	int fixup_map_hash_8b[MAX_FIXUPS];
65 	int fixup_map_hash_48b[MAX_FIXUPS];
66 	int fixup_map_hash_16b[MAX_FIXUPS];
67 	int fixup_map_array_48b[MAX_FIXUPS];
68 	int fixup_map_sockmap[MAX_FIXUPS];
69 	int fixup_map_sockhash[MAX_FIXUPS];
70 	int fixup_map_xskmap[MAX_FIXUPS];
71 	int fixup_map_stacktrace[MAX_FIXUPS];
72 	int fixup_prog1[MAX_FIXUPS];
73 	int fixup_prog2[MAX_FIXUPS];
74 	int fixup_map_in_map[MAX_FIXUPS];
75 	int fixup_cgroup_storage[MAX_FIXUPS];
76 	int fixup_percpu_cgroup_storage[MAX_FIXUPS];
77 	const char *errstr;
78 	const char *errstr_unpriv;
79 	uint32_t retval, retval_unpriv;
80 	enum {
81 		UNDEF,
82 		ACCEPT,
83 		REJECT
84 	} result, result_unpriv;
85 	enum bpf_prog_type prog_type;
86 	uint8_t flags;
87 	__u8 data[TEST_DATA_LEN];
88 	void (*fill_helper)(struct bpf_test *self);
89 };
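/* The fixup_* arrays above hold instruction indices; the actual patching is
 * done by the test runner (not part of this excerpt). As a sketch, an entry
 * such as
 *
 *	.fixup_map_hash_8b = { 3 },
 *
 * means: create a hash map with 8-byte values and write its fd into the
 * BPF_LD_MAP_FD at instruction index 3 before loading the program. Unused
 * slots stay zero, which is also why instruction index 0 cannot be fixed up.
 */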
90 
91 /* Note we want this to be 64-bit aligned so that the end of our array is
92  * actually the end of the structure.
93  */
94 #define MAX_ENTRIES 11
95 
96 struct test_val {
97 	unsigned int index;
98 	int foo[MAX_ENTRIES];
99 };
100 
101 struct other_val {
102 	long long foo;
103 	long long bar;
104 };
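/* With MAX_ENTRIES == 11, sizeof(struct test_val) works out to 48 bytes
 * (4 + 11 * 4) and sizeof(struct other_val) to 16 bytes; these presumably
 * back the fixup_map_*_48b and fixup_map_hash_16b maps, while the *_8b maps
 * use a plain long long value.
 */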
105 
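/* Fill helpers generate programs too large to spell out in the tests[] table;
 * the runner presumably invokes ->fill_helper(self) right before loading so
 * that self->insns (and, for some helpers, self->retval) gets populated. The
 * helper below emits close to BPF_MAXINSNS instructions: five rounds of
 * PUSH_CNT {ld_abs, vlan_push} blocks followed by PUSH_CNT {ld_abs, vlan_pop}
 * blocks, each check jumping forward to the final exit on mismatch, padded
 * with MOVs up to len - 1 and terminated by a single BPF_EXIT_INSN().
 */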
106 static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
107 {
108 	/* test: ({skb->data[0], vlan_push} x PUSH_CNT + {skb->data[0], vlan_pop} x PUSH_CNT) x 5 */
109 #define PUSH_CNT 51
110 	unsigned int len = BPF_MAXINSNS;
111 	struct bpf_insn *insn = self->insns;
112 	int i = 0, j, k = 0;
113 
114 	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
115 loop:
116 	for (j = 0; j < PUSH_CNT; j++) {
117 		insn[i++] = BPF_LD_ABS(BPF_B, 0);
118 		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
119 		i++;
120 		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
121 		insn[i++] = BPF_MOV64_IMM(BPF_REG_2, 1);
122 		insn[i++] = BPF_MOV64_IMM(BPF_REG_3, 2);
123 		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
124 					 BPF_FUNC_skb_vlan_push),
125 		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
126 		i++;
127 	}
128 
129 	for (j = 0; j < PUSH_CNT; j++) {
130 		insn[i++] = BPF_LD_ABS(BPF_B, 0);
131 		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
132 		i++;
133 		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
134 		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
135 					 BPF_FUNC_skb_vlan_pop),
136 		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
137 		i++;
138 	}
139 	if (++k < 5)
140 		goto loop;
141 
142 	for (; i < len - 1; i++)
143 		insn[i] = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 0xbef);
144 	insn[len - 1] = BPF_EXIT_INSN();
145 }
146 
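/* Companion helper: one LD_ABS of byte 0, a JEQ that (when taken) jumps over
 * everything else straight to the final exit, and the remaining slots up to
 * len - 1 filled with further LD_ABS loads of byte 1. The interesting part is
 * a conditional jump whose target lies beyond a long run of LD_ABS
 * instructions.
 */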
147 static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)
148 {
149 	struct bpf_insn *insn = self->insns;
150 	unsigned int len = BPF_MAXINSNS;
151 	int i = 0;
152 
153 	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
154 	insn[i++] = BPF_LD_ABS(BPF_B, 0);
155 	insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 10, len - i - 2);
156 	i++;
157 	while (i < len - 1)
158 		insn[i++] = BPF_LD_ABS(BPF_B, 1);
159 	insn[i] = BPF_EXIT_INSN();
160 }
161 
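/* Here self->retval does double duty: on entry it is (roughly) the number of
 * instructions to emit, on return it holds the expected program result. The
 * generated code XORs a series of pseudo-random 64-bit immediates into R0 and
 * finally folds the upper 32 bits into the lower ones, so the helper can
 * precompute the same value on the host side:
 *
 *	res ^= val;		// once per emitted BPF_LD_IMM64
 *	res ^= res >> 32;	// mirrors the trailing RSH/XOR pair
 *	self->retval = (uint32_t)res;
 */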
162 static void bpf_fill_rand_ld_dw(struct bpf_test *self)
163 {
164 	struct bpf_insn *insn = self->insns;
165 	uint64_t res = 0;
166 	int i = 0;
167 
168 	insn[i++] = BPF_MOV32_IMM(BPF_REG_0, 0);
169 	while (i < self->retval) {
170 		uint64_t val = bpf_semi_rand_get();
171 		struct bpf_insn tmp[2] = { BPF_LD_IMM64(BPF_REG_1, val) };
172 
173 		res ^= val;
174 		insn[i++] = tmp[0];
175 		insn[i++] = tmp[1];
176 		insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
177 	}
178 	insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
179 	insn[i++] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32);
180 	insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
181 	insn[i] = BPF_EXIT_INSN();
182 	res ^= (res >> 32);
183 	self->retval = (uint32_t)res;
184 }
185 
186 /* BPF_SK_LOOKUP expands to 13 instructions; account for them when computing map fixup offsets */
187 #define BPF_SK_LOOKUP							\
188 	/* struct bpf_sock_tuple tuple = {} */				\
189 	BPF_MOV64_IMM(BPF_REG_2, 0),					\
190 	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8),			\
191 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -16),		\
192 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -24),		\
193 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -32),		\
194 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -40),		\
195 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -48),		\
196 	/* sk = sk_lookup_tcp(ctx, &tuple, sizeof tuple, 0, 0) */	\
197 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),				\
198 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),				\
199 	BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),	\
200 	BPF_MOV64_IMM(BPF_REG_4, 0),					\
201 	BPF_MOV64_IMM(BPF_REG_5, 0),					\
202 	BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp)
203 
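/* A minimal sketch (offsets and jump targets illustrative) of how the macro
 * is meant to be dropped into a test body: the call leaves a socket pointer
 * or NULL in R0, which should be NULL-checked and, in reference-tracked
 * programs, released again via BPF_FUNC_sk_release:
 *
 *	BPF_SK_LOOKUP,
 *	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
 *	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
 *	BPF_EMIT_CALL(BPF_FUNC_sk_release),
 *	BPF_EXIT_INSN(),
 */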
204 static struct bpf_test tests[] = {
205 	{
206 		"add+sub+mul",
207 		.insns = {
208 			BPF_MOV64_IMM(BPF_REG_1, 1),
209 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
210 			BPF_MOV64_IMM(BPF_REG_2, 3),
211 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
212 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
213 			BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
214 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
215 			BPF_EXIT_INSN(),
216 		},
217 		.result = ACCEPT,
218 		.retval = -3,
219 	},
220 	{
221 		"DIV32 by 0, zero check 1",
222 		.insns = {
223 			BPF_MOV32_IMM(BPF_REG_0, 42),
224 			BPF_MOV32_IMM(BPF_REG_1, 0),
225 			BPF_MOV32_IMM(BPF_REG_2, 1),
226 			BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
227 			BPF_EXIT_INSN(),
228 		},
229 		.result = ACCEPT,
230 		.retval = 42,
231 	},
232 	{
233 		"DIV32 by 0, zero check 2",
234 		.insns = {
235 			BPF_MOV32_IMM(BPF_REG_0, 42),
236 			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
237 			BPF_MOV32_IMM(BPF_REG_2, 1),
238 			BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
239 			BPF_EXIT_INSN(),
240 		},
241 		.result = ACCEPT,
242 		.retval = 42,
243 	},
244 	{
245 		"DIV64 by 0, zero check",
246 		.insns = {
247 			BPF_MOV32_IMM(BPF_REG_0, 42),
248 			BPF_MOV32_IMM(BPF_REG_1, 0),
249 			BPF_MOV32_IMM(BPF_REG_2, 1),
250 			BPF_ALU64_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
251 			BPF_EXIT_INSN(),
252 		},
253 		.result = ACCEPT,
254 		.retval = 42,
255 	},
256 	{
257 		"MOD32 by 0, zero check 1",
258 		.insns = {
259 			BPF_MOV32_IMM(BPF_REG_0, 42),
260 			BPF_MOV32_IMM(BPF_REG_1, 0),
261 			BPF_MOV32_IMM(BPF_REG_2, 1),
262 			BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
263 			BPF_EXIT_INSN(),
264 		},
265 		.result = ACCEPT,
266 		.retval = 42,
267 	},
268 	{
269 		"MOD32 by 0, zero check 2",
270 		.insns = {
271 			BPF_MOV32_IMM(BPF_REG_0, 42),
272 			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
273 			BPF_MOV32_IMM(BPF_REG_2, 1),
274 			BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
275 			BPF_EXIT_INSN(),
276 		},
277 		.result = ACCEPT,
278 		.retval = 42,
279 	},
280 	{
281 		"MOD64 by 0, zero check",
282 		.insns = {
283 			BPF_MOV32_IMM(BPF_REG_0, 42),
284 			BPF_MOV32_IMM(BPF_REG_1, 0),
285 			BPF_MOV32_IMM(BPF_REG_2, 1),
286 			BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
287 			BPF_EXIT_INSN(),
288 		},
289 		.result = ACCEPT,
290 		.retval = 42,
291 	},
292 	{
293 		"DIV32 by 0, zero check ok, cls",
294 		.insns = {
295 			BPF_MOV32_IMM(BPF_REG_0, 42),
296 			BPF_MOV32_IMM(BPF_REG_1, 2),
297 			BPF_MOV32_IMM(BPF_REG_2, 16),
298 			BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
299 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
300 			BPF_EXIT_INSN(),
301 		},
302 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
303 		.result = ACCEPT,
304 		.retval = 8,
305 	},
306 	{
307 		"DIV32 by 0, zero check 1, cls",
308 		.insns = {
309 			BPF_MOV32_IMM(BPF_REG_1, 0),
310 			BPF_MOV32_IMM(BPF_REG_0, 1),
311 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
312 			BPF_EXIT_INSN(),
313 		},
314 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
315 		.result = ACCEPT,
316 		.retval = 0,
317 	},
318 	{
319 		"DIV32 by 0, zero check 2, cls",
320 		.insns = {
321 			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
322 			BPF_MOV32_IMM(BPF_REG_0, 1),
323 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
324 			BPF_EXIT_INSN(),
325 		},
326 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
327 		.result = ACCEPT,
328 		.retval = 0,
329 	},
330 	{
331 		"DIV64 by 0, zero check, cls",
332 		.insns = {
333 			BPF_MOV32_IMM(BPF_REG_1, 0),
334 			BPF_MOV32_IMM(BPF_REG_0, 1),
335 			BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
336 			BPF_EXIT_INSN(),
337 		},
338 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
339 		.result = ACCEPT,
340 		.retval = 0,
341 	},
342 	{
343 		"MOD32 by 0, zero check ok, cls",
344 		.insns = {
345 			BPF_MOV32_IMM(BPF_REG_0, 42),
346 			BPF_MOV32_IMM(BPF_REG_1, 3),
347 			BPF_MOV32_IMM(BPF_REG_2, 5),
348 			BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
349 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
350 			BPF_EXIT_INSN(),
351 		},
352 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
353 		.result = ACCEPT,
354 		.retval = 2,
355 	},
356 	{
357 		"MOD32 by 0, zero check 1, cls",
358 		.insns = {
359 			BPF_MOV32_IMM(BPF_REG_1, 0),
360 			BPF_MOV32_IMM(BPF_REG_0, 1),
361 			BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
362 			BPF_EXIT_INSN(),
363 		},
364 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
365 		.result = ACCEPT,
366 		.retval = 1,
367 	},
368 	{
369 		"MOD32 by 0, zero check 2, cls",
370 		.insns = {
371 			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
372 			BPF_MOV32_IMM(BPF_REG_0, 1),
373 			BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
374 			BPF_EXIT_INSN(),
375 		},
376 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
377 		.result = ACCEPT,
378 		.retval = 1,
379 	},
380 	{
381 		"MOD64 by 0, zero check 1, cls",
382 		.insns = {
383 			BPF_MOV32_IMM(BPF_REG_1, 0),
384 			BPF_MOV32_IMM(BPF_REG_0, 2),
385 			BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
386 			BPF_EXIT_INSN(),
387 		},
388 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
389 		.result = ACCEPT,
390 		.retval = 2,
391 	},
392 	{
393 		"MOD64 by 0, zero check 2, cls",
394 		.insns = {
395 			BPF_MOV32_IMM(BPF_REG_1, 0),
396 			BPF_MOV32_IMM(BPF_REG_0, -1),
397 			BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
398 			BPF_EXIT_INSN(),
399 		},
400 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
401 		.result = ACCEPT,
402 		.retval = -1,
403 	},
404 	/* Just make sure that JITs use udiv/umod, as otherwise we would get
405 	 * an exception from INT_MIN/-1 overflow, similar to the div-by-zero
406 	 * case.
407 	 */
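	/* In other words, BPF_DIV/BPF_MOD are defined as unsigned, so
	 * 0x80000000 / 0xffffffff is 0 and 0x80000000 % 0xffffffff is
	 * 0x80000000, which is exactly what the retvals of the checks below
	 * encode; a signed idiv on x86 would instead trap on INT_MIN / -1.
	 */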
408 	{
409 		"DIV32 overflow, check 1",
410 		.insns = {
411 			BPF_MOV32_IMM(BPF_REG_1, -1),
412 			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
413 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
414 			BPF_EXIT_INSN(),
415 		},
416 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
417 		.result = ACCEPT,
418 		.retval = 0,
419 	},
420 	{
421 		"DIV32 overflow, check 2",
422 		.insns = {
423 			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
424 			BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, -1),
425 			BPF_EXIT_INSN(),
426 		},
427 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
428 		.result = ACCEPT,
429 		.retval = 0,
430 	},
431 	{
432 		"DIV64 overflow, check 1",
433 		.insns = {
434 			BPF_MOV64_IMM(BPF_REG_1, -1),
435 			BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
436 			BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
437 			BPF_EXIT_INSN(),
438 		},
439 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
440 		.result = ACCEPT,
441 		.retval = 0,
442 	},
443 	{
444 		"DIV64 overflow, check 2",
445 		.insns = {
446 			BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
447 			BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, -1),
448 			BPF_EXIT_INSN(),
449 		},
450 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
451 		.result = ACCEPT,
452 		.retval = 0,
453 	},
454 	{
455 		"MOD32 overflow, check 1",
456 		.insns = {
457 			BPF_MOV32_IMM(BPF_REG_1, -1),
458 			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
459 			BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
460 			BPF_EXIT_INSN(),
461 		},
462 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
463 		.result = ACCEPT,
464 		.retval = INT_MIN,
465 	},
466 	{
467 		"MOD32 overflow, check 2",
468 		.insns = {
469 			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
470 			BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, -1),
471 			BPF_EXIT_INSN(),
472 		},
473 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
474 		.result = ACCEPT,
475 		.retval = INT_MIN,
476 	},
477 	{
478 		"MOD64 overflow, check 1",
479 		.insns = {
480 			BPF_MOV64_IMM(BPF_REG_1, -1),
481 			BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
482 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
483 			BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
484 			BPF_MOV32_IMM(BPF_REG_0, 0),
485 			BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
486 			BPF_MOV32_IMM(BPF_REG_0, 1),
487 			BPF_EXIT_INSN(),
488 		},
489 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
490 		.result = ACCEPT,
491 		.retval = 1,
492 	},
493 	{
494 		"MOD64 overflow, check 2",
495 		.insns = {
496 			BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
497 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
498 			BPF_ALU64_IMM(BPF_MOD, BPF_REG_2, -1),
499 			BPF_MOV32_IMM(BPF_REG_0, 0),
500 			BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
501 			BPF_MOV32_IMM(BPF_REG_0, 1),
502 			BPF_EXIT_INSN(),
503 		},
504 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
505 		.result = ACCEPT,
506 		.retval = 1,
507 	},
508 	{
509 		"xor32 zero extend check",
510 		.insns = {
511 			BPF_MOV32_IMM(BPF_REG_2, -1),
512 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 32),
513 			BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 0xffff),
514 			BPF_ALU32_REG(BPF_XOR, BPF_REG_2, BPF_REG_2),
515 			BPF_MOV32_IMM(BPF_REG_0, 2),
516 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 1),
517 			BPF_MOV32_IMM(BPF_REG_0, 1),
518 			BPF_EXIT_INSN(),
519 		},
520 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
521 		.result = ACCEPT,
522 		.retval = 1,
523 	},
524 	{
525 		"empty prog",
526 		.insns = {
527 		},
528 		.errstr = "unknown opcode 00",
529 		.result = REJECT,
530 	},
531 	{
532 		"only exit insn",
533 		.insns = {
534 			BPF_EXIT_INSN(),
535 		},
536 		.errstr = "R0 !read_ok",
537 		.result = REJECT,
538 	},
539 	{
540 		"unreachable",
541 		.insns = {
542 			BPF_EXIT_INSN(),
543 			BPF_EXIT_INSN(),
544 		},
545 		.errstr = "unreachable",
546 		.result = REJECT,
547 	},
548 	{
549 		"unreachable2",
550 		.insns = {
551 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
552 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
553 			BPF_EXIT_INSN(),
554 		},
555 		.errstr = "unreachable",
556 		.result = REJECT,
557 	},
558 	{
559 		"out of range jump",
560 		.insns = {
561 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
562 			BPF_EXIT_INSN(),
563 		},
564 		.errstr = "jump out of range",
565 		.result = REJECT,
566 	},
567 	{
568 		"out of range jump2",
569 		.insns = {
570 			BPF_JMP_IMM(BPF_JA, 0, 0, -2),
571 			BPF_EXIT_INSN(),
572 		},
573 		.errstr = "jump out of range",
574 		.result = REJECT,
575 	},
576 	{
577 		"test1 ld_imm64",
578 		.insns = {
579 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
580 			BPF_LD_IMM64(BPF_REG_0, 0),
581 			BPF_LD_IMM64(BPF_REG_0, 0),
582 			BPF_LD_IMM64(BPF_REG_0, 1),
583 			BPF_LD_IMM64(BPF_REG_0, 1),
584 			BPF_MOV64_IMM(BPF_REG_0, 2),
585 			BPF_EXIT_INSN(),
586 		},
587 		.errstr = "invalid BPF_LD_IMM insn",
588 		.errstr_unpriv = "R1 pointer comparison",
589 		.result = REJECT,
590 	},
591 	{
592 		"test2 ld_imm64",
593 		.insns = {
594 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
595 			BPF_LD_IMM64(BPF_REG_0, 0),
596 			BPF_LD_IMM64(BPF_REG_0, 0),
597 			BPF_LD_IMM64(BPF_REG_0, 1),
598 			BPF_LD_IMM64(BPF_REG_0, 1),
599 			BPF_EXIT_INSN(),
600 		},
601 		.errstr = "invalid BPF_LD_IMM insn",
602 		.errstr_unpriv = "R1 pointer comparison",
603 		.result = REJECT,
604 	},
605 	{
606 		"test3 ld_imm64",
607 		.insns = {
608 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
609 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
610 			BPF_LD_IMM64(BPF_REG_0, 0),
611 			BPF_LD_IMM64(BPF_REG_0, 0),
612 			BPF_LD_IMM64(BPF_REG_0, 1),
613 			BPF_LD_IMM64(BPF_REG_0, 1),
614 			BPF_EXIT_INSN(),
615 		},
616 		.errstr = "invalid bpf_ld_imm64 insn",
617 		.result = REJECT,
618 	},
619 	{
620 		"test4 ld_imm64",
621 		.insns = {
622 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
623 			BPF_EXIT_INSN(),
624 		},
625 		.errstr = "invalid bpf_ld_imm64 insn",
626 		.result = REJECT,
627 	},
628 	{
629 		"test5 ld_imm64",
630 		.insns = {
631 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
632 		},
633 		.errstr = "invalid bpf_ld_imm64 insn",
634 		.result = REJECT,
635 	},
636 	{
637 		"test6 ld_imm64",
638 		.insns = {
639 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
640 			BPF_RAW_INSN(0, 0, 0, 0, 0),
641 			BPF_EXIT_INSN(),
642 		},
643 		.result = ACCEPT,
644 	},
645 	{
646 		"test7 ld_imm64",
647 		.insns = {
648 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
649 			BPF_RAW_INSN(0, 0, 0, 0, 1),
650 			BPF_EXIT_INSN(),
651 		},
652 		.result = ACCEPT,
653 		.retval = 1,
654 	},
655 	{
656 		"test8 ld_imm64",
657 		.insns = {
658 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
659 			BPF_RAW_INSN(0, 0, 0, 0, 1),
660 			BPF_EXIT_INSN(),
661 		},
662 		.errstr = "uses reserved fields",
663 		.result = REJECT,
664 	},
665 	{
666 		"test9 ld_imm64",
667 		.insns = {
668 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
669 			BPF_RAW_INSN(0, 0, 0, 1, 1),
670 			BPF_EXIT_INSN(),
671 		},
672 		.errstr = "invalid bpf_ld_imm64 insn",
673 		.result = REJECT,
674 	},
675 	{
676 		"test10 ld_imm64",
677 		.insns = {
678 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
679 			BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
680 			BPF_EXIT_INSN(),
681 		},
682 		.errstr = "invalid bpf_ld_imm64 insn",
683 		.result = REJECT,
684 	},
685 	{
686 		"test11 ld_imm64",
687 		.insns = {
688 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
689 			BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
690 			BPF_EXIT_INSN(),
691 		},
692 		.errstr = "invalid bpf_ld_imm64 insn",
693 		.result = REJECT,
694 	},
695 	{
696 		"test12 ld_imm64",
697 		.insns = {
698 			BPF_MOV64_IMM(BPF_REG_1, 0),
699 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
700 			BPF_RAW_INSN(0, 0, 0, 0, 1),
701 			BPF_EXIT_INSN(),
702 		},
703 		.errstr = "not pointing to valid bpf_map",
704 		.result = REJECT,
705 	},
706 	{
707 		"test13 ld_imm64",
708 		.insns = {
709 			BPF_MOV64_IMM(BPF_REG_1, 0),
710 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
711 			BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
712 			BPF_EXIT_INSN(),
713 		},
714 		.errstr = "invalid bpf_ld_imm64 insn",
715 		.result = REJECT,
716 	},
717 	{
718 		"arsh32 on imm",
719 		.insns = {
720 			BPF_MOV64_IMM(BPF_REG_0, 1),
721 			BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5),
722 			BPF_EXIT_INSN(),
723 		},
724 		.result = REJECT,
725 		.errstr = "unknown opcode c4",
726 	},
727 	{
728 		"arsh32 on reg",
729 		.insns = {
730 			BPF_MOV64_IMM(BPF_REG_0, 1),
731 			BPF_MOV64_IMM(BPF_REG_1, 5),
732 			BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
733 			BPF_EXIT_INSN(),
734 		},
735 		.result = REJECT,
736 		.errstr = "unknown opcode cc",
737 	},
738 	{
739 		"arsh64 on imm",
740 		.insns = {
741 			BPF_MOV64_IMM(BPF_REG_0, 1),
742 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_0, 5),
743 			BPF_EXIT_INSN(),
744 		},
745 		.result = ACCEPT,
746 	},
747 	{
748 		"arsh64 on reg",
749 		.insns = {
750 			BPF_MOV64_IMM(BPF_REG_0, 1),
751 			BPF_MOV64_IMM(BPF_REG_1, 5),
752 			BPF_ALU64_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
753 			BPF_EXIT_INSN(),
754 		},
755 		.result = ACCEPT,
756 	},
757 	{
758 		"no bpf_exit",
759 		.insns = {
760 			BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
761 		},
762 		.errstr = "not an exit",
763 		.result = REJECT,
764 	},
765 	{
766 		"loop (back-edge)",
767 		.insns = {
768 			BPF_JMP_IMM(BPF_JA, 0, 0, -1),
769 			BPF_EXIT_INSN(),
770 		},
771 		.errstr = "back-edge",
772 		.result = REJECT,
773 	},
774 	{
775 		"loop2 (back-edge)",
776 		.insns = {
777 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
778 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
779 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
780 			BPF_JMP_IMM(BPF_JA, 0, 0, -4),
781 			BPF_EXIT_INSN(),
782 		},
783 		.errstr = "back-edge",
784 		.result = REJECT,
785 	},
786 	{
787 		"conditional loop",
788 		.insns = {
789 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
790 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
791 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
792 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
793 			BPF_EXIT_INSN(),
794 		},
795 		.errstr = "back-edge",
796 		.result = REJECT,
797 	},
798 	{
799 		"read uninitialized register",
800 		.insns = {
801 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
802 			BPF_EXIT_INSN(),
803 		},
804 		.errstr = "R2 !read_ok",
805 		.result = REJECT,
806 	},
807 	{
808 		"read invalid register",
809 		.insns = {
810 			BPF_MOV64_REG(BPF_REG_0, -1),
811 			BPF_EXIT_INSN(),
812 		},
813 		.errstr = "R15 is invalid",
814 		.result = REJECT,
815 	},
816 	{
817 		"program doesn't init R0 before exit",
818 		.insns = {
819 			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
820 			BPF_EXIT_INSN(),
821 		},
822 		.errstr = "R0 !read_ok",
823 		.result = REJECT,
824 	},
825 	{
826 		"program doesn't init R0 before exit in all branches",
827 		.insns = {
828 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
829 			BPF_MOV64_IMM(BPF_REG_0, 1),
830 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
831 			BPF_EXIT_INSN(),
832 		},
833 		.errstr = "R0 !read_ok",
834 		.errstr_unpriv = "R1 pointer comparison",
835 		.result = REJECT,
836 	},
837 	{
838 		"stack out of bounds",
839 		.insns = {
840 			BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
841 			BPF_EXIT_INSN(),
842 		},
843 		.errstr = "invalid stack",
844 		.result = REJECT,
845 	},
846 	{
847 		"invalid call insn1",
848 		.insns = {
849 			BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
850 			BPF_EXIT_INSN(),
851 		},
852 		.errstr = "unknown opcode 8d",
853 		.result = REJECT,
854 	},
855 	{
856 		"invalid call insn2",
857 		.insns = {
858 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
859 			BPF_EXIT_INSN(),
860 		},
861 		.errstr = "BPF_CALL uses reserved",
862 		.result = REJECT,
863 	},
864 	{
865 		"invalid function call",
866 		.insns = {
867 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
868 			BPF_EXIT_INSN(),
869 		},
870 		.errstr = "invalid func unknown#1234567",
871 		.result = REJECT,
872 	},
873 	{
874 		"uninitialized stack1",
875 		.insns = {
876 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
877 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
878 			BPF_LD_MAP_FD(BPF_REG_1, 0),
879 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
880 				     BPF_FUNC_map_lookup_elem),
881 			BPF_EXIT_INSN(),
882 		},
883 		.fixup_map_hash_8b = { 2 },
884 		.errstr = "invalid indirect read from stack",
885 		.result = REJECT,
886 	},
887 	{
888 		"uninitialized stack2",
889 		.insns = {
890 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
891 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
892 			BPF_EXIT_INSN(),
893 		},
894 		.errstr = "invalid read from stack",
895 		.result = REJECT,
896 	},
897 	{
898 		"invalid fp arithmetic",
899 		/* If this ever gets changed, make sure JITs can deal with it. */
900 		.insns = {
901 			BPF_MOV64_IMM(BPF_REG_0, 0),
902 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
903 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
904 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
905 			BPF_EXIT_INSN(),
906 		},
907 		.errstr = "R1 subtraction from stack pointer",
908 		.result = REJECT,
909 	},
910 	{
911 		"non-invalid fp arithmetic",
912 		.insns = {
913 			BPF_MOV64_IMM(BPF_REG_0, 0),
914 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
915 			BPF_EXIT_INSN(),
916 		},
917 		.result = ACCEPT,
918 	},
919 	{
920 		"invalid argument register",
921 		.insns = {
922 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
923 				     BPF_FUNC_get_cgroup_classid),
924 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
925 				     BPF_FUNC_get_cgroup_classid),
926 			BPF_EXIT_INSN(),
927 		},
928 		.errstr = "R1 !read_ok",
929 		.result = REJECT,
930 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
931 	},
932 	{
933 		"non-invalid argument register",
934 		.insns = {
935 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
936 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
937 				     BPF_FUNC_get_cgroup_classid),
938 			BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
939 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
940 				     BPF_FUNC_get_cgroup_classid),
941 			BPF_EXIT_INSN(),
942 		},
943 		.result = ACCEPT,
944 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
945 	},
946 	{
947 		"check valid spill/fill",
948 		.insns = {
949 			/* spill R1(ctx) into stack */
950 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
951 			/* fill it back into R2 */
952 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
953 			/* should be able to access R0 = *(R2 + 8) */
954 			/* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
955 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
956 			BPF_EXIT_INSN(),
957 		},
958 		.errstr_unpriv = "R0 leaks addr",
959 		.result = ACCEPT,
960 		.result_unpriv = REJECT,
961 		.retval = POINTER_VALUE,
962 	},
963 	{
964 		"check valid spill/fill, skb mark",
965 		.insns = {
966 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
967 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
968 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
969 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
970 				    offsetof(struct __sk_buff, mark)),
971 			BPF_EXIT_INSN(),
972 		},
973 		.result = ACCEPT,
974 		.result_unpriv = ACCEPT,
975 	},
976 	{
977 		"check corrupted spill/fill",
978 		.insns = {
979 			/* spill R1(ctx) into stack */
980 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
981 			/* mess up with R1 pointer on stack */
982 			BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
983 			/* fill back into R0 should fail */
984 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
985 			BPF_EXIT_INSN(),
986 		},
987 		.errstr_unpriv = "attempt to corrupt spilled",
988 		.errstr = "corrupted spill",
989 		.result = REJECT,
990 	},
991 	{
992 		"invalid src register in STX",
993 		.insns = {
994 			BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
995 			BPF_EXIT_INSN(),
996 		},
997 		.errstr = "R15 is invalid",
998 		.result = REJECT,
999 	},
1000 	{
1001 		"invalid dst register in STX",
1002 		.insns = {
1003 			BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
1004 			BPF_EXIT_INSN(),
1005 		},
1006 		.errstr = "R14 is invalid",
1007 		.result = REJECT,
1008 	},
1009 	{
1010 		"invalid dst register in ST",
1011 		.insns = {
1012 			BPF_ST_MEM(BPF_B, 14, -1, -1),
1013 			BPF_EXIT_INSN(),
1014 		},
1015 		.errstr = "R14 is invalid",
1016 		.result = REJECT,
1017 	},
1018 	{
1019 		"invalid src register in LDX",
1020 		.insns = {
1021 			BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
1022 			BPF_EXIT_INSN(),
1023 		},
1024 		.errstr = "R12 is invalid",
1025 		.result = REJECT,
1026 	},
1027 	{
1028 		"invalid dst register in LDX",
1029 		.insns = {
1030 			BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
1031 			BPF_EXIT_INSN(),
1032 		},
1033 		.errstr = "R11 is invalid",
1034 		.result = REJECT,
1035 	},
1036 	{
1037 		"junk insn",
1038 		.insns = {
1039 			BPF_RAW_INSN(0, 0, 0, 0, 0),
1040 			BPF_EXIT_INSN(),
1041 		},
1042 		.errstr = "unknown opcode 00",
1043 		.result = REJECT,
1044 	},
1045 	{
1046 		"junk insn2",
1047 		.insns = {
1048 			BPF_RAW_INSN(1, 0, 0, 0, 0),
1049 			BPF_EXIT_INSN(),
1050 		},
1051 		.errstr = "BPF_LDX uses reserved fields",
1052 		.result = REJECT,
1053 	},
1054 	{
1055 		"junk insn3",
1056 		.insns = {
1057 			BPF_RAW_INSN(-1, 0, 0, 0, 0),
1058 			BPF_EXIT_INSN(),
1059 		},
1060 		.errstr = "unknown opcode ff",
1061 		.result = REJECT,
1062 	},
1063 	{
1064 		"junk insn4",
1065 		.insns = {
1066 			BPF_RAW_INSN(-1, -1, -1, -1, -1),
1067 			BPF_EXIT_INSN(),
1068 		},
1069 		.errstr = "unknown opcode ff",
1070 		.result = REJECT,
1071 	},
1072 	{
1073 		"junk insn5",
1074 		.insns = {
1075 			BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
1076 			BPF_EXIT_INSN(),
1077 		},
1078 		.errstr = "BPF_ALU uses reserved fields",
1079 		.result = REJECT,
1080 	},
1081 	{
1082 		"misaligned read from stack",
1083 		.insns = {
1084 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1085 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
1086 			BPF_EXIT_INSN(),
1087 		},
1088 		.errstr = "misaligned stack access",
1089 		.result = REJECT,
1090 	},
1091 	{
1092 		"invalid map_fd for function call",
1093 		.insns = {
1094 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1095 			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
1096 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1097 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1098 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1099 				     BPF_FUNC_map_delete_elem),
1100 			BPF_EXIT_INSN(),
1101 		},
1102 		.errstr = "fd 0 is not pointing to valid bpf_map",
1103 		.result = REJECT,
1104 	},
1105 	{
1106 		"don't check return value before access",
1107 		.insns = {
1108 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1109 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1110 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1111 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1112 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1113 				     BPF_FUNC_map_lookup_elem),
1114 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1115 			BPF_EXIT_INSN(),
1116 		},
1117 		.fixup_map_hash_8b = { 3 },
1118 		.errstr = "R0 invalid mem access 'map_value_or_null'",
1119 		.result = REJECT,
1120 	},
1121 	{
1122 		"access memory with incorrect alignment",
1123 		.insns = {
1124 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1125 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1126 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1127 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1128 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1129 				     BPF_FUNC_map_lookup_elem),
1130 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1131 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
1132 			BPF_EXIT_INSN(),
1133 		},
1134 		.fixup_map_hash_8b = { 3 },
1135 		.errstr = "misaligned value access",
1136 		.result = REJECT,
1137 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1138 	},
1139 	{
1140 		"sometimes access memory with incorrect alignment",
1141 		.insns = {
1142 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1143 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1144 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1145 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1146 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1147 				     BPF_FUNC_map_lookup_elem),
1148 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
1149 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1150 			BPF_EXIT_INSN(),
1151 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
1152 			BPF_EXIT_INSN(),
1153 		},
1154 		.fixup_map_hash_8b = { 3 },
1155 		.errstr = "R0 invalid mem access",
1156 		.errstr_unpriv = "R0 leaks addr",
1157 		.result = REJECT,
1158 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1159 	},
1160 	{
1161 		"jump test 1",
1162 		.insns = {
1163 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1164 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
1165 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
1166 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1167 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
1168 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
1169 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
1170 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
1171 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
1172 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
1173 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
1174 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
1175 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
1176 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
1177 			BPF_MOV64_IMM(BPF_REG_0, 0),
1178 			BPF_EXIT_INSN(),
1179 		},
1180 		.errstr_unpriv = "R1 pointer comparison",
1181 		.result_unpriv = REJECT,
1182 		.result = ACCEPT,
1183 	},
1184 	{
1185 		"jump test 2",
1186 		.insns = {
1187 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1188 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
1189 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1190 			BPF_JMP_IMM(BPF_JA, 0, 0, 14),
1191 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
1192 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
1193 			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
1194 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
1195 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
1196 			BPF_JMP_IMM(BPF_JA, 0, 0, 8),
1197 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
1198 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
1199 			BPF_JMP_IMM(BPF_JA, 0, 0, 5),
1200 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
1201 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
1202 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1203 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
1204 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
1205 			BPF_MOV64_IMM(BPF_REG_0, 0),
1206 			BPF_EXIT_INSN(),
1207 		},
1208 		.errstr_unpriv = "R1 pointer comparison",
1209 		.result_unpriv = REJECT,
1210 		.result = ACCEPT,
1211 	},
1212 	{
1213 		"jump test 3",
1214 		.insns = {
1215 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1216 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
1217 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1218 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1219 			BPF_JMP_IMM(BPF_JA, 0, 0, 19),
1220 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
1221 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
1222 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1223 			BPF_JMP_IMM(BPF_JA, 0, 0, 15),
1224 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
1225 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
1226 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
1227 			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
1228 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
1229 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
1230 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
1231 			BPF_JMP_IMM(BPF_JA, 0, 0, 7),
1232 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
1233 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
1234 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
1235 			BPF_JMP_IMM(BPF_JA, 0, 0, 3),
1236 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
1237 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
1238 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
1239 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1240 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1241 				     BPF_FUNC_map_delete_elem),
1242 			BPF_EXIT_INSN(),
1243 		},
1244 		.fixup_map_hash_8b = { 24 },
1245 		.errstr_unpriv = "R1 pointer comparison",
1246 		.result_unpriv = REJECT,
1247 		.result = ACCEPT,
1248 		.retval = -ENOENT,
1249 	},
1250 	{
1251 		"jump test 4",
1252 		.insns = {
1253 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1254 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1255 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1256 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1257 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1258 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1259 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1260 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1261 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1262 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1263 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1264 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1265 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1266 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1267 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1268 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1269 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1270 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1271 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1272 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1273 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1274 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1275 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1276 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1277 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1278 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1279 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1280 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1281 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1282 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1283 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1284 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1285 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1286 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1287 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1288 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1289 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1290 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1291 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1292 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1293 			BPF_MOV64_IMM(BPF_REG_0, 0),
1294 			BPF_EXIT_INSN(),
1295 		},
1296 		.errstr_unpriv = "R1 pointer comparison",
1297 		.result_unpriv = REJECT,
1298 		.result = ACCEPT,
1299 	},
1300 	{
1301 		"jump test 5",
1302 		.insns = {
1303 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1304 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
1305 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1306 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1307 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1308 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1309 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1310 			BPF_MOV64_IMM(BPF_REG_0, 0),
1311 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1312 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1313 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1314 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1315 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1316 			BPF_MOV64_IMM(BPF_REG_0, 0),
1317 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1318 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1319 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1320 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1321 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1322 			BPF_MOV64_IMM(BPF_REG_0, 0),
1323 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1324 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1325 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1326 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1327 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1328 			BPF_MOV64_IMM(BPF_REG_0, 0),
1329 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1330 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1331 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1332 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1333 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1334 			BPF_MOV64_IMM(BPF_REG_0, 0),
1335 			BPF_EXIT_INSN(),
1336 		},
1337 		.errstr_unpriv = "R1 pointer comparison",
1338 		.result_unpriv = REJECT,
1339 		.result = ACCEPT,
1340 	},
1341 	{
1342 		"access skb fields ok",
1343 		.insns = {
1344 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1345 				    offsetof(struct __sk_buff, len)),
1346 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1347 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1348 				    offsetof(struct __sk_buff, mark)),
1349 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1350 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1351 				    offsetof(struct __sk_buff, pkt_type)),
1352 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1353 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1354 				    offsetof(struct __sk_buff, queue_mapping)),
1355 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1356 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1357 				    offsetof(struct __sk_buff, protocol)),
1358 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1359 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1360 				    offsetof(struct __sk_buff, vlan_present)),
1361 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1362 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1363 				    offsetof(struct __sk_buff, vlan_tci)),
1364 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1365 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1366 				    offsetof(struct __sk_buff, napi_id)),
1367 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1368 			BPF_EXIT_INSN(),
1369 		},
1370 		.result = ACCEPT,
1371 	},
1372 	{
1373 		"access skb fields bad1",
1374 		.insns = {
1375 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
1376 			BPF_EXIT_INSN(),
1377 		},
1378 		.errstr = "invalid bpf_context access",
1379 		.result = REJECT,
1380 	},
1381 	{
1382 		"access skb fields bad2",
1383 		.insns = {
1384 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
1385 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1386 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1387 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1388 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1389 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1390 				     BPF_FUNC_map_lookup_elem),
1391 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1392 			BPF_EXIT_INSN(),
1393 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1394 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1395 				    offsetof(struct __sk_buff, pkt_type)),
1396 			BPF_EXIT_INSN(),
1397 		},
1398 		.fixup_map_hash_8b = { 4 },
1399 		.errstr = "different pointers",
1400 		.errstr_unpriv = "R1 pointer comparison",
1401 		.result = REJECT,
1402 	},
1403 	{
1404 		"access skb fields bad3",
1405 		.insns = {
1406 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1407 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1408 				    offsetof(struct __sk_buff, pkt_type)),
1409 			BPF_EXIT_INSN(),
1410 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1411 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1412 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1413 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1414 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1415 				     BPF_FUNC_map_lookup_elem),
1416 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1417 			BPF_EXIT_INSN(),
1418 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1419 			BPF_JMP_IMM(BPF_JA, 0, 0, -12),
1420 		},
1421 		.fixup_map_hash_8b = { 6 },
1422 		.errstr = "different pointers",
1423 		.errstr_unpriv = "R1 pointer comparison",
1424 		.result = REJECT,
1425 	},
1426 	{
1427 		"access skb fields bad4",
1428 		.insns = {
1429 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
1430 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1431 				    offsetof(struct __sk_buff, len)),
1432 			BPF_MOV64_IMM(BPF_REG_0, 0),
1433 			BPF_EXIT_INSN(),
1434 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1435 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1436 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1437 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1438 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1439 				     BPF_FUNC_map_lookup_elem),
1440 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1441 			BPF_EXIT_INSN(),
1442 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1443 			BPF_JMP_IMM(BPF_JA, 0, 0, -13),
1444 		},
1445 		.fixup_map_hash_8b = { 7 },
1446 		.errstr = "different pointers",
1447 		.errstr_unpriv = "R1 pointer comparison",
1448 		.result = REJECT,
1449 	},
1450 	{
1451 		"invalid access __sk_buff family",
1452 		.insns = {
1453 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1454 				    offsetof(struct __sk_buff, family)),
1455 			BPF_EXIT_INSN(),
1456 		},
1457 		.errstr = "invalid bpf_context access",
1458 		.result = REJECT,
1459 	},
1460 	{
1461 		"invalid access __sk_buff remote_ip4",
1462 		.insns = {
1463 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1464 				    offsetof(struct __sk_buff, remote_ip4)),
1465 			BPF_EXIT_INSN(),
1466 		},
1467 		.errstr = "invalid bpf_context access",
1468 		.result = REJECT,
1469 	},
1470 	{
1471 		"invalid access __sk_buff local_ip4",
1472 		.insns = {
1473 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1474 				    offsetof(struct __sk_buff, local_ip4)),
1475 			BPF_EXIT_INSN(),
1476 		},
1477 		.errstr = "invalid bpf_context access",
1478 		.result = REJECT,
1479 	},
1480 	{
1481 		"invalid access __sk_buff remote_ip6",
1482 		.insns = {
1483 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1484 				    offsetof(struct __sk_buff, remote_ip6)),
1485 			BPF_EXIT_INSN(),
1486 		},
1487 		.errstr = "invalid bpf_context access",
1488 		.result = REJECT,
1489 	},
1490 	{
1491 		"invalid access __sk_buff local_ip6",
1492 		.insns = {
1493 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1494 				    offsetof(struct __sk_buff, local_ip6)),
1495 			BPF_EXIT_INSN(),
1496 		},
1497 		.errstr = "invalid bpf_context access",
1498 		.result = REJECT,
1499 	},
1500 	{
1501 		"invalid access __sk_buff remote_port",
1502 		.insns = {
1503 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1504 				    offsetof(struct __sk_buff, remote_port)),
1505 			BPF_EXIT_INSN(),
1506 		},
1507 		.errstr = "invalid bpf_context access",
1508 		.result = REJECT,
1509 	},
1510 	{
1511 		"invalid access __sk_buff local_port",
1512 		.insns = {
1513 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1514 				    offsetof(struct __sk_buff, local_port)),
1515 			BPF_EXIT_INSN(),
1516 		},
1517 		.errstr = "invalid bpf_context access",
1518 		.result = REJECT,
1519 	},
1520 	{
1521 		"valid access __sk_buff family",
1522 		.insns = {
1523 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1524 				    offsetof(struct __sk_buff, family)),
1525 			BPF_EXIT_INSN(),
1526 		},
1527 		.result = ACCEPT,
1528 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1529 	},
1530 	{
1531 		"valid access __sk_buff remote_ip4",
1532 		.insns = {
1533 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1534 				    offsetof(struct __sk_buff, remote_ip4)),
1535 			BPF_EXIT_INSN(),
1536 		},
1537 		.result = ACCEPT,
1538 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1539 	},
1540 	{
1541 		"valid access __sk_buff local_ip4",
1542 		.insns = {
1543 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1544 				    offsetof(struct __sk_buff, local_ip4)),
1545 			BPF_EXIT_INSN(),
1546 		},
1547 		.result = ACCEPT,
1548 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1549 	},
1550 	{
1551 		"valid access __sk_buff remote_ip6",
1552 		.insns = {
1553 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1554 				    offsetof(struct __sk_buff, remote_ip6[0])),
1555 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1556 				    offsetof(struct __sk_buff, remote_ip6[1])),
1557 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1558 				    offsetof(struct __sk_buff, remote_ip6[2])),
1559 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1560 				    offsetof(struct __sk_buff, remote_ip6[3])),
1561 			BPF_EXIT_INSN(),
1562 		},
1563 		.result = ACCEPT,
1564 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1565 	},
1566 	{
1567 		"valid access __sk_buff local_ip6",
1568 		.insns = {
1569 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1570 				    offsetof(struct __sk_buff, local_ip6[0])),
1571 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1572 				    offsetof(struct __sk_buff, local_ip6[1])),
1573 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1574 				    offsetof(struct __sk_buff, local_ip6[2])),
1575 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1576 				    offsetof(struct __sk_buff, local_ip6[3])),
1577 			BPF_EXIT_INSN(),
1578 		},
1579 		.result = ACCEPT,
1580 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1581 	},
1582 	{
1583 		"valid access __sk_buff remote_port",
1584 		.insns = {
1585 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1586 				    offsetof(struct __sk_buff, remote_port)),
1587 			BPF_EXIT_INSN(),
1588 		},
1589 		.result = ACCEPT,
1590 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1591 	},
1592 	{
1593 		"valid access __sk_buff local_port",
1594 		.insns = {
1595 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1596 				    offsetof(struct __sk_buff, local_port)),
1597 			BPF_EXIT_INSN(),
1598 		},
1599 		.result = ACCEPT,
1600 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1601 	},
1602 	{
1603 		"invalid access of tc_classid for SK_SKB",
1604 		.insns = {
1605 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1606 				    offsetof(struct __sk_buff, tc_classid)),
1607 			BPF_EXIT_INSN(),
1608 		},
1609 		.result = REJECT,
1610 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1611 		.errstr = "invalid bpf_context access",
1612 	},
1613 	{
1614 		"invalid access of skb->mark for SK_SKB",
1615 		.insns = {
1616 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1617 				    offsetof(struct __sk_buff, mark)),
1618 			BPF_EXIT_INSN(),
1619 		},
1620 		.result =  REJECT,
1621 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1622 		.errstr = "invalid bpf_context access",
1623 	},
1624 	{
1625 		"check skb->mark is not writeable by SK_SKB",
1626 		.insns = {
1627 			BPF_MOV64_IMM(BPF_REG_0, 0),
1628 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1629 				    offsetof(struct __sk_buff, mark)),
1630 			BPF_EXIT_INSN(),
1631 		},
1632 		.result =  REJECT,
1633 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1634 		.errstr = "invalid bpf_context access",
1635 	},
1636 	{
1637 		"check skb->tc_index is writeable by SK_SKB",
1638 		.insns = {
1639 			BPF_MOV64_IMM(BPF_REG_0, 0),
1640 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1641 				    offsetof(struct __sk_buff, tc_index)),
1642 			BPF_EXIT_INSN(),
1643 		},
1644 		.result = ACCEPT,
1645 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1646 	},
1647 	{
1648 		"check skb->priority is writeable by SK_SKB",
1649 		.insns = {
1650 			BPF_MOV64_IMM(BPF_REG_0, 0),
1651 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1652 				    offsetof(struct __sk_buff, priority)),
1653 			BPF_EXIT_INSN(),
1654 		},
1655 		.result = ACCEPT,
1656 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1657 	},
1658 	{
1659 		"direct packet read for SK_SKB",
1660 		.insns = {
1661 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1662 				    offsetof(struct __sk_buff, data)),
1663 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1664 				    offsetof(struct __sk_buff, data_end)),
1665 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1666 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1667 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1668 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1669 			BPF_MOV64_IMM(BPF_REG_0, 0),
1670 			BPF_EXIT_INSN(),
1671 		},
1672 		.result = ACCEPT,
1673 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1674 	},
1675 	{
1676 		"direct packet write for SK_SKB",
1677 		.insns = {
1678 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1679 				    offsetof(struct __sk_buff, data)),
1680 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1681 				    offsetof(struct __sk_buff, data_end)),
1682 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1683 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1684 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1685 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
1686 			BPF_MOV64_IMM(BPF_REG_0, 0),
1687 			BPF_EXIT_INSN(),
1688 		},
1689 		.result = ACCEPT,
1690 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1691 	},
1692 	{
1693 		"overlapping checks for direct packet access SK_SKB",
1694 		.insns = {
1695 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1696 				    offsetof(struct __sk_buff, data)),
1697 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1698 				    offsetof(struct __sk_buff, data_end)),
1699 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1700 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1701 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
1702 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
1703 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
1704 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
1705 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
1706 			BPF_MOV64_IMM(BPF_REG_0, 0),
1707 			BPF_EXIT_INSN(),
1708 		},
1709 		.result = ACCEPT,
1710 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1711 	},
1712 	{
1713 		"valid access family in SK_MSG",
1714 		.insns = {
1715 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1716 				    offsetof(struct sk_msg_md, family)),
1717 			BPF_EXIT_INSN(),
1718 		},
1719 		.result = ACCEPT,
1720 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1721 	},
1722 	{
1723 		"valid access remote_ip4 in SK_MSG",
1724 		.insns = {
1725 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1726 				    offsetof(struct sk_msg_md, remote_ip4)),
1727 			BPF_EXIT_INSN(),
1728 		},
1729 		.result = ACCEPT,
1730 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1731 	},
1732 	{
1733 		"valid access local_ip4 in SK_MSG",
1734 		.insns = {
1735 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1736 				    offsetof(struct sk_msg_md, local_ip4)),
1737 			BPF_EXIT_INSN(),
1738 		},
1739 		.result = ACCEPT,
1740 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1741 	},
1742 	{
1743 		"valid access remote_port in SK_MSG",
1744 		.insns = {
1745 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1746 				    offsetof(struct sk_msg_md, remote_port)),
1747 			BPF_EXIT_INSN(),
1748 		},
1749 		.result = ACCEPT,
1750 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1751 	},
1752 	{
1753 		"valid access local_port in SK_MSG",
1754 		.insns = {
1755 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1756 				    offsetof(struct sk_msg_md, local_port)),
1757 			BPF_EXIT_INSN(),
1758 		},
1759 		.result = ACCEPT,
1760 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1761 	},
1762 	{
1763 		"valid access remote_ip6 in SK_MSG",
1764 		.insns = {
1765 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1766 				    offsetof(struct sk_msg_md, remote_ip6[0])),
1767 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1768 				    offsetof(struct sk_msg_md, remote_ip6[1])),
1769 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1770 				    offsetof(struct sk_msg_md, remote_ip6[2])),
1771 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1772 				    offsetof(struct sk_msg_md, remote_ip6[3])),
1773 			BPF_EXIT_INSN(),
1774 		},
1775 		.result = ACCEPT,
1776 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1777 	},
1778 	{
1779 		"valid access local_ip6 in SK_MSG",
1780 		.insns = {
1781 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1782 				    offsetof(struct sk_msg_md, local_ip6[0])),
1783 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1784 				    offsetof(struct sk_msg_md, local_ip6[1])),
1785 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1786 				    offsetof(struct sk_msg_md, local_ip6[2])),
1787 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1788 				    offsetof(struct sk_msg_md, local_ip6[3])),
1789 			BPF_EXIT_INSN(),
1790 		},
1791 		.result = ACCEPT,
1792 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1793 	},
1794 	{
1795 		"invalid 64B read of family in SK_MSG",
1796 		.insns = {
1797 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1798 				    offsetof(struct sk_msg_md, family)),
1799 			BPF_EXIT_INSN(),
1800 		},
1801 		.errstr = "invalid bpf_context access",
1802 		.result = REJECT,
1803 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1804 	},
1805 	{
1806 		"invalid read past end of SK_MSG",
1807 		.insns = {
1808 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1809 				    offsetof(struct sk_msg_md, local_port) + 4),
1810 			BPF_EXIT_INSN(),
1811 		},
1812 		.errstr = "R0 !read_ok",
1813 		.result = REJECT,
1814 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1815 	},
1816 	{
1817 		"invalid read offset in SK_MSG",
1818 		.insns = {
1819 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1820 				    offsetof(struct sk_msg_md, family) + 1),
1821 			BPF_EXIT_INSN(),
1822 		},
1823 		.errstr = "invalid bpf_context access",
1824 		.result = REJECT,
1825 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1826 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1827 	},
1828 	{
1829 		"direct packet read for SK_MSG",
1830 		.insns = {
1831 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1832 				    offsetof(struct sk_msg_md, data)),
1833 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
1834 				    offsetof(struct sk_msg_md, data_end)),
1835 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1836 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1837 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1838 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1839 			BPF_MOV64_IMM(BPF_REG_0, 0),
1840 			BPF_EXIT_INSN(),
1841 		},
1842 		.result = ACCEPT,
1843 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1844 	},
1845 	{
1846 		"direct packet write for SK_MSG",
1847 		.insns = {
1848 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1849 				    offsetof(struct sk_msg_md, data)),
1850 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
1851 				    offsetof(struct sk_msg_md, data_end)),
1852 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1853 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1854 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1855 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
1856 			BPF_MOV64_IMM(BPF_REG_0, 0),
1857 			BPF_EXIT_INSN(),
1858 		},
1859 		.result = ACCEPT,
1860 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1861 	},
1862 	{
1863 		"overlapping checks for direct packet access SK_MSG",
1864 		.insns = {
1865 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1866 				    offsetof(struct sk_msg_md, data)),
1867 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
1868 				    offsetof(struct sk_msg_md, data_end)),
1869 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1870 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1871 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
1872 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
1873 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
1874 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
1875 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
1876 			BPF_MOV64_IMM(BPF_REG_0, 0),
1877 			BPF_EXIT_INSN(),
1878 		},
1879 		.result = ACCEPT,
1880 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1881 	},
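	/* Plain socket programs must not be able to write skb->mark or
	 * skb->tc_index; both stores below are rejected.
	 */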
1882 	{
1883 		"check skb->mark is not writeable by sockets",
1884 		.insns = {
1885 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1886 				    offsetof(struct __sk_buff, mark)),
1887 			BPF_EXIT_INSN(),
1888 		},
1889 		.errstr = "invalid bpf_context access",
1890 		.errstr_unpriv = "R1 leaks addr",
1891 		.result = REJECT,
1892 	},
1893 	{
1894 		"check skb->tc_index is not writeable by sockets",
1895 		.insns = {
1896 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1897 				    offsetof(struct __sk_buff, tc_index)),
1898 			BPF_EXIT_INSN(),
1899 		},
1900 		.errstr = "invalid bpf_context access",
1901 		.errstr_unpriv = "R1 leaks addr",
1902 		.result = REJECT,
1903 	},
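	/* The cb[] tests below store to and load from the 20-byte skb->cb
	 * area at byte, half word, word and double word granularity, and
	 * check that misaligned and out-of-range cb accesses are rejected.
	 */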
1904 	{
1905 		"check cb access: byte",
1906 		.insns = {
1907 			BPF_MOV64_IMM(BPF_REG_0, 0),
1908 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1909 				    offsetof(struct __sk_buff, cb[0])),
1910 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1911 				    offsetof(struct __sk_buff, cb[0]) + 1),
1912 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1913 				    offsetof(struct __sk_buff, cb[0]) + 2),
1914 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1915 				    offsetof(struct __sk_buff, cb[0]) + 3),
1916 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1917 				    offsetof(struct __sk_buff, cb[1])),
1918 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1919 				    offsetof(struct __sk_buff, cb[1]) + 1),
1920 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1921 				    offsetof(struct __sk_buff, cb[1]) + 2),
1922 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1923 				    offsetof(struct __sk_buff, cb[1]) + 3),
1924 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1925 				    offsetof(struct __sk_buff, cb[2])),
1926 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1927 				    offsetof(struct __sk_buff, cb[2]) + 1),
1928 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1929 				    offsetof(struct __sk_buff, cb[2]) + 2),
1930 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1931 				    offsetof(struct __sk_buff, cb[2]) + 3),
1932 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1933 				    offsetof(struct __sk_buff, cb[3])),
1934 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1935 				    offsetof(struct __sk_buff, cb[3]) + 1),
1936 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1937 				    offsetof(struct __sk_buff, cb[3]) + 2),
1938 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1939 				    offsetof(struct __sk_buff, cb[3]) + 3),
1940 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1941 				    offsetof(struct __sk_buff, cb[4])),
1942 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1943 				    offsetof(struct __sk_buff, cb[4]) + 1),
1944 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1945 				    offsetof(struct __sk_buff, cb[4]) + 2),
1946 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1947 				    offsetof(struct __sk_buff, cb[4]) + 3),
1948 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1949 				    offsetof(struct __sk_buff, cb[0])),
1950 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1951 				    offsetof(struct __sk_buff, cb[0]) + 1),
1952 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1953 				    offsetof(struct __sk_buff, cb[0]) + 2),
1954 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1955 				    offsetof(struct __sk_buff, cb[0]) + 3),
1956 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1957 				    offsetof(struct __sk_buff, cb[1])),
1958 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1959 				    offsetof(struct __sk_buff, cb[1]) + 1),
1960 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1961 				    offsetof(struct __sk_buff, cb[1]) + 2),
1962 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1963 				    offsetof(struct __sk_buff, cb[1]) + 3),
1964 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1965 				    offsetof(struct __sk_buff, cb[2])),
1966 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1967 				    offsetof(struct __sk_buff, cb[2]) + 1),
1968 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1969 				    offsetof(struct __sk_buff, cb[2]) + 2),
1970 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1971 				    offsetof(struct __sk_buff, cb[2]) + 3),
1972 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1973 				    offsetof(struct __sk_buff, cb[3])),
1974 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1975 				    offsetof(struct __sk_buff, cb[3]) + 1),
1976 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1977 				    offsetof(struct __sk_buff, cb[3]) + 2),
1978 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1979 				    offsetof(struct __sk_buff, cb[3]) + 3),
1980 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1981 				    offsetof(struct __sk_buff, cb[4])),
1982 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1983 				    offsetof(struct __sk_buff, cb[4]) + 1),
1984 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1985 				    offsetof(struct __sk_buff, cb[4]) + 2),
1986 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1987 				    offsetof(struct __sk_buff, cb[4]) + 3),
1988 			BPF_EXIT_INSN(),
1989 		},
1990 		.result = ACCEPT,
1991 	},
1992 	{
1993 		"__sk_buff->hash, offset 0, byte store not permitted",
1994 		.insns = {
1995 			BPF_MOV64_IMM(BPF_REG_0, 0),
1996 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1997 				    offsetof(struct __sk_buff, hash)),
1998 			BPF_EXIT_INSN(),
1999 		},
2000 		.errstr = "invalid bpf_context access",
2001 		.result = REJECT,
2002 	},
2003 	{
2004 		"__sk_buff->tc_index, offset 3, byte store not permitted",
2005 		.insns = {
2006 			BPF_MOV64_IMM(BPF_REG_0, 0),
2007 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2008 				    offsetof(struct __sk_buff, tc_index) + 3),
2009 			BPF_EXIT_INSN(),
2010 		},
2011 		.errstr = "invalid bpf_context access",
2012 		.result = REJECT,
2013 	},
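	/* Narrow loads of skb->hash: byte loads are permitted at all four
	 * offsets, half word loads only at the aligned offsets 0 and 2.
	 * The #if blocks pick the offset per byte order so that little- and
	 * big-endian hosts read the same (least significant) part of the
	 * 32-bit hash.
	 */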
2014 	{
2015 		"check skb->hash byte load permitted",
2016 		.insns = {
2017 			BPF_MOV64_IMM(BPF_REG_0, 0),
2018 #if __BYTE_ORDER == __LITTLE_ENDIAN
2019 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2020 				    offsetof(struct __sk_buff, hash)),
2021 #else
2022 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2023 				    offsetof(struct __sk_buff, hash) + 3),
2024 #endif
2025 			BPF_EXIT_INSN(),
2026 		},
2027 		.result = ACCEPT,
2028 	},
2029 	{
2030 		"check skb->hash byte load permitted 1",
2031 		.insns = {
2032 			BPF_MOV64_IMM(BPF_REG_0, 0),
2033 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2034 				    offsetof(struct __sk_buff, hash) + 1),
2035 			BPF_EXIT_INSN(),
2036 		},
2037 		.result = ACCEPT,
2038 	},
2039 	{
2040 		"check skb->hash byte load permitted 2",
2041 		.insns = {
2042 			BPF_MOV64_IMM(BPF_REG_0, 0),
2043 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2044 				    offsetof(struct __sk_buff, hash) + 2),
2045 			BPF_EXIT_INSN(),
2046 		},
2047 		.result = ACCEPT,
2048 	},
2049 	{
2050 		"check skb->hash byte load permitted 3",
2051 		.insns = {
2052 			BPF_MOV64_IMM(BPF_REG_0, 0),
2053 #if __BYTE_ORDER == __LITTLE_ENDIAN
2054 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2055 				    offsetof(struct __sk_buff, hash) + 3),
2056 #else
2057 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2058 				    offsetof(struct __sk_buff, hash)),
2059 #endif
2060 			BPF_EXIT_INSN(),
2061 		},
2062 		.result = ACCEPT,
2063 	},
2064 	{
2065 		"check cb access: byte, wrong type",
2066 		.insns = {
2067 			BPF_MOV64_IMM(BPF_REG_0, 0),
2068 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2069 				    offsetof(struct __sk_buff, cb[0])),
2070 			BPF_EXIT_INSN(),
2071 		},
2072 		.errstr = "invalid bpf_context access",
2073 		.result = REJECT,
2074 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
2075 	},
2076 	{
2077 		"check cb access: half",
2078 		.insns = {
2079 			BPF_MOV64_IMM(BPF_REG_0, 0),
2080 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2081 				    offsetof(struct __sk_buff, cb[0])),
2082 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2083 				    offsetof(struct __sk_buff, cb[0]) + 2),
2084 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2085 				    offsetof(struct __sk_buff, cb[1])),
2086 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2087 				    offsetof(struct __sk_buff, cb[1]) + 2),
2088 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2089 				    offsetof(struct __sk_buff, cb[2])),
2090 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2091 				    offsetof(struct __sk_buff, cb[2]) + 2),
2092 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2093 				    offsetof(struct __sk_buff, cb[3])),
2094 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2095 				    offsetof(struct __sk_buff, cb[3]) + 2),
2096 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2097 				    offsetof(struct __sk_buff, cb[4])),
2098 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2099 				    offsetof(struct __sk_buff, cb[4]) + 2),
2100 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2101 				    offsetof(struct __sk_buff, cb[0])),
2102 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2103 				    offsetof(struct __sk_buff, cb[0]) + 2),
2104 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2105 				    offsetof(struct __sk_buff, cb[1])),
2106 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2107 				    offsetof(struct __sk_buff, cb[1]) + 2),
2108 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2109 				    offsetof(struct __sk_buff, cb[2])),
2110 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2111 				    offsetof(struct __sk_buff, cb[2]) + 2),
2112 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2113 				    offsetof(struct __sk_buff, cb[3])),
2114 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2115 				    offsetof(struct __sk_buff, cb[3]) + 2),
2116 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2117 				    offsetof(struct __sk_buff, cb[4])),
2118 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2119 				    offsetof(struct __sk_buff, cb[4]) + 2),
2120 			BPF_EXIT_INSN(),
2121 		},
2122 		.result = ACCEPT,
2123 	},
2124 	{
2125 		"check cb access: half, unaligned",
2126 		.insns = {
2127 			BPF_MOV64_IMM(BPF_REG_0, 0),
2128 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2129 				    offsetof(struct __sk_buff, cb[0]) + 1),
2130 			BPF_EXIT_INSN(),
2131 		},
2132 		.errstr = "misaligned context access",
2133 		.result = REJECT,
2134 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2135 	},
2136 	{
2137 		"check __sk_buff->hash, offset 0, half store not permitted",
2138 		.insns = {
2139 			BPF_MOV64_IMM(BPF_REG_0, 0),
2140 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2141 				    offsetof(struct __sk_buff, hash)),
2142 			BPF_EXIT_INSN(),
2143 		},
2144 		.errstr = "invalid bpf_context access",
2145 		.result = REJECT,
2146 	},
2147 	{
2148 		"check __sk_buff->tc_index, offset 2, half store not permitted",
2149 		.insns = {
2150 			BPF_MOV64_IMM(BPF_REG_0, 0),
2151 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2152 				    offsetof(struct __sk_buff, tc_index) + 2),
2153 			BPF_EXIT_INSN(),
2154 		},
2155 		.errstr = "invalid bpf_context access",
2156 		.result = REJECT,
2157 	},
2158 	{
2159 		"check skb->hash half load permitted",
2160 		.insns = {
2161 			BPF_MOV64_IMM(BPF_REG_0, 0),
2162 #if __BYTE_ORDER == __LITTLE_ENDIAN
2163 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2164 				    offsetof(struct __sk_buff, hash)),
2165 #else
2166 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2167 				    offsetof(struct __sk_buff, hash) + 2),
2168 #endif
2169 			BPF_EXIT_INSN(),
2170 		},
2171 		.result = ACCEPT,
2172 	},
2173 	{
2174 		"check skb->hash half load permitted 2",
2175 		.insns = {
2176 			BPF_MOV64_IMM(BPF_REG_0, 0),
2177 #if __BYTE_ORDER == __LITTLE_ENDIAN
2178 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2179 				    offsetof(struct __sk_buff, hash) + 2),
2180 #else
2181 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2182 				    offsetof(struct __sk_buff, hash)),
2183 #endif
2184 			BPF_EXIT_INSN(),
2185 		},
2186 		.result = ACCEPT,
2187 	},
2188 	{
2189 		"check skb->hash half load not permitted, unaligned 1",
2190 		.insns = {
2191 			BPF_MOV64_IMM(BPF_REG_0, 0),
2192 #if __BYTE_ORDER == __LITTLE_ENDIAN
2193 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2194 				    offsetof(struct __sk_buff, hash) + 1),
2195 #else
2196 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2197 				    offsetof(struct __sk_buff, hash) + 3),
2198 #endif
2199 			BPF_EXIT_INSN(),
2200 		},
2201 		.errstr = "invalid bpf_context access",
2202 		.result = REJECT,
		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
2203 	},
2204 	{
2205 		"check skb->hash half load not permitted, unaligned 3",
2206 		.insns = {
2207 			BPF_MOV64_IMM(BPF_REG_0, 0),
2208 #if __BYTE_ORDER == __LITTLE_ENDIAN
2209 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2210 				    offsetof(struct __sk_buff, hash) + 3),
2211 #else
2212 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2213 				    offsetof(struct __sk_buff, hash) + 1),
2214 #endif
2215 			BPF_EXIT_INSN(),
2216 		},
2217 		.errstr = "invalid bpf_context access",
2218 		.result = REJECT,
2219 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
2221 	},
2222 	{
2223 		"check cb access: half, wrong type",
2224 		.insns = {
2225 			BPF_MOV64_IMM(BPF_REG_0, 0),
2226 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2227 				    offsetof(struct __sk_buff, cb[0])),
2228 			BPF_EXIT_INSN(),
2229 		},
2230 		.errstr = "invalid bpf_context access",
2231 		.result = REJECT,
2232 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
2233 	},
2234 	{
2235 		"check cb access: word",
2236 		.insns = {
2237 			BPF_MOV64_IMM(BPF_REG_0, 0),
2238 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2239 				    offsetof(struct __sk_buff, cb[0])),
2240 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2241 				    offsetof(struct __sk_buff, cb[1])),
2242 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2243 				    offsetof(struct __sk_buff, cb[2])),
2244 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2245 				    offsetof(struct __sk_buff, cb[3])),
2246 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2247 				    offsetof(struct __sk_buff, cb[4])),
2248 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2249 				    offsetof(struct __sk_buff, cb[0])),
2250 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2251 				    offsetof(struct __sk_buff, cb[1])),
2252 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2253 				    offsetof(struct __sk_buff, cb[2])),
2254 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2255 				    offsetof(struct __sk_buff, cb[3])),
2256 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2257 				    offsetof(struct __sk_buff, cb[4])),
2258 			BPF_EXIT_INSN(),
2259 		},
2260 		.result = ACCEPT,
2261 	},
2262 	{
2263 		"check cb access: word, unaligned 1",
2264 		.insns = {
2265 			BPF_MOV64_IMM(BPF_REG_0, 0),
2266 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2267 				    offsetof(struct __sk_buff, cb[0]) + 2),
2268 			BPF_EXIT_INSN(),
2269 		},
2270 		.errstr = "misaligned context access",
2271 		.result = REJECT,
2272 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2273 	},
2274 	{
2275 		"check cb access: word, unaligned 2",
2276 		.insns = {
2277 			BPF_MOV64_IMM(BPF_REG_0, 0),
2278 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2279 				    offsetof(struct __sk_buff, cb[4]) + 1),
2280 			BPF_EXIT_INSN(),
2281 		},
2282 		.errstr = "misaligned context access",
2283 		.result = REJECT,
2284 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2285 	},
2286 	{
2287 		"check cb access: word, unaligned 3",
2288 		.insns = {
2289 			BPF_MOV64_IMM(BPF_REG_0, 0),
2290 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2291 				    offsetof(struct __sk_buff, cb[4]) + 2),
2292 			BPF_EXIT_INSN(),
2293 		},
2294 		.errstr = "misaligned context access",
2295 		.result = REJECT,
2296 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2297 	},
2298 	{
2299 		"check cb access: word, unaligned 4",
2300 		.insns = {
2301 			BPF_MOV64_IMM(BPF_REG_0, 0),
2302 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2303 				    offsetof(struct __sk_buff, cb[4]) + 3),
2304 			BPF_EXIT_INSN(),
2305 		},
2306 		.errstr = "misaligned context access",
2307 		.result = REJECT,
2308 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2309 	},
2310 	{
2311 		"check cb access: double",
2312 		.insns = {
2313 			BPF_MOV64_IMM(BPF_REG_0, 0),
2314 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2315 				    offsetof(struct __sk_buff, cb[0])),
2316 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2317 				    offsetof(struct __sk_buff, cb[2])),
2318 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2319 				    offsetof(struct __sk_buff, cb[0])),
2320 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2321 				    offsetof(struct __sk_buff, cb[2])),
2322 			BPF_EXIT_INSN(),
2323 		},
2324 		.result = ACCEPT,
2325 	},
2326 	{
2327 		"check cb access: double, unaligned 1",
2328 		.insns = {
2329 			BPF_MOV64_IMM(BPF_REG_0, 0),
2330 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2331 				    offsetof(struct __sk_buff, cb[1])),
2332 			BPF_EXIT_INSN(),
2333 		},
2334 		.errstr = "misaligned context access",
2335 		.result = REJECT,
2336 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2337 	},
2338 	{
2339 		"check cb access: double, unaligned 2",
2340 		.insns = {
2341 			BPF_MOV64_IMM(BPF_REG_0, 0),
2342 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2343 				    offsetof(struct __sk_buff, cb[3])),
2344 			BPF_EXIT_INSN(),
2345 		},
2346 		.errstr = "misaligned context access",
2347 		.result = REJECT,
2348 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2349 	},
2350 	{
2351 		"check cb access: double, oob 1",
2352 		.insns = {
2353 			BPF_MOV64_IMM(BPF_REG_0, 0),
2354 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2355 				    offsetof(struct __sk_buff, cb[4])),
2356 			BPF_EXIT_INSN(),
2357 		},
2358 		.errstr = "invalid bpf_context access",
2359 		.result = REJECT,
2360 	},
2361 	{
2362 		"check cb access: double, oob 2",
2363 		.insns = {
2364 			BPF_MOV64_IMM(BPF_REG_0, 0),
2365 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2366 				    offsetof(struct __sk_buff, cb[4])),
2367 			BPF_EXIT_INSN(),
2368 		},
2369 		.errstr = "invalid bpf_context access",
2370 		.result = REJECT,
2371 	},
2372 	{
2373 		"check __sk_buff->ifindex dw store not permitted",
2374 		.insns = {
2375 			BPF_MOV64_IMM(BPF_REG_0, 0),
2376 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2377 				    offsetof(struct __sk_buff, ifindex)),
2378 			BPF_EXIT_INSN(),
2379 		},
2380 		.errstr = "invalid bpf_context access",
2381 		.result = REJECT,
2382 	},
2383 	{
2384 		"check __sk_buff->ifindex dw load not permitted",
2385 		.insns = {
2386 			BPF_MOV64_IMM(BPF_REG_0, 0),
2387 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2388 				    offsetof(struct __sk_buff, ifindex)),
2389 			BPF_EXIT_INSN(),
2390 		},
2391 		.errstr = "invalid bpf_context access",
2392 		.result = REJECT,
2393 	},
2394 	{
2395 		"check cb access: double, wrong type",
2396 		.insns = {
2397 			BPF_MOV64_IMM(BPF_REG_0, 0),
2398 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2399 				    offsetof(struct __sk_buff, cb[0])),
2400 			BPF_EXIT_INSN(),
2401 		},
2402 		.errstr = "invalid bpf_context access",
2403 		.result = REJECT,
2404 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
2405 	},
2406 	{
2407 		"check out of range skb->cb access",
2408 		.insns = {
2409 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2410 				    offsetof(struct __sk_buff, cb[0]) + 256),
2411 			BPF_EXIT_INSN(),
2412 		},
2413 		.errstr = "invalid bpf_context access",
2414 		.errstr_unpriv = "",
2415 		.result = REJECT,
2416 		.prog_type = BPF_PROG_TYPE_SCHED_ACT,
2417 	},
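	/* Writing skb fields: the socket prog variant stores R1, the ctx
	 * pointer, into cb[], which is fine when privileged but rejected for
	 * unprivileged loads as a pointer leak.  The tc_cls_act variant also
	 * writes mark, tc_index and tstamp, which SCHED_CLS programs are
	 * allowed to do.
	 */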
2418 	{
2419 		"write skb fields from socket prog",
2420 		.insns = {
2421 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2422 				    offsetof(struct __sk_buff, cb[4])),
2423 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
2424 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2425 				    offsetof(struct __sk_buff, mark)),
2426 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2427 				    offsetof(struct __sk_buff, tc_index)),
2428 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
2429 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2430 				    offsetof(struct __sk_buff, cb[0])),
2431 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2432 				    offsetof(struct __sk_buff, cb[2])),
2433 			BPF_EXIT_INSN(),
2434 		},
2435 		.result = ACCEPT,
2436 		.errstr_unpriv = "R1 leaks addr",
2437 		.result_unpriv = REJECT,
2438 	},
2439 	{
2440 		"write skb fields from tc_cls_act prog",
2441 		.insns = {
2442 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2443 				    offsetof(struct __sk_buff, cb[0])),
2444 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2445 				    offsetof(struct __sk_buff, mark)),
2446 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2447 				    offsetof(struct __sk_buff, tc_index)),
2448 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2449 				    offsetof(struct __sk_buff, tc_index)),
2450 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2451 				    offsetof(struct __sk_buff, cb[3])),
2452 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2453 				    offsetof(struct __sk_buff, tstamp)),
2454 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2455 				    offsetof(struct __sk_buff, tstamp)),
2456 			BPF_EXIT_INSN(),
2457 		},
2458 		.errstr_unpriv = "",
2459 		.result_unpriv = REJECT,
2460 		.result = ACCEPT,
2461 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2462 	},
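	/* PTR_TO_STACK tests: form a pointer into the stack frame (fp - 10),
	 * store 0xfaceb00c through it at offset +2 (i.e. at fp - 8, 8-byte
	 * aligned) and load it back into R0.  The variants that follow check
	 * that misaligned offsets and accesses outside the 512-byte stack
	 * are rejected.
	 */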
2463 	{
2464 		"PTR_TO_STACK store/load",
2465 		.insns = {
2466 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2467 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
2468 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
2469 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
2470 			BPF_EXIT_INSN(),
2471 		},
2472 		.result = ACCEPT,
2473 		.retval = 0xfaceb00c,
2474 	},
2475 	{
2476 		"PTR_TO_STACK store/load - bad alignment on off",
2477 		.insns = {
2478 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2479 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2480 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
2481 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
2482 			BPF_EXIT_INSN(),
2483 		},
2484 		.result = REJECT,
2485 		.errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
2486 	},
2487 	{
2488 		"PTR_TO_STACK store/load - bad alignment on reg",
2489 		.insns = {
2490 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2491 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
2492 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2493 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2494 			BPF_EXIT_INSN(),
2495 		},
2496 		.result = REJECT,
2497 		.errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
2498 	},
2499 	{
2500 		"PTR_TO_STACK store/load - out of bounds low",
2501 		.insns = {
2502 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2503 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
2504 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2505 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2506 			BPF_EXIT_INSN(),
2507 		},
2508 		.result = REJECT,
2509 		.errstr = "invalid stack off=-79992 size=8",
2510 	},
2511 	{
2512 		"PTR_TO_STACK store/load - out of bounds high",
2513 		.insns = {
2514 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2515 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2516 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2517 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2518 			BPF_EXIT_INSN(),
2519 		},
2520 		.result = REJECT,
2521 		.errstr = "invalid stack off=0 size=8",
2522 	},
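	/* The "unpriv:" group covers checks that only matter when a program
	 * is loaded without privileges: leaking kernel pointers via the
	 * return value, maps, helpers or the context, and doing arithmetic
	 * or comparisons on pointers.  result_unpriv/errstr_unpriv give the
	 * expected outcome for the unprivileged load.
	 */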
2523 	{
2524 		"unpriv: return pointer",
2525 		.insns = {
2526 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
2527 			BPF_EXIT_INSN(),
2528 		},
2529 		.result = ACCEPT,
2530 		.result_unpriv = REJECT,
2531 		.errstr_unpriv = "R0 leaks addr",
2532 		.retval = POINTER_VALUE,
2533 	},
2534 	{
2535 		"unpriv: add const to pointer",
2536 		.insns = {
2537 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
2538 			BPF_MOV64_IMM(BPF_REG_0, 0),
2539 			BPF_EXIT_INSN(),
2540 		},
2541 		.result = ACCEPT,
2542 	},
2543 	{
2544 		"unpriv: add pointer to pointer",
2545 		.insns = {
2546 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
2547 			BPF_MOV64_IMM(BPF_REG_0, 0),
2548 			BPF_EXIT_INSN(),
2549 		},
2550 		.result = REJECT,
2551 		.errstr = "R1 pointer += pointer",
2552 	},
2553 	{
2554 		"unpriv: neg pointer",
2555 		.insns = {
2556 			BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
2557 			BPF_MOV64_IMM(BPF_REG_0, 0),
2558 			BPF_EXIT_INSN(),
2559 		},
2560 		.result = ACCEPT,
2561 		.result_unpriv = REJECT,
2562 		.errstr_unpriv = "R1 pointer arithmetic",
2563 	},
2564 	{
2565 		"unpriv: cmp pointer with const",
2566 		.insns = {
2567 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2568 			BPF_MOV64_IMM(BPF_REG_0, 0),
2569 			BPF_EXIT_INSN(),
2570 		},
2571 		.result = ACCEPT,
2572 		.result_unpriv = REJECT,
2573 		.errstr_unpriv = "R1 pointer comparison",
2574 	},
2575 	{
2576 		"unpriv: cmp pointer with pointer",
2577 		.insns = {
2578 			BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
2579 			BPF_MOV64_IMM(BPF_REG_0, 0),
2580 			BPF_EXIT_INSN(),
2581 		},
2582 		.result = ACCEPT,
2583 		.result_unpriv = REJECT,
2584 		.errstr_unpriv = "R10 pointer comparison",
2585 	},
2586 	{
2587 		"unpriv: check that printk is disallowed",
2588 		.insns = {
2589 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2590 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2591 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2592 			BPF_MOV64_IMM(BPF_REG_2, 8),
2593 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
2594 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2595 				     BPF_FUNC_trace_printk),
2596 			BPF_MOV64_IMM(BPF_REG_0, 0),
2597 			BPF_EXIT_INSN(),
2598 		},
2599 		.errstr_unpriv = "unknown func bpf_trace_printk#6",
2600 		.result_unpriv = REJECT,
2601 		.result = ACCEPT,
2602 	},
2603 	{
2604 		"unpriv: pass pointer to helper function",
2605 		.insns = {
2606 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2607 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2608 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2609 			BPF_LD_MAP_FD(BPF_REG_1, 0),
2610 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
2611 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2612 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2613 				     BPF_FUNC_map_update_elem),
2614 			BPF_MOV64_IMM(BPF_REG_0, 0),
2615 			BPF_EXIT_INSN(),
2616 		},
2617 		.fixup_map_hash_8b = { 3 },
2618 		.errstr_unpriv = "R4 leaks addr",
2619 		.result_unpriv = REJECT,
2620 		.result = ACCEPT,
2621 	},
2622 	{
2623 		"unpriv: indirectly pass pointer on stack to helper function",
2624 		.insns = {
2625 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2626 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2627 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2628 			BPF_LD_MAP_FD(BPF_REG_1, 0),
2629 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2630 				     BPF_FUNC_map_lookup_elem),
2631 			BPF_MOV64_IMM(BPF_REG_0, 0),
2632 			BPF_EXIT_INSN(),
2633 		},
2634 		.fixup_map_hash_8b = { 3 },
2635 		.errstr = "invalid indirect read from stack off -8+0 size 8",
2636 		.result = REJECT,
2637 	},
2638 	{
2639 		"unpriv: mangle pointer on stack 1",
2640 		.insns = {
2641 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2642 			BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
2643 			BPF_MOV64_IMM(BPF_REG_0, 0),
2644 			BPF_EXIT_INSN(),
2645 		},
2646 		.errstr_unpriv = "attempt to corrupt spilled",
2647 		.result_unpriv = REJECT,
2648 		.result = ACCEPT,
2649 	},
2650 	{
2651 		"unpriv: mangle pointer on stack 2",
2652 		.insns = {
2653 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2654 			BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
2655 			BPF_MOV64_IMM(BPF_REG_0, 0),
2656 			BPF_EXIT_INSN(),
2657 		},
2658 		.errstr_unpriv = "attempt to corrupt spilled",
2659 		.result_unpriv = REJECT,
2660 		.result = ACCEPT,
2661 	},
2662 	{
2663 		"unpriv: read pointer from stack in small chunks",
2664 		.insns = {
2665 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2666 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
2667 			BPF_MOV64_IMM(BPF_REG_0, 0),
2668 			BPF_EXIT_INSN(),
2669 		},
2670 		.errstr = "invalid size",
2671 		.result = REJECT,
2672 	},
2673 	{
2674 		"unpriv: write pointer into ctx",
2675 		.insns = {
2676 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
2677 			BPF_MOV64_IMM(BPF_REG_0, 0),
2678 			BPF_EXIT_INSN(),
2679 		},
2680 		.errstr_unpriv = "R1 leaks addr",
2681 		.result_unpriv = REJECT,
2682 		.errstr = "invalid bpf_context access",
2683 		.result = REJECT,
2684 	},
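	/* Spill/fill of ctx: a ctx pointer may be spilled to the stack and
	 * filled back (the first two variants are accepted), but overwriting
	 * or modifying the spill slot, or filling a slot that may hold
	 * pointers of different types on different paths, must be rejected.
	 */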
2685 	{
2686 		"unpriv: spill/fill of ctx",
2687 		.insns = {
2688 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2689 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2690 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2691 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2692 			BPF_MOV64_IMM(BPF_REG_0, 0),
2693 			BPF_EXIT_INSN(),
2694 		},
2695 		.result = ACCEPT,
2696 	},
2697 	{
2698 		"unpriv: spill/fill of ctx 2",
2699 		.insns = {
2700 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2701 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2702 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2703 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2704 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2705 				     BPF_FUNC_get_hash_recalc),
2706 			BPF_MOV64_IMM(BPF_REG_0, 0),
2707 			BPF_EXIT_INSN(),
2708 		},
2709 		.result = ACCEPT,
2710 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2711 	},
2712 	{
2713 		"unpriv: spill/fill of ctx 3",
2714 		.insns = {
2715 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2716 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2717 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2718 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2719 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2720 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2721 				     BPF_FUNC_get_hash_recalc),
2722 			BPF_EXIT_INSN(),
2723 		},
2724 		.result = REJECT,
2725 		.errstr = "R1 type=fp expected=ctx",
2726 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2727 	},
2728 	{
2729 		"unpriv: spill/fill of ctx 4",
2730 		.insns = {
2731 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2732 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2733 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2734 			BPF_MOV64_IMM(BPF_REG_0, 1),
2735 			BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
2736 				     BPF_REG_0, -8, 0),
2737 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2738 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2739 				     BPF_FUNC_get_hash_recalc),
2740 			BPF_EXIT_INSN(),
2741 		},
2742 		.result = REJECT,
2743 		.errstr = "R1 type=inv expected=ctx",
2744 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2745 	},
2746 	{
2747 		"unpriv: spill/fill of different pointers stx",
2748 		.insns = {
2749 			BPF_MOV64_IMM(BPF_REG_3, 42),
2750 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2751 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2752 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2753 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2754 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
2755 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2756 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2757 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2758 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2759 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2760 				    offsetof(struct __sk_buff, mark)),
2761 			BPF_MOV64_IMM(BPF_REG_0, 0),
2762 			BPF_EXIT_INSN(),
2763 		},
2764 		.result = REJECT,
2765 		.errstr = "same insn cannot be used with different pointers",
2766 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2767 	},
2768 	{
2769 		"unpriv: spill/fill of different pointers stx - ctx and sock",
2770 		.insns = {
2771 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2772 			/* struct bpf_sock *sock = bpf_sock_lookup(...); */
2773 			BPF_SK_LOOKUP,
2774 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2775 			/* u64 foo; */
2776 			/* void *target = &foo; */
2777 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2778 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2779 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2780 			/* if (skb == NULL) *target = sock; */
2781 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2782 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2783 			/* else *target = skb; */
2784 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2785 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2786 			/* struct __sk_buff *skb = *target; */
2787 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2788 			/* skb->mark = 42; */
2789 			BPF_MOV64_IMM(BPF_REG_3, 42),
2790 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2791 				    offsetof(struct __sk_buff, mark)),
2792 			/* if (sk) bpf_sk_release(sk) */
2793 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2794 				BPF_EMIT_CALL(BPF_FUNC_sk_release),
2795 			BPF_MOV64_IMM(BPF_REG_0, 0),
2796 			BPF_EXIT_INSN(),
2797 		},
2798 		.result = REJECT,
2799 		.errstr = "type=ctx expected=sock",
2800 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2801 	},
2802 	{
2803 		"unpriv: spill/fill of different pointers stx - leak sock",
2804 		.insns = {
2805 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2806 			/* struct bpf_sock *sock = bpf_sock_lookup(...); */
2807 			BPF_SK_LOOKUP,
2808 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2809 			/* u64 foo; */
2810 			/* void *target = &foo; */
2811 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2812 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2813 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2814 			/* if (skb == NULL) *target = sock; */
2815 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2816 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2817 			/* else *target = skb; */
2818 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2819 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2820 			/* struct __sk_buff *skb = *target; */
2821 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2822 			/* skb->mark = 42; */
2823 			BPF_MOV64_IMM(BPF_REG_3, 42),
2824 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2825 				    offsetof(struct __sk_buff, mark)),
2826 			BPF_EXIT_INSN(),
2827 		},
2828 		.result = REJECT,
2829 		/* .errstr = "same insn cannot be used with different pointers", */
2830 		.errstr = "Unreleased reference",
2831 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2832 	},
2833 	{
2834 		"unpriv: spill/fill of different pointers stx - sock and ctx (read)",
2835 		.insns = {
2836 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2837 			/* struct bpf_sock *sock = bpf_sock_lookup(...); */
2838 			BPF_SK_LOOKUP,
2839 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2840 			/* u64 foo; */
2841 			/* void *target = &foo; */
2842 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2843 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2844 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2845 			/* if (skb) *target = skb */
2846 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2847 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2848 			/* else *target = sock */
2849 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2850 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2851 			/* struct bpf_sock *sk = *target; */
2852 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2853 			/* if (sk) u32 foo = sk->mark; bpf_sk_release(sk); */
2854 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
2855 				BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2856 					    offsetof(struct bpf_sock, mark)),
2857 				BPF_EMIT_CALL(BPF_FUNC_sk_release),
2858 			BPF_MOV64_IMM(BPF_REG_0, 0),
2859 			BPF_EXIT_INSN(),
2860 		},
2861 		.result = REJECT,
2862 		.errstr = "same insn cannot be used with different pointers",
2863 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2864 	},
2865 	{
2866 		"unpriv: spill/fill of different pointers stx - sock and ctx (write)",
2867 		.insns = {
2868 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2869 			/* struct bpf_sock *sock = bpf_sock_lookup(...); */
2870 			BPF_SK_LOOKUP,
2871 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2872 			/* u64 foo; */
2873 			/* void *target = &foo; */
2874 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2875 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2876 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2877 			/* if (skb) *target = skb */
2878 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2879 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2880 			/* else *target = sock */
2881 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2882 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2883 			/* struct bpf_sock *sk = *target; */
2884 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2885 			/* if (sk) sk->mark = 42; bpf_sk_release(sk); */
2886 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2887 				BPF_MOV64_IMM(BPF_REG_3, 42),
2888 				BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2889 					    offsetof(struct bpf_sock, mark)),
2890 				BPF_EMIT_CALL(BPF_FUNC_sk_release),
2891 			BPF_MOV64_IMM(BPF_REG_0, 0),
2892 			BPF_EXIT_INSN(),
2893 		},
2894 		.result = REJECT,
2895 		/* .errstr = "same insn cannot be used with different pointers", */
2896 		.errstr = "cannot write into socket",
2897 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2898 	},
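	/* Same different-pointers check on the load side: the slot may hold
	 * either a stack pointer or the perf_event ctx pointer, so reading
	 * sample_period through the filled register is rejected.
	 */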
2899 	{
2900 		"unpriv: spill/fill of different pointers ldx",
2901 		.insns = {
2902 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2903 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2904 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2905 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2906 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
2907 				      -(__s32)offsetof(struct bpf_perf_event_data,
2908 						       sample_period) - 8),
2909 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2910 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2911 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2912 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2913 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
2914 				    offsetof(struct bpf_perf_event_data,
2915 					     sample_period)),
2916 			BPF_MOV64_IMM(BPF_REG_0, 0),
2917 			BPF_EXIT_INSN(),
2918 		},
2919 		.result = REJECT,
2920 		.errstr = "same insn cannot be used with different pointers",
2921 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
2922 	},
2923 	{
2924 		"unpriv: write pointer into map elem value",
2925 		.insns = {
2926 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2927 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2928 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2929 			BPF_LD_MAP_FD(BPF_REG_1, 0),
2930 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2931 				     BPF_FUNC_map_lookup_elem),
2932 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
2933 			BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
2934 			BPF_EXIT_INSN(),
2935 		},
2936 		.fixup_map_hash_8b = { 3 },
2937 		.errstr_unpriv = "R0 leaks addr",
2938 		.result_unpriv = REJECT,
2939 		.result = ACCEPT,
2940 	},
2941 	{
2942 		"unpriv: partial copy of pointer",
2943 		.insns = {
2944 			BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
2945 			BPF_MOV64_IMM(BPF_REG_0, 0),
2946 			BPF_EXIT_INSN(),
2947 		},
2948 		.errstr_unpriv = "R10 partial copy",
2949 		.result_unpriv = REJECT,
2950 		.result = ACCEPT,
2951 	},
2952 	{
2953 		"unpriv: pass pointer to tail_call",
2954 		.insns = {
2955 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
2956 			BPF_LD_MAP_FD(BPF_REG_2, 0),
2957 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2958 				     BPF_FUNC_tail_call),
2959 			BPF_MOV64_IMM(BPF_REG_0, 0),
2960 			BPF_EXIT_INSN(),
2961 		},
2962 		.fixup_prog1 = { 1 },
2963 		.errstr_unpriv = "R3 leaks addr into helper",
2964 		.result_unpriv = REJECT,
2965 		.result = ACCEPT,
2966 	},
2967 	{
2968 		"unpriv: cmp map pointer with zero",
2969 		.insns = {
2970 			BPF_MOV64_IMM(BPF_REG_1, 0),
2971 			BPF_LD_MAP_FD(BPF_REG_1, 0),
2972 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2973 			BPF_MOV64_IMM(BPF_REG_0, 0),
2974 			BPF_EXIT_INSN(),
2975 		},
2976 		.fixup_map_hash_8b = { 1 },
2977 		.errstr_unpriv = "R1 pointer comparison",
2978 		.result_unpriv = REJECT,
2979 		.result = ACCEPT,
2980 	},
2981 	{
2982 		"unpriv: write into frame pointer",
2983 		.insns = {
2984 			BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
2985 			BPF_MOV64_IMM(BPF_REG_0, 0),
2986 			BPF_EXIT_INSN(),
2987 		},
2988 		.errstr = "frame pointer is read only",
2989 		.result = REJECT,
2990 	},
2991 	{
2992 		"unpriv: spill/fill frame pointer",
2993 		.insns = {
2994 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2995 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2996 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2997 			BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
2998 			BPF_MOV64_IMM(BPF_REG_0, 0),
2999 			BPF_EXIT_INSN(),
3000 		},
3001 		.errstr = "frame pointer is read only",
3002 		.result = REJECT,
3003 	},
3004 	{
3005 		"unpriv: cmp of frame pointer",
3006 		.insns = {
3007 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
3008 			BPF_MOV64_IMM(BPF_REG_0, 0),
3009 			BPF_EXIT_INSN(),
3010 		},
3011 		.errstr_unpriv = "R10 pointer comparison",
3012 		.result_unpriv = REJECT,
3013 		.result = ACCEPT,
3014 	},
3015 	{
3016 		"unpriv: adding of fp",
3017 		.insns = {
3018 			BPF_MOV64_IMM(BPF_REG_0, 0),
3019 			BPF_MOV64_IMM(BPF_REG_1, 0),
3020 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
3021 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
3022 			BPF_EXIT_INSN(),
3023 		},
3024 		.result = ACCEPT,
3025 	},
3026 	{
3027 		"unpriv: cmp of stack pointer",
3028 		.insns = {
3029 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3030 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3031 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
3032 			BPF_MOV64_IMM(BPF_REG_0, 0),
3033 			BPF_EXIT_INSN(),
3034 		},
3035 		.errstr_unpriv = "R2 pointer comparison",
3036 		.result_unpriv = REJECT,
3037 		.result = ACCEPT,
3038 	},
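	/* The runtime/jit tail_call tests are executed, not just verified:
	 * fixup_prog1 patches BPF_LD_MAP_FD with a prog array populated by
	 * the test harness, and .retval shows whether the tail call was
	 * taken or fell through to the instructions after the call.
	 */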
3039 	{
3040 		"runtime/jit: tail_call within bounds, prog once",
3041 		.insns = {
3042 			BPF_MOV64_IMM(BPF_REG_3, 0),
3043 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3044 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3045 				     BPF_FUNC_tail_call),
3046 			BPF_MOV64_IMM(BPF_REG_0, 1),
3047 			BPF_EXIT_INSN(),
3048 		},
3049 		.fixup_prog1 = { 1 },
3050 		.result = ACCEPT,
3051 		.retval = 42,
3052 	},
3053 	{
3054 		"runtime/jit: tail_call within bounds, prog loop",
3055 		.insns = {
3056 			BPF_MOV64_IMM(BPF_REG_3, 1),
3057 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3058 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3059 				     BPF_FUNC_tail_call),
3060 			BPF_MOV64_IMM(BPF_REG_0, 1),
3061 			BPF_EXIT_INSN(),
3062 		},
3063 		.fixup_prog1 = { 1 },
3064 		.result = ACCEPT,
3065 		.retval = 41,
3066 	},
3067 	{
3068 		"runtime/jit: tail_call within bounds, no prog",
3069 		.insns = {
3070 			BPF_MOV64_IMM(BPF_REG_3, 2),
3071 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3072 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3073 				     BPF_FUNC_tail_call),
3074 			BPF_MOV64_IMM(BPF_REG_0, 1),
3075 			BPF_EXIT_INSN(),
3076 		},
3077 		.fixup_prog1 = { 1 },
3078 		.result = ACCEPT,
3079 		.retval = 1,
3080 	},
3081 	{
3082 		"runtime/jit: tail_call out of bounds",
3083 		.insns = {
3084 			BPF_MOV64_IMM(BPF_REG_3, 256),
3085 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3086 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3087 				     BPF_FUNC_tail_call),
3088 			BPF_MOV64_IMM(BPF_REG_0, 2),
3089 			BPF_EXIT_INSN(),
3090 		},
3091 		.fixup_prog1 = { 1 },
3092 		.result = ACCEPT,
3093 		.retval = 2,
3094 	},
3095 	{
3096 		"runtime/jit: pass negative index to tail_call",
3097 		.insns = {
3098 			BPF_MOV64_IMM(BPF_REG_3, -1),
3099 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3100 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3101 				     BPF_FUNC_tail_call),
3102 			BPF_MOV64_IMM(BPF_REG_0, 2),
3103 			BPF_EXIT_INSN(),
3104 		},
3105 		.fixup_prog1 = { 1 },
3106 		.result = ACCEPT,
3107 		.retval = 2,
3108 	},
3109 	{
3110 		"runtime/jit: pass > 32bit index to tail_call",
3111 		.insns = {
3112 			BPF_LD_IMM64(BPF_REG_3, 0x100000000ULL),
3113 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3114 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3115 				     BPF_FUNC_tail_call),
3116 			BPF_MOV64_IMM(BPF_REG_0, 2),
3117 			BPF_EXIT_INSN(),
3118 		},
3119 		.fixup_prog1 = { 2 },
3120 		.result = ACCEPT,
3121 		.retval = 42,
3122 		/* Verifier rewrite for unpriv skips tail call here. */
3123 		.retval_unpriv = 2,
3124 	},
3125 	{
3126 		"stack pointer arithmetic",
3127 		.insns = {
3128 			BPF_MOV64_IMM(BPF_REG_1, 4),
3129 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
3130 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
3131 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
3132 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
3133 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3134 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
3135 			BPF_ST_MEM(0, BPF_REG_2, 4, 0),
3136 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3137 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
3138 			BPF_ST_MEM(0, BPF_REG_2, 4, 0),
3139 			BPF_MOV64_IMM(BPF_REG_0, 0),
3140 			BPF_EXIT_INSN(),
3141 		},
3142 		.result = ACCEPT,
3143 	},
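	/* raw_stack tests: pass a pointer into the program stack as the
	 * destination buffer of bpf_skb_load_bytes().  Roughly, in C:
	 *
	 *	char buf[8];
	 *	bpf_skb_load_bytes(skb, 4, buf, sizeof(buf));
	 *	return *(u64 *)buf;
	 *
	 * The first variant omits the helper call to show the slot stays
	 * uninitialized; the others vary buffer bounds, length and spilled
	 * registers around the buffer.
	 */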
3144 	{
3145 		"raw_stack: no skb_load_bytes",
3146 		.insns = {
3147 			BPF_MOV64_IMM(BPF_REG_2, 4),
3148 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3149 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3150 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3151 			BPF_MOV64_IMM(BPF_REG_4, 8),
3152 			/* Call to skb_load_bytes() omitted. */
3153 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3154 			BPF_EXIT_INSN(),
3155 		},
3156 		.result = REJECT,
3157 		.errstr = "invalid read from stack off -8+0 size 8",
3158 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3159 	},
3160 	{
3161 		"raw_stack: skb_load_bytes, negative len",
3162 		.insns = {
3163 			BPF_MOV64_IMM(BPF_REG_2, 4),
3164 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3165 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3166 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3167 			BPF_MOV64_IMM(BPF_REG_4, -8),
3168 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3169 				     BPF_FUNC_skb_load_bytes),
3170 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3171 			BPF_EXIT_INSN(),
3172 		},
3173 		.result = REJECT,
3174 		.errstr = "R4 min value is negative",
3175 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3176 	},
3177 	{
3178 		"raw_stack: skb_load_bytes, negative len 2",
3179 		.insns = {
3180 			BPF_MOV64_IMM(BPF_REG_2, 4),
3181 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3182 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3183 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3184 			BPF_MOV64_IMM(BPF_REG_4, ~0),
3185 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3186 				     BPF_FUNC_skb_load_bytes),
3187 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3188 			BPF_EXIT_INSN(),
3189 		},
3190 		.result = REJECT,
3191 		.errstr = "R4 min value is negative",
3192 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3193 	},
3194 	{
3195 		"raw_stack: skb_load_bytes, zero len",
3196 		.insns = {
3197 			BPF_MOV64_IMM(BPF_REG_2, 4),
3198 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3199 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3200 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3201 			BPF_MOV64_IMM(BPF_REG_4, 0),
3202 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3203 				     BPF_FUNC_skb_load_bytes),
3204 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3205 			BPF_EXIT_INSN(),
3206 		},
3207 		.result = REJECT,
3208 		.errstr = "invalid stack type R3",
3209 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3210 	},
3211 	{
3212 		"raw_stack: skb_load_bytes, no init",
3213 		.insns = {
3214 			BPF_MOV64_IMM(BPF_REG_2, 4),
3215 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3216 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3217 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3218 			BPF_MOV64_IMM(BPF_REG_4, 8),
3219 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3220 				     BPF_FUNC_skb_load_bytes),
3221 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3222 			BPF_EXIT_INSN(),
3223 		},
3224 		.result = ACCEPT,
3225 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3226 	},
3227 	{
3228 		"raw_stack: skb_load_bytes, init",
3229 		.insns = {
3230 			BPF_MOV64_IMM(BPF_REG_2, 4),
3231 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3232 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3233 			BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
3234 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3235 			BPF_MOV64_IMM(BPF_REG_4, 8),
3236 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3237 				     BPF_FUNC_skb_load_bytes),
3238 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3239 			BPF_EXIT_INSN(),
3240 		},
3241 		.result = ACCEPT,
3242 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3243 	},
3244 	{
3245 		"raw_stack: skb_load_bytes, spilled regs around bounds",
3246 		.insns = {
3247 			BPF_MOV64_IMM(BPF_REG_2, 4),
3248 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3249 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
3250 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
3251 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  8),
3252 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3253 			BPF_MOV64_IMM(BPF_REG_4, 8),
3254 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3255 				     BPF_FUNC_skb_load_bytes),
3256 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
3257 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6,  8),
3258 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3259 				    offsetof(struct __sk_buff, mark)),
3260 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
3261 				    offsetof(struct __sk_buff, priority)),
3262 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3263 			BPF_EXIT_INSN(),
3264 		},
3265 		.result = ACCEPT,
3266 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3267 	},
3268 	{
3269 		"raw_stack: skb_load_bytes, spilled regs corruption",
3270 		.insns = {
3271 			BPF_MOV64_IMM(BPF_REG_2, 4),
3272 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3273 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3274 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
3275 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3276 			BPF_MOV64_IMM(BPF_REG_4, 8),
3277 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3278 				     BPF_FUNC_skb_load_bytes),
3279 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3280 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3281 				    offsetof(struct __sk_buff, mark)),
3282 			BPF_EXIT_INSN(),
3283 		},
3284 		.result = REJECT,
3285 		.errstr = "R0 invalid mem access 'inv'",
3286 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3287 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3288 	},
3289 	{
3290 		"raw_stack: skb_load_bytes, spilled regs corruption 2",
3291 		.insns = {
3292 			BPF_MOV64_IMM(BPF_REG_2, 4),
3293 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3294 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
3295 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
3296 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  0),
3297 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  8),
3298 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3299 			BPF_MOV64_IMM(BPF_REG_4, 8),
3300 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3301 				     BPF_FUNC_skb_load_bytes),
3302 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
3303 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6,  8),
3304 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6,  0),
3305 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3306 				    offsetof(struct __sk_buff, mark)),
3307 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
3308 				    offsetof(struct __sk_buff, priority)),
3309 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3310 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
3311 				    offsetof(struct __sk_buff, pkt_type)),
3312 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
3313 			BPF_EXIT_INSN(),
3314 		},
3315 		.result = REJECT,
3316 		.errstr = "R3 invalid mem access 'inv'",
3317 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3318 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3319 	},
3320 	{
3321 		"raw_stack: skb_load_bytes, spilled regs + data",
3322 		.insns = {
3323 			BPF_MOV64_IMM(BPF_REG_2, 4),
3324 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3325 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
3326 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
3327 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  0),
3328 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  8),
3329 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3330 			BPF_MOV64_IMM(BPF_REG_4, 8),
3331 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3332 				     BPF_FUNC_skb_load_bytes),
3333 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
3334 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6,  8),
3335 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6,  0),
3336 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3337 				    offsetof(struct __sk_buff, mark)),
3338 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
3339 				    offsetof(struct __sk_buff, priority)),
3340 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3341 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
3342 			BPF_EXIT_INSN(),
3343 		},
3344 		.result = ACCEPT,
3345 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3346 	},
3347 	{
3348 		"raw_stack: skb_load_bytes, invalid access 1",
3349 		.insns = {
3350 			BPF_MOV64_IMM(BPF_REG_2, 4),
3351 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3352 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
3353 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3354 			BPF_MOV64_IMM(BPF_REG_4, 8),
3355 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3356 				     BPF_FUNC_skb_load_bytes),
3357 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3358 			BPF_EXIT_INSN(),
3359 		},
3360 		.result = REJECT,
3361 		.errstr = "invalid stack type R3 off=-513 access_size=8",
3362 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3363 	},
3364 	{
3365 		"raw_stack: skb_load_bytes, invalid access 2",
3366 		.insns = {
3367 			BPF_MOV64_IMM(BPF_REG_2, 4),
3368 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3369 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
3370 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3371 			BPF_MOV64_IMM(BPF_REG_4, 8),
3372 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3373 				     BPF_FUNC_skb_load_bytes),
3374 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3375 			BPF_EXIT_INSN(),
3376 		},
3377 		.result = REJECT,
3378 		.errstr = "invalid stack type R3 off=-1 access_size=8",
3379 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3380 	},
3381 	{
3382 		"raw_stack: skb_load_bytes, invalid access 3",
3383 		.insns = {
3384 			BPF_MOV64_IMM(BPF_REG_2, 4),
3385 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3386 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
3387 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3388 			BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3389 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3390 				     BPF_FUNC_skb_load_bytes),
3391 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3392 			BPF_EXIT_INSN(),
3393 		},
3394 		.result = REJECT,
3395 		.errstr = "R4 min value is negative",
3396 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3397 	},
3398 	{
3399 		"raw_stack: skb_load_bytes, invalid access 4",
3400 		.insns = {
3401 			BPF_MOV64_IMM(BPF_REG_2, 4),
3402 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3403 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
3404 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3405 			BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
3406 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3407 				     BPF_FUNC_skb_load_bytes),
3408 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3409 			BPF_EXIT_INSN(),
3410 		},
3411 		.result = REJECT,
3412 		.errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
3413 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3414 	},
3415 	{
3416 		"raw_stack: skb_load_bytes, invalid access 5",
3417 		.insns = {
3418 			BPF_MOV64_IMM(BPF_REG_2, 4),
3419 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3420 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3421 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3422 			BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
3423 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3424 				     BPF_FUNC_skb_load_bytes),
3425 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3426 			BPF_EXIT_INSN(),
3427 		},
3428 		.result = REJECT,
3429 		.errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
3430 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3431 	},
3432 	{
3433 		"raw_stack: skb_load_bytes, invalid access 6",
3434 		.insns = {
3435 			BPF_MOV64_IMM(BPF_REG_2, 4),
3436 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3437 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3438 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3439 			BPF_MOV64_IMM(BPF_REG_4, 0),
3440 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3441 				     BPF_FUNC_skb_load_bytes),
3442 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3443 			BPF_EXIT_INSN(),
3444 		},
3445 		.result = REJECT,
3446 		.errstr = "invalid stack type R3 off=-512 access_size=0",
3447 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3448 	},
3449 	{
3450 		"raw_stack: skb_load_bytes, large access",
3451 		.insns = {
3452 			BPF_MOV64_IMM(BPF_REG_2, 4),
3453 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3454 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3455 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3456 			BPF_MOV64_IMM(BPF_REG_4, 512),
3457 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3458 				     BPF_FUNC_skb_load_bytes),
3459 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3460 			BPF_EXIT_INSN(),
3461 		},
3462 		.result = ACCEPT,
3463 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3464 	},
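	/* Stores to the context must come from a register (BPF_STX); an
	 * immediate BPF_ST store or a BPF_XADD read-modify-write on ctx is
	 * rejected.
	 */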
3465 	{
3466 		"context stores via ST",
3467 		.insns = {
3468 			BPF_MOV64_IMM(BPF_REG_0, 0),
3469 			BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0),
3470 			BPF_EXIT_INSN(),
3471 		},
3472 		.errstr = "BPF_ST stores into R1 ctx is not allowed",
3473 		.result = REJECT,
3474 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3475 	},
3476 	{
3477 		"context stores via XADD",
3478 		.insns = {
3479 			BPF_MOV64_IMM(BPF_REG_0, 0),
3480 			BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1,
3481 				     BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
3482 			BPF_EXIT_INSN(),
3483 		},
3484 		.errstr = "BPF_XADD stores into R1 ctx is not allowed",
3485 		.result = REJECT,
3486 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3487 	},
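	/* Direct packet access tests: loads and stores through skb->data are
	 * only allowed on paths where the accessed range has first been
	 * checked against skb->data_end.
	 */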
3488 	{
3489 		"direct packet access: test1",
3490 		.insns = {
3491 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3492 				    offsetof(struct __sk_buff, data)),
3493 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3494 				    offsetof(struct __sk_buff, data_end)),
3495 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3496 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3497 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3498 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3499 			BPF_MOV64_IMM(BPF_REG_0, 0),
3500 			BPF_EXIT_INSN(),
3501 		},
3502 		.result = ACCEPT,
3503 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3504 	},
3505 	{
3506 		"direct packet access: test2",
3507 		.insns = {
3508 			BPF_MOV64_IMM(BPF_REG_0, 1),
3509 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
3510 				    offsetof(struct __sk_buff, data_end)),
3511 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3512 				    offsetof(struct __sk_buff, data)),
3513 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3514 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
3515 			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
3516 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
3517 			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
3518 			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
3519 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3520 				    offsetof(struct __sk_buff, data)),
3521 			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
3522 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3523 				    offsetof(struct __sk_buff, len)),
3524 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
3525 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
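			/* shift left then logically right by 49 so only the
			 * low 15 bits of skb->len remain, giving a bounded
			 * offset to add to the packet pointer
			 */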
3526 			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
3527 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
3528 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
3529 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
3530 				    offsetof(struct __sk_buff, data_end)),
3531 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
3532 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
3533 			BPF_MOV64_IMM(BPF_REG_0, 0),
3534 			BPF_EXIT_INSN(),
3535 		},
3536 		.result = ACCEPT,
3537 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3538 	},
3539 	{
3540 		"direct packet access: test3",
3541 		.insns = {
3542 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3543 				    offsetof(struct __sk_buff, data)),
3544 			BPF_MOV64_IMM(BPF_REG_0, 0),
3545 			BPF_EXIT_INSN(),
3546 		},
3547 		.errstr = "invalid bpf_context access off=76",
3548 		.result = REJECT,
3549 		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
3550 	},
3551 	{
3552 		"direct packet access: test4 (write)",
3553 		.insns = {
3554 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3555 				    offsetof(struct __sk_buff, data)),
3556 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3557 				    offsetof(struct __sk_buff, data_end)),
3558 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3559 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3560 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3561 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3562 			BPF_MOV64_IMM(BPF_REG_0, 0),
3563 			BPF_EXIT_INSN(),
3564 		},
3565 		.result = ACCEPT,
3566 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3567 	},
3568 	{
3569 		"direct packet access: test5 (pkt_end >= reg, good access)",
3570 		.insns = {
3571 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3572 				    offsetof(struct __sk_buff, data)),
3573 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3574 				    offsetof(struct __sk_buff, data_end)),
3575 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3576 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3577 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
3578 			BPF_MOV64_IMM(BPF_REG_0, 1),
3579 			BPF_EXIT_INSN(),
3580 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3581 			BPF_MOV64_IMM(BPF_REG_0, 0),
3582 			BPF_EXIT_INSN(),
3583 		},
3584 		.result = ACCEPT,
3585 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3586 	},
3587 	{
3588 		"direct packet access: test6 (pkt_end >= reg, bad access)",
3589 		.insns = {
3590 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3591 				    offsetof(struct __sk_buff, data)),
3592 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3593 				    offsetof(struct __sk_buff, data_end)),
3594 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3595 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3596 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
3597 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3598 			BPF_MOV64_IMM(BPF_REG_0, 1),
3599 			BPF_EXIT_INSN(),
3600 			BPF_MOV64_IMM(BPF_REG_0, 0),
3601 			BPF_EXIT_INSN(),
3602 		},
3603 		.errstr = "invalid access to packet",
3604 		.result = REJECT,
3605 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3606 	},
3607 	{
3608 		"direct packet access: test7 (pkt_end >= reg, both accesses)",
3609 		.insns = {
3610 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3611 				    offsetof(struct __sk_buff, data)),
3612 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3613 				    offsetof(struct __sk_buff, data_end)),
3614 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3615 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3616 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
3617 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3618 			BPF_MOV64_IMM(BPF_REG_0, 1),
3619 			BPF_EXIT_INSN(),
3620 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3621 			BPF_MOV64_IMM(BPF_REG_0, 0),
3622 			BPF_EXIT_INSN(),
3623 		},
3624 		.errstr = "invalid access to packet",
3625 		.result = REJECT,
3626 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3627 	},
3628 	{
3629 		"direct packet access: test8 (double test, variant 1)",
3630 		.insns = {
3631 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3632 				    offsetof(struct __sk_buff, data)),
3633 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3634 				    offsetof(struct __sk_buff, data_end)),
3635 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3636 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3637 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
3638 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3639 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3640 			BPF_MOV64_IMM(BPF_REG_0, 1),
3641 			BPF_EXIT_INSN(),
3642 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3643 			BPF_MOV64_IMM(BPF_REG_0, 0),
3644 			BPF_EXIT_INSN(),
3645 		},
3646 		.result = ACCEPT,
3647 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3648 	},
3649 	{
3650 		"direct packet access: test9 (double test, variant 2)",
3651 		.insns = {
3652 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3653 				    offsetof(struct __sk_buff, data)),
3654 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3655 				    offsetof(struct __sk_buff, data_end)),
3656 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3657 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3658 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
3659 			BPF_MOV64_IMM(BPF_REG_0, 1),
3660 			BPF_EXIT_INSN(),
3661 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3662 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3663 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3664 			BPF_MOV64_IMM(BPF_REG_0, 0),
3665 			BPF_EXIT_INSN(),
3666 		},
3667 		.result = ACCEPT,
3668 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3669 	},
3670 	{
3671 		"direct packet access: test10 (write invalid)",
3672 		.insns = {
3673 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3674 				    offsetof(struct __sk_buff, data)),
3675 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3676 				    offsetof(struct __sk_buff, data_end)),
3677 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3678 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3679 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
3680 			BPF_MOV64_IMM(BPF_REG_0, 0),
3681 			BPF_EXIT_INSN(),
3682 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3683 			BPF_MOV64_IMM(BPF_REG_0, 0),
3684 			BPF_EXIT_INSN(),
3685 		},
3686 		.errstr = "invalid access to packet",
3687 		.result = REJECT,
3688 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3689 	},
3690 	{
3691 		"direct packet access: test11 (shift, good access)",
3692 		.insns = {
3693 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3694 				    offsetof(struct __sk_buff, data)),
3695 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3696 				    offsetof(struct __sk_buff, data_end)),
3697 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3698 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3699 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3700 			BPF_MOV64_IMM(BPF_REG_3, 144),
3701 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3702 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3703 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
3704 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3705 			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3706 			BPF_MOV64_IMM(BPF_REG_0, 1),
3707 			BPF_EXIT_INSN(),
3708 			BPF_MOV64_IMM(BPF_REG_0, 0),
3709 			BPF_EXIT_INSN(),
3710 		},
3711 		.result = ACCEPT,
3712 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3713 		.retval = 1,
3714 	},
3715 	{
3716 		"direct packet access: test12 (and, good access)",
3717 		.insns = {
3718 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3719 				    offsetof(struct __sk_buff, data)),
3720 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3721 				    offsetof(struct __sk_buff, data_end)),
3722 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3723 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3724 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3725 			BPF_MOV64_IMM(BPF_REG_3, 144),
3726 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3727 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3728 			BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
3729 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3730 			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3731 			BPF_MOV64_IMM(BPF_REG_0, 1),
3732 			BPF_EXIT_INSN(),
3733 			BPF_MOV64_IMM(BPF_REG_0, 0),
3734 			BPF_EXIT_INSN(),
3735 		},
3736 		.result = ACCEPT,
3737 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3738 		.retval = 1,
3739 	},
3740 	{
3741 		"direct packet access: test13 (branches, good access)",
3742 		.insns = {
3743 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3744 				    offsetof(struct __sk_buff, data)),
3745 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3746 				    offsetof(struct __sk_buff, data_end)),
3747 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3748 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3749 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
3750 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3751 				    offsetof(struct __sk_buff, mark)),
3752 			BPF_MOV64_IMM(BPF_REG_4, 1),
3753 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
3754 			BPF_MOV64_IMM(BPF_REG_3, 14),
3755 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
3756 			BPF_MOV64_IMM(BPF_REG_3, 24),
3757 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3758 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3759 			BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
3760 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3761 			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3762 			BPF_MOV64_IMM(BPF_REG_0, 1),
3763 			BPF_EXIT_INSN(),
3764 			BPF_MOV64_IMM(BPF_REG_0, 0),
3765 			BPF_EXIT_INSN(),
3766 		},
3767 		.result = ACCEPT,
3768 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3769 		.retval = 1,
3770 	},
3771 	{
3772 		"direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
3773 		.insns = {
3774 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3775 				    offsetof(struct __sk_buff, data)),
3776 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3777 				    offsetof(struct __sk_buff, data_end)),
3778 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3779 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3780 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
3781 			BPF_MOV64_IMM(BPF_REG_5, 12),
3782 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
3783 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3784 			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3785 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
3786 			BPF_MOV64_IMM(BPF_REG_0, 1),
3787 			BPF_EXIT_INSN(),
3788 			BPF_MOV64_IMM(BPF_REG_0, 0),
3789 			BPF_EXIT_INSN(),
3790 		},
3791 		.result = ACCEPT,
3792 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3793 		.retval = 1,
3794 	},
3795 	{
3796 		"direct packet access: test15 (spill with xadd)",
3797 		.insns = {
3798 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3799 				    offsetof(struct __sk_buff, data)),
3800 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3801 				    offsetof(struct __sk_buff, data_end)),
3802 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3803 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3804 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3805 			BPF_MOV64_IMM(BPF_REG_5, 4096),
3806 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
3807 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
3808 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
3809 			BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
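			/* the XADD on the spill slot clobbers the spilled
			 * packet pointer, so the fill below yields an unknown
			 * scalar (see errstr)
			 */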
3810 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
3811 			BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
3812 			BPF_MOV64_IMM(BPF_REG_0, 0),
3813 			BPF_EXIT_INSN(),
3814 		},
3815 		.errstr = "R2 invalid mem access 'inv'",
3816 		.result = REJECT,
3817 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3818 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3819 	},
3820 	{
3821 		"direct packet access: test16 (arith on data_end)",
3822 		.insns = {
3823 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3824 				    offsetof(struct __sk_buff, data)),
3825 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3826 				    offsetof(struct __sk_buff, data_end)),
3827 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3828 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3829 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16),
3830 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3831 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3832 			BPF_MOV64_IMM(BPF_REG_0, 0),
3833 			BPF_EXIT_INSN(),
3834 		},
3835 		.errstr = "R3 pointer arithmetic on pkt_end",
3836 		.result = REJECT,
3837 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3838 	},
3839 	{
3840 		"direct packet access: test17 (pruning, alignment)",
3841 		.insns = {
3842 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3843 				    offsetof(struct __sk_buff, data)),
3844 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3845 				    offsetof(struct __sk_buff, data_end)),
3846 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3847 				    offsetof(struct __sk_buff, mark)),
3848 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3849 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
3850 			BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
3851 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3852 			BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
3853 			BPF_MOV64_IMM(BPF_REG_0, 0),
3854 			BPF_EXIT_INSN(),
3855 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
3856 			BPF_JMP_A(-6),
3857 		},
3858 		.errstr = "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4",
3859 		.result = REJECT,
3860 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3861 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
3862 	},
3863 	{
3864 		"direct packet access: test18 (imm += pkt_ptr, 1)",
3865 		.insns = {
3866 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3867 				    offsetof(struct __sk_buff, data)),
3868 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3869 				    offsetof(struct __sk_buff, data_end)),
3870 			BPF_MOV64_IMM(BPF_REG_0, 8),
3871 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3872 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3873 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3874 			BPF_MOV64_IMM(BPF_REG_0, 0),
3875 			BPF_EXIT_INSN(),
3876 		},
3877 		.result = ACCEPT,
3878 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3879 	},
3880 	{
3881 		"direct packet access: test19 (imm += pkt_ptr, 2)",
3882 		.insns = {
3883 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3884 				    offsetof(struct __sk_buff, data)),
3885 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3886 				    offsetof(struct __sk_buff, data_end)),
3887 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3888 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3889 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
3890 			BPF_MOV64_IMM(BPF_REG_4, 4),
3891 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3892 			BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0),
3893 			BPF_MOV64_IMM(BPF_REG_0, 0),
3894 			BPF_EXIT_INSN(),
3895 		},
3896 		.result = ACCEPT,
3897 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3898 	},
3899 	{
3900 		"direct packet access: test20 (x += pkt_ptr, 1)",
3901 		.insns = {
3902 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3903 				    offsetof(struct __sk_buff, data)),
3904 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3905 				    offsetof(struct __sk_buff, data_end)),
3906 			BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3907 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3908 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3909 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0x7fff),
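			/* mask to 15 bits so the value added to the packet
			 * pointer has a known upper bound
			 */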
3910 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3911 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3912 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3913 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
3914 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
3915 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
3916 			BPF_MOV64_IMM(BPF_REG_0, 0),
3917 			BPF_EXIT_INSN(),
3918 		},
3919 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3920 		.result = ACCEPT,
3921 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3922 	},
3923 	{
3924 		"direct packet access: test21 (x += pkt_ptr, 2)",
3925 		.insns = {
3926 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3927 				    offsetof(struct __sk_buff, data)),
3928 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3929 				    offsetof(struct __sk_buff, data_end)),
3930 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3931 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3932 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
3933 			BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3934 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
3935 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
3936 			BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0x7fff),
3937 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3938 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3939 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
3940 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
3941 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
3942 			BPF_MOV64_IMM(BPF_REG_0, 0),
3943 			BPF_EXIT_INSN(),
3944 		},
3945 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3946 		.result = ACCEPT,
3947 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3948 	},
3949 	{
3950 		"direct packet access: test22 (x += pkt_ptr, 3)",
3951 		.insns = {
3952 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3953 				    offsetof(struct __sk_buff, data)),
3954 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3955 				    offsetof(struct __sk_buff, data_end)),
3956 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3957 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3958 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8),
3959 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16),
3960 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16),
3961 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11),
3962 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
3963 			BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3964 			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
3965 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
3966 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49),
3967 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3968 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
3969 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
3970 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
3971 			BPF_MOV64_IMM(BPF_REG_2, 1),
3972 			BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0),
3973 			BPF_MOV64_IMM(BPF_REG_0, 0),
3974 			BPF_EXIT_INSN(),
3975 		},
3976 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3977 		.result = ACCEPT,
3978 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3979 	},
3980 	{
3981 		"direct packet access: test23 (x += pkt_ptr, 4)",
3982 		.insns = {
3983 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3984 				    offsetof(struct __sk_buff, data)),
3985 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3986 				    offsetof(struct __sk_buff, data_end)),
3987 			BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3988 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3989 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3990 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
3991 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3992 			BPF_MOV64_IMM(BPF_REG_0, 31),
3993 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
3994 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3995 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
3996 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1),
3997 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3998 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
3999 			BPF_MOV64_IMM(BPF_REG_0, 0),
4000 			BPF_EXIT_INSN(),
4001 		},
4002 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4003 		.result = REJECT,
4004 		.errstr = "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)",
4005 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4006 	},
4007 	{
4008 		"direct packet access: test24 (x += pkt_ptr, 5)",
4009 		.insns = {
4010 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4011 				    offsetof(struct __sk_buff, data)),
4012 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4013 				    offsetof(struct __sk_buff, data_end)),
4014 			BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
4015 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4016 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
4017 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff),
4018 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4019 			BPF_MOV64_IMM(BPF_REG_0, 64),
4020 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
4021 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
4022 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
4023 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7fff - 1),
4024 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4025 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
4026 			BPF_MOV64_IMM(BPF_REG_0, 0),
4027 			BPF_EXIT_INSN(),
4028 		},
4029 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4030 		.result = ACCEPT,
4031 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4032 	},
4033 	{
4034 		"direct packet access: test25 (marking on <, good access)",
4035 		.insns = {
4036 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4037 				    offsetof(struct __sk_buff, data)),
4038 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4039 				    offsetof(struct __sk_buff, data_end)),
4040 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4041 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4042 			BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 2),
4043 			BPF_MOV64_IMM(BPF_REG_0, 0),
4044 			BPF_EXIT_INSN(),
4045 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4046 			BPF_JMP_IMM(BPF_JA, 0, 0, -4),
4047 		},
4048 		.result = ACCEPT,
4049 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4050 	},
4051 	{
4052 		"direct packet access: test26 (marking on <, bad access)",
4053 		.insns = {
4054 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4055 				    offsetof(struct __sk_buff, data)),
4056 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4057 				    offsetof(struct __sk_buff, data_end)),
4058 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4059 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4060 			BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 3),
4061 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4062 			BPF_MOV64_IMM(BPF_REG_0, 0),
4063 			BPF_EXIT_INSN(),
4064 			BPF_JMP_IMM(BPF_JA, 0, 0, -3),
4065 		},
4066 		.result = REJECT,
4067 		.errstr = "invalid access to packet",
4068 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4069 	},
4070 	{
4071 		"direct packet access: test27 (marking on <=, good access)",
4072 		.insns = {
4073 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4074 				    offsetof(struct __sk_buff, data)),
4075 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4076 				    offsetof(struct __sk_buff, data_end)),
4077 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4078 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4079 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 1),
4080 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4081 			BPF_MOV64_IMM(BPF_REG_0, 1),
4082 			BPF_EXIT_INSN(),
4083 		},
4084 		.result = ACCEPT,
4085 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4086 		.retval = 1,
4087 	},
4088 	{
4089 		"direct packet access: test28 (marking on <=, bad access)",
4090 		.insns = {
4091 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4092 				    offsetof(struct __sk_buff, data)),
4093 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4094 				    offsetof(struct __sk_buff, data_end)),
4095 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4096 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4097 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 2),
4098 			BPF_MOV64_IMM(BPF_REG_0, 1),
4099 			BPF_EXIT_INSN(),
4100 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4101 			BPF_JMP_IMM(BPF_JA, 0, 0, -4),
4102 		},
4103 		.result = REJECT,
4104 		.errstr = "invalid access to packet",
4105 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4106 	},
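	/* Helper access to packet: passing a packet pointer to a helper
	 * requires that the whole [ptr, ptr + size) range has been verified
	 * against the packet end, and only helpers suitable for packet
	 * memory accept such pointers.
	 */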
4107 	{
4108 		"helper access to packet: test1, valid packet_ptr range",
4109 		.insns = {
4110 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4111 				    offsetof(struct xdp_md, data)),
4112 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4113 				    offsetof(struct xdp_md, data_end)),
4114 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
4115 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
4116 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
4117 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4118 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
4119 			BPF_MOV64_IMM(BPF_REG_4, 0),
4120 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4121 				     BPF_FUNC_map_update_elem),
4122 			BPF_MOV64_IMM(BPF_REG_0, 0),
4123 			BPF_EXIT_INSN(),
4124 		},
4125 		.fixup_map_hash_8b = { 5 },
4126 		.result_unpriv = ACCEPT,
4127 		.result = ACCEPT,
4128 		.prog_type = BPF_PROG_TYPE_XDP,
4129 	},
4130 	{
4131 		"helper access to packet: test2, unchecked packet_ptr",
4132 		.insns = {
4133 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4134 				    offsetof(struct xdp_md, data)),
4135 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4136 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4137 				     BPF_FUNC_map_lookup_elem),
4138 			BPF_MOV64_IMM(BPF_REG_0, 0),
4139 			BPF_EXIT_INSN(),
4140 		},
4141 		.fixup_map_hash_8b = { 1 },
4142 		.result = REJECT,
4143 		.errstr = "invalid access to packet",
4144 		.prog_type = BPF_PROG_TYPE_XDP,
4145 	},
4146 	{
4147 		"helper access to packet: test3, variable add",
4148 		.insns = {
4149 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4150 					offsetof(struct xdp_md, data)),
4151 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4152 					offsetof(struct xdp_md, data_end)),
4153 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4154 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
4155 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
4156 			BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
4157 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4158 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
4159 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
4160 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
4161 			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
4162 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4163 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
4164 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4165 				     BPF_FUNC_map_lookup_elem),
4166 			BPF_MOV64_IMM(BPF_REG_0, 0),
4167 			BPF_EXIT_INSN(),
4168 		},
4169 		.fixup_map_hash_8b = { 11 },
4170 		.result = ACCEPT,
4171 		.prog_type = BPF_PROG_TYPE_XDP,
4172 	},
4173 	{
4174 		"helper access to packet: test4, packet_ptr with bad range",
4175 		.insns = {
4176 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4177 				    offsetof(struct xdp_md, data)),
4178 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4179 				    offsetof(struct xdp_md, data_end)),
4180 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4181 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
4182 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
4183 			BPF_MOV64_IMM(BPF_REG_0, 0),
4184 			BPF_EXIT_INSN(),
4185 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4186 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4187 				     BPF_FUNC_map_lookup_elem),
4188 			BPF_MOV64_IMM(BPF_REG_0, 0),
4189 			BPF_EXIT_INSN(),
4190 		},
4191 		.fixup_map_hash_8b = { 7 },
4192 		.result = REJECT,
4193 		.errstr = "invalid access to packet",
4194 		.prog_type = BPF_PROG_TYPE_XDP,
4195 	},
4196 	{
4197 		"helper access to packet: test5, packet_ptr with too short range",
4198 		.insns = {
4199 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4200 				    offsetof(struct xdp_md, data)),
4201 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4202 				    offsetof(struct xdp_md, data_end)),
4203 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
4204 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4205 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
4206 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
4207 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4208 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4209 				     BPF_FUNC_map_lookup_elem),
4210 			BPF_MOV64_IMM(BPF_REG_0, 0),
4211 			BPF_EXIT_INSN(),
4212 		},
4213 		.fixup_map_hash_8b = { 6 },
4214 		.result = REJECT,
4215 		.errstr = "invalid access to packet",
4216 		.prog_type = BPF_PROG_TYPE_XDP,
4217 	},
4218 	{
4219 		"helper access to packet: test6, cls valid packet_ptr range",
4220 		.insns = {
4221 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4222 				    offsetof(struct __sk_buff, data)),
4223 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4224 				    offsetof(struct __sk_buff, data_end)),
4225 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
4226 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
4227 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
4228 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4229 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
4230 			BPF_MOV64_IMM(BPF_REG_4, 0),
4231 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4232 				     BPF_FUNC_map_update_elem),
4233 			BPF_MOV64_IMM(BPF_REG_0, 0),
4234 			BPF_EXIT_INSN(),
4235 		},
4236 		.fixup_map_hash_8b = { 5 },
4237 		.result = ACCEPT,
4238 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4239 	},
4240 	{
4241 		"helper access to packet: test7, cls unchecked packet_ptr",
4242 		.insns = {
4243 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4244 				    offsetof(struct __sk_buff, data)),
4245 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4246 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4247 				     BPF_FUNC_map_lookup_elem),
4248 			BPF_MOV64_IMM(BPF_REG_0, 0),
4249 			BPF_EXIT_INSN(),
4250 		},
4251 		.fixup_map_hash_8b = { 1 },
4252 		.result = REJECT,
4253 		.errstr = "invalid access to packet",
4254 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4255 	},
4256 	{
4257 		"helper access to packet: test8, cls variable add",
4258 		.insns = {
4259 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4260 					offsetof(struct __sk_buff, data)),
4261 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4262 					offsetof(struct __sk_buff, data_end)),
4263 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4264 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
4265 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
4266 			BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
4267 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4268 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
4269 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
4270 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
4271 			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
4272 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4273 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
4274 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4275 				     BPF_FUNC_map_lookup_elem),
4276 			BPF_MOV64_IMM(BPF_REG_0, 0),
4277 			BPF_EXIT_INSN(),
4278 		},
4279 		.fixup_map_hash_8b = { 11 },
4280 		.result = ACCEPT,
4281 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4282 	},
4283 	{
4284 		"helper access to packet: test9, cls packet_ptr with bad range",
4285 		.insns = {
4286 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4287 				    offsetof(struct __sk_buff, data)),
4288 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4289 				    offsetof(struct __sk_buff, data_end)),
4290 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4291 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
4292 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
4293 			BPF_MOV64_IMM(BPF_REG_0, 0),
4294 			BPF_EXIT_INSN(),
4295 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4296 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4297 				     BPF_FUNC_map_lookup_elem),
4298 			BPF_MOV64_IMM(BPF_REG_0, 0),
4299 			BPF_EXIT_INSN(),
4300 		},
4301 		.fixup_map_hash_8b = { 7 },
4302 		.result = REJECT,
4303 		.errstr = "invalid access to packet",
4304 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4305 	},
4306 	{
4307 		"helper access to packet: test10, cls packet_ptr with too short range",
4308 		.insns = {
4309 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4310 				    offsetof(struct __sk_buff, data)),
4311 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4312 				    offsetof(struct __sk_buff, data_end)),
4313 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
4314 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4315 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
4316 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
4317 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4318 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4319 				     BPF_FUNC_map_lookup_elem),
4320 			BPF_MOV64_IMM(BPF_REG_0, 0),
4321 			BPF_EXIT_INSN(),
4322 		},
4323 		.fixup_map_hash_8b = { 6 },
4324 		.result = REJECT,
4325 		.errstr = "invalid access to packet",
4326 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4327 	},
4328 	{
4329 		"helper access to packet: test11, cls unsuitable helper 1",
4330 		.insns = {
4331 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4332 				    offsetof(struct __sk_buff, data)),
4333 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4334 				    offsetof(struct __sk_buff, data_end)),
4335 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4336 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
4337 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
4338 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
4339 			BPF_MOV64_IMM(BPF_REG_2, 0),
4340 			BPF_MOV64_IMM(BPF_REG_4, 42),
4341 			BPF_MOV64_IMM(BPF_REG_5, 0),
4342 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4343 				     BPF_FUNC_skb_store_bytes),
4344 			BPF_MOV64_IMM(BPF_REG_0, 0),
4345 			BPF_EXIT_INSN(),
4346 		},
4347 		.result = REJECT,
4348 		.errstr = "helper access to the packet",
4349 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4350 	},
4351 	{
4352 		"helper access to packet: test12, cls unsuitable helper 2",
4353 		.insns = {
4354 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4355 				    offsetof(struct __sk_buff, data)),
4356 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4357 				    offsetof(struct __sk_buff, data_end)),
4358 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
4359 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
4360 			BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
4361 			BPF_MOV64_IMM(BPF_REG_2, 0),
4362 			BPF_MOV64_IMM(BPF_REG_4, 4),
4363 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4364 				     BPF_FUNC_skb_load_bytes),
4365 			BPF_MOV64_IMM(BPF_REG_0, 0),
4366 			BPF_EXIT_INSN(),
4367 		},
4368 		.result = REJECT,
4369 		.errstr = "helper access to the packet",
4370 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4371 	},
4372 	{
4373 		"helper access to packet: test13, cls helper ok",
4374 		.insns = {
4375 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4376 				    offsetof(struct __sk_buff, data)),
4377 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4378 				    offsetof(struct __sk_buff, data_end)),
4379 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4380 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4381 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4382 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4383 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4384 			BPF_MOV64_IMM(BPF_REG_2, 4),
4385 			BPF_MOV64_IMM(BPF_REG_3, 0),
4386 			BPF_MOV64_IMM(BPF_REG_4, 0),
4387 			BPF_MOV64_IMM(BPF_REG_5, 0),
4388 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4389 				     BPF_FUNC_csum_diff),
4390 			BPF_MOV64_IMM(BPF_REG_0, 0),
4391 			BPF_EXIT_INSN(),
4392 		},
4393 		.result = ACCEPT,
4394 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4395 	},
4396 	{
4397 		"helper access to packet: test14, cls helper ok sub",
4398 		.insns = {
4399 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4400 				    offsetof(struct __sk_buff, data)),
4401 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4402 				    offsetof(struct __sk_buff, data_end)),
4403 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4404 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4405 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4406 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4407 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
4408 			BPF_MOV64_IMM(BPF_REG_2, 4),
4409 			BPF_MOV64_IMM(BPF_REG_3, 0),
4410 			BPF_MOV64_IMM(BPF_REG_4, 0),
4411 			BPF_MOV64_IMM(BPF_REG_5, 0),
4412 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4413 				     BPF_FUNC_csum_diff),
4414 			BPF_MOV64_IMM(BPF_REG_0, 0),
4415 			BPF_EXIT_INSN(),
4416 		},
4417 		.result = ACCEPT,
4418 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4419 	},
4420 	{
4421 		"helper access to packet: test15, cls helper fail sub",
4422 		.insns = {
4423 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4424 				    offsetof(struct __sk_buff, data)),
4425 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4426 				    offsetof(struct __sk_buff, data_end)),
4427 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4428 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4429 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4430 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4431 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 12),
4432 			BPF_MOV64_IMM(BPF_REG_2, 4),
4433 			BPF_MOV64_IMM(BPF_REG_3, 0),
4434 			BPF_MOV64_IMM(BPF_REG_4, 0),
4435 			BPF_MOV64_IMM(BPF_REG_5, 0),
4436 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4437 				     BPF_FUNC_csum_diff),
4438 			BPF_MOV64_IMM(BPF_REG_0, 0),
4439 			BPF_EXIT_INSN(),
4440 		},
4441 		.result = REJECT,
4442 		.errstr = "invalid access to packet",
4443 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4444 	},
4445 	{
4446 		"helper access to packet: test16, cls helper fail range 1",
4447 		.insns = {
4448 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4449 				    offsetof(struct __sk_buff, data)),
4450 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4451 				    offsetof(struct __sk_buff, data_end)),
4452 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4453 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4454 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4455 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4456 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4457 			BPF_MOV64_IMM(BPF_REG_2, 8),
4458 			BPF_MOV64_IMM(BPF_REG_3, 0),
4459 			BPF_MOV64_IMM(BPF_REG_4, 0),
4460 			BPF_MOV64_IMM(BPF_REG_5, 0),
4461 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4462 				     BPF_FUNC_csum_diff),
4463 			BPF_MOV64_IMM(BPF_REG_0, 0),
4464 			BPF_EXIT_INSN(),
4465 		},
4466 		.result = REJECT,
4467 		.errstr = "invalid access to packet",
4468 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4469 	},
4470 	{
4471 		"helper access to packet: test17, cls helper fail range 2",
4472 		.insns = {
4473 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4474 				    offsetof(struct __sk_buff, data)),
4475 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4476 				    offsetof(struct __sk_buff, data_end)),
4477 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4478 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4479 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4480 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4481 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4482 			BPF_MOV64_IMM(BPF_REG_2, -9),
4483 			BPF_MOV64_IMM(BPF_REG_3, 0),
4484 			BPF_MOV64_IMM(BPF_REG_4, 0),
4485 			BPF_MOV64_IMM(BPF_REG_5, 0),
4486 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4487 				     BPF_FUNC_csum_diff),
4488 			BPF_MOV64_IMM(BPF_REG_0, 0),
4489 			BPF_EXIT_INSN(),
4490 		},
4491 		.result = REJECT,
4492 		.errstr = "R2 min value is negative",
4493 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4494 	},
4495 	{
4496 		"helper access to packet: test18, cls helper fail range 3",
4497 		.insns = {
4498 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4499 				    offsetof(struct __sk_buff, data)),
4500 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4501 				    offsetof(struct __sk_buff, data_end)),
4502 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4503 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4504 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4505 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4506 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4507 			BPF_MOV64_IMM(BPF_REG_2, ~0),
4508 			BPF_MOV64_IMM(BPF_REG_3, 0),
4509 			BPF_MOV64_IMM(BPF_REG_4, 0),
4510 			BPF_MOV64_IMM(BPF_REG_5, 0),
4511 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4512 				     BPF_FUNC_csum_diff),
4513 			BPF_MOV64_IMM(BPF_REG_0, 0),
4514 			BPF_EXIT_INSN(),
4515 		},
4516 		.result = REJECT,
4517 		.errstr = "R2 min value is negative",
4518 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4519 	},
4520 	{
4521 		"helper access to packet: test19, cls helper range zero",
4522 		.insns = {
4523 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4524 				    offsetof(struct __sk_buff, data)),
4525 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4526 				    offsetof(struct __sk_buff, data_end)),
4527 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4528 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4529 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4530 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4531 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4532 			BPF_MOV64_IMM(BPF_REG_2, 0),
4533 			BPF_MOV64_IMM(BPF_REG_3, 0),
4534 			BPF_MOV64_IMM(BPF_REG_4, 0),
4535 			BPF_MOV64_IMM(BPF_REG_5, 0),
4536 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4537 				     BPF_FUNC_csum_diff),
4538 			BPF_MOV64_IMM(BPF_REG_0, 0),
4539 			BPF_EXIT_INSN(),
4540 		},
4541 		.result = ACCEPT,
4542 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4543 	},
4544 	{
4545 		"helper access to packet: test20, pkt end as input",
4546 		.insns = {
4547 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4548 				    offsetof(struct __sk_buff, data)),
4549 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4550 				    offsetof(struct __sk_buff, data_end)),
4551 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4552 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4553 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4554 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4555 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
4556 			BPF_MOV64_IMM(BPF_REG_2, 4),
4557 			BPF_MOV64_IMM(BPF_REG_3, 0),
4558 			BPF_MOV64_IMM(BPF_REG_4, 0),
4559 			BPF_MOV64_IMM(BPF_REG_5, 0),
4560 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4561 				     BPF_FUNC_csum_diff),
4562 			BPF_MOV64_IMM(BPF_REG_0, 0),
4563 			BPF_EXIT_INSN(),
4564 		},
4565 		.result = REJECT,
4566 		.errstr = "R1 type=pkt_end expected=fp",
4567 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4568 	},
4569 	{
4570 		"helper access to packet: test21, wrong reg",
4571 		.insns = {
4572 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4573 				    offsetof(struct __sk_buff, data)),
4574 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4575 				    offsetof(struct __sk_buff, data_end)),
4576 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4577 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4578 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4579 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4580 			BPF_MOV64_IMM(BPF_REG_2, 4),
4581 			BPF_MOV64_IMM(BPF_REG_3, 0),
4582 			BPF_MOV64_IMM(BPF_REG_4, 0),
4583 			BPF_MOV64_IMM(BPF_REG_5, 0),
4584 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4585 				     BPF_FUNC_csum_diff),
4586 			BPF_MOV64_IMM(BPF_REG_0, 0),
4587 			BPF_EXIT_INSN(),
4588 		},
4589 		.result = REJECT,
4590 		.errstr = "invalid access to packet",
4591 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4592 	},
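	/* bpf_map_lookup_elem() is rejected for map types whose elements may
	 * only be reached through dedicated helpers (sockmap, sockhash,
	 * xskmap, stack trace, prog array).
	 */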
4593 	{
4594 		"prevent map lookup in sockmap",
4595 		.insns = {
4596 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4597 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4598 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4599 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4600 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4601 				     BPF_FUNC_map_lookup_elem),
4602 			BPF_EXIT_INSN(),
4603 		},
4604 		.fixup_map_sockmap = { 3 },
4605 		.result = REJECT,
4606 		.errstr = "cannot pass map_type 15 into func bpf_map_lookup_elem",
4607 		.prog_type = BPF_PROG_TYPE_SOCK_OPS,
4608 	},
4609 	{
4610 		"prevent map lookup in sockhash",
4611 		.insns = {
4612 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4613 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4614 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4615 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4616 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4617 				     BPF_FUNC_map_lookup_elem),
4618 			BPF_EXIT_INSN(),
4619 		},
4620 		.fixup_map_sockhash = { 3 },
4621 		.result = REJECT,
4622 		.errstr = "cannot pass map_type 18 into func bpf_map_lookup_elem",
4623 		.prog_type = BPF_PROG_TYPE_SOCK_OPS,
4624 	},
4625 	{
4626 		"prevent map lookup in xskmap",
4627 		.insns = {
4628 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4629 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4630 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4631 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4632 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4633 				     BPF_FUNC_map_lookup_elem),
4634 			BPF_EXIT_INSN(),
4635 		},
4636 		.fixup_map_xskmap = { 3 },
4637 		.result = REJECT,
4638 		.errstr = "cannot pass map_type 17 into func bpf_map_lookup_elem",
4639 		.prog_type = BPF_PROG_TYPE_XDP,
4640 	},
4641 	{
4642 		"prevent map lookup in stack trace",
4643 		.insns = {
4644 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4645 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4646 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4647 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4648 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4649 				     BPF_FUNC_map_lookup_elem),
4650 			BPF_EXIT_INSN(),
4651 		},
4652 		.fixup_map_stacktrace = { 3 },
4653 		.result = REJECT,
4654 		.errstr = "cannot pass map_type 7 into func bpf_map_lookup_elem",
4655 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
4656 	},
4657 	{
4658 		"prevent map lookup in prog array",
4659 		.insns = {
4660 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4661 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4662 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4663 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4664 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4665 				     BPF_FUNC_map_lookup_elem),
4666 			BPF_EXIT_INSN(),
4667 		},
4668 		.fixup_prog2 = { 3 },
4669 		.result = REJECT,
4670 		.errstr = "cannot pass map_type 3 into func bpf_map_lookup_elem",
4671 	},
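	/* Map value accesses: an index taken from map data must be bounds
	 * checked against the value size before it is used to offset the
	 * looked-up pointer; unprivileged programs additionally may not leak
	 * the pointer value.
	 */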
4672 	{
4673 		"valid map access into an array with a constant",
4674 		.insns = {
4675 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4676 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4677 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4678 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4679 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4680 				     BPF_FUNC_map_lookup_elem),
4681 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4682 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4683 				   offsetof(struct test_val, foo)),
4684 			BPF_EXIT_INSN(),
4685 		},
4686 		.fixup_map_hash_48b = { 3 },
4687 		.errstr_unpriv = "R0 leaks addr",
4688 		.result_unpriv = REJECT,
4689 		.result = ACCEPT,
4690 	},
4691 	{
4692 		"valid map access into an array with a register",
4693 		.insns = {
4694 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4695 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4696 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4697 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4698 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4699 				     BPF_FUNC_map_lookup_elem),
4700 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4701 			BPF_MOV64_IMM(BPF_REG_1, 4),
4702 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4703 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4704 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4705 				   offsetof(struct test_val, foo)),
4706 			BPF_EXIT_INSN(),
4707 		},
4708 		.fixup_map_hash_48b = { 3 },
4709 		.errstr_unpriv = "R0 leaks addr",
4710 		.result_unpriv = REJECT,
4711 		.result = ACCEPT,
4712 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4713 	},
4714 	{
4715 		"valid map access into an array with a variable",
4716 		.insns = {
4717 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4718 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4719 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4720 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4721 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4722 				     BPF_FUNC_map_lookup_elem),
4723 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4724 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4725 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
4726 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4727 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4728 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4729 				   offsetof(struct test_val, foo)),
4730 			BPF_EXIT_INSN(),
4731 		},
4732 		.fixup_map_hash_48b = { 3 },
4733 		.errstr_unpriv = "R0 leaks addr",
4734 		.result_unpriv = REJECT,
4735 		.result = ACCEPT,
4736 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4737 	},
4738 	{
4739 		"valid map access into an array with a signed variable",
4740 		.insns = {
4741 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4742 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4743 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4744 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4745 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4746 				     BPF_FUNC_map_lookup_elem),
4747 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
4748 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4749 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
4750 			BPF_MOV32_IMM(BPF_REG_1, 0),
4751 			BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
4752 			BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
4753 			BPF_MOV32_IMM(BPF_REG_1, 0),
4754 			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4755 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4756 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4757 				   offsetof(struct test_val, foo)),
4758 			BPF_EXIT_INSN(),
4759 		},
4760 		.fixup_map_hash_48b = { 3 },
4761 		.errstr_unpriv = "R0 leaks addr",
4762 		.result_unpriv = REJECT,
4763 		.result = ACCEPT,
4764 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4765 	},
4766 	{
4767 		"invalid map access into an array with a constant",
4768 		.insns = {
4769 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4770 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4771 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4772 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4773 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4774 				     BPF_FUNC_map_lookup_elem),
4775 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4776 			BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
4777 				   offsetof(struct test_val, foo)),
4778 			BPF_EXIT_INSN(),
4779 		},
4780 		.fixup_map_hash_48b = { 3 },
4781 		.errstr = "invalid access to map value, value_size=48 off=48 size=8",
4782 		.result = REJECT,
4783 	},
4784 	{
4785 		"invalid map access into an array with a register",
4786 		.insns = {
4787 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4788 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4789 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4790 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4791 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4792 				     BPF_FUNC_map_lookup_elem),
4793 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4794 			BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
4795 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4796 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4797 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4798 				   offsetof(struct test_val, foo)),
4799 			BPF_EXIT_INSN(),
4800 		},
4801 		.fixup_map_hash_48b = { 3 },
4802 		.errstr = "R0 min value is outside of the array range",
4803 		.result = REJECT,
4804 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4805 	},
4806 	{
4807 		"invalid map access into an array with a variable",
4808 		.insns = {
4809 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4810 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4811 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4812 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4813 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4814 				     BPF_FUNC_map_lookup_elem),
4815 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4816 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4817 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4818 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4819 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4820 				   offsetof(struct test_val, foo)),
4821 			BPF_EXIT_INSN(),
4822 		},
4823 		.fixup_map_hash_48b = { 3 },
4824 		.errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
4825 		.result = REJECT,
4826 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4827 	},
4828 	{
4829 		"invalid map access into an array with no floor check",
4830 		.insns = {
4831 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4832 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4833 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4834 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4835 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4836 				     BPF_FUNC_map_lookup_elem),
4837 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4838 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
4839 			BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
4840 			BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
4841 			BPF_MOV32_IMM(BPF_REG_1, 0),
4842 			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4843 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4844 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4845 				   offsetof(struct test_val, foo)),
4846 			BPF_EXIT_INSN(),
4847 		},
4848 		.fixup_map_hash_48b = { 3 },
4849 		.errstr_unpriv = "R0 leaks addr",
4850 		.errstr = "R0 unbounded memory access",
4851 		.result_unpriv = REJECT,
4852 		.result = REJECT,
4853 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4854 	},
4855 	{
4856 		"invalid map access into an array with a invalid max check",
4857 		.insns = {
4858 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4859 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4860 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4861 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4862 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4863 				     BPF_FUNC_map_lookup_elem),
4864 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4865 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4866 			BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
4867 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
4868 			BPF_MOV32_IMM(BPF_REG_1, 0),
4869 			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4870 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4871 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4872 				   offsetof(struct test_val, foo)),
4873 			BPF_EXIT_INSN(),
4874 		},
4875 		.fixup_map_hash_48b = { 3 },
4876 		.errstr_unpriv = "R0 leaks addr",
4877 		.errstr = "invalid access to map value, value_size=48 off=44 size=8",
4878 		.result_unpriv = REJECT,
4879 		.result = REJECT,
4880 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4881 	},
4882 	{
4883 		"invalid map access into an array with a invalid max check",
4884 		.insns = {
4885 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4886 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4887 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4888 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4889 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4890 				     BPF_FUNC_map_lookup_elem),
4891 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
4892 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
4893 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4894 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4895 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4896 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4897 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4898 				     BPF_FUNC_map_lookup_elem),
4899 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4900 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
4901 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
4902 				    offsetof(struct test_val, foo)),
4903 			BPF_EXIT_INSN(),
4904 		},
4905 		.fixup_map_hash_48b = { 3, 11 },
4906 		.errstr = "R0 pointer += pointer",
4907 		.result = REJECT,
4908 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4909 	},
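	/* CGROUP_SKB direct packet read tests: which __sk_buff fields can be
	 * read (and a few written) from cgroup skb programs, and which
	 * accesses remain privileged-only or invalid.
	 */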
4910 	{
4911 		"direct packet read test#1 for CGROUP_SKB",
4912 		.insns = {
4913 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4914 				    offsetof(struct __sk_buff, data)),
4915 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4916 				    offsetof(struct __sk_buff, data_end)),
4917 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
4918 				    offsetof(struct __sk_buff, len)),
4919 			BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
4920 				    offsetof(struct __sk_buff, pkt_type)),
4921 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4922 				    offsetof(struct __sk_buff, mark)),
4923 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
4924 				    offsetof(struct __sk_buff, mark)),
4925 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4926 				    offsetof(struct __sk_buff, queue_mapping)),
4927 			BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
4928 				    offsetof(struct __sk_buff, protocol)),
4929 			BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
4930 				    offsetof(struct __sk_buff, vlan_present)),
4931 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4932 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4933 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4934 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4935 			BPF_MOV64_IMM(BPF_REG_0, 0),
4936 			BPF_EXIT_INSN(),
4937 		},
4938 		.result = ACCEPT,
4939 		.result_unpriv = REJECT,
4940 		.errstr_unpriv = "invalid bpf_context access off=76 size=4",
4941 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4942 	},
4943 	{
4944 		"direct packet read test#2 for CGROUP_SKB",
4945 		.insns = {
4946 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
4947 				    offsetof(struct __sk_buff, vlan_tci)),
4948 			BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
4949 				    offsetof(struct __sk_buff, vlan_proto)),
4950 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4951 				    offsetof(struct __sk_buff, priority)),
4952 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
4953 				    offsetof(struct __sk_buff, priority)),
4954 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4955 				    offsetof(struct __sk_buff,
4956 					     ingress_ifindex)),
4957 			BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
4958 				    offsetof(struct __sk_buff, tc_index)),
4959 			BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
4960 				    offsetof(struct __sk_buff, hash)),
4961 			BPF_MOV64_IMM(BPF_REG_0, 0),
4962 			BPF_EXIT_INSN(),
4963 		},
4964 		.result = ACCEPT,
4965 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4966 	},
4967 	{
4968 		"direct packet read test#3 for CGROUP_SKB",
4969 		.insns = {
4970 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
4971 				    offsetof(struct __sk_buff, cb[0])),
4972 			BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
4973 				    offsetof(struct __sk_buff, cb[1])),
4974 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4975 				    offsetof(struct __sk_buff, cb[2])),
4976 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4977 				    offsetof(struct __sk_buff, cb[3])),
4978 			BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
4979 				    offsetof(struct __sk_buff, cb[4])),
4980 			BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
4981 				    offsetof(struct __sk_buff, napi_id)),
4982 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_4,
4983 				    offsetof(struct __sk_buff, cb[0])),
4984 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_5,
4985 				    offsetof(struct __sk_buff, cb[1])),
4986 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
4987 				    offsetof(struct __sk_buff, cb[2])),
4988 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_7,
4989 				    offsetof(struct __sk_buff, cb[3])),
4990 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_8,
4991 				    offsetof(struct __sk_buff, cb[4])),
4992 			BPF_MOV64_IMM(BPF_REG_0, 0),
4993 			BPF_EXIT_INSN(),
4994 		},
4995 		.result = ACCEPT,
4996 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4997 	},
4998 	{
4999 		"direct packet read test#4 for CGROUP_SKB",
5000 		.insns = {
5001 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5002 				    offsetof(struct __sk_buff, family)),
5003 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5004 				    offsetof(struct __sk_buff, remote_ip4)),
5005 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
5006 				    offsetof(struct __sk_buff, local_ip4)),
5007 			BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
5008 				    offsetof(struct __sk_buff, remote_ip6[0])),
5009 			BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
5010 				    offsetof(struct __sk_buff, remote_ip6[1])),
5011 			BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
5012 				    offsetof(struct __sk_buff, remote_ip6[2])),
5013 			BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
5014 				    offsetof(struct __sk_buff, remote_ip6[3])),
5015 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
5016 				    offsetof(struct __sk_buff, local_ip6[0])),
5017 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
5018 				    offsetof(struct __sk_buff, local_ip6[1])),
5019 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
5020 				    offsetof(struct __sk_buff, local_ip6[2])),
5021 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
5022 				    offsetof(struct __sk_buff, local_ip6[3])),
5023 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
5024 				    offsetof(struct __sk_buff, remote_port)),
5025 			BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
5026 				    offsetof(struct __sk_buff, local_port)),
5027 			BPF_MOV64_IMM(BPF_REG_0, 0),
5028 			BPF_EXIT_INSN(),
5029 		},
5030 		.result = ACCEPT,
5031 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5032 	},
5033 	{
5034 		"invalid access of tc_classid for CGROUP_SKB",
5035 		.insns = {
5036 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5037 				    offsetof(struct __sk_buff, tc_classid)),
5038 			BPF_MOV64_IMM(BPF_REG_0, 0),
5039 			BPF_EXIT_INSN(),
5040 		},
5041 		.result = REJECT,
5042 		.errstr = "invalid bpf_context access",
5043 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5044 	},
5045 	{
5046 		"invalid access of data_meta for CGROUP_SKB",
5047 		.insns = {
5048 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5049 				    offsetof(struct __sk_buff, data_meta)),
5050 			BPF_MOV64_IMM(BPF_REG_0, 0),
5051 			BPF_EXIT_INSN(),
5052 		},
5053 		.result = REJECT,
5054 		.errstr = "invalid bpf_context access",
5055 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5056 	},
5057 	{
5058 		"invalid access of flow_keys for CGROUP_SKB",
5059 		.insns = {
5060 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5061 				    offsetof(struct __sk_buff, flow_keys)),
5062 			BPF_MOV64_IMM(BPF_REG_0, 0),
5063 			BPF_EXIT_INSN(),
5064 		},
5065 		.result = REJECT,
5066 		.errstr = "invalid bpf_context access",
5067 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5068 	},
5069 	{
5070 		"invalid write access to napi_id for CGROUP_SKB",
5071 		.insns = {
5072 			BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
5073 				    offsetof(struct __sk_buff, napi_id)),
5074 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_9,
5075 				    offsetof(struct __sk_buff, napi_id)),
5076 			BPF_MOV64_IMM(BPF_REG_0, 0),
5077 			BPF_EXIT_INSN(),
5078 		},
5079 		.result = REJECT,
5080 		.errstr = "invalid bpf_context access",
5081 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5082 	},
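	/* bpf_get_local_storage() tests: the helper expects a cgroup
	 * storage map in R1 and zero flags in R2, and the returned
	 * pointer may only be dereferenced within the 64-byte value.
	 */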
5083 	{
5084 		"valid cgroup storage access",
5085 		.insns = {
5086 			BPF_MOV64_IMM(BPF_REG_2, 0),
5087 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5088 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5089 				     BPF_FUNC_get_local_storage),
5090 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5091 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5092 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5093 			BPF_EXIT_INSN(),
5094 		},
5095 		.fixup_cgroup_storage = { 1 },
5096 		.result = ACCEPT,
5097 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5098 	},
5099 	{
5100 		"invalid cgroup storage access 1",
5101 		.insns = {
5102 			BPF_MOV64_IMM(BPF_REG_2, 0),
5103 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5104 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5105 				     BPF_FUNC_get_local_storage),
5106 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5107 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5108 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5109 			BPF_EXIT_INSN(),
5110 		},
5111 		.fixup_map_hash_8b = { 1 },
5112 		.result = REJECT,
5113 		.errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
5114 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5115 	},
5116 	{
5117 		"invalid cgroup storage access 2",
5118 		.insns = {
5119 			BPF_MOV64_IMM(BPF_REG_2, 0),
5120 			BPF_LD_MAP_FD(BPF_REG_1, 1),
5121 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5122 				     BPF_FUNC_get_local_storage),
5123 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5124 			BPF_EXIT_INSN(),
5125 		},
5126 		.result = REJECT,
5127 		.errstr = "fd 1 is not pointing to valid bpf_map",
5128 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5129 	},
5130 	{
5131 		"invalid cgroup storage access 3",
5132 		.insns = {
5133 			BPF_MOV64_IMM(BPF_REG_2, 0),
5134 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5135 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5136 				     BPF_FUNC_get_local_storage),
5137 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
5138 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5139 			BPF_MOV64_IMM(BPF_REG_0, 0),
5140 			BPF_EXIT_INSN(),
5141 		},
5142 		.fixup_cgroup_storage = { 1 },
5143 		.result = REJECT,
5144 		.errstr = "invalid access to map value, value_size=64 off=256 size=4",
5145 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5146 	},
5147 	{
5148 		"invalid cgroup storage access 4",
5149 		.insns = {
5150 			BPF_MOV64_IMM(BPF_REG_2, 0),
5151 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5152 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5153 				     BPF_FUNC_get_local_storage),
5154 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
5155 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5156 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5157 			BPF_EXIT_INSN(),
5158 		},
5159 		.fixup_cgroup_storage = { 1 },
5160 		.result = REJECT,
5161 		.errstr = "invalid access to map value, value_size=64 off=-2 size=4",
5162 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5163 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5164 	},
5165 	{
5166 		"invalid cgroup storage access 5",
5167 		.insns = {
5168 			BPF_MOV64_IMM(BPF_REG_2, 7),
5169 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5170 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5171 				     BPF_FUNC_get_local_storage),
5172 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5173 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5174 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5175 			BPF_EXIT_INSN(),
5176 		},
5177 		.fixup_cgroup_storage = { 1 },
5178 		.result = REJECT,
5179 		.errstr = "get_local_storage() doesn't support non-zero flags",
5180 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5181 	},
5182 	{
5183 		"invalid cgroup storage access 6",
5184 		.insns = {
5185 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
5186 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5187 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5188 				     BPF_FUNC_get_local_storage),
5189 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5190 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5191 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5192 			BPF_EXIT_INSN(),
5193 		},
5194 		.fixup_cgroup_storage = { 1 },
5195 		.result = REJECT,
5196 		.errstr = "get_local_storage() doesn't support non-zero flags",
5197 		.errstr_unpriv = "R2 leaks addr into helper function",
5198 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5199 	},
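	/* Same set of checks, but against a per-cpu cgroup storage map
	 * (fixup_percpu_cgroup_storage).
	 */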
5200 	{
5201 		"valid per-cpu cgroup storage access",
5202 		.insns = {
5203 			BPF_MOV64_IMM(BPF_REG_2, 0),
5204 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5205 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5206 				     BPF_FUNC_get_local_storage),
5207 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5208 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5209 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5210 			BPF_EXIT_INSN(),
5211 		},
5212 		.fixup_percpu_cgroup_storage = { 1 },
5213 		.result = ACCEPT,
5214 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5215 	},
5216 	{
5217 		"invalid per-cpu cgroup storage access 1",
5218 		.insns = {
5219 			BPF_MOV64_IMM(BPF_REG_2, 0),
5220 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5221 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5222 				     BPF_FUNC_get_local_storage),
5223 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5224 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5225 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5226 			BPF_EXIT_INSN(),
5227 		},
5228 		.fixup_map_hash_8b = { 1 },
5229 		.result = REJECT,
5230 		.errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
5231 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5232 	},
5233 	{
5234 		"invalid per-cpu cgroup storage access 2",
5235 		.insns = {
5236 			BPF_MOV64_IMM(BPF_REG_2, 0),
5237 			BPF_LD_MAP_FD(BPF_REG_1, 1),
5238 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5239 				     BPF_FUNC_get_local_storage),
5240 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5241 			BPF_EXIT_INSN(),
5242 		},
5243 		.result = REJECT,
5244 		.errstr = "fd 1 is not pointing to valid bpf_map",
5245 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5246 	},
5247 	{
5248 		"invalid per-cpu cgroup storage access 3",
5249 		.insns = {
5250 			BPF_MOV64_IMM(BPF_REG_2, 0),
5251 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5252 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5253 				     BPF_FUNC_get_local_storage),
5254 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
5255 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5256 			BPF_MOV64_IMM(BPF_REG_0, 0),
5257 			BPF_EXIT_INSN(),
5258 		},
5259 		.fixup_percpu_cgroup_storage = { 1 },
5260 		.result = REJECT,
5261 		.errstr = "invalid access to map value, value_size=64 off=256 size=4",
5262 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5263 	},
5264 	{
5265 		"invalid per-cpu cgroup storage access 4",
5266 		.insns = {
5267 			BPF_MOV64_IMM(BPF_REG_2, 0),
5268 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5269 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5270 				     BPF_FUNC_get_local_storage),
5271 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
5272 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5273 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5274 			BPF_EXIT_INSN(),
5275 		},
5276 		.fixup_percpu_cgroup_storage = { 1 },
5277 		.result = REJECT,
5278 		.errstr = "invalid access to map value, value_size=64 off=-2 size=4",
5279 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5280 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5281 	},
5282 	{
5283 		"invalid per-cpu cgroup storage access 5",
5284 		.insns = {
5285 			BPF_MOV64_IMM(BPF_REG_2, 7),
5286 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5287 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5288 				     BPF_FUNC_get_local_storage),
5289 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5290 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5291 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5292 			BPF_EXIT_INSN(),
5293 		},
5294 		.fixup_percpu_cgroup_storage = { 1 },
5295 		.result = REJECT,
5296 		.errstr = "get_local_storage() doesn't support non-zero flags",
5297 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5298 	},
5299 	{
5300 		"invalid per-cpu cgroup storage access 6",
5301 		.insns = {
5302 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
5303 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5304 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5305 				     BPF_FUNC_get_local_storage),
5306 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5307 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5308 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5309 			BPF_EXIT_INSN(),
5310 		},
5311 		.fixup_percpu_cgroup_storage = { 1 },
5312 		.result = REJECT,
5313 		.errstr = "get_local_storage() doesn't support non-zero flags",
5314 		.errstr_unpriv = "R2 leaks addr into helper function",
5315 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5316 	},
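	/* skb->tstamp: readable by CGROUP_SKB programs, writable only
	 * for privileged loads (the unprivileged write is rejected).
	 */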
5317 	{
5318 		"write tstamp from CGROUP_SKB",
5319 		.insns = {
5320 			BPF_MOV64_IMM(BPF_REG_0, 0),
5321 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
5322 				    offsetof(struct __sk_buff, tstamp)),
5323 			BPF_MOV64_IMM(BPF_REG_0, 0),
5324 			BPF_EXIT_INSN(),
5325 		},
5326 		.result = ACCEPT,
5327 		.result_unpriv = REJECT,
5328 		.errstr_unpriv = "invalid bpf_context access off=152 size=8",
5329 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5330 	},
5331 	{
5332 		"read tstamp from CGROUP_SKB",
5333 		.insns = {
5334 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
5335 				    offsetof(struct __sk_buff, tstamp)),
5336 			BPF_MOV64_IMM(BPF_REG_0, 0),
5337 			BPF_EXIT_INSN(),
5338 		},
5339 		.result = ACCEPT,
5340 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5341 	},
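	/* map_lookup_elem() result tracking: a register copy of R0
	 * inherits the NULL check done on R0, while ALU ops on the
	 * still-unchecked pointer or a check against a different
	 * lookup's result must be rejected.
	 */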
5342 	{
5343 		"multiple registers share map_lookup_elem result",
5344 		.insns = {
5345 			BPF_MOV64_IMM(BPF_REG_1, 10),
5346 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5347 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5348 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5349 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5350 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5351 				     BPF_FUNC_map_lookup_elem),
5352 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5353 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5354 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5355 			BPF_EXIT_INSN(),
5356 		},
5357 		.fixup_map_hash_8b = { 4 },
5358 		.result = ACCEPT,
5359 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
5360 	},
5361 	{
5362 		"alu ops on ptr_to_map_value_or_null, 1",
5363 		.insns = {
5364 			BPF_MOV64_IMM(BPF_REG_1, 10),
5365 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5366 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5367 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5368 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5369 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5370 				     BPF_FUNC_map_lookup_elem),
5371 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5372 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
5373 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
5374 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5375 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5376 			BPF_EXIT_INSN(),
5377 		},
5378 		.fixup_map_hash_8b = { 4 },
5379 		.errstr = "R4 pointer arithmetic on map_value_or_null",
5380 		.result = REJECT,
5381 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
5382 	},
5383 	{
5384 		"alu ops on ptr_to_map_value_or_null, 2",
5385 		.insns = {
5386 			BPF_MOV64_IMM(BPF_REG_1, 10),
5387 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5388 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5389 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5390 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5391 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5392 				     BPF_FUNC_map_lookup_elem),
5393 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5394 			BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
5395 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5396 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5397 			BPF_EXIT_INSN(),
5398 		},
5399 		.fixup_map_hash_8b = { 4 },
5400 		.errstr = "R4 pointer arithmetic on map_value_or_null",
5401 		.result = REJECT,
5402 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
5403 	},
5404 	{
5405 		"alu ops on ptr_to_map_value_or_null, 3",
5406 		.insns = {
5407 			BPF_MOV64_IMM(BPF_REG_1, 10),
5408 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5409 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5410 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5411 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5412 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5413 				     BPF_FUNC_map_lookup_elem),
5414 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5415 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
5416 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5417 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5418 			BPF_EXIT_INSN(),
5419 		},
5420 		.fixup_map_hash_8b = { 4 },
5421 		.errstr = "R4 pointer arithmetic on map_value_or_null",
5422 		.result = REJECT,
5423 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
5424 	},
5425 	{
5426 		"invalid memory access with multiple map_lookup_elem calls",
5427 		.insns = {
5428 			BPF_MOV64_IMM(BPF_REG_1, 10),
5429 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5430 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5431 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5432 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5433 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
5434 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
5435 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5436 				     BPF_FUNC_map_lookup_elem),
5437 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5438 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
5439 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
5440 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5441 				     BPF_FUNC_map_lookup_elem),
5442 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5443 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5444 			BPF_EXIT_INSN(),
5445 		},
5446 		.fixup_map_hash_8b = { 4 },
5447 		.result = REJECT,
5448 		.errstr = "R4 !read_ok",
5449 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
5450 	},
5451 	{
5452 		"valid indirect map_lookup_elem access with 2nd lookup in branch",
5453 		.insns = {
5454 			BPF_MOV64_IMM(BPF_REG_1, 10),
5455 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5456 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5457 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5458 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5459 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
5460 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
5461 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5462 				     BPF_FUNC_map_lookup_elem),
5463 			BPF_MOV64_IMM(BPF_REG_2, 10),
5464 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
5465 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
5466 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
5467 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5468 				     BPF_FUNC_map_lookup_elem),
5469 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5470 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5471 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5472 			BPF_EXIT_INSN(),
5473 		},
5474 		.fixup_map_hash_8b = { 4 },
5475 		.result = ACCEPT,
5476 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
5477 	},
5478 	{
5479 		"invalid map access from else condition",
5480 		.insns = {
5481 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5482 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5483 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5484 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5485 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
5486 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5487 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5488 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1),
5489 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5490 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
5491 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
5492 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
5493 			BPF_EXIT_INSN(),
5494 		},
5495 		.fixup_map_hash_48b = { 3 },
5496 		.errstr = "R0 unbounded memory access",
5497 		.result = REJECT,
5498 		.errstr_unpriv = "R0 leaks addr",
5499 		.result_unpriv = REJECT,
5500 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5501 	},
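	/* BPF_OR on known constants must keep the result a known scalar,
	 * so the bpf_probe_read() size can still be validated against
	 * the 48-byte stack buffer at R10-48.
	 */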
5502 	{
5503 		"constant register |= constant should keep constant type",
5504 		.insns = {
5505 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5506 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5507 			BPF_MOV64_IMM(BPF_REG_2, 34),
5508 			BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
5509 			BPF_MOV64_IMM(BPF_REG_3, 0),
5510 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5511 			BPF_EXIT_INSN(),
5512 		},
5513 		.result = ACCEPT,
5514 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5515 	},
5516 	{
5517 		"constant register |= constant should not bypass stack boundary checks",
5518 		.insns = {
5519 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5520 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5521 			BPF_MOV64_IMM(BPF_REG_2, 34),
5522 			BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
5523 			BPF_MOV64_IMM(BPF_REG_3, 0),
5524 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5525 			BPF_EXIT_INSN(),
5526 		},
5527 		.errstr = "invalid stack type R1 off=-48 access_size=58",
5528 		.result = REJECT,
5529 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5530 	},
5531 	{
5532 		"constant register |= constant register should keep constant type",
5533 		.insns = {
5534 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5535 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5536 			BPF_MOV64_IMM(BPF_REG_2, 34),
5537 			BPF_MOV64_IMM(BPF_REG_4, 13),
5538 			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
5539 			BPF_MOV64_IMM(BPF_REG_3, 0),
5540 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5541 			BPF_EXIT_INSN(),
5542 		},
5543 		.result = ACCEPT,
5544 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5545 	},
5546 	{
5547 		"constant register |= constant register should not bypass stack boundary checks",
5548 		.insns = {
5549 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5550 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5551 			BPF_MOV64_IMM(BPF_REG_2, 34),
5552 			BPF_MOV64_IMM(BPF_REG_4, 24),
5553 			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
5554 			BPF_MOV64_IMM(BPF_REG_3, 0),
5555 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5556 			BPF_EXIT_INSN(),
5557 		},
5558 		.errstr = "invalid stack type R1 off=-48 access_size=58",
5559 		.result = REJECT,
5560 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5561 	},
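	/* LWT direct packet access: these tests expect packet writes to
	 * be allowed only for BPF_PROG_TYPE_LWT_XMIT, while LWT_IN,
	 * LWT_OUT and LWT_XMIT may all read packet data.
	 */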
5562 	{
5563 		"invalid direct packet write for LWT_IN",
5564 		.insns = {
5565 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5566 				    offsetof(struct __sk_buff, data)),
5567 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5568 				    offsetof(struct __sk_buff, data_end)),
5569 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5570 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5571 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5572 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
5573 			BPF_MOV64_IMM(BPF_REG_0, 0),
5574 			BPF_EXIT_INSN(),
5575 		},
5576 		.errstr = "cannot write into packet",
5577 		.result = REJECT,
5578 		.prog_type = BPF_PROG_TYPE_LWT_IN,
5579 	},
5580 	{
5581 		"invalid direct packet write for LWT_OUT",
5582 		.insns = {
5583 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5584 				    offsetof(struct __sk_buff, data)),
5585 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5586 				    offsetof(struct __sk_buff, data_end)),
5587 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5588 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5589 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5590 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
5591 			BPF_MOV64_IMM(BPF_REG_0, 0),
5592 			BPF_EXIT_INSN(),
5593 		},
5594 		.errstr = "cannot write into packet",
5595 		.result = REJECT,
5596 		.prog_type = BPF_PROG_TYPE_LWT_OUT,
5597 	},
5598 	{
5599 		"direct packet write for LWT_XMIT",
5600 		.insns = {
5601 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5602 				    offsetof(struct __sk_buff, data)),
5603 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5604 				    offsetof(struct __sk_buff, data_end)),
5605 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5606 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5607 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5608 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
5609 			BPF_MOV64_IMM(BPF_REG_0, 0),
5610 			BPF_EXIT_INSN(),
5611 		},
5612 		.result = ACCEPT,
5613 		.prog_type = BPF_PROG_TYPE_LWT_XMIT,
5614 	},
5615 	{
5616 		"direct packet read for LWT_IN",
5617 		.insns = {
5618 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5619 				    offsetof(struct __sk_buff, data)),
5620 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5621 				    offsetof(struct __sk_buff, data_end)),
5622 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5623 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5624 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5625 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
5626 			BPF_MOV64_IMM(BPF_REG_0, 0),
5627 			BPF_EXIT_INSN(),
5628 		},
5629 		.result = ACCEPT,
5630 		.prog_type = BPF_PROG_TYPE_LWT_IN,
5631 	},
5632 	{
5633 		"direct packet read for LWT_OUT",
5634 		.insns = {
5635 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5636 				    offsetof(struct __sk_buff, data)),
5637 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5638 				    offsetof(struct __sk_buff, data_end)),
5639 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5640 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5641 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5642 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
5643 			BPF_MOV64_IMM(BPF_REG_0, 0),
5644 			BPF_EXIT_INSN(),
5645 		},
5646 		.result = ACCEPT,
5647 		.prog_type = BPF_PROG_TYPE_LWT_OUT,
5648 	},
5649 	{
5650 		"direct packet read for LWT_XMIT",
5651 		.insns = {
5652 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5653 				    offsetof(struct __sk_buff, data)),
5654 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5655 				    offsetof(struct __sk_buff, data_end)),
5656 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5657 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5658 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5659 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
5660 			BPF_MOV64_IMM(BPF_REG_0, 0),
5661 			BPF_EXIT_INSN(),
5662 		},
5663 		.result = ACCEPT,
5664 		.prog_type = BPF_PROG_TYPE_LWT_XMIT,
5665 	},
5666 	{
5667 		"overlapping checks for direct packet access",
5668 		.insns = {
5669 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5670 				    offsetof(struct __sk_buff, data)),
5671 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5672 				    offsetof(struct __sk_buff, data_end)),
5673 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5674 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5675 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
5676 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
5677 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
5678 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
5679 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
5680 			BPF_MOV64_IMM(BPF_REG_0, 0),
5681 			BPF_EXIT_INSN(),
5682 		},
5683 		.result = ACCEPT,
5684 		.prog_type = BPF_PROG_TYPE_LWT_XMIT,
5685 	},
5686 	{
5687 		"make headroom for LWT_XMIT",
5688 		.insns = {
5689 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5690 			BPF_MOV64_IMM(BPF_REG_2, 34),
5691 			BPF_MOV64_IMM(BPF_REG_3, 0),
5692 			BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
5693 			/* split for s390 to succeed */
5694 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
5695 			BPF_MOV64_IMM(BPF_REG_2, 42),
5696 			BPF_MOV64_IMM(BPF_REG_3, 0),
5697 			BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
5698 			BPF_MOV64_IMM(BPF_REG_0, 0),
5699 			BPF_EXIT_INSN(),
5700 		},
5701 		.result = ACCEPT,
5702 		.prog_type = BPF_PROG_TYPE_LWT_XMIT,
5703 	},
5704 	{
5705 		"invalid access of tc_classid for LWT_IN",
5706 		.insns = {
5707 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5708 				    offsetof(struct __sk_buff, tc_classid)),
5709 			BPF_EXIT_INSN(),
5710 		},
5711 		.result = REJECT,
5712 		.errstr = "invalid bpf_context access",
5713 	},
5714 	{
5715 		"invalid access of tc_classid for LWT_OUT",
5716 		.insns = {
5717 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5718 				    offsetof(struct __sk_buff, tc_classid)),
5719 			BPF_EXIT_INSN(),
5720 		},
5721 		.result = REJECT,
5722 		.errstr = "invalid bpf_context access",
5723 	},
5724 	{
5725 		"invalid access of tc_classid for LWT_XMIT",
5726 		.insns = {
5727 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5728 				    offsetof(struct __sk_buff, tc_classid)),
5729 			BPF_EXIT_INSN(),
5730 		},
5731 		.result = REJECT,
5732 		.errstr = "invalid bpf_context access",
5733 	},
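	/* Pointer leak tests: storing a map or stack pointer into the
	 * context or into a map value is rejected for unprivileged
	 * loaders, and BPF_XADD into ctx is rejected outright.
	 */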
5734 	{
5735 		"leak pointer into ctx 1",
5736 		.insns = {
5737 			BPF_MOV64_IMM(BPF_REG_0, 0),
5738 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
5739 				    offsetof(struct __sk_buff, cb[0])),
5740 			BPF_LD_MAP_FD(BPF_REG_2, 0),
5741 			BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
5742 				      offsetof(struct __sk_buff, cb[0])),
5743 			BPF_EXIT_INSN(),
5744 		},
5745 		.fixup_map_hash_8b = { 2 },
5746 		.errstr_unpriv = "R2 leaks addr into mem",
5747 		.result_unpriv = REJECT,
5748 		.result = REJECT,
5749 		.errstr = "BPF_XADD stores into R1 ctx is not allowed",
5750 	},
5751 	{
5752 		"leak pointer into ctx 2",
5753 		.insns = {
5754 			BPF_MOV64_IMM(BPF_REG_0, 0),
5755 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
5756 				    offsetof(struct __sk_buff, cb[0])),
5757 			BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
5758 				      offsetof(struct __sk_buff, cb[0])),
5759 			BPF_EXIT_INSN(),
5760 		},
5761 		.errstr_unpriv = "R10 leaks addr into mem",
5762 		.result_unpriv = REJECT,
5763 		.result = REJECT,
5764 		.errstr = "BPF_XADD stores into R1 ctx is not allowed",
5765 	},
5766 	{
5767 		"leak pointer into ctx 3",
5768 		.insns = {
5769 			BPF_MOV64_IMM(BPF_REG_0, 0),
5770 			BPF_LD_MAP_FD(BPF_REG_2, 0),
5771 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
5772 				      offsetof(struct __sk_buff, cb[0])),
5773 			BPF_EXIT_INSN(),
5774 		},
5775 		.fixup_map_hash_8b = { 1 },
5776 		.errstr_unpriv = "R2 leaks addr into ctx",
5777 		.result_unpriv = REJECT,
5778 		.result = ACCEPT,
5779 	},
5780 	{
5781 		"leak pointer into map val",
5782 		.insns = {
5783 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5784 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5785 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5786 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5787 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5788 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5789 				     BPF_FUNC_map_lookup_elem),
5790 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
5791 			BPF_MOV64_IMM(BPF_REG_3, 0),
5792 			BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
5793 			BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
5794 			BPF_MOV64_IMM(BPF_REG_0, 0),
5795 			BPF_EXIT_INSN(),
5796 		},
5797 		.fixup_map_hash_8b = { 4 },
5798 		.errstr_unpriv = "R6 leaks addr into mem",
5799 		.result_unpriv = REJECT,
5800 		.result = ACCEPT,
5801 	},
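	/* Helper memory-range checks: bpf_probe_read() writing into a
	 * map value must stay within the 48-byte struct test_val; full
	 * and partial ranges are accepted, while zero, oversized and
	 * negative sizes are rejected.
	 */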
5802 	{
5803 		"helper access to map: full range",
5804 		.insns = {
5805 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5806 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5807 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5808 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5809 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5810 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5811 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5812 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5813 			BPF_MOV64_IMM(BPF_REG_3, 0),
5814 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5815 			BPF_EXIT_INSN(),
5816 		},
5817 		.fixup_map_hash_48b = { 3 },
5818 		.result = ACCEPT,
5819 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5820 	},
5821 	{
5822 		"helper access to map: partial range",
5823 		.insns = {
5824 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5825 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5826 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5827 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5828 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5829 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5830 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5831 			BPF_MOV64_IMM(BPF_REG_2, 8),
5832 			BPF_MOV64_IMM(BPF_REG_3, 0),
5833 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5834 			BPF_EXIT_INSN(),
5835 		},
5836 		.fixup_map_hash_48b = { 3 },
5837 		.result = ACCEPT,
5838 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5839 	},
5840 	{
5841 		"helper access to map: empty range",
5842 		.insns = {
5843 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5844 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5845 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5846 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5847 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5848 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
5849 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5850 			BPF_MOV64_IMM(BPF_REG_2, 0),
5851 			BPF_EMIT_CALL(BPF_FUNC_trace_printk),
5852 			BPF_EXIT_INSN(),
5853 		},
5854 		.fixup_map_hash_48b = { 3 },
5855 		.errstr = "invalid access to map value, value_size=48 off=0 size=0",
5856 		.result = REJECT,
5857 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5858 	},
5859 	{
5860 		"helper access to map: out-of-bound range",
5861 		.insns = {
5862 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5863 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5864 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5865 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5866 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5867 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5868 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5869 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
5870 			BPF_MOV64_IMM(BPF_REG_3, 0),
5871 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5872 			BPF_EXIT_INSN(),
5873 		},
5874 		.fixup_map_hash_48b = { 3 },
5875 		.errstr = "invalid access to map value, value_size=48 off=0 size=56",
5876 		.result = REJECT,
5877 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5878 	},
5879 	{
5880 		"helper access to map: negative range",
5881 		.insns = {
5882 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5883 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5884 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5885 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5886 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5887 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5888 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5889 			BPF_MOV64_IMM(BPF_REG_2, -8),
5890 			BPF_MOV64_IMM(BPF_REG_3, 0),
5891 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5892 			BPF_EXIT_INSN(),
5893 		},
5894 		.fixup_map_hash_48b = { 3 },
5895 		.errstr = "R2 min value is negative",
5896 		.result = REJECT,
5897 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5898 	},
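	/* "Adjusted map" variants: the map value pointer is first moved
	 * forward by offsetof(struct test_val, foo) via a constant
	 * immediate before being handed to the helper.
	 */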
5899 	{
5900 		"helper access to adjusted map (via const imm): full range",
5901 		.insns = {
5902 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5903 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5904 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5905 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5906 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5907 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5908 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5909 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5910 				offsetof(struct test_val, foo)),
5911 			BPF_MOV64_IMM(BPF_REG_2,
5912 				sizeof(struct test_val) -
5913 				offsetof(struct test_val, foo)),
5914 			BPF_MOV64_IMM(BPF_REG_3, 0),
5915 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5916 			BPF_EXIT_INSN(),
5917 		},
5918 		.fixup_map_hash_48b = { 3 },
5919 		.result = ACCEPT,
5920 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5921 	},
5922 	{
5923 		"helper access to adjusted map (via const imm): partial range",
5924 		.insns = {
5925 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5926 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5927 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5928 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5929 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5930 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5931 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5932 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5933 				offsetof(struct test_val, foo)),
5934 			BPF_MOV64_IMM(BPF_REG_2, 8),
5935 			BPF_MOV64_IMM(BPF_REG_3, 0),
5936 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5937 			BPF_EXIT_INSN(),
5938 		},
5939 		.fixup_map_hash_48b = { 3 },
5940 		.result = ACCEPT,
5941 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5942 	},
5943 	{
5944 		"helper access to adjusted map (via const imm): empty range",
5945 		.insns = {
5946 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5947 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5948 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5949 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5950 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5951 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5952 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5953 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5954 				offsetof(struct test_val, foo)),
5955 			BPF_MOV64_IMM(BPF_REG_2, 0),
5956 			BPF_EMIT_CALL(BPF_FUNC_trace_printk),
5957 			BPF_EXIT_INSN(),
5958 		},
5959 		.fixup_map_hash_48b = { 3 },
5960 		.errstr = "invalid access to map value, value_size=48 off=4 size=0",
5961 		.result = REJECT,
5962 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5963 	},
5964 	{
5965 		"helper access to adjusted map (via const imm): out-of-bound range",
5966 		.insns = {
5967 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5968 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5969 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5970 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5971 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5972 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5973 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5974 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5975 				offsetof(struct test_val, foo)),
5976 			BPF_MOV64_IMM(BPF_REG_2,
5977 				sizeof(struct test_val) -
5978 				offsetof(struct test_val, foo) + 8),
5979 			BPF_MOV64_IMM(BPF_REG_3, 0),
5980 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5981 			BPF_EXIT_INSN(),
5982 		},
5983 		.fixup_map_hash_48b = { 3 },
5984 		.errstr = "invalid access to map value, value_size=48 off=4 size=52",
5985 		.result = REJECT,
5986 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5987 	},
5988 	{
5989 		"helper access to adjusted map (via const imm): negative range (> adjustment)",
5990 		.insns = {
5991 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5992 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5993 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5994 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5995 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5996 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5997 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5998 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5999 				offsetof(struct test_val, foo)),
6000 			BPF_MOV64_IMM(BPF_REG_2, -8),
6001 			BPF_MOV64_IMM(BPF_REG_3, 0),
6002 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6003 			BPF_EXIT_INSN(),
6004 		},
6005 		.fixup_map_hash_48b = { 3 },
6006 		.errstr = "R2 min value is negative",
6007 		.result = REJECT,
6008 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6009 	},
6010 	{
6011 		"helper access to adjusted map (via const imm): negative range (< adjustment)",
6012 		.insns = {
6013 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6014 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6015 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6016 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6017 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6018 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6019 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6020 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
6021 				offsetof(struct test_val, foo)),
6022 			BPF_MOV64_IMM(BPF_REG_2, -1),
6023 			BPF_MOV64_IMM(BPF_REG_3, 0),
6024 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6025 			BPF_EXIT_INSN(),
6026 		},
6027 		.fixup_map_hash_48b = { 3 },
6028 		.errstr = "R2 min value is negative",
6029 		.result = REJECT,
6030 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6031 	},
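	/* As above, but the constant adjustment is applied through a
	 * register (R3) instead of an immediate.
	 */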
6032 	{
6033 		"helper access to adjusted map (via const reg): full range",
6034 		.insns = {
6035 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6036 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6037 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6038 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6039 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6040 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6041 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6042 			BPF_MOV64_IMM(BPF_REG_3,
6043 				offsetof(struct test_val, foo)),
6044 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6045 			BPF_MOV64_IMM(BPF_REG_2,
6046 				sizeof(struct test_val) -
6047 				offsetof(struct test_val, foo)),
6048 			BPF_MOV64_IMM(BPF_REG_3, 0),
6049 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6050 			BPF_EXIT_INSN(),
6051 		},
6052 		.fixup_map_hash_48b = { 3 },
6053 		.result = ACCEPT,
6054 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6055 	},
6056 	{
6057 		"helper access to adjusted map (via const reg): partial range",
6058 		.insns = {
6059 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6060 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6061 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6062 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6063 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6064 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6065 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6066 			BPF_MOV64_IMM(BPF_REG_3,
6067 				offsetof(struct test_val, foo)),
6068 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6069 			BPF_MOV64_IMM(BPF_REG_2, 8),
6070 			BPF_MOV64_IMM(BPF_REG_3, 0),
6071 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6072 			BPF_EXIT_INSN(),
6073 		},
6074 		.fixup_map_hash_48b = { 3 },
6075 		.result = ACCEPT,
6076 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6077 	},
6078 	{
6079 		"helper access to adjusted map (via const reg): empty range",
6080 		.insns = {
6081 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6082 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6083 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6084 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6085 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6086 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6087 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6088 			BPF_MOV64_IMM(BPF_REG_3, 0),
6089 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6090 			BPF_MOV64_IMM(BPF_REG_2, 0),
6091 			BPF_EMIT_CALL(BPF_FUNC_trace_printk),
6092 			BPF_EXIT_INSN(),
6093 		},
6094 		.fixup_map_hash_48b = { 3 },
6095 		.errstr = "R1 min value is outside of the array range",
6096 		.result = REJECT,
6097 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6098 	},
6099 	{
6100 		"helper access to adjusted map (via const reg): out-of-bound range",
6101 		.insns = {
6102 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6103 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6104 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6105 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6106 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6107 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6108 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6109 			BPF_MOV64_IMM(BPF_REG_3,
6110 				offsetof(struct test_val, foo)),
6111 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6112 			BPF_MOV64_IMM(BPF_REG_2,
6113 				sizeof(struct test_val) -
6114 				offsetof(struct test_val, foo) + 8),
6115 			BPF_MOV64_IMM(BPF_REG_3, 0),
6116 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6117 			BPF_EXIT_INSN(),
6118 		},
6119 		.fixup_map_hash_48b = { 3 },
6120 		.errstr = "invalid access to map value, value_size=48 off=4 size=52",
6121 		.result = REJECT,
6122 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6123 	},
6124 	{
6125 		"helper access to adjusted map (via const reg): negative range (> adjustment)",
6126 		.insns = {
6127 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6128 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6129 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6130 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6131 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6132 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6133 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6134 			BPF_MOV64_IMM(BPF_REG_3,
6135 				offsetof(struct test_val, foo)),
6136 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6137 			BPF_MOV64_IMM(BPF_REG_2, -8),
6138 			BPF_MOV64_IMM(BPF_REG_3, 0),
6139 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6140 			BPF_EXIT_INSN(),
6141 		},
6142 		.fixup_map_hash_48b = { 3 },
6143 		.errstr = "R2 min value is negative",
6144 		.result = REJECT,
6145 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6146 	},
6147 	{
6148 		"helper access to adjusted map (via const reg): negative range (< adjustment)",
6149 		.insns = {
6150 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6151 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6152 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6153 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6154 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6155 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6156 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6157 			BPF_MOV64_IMM(BPF_REG_3,
6158 				offsetof(struct test_val, foo)),
6159 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6160 			BPF_MOV64_IMM(BPF_REG_2, -1),
6161 			BPF_MOV64_IMM(BPF_REG_3, 0),
6162 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6163 			BPF_EXIT_INSN(),
6164 		},
6165 		.fixup_map_hash_48b = { 3 },
6166 		.errstr = "R2 min value is negative",
6167 		.result = REJECT,
6168 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6169 	},
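	/* Here the adjustment is a value loaded from the map itself, so
	 * it has to be bounds-checked (JGT against offsetof(struct
	 * test_val, foo)) before the helper call; missing or wrong max
	 * checks are rejected.
	 */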
6170 	{
6171 		"helper access to adjusted map (via variable): full range",
6172 		.insns = {
6173 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6174 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6175 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6176 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6177 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6178 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6179 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6180 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6181 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6182 				offsetof(struct test_val, foo), 4),
6183 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6184 			BPF_MOV64_IMM(BPF_REG_2,
6185 				sizeof(struct test_val) -
6186 				offsetof(struct test_val, foo)),
6187 			BPF_MOV64_IMM(BPF_REG_3, 0),
6188 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6189 			BPF_EXIT_INSN(),
6190 		},
6191 		.fixup_map_hash_48b = { 3 },
6192 		.result = ACCEPT,
6193 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6194 	},
6195 	{
6196 		"helper access to adjusted map (via variable): partial range",
6197 		.insns = {
6198 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6199 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6200 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6201 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6202 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6203 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6204 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6205 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6206 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6207 				offsetof(struct test_val, foo), 4),
6208 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6209 			BPF_MOV64_IMM(BPF_REG_2, 8),
6210 			BPF_MOV64_IMM(BPF_REG_3, 0),
6211 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6212 			BPF_EXIT_INSN(),
6213 		},
6214 		.fixup_map_hash_48b = { 3 },
6215 		.result = ACCEPT,
6216 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6217 	},
6218 	{
6219 		"helper access to adjusted map (via variable): empty range",
6220 		.insns = {
6221 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6222 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6223 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6224 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6225 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6226 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6227 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6228 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6229 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6230 				offsetof(struct test_val, foo), 3),
6231 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6232 			BPF_MOV64_IMM(BPF_REG_2, 0),
6233 			BPF_EMIT_CALL(BPF_FUNC_trace_printk),
6234 			BPF_EXIT_INSN(),
6235 		},
6236 		.fixup_map_hash_48b = { 3 },
6237 		.errstr = "R1 min value is outside of the array range",
6238 		.result = REJECT,
6239 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6240 	},
6241 	{
6242 		"helper access to adjusted map (via variable): no max check",
6243 		.insns = {
6244 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6245 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6246 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6247 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6248 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6249 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6250 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6251 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6252 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6253 			BPF_MOV64_IMM(BPF_REG_2, 1),
6254 			BPF_MOV64_IMM(BPF_REG_3, 0),
6255 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6256 			BPF_EXIT_INSN(),
6257 		},
6258 		.fixup_map_hash_48b = { 3 },
6259 		.errstr = "R1 unbounded memory access",
6260 		.result = REJECT,
6261 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6262 	},
6263 	{
6264 		"helper access to adjusted map (via variable): wrong max check",
6265 		.insns = {
6266 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6267 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6268 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6269 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6270 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6271 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6272 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6273 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6274 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6275 				offsetof(struct test_val, foo), 4),
6276 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6277 			BPF_MOV64_IMM(BPF_REG_2,
6278 				sizeof(struct test_val) -
6279 				offsetof(struct test_val, foo) + 1),
6280 			BPF_MOV64_IMM(BPF_REG_3, 0),
6281 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6282 			BPF_EXIT_INSN(),
6283 		},
6284 		.fixup_map_hash_48b = { 3 },
6285 		.errstr = "invalid access to map value, value_size=48 off=4 size=45",
6286 		.result = REJECT,
6287 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6288 	},
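	/* Map value offsets bounded via <, <=, s< and s<= comparisons;
	 * the "bad access" variants leave the offset unbounded or
	 * possibly negative and are expected to be rejected.
	 */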
6289 	{
6290 		"helper access to map: bounds check using <, good access",
6291 		.insns = {
6292 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6293 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6294 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6295 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6296 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6297 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6298 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6299 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6300 			BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 2),
6301 			BPF_MOV64_IMM(BPF_REG_0, 0),
6302 			BPF_EXIT_INSN(),
6303 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6304 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6305 			BPF_MOV64_IMM(BPF_REG_0, 0),
6306 			BPF_EXIT_INSN(),
6307 		},
6308 		.fixup_map_hash_48b = { 3 },
6309 		.result = ACCEPT,
6310 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6311 	},
6312 	{
6313 		"helper access to map: bounds check using <, bad access",
6314 		.insns = {
6315 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6316 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6317 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6318 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6319 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6320 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6321 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6322 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6323 			BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 4),
6324 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6325 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6326 			BPF_MOV64_IMM(BPF_REG_0, 0),
6327 			BPF_EXIT_INSN(),
6328 			BPF_MOV64_IMM(BPF_REG_0, 0),
6329 			BPF_EXIT_INSN(),
6330 		},
6331 		.fixup_map_hash_48b = { 3 },
6332 		.result = REJECT,
6333 		.errstr = "R1 unbounded memory access",
6334 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6335 	},
6336 	{
6337 		"helper access to map: bounds check using <=, good access",
6338 		.insns = {
6339 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6340 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6341 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6342 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6343 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6344 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6345 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6346 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6347 			BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 2),
6348 			BPF_MOV64_IMM(BPF_REG_0, 0),
6349 			BPF_EXIT_INSN(),
6350 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6351 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6352 			BPF_MOV64_IMM(BPF_REG_0, 0),
6353 			BPF_EXIT_INSN(),
6354 		},
6355 		.fixup_map_hash_48b = { 3 },
6356 		.result = ACCEPT,
6357 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6358 	},
6359 	{
6360 		"helper access to map: bounds check using <=, bad access",
6361 		.insns = {
6362 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6363 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6364 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6365 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6366 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6367 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6368 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6369 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6370 			BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 4),
6371 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6372 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6373 			BPF_MOV64_IMM(BPF_REG_0, 0),
6374 			BPF_EXIT_INSN(),
6375 			BPF_MOV64_IMM(BPF_REG_0, 0),
6376 			BPF_EXIT_INSN(),
6377 		},
6378 		.fixup_map_hash_48b = { 3 },
6379 		.result = REJECT,
6380 		.errstr = "R1 unbounded memory access",
6381 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6382 	},
6383 	{
6384 		"helper access to map: bounds check using s<, good access",
6385 		.insns = {
6386 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6387 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6388 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6389 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6390 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6391 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6392 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6393 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6394 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
6395 			BPF_MOV64_IMM(BPF_REG_0, 0),
6396 			BPF_EXIT_INSN(),
6397 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 0, -3),
6398 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6399 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6400 			BPF_MOV64_IMM(BPF_REG_0, 0),
6401 			BPF_EXIT_INSN(),
6402 		},
6403 		.fixup_map_hash_48b = { 3 },
6404 		.result = ACCEPT,
6405 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6406 	},
6407 	{
6408 		"helper access to map: bounds check using s<, good access 2",
6409 		.insns = {
6410 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6411 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6412 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6413 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6414 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6415 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6416 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6417 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6418 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
6419 			BPF_MOV64_IMM(BPF_REG_0, 0),
6420 			BPF_EXIT_INSN(),
6421 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
6422 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6423 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6424 			BPF_MOV64_IMM(BPF_REG_0, 0),
6425 			BPF_EXIT_INSN(),
6426 		},
6427 		.fixup_map_hash_48b = { 3 },
6428 		.result = ACCEPT,
6429 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6430 	},
6431 	{
6432 		"helper access to map: bounds check using s<, bad access",
6433 		.insns = {
6434 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6435 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6436 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6437 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6438 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6439 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6440 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6441 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
6442 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
6443 			BPF_MOV64_IMM(BPF_REG_0, 0),
6444 			BPF_EXIT_INSN(),
6445 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
6446 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6447 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6448 			BPF_MOV64_IMM(BPF_REG_0, 0),
6449 			BPF_EXIT_INSN(),
6450 		},
6451 		.fixup_map_hash_48b = { 3 },
6452 		.result = REJECT,
6453 		.errstr = "R1 min value is negative",
6454 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6455 	},
6456 	{
6457 		"helper access to map: bounds check using s<=, good access",
6458 		.insns = {
6459 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6460 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6461 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6462 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6463 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6464 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6465 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6466 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6467 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
6468 			BPF_MOV64_IMM(BPF_REG_0, 0),
6469 			BPF_EXIT_INSN(),
6470 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0, -3),
6471 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6472 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6473 			BPF_MOV64_IMM(BPF_REG_0, 0),
6474 			BPF_EXIT_INSN(),
6475 		},
6476 		.fixup_map_hash_48b = { 3 },
6477 		.result = ACCEPT,
6478 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6479 	},
6480 	{
6481 		"helper access to map: bounds check using s<=, good access 2",
6482 		.insns = {
6483 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6484 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6485 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6486 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6487 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6488 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6489 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6490 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6491 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
6492 			BPF_MOV64_IMM(BPF_REG_0, 0),
6493 			BPF_EXIT_INSN(),
6494 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
6495 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6496 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6497 			BPF_MOV64_IMM(BPF_REG_0, 0),
6498 			BPF_EXIT_INSN(),
6499 		},
6500 		.fixup_map_hash_48b = { 3 },
6501 		.result = ACCEPT,
6502 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6503 	},
6504 	{
6505 		"helper access to map: bounds check using s<=, bad access",
6506 		.insns = {
6507 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6508 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6509 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6510 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6511 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6512 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6513 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6514 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
6515 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
6516 			BPF_MOV64_IMM(BPF_REG_0, 0),
6517 			BPF_EXIT_INSN(),
6518 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
6519 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6520 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6521 			BPF_MOV64_IMM(BPF_REG_0, 0),
6522 			BPF_EXIT_INSN(),
6523 		},
6524 		.fixup_map_hash_48b = { 3 },
6525 		.result = REJECT,
6526 		.errstr = "R1 min value is negative",
6527 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6528 	},
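	/* Pointer arithmetic on array map values: adding a bounded scalar to
	 * the value pointer (in either operand order) is allowed, whereas
	 * pointer +/- pointer and subtracting a pointer from a scalar are
	 * rejected; subtracting a scalar is only accepted when the result
	 * provably stays within the value.
	 */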
6529 	{
6530 		"map access: known scalar += value_ptr",
6531 		.insns = {
6532 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6533 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6534 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6535 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6536 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6537 				     BPF_FUNC_map_lookup_elem),
6538 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
6539 			BPF_MOV64_IMM(BPF_REG_1, 4),
6540 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
6541 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
6542 			BPF_MOV64_IMM(BPF_REG_0, 1),
6543 			BPF_EXIT_INSN(),
6544 		},
6545 		.fixup_map_array_48b = { 3 },
6546 		.result = ACCEPT,
6547 		.retval = 1,
6548 	},
6549 	{
6550 		"map access: value_ptr += known scalar",
6551 		.insns = {
6552 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6553 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6554 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6555 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6556 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6557 				     BPF_FUNC_map_lookup_elem),
6558 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
6559 			BPF_MOV64_IMM(BPF_REG_1, 4),
6560 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6561 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6562 			BPF_MOV64_IMM(BPF_REG_0, 1),
6563 			BPF_EXIT_INSN(),
6564 		},
6565 		.fixup_map_array_48b = { 3 },
6566 		.result = ACCEPT,
6567 		.retval = 1,
6568 	},
6569 	{
6570 		"map access: unknown scalar += value_ptr",
6571 		.insns = {
6572 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6573 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6574 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6575 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6576 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6577 				     BPF_FUNC_map_lookup_elem),
6578 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6579 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6580 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
6581 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
6582 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
6583 			BPF_MOV64_IMM(BPF_REG_0, 1),
6584 			BPF_EXIT_INSN(),
6585 		},
6586 		.fixup_map_array_48b = { 3 },
6587 		.result = ACCEPT,
6588 		.retval = 1,
6589 	},
6590 	{
6591 		"map access: value_ptr += unknown scalar",
6592 		.insns = {
6593 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6594 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6595 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6596 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6597 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6598 				     BPF_FUNC_map_lookup_elem),
6599 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6600 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6601 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
6602 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6603 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6604 			BPF_MOV64_IMM(BPF_REG_0, 1),
6605 			BPF_EXIT_INSN(),
6606 		},
6607 		.fixup_map_array_48b = { 3 },
6608 		.result = ACCEPT,
6609 		.retval = 1,
6610 	},
6611 	{
6612 		"map access: value_ptr += value_ptr",
6613 		.insns = {
6614 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6615 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6616 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6617 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6618 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6619 				     BPF_FUNC_map_lookup_elem),
6620 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6621 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_0),
6622 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6623 			BPF_MOV64_IMM(BPF_REG_0, 1),
6624 			BPF_EXIT_INSN(),
6625 		},
6626 		.fixup_map_array_48b = { 3 },
6627 		.result = REJECT,
6628 		.errstr = "R0 pointer += pointer prohibited",
6629 	},
6630 	{
6631 		"map access: known scalar -= value_ptr",
6632 		.insns = {
6633 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6634 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6635 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6636 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6637 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6638 				     BPF_FUNC_map_lookup_elem),
6639 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
6640 			BPF_MOV64_IMM(BPF_REG_1, 4),
6641 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
6642 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
6643 			BPF_MOV64_IMM(BPF_REG_0, 1),
6644 			BPF_EXIT_INSN(),
6645 		},
6646 		.fixup_map_array_48b = { 3 },
6647 		.result = REJECT,
6648 		.errstr = "R1 tried to subtract pointer from scalar",
6649 	},
6650 	{
6651 		"map access: value_ptr -= known scalar",
6652 		.insns = {
6653 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6654 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6655 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6656 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6657 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6658 				     BPF_FUNC_map_lookup_elem),
6659 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
6660 			BPF_MOV64_IMM(BPF_REG_1, 4),
6661 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
6662 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6663 			BPF_MOV64_IMM(BPF_REG_0, 1),
6664 			BPF_EXIT_INSN(),
6665 		},
6666 		.fixup_map_array_48b = { 3 },
6667 		.result = REJECT,
6668 		.errstr = "R0 min value is outside of the array range",
6669 	},
6670 	{
6671 		"map access: value_ptr -= known scalar, 2",
6672 		.insns = {
6673 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6674 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6675 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6676 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6677 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6678 				     BPF_FUNC_map_lookup_elem),
6679 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6680 			BPF_MOV64_IMM(BPF_REG_1, 6),
6681 			BPF_MOV64_IMM(BPF_REG_2, 4),
6682 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6683 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
6684 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6685 			BPF_MOV64_IMM(BPF_REG_0, 1),
6686 			BPF_EXIT_INSN(),
6687 		},
6688 		.fixup_map_array_48b = { 3 },
6689 		.result = ACCEPT,
6690 		.retval = 1,
6691 	},
6692 	{
6693 		"map access: unknown scalar -= value_ptr",
6694 		.insns = {
6695 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6696 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6697 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6698 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6699 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6700 				     BPF_FUNC_map_lookup_elem),
6701 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6702 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6703 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
6704 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
6705 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
6706 			BPF_MOV64_IMM(BPF_REG_0, 1),
6707 			BPF_EXIT_INSN(),
6708 		},
6709 		.fixup_map_array_48b = { 3 },
6710 		.result = REJECT,
6711 		.errstr = "R1 tried to subtract pointer from scalar",
6712 	},
6713 	{
6714 		"map access: value_ptr -= unknown scalar",
6715 		.insns = {
6716 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6717 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6718 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6719 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6720 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6721 				     BPF_FUNC_map_lookup_elem),
6722 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6723 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6724 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
6725 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
6726 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6727 			BPF_MOV64_IMM(BPF_REG_0, 1),
6728 			BPF_EXIT_INSN(),
6729 		},
6730 		.fixup_map_array_48b = { 3 },
6731 		.result = REJECT,
6732 		.errstr = "R0 min value is negative",
6733 	},
6734 	{
6735 		"map access: value_ptr -= unknown scalar, 2",
6736 		.insns = {
6737 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6738 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6739 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6740 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6741 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6742 				     BPF_FUNC_map_lookup_elem),
6743 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
6744 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6745 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
6746 			BPF_ALU64_IMM(BPF_OR, BPF_REG_1, 0x7),
6747 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6748 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6749 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7),
6750 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
6751 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6752 			BPF_MOV64_IMM(BPF_REG_0, 1),
6753 			BPF_EXIT_INSN(),
6754 		},
6755 		.fixup_map_array_48b = { 3 },
6756 		.result = ACCEPT,
6757 		.retval = 1,
6758 	},
6759 	{
6760 		"map access: value_ptr -= value_ptr",
6761 		.insns = {
6762 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6763 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6764 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6765 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6766 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6767 				     BPF_FUNC_map_lookup_elem),
6768 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6769 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_0),
6770 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6771 			BPF_MOV64_IMM(BPF_REG_0, 1),
6772 			BPF_EXIT_INSN(),
6773 		},
6774 		.fixup_map_array_48b = { 3 },
6775 		.result = REJECT,
6776 		.errstr = "R0 invalid mem access 'inv'",
6777 		.errstr_unpriv = "R0 pointer -= pointer prohibited",
6778 	},
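	/* Helper calls that take a map value pointer as their key or value
	 * argument: the indirect access is checked against the value_size of
	 * the map the pointer came from.
	 */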
6779 	{
6780 		"map lookup helper access to map",
6781 		.insns = {
6782 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6783 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6784 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6785 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6786 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6787 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6788 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6789 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6790 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6791 			BPF_EXIT_INSN(),
6792 		},
6793 		.fixup_map_hash_16b = { 3, 8 },
6794 		.result = ACCEPT,
6795 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6796 	},
6797 	{
6798 		"map update helper access to map",
6799 		.insns = {
6800 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6801 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6802 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6803 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6804 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6805 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6806 			BPF_MOV64_IMM(BPF_REG_4, 0),
6807 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
6808 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6809 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6810 			BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
6811 			BPF_EXIT_INSN(),
6812 		},
6813 		.fixup_map_hash_16b = { 3, 10 },
6814 		.result = ACCEPT,
6815 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6816 	},
6817 	{
6818 		"map update helper access to map: wrong size",
6819 		.insns = {
6820 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6821 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6822 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6823 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6824 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6825 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6826 			BPF_MOV64_IMM(BPF_REG_4, 0),
6827 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
6828 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6829 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6830 			BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
6831 			BPF_EXIT_INSN(),
6832 		},
6833 		.fixup_map_hash_8b = { 3 },
6834 		.fixup_map_hash_16b = { 10 },
6835 		.result = REJECT,
6836 		.errstr = "invalid access to map value, value_size=8 off=0 size=16",
6837 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6838 	},
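	/* "Adjusted map" tests: the key pointer for the second lookup is the
	 * first value pointer plus an offset (constant immediate, constant
	 * register or bounded variable).  Offsets that reach past value_size,
	 * or that may be negative or unbounded, are rejected.
	 */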
6839 	{
6840 		"map helper access to adjusted map (via const imm)",
6841 		.insns = {
6842 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6843 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6844 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6845 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6846 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6847 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6848 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6849 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
6850 				      offsetof(struct other_val, bar)),
6851 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6852 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6853 			BPF_EXIT_INSN(),
6854 		},
6855 		.fixup_map_hash_16b = { 3, 9 },
6856 		.result = ACCEPT,
6857 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6858 	},
6859 	{
6860 		"map helper access to adjusted map (via const imm): out-of-bound 1",
6861 		.insns = {
6862 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6863 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6864 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6865 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6866 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6867 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6868 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6869 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
6870 				      sizeof(struct other_val) - 4),
6871 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6872 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6873 			BPF_EXIT_INSN(),
6874 		},
6875 		.fixup_map_hash_16b = { 3, 9 },
6876 		.result = REJECT,
6877 		.errstr = "invalid access to map value, value_size=16 off=12 size=8",
6878 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6879 	},
6880 	{
6881 		"map helper access to adjusted map (via const imm): out-of-bound 2",
6882 		.insns = {
6883 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6884 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6885 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6886 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6887 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6888 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6889 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6890 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6891 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6892 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6893 			BPF_EXIT_INSN(),
6894 		},
6895 		.fixup_map_hash_16b = { 3, 9 },
6896 		.result = REJECT,
6897 		.errstr = "invalid access to map value, value_size=16 off=-4 size=8",
6898 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6899 	},
6900 	{
6901 		"map helper access to adjusted map (via const reg)",
6902 		.insns = {
6903 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6904 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6905 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6906 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6907 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6908 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6909 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6910 			BPF_MOV64_IMM(BPF_REG_3,
6911 				      offsetof(struct other_val, bar)),
6912 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6913 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6914 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6915 			BPF_EXIT_INSN(),
6916 		},
6917 		.fixup_map_hash_16b = { 3, 10 },
6918 		.result = ACCEPT,
6919 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6920 	},
6921 	{
6922 		"map helper access to adjusted map (via const reg): out-of-bound 1",
6923 		.insns = {
6924 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6925 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6926 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6927 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6928 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6929 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6930 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6931 			BPF_MOV64_IMM(BPF_REG_3,
6932 				      sizeof(struct other_val) - 4),
6933 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6934 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6935 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6936 			BPF_EXIT_INSN(),
6937 		},
6938 		.fixup_map_hash_16b = { 3, 10 },
6939 		.result = REJECT,
6940 		.errstr = "invalid access to map value, value_size=16 off=12 size=8",
6941 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6942 	},
6943 	{
6944 		"map helper access to adjusted map (via const reg): out-of-bound 2",
6945 		.insns = {
6946 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6947 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6948 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6949 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6950 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6951 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6952 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6953 			BPF_MOV64_IMM(BPF_REG_3, -4),
6954 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6955 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6956 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6957 			BPF_EXIT_INSN(),
6958 		},
6959 		.fixup_map_hash_16b = { 3, 10 },
6960 		.result = REJECT,
6961 		.errstr = "invalid access to map value, value_size=16 off=-4 size=8",
6962 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6963 	},
6964 	{
6965 		"map helper access to adjusted map (via variable)",
6966 		.insns = {
6967 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6968 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6969 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6970 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6971 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6972 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6973 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6974 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6975 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6976 				    offsetof(struct other_val, bar), 4),
6977 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6978 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6979 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6980 			BPF_EXIT_INSN(),
6981 		},
6982 		.fixup_map_hash_16b = { 3, 11 },
6983 		.result = ACCEPT,
6984 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6985 	},
6986 	{
6987 		"map helper access to adjusted map (via variable): no max check",
6988 		.insns = {
6989 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6990 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6991 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6992 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6993 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6994 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6995 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6996 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6997 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6998 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6999 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7000 			BPF_EXIT_INSN(),
7001 		},
7002 		.fixup_map_hash_16b = { 3, 10 },
7003 		.result = REJECT,
7004 		.errstr = "R2 unbounded memory access, make sure to bounds check any array access into a map",
7005 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7006 	},
7007 	{
7008 		"map helper access to adjusted map (via variable): wrong max check",
7009 		.insns = {
7010 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7011 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7012 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7013 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7014 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7015 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7016 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
7017 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
7018 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
7019 				    offsetof(struct other_val, bar) + 1, 4),
7020 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
7021 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7022 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7023 			BPF_EXIT_INSN(),
7024 		},
7025 		.fixup_map_hash_16b = { 3, 11 },
7026 		.result = REJECT,
7027 		.errstr = "invalid access to map value, value_size=16 off=9 size=8",
7028 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7029 	},
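	/* Spill/fill tests: a map value pointer (or value_or_null) written to
	 * the stack keeps its register state when loaded back.  The
	 * unprivileged variants are still rejected since spilling the pointer
	 * leaks its address, and caller-saved registers are unreadable after
	 * a helper call.
	 */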
7030 	{
7031 		"map element value is preserved across register spilling",
7032 		.insns = {
7033 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7034 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7035 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7036 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7037 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7038 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7039 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
7040 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7041 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
7042 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
7043 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
7044 			BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
7045 			BPF_EXIT_INSN(),
7046 		},
7047 		.fixup_map_hash_48b = { 3 },
7048 		.errstr_unpriv = "R0 leaks addr",
7049 		.result = ACCEPT,
7050 		.result_unpriv = REJECT,
7051 	},
7052 	{
7053 		"map element value or null is marked on register spilling",
7054 		.insns = {
7055 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7056 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7057 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7058 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7059 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7060 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7061 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
7062 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
7063 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
7064 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
7065 			BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
7066 			BPF_EXIT_INSN(),
7067 		},
7068 		.fixup_map_hash_48b = { 3 },
7069 		.errstr_unpriv = "R0 leaks addr",
7070 		.result = ACCEPT,
7071 		.result_unpriv = REJECT,
7072 	},
7073 	{
7074 		"map element value store of cleared call register",
7075 		.insns = {
7076 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7077 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7078 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7079 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7080 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7081 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
7082 			BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
7083 			BPF_EXIT_INSN(),
7084 		},
7085 		.fixup_map_hash_48b = { 3 },
7086 		.errstr_unpriv = "R1 !read_ok",
7087 		.errstr = "R1 !read_ok",
7088 		.result = REJECT,
7089 		.result_unpriv = REJECT,
7090 	},
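	/* Misaligned access within a map value.  These tests only run when
	 * the architecture has efficient unaligned access (see
	 * F_NEEDS_EFFICIENT_UNALIGNED_ACCESS); otherwise the verifier would
	 * enforce strict alignment and the result would differ.
	 */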
7091 	{
7092 		"map element value with unaligned store",
7093 		.insns = {
7094 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7095 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7096 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7097 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7098 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7099 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
7100 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
7101 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
7102 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
7103 			BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
7104 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
7105 			BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
7106 			BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
7107 			BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
7108 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
7109 			BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
7110 			BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
7111 			BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
7112 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
7113 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
7114 			BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
7115 			BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
7116 			BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
7117 			BPF_EXIT_INSN(),
7118 		},
7119 		.fixup_map_hash_48b = { 3 },
7120 		.errstr_unpriv = "R0 leaks addr",
7121 		.result = ACCEPT,
7122 		.result_unpriv = REJECT,
7123 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7124 	},
7125 	{
7126 		"map element value with unaligned load",
7127 		.insns = {
7128 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7129 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7130 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7131 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7132 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7133 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
7134 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
7135 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
7136 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
7137 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
7138 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
7139 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
7140 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
7141 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
7142 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
7143 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
7144 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
7145 			BPF_EXIT_INSN(),
7146 		},
7147 		.fixup_map_hash_48b = { 3 },
7148 		.errstr_unpriv = "R0 leaks addr",
7149 		.result = ACCEPT,
7150 		.result_unpriv = REJECT,
7151 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7152 	},
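	/* Illegal ALU ops on a map value pointer: bitwise ops, 32-bit
	 * arithmetic, division, byte swaps and xadd on a spilled pointer
	 * either get rejected outright or turn the pointer into an unknown
	 * scalar ('inv'), so the following memory access fails.
	 */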
7153 	{
7154 		"map element value illegal alu op, 1",
7155 		.insns = {
7156 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7157 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7158 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7159 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7160 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7161 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
7162 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
7163 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
7164 			BPF_EXIT_INSN(),
7165 		},
7166 		.fixup_map_hash_48b = { 3 },
7167 		.errstr = "R0 bitwise operator &= on pointer",
7168 		.result = REJECT,
7169 	},
7170 	{
7171 		"map element value illegal alu op, 2",
7172 		.insns = {
7173 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7174 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7175 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7176 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7177 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7178 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
7179 			BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
7180 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
7181 			BPF_EXIT_INSN(),
7182 		},
7183 		.fixup_map_hash_48b = { 3 },
7184 		.errstr = "R0 32-bit pointer arithmetic prohibited",
7185 		.result = REJECT,
7186 	},
7187 	{
7188 		"map element value illegal alu op, 3",
7189 		.insns = {
7190 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7191 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7192 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7193 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7194 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7195 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
7196 			BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
7197 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
7198 			BPF_EXIT_INSN(),
7199 		},
7200 		.fixup_map_hash_48b = { 3 },
7201 		.errstr = "R0 pointer arithmetic with /= operator",
7202 		.result = REJECT,
7203 	},
7204 	{
7205 		"map element value illegal alu op, 4",
7206 		.insns = {
7207 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7208 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7209 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7210 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7211 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7212 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
7213 			BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
7214 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
7215 			BPF_EXIT_INSN(),
7216 		},
7217 		.fixup_map_hash_48b = { 3 },
7218 		.errstr_unpriv = "R0 pointer arithmetic prohibited",
7219 		.errstr = "invalid mem access 'inv'",
7220 		.result = REJECT,
7221 		.result_unpriv = REJECT,
7222 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7223 	},
7224 	{
7225 		"map element value illegal alu op, 5",
7226 		.insns = {
7227 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7228 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7229 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7230 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7231 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7232 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7233 			BPF_MOV64_IMM(BPF_REG_3, 4096),
7234 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7235 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7236 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7237 			BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
7238 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
7239 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
7240 			BPF_EXIT_INSN(),
7241 		},
7242 		.fixup_map_hash_48b = { 3 },
7243 		.errstr = "R0 invalid mem access 'inv'",
7244 		.result = REJECT,
7245 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7246 	},
7247 	{
7248 		"map element value is preserved across register spilling",
7249 		.insns = {
7250 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7251 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7252 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7253 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7254 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7255 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7256 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
7257 				offsetof(struct test_val, foo)),
7258 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
7259 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7260 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
7261 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
7262 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
7263 			BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
7264 			BPF_EXIT_INSN(),
7265 		},
7266 		.fixup_map_hash_48b = { 3 },
7267 		.errstr_unpriv = "R0 leaks addr",
7268 		.result = ACCEPT,
7269 		.result_unpriv = REJECT,
7270 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7271 	},
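	/* Variable-size helper access to the stack: the size register passed
	 * to bpf_probe_read() must be bounded from above and below (via an
	 * AND mask or conditional jumps), and the buffer itself must have
	 * been initialized before the helper may read it.
	 */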
7272 	{
7273 		"helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
7274 		.insns = {
7275 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7276 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7277 			BPF_MOV64_IMM(BPF_REG_0, 0),
7278 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
7279 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
7280 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
7281 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
7282 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
7283 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
7284 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
7285 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
7286 			BPF_MOV64_IMM(BPF_REG_2, 16),
7287 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7288 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7289 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
7290 			BPF_MOV64_IMM(BPF_REG_4, 0),
7291 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
7292 			BPF_MOV64_IMM(BPF_REG_3, 0),
7293 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7294 			BPF_MOV64_IMM(BPF_REG_0, 0),
7295 			BPF_EXIT_INSN(),
7296 		},
7297 		.result = ACCEPT,
7298 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7299 	},
7300 	{
7301 		"helper access to variable memory: stack, bitwise AND, zero included",
7302 		.insns = {
7303 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7304 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7305 			BPF_MOV64_IMM(BPF_REG_2, 16),
7306 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7307 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7308 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
7309 			BPF_MOV64_IMM(BPF_REG_3, 0),
7310 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7311 			BPF_EXIT_INSN(),
7312 		},
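		/* the 64 bytes at fp-64 are never initialized */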
7313 		.errstr = "invalid indirect read from stack off -64+0 size 64",
7314 		.result = REJECT,
7315 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7316 	},
7317 	{
7318 		"helper access to variable memory: stack, bitwise AND + JMP, wrong max",
7319 		.insns = {
7320 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7321 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7322 			BPF_MOV64_IMM(BPF_REG_2, 16),
7323 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7324 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7325 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
7326 			BPF_MOV64_IMM(BPF_REG_4, 0),
7327 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
7328 			BPF_MOV64_IMM(BPF_REG_3, 0),
7329 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7330 			BPF_MOV64_IMM(BPF_REG_0, 0),
7331 			BPF_EXIT_INSN(),
7332 		},
7333 		.errstr = "invalid stack type R1 off=-64 access_size=65",
7334 		.result = REJECT,
7335 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7336 	},
7337 	{
7338 		"helper access to variable memory: stack, JMP, correct bounds",
7339 		.insns = {
7340 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7341 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7342 			BPF_MOV64_IMM(BPF_REG_0, 0),
7343 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
7344 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
7345 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
7346 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
7347 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
7348 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
7349 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
7350 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
7351 			BPF_MOV64_IMM(BPF_REG_2, 16),
7352 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7353 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7354 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
7355 			BPF_MOV64_IMM(BPF_REG_4, 0),
7356 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
7357 			BPF_MOV64_IMM(BPF_REG_3, 0),
7358 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7359 			BPF_MOV64_IMM(BPF_REG_0, 0),
7360 			BPF_EXIT_INSN(),
7361 		},
7362 		.result = ACCEPT,
7363 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7364 	},
7365 	{
7366 		"helper access to variable memory: stack, JMP (signed), correct bounds",
7367 		.insns = {
7368 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7369 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7370 			BPF_MOV64_IMM(BPF_REG_0, 0),
7371 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
7372 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
7373 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
7374 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
7375 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
7376 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
7377 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
7378 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
7379 			BPF_MOV64_IMM(BPF_REG_2, 16),
7380 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7381 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7382 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
7383 			BPF_MOV64_IMM(BPF_REG_4, 0),
7384 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
7385 			BPF_MOV64_IMM(BPF_REG_3, 0),
7386 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7387 			BPF_MOV64_IMM(BPF_REG_0, 0),
7388 			BPF_EXIT_INSN(),
7389 		},
7390 		.result = ACCEPT,
7391 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7392 	},
7393 	{
7394 		"helper access to variable memory: stack, JMP, bounds + offset",
7395 		.insns = {
7396 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7397 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7398 			BPF_MOV64_IMM(BPF_REG_2, 16),
7399 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7400 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7401 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
7402 			BPF_MOV64_IMM(BPF_REG_4, 0),
7403 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
7404 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
7405 			BPF_MOV64_IMM(BPF_REG_3, 0),
7406 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7407 			BPF_MOV64_IMM(BPF_REG_0, 0),
7408 			BPF_EXIT_INSN(),
7409 		},
7410 		.errstr = "invalid stack type R1 off=-64 access_size=65",
7411 		.result = REJECT,
7412 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7413 	},
7414 	{
7415 		"helper access to variable memory: stack, JMP, wrong max",
7416 		.insns = {
7417 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7418 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7419 			BPF_MOV64_IMM(BPF_REG_2, 16),
7420 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7421 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7422 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
7423 			BPF_MOV64_IMM(BPF_REG_4, 0),
7424 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
7425 			BPF_MOV64_IMM(BPF_REG_3, 0),
7426 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7427 			BPF_MOV64_IMM(BPF_REG_0, 0),
7428 			BPF_EXIT_INSN(),
7429 		},
7430 		.errstr = "invalid stack type R1 off=-64 access_size=65",
7431 		.result = REJECT,
7432 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7433 	},
7434 	{
7435 		"helper access to variable memory: stack, JMP, no max check",
7436 		.insns = {
7437 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7438 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7439 			BPF_MOV64_IMM(BPF_REG_2, 16),
7440 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7441 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7442 			BPF_MOV64_IMM(BPF_REG_4, 0),
7443 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
7444 			BPF_MOV64_IMM(BPF_REG_3, 0),
7445 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7446 			BPF_MOV64_IMM(BPF_REG_0, 0),
7447 			BPF_EXIT_INSN(),
7448 		},
7449 		/* because max wasn't checked, signed min is negative */
7450 		.errstr = "R2 min value is negative, either use unsigned or 'var &= const'",
7451 		.result = REJECT,
7452 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7453 	},
7454 	{
7455 		"helper access to variable memory: stack, JMP, no min check",
7456 		.insns = {
7457 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7458 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7459 			BPF_MOV64_IMM(BPF_REG_2, 16),
7460 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7461 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7462 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
7463 			BPF_MOV64_IMM(BPF_REG_3, 0),
7464 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7465 			BPF_MOV64_IMM(BPF_REG_0, 0),
7466 			BPF_EXIT_INSN(),
7467 		},
7468 		.errstr = "invalid indirect read from stack off -64+0 size 64",
7469 		.result = REJECT,
7470 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7471 	},
7472 	{
7473 		"helper access to variable memory: stack, JMP (signed), no min check",
7474 		.insns = {
7475 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7476 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7477 			BPF_MOV64_IMM(BPF_REG_2, 16),
7478 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7479 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7480 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
7481 			BPF_MOV64_IMM(BPF_REG_3, 0),
7482 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7483 			BPF_MOV64_IMM(BPF_REG_0, 0),
7484 			BPF_EXIT_INSN(),
7485 		},
7486 		.errstr = "R2 min value is negative",
7487 		.result = REJECT,
7488 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7489 	},
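	/* Same variable-size checks against a map value: the size must be
	 * bounded by sizeof(struct test_val), and once the value pointer has
	 * been advanced the allowed maximum shrinks accordingly.
	 */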
7490 	{
7491 		"helper access to variable memory: map, JMP, correct bounds",
7492 		.insns = {
7493 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7494 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7495 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7496 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7497 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7498 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
7499 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7500 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
7501 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7502 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7503 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
7504 				sizeof(struct test_val), 4),
7505 			BPF_MOV64_IMM(BPF_REG_4, 0),
7506 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
7507 			BPF_MOV64_IMM(BPF_REG_3, 0),
7508 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7509 			BPF_MOV64_IMM(BPF_REG_0, 0),
7510 			BPF_EXIT_INSN(),
7511 		},
7512 		.fixup_map_hash_48b = { 3 },
7513 		.result = ACCEPT,
7514 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7515 	},
7516 	{
7517 		"helper access to variable memory: map, JMP, wrong max",
7518 		.insns = {
7519 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7520 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7521 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7522 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7523 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7524 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
7525 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7526 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
7527 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7528 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7529 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
7530 				sizeof(struct test_val) + 1, 4),
7531 			BPF_MOV64_IMM(BPF_REG_4, 0),
7532 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
7533 			BPF_MOV64_IMM(BPF_REG_3, 0),
7534 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7535 			BPF_MOV64_IMM(BPF_REG_0, 0),
7536 			BPF_EXIT_INSN(),
7537 		},
7538 		.fixup_map_hash_48b = { 3 },
7539 		.errstr = "invalid access to map value, value_size=48 off=0 size=49",
7540 		.result = REJECT,
7541 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7542 	},
7543 	{
7544 		"helper access to variable memory: map adjusted, JMP, correct bounds",
7545 		.insns = {
7546 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7547 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7548 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7549 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7550 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7551 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
7552 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7553 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
7554 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
7555 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7556 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7557 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
7558 				sizeof(struct test_val) - 20, 4),
7559 			BPF_MOV64_IMM(BPF_REG_4, 0),
7560 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
7561 			BPF_MOV64_IMM(BPF_REG_3, 0),
7562 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7563 			BPF_MOV64_IMM(BPF_REG_0, 0),
7564 			BPF_EXIT_INSN(),
7565 		},
7566 		.fixup_map_hash_48b = { 3 },
7567 		.result = ACCEPT,
7568 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7569 	},
7570 	{
7571 		"helper access to variable memory: map adjusted, JMP, wrong max",
7572 		.insns = {
7573 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7574 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7575 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7576 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7577 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7578 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
7579 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7580 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
7581 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
7582 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7583 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7584 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
7585 				sizeof(struct test_val) - 19, 4),
7586 			BPF_MOV64_IMM(BPF_REG_4, 0),
7587 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
7588 			BPF_MOV64_IMM(BPF_REG_3, 0),
7589 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7590 			BPF_MOV64_IMM(BPF_REG_0, 0),
7591 			BPF_EXIT_INSN(),
7592 		},
7593 		.fixup_map_hash_48b = { 3 },
7594 		.errstr = "R1 min value is outside of the array range",
7595 		.result = REJECT,
7596 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7597 	},
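	/* ARG_PTR_TO_MEM_OR_NULL vs ARG_PTR_TO_MEM: bpf_csum_diff() accepts a
	 * NULL buffer as long as the size is zero, while bpf_probe_read()
	 * needs a valid pointer even for size zero.  Non-NULL stack, map and
	 * packet pointers with a possibly zero size are accepted.
	 */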
7598 	{
7599 		"helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
7600 		.insns = {
7601 			BPF_MOV64_IMM(BPF_REG_1, 0),
7602 			BPF_MOV64_IMM(BPF_REG_2, 0),
7603 			BPF_MOV64_IMM(BPF_REG_3, 0),
7604 			BPF_MOV64_IMM(BPF_REG_4, 0),
7605 			BPF_MOV64_IMM(BPF_REG_5, 0),
7606 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7607 			BPF_EXIT_INSN(),
7608 		},
7609 		.result = ACCEPT,
7610 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7611 	},
7612 	{
7613 		"helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
7614 		.insns = {
7615 			BPF_MOV64_IMM(BPF_REG_1, 0),
7616 			BPF_MOV64_IMM(BPF_REG_2, 1),
7617 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7618 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7619 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
7620 			BPF_MOV64_IMM(BPF_REG_3, 0),
7621 			BPF_MOV64_IMM(BPF_REG_4, 0),
7622 			BPF_MOV64_IMM(BPF_REG_5, 0),
7623 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7624 			BPF_EXIT_INSN(),
7625 		},
7626 		.errstr = "R1 type=inv expected=fp",
7627 		.result = REJECT,
7628 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7629 	},
7630 	{
7631 		"helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
7632 		.insns = {
7633 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7634 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
7635 			BPF_MOV64_IMM(BPF_REG_2, 0),
7636 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
7637 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
7638 			BPF_MOV64_IMM(BPF_REG_3, 0),
7639 			BPF_MOV64_IMM(BPF_REG_4, 0),
7640 			BPF_MOV64_IMM(BPF_REG_5, 0),
7641 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7642 			BPF_EXIT_INSN(),
7643 		},
7644 		.result = ACCEPT,
7645 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7646 	},
7647 	{
7648 		"helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
7649 		.insns = {
7650 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7651 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7652 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7653 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7654 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7655 				     BPF_FUNC_map_lookup_elem),
7656 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7657 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7658 			BPF_MOV64_IMM(BPF_REG_2, 0),
7659 			BPF_MOV64_IMM(BPF_REG_3, 0),
7660 			BPF_MOV64_IMM(BPF_REG_4, 0),
7661 			BPF_MOV64_IMM(BPF_REG_5, 0),
7662 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7663 			BPF_EXIT_INSN(),
7664 		},
7665 		.fixup_map_hash_8b = { 3 },
7666 		.result = ACCEPT,
7667 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7668 	},
7669 	{
7670 		"helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
7671 		.insns = {
7672 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7673 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7674 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7675 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7676 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7677 				     BPF_FUNC_map_lookup_elem),
7678 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7679 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7680 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 7),
7681 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7682 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
7683 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
7684 			BPF_MOV64_IMM(BPF_REG_3, 0),
7685 			BPF_MOV64_IMM(BPF_REG_4, 0),
7686 			BPF_MOV64_IMM(BPF_REG_5, 0),
7687 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7688 			BPF_EXIT_INSN(),
7689 		},
7690 		.fixup_map_hash_8b = { 3 },
7691 		.result = ACCEPT,
7692 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7693 	},
7694 	{
7695 		"helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
7696 		.insns = {
7697 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7698 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7699 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7700 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7701 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7702 				     BPF_FUNC_map_lookup_elem),
7703 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7704 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7705 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7706 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
7707 			BPF_MOV64_IMM(BPF_REG_3, 0),
7708 			BPF_MOV64_IMM(BPF_REG_4, 0),
7709 			BPF_MOV64_IMM(BPF_REG_5, 0),
7710 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7711 			BPF_EXIT_INSN(),
7712 		},
7713 		.fixup_map_hash_8b = { 3 },
7714 		.result = ACCEPT,
7715 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7716 	},
7717 	{
7718 		"helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)",
7719 		.insns = {
7720 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
7721 				    offsetof(struct __sk_buff, data)),
7722 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7723 				    offsetof(struct __sk_buff, data_end)),
7724 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),
7725 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
7726 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
7727 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
7728 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 0),
7729 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
7730 			BPF_MOV64_IMM(BPF_REG_3, 0),
7731 			BPF_MOV64_IMM(BPF_REG_4, 0),
7732 			BPF_MOV64_IMM(BPF_REG_5, 0),
7733 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7734 			BPF_EXIT_INSN(),
7735 		},
7736 		.result = ACCEPT,
7737 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7738 		.retval = 0 /* csum_diff of 64-byte packet */,
7739 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7740 	},
7741 	{
7742 		"helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
7743 		.insns = {
7744 			BPF_MOV64_IMM(BPF_REG_1, 0),
7745 			BPF_MOV64_IMM(BPF_REG_2, 0),
7746 			BPF_MOV64_IMM(BPF_REG_3, 0),
7747 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7748 			BPF_EXIT_INSN(),
7749 		},
7750 		.errstr = "R1 type=inv expected=fp",
7751 		.result = REJECT,
7752 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7753 	},
7754 	{
7755 		"helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
7756 		.insns = {
7757 			BPF_MOV64_IMM(BPF_REG_1, 0),
7758 			BPF_MOV64_IMM(BPF_REG_2, 1),
7759 			BPF_MOV64_IMM(BPF_REG_3, 0),
7760 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7761 			BPF_EXIT_INSN(),
7762 		},
7763 		.errstr = "R1 type=inv expected=fp",
7764 		.result = REJECT,
7765 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7766 	},
7767 	{
7768 		"helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7769 		.insns = {
7770 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7771 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
7772 			BPF_MOV64_IMM(BPF_REG_2, 0),
7773 			BPF_MOV64_IMM(BPF_REG_3, 0),
7774 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7775 			BPF_EXIT_INSN(),
7776 		},
7777 		.result = ACCEPT,
7778 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7779 	},
7780 	{
7781 		"helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7782 		.insns = {
7783 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7784 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7785 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7786 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7787 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7788 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7789 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7790 			BPF_MOV64_IMM(BPF_REG_2, 0),
7791 			BPF_MOV64_IMM(BPF_REG_3, 0),
7792 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7793 			BPF_EXIT_INSN(),
7794 		},
7795 		.fixup_map_hash_8b = { 3 },
7796 		.result = ACCEPT,
7797 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7798 	},
7799 	{
7800 		"helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7801 		.insns = {
7802 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7803 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7804 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7805 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7806 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7807 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7808 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7809 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
7810 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7811 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
7812 			BPF_MOV64_IMM(BPF_REG_3, 0),
7813 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7814 			BPF_EXIT_INSN(),
7815 		},
7816 		.fixup_map_hash_8b = { 3 },
7817 		.result = ACCEPT,
7818 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7819 	},
7820 	{
7821 		"helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7822 		.insns = {
7823 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7824 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7825 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7826 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7827 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7828 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
7829 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7830 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7831 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 2),
7832 			BPF_MOV64_IMM(BPF_REG_3, 0),
7833 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7834 			BPF_EXIT_INSN(),
7835 		},
7836 		.fixup_map_hash_8b = { 3 },
7837 		.result = ACCEPT,
7838 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7839 	},
7840 	{
7841 		"helper access to variable memory: 8 bytes leak",
7842 		.insns = {
7843 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7844 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7845 			BPF_MOV64_IMM(BPF_REG_0, 0),
7846 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
7847 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
7848 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
7849 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
7850 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
7851 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
7852 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
7853 			BPF_MOV64_IMM(BPF_REG_2, 1),
7854 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7855 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7856 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
7857 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
7858 			BPF_MOV64_IMM(BPF_REG_3, 0),
7859 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7860 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7861 			BPF_EXIT_INSN(),
7862 		},
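		/* the 8 bytes at fp-32 are never initialized, so a
		 * variable-size read of the full 64-byte buffer could leak them
		 */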
7863 		.errstr = "invalid indirect read from stack off -64+32 size 64",
7864 		.result = REJECT,
7865 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7866 	},
7867 	{
7868 		"helper access to variable memory: 8 bytes no leak (init memory)",
7869 		.insns = {
7870 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7871 			BPF_MOV64_IMM(BPF_REG_0, 0),
7872 			BPF_MOV64_IMM(BPF_REG_0, 0),
7873 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
7874 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
7875 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
7876 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
7877 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
7878 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
7879 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
7880 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
7881 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7882 			BPF_MOV64_IMM(BPF_REG_2, 0),
7883 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
7884 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
7885 			BPF_MOV64_IMM(BPF_REG_3, 0),
7886 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7887 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7888 			BPF_EXIT_INSN(),
7889 		},
7890 		.result = ACCEPT,
7891 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7892 	},
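	/* Scalar arithmetic that can still produce an out-of-range offset
	 * (AND with a negative mask, or a mod/and/rsh/mul chain) must not be
	 * usable as a map value offset.
	 */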
7893 	{
7894 		"invalid and of negative number",
7895 		.insns = {
7896 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7897 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7898 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7899 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7900 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7901 				     BPF_FUNC_map_lookup_elem),
7902 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7903 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7904 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
7905 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
7906 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7907 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
7908 				   offsetof(struct test_val, foo)),
7909 			BPF_EXIT_INSN(),
7910 		},
7911 		.fixup_map_hash_48b = { 3 },
7912 		.errstr = "R0 max value is outside of the array range",
7913 		.result = REJECT,
7914 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7915 	},
7916 	{
7917 		"invalid range check",
7918 		.insns = {
7919 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7920 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7921 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7922 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7923 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7924 				     BPF_FUNC_map_lookup_elem),
7925 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
7926 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
7927 			BPF_MOV64_IMM(BPF_REG_9, 1),
7928 			BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
7929 			BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
7930 			BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
7931 			BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
7932 			BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
7933 			BPF_MOV32_IMM(BPF_REG_3, 1),
7934 			BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
7935 			BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
7936 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
7937 			BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
7938 			BPF_MOV64_IMM(BPF_REG_0, 0),
7939 			BPF_EXIT_INSN(),
7940 		},
7941 		.fixup_map_hash_48b = { 3 },
7942 		.errstr = "R0 max value is outside of the array range",
7943 		.result = REJECT,
7944 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7945 	},
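	/* Map-in-map: the inner map pointer returned by the outer lookup must
	 * be NULL-checked before it is used for the nested lookup, and no
	 * arithmetic may be done on it.
	 */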
7946 	{
7947 		"map in map access",
7948 		.insns = {
7949 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7950 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7951 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7952 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7953 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7954 				     BPF_FUNC_map_lookup_elem),
7955 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
7956 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7957 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7958 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7959 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7960 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7961 				     BPF_FUNC_map_lookup_elem),
7962 			BPF_MOV64_IMM(BPF_REG_0, 0),
7963 			BPF_EXIT_INSN(),
7964 		},
7965 		.fixup_map_in_map = { 3 },
7966 		.result = ACCEPT,
7967 	},
7968 	{
7969 		"invalid inner map pointer",
7970 		.insns = {
7971 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7972 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7973 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7974 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7975 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7976 				     BPF_FUNC_map_lookup_elem),
7977 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7978 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7979 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7980 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7981 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7982 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7983 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7984 				     BPF_FUNC_map_lookup_elem),
7985 			BPF_MOV64_IMM(BPF_REG_0, 0),
7986 			BPF_EXIT_INSN(),
7987 		},
7988 		.fixup_map_in_map = { 3 },
7989 		.errstr = "R1 pointer arithmetic on map_ptr prohibited",
7990 		.result = REJECT,
7991 	},
7992 	{
7993 		"forgot null checking on the inner map pointer",
7994 		.insns = {
7995 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7996 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7997 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7998 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7999 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8000 				     BPF_FUNC_map_lookup_elem),
8001 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
8002 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8003 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
8004 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
8005 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8006 				     BPF_FUNC_map_lookup_elem),
8007 			BPF_MOV64_IMM(BPF_REG_0, 0),
8008 			BPF_EXIT_INSN(),
8009 		},
8010 		.fixup_map_in_map = { 3 },
8011 		.errstr = "R1 type=map_value_or_null expected=map_ptr",
8012 		.result = REJECT,
8013 	},
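	/* ld_abs/ld_ind use the classic BPF calling convention: they expect
	 * the skb pointer in R6, return the loaded value in R0 and clobber
	 * the caller-saved registers R1-R5.  Each test below writes a value
	 * into one register, performs an ld_abs/ld_ind and then reads the
	 * register back: R1-R5 must be flagged !read_ok, while callee-saved
	 * R7 survives and the program is accepted.
	 */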
8014 	{
8015 		"ld_abs: check calling conv, r1",
8016 		.insns = {
8017 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8018 			BPF_MOV64_IMM(BPF_REG_1, 0),
8019 			BPF_LD_ABS(BPF_W, -0x200000),
8020 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
8021 			BPF_EXIT_INSN(),
8022 		},
8023 		.errstr = "R1 !read_ok",
8024 		.result = REJECT,
8025 	},
8026 	{
8027 		"ld_abs: check calling conv, r2",
8028 		.insns = {
8029 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8030 			BPF_MOV64_IMM(BPF_REG_2, 0),
8031 			BPF_LD_ABS(BPF_W, -0x200000),
8032 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
8033 			BPF_EXIT_INSN(),
8034 		},
8035 		.errstr = "R2 !read_ok",
8036 		.result = REJECT,
8037 	},
8038 	{
8039 		"ld_abs: check calling conv, r3",
8040 		.insns = {
8041 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8042 			BPF_MOV64_IMM(BPF_REG_3, 0),
8043 			BPF_LD_ABS(BPF_W, -0x200000),
8044 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
8045 			BPF_EXIT_INSN(),
8046 		},
8047 		.errstr = "R3 !read_ok",
8048 		.result = REJECT,
8049 	},
8050 	{
8051 		"ld_abs: check calling conv, r4",
8052 		.insns = {
8053 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8054 			BPF_MOV64_IMM(BPF_REG_4, 0),
8055 			BPF_LD_ABS(BPF_W, -0x200000),
8056 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
8057 			BPF_EXIT_INSN(),
8058 		},
8059 		.errstr = "R4 !read_ok",
8060 		.result = REJECT,
8061 	},
8062 	{
8063 		"ld_abs: check calling conv, r5",
8064 		.insns = {
8065 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8066 			BPF_MOV64_IMM(BPF_REG_5, 0),
8067 			BPF_LD_ABS(BPF_W, -0x200000),
8068 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
8069 			BPF_EXIT_INSN(),
8070 		},
8071 		.errstr = "R5 !read_ok",
8072 		.result = REJECT,
8073 	},
8074 	{
8075 		"ld_abs: check calling conv, r7",
8076 		.insns = {
8077 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8078 			BPF_MOV64_IMM(BPF_REG_7, 0),
8079 			BPF_LD_ABS(BPF_W, -0x200000),
8080 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
8081 			BPF_EXIT_INSN(),
8082 		},
8083 		.result = ACCEPT,
8084 	},
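	/* ld_abs needs the skb pointer in R6.  Here it is parked in R7
	 * across the bpf_skb_vlan_push() call (which may change packet
	 * data), R6 is clobbered and then restored from R7; the verifier
	 * must still accept the ld_abs instructions that follow.
	 */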
8085 	{
8086 		"ld_abs: tests on r6 and skb data reload helper",
8087 		.insns = {
8088 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8089 			BPF_LD_ABS(BPF_B, 0),
8090 			BPF_LD_ABS(BPF_H, 0),
8091 			BPF_LD_ABS(BPF_W, 0),
8092 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
8093 			BPF_MOV64_IMM(BPF_REG_6, 0),
8094 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
8095 			BPF_MOV64_IMM(BPF_REG_2, 1),
8096 			BPF_MOV64_IMM(BPF_REG_3, 2),
8097 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8098 				     BPF_FUNC_skb_vlan_push),
8099 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
8100 			BPF_LD_ABS(BPF_B, 0),
8101 			BPF_LD_ABS(BPF_H, 0),
8102 			BPF_LD_ABS(BPF_W, 0),
8103 			BPF_MOV64_IMM(BPF_REG_0, 42),
8104 			BPF_EXIT_INSN(),
8105 		},
8106 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
8107 		.result = ACCEPT,
8108 		.retval = 42 /* ultimate return value */,
8109 	},
8110 	{
8111 		"ld_ind: check calling conv, r1",
8112 		.insns = {
8113 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8114 			BPF_MOV64_IMM(BPF_REG_1, 1),
8115 			BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
8116 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
8117 			BPF_EXIT_INSN(),
8118 		},
8119 		.errstr = "R1 !read_ok",
8120 		.result = REJECT,
8121 	},
8122 	{
8123 		"ld_ind: check calling conv, r2",
8124 		.insns = {
8125 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8126 			BPF_MOV64_IMM(BPF_REG_2, 1),
8127 			BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
8128 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
8129 			BPF_EXIT_INSN(),
8130 		},
8131 		.errstr = "R2 !read_ok",
8132 		.result = REJECT,
8133 	},
8134 	{
8135 		"ld_ind: check calling conv, r3",
8136 		.insns = {
8137 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8138 			BPF_MOV64_IMM(BPF_REG_3, 1),
8139 			BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
8140 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
8141 			BPF_EXIT_INSN(),
8142 		},
8143 		.errstr = "R3 !read_ok",
8144 		.result = REJECT,
8145 	},
8146 	{
8147 		"ld_ind: check calling conv, r4",
8148 		.insns = {
8149 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8150 			BPF_MOV64_IMM(BPF_REG_4, 1),
8151 			BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
8152 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
8153 			BPF_EXIT_INSN(),
8154 		},
8155 		.errstr = "R4 !read_ok",
8156 		.result = REJECT,
8157 	},
8158 	{
8159 		"ld_ind: check calling conv, r5",
8160 		.insns = {
8161 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8162 			BPF_MOV64_IMM(BPF_REG_5, 1),
8163 			BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
8164 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
8165 			BPF_EXIT_INSN(),
8166 		},
8167 		.errstr = "R5 !read_ok",
8168 		.result = REJECT,
8169 	},
8170 	{
8171 		"ld_ind: check calling conv, r7",
8172 		.insns = {
8173 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8174 			BPF_MOV64_IMM(BPF_REG_7, 1),
8175 			BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
8176 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
8177 			BPF_EXIT_INSN(),
8178 		},
8179 		.result = ACCEPT,
8180 		.retval = 1,
8181 	},
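	/* bpf_perf_event_data->sample_period is a 64-bit field; narrower
	 * byte/half/word loads of it are permitted.  The #if blocks pick the
	 * offset of the low-order bytes on big-endian builds so the same
	 * bytes are read regardless of endianness.
	 */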
8182 	{
8183 		"check bpf_perf_event_data->sample_period byte load permitted",
8184 		.insns = {
8185 			BPF_MOV64_IMM(BPF_REG_0, 0),
8186 #if __BYTE_ORDER == __LITTLE_ENDIAN
8187 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
8188 				    offsetof(struct bpf_perf_event_data, sample_period)),
8189 #else
8190 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
8191 				    offsetof(struct bpf_perf_event_data, sample_period) + 7),
8192 #endif
8193 			BPF_EXIT_INSN(),
8194 		},
8195 		.result = ACCEPT,
8196 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
8197 	},
8198 	{
8199 		"check bpf_perf_event_data->sample_period half load permitted",
8200 		.insns = {
8201 			BPF_MOV64_IMM(BPF_REG_0, 0),
8202 #if __BYTE_ORDER == __LITTLE_ENDIAN
8203 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
8204 				    offsetof(struct bpf_perf_event_data, sample_period)),
8205 #else
8206 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
8207 				    offsetof(struct bpf_perf_event_data, sample_period) + 6),
8208 #endif
8209 			BPF_EXIT_INSN(),
8210 		},
8211 		.result = ACCEPT,
8212 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
8213 	},
8214 	{
8215 		"check bpf_perf_event_data->sample_period word load permitted",
8216 		.insns = {
8217 			BPF_MOV64_IMM(BPF_REG_0, 0),
8218 #if __BYTE_ORDER == __LITTLE_ENDIAN
8219 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8220 				    offsetof(struct bpf_perf_event_data, sample_period)),
8221 #else
8222 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8223 				    offsetof(struct bpf_perf_event_data, sample_period) + 4),
8224 #endif
8225 			BPF_EXIT_INSN(),
8226 		},
8227 		.result = ACCEPT,
8228 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
8229 	},
8230 	{
8231 		"check bpf_perf_event_data->sample_period dword load permitted",
8232 		.insns = {
8233 			BPF_MOV64_IMM(BPF_REG_0, 0),
8234 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
8235 				    offsetof(struct bpf_perf_event_data, sample_period)),
8236 			BPF_EXIT_INSN(),
8237 		},
8238 		.result = ACCEPT,
8239 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
8240 	},
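	/* Narrow (2-byte) loads of skb->data and, for LWT programs,
	 * skb->tc_classid are not valid context accesses and must be
	 * rejected.
	 */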
8241 	{
8242 		"check skb->data half load not permitted",
8243 		.insns = {
8244 			BPF_MOV64_IMM(BPF_REG_0, 0),
8245 #if __BYTE_ORDER == __LITTLE_ENDIAN
8246 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
8247 				    offsetof(struct __sk_buff, data)),
8248 #else
8249 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
8250 				    offsetof(struct __sk_buff, data) + 2),
8251 #endif
8252 			BPF_EXIT_INSN(),
8253 		},
8254 		.result = REJECT,
8255 		.errstr = "invalid bpf_context access",
8256 	},
8257 	{
8258 		"check skb->tc_classid half load not permitted for lwt prog",
8259 		.insns = {
8260 			BPF_MOV64_IMM(BPF_REG_0, 0),
8261 #if __BYTE_ORDER == __LITTLE_ENDIAN
8262 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
8263 				    offsetof(struct __sk_buff, tc_classid)),
8264 #else
8265 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
8266 				    offsetof(struct __sk_buff, tc_classid) + 2),
8267 #endif
8268 			BPF_EXIT_INSN(),
8269 		},
8270 		.result = REJECT,
8271 		.errstr = "invalid bpf_context access",
8272 		.prog_type = BPF_PROG_TYPE_LWT_IN,
8273 	},
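	/* The "mixing signed and unsigned" series loads an attacker
	 * controlled 64-bit value from the stack and guards a map value
	 * access with a mix of unsigned (JGT/JGE) and signed (JSGT)
	 * comparisons.  Unless some check actually establishes a lower
	 * bound (e.g. masking with AND or comparing against a small positive
	 * constant), the value's minimum stays unbounded and the pointer
	 * arithmetic must be rejected with "unbounded min value".
	 */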
8274 	{
8275 		"bounds checks mixing signed and unsigned, positive bounds",
8276 		.insns = {
8277 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8278 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8279 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8280 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8281 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8282 				     BPF_FUNC_map_lookup_elem),
8283 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
8284 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8285 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8286 			BPF_MOV64_IMM(BPF_REG_2, 2),
8287 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
8288 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2),
8289 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8290 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8291 			BPF_MOV64_IMM(BPF_REG_0, 0),
8292 			BPF_EXIT_INSN(),
8293 		},
8294 		.fixup_map_hash_8b = { 3 },
8295 		.errstr = "unbounded min value",
8296 		.result = REJECT,
8297 	},
8298 	{
8299 		"bounds checks mixing signed and unsigned",
8300 		.insns = {
8301 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8302 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8303 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8304 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8305 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8306 				     BPF_FUNC_map_lookup_elem),
8307 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
8308 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8309 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8310 			BPF_MOV64_IMM(BPF_REG_2, -1),
8311 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
8312 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8313 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8314 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8315 			BPF_MOV64_IMM(BPF_REG_0, 0),
8316 			BPF_EXIT_INSN(),
8317 		},
8318 		.fixup_map_hash_8b = { 3 },
8319 		.errstr = "unbounded min value",
8320 		.result = REJECT,
8321 	},
8322 	{
8323 		"bounds checks mixing signed and unsigned, variant 2",
8324 		.insns = {
8325 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8326 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8327 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8328 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8329 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8330 				     BPF_FUNC_map_lookup_elem),
8331 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8332 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8333 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8334 			BPF_MOV64_IMM(BPF_REG_2, -1),
8335 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
8336 			BPF_MOV64_IMM(BPF_REG_8, 0),
8337 			BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1),
8338 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
8339 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
8340 			BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
8341 			BPF_MOV64_IMM(BPF_REG_0, 0),
8342 			BPF_EXIT_INSN(),
8343 		},
8344 		.fixup_map_hash_8b = { 3 },
8345 		.errstr = "unbounded min value",
8346 		.result = REJECT,
8347 	},
8348 	{
8349 		"bounds checks mixing signed and unsigned, variant 3",
8350 		.insns = {
8351 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8352 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8353 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8354 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8355 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8356 				     BPF_FUNC_map_lookup_elem),
8357 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
8358 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8359 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8360 			BPF_MOV64_IMM(BPF_REG_2, -1),
8361 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
8362 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
8363 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
8364 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
8365 			BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
8366 			BPF_MOV64_IMM(BPF_REG_0, 0),
8367 			BPF_EXIT_INSN(),
8368 		},
8369 		.fixup_map_hash_8b = { 3 },
8370 		.errstr = "unbounded min value",
8371 		.result = REJECT,
8372 	},
8373 	{
8374 		"bounds checks mixing signed and unsigned, variant 4",
8375 		.insns = {
8376 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8377 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8378 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8379 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8380 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8381 				     BPF_FUNC_map_lookup_elem),
8382 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
8383 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8384 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8385 			BPF_MOV64_IMM(BPF_REG_2, 1),
8386 			BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
8387 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8388 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8389 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8390 			BPF_MOV64_IMM(BPF_REG_0, 0),
8391 			BPF_EXIT_INSN(),
8392 		},
8393 		.fixup_map_hash_8b = { 3 },
8394 		.result = ACCEPT,
8395 	},
8396 	{
8397 		"bounds checks mixing signed and unsigned, variant 5",
8398 		.insns = {
8399 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8400 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8401 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8402 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8403 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8404 				     BPF_FUNC_map_lookup_elem),
8405 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8406 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8407 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8408 			BPF_MOV64_IMM(BPF_REG_2, -1),
8409 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
8410 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4),
8411 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),
8412 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
8413 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8414 			BPF_MOV64_IMM(BPF_REG_0, 0),
8415 			BPF_EXIT_INSN(),
8416 		},
8417 		.fixup_map_hash_8b = { 3 },
8418 		.errstr = "unbounded min value",
8419 		.result = REJECT,
8420 	},
8421 	{
8422 		"bounds checks mixing signed and unsigned, variant 6",
8423 		.insns = {
8424 			BPF_MOV64_IMM(BPF_REG_2, 0),
8425 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
8426 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
8427 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8428 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
8429 			BPF_MOV64_IMM(BPF_REG_6, -1),
8430 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
8431 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4),
8432 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
8433 			BPF_MOV64_IMM(BPF_REG_5, 0),
8434 			BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0),
8435 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8436 				     BPF_FUNC_skb_load_bytes),
8437 			BPF_MOV64_IMM(BPF_REG_0, 0),
8438 			BPF_EXIT_INSN(),
8439 		},
8440 		.errstr = "R4 min value is negative, either use unsigned",
8441 		.result = REJECT,
8442 	},
8443 	{
8444 		"bounds checks mixing signed and unsigned, variant 7",
8445 		.insns = {
8446 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8447 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8448 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8449 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8450 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8451 				     BPF_FUNC_map_lookup_elem),
8452 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
8453 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8454 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8455 			BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
8456 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
8457 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8458 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8459 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8460 			BPF_MOV64_IMM(BPF_REG_0, 0),
8461 			BPF_EXIT_INSN(),
8462 		},
8463 		.fixup_map_hash_8b = { 3 },
8464 		.result = ACCEPT,
8465 	},
8466 	{
8467 		"bounds checks mixing signed and unsigned, variant 8",
8468 		.insns = {
8469 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8470 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8471 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8472 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8473 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8474 				     BPF_FUNC_map_lookup_elem),
8475 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8476 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8477 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8478 			BPF_MOV64_IMM(BPF_REG_2, -1),
8479 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
8480 			BPF_MOV64_IMM(BPF_REG_0, 0),
8481 			BPF_EXIT_INSN(),
8482 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8483 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8484 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8485 			BPF_MOV64_IMM(BPF_REG_0, 0),
8486 			BPF_EXIT_INSN(),
8487 		},
8488 		.fixup_map_hash_8b = { 3 },
8489 		.errstr = "unbounded min value",
8490 		.result = REJECT,
8491 	},
8492 	{
8493 		"bounds checks mixing signed and unsigned, variant 9",
8494 		.insns = {
8495 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8496 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8497 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8498 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8499 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8500 				     BPF_FUNC_map_lookup_elem),
8501 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
8502 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8503 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8504 			BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
8505 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
8506 			BPF_MOV64_IMM(BPF_REG_0, 0),
8507 			BPF_EXIT_INSN(),
8508 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8509 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8510 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8511 			BPF_MOV64_IMM(BPF_REG_0, 0),
8512 			BPF_EXIT_INSN(),
8513 		},
8514 		.fixup_map_hash_8b = { 3 },
8515 		.result = ACCEPT,
8516 	},
8517 	{
8518 		"bounds checks mixing signed and unsigned, variant 10",
8519 		.insns = {
8520 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8521 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8522 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8523 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8524 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8525 				     BPF_FUNC_map_lookup_elem),
8526 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8527 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8528 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8529 			BPF_MOV64_IMM(BPF_REG_2, 0),
8530 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
8531 			BPF_MOV64_IMM(BPF_REG_0, 0),
8532 			BPF_EXIT_INSN(),
8533 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8534 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8535 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8536 			BPF_MOV64_IMM(BPF_REG_0, 0),
8537 			BPF_EXIT_INSN(),
8538 		},
8539 		.fixup_map_hash_8b = { 3 },
8540 		.errstr = "unbounded min value",
8541 		.result = REJECT,
8542 	},
8543 	{
8544 		"bounds checks mixing signed and unsigned, variant 11",
8545 		.insns = {
8546 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8547 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8548 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8549 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8550 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8551 				     BPF_FUNC_map_lookup_elem),
8552 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8553 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8554 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8555 			BPF_MOV64_IMM(BPF_REG_2, -1),
8556 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
8557 			/* Dead branch. */
8558 			BPF_MOV64_IMM(BPF_REG_0, 0),
8559 			BPF_EXIT_INSN(),
8560 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8561 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8562 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8563 			BPF_MOV64_IMM(BPF_REG_0, 0),
8564 			BPF_EXIT_INSN(),
8565 		},
8566 		.fixup_map_hash_8b = { 3 },
8567 		.errstr = "unbounded min value",
8568 		.result = REJECT,
8569 	},
8570 	{
8571 		"bounds checks mixing signed and unsigned, variant 12",
8572 		.insns = {
8573 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8574 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8575 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8576 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8577 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8578 				     BPF_FUNC_map_lookup_elem),
8579 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8580 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8581 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8582 			BPF_MOV64_IMM(BPF_REG_2, -6),
8583 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
8584 			BPF_MOV64_IMM(BPF_REG_0, 0),
8585 			BPF_EXIT_INSN(),
8586 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8587 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8588 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8589 			BPF_MOV64_IMM(BPF_REG_0, 0),
8590 			BPF_EXIT_INSN(),
8591 		},
8592 		.fixup_map_hash_8b = { 3 },
8593 		.errstr = "unbounded min value",
8594 		.result = REJECT,
8595 	},
8596 	{
8597 		"bounds checks mixing signed and unsigned, variant 13",
8598 		.insns = {
8599 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8600 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8601 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8602 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8603 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8604 				     BPF_FUNC_map_lookup_elem),
8605 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
8606 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8607 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8608 			BPF_MOV64_IMM(BPF_REG_2, 2),
8609 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
8610 			BPF_MOV64_IMM(BPF_REG_7, 1),
8611 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2),
8612 			BPF_MOV64_IMM(BPF_REG_0, 0),
8613 			BPF_EXIT_INSN(),
8614 			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1),
8615 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2),
8616 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7),
8617 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8618 			BPF_MOV64_IMM(BPF_REG_0, 0),
8619 			BPF_EXIT_INSN(),
8620 		},
8621 		.fixup_map_hash_8b = { 3 },
8622 		.errstr = "unbounded min value",
8623 		.result = REJECT,
8624 	},
8625 	{
8626 		"bounds checks mixing signed and unsigned, variant 14",
8627 		.insns = {
8628 			BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
8629 				    offsetof(struct __sk_buff, mark)),
8630 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8631 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8632 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8633 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8634 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8635 				     BPF_FUNC_map_lookup_elem),
8636 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
8637 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8638 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8639 			BPF_MOV64_IMM(BPF_REG_2, -1),
8640 			BPF_MOV64_IMM(BPF_REG_8, 2),
8641 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6),
8642 			BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3),
8643 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8644 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8645 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8646 			BPF_MOV64_IMM(BPF_REG_0, 0),
8647 			BPF_EXIT_INSN(),
8648 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
8649 			BPF_JMP_IMM(BPF_JA, 0, 0, -7),
8650 		},
8651 		.fixup_map_hash_8b = { 4 },
8652 		.errstr = "R0 invalid mem access 'inv'",
8653 		.result = REJECT,
8654 	},
8655 	{
8656 		"bounds checks mixing signed and unsigned, variant 15",
8657 		.insns = {
8658 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8659 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8660 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8661 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8662 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8663 				     BPF_FUNC_map_lookup_elem),
8664 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8665 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8666 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8667 			BPF_MOV64_IMM(BPF_REG_2, -6),
8668 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
8669 			BPF_MOV64_IMM(BPF_REG_0, 0),
8670 			BPF_EXIT_INSN(),
8671 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8672 			BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2),
8673 			BPF_MOV64_IMM(BPF_REG_0, 0),
8674 			BPF_EXIT_INSN(),
8675 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8676 			BPF_MOV64_IMM(BPF_REG_0, 0),
8677 			BPF_EXIT_INSN(),
8678 		},
8679 		.fixup_map_hash_8b = { 3 },
8680 		.errstr = "unbounded min value",
8681 		.result = REJECT,
8682 		.result_unpriv = REJECT,
8683 	},
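	/* Subtraction bounds: both loaded bytes are individually bounded to
	 * [0, 0xff], but their difference can be negative, so using it as a
	 * map value offset (with or without the extra right shift) must be
	 * rejected.
	 */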
8684 	{
8685 		"subtraction bounds (map value) variant 1",
8686 		.insns = {
8687 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8688 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8689 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8690 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8691 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8692 				     BPF_FUNC_map_lookup_elem),
8693 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8694 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8695 			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7),
8696 			BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
8697 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5),
8698 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
8699 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56),
8700 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8701 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8702 			BPF_EXIT_INSN(),
8703 			BPF_MOV64_IMM(BPF_REG_0, 0),
8704 			BPF_EXIT_INSN(),
8705 		},
8706 		.fixup_map_hash_8b = { 3 },
8707 		.errstr = "R0 max value is outside of the array range",
8708 		.result = REJECT,
8709 	},
8710 	{
8711 		"subtraction bounds (map value) variant 2",
8712 		.insns = {
8713 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8714 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8715 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8716 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8717 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8718 				     BPF_FUNC_map_lookup_elem),
8719 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
8720 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8721 			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 6),
8722 			BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
8723 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 4),
8724 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
8725 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8726 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8727 			BPF_EXIT_INSN(),
8728 			BPF_MOV64_IMM(BPF_REG_0, 0),
8729 			BPF_EXIT_INSN(),
8730 		},
8731 		.fixup_map_hash_8b = { 3 },
8732 		.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
8733 		.result = REJECT,
8734 	},
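	/* MOV32 zero-extends its 32-bit result, while a 64-bit MOV of a
	 * 32-bit immediate sign-extends it.  The first test therefore ends
	 * up adding exactly 0 to the map value pointer and is accepted; the
	 * sign-extended variants produce a huge offset and must be rejected.
	 */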
8735 	{
8736 		"bounds check based on zero-extended MOV",
8737 		.insns = {
8738 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8739 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8740 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8741 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8742 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8743 				     BPF_FUNC_map_lookup_elem),
8744 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8745 			/* r2 = 0x0000'0000'ffff'ffff */
8746 			BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
8747 			/* r2 = 0 */
8748 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
8749 			/* no-op */
8750 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
8751 			/* access at offset 0 */
8752 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8753 			/* exit */
8754 			BPF_MOV64_IMM(BPF_REG_0, 0),
8755 			BPF_EXIT_INSN(),
8756 		},
8757 		.fixup_map_hash_8b = { 3 },
8758 		.result = ACCEPT
8759 	},
8760 	{
8761 		"bounds check based on sign-extended MOV. test1",
8762 		.insns = {
8763 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8764 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8765 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8766 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8767 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8768 				     BPF_FUNC_map_lookup_elem),
8769 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8770 			/* r2 = 0xffff'ffff'ffff'ffff */
8771 			BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
8772 			/* r2 = 0xffff'ffff */
8773 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
8774 			/* r0 = <oob pointer> */
8775 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
8776 			/* access to OOB pointer */
8777 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8778 			/* exit */
8779 			BPF_MOV64_IMM(BPF_REG_0, 0),
8780 			BPF_EXIT_INSN(),
8781 		},
8782 		.fixup_map_hash_8b = { 3 },
8783 		.errstr = "map_value pointer and 4294967295",
8784 		.result = REJECT
8785 	},
8786 	{
8787 		"bounds check based on sign-extended MOV. test2",
8788 		.insns = {
8789 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8790 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8791 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8792 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8793 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8794 				     BPF_FUNC_map_lookup_elem),
8795 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8796 			/* r2 = 0xffff'ffff'ffff'ffff */
8797 			BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
8798 			/* r2 = 0xfff'ffff */
8799 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36),
8800 			/* r0 = <oob pointer> */
8801 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
8802 			/* access to OOB pointer */
8803 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8804 			/* exit */
8805 			BPF_MOV64_IMM(BPF_REG_0, 0),
8806 			BPF_EXIT_INSN(),
8807 		},
8808 		.fixup_map_hash_8b = { 3 },
8809 		.errstr = "R0 min value is outside of the array range",
8810 		.result = REJECT
8811 	},
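	/* The access offset is the sum of the pointer's fixed offset, its
	 * variable offset and the load's immediate offset.  Each component
	 * below is individually acceptable; only their sum is out of range,
	 * so the verifier has to add them up before judging the access.
	 */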
8812 	{
8813 		"bounds check based on reg_off + var_off + insn_off. test1",
8814 		.insns = {
8815 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
8816 				    offsetof(struct __sk_buff, mark)),
8817 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8818 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8819 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8820 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8821 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8822 				     BPF_FUNC_map_lookup_elem),
8823 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8824 			BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
8825 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1),
8826 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
8827 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
8828 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
8829 			BPF_MOV64_IMM(BPF_REG_0, 0),
8830 			BPF_EXIT_INSN(),
8831 		},
8832 		.fixup_map_hash_8b = { 4 },
8833 		.errstr = "value_size=8 off=1073741825",
8834 		.result = REJECT,
8835 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
8836 	},
8837 	{
8838 		"bounds check based on reg_off + var_off + insn_off. test2",
8839 		.insns = {
8840 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
8841 				    offsetof(struct __sk_buff, mark)),
8842 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8843 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8844 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8845 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8846 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8847 				     BPF_FUNC_map_lookup_elem),
8848 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8849 			BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
8850 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1),
8851 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
8852 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
8853 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
8854 			BPF_MOV64_IMM(BPF_REG_0, 0),
8855 			BPF_EXIT_INSN(),
8856 		},
8857 		.fixup_map_hash_8b = { 4 },
8858 		.errstr = "value 1073741823",
8859 		.result = REJECT,
8860 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
8861 	},
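	/* Truncation tests: 32-bit ALU ops truncate the tracked value to
	 * 32 bits.  When the known range does not cross a 32-bit boundary
	 * the result stays precisely bounded and the access is accepted;
	 * when it does cross, the truncated value can be almost anything,
	 * so the later map access has to be rejected as unbounded.
	 */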
8862 	{
8863 		"bounds check after truncation of non-boundary-crossing range",
8864 		.insns = {
8865 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8866 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8867 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8868 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8869 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8870 				     BPF_FUNC_map_lookup_elem),
8871 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8872 			/* r1 = [0x00, 0xff] */
8873 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8874 			BPF_MOV64_IMM(BPF_REG_2, 1),
8875 			/* r2 = 0x10'0000'0000 */
8876 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36),
8877 			/* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
8878 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
8879 			/* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
8880 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
8881 			/* r1 = [0x00, 0xff] */
8882 			BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff),
8883 			/* r1 = 0 */
8884 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
8885 			/* no-op */
8886 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8887 			/* access at offset 0 */
8888 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8889 			/* exit */
8890 			BPF_MOV64_IMM(BPF_REG_0, 0),
8891 			BPF_EXIT_INSN(),
8892 		},
8893 		.fixup_map_hash_8b = { 3 },
8894 		.result = ACCEPT
8895 	},
8896 	{
8897 		"bounds check after truncation of boundary-crossing range (1)",
8898 		.insns = {
8899 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8900 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8901 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8902 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8903 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8904 				     BPF_FUNC_map_lookup_elem),
8905 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8906 			/* r1 = [0x00, 0xff] */
8907 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8908 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
8909 			/* r1 = [0xffff'ff80, 0x1'0000'007f] */
8910 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
8911 			/* r1 = [0xffff'ff80, 0xffff'ffff] or
8912 			 *      [0x0000'0000, 0x0000'007f]
8913 			 */
8914 			BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0),
8915 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
8916 			/* r1 = [0x00, 0xff] or
8917 			 *      [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
8918 			 */
8919 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
8920 			/* r1 = 0 or
8921 			 *      [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
8922 			 */
8923 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
8924 			/* no-op or OOB pointer computation */
8925 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8926 			/* potentially OOB access */
8927 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8928 			/* exit */
8929 			BPF_MOV64_IMM(BPF_REG_0, 0),
8930 			BPF_EXIT_INSN(),
8931 		},
8932 		.fixup_map_hash_8b = { 3 },
8933 		/* not actually fully unbounded, but the bound is very high */
8934 		.errstr = "R0 unbounded memory access",
8935 		.result = REJECT
8936 	},
8937 	{
8938 		"bounds check after truncation of boundary-crossing range (2)",
8939 		.insns = {
8940 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8941 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8942 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8943 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8944 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8945 				     BPF_FUNC_map_lookup_elem),
8946 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8947 			/* r1 = [0x00, 0xff] */
8948 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8949 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
8950 			/* r1 = [0xffff'ff80, 0x1'0000'007f] */
8951 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
8952 			/* r1 = [0xffff'ff80, 0xffff'ffff] or
8953 			 *      [0x0000'0000, 0x0000'007f]
8954 			 * difference to previous test: truncation via MOV32
8955 			 * instead of ALU32.
8956 			 */
8957 			BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
8958 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
8959 			/* r1 = [0x00, 0xff] or
8960 			 *      [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
8961 			 */
8962 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
8963 			/* r1 = 0 or
8964 			 *      [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
8965 			 */
8966 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
8967 			/* no-op or OOB pointer computation */
8968 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8969 			/* potentially OOB access */
8970 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8971 			/* exit */
8972 			BPF_MOV64_IMM(BPF_REG_0, 0),
8973 			BPF_EXIT_INSN(),
8974 		},
8975 		.fixup_map_hash_8b = { 3 },
8976 		/* not actually fully unbounded, but the bound is very high */
8977 		.errstr = "R0 unbounded memory access",
8978 		.result = REJECT
8979 	},
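	/* More value-tracking corner cases: a 32-bit addition that wraps to
	 * exactly 0 is fine, while a shift with an out-of-range count and a
	 * logical right shift of a possibly negative value leave ranges that
	 * do not fit the 8-byte map value and must be rejected.
	 */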
8980 	{
8981 		"bounds check after wrapping 32-bit addition",
8982 		.insns = {
8983 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8984 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8985 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8986 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8987 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8988 				     BPF_FUNC_map_lookup_elem),
8989 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
8990 			/* r1 = 0x7fff'ffff */
8991 			BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff),
8992 			/* r1 = 0xffff'fffe */
8993 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
8994 			/* r1 = 0 */
8995 			BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2),
8996 			/* no-op */
8997 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8998 			/* access at offset 0 */
8999 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9000 			/* exit */
9001 			BPF_MOV64_IMM(BPF_REG_0, 0),
9002 			BPF_EXIT_INSN(),
9003 		},
9004 		.fixup_map_hash_8b = { 3 },
9005 		.result = ACCEPT
9006 	},
9007 	{
9008 		"bounds check after shift with oversized count operand",
9009 		.insns = {
9010 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9011 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9012 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9013 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9014 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9015 				     BPF_FUNC_map_lookup_elem),
9016 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
9017 			BPF_MOV64_IMM(BPF_REG_2, 32),
9018 			BPF_MOV64_IMM(BPF_REG_1, 1),
9019 			/* r1 = (u32)1 << (u32)32 = ? */
9020 			BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
9021 			/* r1 = [0x0000, 0xffff] */
9022 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff),
9023 			/* computes unknown pointer, potentially OOB */
9024 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9025 			/* potentially OOB access */
9026 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9027 			/* exit */
9028 			BPF_MOV64_IMM(BPF_REG_0, 0),
9029 			BPF_EXIT_INSN(),
9030 		},
9031 		.fixup_map_hash_8b = { 3 },
9032 		.errstr = "R0 max value is outside of the array range",
9033 		.result = REJECT
9034 	},
9035 	{
9036 		"bounds check after right shift of maybe-negative number",
9037 		.insns = {
9038 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9039 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9040 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9041 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9042 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9043 				     BPF_FUNC_map_lookup_elem),
9044 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
9045 			/* r1 = [0x00, 0xff] */
9046 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
9047 			/* r1 = [-0x01, 0xfe] */
9048 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
9049 			/* r1 = 0 or 0xff'ffff'ffff'ffff */
9050 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
9051 			/* r1 = 0 or 0xffff'ffff'ffff */
9052 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
9053 			/* computes unknown pointer, potentially OOB */
9054 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9055 			/* potentially OOB access */
9056 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9057 			/* exit */
9058 			BPF_MOV64_IMM(BPF_REG_0, 0),
9059 			BPF_EXIT_INSN(),
9060 		},
9061 		.fixup_map_hash_8b = { 3 },
9062 		.errstr = "R0 unbounded memory access",
9063 		.result = REJECT
9064 	},
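	/* off+size overflow tests: constants (or a multiplied value) are
	 * added to or subtracted from the map value pointer so that the
	 * pointer offset plus the access size no longer fits what the
	 * verifier allows for a signed 32-bit off+size computation; each
	 * must be rejected with a message naming the offending offset.
	 */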
9065 	{
9066 		"bounds check map access with off+size signed 32bit overflow. test1",
9067 		.insns = {
9068 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9069 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9070 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9071 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9072 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9073 				     BPF_FUNC_map_lookup_elem),
9074 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
9075 			BPF_EXIT_INSN(),
9076 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe),
9077 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
9078 			BPF_JMP_A(0),
9079 			BPF_EXIT_INSN(),
9080 		},
9081 		.fixup_map_hash_8b = { 3 },
9082 		.errstr = "map_value pointer and 2147483646",
9083 		.result = REJECT
9084 	},
9085 	{
9086 		"bounds check map access with off+size signed 32bit overflow. test2",
9087 		.insns = {
9088 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9089 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9090 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9091 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9092 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9093 				     BPF_FUNC_map_lookup_elem),
9094 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
9095 			BPF_EXIT_INSN(),
9096 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
9097 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
9098 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
9099 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
9100 			BPF_JMP_A(0),
9101 			BPF_EXIT_INSN(),
9102 		},
9103 		.fixup_map_hash_8b = { 3 },
9104 		.errstr = "pointer offset 1073741822",
9105 		.result = REJECT
9106 	},
9107 	{
9108 		"bounds check map access with off+size signed 32bit overflow. test3",
9109 		.insns = {
9110 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9111 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9112 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9113 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9114 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9115 				     BPF_FUNC_map_lookup_elem),
9116 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
9117 			BPF_EXIT_INSN(),
9118 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
9119 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
9120 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
9121 			BPF_JMP_A(0),
9122 			BPF_EXIT_INSN(),
9123 		},
9124 		.fixup_map_hash_8b = { 3 },
9125 		.errstr = "pointer offset -1073741822",
9126 		.result = REJECT
9127 	},
9128 	{
9129 		"bounds check map access with off+size signed 32bit overflow. test4",
9130 		.insns = {
9131 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9132 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9133 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9134 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9135 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9136 				     BPF_FUNC_map_lookup_elem),
9137 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
9138 			BPF_EXIT_INSN(),
9139 			BPF_MOV64_IMM(BPF_REG_1, 1000000),
9140 			BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000),
9141 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9142 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
9143 			BPF_JMP_A(0),
9144 			BPF_EXIT_INSN(),
9145 		},
9146 		.fixup_map_hash_8b = { 3 },
9147 		.errstr = "map_value pointer and 1000000000000",
9148 		.result = REJECT
9149 	},
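	/* Depending on the branch taken, R0 holds either a value loaded from
	 * the map (a scalar) or the frame pointer.  The paths join before
	 * the exit, and state pruning must not treat the pointer and the
	 * scalar as equivalent.  Unprivileged programs must additionally be
	 * stopped from returning the pointer (address leak).
	 */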
9150 	{
9151 		"pointer/scalar confusion in state equality check (way 1)",
9152 		.insns = {
9153 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9154 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9155 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9156 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9157 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9158 				     BPF_FUNC_map_lookup_elem),
9159 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
9160 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
9161 			BPF_JMP_A(1),
9162 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
9163 			BPF_JMP_A(0),
9164 			BPF_EXIT_INSN(),
9165 		},
9166 		.fixup_map_hash_8b = { 3 },
9167 		.result = ACCEPT,
9168 		.retval = POINTER_VALUE,
9169 		.result_unpriv = REJECT,
9170 		.errstr_unpriv = "R0 leaks addr as return value"
9171 	},
9172 	{
9173 		"pointer/scalar confusion in state equality check (way 2)",
9174 		.insns = {
9175 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9176 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9177 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9178 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9179 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9180 				     BPF_FUNC_map_lookup_elem),
9181 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
9182 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
9183 			BPF_JMP_A(1),
9184 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
9185 			BPF_EXIT_INSN(),
9186 		},
9187 		.fixup_map_hash_8b = { 3 },
9188 		.result = ACCEPT,
9189 		.retval = POINTER_VALUE,
9190 		.result_unpriv = REJECT,
9191 		.errstr_unpriv = "R0 leaks addr as return value"
9192 	},
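	/* Context and stack pointers may only be dereferenced at offsets
	 * known at verification time.  Adding an unknown (even small and
	 * aligned) value makes the offset variable, and both the direct and
	 * the helper-mediated accesses below are expected to be rejected.
	 */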
9193 	{
9194 		"variable-offset ctx access",
9195 		.insns = {
9196 			/* Get an unknown value */
9197 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
9198 			/* Make it small and 4-byte aligned */
9199 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
9200 			/* add it to skb.  We now have either &skb->len or
9201 			 * &skb->pkt_type, but we don't know which
9202 			 */
9203 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
9204 			/* dereference it */
9205 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
9206 			BPF_EXIT_INSN(),
9207 		},
9208 		.errstr = "variable ctx access var_off=(0x0; 0x4)",
9209 		.result = REJECT,
9210 		.prog_type = BPF_PROG_TYPE_LWT_IN,
9211 	},
9212 	{
9213 		"variable-offset stack access",
9214 		.insns = {
9215 			/* Fill the top 8 bytes of the stack */
9216 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9217 			/* Get an unknown value */
9218 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
9219 			/* Make it small and 4-byte aligned */
9220 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
9221 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
9222 			/* add it to fp.  We now have either fp-4 or fp-8, but
9223 			 * we don't know which
9224 			 */
9225 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
9226 			/* dereference it */
9227 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
9228 			BPF_EXIT_INSN(),
9229 		},
9230 		.errstr = "variable stack access var_off=(0xfffffffffffffff8; 0x4)",
9231 		.result = REJECT,
9232 		.prog_type = BPF_PROG_TYPE_LWT_IN,
9233 	},
9234 	{
9235 		"indirect variable-offset stack access",
9236 		.insns = {
9237 			/* Fill the top 8 bytes of the stack */
9238 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9239 			/* Get an unknown value */
9240 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
9241 			/* Make it small and 4-byte aligned */
9242 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
9243 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
9244 			/* add it to fp.  We now have either fp-4 or fp-8, but
9245 			 * we don't know which
9246 			 */
9247 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
9248 			/* dereference it indirectly */
9249 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9250 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9251 				     BPF_FUNC_map_lookup_elem),
9252 			BPF_MOV64_IMM(BPF_REG_0, 0),
9253 			BPF_EXIT_INSN(),
9254 		},
9255 		.fixup_map_hash_8b = { 5 },
9256 		.errstr = "variable stack read R2",
9257 		.result = REJECT,
9258 		.prog_type = BPF_PROG_TYPE_LWT_IN,
9259 	},
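	/* Direct stack access wraparound: constants near 2^31/2^30 are added
	 * to the frame pointer; whether the problem is a single too-large
	 * constant or the accumulated offset, the verifier must reject the
	 * resulting store.
	 */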
9260 	{
9261 		"direct stack access with 32-bit wraparound. test1",
9262 		.insns = {
9263 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9264 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
9265 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
9266 			BPF_MOV32_IMM(BPF_REG_0, 0),
9267 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
9268 			BPF_EXIT_INSN()
9269 		},
9270 		.errstr = "fp pointer and 2147483647",
9271 		.result = REJECT
9272 	},
9273 	{
9274 		"direct stack access with 32-bit wraparound. test2",
9275 		.insns = {
9276 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9277 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
9278 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
9279 			BPF_MOV32_IMM(BPF_REG_0, 0),
9280 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
9281 			BPF_EXIT_INSN()
9282 		},
9283 		.errstr = "fp pointer and 1073741823",
9284 		.result = REJECT
9285 	},
9286 	{
9287 		"direct stack access with 32-bit wraparound. test3",
9288 		.insns = {
9289 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9290 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
9291 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
9292 			BPF_MOV32_IMM(BPF_REG_0, 0),
9293 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
9294 			BPF_EXIT_INSN()
9295 		},
9296 		.errstr = "fp pointer offset 1073741822",
9297 		.result = REJECT
9298 	},
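	/* Both conditional jumps may be taken, skipping the writes to R0, so
	 * one path reaches the exit with R0 never written.  This exercises
	 * liveness pruning and write screening: the verifier must still
	 * notice that path and reject with "R0 !read_ok".
	 */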
9299 	{
9300 		"liveness pruning and write screening",
9301 		.insns = {
9302 			/* Get an unknown value */
9303 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
9304 			/* branch conditions teach us nothing about R2 */
9305 			BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
9306 			BPF_MOV64_IMM(BPF_REG_0, 0),
9307 			BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
9308 			BPF_MOV64_IMM(BPF_REG_0, 0),
9309 			BPF_EXIT_INSN(),
9310 		},
9311 		.errstr = "R0 !read_ok",
9312 		.result = REJECT,
9313 		.prog_type = BPF_PROG_TYPE_LWT_IN,
9314 	},
9315 	{
9316 		"varlen_map_value_access pruning",
9317 		.insns = {
9318 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9319 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9320 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9321 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9322 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9323 				     BPF_FUNC_map_lookup_elem),
9324 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
9325 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
9326 			BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
9327 			BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
9328 			BPF_MOV32_IMM(BPF_REG_1, 0),
9329 			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
9330 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9331 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
9332 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
9333 				   offsetof(struct test_val, foo)),
9334 			BPF_EXIT_INSN(),
9335 		},
9336 		.fixup_map_hash_48b = { 3 },
9337 		.errstr_unpriv = "R0 leaks addr",
9338 		.errstr = "R0 unbounded memory access",
9339 		.result_unpriv = REJECT,
9340 		.result = REJECT,
9341 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9342 	},
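	/* The BPF_END byte swap is only defined for the 32-bit BPF_ALU
	 * class; encoding it with BPF_ALU64 yields opcode 0xd7, which the
	 * verifier must reject as unknown.
	 */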
9343 	{
9344 		"invalid 64-bit BPF_END",
9345 		.insns = {
9346 			BPF_MOV32_IMM(BPF_REG_0, 0),
9347 			{
9348 				.code  = BPF_ALU64 | BPF_END | BPF_TO_LE,
9349 				.dst_reg = BPF_REG_0,
9350 				.src_reg = 0,
9351 				.off   = 0,
9352 				.imm   = 32,
9353 			},
9354 			BPF_EXIT_INSN(),
9355 		},
9356 		.errstr = "unknown opcode d7",
9357 		.result = REJECT,
9358 	},
9359 	{
9360 		"XDP, using ifindex from netdev",
9361 		.insns = {
9362 			BPF_MOV64_IMM(BPF_REG_0, 0),
9363 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9364 				    offsetof(struct xdp_md, ingress_ifindex)),
9365 			BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 1, 1),
9366 			BPF_MOV64_IMM(BPF_REG_0, 1),
9367 			BPF_EXIT_INSN(),
9368 		},
9369 		.result = ACCEPT,
9370 		.prog_type = BPF_PROG_TYPE_XDP,
9371 		.retval = 1,
9372 	},
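	/* XDP metadata area: xdp_md->data_meta points just in front of data.
	 * Like packet data, the meta pointer may only be dereferenced after
	 * a bounds check, here of the form data_meta + len <= data.  The
	 * variants that get the comparison wrong, check against data_end, or
	 * use the pointer after bpf_xdp_adjust_meta() are rejected.
	 */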
9373 	{
9374 		"meta access, test1",
9375 		.insns = {
9376 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9377 				    offsetof(struct xdp_md, data_meta)),
9378 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9379 				    offsetof(struct xdp_md, data)),
9380 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
9381 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9382 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
9383 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9384 			BPF_MOV64_IMM(BPF_REG_0, 0),
9385 			BPF_EXIT_INSN(),
9386 		},
9387 		.result = ACCEPT,
9388 		.prog_type = BPF_PROG_TYPE_XDP,
9389 	},
9390 	{
9391 		"meta access, test2",
9392 		.insns = {
9393 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9394 				    offsetof(struct xdp_md, data_meta)),
9395 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9396 				    offsetof(struct xdp_md, data)),
9397 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
9398 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 8),
9399 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
9400 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
9401 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
9402 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9403 			BPF_MOV64_IMM(BPF_REG_0, 0),
9404 			BPF_EXIT_INSN(),
9405 		},
9406 		.result = REJECT,
9407 		.errstr = "invalid access to packet, off=-8",
9408 		.prog_type = BPF_PROG_TYPE_XDP,
9409 	},
9410 	{
9411 		"meta access, test3",
9412 		.insns = {
9413 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9414 				    offsetof(struct xdp_md, data_meta)),
9415 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9416 				    offsetof(struct xdp_md, data_end)),
9417 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
9418 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9419 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
9420 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9421 			BPF_MOV64_IMM(BPF_REG_0, 0),
9422 			BPF_EXIT_INSN(),
9423 		},
9424 		.result = REJECT,
9425 		.errstr = "invalid access to packet",
9426 		.prog_type = BPF_PROG_TYPE_XDP,
9427 	},
9428 	{
9429 		"meta access, test4",
9430 		.insns = {
9431 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9432 				    offsetof(struct xdp_md, data_meta)),
9433 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9434 				    offsetof(struct xdp_md, data_end)),
9435 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
9436 				    offsetof(struct xdp_md, data)),
9437 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
9438 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9439 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
9440 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9441 			BPF_MOV64_IMM(BPF_REG_0, 0),
9442 			BPF_EXIT_INSN(),
9443 		},
9444 		.result = REJECT,
9445 		.errstr = "invalid access to packet",
9446 		.prog_type = BPF_PROG_TYPE_XDP,
9447 	},
9448 	{
9449 		"meta access, test5",
9450 		.insns = {
9451 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9452 				    offsetof(struct xdp_md, data_meta)),
9453 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
9454 				    offsetof(struct xdp_md, data)),
9455 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
9456 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9457 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_4, 3),
9458 			BPF_MOV64_IMM(BPF_REG_2, -8),
9459 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9460 				     BPF_FUNC_xdp_adjust_meta),
9461 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
9462 			BPF_MOV64_IMM(BPF_REG_0, 0),
9463 			BPF_EXIT_INSN(),
9464 		},
9465 		.result = REJECT,
9466 		.errstr = "R3 !read_ok",
9467 		.prog_type = BPF_PROG_TYPE_XDP,
9468 	},
9469 	{
9470 		"meta access, test6",
9471 		.insns = {
9472 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9473 				    offsetof(struct xdp_md, data_meta)),
9474 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9475 				    offsetof(struct xdp_md, data)),
9476 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
9477 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9478 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
9479 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
9480 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 1),
9481 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9482 			BPF_MOV64_IMM(BPF_REG_0, 0),
9483 			BPF_EXIT_INSN(),
9484 		},
9485 		.result = REJECT,
9486 		.errstr = "invalid access to packet",
9487 		.prog_type = BPF_PROG_TYPE_XDP,
9488 	},
9489 	{
9490 		"meta access, test7",
9491 		.insns = {
9492 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9493 				    offsetof(struct xdp_md, data_meta)),
9494 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9495 				    offsetof(struct xdp_md, data)),
9496 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
9497 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9498 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
9499 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
9500 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
9501 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9502 			BPF_MOV64_IMM(BPF_REG_0, 0),
9503 			BPF_EXIT_INSN(),
9504 		},
9505 		.result = ACCEPT,
9506 		.prog_type = BPF_PROG_TYPE_XDP,
9507 	},
9508 	{
9509 		"meta access, test8",
9510 		.insns = {
9511 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9512 				    offsetof(struct xdp_md, data_meta)),
9513 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9514 				    offsetof(struct xdp_md, data)),
9515 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
9516 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
9517 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
9518 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9519 			BPF_MOV64_IMM(BPF_REG_0, 0),
9520 			BPF_EXIT_INSN(),
9521 		},
9522 		.result = ACCEPT,
9523 		.prog_type = BPF_PROG_TYPE_XDP,
9524 	},
9525 	{
9526 		"meta access, test9",
9527 		.insns = {
9528 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9529 				    offsetof(struct xdp_md, data_meta)),
9530 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9531 				    offsetof(struct xdp_md, data)),
9532 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
9533 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
9534 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
9535 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
9536 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9537 			BPF_MOV64_IMM(BPF_REG_0, 0),
9538 			BPF_EXIT_INSN(),
9539 		},
9540 		.result = REJECT,
9541 		.errstr = "invalid access to packet",
9542 		.prog_type = BPF_PROG_TYPE_XDP,
9543 	},
9544 	{
9545 		"meta access, test10",
9546 		.insns = {
9547 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9548 				    offsetof(struct xdp_md, data_meta)),
9549 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9550 				    offsetof(struct xdp_md, data)),
9551 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
9552 				    offsetof(struct xdp_md, data_end)),
9553 			BPF_MOV64_IMM(BPF_REG_5, 42),
9554 			BPF_MOV64_IMM(BPF_REG_6, 24),
9555 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
9556 			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
9557 			BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
9558 			BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
9559 			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_5),
9560 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
9561 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
9562 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
9563 			BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_5, 1),
9564 			BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
9565 			BPF_MOV64_IMM(BPF_REG_0, 0),
9566 			BPF_EXIT_INSN(),
9567 		},
9568 		.result = REJECT,
9569 		.errstr = "invalid access to packet",
9570 		.prog_type = BPF_PROG_TYPE_XDP,
9571 	},
9572 	{
9573 		"meta access, test11",
9574 		.insns = {
9575 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9576 				    offsetof(struct xdp_md, data_meta)),
9577 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9578 				    offsetof(struct xdp_md, data)),
9579 			BPF_MOV64_IMM(BPF_REG_5, 42),
9580 			BPF_MOV64_IMM(BPF_REG_6, 24),
9581 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
9582 			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
9583 			BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
9584 			BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
9585 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_5),
9586 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
9587 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
9588 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
9589 			BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_3, 1),
9590 			BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_5, 0),
9591 			BPF_MOV64_IMM(BPF_REG_0, 0),
9592 			BPF_EXIT_INSN(),
9593 		},
9594 		.result = ACCEPT,
9595 		.prog_type = BPF_PROG_TYPE_XDP,
9596 	},
9597 	{
9598 		"meta access, test12",
9599 		.insns = {
9600 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9601 				    offsetof(struct xdp_md, data_meta)),
9602 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9603 				    offsetof(struct xdp_md, data)),
9604 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
9605 				    offsetof(struct xdp_md, data_end)),
9606 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
9607 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
9608 			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 5),
9609 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
9610 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
9611 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
9612 			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 1),
9613 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9614 			BPF_MOV64_IMM(BPF_REG_0, 0),
9615 			BPF_EXIT_INSN(),
9616 		},
9617 		.result = ACCEPT,
9618 		.prog_type = BPF_PROG_TYPE_XDP,
9619 	},
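	/* ALU arithmetic on a PTR_TO_CTX register must make it unusable
	 * for further ctx field loads.
	 */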
9620 	{
9621 		"arithmetic ops make PTR_TO_CTX unusable",
9622 		.insns = {
9623 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
9624 				      offsetof(struct __sk_buff, data) -
9625 				      offsetof(struct __sk_buff, mark)),
9626 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9627 				    offsetof(struct __sk_buff, mark)),
9628 			BPF_EXIT_INSN(),
9629 		},
9630 		.errstr = "dereference of modified ctx ptr",
9631 		.result = REJECT,
9632 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
9633 	},
9634 	{
9635 		"pkt_end - pkt_start is allowed",
9636 		.insns = {
9637 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9638 				    offsetof(struct __sk_buff, data_end)),
9639 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9640 				    offsetof(struct __sk_buff, data)),
9641 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
9642 			BPF_EXIT_INSN(),
9643 		},
9644 		.result = ACCEPT,
9645 		.retval = TEST_DATA_LEN,
9646 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
9647 	},
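	/* pkt_end itself may not be modified by pointer arithmetic. */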
9648 	{
9649 		"XDP pkt read, pkt_end mangling, bad access 1",
9650 		.insns = {
9651 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9652 				    offsetof(struct xdp_md, data)),
9653 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9654 				    offsetof(struct xdp_md, data_end)),
9655 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9656 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9657 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
9658 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9659 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9660 			BPF_MOV64_IMM(BPF_REG_0, 0),
9661 			BPF_EXIT_INSN(),
9662 		},
9663 		.errstr = "R3 pointer arithmetic on pkt_end",
9664 		.result = REJECT,
9665 		.prog_type = BPF_PROG_TYPE_XDP,
9666 	},
9667 	{
9668 		"XDP pkt read, pkt_end mangling, bad access 2",
9669 		.insns = {
9670 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9671 				    offsetof(struct xdp_md, data)),
9672 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9673 				    offsetof(struct xdp_md, data_end)),
9674 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9675 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9676 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_3, 8),
9677 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9678 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9679 			BPF_MOV64_IMM(BPF_REG_0, 0),
9680 			BPF_EXIT_INSN(),
9681 		},
9682 		.errstr = "R3 pointer arithmetic on pkt_end",
9683 		.result = REJECT,
9684 		.prog_type = BPF_PROG_TYPE_XDP,
9685 	},
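	/* Matrix of pkt_data vs pkt_end range checks: each of JGT, JLT,
	 * JGE and JLE in both operand orders, with a "good access" that
	 * stays inside the verified window and "bad access" variants
	 * that either read beyond the verified bound or perform the read
	 * on the unchecked branch.
	 */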
9686 	{
9687 		"XDP pkt read, pkt_data' > pkt_end, good access",
9688 		.insns = {
9689 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9690 				    offsetof(struct xdp_md, data)),
9691 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9692 				    offsetof(struct xdp_md, data_end)),
9693 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9694 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9695 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9696 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9697 			BPF_MOV64_IMM(BPF_REG_0, 0),
9698 			BPF_EXIT_INSN(),
9699 		},
9700 		.result = ACCEPT,
9701 		.prog_type = BPF_PROG_TYPE_XDP,
9702 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9703 	},
9704 	{
9705 		"XDP pkt read, pkt_data' > pkt_end, bad access 1",
9706 		.insns = {
9707 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9708 				    offsetof(struct xdp_md, data)),
9709 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9710 				    offsetof(struct xdp_md, data_end)),
9711 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9712 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9713 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9714 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9715 			BPF_MOV64_IMM(BPF_REG_0, 0),
9716 			BPF_EXIT_INSN(),
9717 		},
9718 		.errstr = "R1 offset is outside of the packet",
9719 		.result = REJECT,
9720 		.prog_type = BPF_PROG_TYPE_XDP,
9721 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9722 	},
9723 	{
9724 		"XDP pkt read, pkt_data' > pkt_end, bad access 2",
9725 		.insns = {
9726 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9727 				    offsetof(struct xdp_md, data)),
9728 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9729 				    offsetof(struct xdp_md, data_end)),
9730 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9731 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9732 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
9733 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9734 			BPF_MOV64_IMM(BPF_REG_0, 0),
9735 			BPF_EXIT_INSN(),
9736 		},
9737 		.errstr = "R1 offset is outside of the packet",
9738 		.result = REJECT,
9739 		.prog_type = BPF_PROG_TYPE_XDP,
9740 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9741 	},
9742 	{
9743 		"XDP pkt read, pkt_end > pkt_data', good access",
9744 		.insns = {
9745 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9746 				    offsetof(struct xdp_md, data)),
9747 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9748 				    offsetof(struct xdp_md, data_end)),
9749 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9750 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9751 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9752 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9753 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9754 			BPF_MOV64_IMM(BPF_REG_0, 0),
9755 			BPF_EXIT_INSN(),
9756 		},
9757 		.result = ACCEPT,
9758 		.prog_type = BPF_PROG_TYPE_XDP,
9759 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9760 	},
9761 	{
9762 		"XDP pkt read, pkt_end > pkt_data', bad access 1",
9763 		.insns = {
9764 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9765 				    offsetof(struct xdp_md, data)),
9766 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9767 				    offsetof(struct xdp_md, data_end)),
9768 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9769 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9770 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9771 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9772 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9773 			BPF_MOV64_IMM(BPF_REG_0, 0),
9774 			BPF_EXIT_INSN(),
9775 		},
9776 		.errstr = "R1 offset is outside of the packet",
9777 		.result = REJECT,
9778 		.prog_type = BPF_PROG_TYPE_XDP,
9779 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9780 	},
9781 	{
9782 		"XDP pkt read, pkt_end > pkt_data', bad access 2",
9783 		.insns = {
9784 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9785 				    offsetof(struct xdp_md, data)),
9786 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9787 				    offsetof(struct xdp_md, data_end)),
9788 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9789 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9790 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9791 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9792 			BPF_MOV64_IMM(BPF_REG_0, 0),
9793 			BPF_EXIT_INSN(),
9794 		},
9795 		.errstr = "R1 offset is outside of the packet",
9796 		.result = REJECT,
9797 		.prog_type = BPF_PROG_TYPE_XDP,
9798 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9799 	},
9800 	{
9801 		"XDP pkt read, pkt_data' < pkt_end, good access",
9802 		.insns = {
9803 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9804 				    offsetof(struct xdp_md, data)),
9805 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9806 				    offsetof(struct xdp_md, data_end)),
9807 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9808 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9809 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9810 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9811 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9812 			BPF_MOV64_IMM(BPF_REG_0, 0),
9813 			BPF_EXIT_INSN(),
9814 		},
9815 		.result = ACCEPT,
9816 		.prog_type = BPF_PROG_TYPE_XDP,
9817 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9818 	},
9819 	{
9820 		"XDP pkt read, pkt_data' < pkt_end, bad access 1",
9821 		.insns = {
9822 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9823 				    offsetof(struct xdp_md, data)),
9824 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9825 				    offsetof(struct xdp_md, data_end)),
9826 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9827 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9828 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9829 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9830 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9831 			BPF_MOV64_IMM(BPF_REG_0, 0),
9832 			BPF_EXIT_INSN(),
9833 		},
9834 		.errstr = "R1 offset is outside of the packet",
9835 		.result = REJECT,
9836 		.prog_type = BPF_PROG_TYPE_XDP,
9837 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9838 	},
9839 	{
9840 		"XDP pkt read, pkt_data' < pkt_end, bad access 2",
9841 		.insns = {
9842 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9843 				    offsetof(struct xdp_md, data)),
9844 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9845 				    offsetof(struct xdp_md, data_end)),
9846 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9847 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9848 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9849 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9850 			BPF_MOV64_IMM(BPF_REG_0, 0),
9851 			BPF_EXIT_INSN(),
9852 		},
9853 		.errstr = "R1 offset is outside of the packet",
9854 		.result = REJECT,
9855 		.prog_type = BPF_PROG_TYPE_XDP,
9856 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9857 	},
9858 	{
9859 		"XDP pkt read, pkt_end < pkt_data', good access",
9860 		.insns = {
9861 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9862 				    offsetof(struct xdp_md, data)),
9863 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9864 				    offsetof(struct xdp_md, data_end)),
9865 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9866 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9867 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
9868 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9869 			BPF_MOV64_IMM(BPF_REG_0, 0),
9870 			BPF_EXIT_INSN(),
9871 		},
9872 		.result = ACCEPT,
9873 		.prog_type = BPF_PROG_TYPE_XDP,
9874 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9875 	},
9876 	{
9877 		"XDP pkt read, pkt_end < pkt_data', bad access 1",
9878 		.insns = {
9879 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9880 				    offsetof(struct xdp_md, data)),
9881 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9882 				    offsetof(struct xdp_md, data_end)),
9883 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9884 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9885 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
9886 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9887 			BPF_MOV64_IMM(BPF_REG_0, 0),
9888 			BPF_EXIT_INSN(),
9889 		},
9890 		.errstr = "R1 offset is outside of the packet",
9891 		.result = REJECT,
9892 		.prog_type = BPF_PROG_TYPE_XDP,
9893 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9894 	},
9895 	{
9896 		"XDP pkt read, pkt_end < pkt_data', bad access 2",
9897 		.insns = {
9898 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9899 				    offsetof(struct xdp_md, data)),
9900 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9901 				    offsetof(struct xdp_md, data_end)),
9902 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9903 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9904 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
9905 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9906 			BPF_MOV64_IMM(BPF_REG_0, 0),
9907 			BPF_EXIT_INSN(),
9908 		},
9909 		.errstr = "R1 offset is outside of the packet",
9910 		.result = REJECT,
9911 		.prog_type = BPF_PROG_TYPE_XDP,
9912 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9913 	},
9914 	{
9915 		"XDP pkt read, pkt_data' >= pkt_end, good access",
9916 		.insns = {
9917 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9918 				    offsetof(struct xdp_md, data)),
9919 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9920 				    offsetof(struct xdp_md, data_end)),
9921 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9922 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9923 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
9924 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9925 			BPF_MOV64_IMM(BPF_REG_0, 0),
9926 			BPF_EXIT_INSN(),
9927 		},
9928 		.result = ACCEPT,
9929 		.prog_type = BPF_PROG_TYPE_XDP,
9930 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9931 	},
9932 	{
9933 		"XDP pkt read, pkt_data' >= pkt_end, bad access 1",
9934 		.insns = {
9935 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9936 				    offsetof(struct xdp_md, data)),
9937 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9938 				    offsetof(struct xdp_md, data_end)),
9939 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9940 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9941 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
9942 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9943 			BPF_MOV64_IMM(BPF_REG_0, 0),
9944 			BPF_EXIT_INSN(),
9945 		},
9946 		.errstr = "R1 offset is outside of the packet",
9947 		.result = REJECT,
9948 		.prog_type = BPF_PROG_TYPE_XDP,
9949 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9950 	},
9951 	{
9952 		"XDP pkt read, pkt_data' >= pkt_end, bad access 2",
9953 		.insns = {
9954 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9955 				    offsetof(struct xdp_md, data)),
9956 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9957 				    offsetof(struct xdp_md, data_end)),
9958 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9959 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9960 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
9961 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9962 			BPF_MOV64_IMM(BPF_REG_0, 0),
9963 			BPF_EXIT_INSN(),
9964 		},
9965 		.errstr = "R1 offset is outside of the packet",
9966 		.result = REJECT,
9967 		.prog_type = BPF_PROG_TYPE_XDP,
9968 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9969 	},
9970 	{
9971 		"XDP pkt read, pkt_end >= pkt_data', good access",
9972 		.insns = {
9973 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9974 				    offsetof(struct xdp_md, data)),
9975 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9976 				    offsetof(struct xdp_md, data_end)),
9977 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9978 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9979 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
9980 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9981 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9982 			BPF_MOV64_IMM(BPF_REG_0, 0),
9983 			BPF_EXIT_INSN(),
9984 		},
9985 		.result = ACCEPT,
9986 		.prog_type = BPF_PROG_TYPE_XDP,
9987 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9988 	},
9989 	{
9990 		"XDP pkt read, pkt_end >= pkt_data', bad access 1",
9991 		.insns = {
9992 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9993 				    offsetof(struct xdp_md, data)),
9994 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9995 				    offsetof(struct xdp_md, data_end)),
9996 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9997 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9998 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
9999 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10000 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
10001 			BPF_MOV64_IMM(BPF_REG_0, 0),
10002 			BPF_EXIT_INSN(),
10003 		},
10004 		.errstr = "R1 offset is outside of the packet",
10005 		.result = REJECT,
10006 		.prog_type = BPF_PROG_TYPE_XDP,
10007 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10008 	},
10009 	{
10010 		"XDP pkt read, pkt_end >= pkt_data', bad access 2",
10011 		.insns = {
10012 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10013 				    offsetof(struct xdp_md, data)),
10014 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10015 				    offsetof(struct xdp_md, data_end)),
10016 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10017 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10018 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
10019 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10020 			BPF_MOV64_IMM(BPF_REG_0, 0),
10021 			BPF_EXIT_INSN(),
10022 		},
10023 		.errstr = "R1 offset is outside of the packet",
10024 		.result = REJECT,
10025 		.prog_type = BPF_PROG_TYPE_XDP,
10026 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10027 	},
10028 	{
10029 		"XDP pkt read, pkt_data' <= pkt_end, good access",
10030 		.insns = {
10031 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10032 				    offsetof(struct xdp_md, data)),
10033 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10034 				    offsetof(struct xdp_md, data_end)),
10035 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10036 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10037 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
10038 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10039 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10040 			BPF_MOV64_IMM(BPF_REG_0, 0),
10041 			BPF_EXIT_INSN(),
10042 		},
10043 		.result = ACCEPT,
10044 		.prog_type = BPF_PROG_TYPE_XDP,
10045 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10046 	},
10047 	{
10048 		"XDP pkt read, pkt_data' <= pkt_end, bad access 1",
10049 		.insns = {
10050 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10051 				    offsetof(struct xdp_md, data)),
10052 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10053 				    offsetof(struct xdp_md, data_end)),
10054 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10055 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10056 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
10057 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10058 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
10059 			BPF_MOV64_IMM(BPF_REG_0, 0),
10060 			BPF_EXIT_INSN(),
10061 		},
10062 		.errstr = "R1 offset is outside of the packet",
10063 		.result = REJECT,
10064 		.prog_type = BPF_PROG_TYPE_XDP,
10065 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10066 	},
10067 	{
10068 		"XDP pkt read, pkt_data' <= pkt_end, bad access 2",
10069 		.insns = {
10070 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10071 				    offsetof(struct xdp_md, data)),
10072 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10073 				    offsetof(struct xdp_md, data_end)),
10074 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10075 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10076 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
10077 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10078 			BPF_MOV64_IMM(BPF_REG_0, 0),
10079 			BPF_EXIT_INSN(),
10080 		},
10081 		.errstr = "R1 offset is outside of the packet",
10082 		.result = REJECT,
10083 		.prog_type = BPF_PROG_TYPE_XDP,
10084 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10085 	},
10086 	{
10087 		"XDP pkt read, pkt_end <= pkt_data', good access",
10088 		.insns = {
10089 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10090 				    offsetof(struct xdp_md, data)),
10091 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10092 				    offsetof(struct xdp_md, data_end)),
10093 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10094 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10095 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
10096 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10097 			BPF_MOV64_IMM(BPF_REG_0, 0),
10098 			BPF_EXIT_INSN(),
10099 		},
10100 		.result = ACCEPT,
10101 		.prog_type = BPF_PROG_TYPE_XDP,
10102 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10103 	},
10104 	{
10105 		"XDP pkt read, pkt_end <= pkt_data', bad access 1",
10106 		.insns = {
10107 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10108 				    offsetof(struct xdp_md, data)),
10109 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10110 				    offsetof(struct xdp_md, data_end)),
10111 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10112 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10113 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
10114 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10115 			BPF_MOV64_IMM(BPF_REG_0, 0),
10116 			BPF_EXIT_INSN(),
10117 		},
10118 		.errstr = "R1 offset is outside of the packet",
10119 		.result = REJECT,
10120 		.prog_type = BPF_PROG_TYPE_XDP,
10121 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10122 	},
10123 	{
10124 		"XDP pkt read, pkt_end <= pkt_data', bad access 2",
10125 		.insns = {
10126 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10127 				    offsetof(struct xdp_md, data)),
10128 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10129 				    offsetof(struct xdp_md, data_end)),
10130 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10131 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10132 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
10133 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10134 			BPF_MOV64_IMM(BPF_REG_0, 0),
10135 			BPF_EXIT_INSN(),
10136 		},
10137 		.errstr = "R1 offset is outside of the packet",
10138 		.result = REJECT,
10139 		.prog_type = BPF_PROG_TYPE_XDP,
10140 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10141 	},
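	/* Same comparison matrix as above, but bounding pkt_meta against
	 * pkt_data instead of pkt_data against pkt_end.
	 */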
10142 	{
10143 		"XDP pkt read, pkt_meta' > pkt_data, good access",
10144 		.insns = {
10145 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10146 				    offsetof(struct xdp_md, data_meta)),
10147 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10148 				    offsetof(struct xdp_md, data)),
10149 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10150 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10151 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
10152 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10153 			BPF_MOV64_IMM(BPF_REG_0, 0),
10154 			BPF_EXIT_INSN(),
10155 		},
10156 		.result = ACCEPT,
10157 		.prog_type = BPF_PROG_TYPE_XDP,
10158 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10159 	},
10160 	{
10161 		"XDP pkt read, pkt_meta' > pkt_data, bad access 1",
10162 		.insns = {
10163 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10164 				    offsetof(struct xdp_md, data_meta)),
10165 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10166 				    offsetof(struct xdp_md, data)),
10167 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10168 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10169 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
10170 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
10171 			BPF_MOV64_IMM(BPF_REG_0, 0),
10172 			BPF_EXIT_INSN(),
10173 		},
10174 		.errstr = "R1 offset is outside of the packet",
10175 		.result = REJECT,
10176 		.prog_type = BPF_PROG_TYPE_XDP,
10177 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10178 	},
10179 	{
10180 		"XDP pkt read, pkt_meta' > pkt_data, bad access 2",
10181 		.insns = {
10182 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10183 				    offsetof(struct xdp_md, data_meta)),
10184 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10185 				    offsetof(struct xdp_md, data)),
10186 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10187 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10188 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
10189 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10190 			BPF_MOV64_IMM(BPF_REG_0, 0),
10191 			BPF_EXIT_INSN(),
10192 		},
10193 		.errstr = "R1 offset is outside of the packet",
10194 		.result = REJECT,
10195 		.prog_type = BPF_PROG_TYPE_XDP,
10196 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10197 	},
10198 	{
10199 		"XDP pkt read, pkt_data > pkt_meta', good access",
10200 		.insns = {
10201 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10202 				    offsetof(struct xdp_md, data_meta)),
10203 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10204 				    offsetof(struct xdp_md, data)),
10205 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10206 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10207 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
10208 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10209 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10210 			BPF_MOV64_IMM(BPF_REG_0, 0),
10211 			BPF_EXIT_INSN(),
10212 		},
10213 		.result = ACCEPT,
10214 		.prog_type = BPF_PROG_TYPE_XDP,
10215 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10216 	},
10217 	{
10218 		"XDP pkt read, pkt_data > pkt_meta', bad access 1",
10219 		.insns = {
10220 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10221 				    offsetof(struct xdp_md, data_meta)),
10222 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10223 				    offsetof(struct xdp_md, data)),
10224 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10225 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10226 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
10227 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10228 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10229 			BPF_MOV64_IMM(BPF_REG_0, 0),
10230 			BPF_EXIT_INSN(),
10231 		},
10232 		.errstr = "R1 offset is outside of the packet",
10233 		.result = REJECT,
10234 		.prog_type = BPF_PROG_TYPE_XDP,
10235 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10236 	},
10237 	{
10238 		"XDP pkt read, pkt_data > pkt_meta', bad access 2",
10239 		.insns = {
10240 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10241 				    offsetof(struct xdp_md, data_meta)),
10242 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10243 				    offsetof(struct xdp_md, data)),
10244 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10245 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10246 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
10247 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10248 			BPF_MOV64_IMM(BPF_REG_0, 0),
10249 			BPF_EXIT_INSN(),
10250 		},
10251 		.errstr = "R1 offset is outside of the packet",
10252 		.result = REJECT,
10253 		.prog_type = BPF_PROG_TYPE_XDP,
10254 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10255 	},
10256 	{
10257 		"XDP pkt read, pkt_meta' < pkt_data, good access",
10258 		.insns = {
10259 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10260 				    offsetof(struct xdp_md, data_meta)),
10261 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10262 				    offsetof(struct xdp_md, data)),
10263 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10264 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10265 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
10266 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10267 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10268 			BPF_MOV64_IMM(BPF_REG_0, 0),
10269 			BPF_EXIT_INSN(),
10270 		},
10271 		.result = ACCEPT,
10272 		.prog_type = BPF_PROG_TYPE_XDP,
10273 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10274 	},
10275 	{
10276 		"XDP pkt read, pkt_meta' < pkt_data, bad access 1",
10277 		.insns = {
10278 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10279 				    offsetof(struct xdp_md, data_meta)),
10280 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10281 				    offsetof(struct xdp_md, data)),
10282 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10283 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10284 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
10285 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10286 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10287 			BPF_MOV64_IMM(BPF_REG_0, 0),
10288 			BPF_EXIT_INSN(),
10289 		},
10290 		.errstr = "R1 offset is outside of the packet",
10291 		.result = REJECT,
10292 		.prog_type = BPF_PROG_TYPE_XDP,
10293 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10294 	},
10295 	{
10296 		"XDP pkt read, pkt_meta' < pkt_data, bad access 2",
10297 		.insns = {
10298 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10299 				    offsetof(struct xdp_md, data_meta)),
10300 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10301 				    offsetof(struct xdp_md, data)),
10302 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10303 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10304 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
10305 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10306 			BPF_MOV64_IMM(BPF_REG_0, 0),
10307 			BPF_EXIT_INSN(),
10308 		},
10309 		.errstr = "R1 offset is outside of the packet",
10310 		.result = REJECT,
10311 		.prog_type = BPF_PROG_TYPE_XDP,
10312 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10313 	},
10314 	{
10315 		"XDP pkt read, pkt_data < pkt_meta', good access",
10316 		.insns = {
10317 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10318 				    offsetof(struct xdp_md, data_meta)),
10319 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10320 				    offsetof(struct xdp_md, data)),
10321 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10322 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10323 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
10324 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10325 			BPF_MOV64_IMM(BPF_REG_0, 0),
10326 			BPF_EXIT_INSN(),
10327 		},
10328 		.result = ACCEPT,
10329 		.prog_type = BPF_PROG_TYPE_XDP,
10330 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10331 	},
10332 	{
10333 		"XDP pkt read, pkt_data < pkt_meta', bad access 1",
10334 		.insns = {
10335 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10336 				    offsetof(struct xdp_md, data_meta)),
10337 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10338 				    offsetof(struct xdp_md, data)),
10339 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10340 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10341 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
10342 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
10343 			BPF_MOV64_IMM(BPF_REG_0, 0),
10344 			BPF_EXIT_INSN(),
10345 		},
10346 		.errstr = "R1 offset is outside of the packet",
10347 		.result = REJECT,
10348 		.prog_type = BPF_PROG_TYPE_XDP,
10349 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10350 	},
10351 	{
10352 		"XDP pkt read, pkt_data < pkt_meta', bad access 2",
10353 		.insns = {
10354 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10355 				    offsetof(struct xdp_md, data_meta)),
10356 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10357 				    offsetof(struct xdp_md, data)),
10358 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10359 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10360 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
10361 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10362 			BPF_MOV64_IMM(BPF_REG_0, 0),
10363 			BPF_EXIT_INSN(),
10364 		},
10365 		.errstr = "R1 offset is outside of the packet",
10366 		.result = REJECT,
10367 		.prog_type = BPF_PROG_TYPE_XDP,
10368 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10369 	},
10370 	{
10371 		"XDP pkt read, pkt_meta' >= pkt_data, good access",
10372 		.insns = {
10373 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10374 				    offsetof(struct xdp_md, data_meta)),
10375 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10376 				    offsetof(struct xdp_md, data)),
10377 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10378 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10379 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
10380 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10381 			BPF_MOV64_IMM(BPF_REG_0, 0),
10382 			BPF_EXIT_INSN(),
10383 		},
10384 		.result = ACCEPT,
10385 		.prog_type = BPF_PROG_TYPE_XDP,
10386 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10387 	},
10388 	{
10389 		"XDP pkt read, pkt_meta' >= pkt_data, bad access 1",
10390 		.insns = {
10391 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10392 				    offsetof(struct xdp_md, data_meta)),
10393 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10394 				    offsetof(struct xdp_md, data)),
10395 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10396 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10397 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
10398 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10399 			BPF_MOV64_IMM(BPF_REG_0, 0),
10400 			BPF_EXIT_INSN(),
10401 		},
10402 		.errstr = "R1 offset is outside of the packet",
10403 		.result = REJECT,
10404 		.prog_type = BPF_PROG_TYPE_XDP,
10405 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10406 	},
10407 	{
10408 		"XDP pkt read, pkt_meta' >= pkt_data, bad access 2",
10409 		.insns = {
10410 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10411 				    offsetof(struct xdp_md, data_meta)),
10412 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10413 				    offsetof(struct xdp_md, data)),
10414 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10415 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10416 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
10417 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10418 			BPF_MOV64_IMM(BPF_REG_0, 0),
10419 			BPF_EXIT_INSN(),
10420 		},
10421 		.errstr = "R1 offset is outside of the packet",
10422 		.result = REJECT,
10423 		.prog_type = BPF_PROG_TYPE_XDP,
10424 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10425 	},
10426 	{
10427 		"XDP pkt read, pkt_data >= pkt_meta', good access",
10428 		.insns = {
10429 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10430 				    offsetof(struct xdp_md, data_meta)),
10431 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10432 				    offsetof(struct xdp_md, data)),
10433 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10434 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10435 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
10436 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10437 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10438 			BPF_MOV64_IMM(BPF_REG_0, 0),
10439 			BPF_EXIT_INSN(),
10440 		},
10441 		.result = ACCEPT,
10442 		.prog_type = BPF_PROG_TYPE_XDP,
10443 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10444 	},
10445 	{
10446 		"XDP pkt read, pkt_data >= pkt_meta', bad access 1",
10447 		.insns = {
10448 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10449 				    offsetof(struct xdp_md, data_meta)),
10450 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10451 				    offsetof(struct xdp_md, data)),
10452 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10453 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10454 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
10455 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10456 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
10457 			BPF_MOV64_IMM(BPF_REG_0, 0),
10458 			BPF_EXIT_INSN(),
10459 		},
10460 		.errstr = "R1 offset is outside of the packet",
10461 		.result = REJECT,
10462 		.prog_type = BPF_PROG_TYPE_XDP,
10463 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10464 	},
10465 	{
10466 		"XDP pkt read, pkt_data >= pkt_meta', bad access 2",
10467 		.insns = {
10468 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10469 				    offsetof(struct xdp_md, data_meta)),
10470 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10471 				    offsetof(struct xdp_md, data)),
10472 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10473 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10474 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
10475 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10476 			BPF_MOV64_IMM(BPF_REG_0, 0),
10477 			BPF_EXIT_INSN(),
10478 		},
10479 		.errstr = "R1 offset is outside of the packet",
10480 		.result = REJECT,
10481 		.prog_type = BPF_PROG_TYPE_XDP,
10482 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10483 	},
10484 	{
10485 		"XDP pkt read, pkt_meta' <= pkt_data, good access",
10486 		.insns = {
10487 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10488 				    offsetof(struct xdp_md, data_meta)),
10489 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10490 				    offsetof(struct xdp_md, data)),
10491 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10492 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10493 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
10494 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10495 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10496 			BPF_MOV64_IMM(BPF_REG_0, 0),
10497 			BPF_EXIT_INSN(),
10498 		},
10499 		.result = ACCEPT,
10500 		.prog_type = BPF_PROG_TYPE_XDP,
10501 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10502 	},
10503 	{
10504 		"XDP pkt read, pkt_meta' <= pkt_data, bad access 1",
10505 		.insns = {
10506 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10507 				    offsetof(struct xdp_md, data_meta)),
10508 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10509 				    offsetof(struct xdp_md, data)),
10510 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10511 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10512 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
10513 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10514 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
10515 			BPF_MOV64_IMM(BPF_REG_0, 0),
10516 			BPF_EXIT_INSN(),
10517 		},
10518 		.errstr = "R1 offset is outside of the packet",
10519 		.result = REJECT,
10520 		.prog_type = BPF_PROG_TYPE_XDP,
10521 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10522 	},
10523 	{
10524 		"XDP pkt read, pkt_meta' <= pkt_data, bad access 2",
10525 		.insns = {
10526 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10527 				    offsetof(struct xdp_md, data_meta)),
10528 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10529 				    offsetof(struct xdp_md, data)),
10530 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10531 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10532 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
10533 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10534 			BPF_MOV64_IMM(BPF_REG_0, 0),
10535 			BPF_EXIT_INSN(),
10536 		},
10537 		.errstr = "R1 offset is outside of the packet",
10538 		.result = REJECT,
10539 		.prog_type = BPF_PROG_TYPE_XDP,
10540 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10541 	},
10542 	{
10543 		"XDP pkt read, pkt_data <= pkt_meta', good access",
10544 		.insns = {
10545 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10546 				    offsetof(struct xdp_md, data_meta)),
10547 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10548 				    offsetof(struct xdp_md, data)),
10549 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10550 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10551 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
10552 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10553 			BPF_MOV64_IMM(BPF_REG_0, 0),
10554 			BPF_EXIT_INSN(),
10555 		},
10556 		.result = ACCEPT,
10557 		.prog_type = BPF_PROG_TYPE_XDP,
10558 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10559 	},
10560 	{
10561 		"XDP pkt read, pkt_data <= pkt_meta', bad access 1",
10562 		.insns = {
10563 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10564 				    offsetof(struct xdp_md, data_meta)),
10565 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10566 				    offsetof(struct xdp_md, data)),
10567 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10568 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10569 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
10570 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10571 			BPF_MOV64_IMM(BPF_REG_0, 0),
10572 			BPF_EXIT_INSN(),
10573 		},
10574 		.errstr = "R1 offset is outside of the packet",
10575 		.result = REJECT,
10576 		.prog_type = BPF_PROG_TYPE_XDP,
10577 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10578 	},
10579 	{
10580 		"XDP pkt read, pkt_data <= pkt_meta', bad access 2",
10581 		.insns = {
10582 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10583 				    offsetof(struct xdp_md, data_meta)),
10584 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10585 				    offsetof(struct xdp_md, data)),
10586 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10587 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10588 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
10589 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10590 			BPF_MOV64_IMM(BPF_REG_0, 0),
10591 			BPF_EXIT_INSN(),
10592 		},
10593 		.errstr = "R1 offset is outside of the packet",
10594 		.result = REJECT,
10595 		.prog_type = BPF_PROG_TYPE_XDP,
10596 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10597 	},
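	/* Bounds deduction from signed compares against constants:
	 * subtracting a pointer from a scalar is always rejected,
	 * modified ctx pointers may not be dereferenced, and ctx pointer
	 * math with an unbounded scalar is refused outright.
	 */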
10598 	{
10599 		"check deducing bounds from const, 1",
10600 		.insns = {
10601 			BPF_MOV64_IMM(BPF_REG_0, 1),
10602 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 0),
10603 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10604 			BPF_EXIT_INSN(),
10605 		},
10606 		.result = REJECT,
10607 		.errstr = "R0 tried to subtract pointer from scalar",
10608 	},
10609 	{
10610 		"check deducing bounds from const, 2",
10611 		.insns = {
10612 			BPF_MOV64_IMM(BPF_REG_0, 1),
10613 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
10614 			BPF_EXIT_INSN(),
10615 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 1, 1),
10616 			BPF_EXIT_INSN(),
10617 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
10618 			BPF_EXIT_INSN(),
10619 		},
10620 		.result = ACCEPT,
10621 		.retval = 1,
10622 	},
10623 	{
10624 		"check deducing bounds from const, 3",
10625 		.insns = {
10626 			BPF_MOV64_IMM(BPF_REG_0, 0),
10627 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
10628 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10629 			BPF_EXIT_INSN(),
10630 		},
10631 		.result = REJECT,
10632 		.errstr = "R0 tried to subtract pointer from scalar",
10633 	},
10634 	{
10635 		"check deducing bounds from const, 4",
10636 		.insns = {
10637 			BPF_MOV64_IMM(BPF_REG_0, 0),
10638 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1),
10639 			BPF_EXIT_INSN(),
10640 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
10641 			BPF_EXIT_INSN(),
10642 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
10643 			BPF_EXIT_INSN(),
10644 		},
10645 		.result = ACCEPT,
10646 	},
10647 	{
10648 		"check deducing bounds from const, 5",
10649 		.insns = {
10650 			BPF_MOV64_IMM(BPF_REG_0, 0),
10651 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
10652 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10653 			BPF_EXIT_INSN(),
10654 		},
10655 		.result = REJECT,
10656 		.errstr = "R0 tried to subtract pointer from scalar",
10657 	},
10658 	{
10659 		"check deducing bounds from const, 6",
10660 		.insns = {
10661 			BPF_MOV64_IMM(BPF_REG_0, 0),
10662 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
10663 			BPF_EXIT_INSN(),
10664 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10665 			BPF_EXIT_INSN(),
10666 		},
10667 		.result = REJECT,
10668 		.errstr = "R0 tried to subtract pointer from scalar",
10669 	},
10670 	{
10671 		"check deducing bounds from const, 7",
10672 		.insns = {
10673 			BPF_MOV64_IMM(BPF_REG_0, ~0),
10674 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
10675 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
10676 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10677 				    offsetof(struct __sk_buff, mark)),
10678 			BPF_EXIT_INSN(),
10679 		},
10680 		.result = REJECT,
10681 		.errstr = "dereference of modified ctx ptr",
10682 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10683 	},
10684 	{
10685 		"check deducing bounds from const, 8",
10686 		.insns = {
10687 			BPF_MOV64_IMM(BPF_REG_0, ~0),
10688 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
10689 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
10690 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10691 				    offsetof(struct __sk_buff, mark)),
10692 			BPF_EXIT_INSN(),
10693 		},
10694 		.result = REJECT,
10695 		.errstr = "dereference of modified ctx ptr",
10696 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10697 	},
10698 	{
10699 		"check deducing bounds from const, 9",
10700 		.insns = {
10701 			BPF_MOV64_IMM(BPF_REG_0, 0),
10702 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
10703 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10704 			BPF_EXIT_INSN(),
10705 		},
10706 		.result = REJECT,
10707 		.errstr = "R0 tried to subtract pointer from scalar",
10708 	},
10709 	{
10710 		"check deducing bounds from const, 10",
10711 		.insns = {
10712 			BPF_MOV64_IMM(BPF_REG_0, 0),
10713 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
10714 			/* Marks reg as unknown. */
10715 			BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0),
10716 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10717 			BPF_EXIT_INSN(),
10718 		},
10719 		.result = REJECT,
10720 		.errstr = "math between ctx pointer and register with unbounded min value is not allowed",
10721 	},
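	/* BPF_PROG_TYPE_CGROUP_SOCK return codes: R0 must be provably
	 * 0 or 1 at exit, so unbounded or out-of-range values are
	 * rejected.
	 */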
10722 	{
10723 		"bpf_exit with invalid return code. test1",
10724 		.insns = {
10725 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10726 			BPF_EXIT_INSN(),
10727 		},
10728 		.errstr = "R0 has value (0x0; 0xffffffff)",
10729 		.result = REJECT,
10730 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10731 	},
10732 	{
10733 		"bpf_exit with invalid return code. test2",
10734 		.insns = {
10735 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10736 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
10737 			BPF_EXIT_INSN(),
10738 		},
10739 		.result = ACCEPT,
10740 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10741 	},
10742 	{
10743 		"bpf_exit with invalid return code. test3",
10744 		.insns = {
10745 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10746 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 3),
10747 			BPF_EXIT_INSN(),
10748 		},
10749 		.errstr = "R0 has value (0x0; 0x3)",
10750 		.result = REJECT,
10751 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10752 	},
10753 	{
10754 		"bpf_exit with invalid return code. test4",
10755 		.insns = {
10756 			BPF_MOV64_IMM(BPF_REG_0, 1),
10757 			BPF_EXIT_INSN(),
10758 		},
10759 		.result = ACCEPT,
10760 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10761 	},
10762 	{
10763 		"bpf_exit with invalid return code. test5",
10764 		.insns = {
10765 			BPF_MOV64_IMM(BPF_REG_0, 2),
10766 			BPF_EXIT_INSN(),
10767 		},
10768 		.errstr = "R0 has value (0x2; 0x0)",
10769 		.result = REJECT,
10770 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10771 	},
10772 	{
10773 		"bpf_exit with invalid return code. test6",
10774 		.insns = {
10775 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
10776 			BPF_EXIT_INSN(),
10777 		},
10778 		.errstr = "R0 is not a known value (ctx)",
10779 		.result = REJECT,
10780 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10781 	},
10782 	{
10783 		"bpf_exit with invalid return code. test7",
10784 		.insns = {
10785 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10786 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4),
10787 			BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_2),
10788 			BPF_EXIT_INSN(),
10789 		},
10790 		.errstr = "R0 has unknown scalar value",
10791 		.result = REJECT,
10792 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10793 	},
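	/* bpf-to-bpf calls: BPF_CALL with src_reg set to BPF_PSEUDO_CALL
	 * (1) targets another subprogram rather than a helper, with the
	 * target encoded as an instruction offset in the imm field.
	 */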
10794 	{
10795 		"calls: basic sanity",
10796 		.insns = {
10797 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10798 			BPF_MOV64_IMM(BPF_REG_0, 1),
10799 			BPF_EXIT_INSN(),
10800 			BPF_MOV64_IMM(BPF_REG_0, 2),
10801 			BPF_EXIT_INSN(),
10802 		},
10803 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10804 		.result = ACCEPT,
10805 	},
10806 	{
10807 		"calls: not on unprivileged",
10808 		.insns = {
10809 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10810 			BPF_MOV64_IMM(BPF_REG_0, 1),
10811 			BPF_EXIT_INSN(),
10812 			BPF_MOV64_IMM(BPF_REG_0, 2),
10813 			BPF_EXIT_INSN(),
10814 		},
10815 		.errstr_unpriv = "function calls to other bpf functions are allowed for root only",
10816 		.result_unpriv = REJECT,
10817 		.result = ACCEPT,
10818 		.retval = 1,
10819 	},
10820 	{
10821 		"calls: div by 0 in subprog",
10822 		.insns = {
10823 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10824 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
10825 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10826 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
10827 				    offsetof(struct __sk_buff, data_end)),
10828 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
10829 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
10830 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
10831 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
10832 			BPF_MOV64_IMM(BPF_REG_0, 1),
10833 			BPF_EXIT_INSN(),
10834 			BPF_MOV32_IMM(BPF_REG_2, 0),
10835 			BPF_MOV32_IMM(BPF_REG_3, 1),
10836 			BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
10837 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10838 				    offsetof(struct __sk_buff, data)),
10839 			BPF_EXIT_INSN(),
10840 		},
10841 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
10842 		.result = ACCEPT,
10843 		.retval = 1,
10844 	},
10845 	{
10846 		"calls: multiple ret types in subprog 1",
10847 		.insns = {
10848 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10849 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
10850 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10851 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
10852 				    offsetof(struct __sk_buff, data_end)),
10853 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
10854 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
10855 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
10856 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
10857 			BPF_MOV64_IMM(BPF_REG_0, 1),
10858 			BPF_EXIT_INSN(),
10859 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10860 				    offsetof(struct __sk_buff, data)),
10861 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
10862 			BPF_MOV32_IMM(BPF_REG_0, 42),
10863 			BPF_EXIT_INSN(),
10864 		},
10865 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
10866 		.result = REJECT,
10867 		.errstr = "R0 invalid mem access 'inv'",
10868 	},
10869 	{
10870 		"calls: multiple ret types in subprog 2",
10871 		.insns = {
10872 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10873 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
10874 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10875 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
10876 				    offsetof(struct __sk_buff, data_end)),
10877 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
10878 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
10879 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
10880 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
10881 			BPF_MOV64_IMM(BPF_REG_0, 1),
10882 			BPF_EXIT_INSN(),
10883 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10884 				    offsetof(struct __sk_buff, data)),
10885 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10886 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
10887 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10888 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10889 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10890 			BPF_LD_MAP_FD(BPF_REG_1, 0),
10891 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10892 				     BPF_FUNC_map_lookup_elem),
10893 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
10894 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
10895 				    offsetof(struct __sk_buff, data)),
10896 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
10897 			BPF_EXIT_INSN(),
10898 		},
10899 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
10900 		.fixup_map_hash_8b = { 16 },
10901 		.result = REJECT,
10902 		.errstr = "R0 min value is outside of the array range",
10903 	},
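	/* Structurally broken programs around pseudo calls: overlapping
	 * caller/callee regions, calls that loop back into themselves,
	 * and non-zero reserved fields in the BPF_CALL instruction must
	 * all be rejected.
	 */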
10904 	{
10905 		"calls: overlapping caller/callee",
10906 		.insns = {
10907 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
10908 			BPF_MOV64_IMM(BPF_REG_0, 1),
10909 			BPF_EXIT_INSN(),
10910 		},
10911 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10912 		.errstr = "last insn is not an exit or jmp",
10913 		.result = REJECT,
10914 	},
10915 	{
10916 		"calls: wrong recursive calls",
10917 		.insns = {
10918 			BPF_JMP_IMM(BPF_JA, 0, 0, 4),
10919 			BPF_JMP_IMM(BPF_JA, 0, 0, 4),
10920 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
10921 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
10922 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
10923 			BPF_MOV64_IMM(BPF_REG_0, 1),
10924 			BPF_EXIT_INSN(),
10925 		},
10926 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10927 		.errstr = "jump out of range",
10928 		.result = REJECT,
10929 	},
10930 	{
10931 		"calls: wrong src reg",
10932 		.insns = {
10933 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 2, 0, 0),
10934 			BPF_MOV64_IMM(BPF_REG_0, 1),
10935 			BPF_EXIT_INSN(),
10936 		},
10937 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10938 		.errstr = "BPF_CALL uses reserved fields",
10939 		.result = REJECT,
10940 	},
10941 	{
10942 		"calls: wrong off value",
10943 		.insns = {
10944 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
10945 			BPF_MOV64_IMM(BPF_REG_0, 1),
10946 			BPF_EXIT_INSN(),
10947 			BPF_MOV64_IMM(BPF_REG_0, 2),
10948 			BPF_EXIT_INSN(),
10949 		},
10950 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10951 		.errstr = "BPF_CALL uses reserved fields",
10952 		.result = REJECT,
10953 	},
10954 	{
10955 		"calls: jump back loop",
10956 		.insns = {
10957 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
10958 			BPF_MOV64_IMM(BPF_REG_0, 1),
10959 			BPF_EXIT_INSN(),
10960 		},
10961 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10962 		.errstr = "back-edge from insn 0 to 0",
10963 		.result = REJECT,
10964 	},
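	/* Conditional branches combined with pseudo calls: a branch may
	 * not jump across a subprogram boundary or create a back-edge
	 * into one.
	 */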
10965 	{
10966 		"calls: conditional call",
10967 		.insns = {
10968 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10969 				    offsetof(struct __sk_buff, mark)),
10970 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
10971 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10972 			BPF_MOV64_IMM(BPF_REG_0, 1),
10973 			BPF_EXIT_INSN(),
10974 			BPF_MOV64_IMM(BPF_REG_0, 2),
10975 			BPF_EXIT_INSN(),
10976 		},
10977 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10978 		.errstr = "jump out of range",
10979 		.result = REJECT,
10980 	},
10981 	{
10982 		"calls: conditional call 2",
10983 		.insns = {
10984 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10985 				    offsetof(struct __sk_buff, mark)),
10986 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
10987 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10988 			BPF_MOV64_IMM(BPF_REG_0, 1),
10989 			BPF_EXIT_INSN(),
10990 			BPF_MOV64_IMM(BPF_REG_0, 2),
10991 			BPF_EXIT_INSN(),
10992 			BPF_MOV64_IMM(BPF_REG_0, 3),
10993 			BPF_EXIT_INSN(),
10994 		},
10995 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10996 		.result = ACCEPT,
10997 	},
10998 	{
10999 		"calls: conditional call 3",
11000 		.insns = {
11001 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11002 				    offsetof(struct __sk_buff, mark)),
11003 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
11004 			BPF_JMP_IMM(BPF_JA, 0, 0, 4),
11005 			BPF_MOV64_IMM(BPF_REG_0, 1),
11006 			BPF_EXIT_INSN(),
11007 			BPF_MOV64_IMM(BPF_REG_0, 1),
11008 			BPF_JMP_IMM(BPF_JA, 0, 0, -6),
11009 			BPF_MOV64_IMM(BPF_REG_0, 3),
11010 			BPF_JMP_IMM(BPF_JA, 0, 0, -6),
11011 		},
11012 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11013 		.errstr = "back-edge from insn",
11014 		.result = REJECT,
11015 	},
11016 	{
11017 		"calls: conditional call 4",
11018 		.insns = {
11019 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11020 				    offsetof(struct __sk_buff, mark)),
11021 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
11022 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11023 			BPF_MOV64_IMM(BPF_REG_0, 1),
11024 			BPF_EXIT_INSN(),
11025 			BPF_MOV64_IMM(BPF_REG_0, 1),
11026 			BPF_JMP_IMM(BPF_JA, 0, 0, -5),
11027 			BPF_MOV64_IMM(BPF_REG_0, 3),
11028 			BPF_EXIT_INSN(),
11029 		},
11030 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11031 		.result = ACCEPT,
11032 	},
11033 	{
11034 		"calls: conditional call 5",
11035 		.insns = {
11036 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11037 				    offsetof(struct __sk_buff, mark)),
11038 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
11039 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11040 			BPF_MOV64_IMM(BPF_REG_0, 1),
11041 			BPF_EXIT_INSN(),
11042 			BPF_MOV64_IMM(BPF_REG_0, 1),
11043 			BPF_JMP_IMM(BPF_JA, 0, 0, -6),
11044 			BPF_MOV64_IMM(BPF_REG_0, 3),
11045 			BPF_EXIT_INSN(),
11046 		},
11047 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11048 		.errstr = "back-edge from insn",
11049 		.result = REJECT,
11050 	},
11051 	{
11052 		"calls: conditional call 6",
11053 		.insns = {
11054 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11055 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2),
11056 			BPF_EXIT_INSN(),
11057 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11058 				    offsetof(struct __sk_buff, mark)),
11059 			BPF_EXIT_INSN(),
11060 		},
11061 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11062 		.errstr = "back-edge from insn",
11063 		.result = REJECT,
11064 	},
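	/* Caller/callee register conventions: R1-R5 carry arguments into
	 * the callee, R0 carries the return value back and must be
	 * written before the caller may read it.
	 */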
11065 	{
11066 		"calls: using r0 returned by callee",
11067 		.insns = {
11068 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11069 			BPF_EXIT_INSN(),
11070 			BPF_MOV64_IMM(BPF_REG_0, 2),
11071 			BPF_EXIT_INSN(),
11072 		},
11073 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11074 		.result = ACCEPT,
11075 	},
11076 	{
11077 		"calls: using uninit r0 from callee",
11078 		.insns = {
11079 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11080 			BPF_EXIT_INSN(),
11081 			BPF_EXIT_INSN(),
11082 		},
11083 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11084 		.errstr = "!read_ok",
11085 		.result = REJECT,
11086 	},
11087 	{
11088 		"calls: callee is using r1",
11089 		.insns = {
11090 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11091 			BPF_EXIT_INSN(),
11092 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11093 				    offsetof(struct __sk_buff, len)),
11094 			BPF_EXIT_INSN(),
11095 		},
11096 		.prog_type = BPF_PROG_TYPE_SCHED_ACT,
11097 		.result = ACCEPT,
11098 		.retval = TEST_DATA_LEN,
11099 	},
11100 	{
11101 		"calls: callee using args1",
11102 		.insns = {
11103 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11104 			BPF_EXIT_INSN(),
11105 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
11106 			BPF_EXIT_INSN(),
11107 		},
11108 		.errstr_unpriv = "allowed for root only",
11109 		.result_unpriv = REJECT,
11110 		.result = ACCEPT,
11111 		.retval = POINTER_VALUE,
11112 	},
11113 	{
11114 		"calls: callee using wrong args2",
11115 		.insns = {
11116 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11117 			BPF_EXIT_INSN(),
11118 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11119 			BPF_EXIT_INSN(),
11120 		},
11121 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11122 		.errstr = "R2 !read_ok",
11123 		.result = REJECT,
11124 	},
11125 	{
11126 		"calls: callee using two args",
11127 		.insns = {
11128 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11129 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
11130 				    offsetof(struct __sk_buff, len)),
11131 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
11132 				    offsetof(struct __sk_buff, len)),
11133 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11134 			BPF_EXIT_INSN(),
11135 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
11136 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
11137 			BPF_EXIT_INSN(),
11138 		},
11139 		.errstr_unpriv = "allowed for root only",
11140 		.result_unpriv = REJECT,
11141 		.result = ACCEPT,
11142 		.retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
11143 	},
11144 	{
11145 		"calls: callee changing pkt pointers",
11146 		.insns = {
11147 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
11148 				    offsetof(struct xdp_md, data)),
11149 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
11150 				    offsetof(struct xdp_md, data_end)),
11151 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
11152 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
11153 			BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
11154 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11155 			/* clear_all_pkt_pointers() has to walk all frames
11156 			 * to make sure that pkt pointers in the caller
11157 			 * are cleared when the callee calls a helper that
11158 			 * adjusts the packet size
11159 			 */
11160 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
11161 			BPF_MOV32_IMM(BPF_REG_0, 0),
11162 			BPF_EXIT_INSN(),
11163 			BPF_MOV64_IMM(BPF_REG_2, 0),
11164 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11165 				     BPF_FUNC_xdp_adjust_head),
11166 			BPF_EXIT_INSN(),
11167 		},
11168 		.result = REJECT,
11169 		.errstr = "R6 invalid mem access 'inv'",
11170 		.prog_type = BPF_PROG_TYPE_XDP,
11171 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11172 	},
11173 	{
11174 		"calls: two calls with args",
11175 		.insns = {
11176 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11177 			BPF_EXIT_INSN(),
11178 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11179 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
11180 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
11181 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11182 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11183 			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
11184 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
11185 			BPF_EXIT_INSN(),
11186 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11187 				    offsetof(struct __sk_buff, len)),
11188 			BPF_EXIT_INSN(),
11189 		},
11190 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11191 		.result = ACCEPT,
11192 		.retval = TEST_DATA_LEN + TEST_DATA_LEN,
11193 	},
11194 	{
11195 		"calls: calls with stack arith",
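		/* r2 starts as a pointer into the main frame and each
		 * function subtracts another 64 bytes, so the final store
		 * lands at main's fp-192, which is still a valid stack slot.
		 */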
11196 		.insns = {
11197 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11198 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
11199 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11200 			BPF_EXIT_INSN(),
11201 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
11202 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11203 			BPF_EXIT_INSN(),
11204 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
11205 			BPF_MOV64_IMM(BPF_REG_0, 42),
11206 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
11207 			BPF_EXIT_INSN(),
11208 		},
11209 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11210 		.result = ACCEPT,
11211 		.retval = 42,
11212 	},
11213 	{
11214 		"calls: calls with misaligned stack access",
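		/* The -63/-61/-63 adjustments leave r2 at fp-187, so the
		 * final 8-byte store is rejected under strict alignment.
		 */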
11215 		.insns = {
11216 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11217 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
11218 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11219 			BPF_EXIT_INSN(),
11220 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
11221 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11222 			BPF_EXIT_INSN(),
11223 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
11224 			BPF_MOV64_IMM(BPF_REG_0, 42),
11225 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
11226 			BPF_EXIT_INSN(),
11227 		},
11228 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11229 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
11230 		.errstr = "misaligned stack access",
11231 		.result = REJECT,
11232 	},
11233 	{
11234 		"calls: calls control flow, jump test",
11235 		.insns = {
11236 			BPF_MOV64_IMM(BPF_REG_0, 42),
11237 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11238 			BPF_MOV64_IMM(BPF_REG_0, 43),
11239 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11240 			BPF_JMP_IMM(BPF_JA, 0, 0, -3),
11241 			BPF_EXIT_INSN(),
11242 		},
11243 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11244 		.result = ACCEPT,
11245 		.retval = 43,
11246 	},
11247 	{
11248 		"calls: calls control flow, jump test 2",
11249 		.insns = {
11250 			BPF_MOV64_IMM(BPF_REG_0, 42),
11251 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11252 			BPF_MOV64_IMM(BPF_REG_0, 43),
11253 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11254 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
11255 			BPF_EXIT_INSN(),
11256 		},
11257 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11258 		.errstr = "jump out of range from insn 1 to 4",
11259 		.result = REJECT,
11260 	},
11261 	{
11262 		"calls: two calls with bad jump",
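		/* The conditional jump at insn 11 sits in the second callee
		 * but targets insn 9, which belongs to the first callee, so
		 * it crosses a function boundary.
		 */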
11263 		.insns = {
11264 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11265 			BPF_EXIT_INSN(),
11266 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11267 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
11268 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
11269 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11270 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11271 			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
11272 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
11273 			BPF_EXIT_INSN(),
11274 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11275 				    offsetof(struct __sk_buff, len)),
11276 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
11277 			BPF_EXIT_INSN(),
11278 		},
11279 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11280 		.errstr = "jump out of range from insn 11 to 9",
11281 		.result = REJECT,
11282 	},
11283 	{
11284 		"calls: recursive call. test1",
11285 		.insns = {
11286 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11287 			BPF_EXIT_INSN(),
11288 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
11289 			BPF_EXIT_INSN(),
11290 		},
11291 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11292 		.errstr = "back-edge",
11293 		.result = REJECT,
11294 	},
11295 	{
11296 		"calls: recursive call. test2",
11297 		.insns = {
11298 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11299 			BPF_EXIT_INSN(),
11300 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
11301 			BPF_EXIT_INSN(),
11302 		},
11303 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11304 		.errstr = "back-edge",
11305 		.result = REJECT,
11306 	},
11307 	{
11308 		"calls: unreachable code",
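		/* All three functions end by insn 5; nothing ever reaches
		 * insns 6-7, so the verifier reports "unreachable insn 6".
		 */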
11309 		.insns = {
11310 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11311 			BPF_EXIT_INSN(),
11312 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11313 			BPF_EXIT_INSN(),
11314 			BPF_MOV64_IMM(BPF_REG_0, 0),
11315 			BPF_EXIT_INSN(),
11316 			BPF_MOV64_IMM(BPF_REG_0, 0),
11317 			BPF_EXIT_INSN(),
11318 		},
11319 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11320 		.errstr = "unreachable insn 6",
11321 		.result = REJECT,
11322 	},
11323 	{
11324 		"calls: invalid call",
11325 		.insns = {
11326 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11327 			BPF_EXIT_INSN(),
11328 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
11329 			BPF_EXIT_INSN(),
11330 		},
11331 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11332 		.errstr = "invalid destination",
11333 		.result = REJECT,
11334 	},
11335 	{
11336 		"calls: invalid call 2",
11337 		.insns = {
11338 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11339 			BPF_EXIT_INSN(),
11340 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
11341 			BPF_EXIT_INSN(),
11342 		},
11343 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11344 		.errstr = "invalid destination",
11345 		.result = REJECT,
11346 	},
11347 	{
11348 		"calls: jumping across function bodies. test1",
11349 		.insns = {
11350 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11351 			BPF_MOV64_IMM(BPF_REG_0, 0),
11352 			BPF_EXIT_INSN(),
11353 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
11354 			BPF_EXIT_INSN(),
11355 		},
11356 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11357 		.errstr = "jump out of range",
11358 		.result = REJECT,
11359 	},
11360 	{
11361 		"calls: jumping across function bodies. test2",
11362 		.insns = {
11363 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
11364 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11365 			BPF_MOV64_IMM(BPF_REG_0, 0),
11366 			BPF_EXIT_INSN(),
11367 			BPF_EXIT_INSN(),
11368 		},
11369 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11370 		.errstr = "jump out of range",
11371 		.result = REJECT,
11372 	},
11373 	{
11374 		"calls: call without exit",
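		/* The last function (insns 4-5) ends with a conditional jump
		 * rather than BPF_EXIT or an unconditional BPF_JA, so it is
		 * rejected ("not an exit").
		 */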
11375 		.insns = {
11376 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11377 			BPF_EXIT_INSN(),
11378 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11379 			BPF_EXIT_INSN(),
11380 			BPF_MOV64_IMM(BPF_REG_0, 0),
11381 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
11382 		},
11383 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11384 		.errstr = "not an exit",
11385 		.result = REJECT,
11386 	},
11387 	{
11388 		"calls: call into middle of ld_imm64",
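		/* The two calls target insns 4 and 5 respectively, splitting
		 * the 16-byte BPF_LD_IMM64 (insns 4-5) across two functions,
		 * so one function ends in the middle of the double insn.
		 */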
11389 		.insns = {
11390 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11391 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11392 			BPF_MOV64_IMM(BPF_REG_0, 0),
11393 			BPF_EXIT_INSN(),
11394 			BPF_LD_IMM64(BPF_REG_0, 0),
11395 			BPF_EXIT_INSN(),
11396 		},
11397 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11398 		.errstr = "last insn",
11399 		.result = REJECT,
11400 	},
11401 	{
11402 		"calls: call into middle of other call",
11403 		.insns = {
11404 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11405 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11406 			BPF_MOV64_IMM(BPF_REG_0, 0),
11407 			BPF_EXIT_INSN(),
11408 			BPF_MOV64_IMM(BPF_REG_0, 0),
11409 			BPF_MOV64_IMM(BPF_REG_0, 0),
11410 			BPF_EXIT_INSN(),
11411 		},
11412 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11413 		.errstr = "last insn",
11414 		.result = REJECT,
11415 	},
11416 	{
11417 		"calls: ld_abs with changing ctx data in callee",
11418 		.insns = {
11419 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11420 			BPF_LD_ABS(BPF_B, 0),
11421 			BPF_LD_ABS(BPF_H, 0),
11422 			BPF_LD_ABS(BPF_W, 0),
11423 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
11424 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
11425 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
11426 			BPF_LD_ABS(BPF_B, 0),
11427 			BPF_LD_ABS(BPF_H, 0),
11428 			BPF_LD_ABS(BPF_W, 0),
11429 			BPF_EXIT_INSN(),
11430 			BPF_MOV64_IMM(BPF_REG_2, 1),
11431 			BPF_MOV64_IMM(BPF_REG_3, 2),
11432 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11433 				     BPF_FUNC_skb_vlan_push),
11434 			BPF_EXIT_INSN(),
11435 		},
11436 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11437 		.errstr = "BPF_LD_[ABS|IND] instructions cannot be mixed",
11438 		.result = REJECT,
11439 	},
11440 	{
11441 		"calls: two calls with bad fallthrough",
11442 		.insns = {
11443 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11444 			BPF_EXIT_INSN(),
11445 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11446 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
11447 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
11448 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11449 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11450 			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
11451 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
11452 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
11453 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11454 				    offsetof(struct __sk_buff, len)),
11455 			BPF_EXIT_INSN(),
11456 		},
11457 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11458 		.errstr = "not an exit",
11459 		.result = REJECT,
11460 	},
11461 	{
11462 		"calls: two calls with stack read",
11463 		.insns = {
11464 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11465 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11466 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11467 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11468 			BPF_EXIT_INSN(),
11469 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11470 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
11471 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
11472 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11473 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11474 			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
11475 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
11476 			BPF_EXIT_INSN(),
11477 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
11478 			BPF_EXIT_INSN(),
11479 		},
11480 		.prog_type = BPF_PROG_TYPE_XDP,
11481 		.result = ACCEPT,
11482 	},
11483 	{
11484 		"calls: two calls with stack write",
11485 		.insns = {
11486 			/* main prog */
11487 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11488 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11489 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11490 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11491 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11492 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11493 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
11494 			BPF_EXIT_INSN(),
11495 
11496 			/* subprog 1 */
11497 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11498 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11499 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
11500 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
11501 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11502 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11503 			BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
11504 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
11505 			/* write into stack frame of main prog */
11506 			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
11507 			BPF_EXIT_INSN(),
11508 
11509 			/* subprog 2 */
11510 			/* read from stack frame of main prog */
11511 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
11512 			BPF_EXIT_INSN(),
11513 		},
11514 		.prog_type = BPF_PROG_TYPE_XDP,
11515 		.result = ACCEPT,
11516 	},
11517 	{
11518 		"calls: stack overflow using two frames (pre-call access)",
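		/* Both frames touch fp-300, so the combined stack depth of
		 * roughly 600 bytes exceeds the 512-byte limit.
		 */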
11519 		.insns = {
11520 			/* prog 1 */
11521 			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11522 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
11523 			BPF_EXIT_INSN(),
11524 
11525 			/* prog 2 */
11526 			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11527 			BPF_MOV64_IMM(BPF_REG_0, 0),
11528 			BPF_EXIT_INSN(),
11529 		},
11530 		.prog_type = BPF_PROG_TYPE_XDP,
11531 		.errstr = "combined stack size",
11532 		.result = REJECT,
11533 	},
11534 	{
11535 		"calls: stack overflow using two frames (post-call access)",
11536 		.insns = {
11537 			/* prog 1 */
11538 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
11539 			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11540 			BPF_EXIT_INSN(),
11541 
11542 			/* prog 2 */
11543 			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11544 			BPF_MOV64_IMM(BPF_REG_0, 0),
11545 			BPF_EXIT_INSN(),
11546 		},
11547 		.prog_type = BPF_PROG_TYPE_XDP,
11548 		.errstr = "combined stack size",
11549 		.result = REJECT,
11550 	},
11551 	{
11552 		"calls: stack depth check using three frames. test1",
11553 		.insns = {
11554 			/* main */
11555 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
11556 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
11557 			BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
11558 			BPF_MOV64_IMM(BPF_REG_0, 0),
11559 			BPF_EXIT_INSN(),
11560 			/* A */
11561 			BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
11562 			BPF_EXIT_INSN(),
11563 			/* B */
11564 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
11565 			BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
11566 			BPF_EXIT_INSN(),
11567 		},
11568 		.prog_type = BPF_PROG_TYPE_XDP,
11569 		/* stack_main=32, stack_A=256, stack_B=64
11570 		 * and max(main+A, main+A+B) < 512
11571 		 */
11572 		.result = ACCEPT,
11573 	},
11574 	{
11575 		"calls: stack depth check using three frames. test2",
11576 		.insns = {
11577 			/* main */
11578 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
11579 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
11580 			BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
11581 			BPF_MOV64_IMM(BPF_REG_0, 0),
11582 			BPF_EXIT_INSN(),
11583 			/* A */
11584 			BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
11585 			BPF_EXIT_INSN(),
11586 			/* B */
11587 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
11588 			BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
11589 			BPF_EXIT_INSN(),
11590 		},
11591 		.prog_type = BPF_PROG_TYPE_XDP,
11592 		/* stack_main=32, stack_A=64, stack_B=256
11593 		 * and max(main+A, main+A+B) < 512
11594 		 */
11595 		.result = ACCEPT,
11596 	},
11597 	{
11598 		"calls: stack depth check using three frames. test3",
11599 		.insns = {
11600 			/* main */
11601 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11602 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
11603 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11604 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
11605 			BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
11606 			BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
11607 			BPF_MOV64_IMM(BPF_REG_0, 0),
11608 			BPF_EXIT_INSN(),
11609 			/* A */
11610 			BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
11611 			BPF_EXIT_INSN(),
11612 			BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
11613 			BPF_JMP_IMM(BPF_JA, 0, 0, -3),
11614 			/* B */
11615 			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
11616 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
11617 			BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
11618 			BPF_EXIT_INSN(),
11619 		},
11620 		.prog_type = BPF_PROG_TYPE_XDP,
11621 		/* stack_main=64, stack_A=224, stack_B=256
11622 		 * and max(main+A, main+A+B) > 512
11623 		 */
11624 		.errstr = "combined stack",
11625 		.result = REJECT,
11626 	},
11627 	{
11628 		"calls: stack depth check using three frames. test4",
11629 		/* void main(void) {
11630 		 *   func1(0);
11631 		 *   func1(1);
11632 		 *   func2(1);
11633 		 * }
11634 		 * void func1(int alloc_or_recurse) {
11635 		 *   if (alloc_or_recurse) {
11636 		 *     frame_pointer[-300] = 1;
11637 		 *   } else {
11638 		 *     func2(alloc_or_recurse);
11639 		 *   }
11640 		 * }
11641 		 * void func2(int alloc_or_recurse) {
11642 		 *   if (alloc_or_recurse) {
11643 		 *     frame_pointer[-300] = 1;
11644 		 *   }
11645 		 * }
11646 		 */
11647 		.insns = {
11648 			/* main */
11649 			BPF_MOV64_IMM(BPF_REG_1, 0),
11650 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
11651 			BPF_MOV64_IMM(BPF_REG_1, 1),
11652 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
11653 			BPF_MOV64_IMM(BPF_REG_1, 1),
11654 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
11655 			BPF_MOV64_IMM(BPF_REG_0, 0),
11656 			BPF_EXIT_INSN(),
11657 			/* A */
11658 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
11659 			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11660 			BPF_EXIT_INSN(),
11661 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
11662 			BPF_EXIT_INSN(),
11663 			/* B */
11664 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
11665 			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11666 			BPF_EXIT_INSN(),
11667 		},
11668 		.prog_type = BPF_PROG_TYPE_XDP,
11669 		.result = REJECT,
11670 		.errstr = "combined stack",
11671 	},
11672 	{
11673 		"calls: stack depth check using three frames. test5",
11674 		.insns = {
11675 			/* main */
11676 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
11677 			BPF_EXIT_INSN(),
11678 			/* A */
11679 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
11680 			BPF_EXIT_INSN(),
11681 			/* B */
11682 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
11683 			BPF_EXIT_INSN(),
11684 			/* C */
11685 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
11686 			BPF_EXIT_INSN(),
11687 			/* D */
11688 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
11689 			BPF_EXIT_INSN(),
11690 			/* E */
11691 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
11692 			BPF_EXIT_INSN(),
11693 			/* F */
11694 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
11695 			BPF_EXIT_INSN(),
11696 			/* G */
11697 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
11698 			BPF_EXIT_INSN(),
11699 			/* H */
11700 			BPF_MOV64_IMM(BPF_REG_0, 0),
11701 			BPF_EXIT_INSN(),
11702 		},
11703 		.prog_type = BPF_PROG_TYPE_XDP,
11704 		.errstr = "call stack",
11705 		.result = REJECT,
11706 	},
11707 	{
11708 		"calls: spill into caller stack frame",
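		/* The callee stores r1 (a pointer into the caller's stack)
		 * back into that caller stack slot; spilling a pointer into
		 * the caller's frame is rejected ("cannot spill").
		 */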
11709 		.insns = {
11710 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11711 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11712 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11713 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11714 			BPF_EXIT_INSN(),
11715 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
11716 			BPF_MOV64_IMM(BPF_REG_0, 0),
11717 			BPF_EXIT_INSN(),
11718 		},
11719 		.prog_type = BPF_PROG_TYPE_XDP,
11720 		.errstr = "cannot spill",
11721 		.result = REJECT,
11722 	},
11723 	{
11724 		"calls: write into caller stack frame",
11725 		.insns = {
11726 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11727 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11728 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11729 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11730 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
11731 			BPF_EXIT_INSN(),
11732 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
11733 			BPF_MOV64_IMM(BPF_REG_0, 0),
11734 			BPF_EXIT_INSN(),
11735 		},
11736 		.prog_type = BPF_PROG_TYPE_XDP,
11737 		.result = ACCEPT,
11738 		.retval = 42,
11739 	},
11740 	{
11741 		"calls: write into callee stack frame",
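		/* The callee returns a pointer to its own stack (fp-8);
		 * handing a stack pointer back to the caller is rejected
		 * ("cannot return stack pointer").
		 */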
11742 		.insns = {
11743 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11744 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
11745 			BPF_EXIT_INSN(),
11746 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
11747 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
11748 			BPF_EXIT_INSN(),
11749 		},
11750 		.prog_type = BPF_PROG_TYPE_XDP,
11751 		.errstr = "cannot return stack pointer",
11752 		.result = REJECT,
11753 	},
11754 	{
11755 		"calls: two calls with stack write and void return",
11756 		.insns = {
11757 			/* main prog */
11758 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11759 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11760 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11761 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11762 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11763 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11764 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
11765 			BPF_EXIT_INSN(),
11766 
11767 			/* subprog 1 */
11768 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11769 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11770 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11771 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
11772 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11773 			BPF_EXIT_INSN(),
11774 
11775 			/* subprog 2 */
11776 			/* write into stack frame of main prog */
11777 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
11778 			BPF_EXIT_INSN(), /* void return */
11779 		},
11780 		.prog_type = BPF_PROG_TYPE_XDP,
11781 		.result = ACCEPT,
11782 	},
11783 	{
11784 		"calls: ambiguous return value",
11785 		.insns = {
11786 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11787 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
11788 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
11789 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11790 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11791 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
11792 			BPF_EXIT_INSN(),
11793 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
11794 			BPF_MOV64_IMM(BPF_REG_0, 0),
11795 			BPF_EXIT_INSN(),
11796 		},
11797 		.errstr_unpriv = "allowed for root only",
11798 		.result_unpriv = REJECT,
11799 		.errstr = "R0 !read_ok",
11800 		.result = REJECT,
11801 	},
11802 	{
11803 		"calls: two calls that return map_value",
11804 		.insns = {
11805 			/* main prog */
11806 			/* pass fp-16, fp-8 into a function */
11807 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11808 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11809 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11810 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11811 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
11812 
11813 			/* fetch map_value_ptr from the stack of this function */
11814 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
11815 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
11816 			/* write into map value */
11817 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11818 			/* fetch second map_value_ptr from the stack */
11819 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
11820 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
11821 			/* write into map value */
11822 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11823 			BPF_MOV64_IMM(BPF_REG_0, 0),
11824 			BPF_EXIT_INSN(),
11825 
11826 			/* subprog 1 */
11827 			/* call 3rd function twice */
11828 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11829 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11830 			/* first time with fp-8 */
11831 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11832 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
11833 			/* second time with fp-16 */
11834 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11835 			BPF_EXIT_INSN(),
11836 
11837 			/* subprog 2 */
11838 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11839 			/* lookup from map */
11840 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11841 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11842 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11843 			BPF_LD_MAP_FD(BPF_REG_1, 0),
11844 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11845 				     BPF_FUNC_map_lookup_elem),
11846 			/* write map_value_ptr into stack frame of main prog */
11847 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11848 			BPF_MOV64_IMM(BPF_REG_0, 0),
11849 			BPF_EXIT_INSN(), /* return 0 */
11850 		},
11851 		.prog_type = BPF_PROG_TYPE_XDP,
11852 		.fixup_map_hash_8b = { 23 },
11853 		.result = ACCEPT,
11854 	},
11855 	{
11856 		"calls: two calls that return map_value with bool condition",
11857 		.insns = {
11858 			/* main prog */
11859 			/* pass fp-16, fp-8 into a function */
11860 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11861 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11862 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11863 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11864 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11865 			BPF_MOV64_IMM(BPF_REG_0, 0),
11866 			BPF_EXIT_INSN(),
11867 
11868 			/* subprog 1 */
11869 			/* call 3rd function twice */
11870 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11871 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11872 			/* first time with fp-8 */
11873 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
11874 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
11875 			/* fetch map_value_ptr from the stack of this function */
11876 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
11877 			/* write into map value */
11878 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11879 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
11880 			/* second time with fp-16 */
11881 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11882 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
11883 			/* fetch second map_value_ptr from the stack */
11884 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
11885 			/* write into map value */
11886 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11887 			BPF_EXIT_INSN(),
11888 
11889 			/* subprog 2 */
11890 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11891 			/* lookup from map */
11892 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11893 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11894 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11895 			BPF_LD_MAP_FD(BPF_REG_1, 0),
11896 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11897 				     BPF_FUNC_map_lookup_elem),
11898 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11899 			BPF_MOV64_IMM(BPF_REG_0, 0),
11900 			BPF_EXIT_INSN(), /* return 0 */
11901 			/* write map_value_ptr into stack frame of main prog */
11902 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11903 			BPF_MOV64_IMM(BPF_REG_0, 1),
11904 			BPF_EXIT_INSN(), /* return 1 */
11905 		},
11906 		.prog_type = BPF_PROG_TYPE_XDP,
11907 		.fixup_map_hash_8b = { 23 },
11908 		.result = ACCEPT,
11909 	},
11910 	{
11911 		"calls: two calls that return map_value with incorrect bool check",
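		/* The check after the second call tests r0 != 0 instead of
		 * r0 != 1, so fp-16 is dereferenced exactly on the path
		 * where subprog 2 never wrote a map value pointer there.
		 */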
11912 		.insns = {
11913 			/* main prog */
11914 			/* pass fp-16, fp-8 into a function */
11915 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11916 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11917 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11918 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11919 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11920 			BPF_MOV64_IMM(BPF_REG_0, 0),
11921 			BPF_EXIT_INSN(),
11922 
11923 			/* subprog 1 */
11924 			/* call 3rd function twice */
11925 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11926 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11927 			/* first time with fp-8 */
11928 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
11929 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
11930 			/* fetch map_value_ptr from the stack of this function */
11931 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
11932 			/* write into map value */
11933 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11934 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
11935 			/* second time with fp-16 */
11936 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11937 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11938 			/* fetch second map_value_ptr from the stack */
11939 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
11940 			/* write into map value */
11941 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11942 			BPF_EXIT_INSN(),
11943 
11944 			/* subprog 2 */
11945 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11946 			/* lookup from map */
11947 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11948 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11949 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11950 			BPF_LD_MAP_FD(BPF_REG_1, 0),
11951 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11952 				     BPF_FUNC_map_lookup_elem),
11953 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11954 			BPF_MOV64_IMM(BPF_REG_0, 0),
11955 			BPF_EXIT_INSN(), /* return 0 */
11956 			/* write map_value_ptr into stack frame of main prog */
11957 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11958 			BPF_MOV64_IMM(BPF_REG_0, 1),
11959 			BPF_EXIT_INSN(), /* return 1 */
11960 		},
11961 		.prog_type = BPF_PROG_TYPE_XDP,
11962 		.fixup_map_hash_8b = { 23 },
11963 		.result = REJECT,
11964 		.errstr = "invalid read from stack off -16+0 size 8",
11965 	},
11966 	{
11967 		"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
11968 		.insns = {
11969 			/* main prog */
11970 			/* pass fp-16, fp-8 into a function */
11971 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11972 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11973 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11974 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11975 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11976 			BPF_MOV64_IMM(BPF_REG_0, 0),
11977 			BPF_EXIT_INSN(),
11978 
11979 			/* subprog 1 */
11980 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11981 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11982 			/* 1st lookup from map */
11983 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11984 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11985 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11986 			BPF_LD_MAP_FD(BPF_REG_1, 0),
11987 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11988 				     BPF_FUNC_map_lookup_elem),
11989 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11990 			BPF_MOV64_IMM(BPF_REG_8, 0),
11991 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11992 			/* write map_value_ptr into stack frame of main prog at fp-8 */
11993 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11994 			BPF_MOV64_IMM(BPF_REG_8, 1),
11995 
11996 			/* 2nd lookup from map */
11997 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
11998 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11999 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12000 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
12001 				     BPF_FUNC_map_lookup_elem),
12002 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12003 			BPF_MOV64_IMM(BPF_REG_9, 0),
12004 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12005 			/* write map_value_ptr into stack frame of main prog at fp-16 */
12006 			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
12007 			BPF_MOV64_IMM(BPF_REG_9, 1),
12008 
12009 			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
12010 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
12011 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
12012 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
12013 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
12014 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),  /* 34 */
12015 			BPF_EXIT_INSN(),
12016 
12017 			/* subprog 2 */
12018 			/* if arg2 == 1 do *arg1 = 0 */
12019 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
12020 			/* fetch map_value_ptr from the stack of this function */
12021 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
12022 			/* write into map value */
12023 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12024 
12025 			/* if arg4 == 1 do *arg3 = 0 */
12026 			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
12027 			/* fetch map_value_ptr from the stack of this function */
12028 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
12029 			/* write into map value; the 8-byte store at off=2 overflows value_size=8 */
12030 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
12031 			BPF_EXIT_INSN(),
12032 		},
12033 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12034 		.fixup_map_hash_8b = { 12, 22 },
12035 		.result = REJECT,
12036 		.errstr = "invalid access to map value, value_size=8 off=2 size=8",
12037 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12038 	},
12039 	{
12040 		"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
12041 		.insns = {
12042 			/* main prog */
12043 			/* pass fp-16, fp-8 into a function */
12044 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
12045 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
12046 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12047 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
12048 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12049 			BPF_MOV64_IMM(BPF_REG_0, 0),
12050 			BPF_EXIT_INSN(),
12051 
12052 			/* subprog 1 */
12053 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12054 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
12055 			/* 1st lookup from map */
12056 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12057 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12058 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12059 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12060 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12061 				     BPF_FUNC_map_lookup_elem),
12062 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12063 			BPF_MOV64_IMM(BPF_REG_8, 0),
12064 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12065 			/* write map_value_ptr into stack frame of main prog at fp-8 */
12066 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12067 			BPF_MOV64_IMM(BPF_REG_8, 1),
12068 
12069 			/* 2nd lookup from map */
12070 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
12071 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12072 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12073 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
12074 				     BPF_FUNC_map_lookup_elem),
12075 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12076 			BPF_MOV64_IMM(BPF_REG_9, 0),
12077 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12078 			/* write map_value_ptr into stack frame of main prog at fp-16 */
12079 			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
12080 			BPF_MOV64_IMM(BPF_REG_9, 1),
12081 
12082 			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
12083 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
12084 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
12085 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
12086 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
12087 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),  /* 34 */
12088 			BPF_EXIT_INSN(),
12089 
12090 			/* subprog 2 */
12091 			/* if arg2 == 1 do *arg1 = 0 */
12092 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
12093 			/* fetch map_value_ptr from the stack of this function */
12094 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
12095 			/* write into map value */
12096 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12097 
12098 			/* if arg4 == 1 do *arg3 = 0 */
12099 			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
12100 			/* fetch map_value_ptr from the stack of this function */
12101 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
12102 			/* write into map value */
12103 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12104 			BPF_EXIT_INSN(),
12105 		},
12106 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12107 		.fixup_map_hash_8b = { 12, 22 },
12108 		.result = ACCEPT,
12109 	},
12110 	{
12111 		"calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
12112 		.insns = {
12113 			/* main prog */
12114 			/* pass fp-16, fp-8 into a function */
12115 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
12116 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
12117 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12118 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
12119 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
12120 			BPF_MOV64_IMM(BPF_REG_0, 0),
12121 			BPF_EXIT_INSN(),
12122 
12123 			/* subprog 1 */
12124 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12125 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
12126 			/* 1st lookup from map */
12127 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
12128 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12129 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
12130 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12131 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12132 				     BPF_FUNC_map_lookup_elem),
12133 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12134 			BPF_MOV64_IMM(BPF_REG_8, 0),
12135 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12136 			/* write map_value_ptr into stack frame of main prog at fp-8 */
12137 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12138 			BPF_MOV64_IMM(BPF_REG_8, 1),
12139 
12140 			/* 2nd lookup from map */
12141 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12142 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
12143 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12144 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12145 				     BPF_FUNC_map_lookup_elem),
12146 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12147 			BPF_MOV64_IMM(BPF_REG_9, 0), /* 26 */
12148 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12149 			/* write map_value_ptr into stack frame of main prog at fp-16 */
12150 			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
12151 			BPF_MOV64_IMM(BPF_REG_9, 1),
12152 
12153 			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
12154 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
12155 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
12156 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
12157 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
12158 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), /* 34 */
12159 			BPF_JMP_IMM(BPF_JA, 0, 0, -30),
12160 
12161 			/* subprog 2 */
12162 			/* if arg2 == 1 do *arg1 = 0 */
12163 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
12164 			/* fetch map_value_ptr from the stack of this function */
12165 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
12166 			/* write into map value */
12167 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12168 
12169 			/* if arg4 == 1 do *arg3 = 0 */
12170 			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
12171 			/* fetch map_value_ptr from the stack of this function */
12172 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
12173 			/* write into map value; the 8-byte store at off=2 overflows value_size=8 */
12174 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
12175 			BPF_JMP_IMM(BPF_JA, 0, 0, -8),
12176 		},
12177 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12178 		.fixup_map_hash_8b = { 12, 22 },
12179 		.result = REJECT,
12180 		.errstr = "invalid access to map value, value_size=8 off=2 size=8",
12181 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12182 	},
12183 	{
12184 		"calls: two calls that receive map_value_ptr_or_null via arg. test1",
12185 		.insns = {
12186 			/* main prog */
12187 			/* pass fp-16, fp-8 into a function */
12188 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
12189 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
12190 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12191 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
12192 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12193 			BPF_MOV64_IMM(BPF_REG_0, 0),
12194 			BPF_EXIT_INSN(),
12195 
12196 			/* subprog 1 */
12197 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12198 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
12199 			/* 1st lookup from map */
12200 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12201 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12202 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12203 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12204 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12205 				     BPF_FUNC_map_lookup_elem),
12206 			/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
12207 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12208 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12209 			BPF_MOV64_IMM(BPF_REG_8, 0),
12210 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12211 			BPF_MOV64_IMM(BPF_REG_8, 1),
12212 
12213 			/* 2nd lookup from map */
12214 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12215 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12216 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12217 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12218 				     BPF_FUNC_map_lookup_elem),
12219 			/* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
12220 			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
12221 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12222 			BPF_MOV64_IMM(BPF_REG_9, 0),
12223 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12224 			BPF_MOV64_IMM(BPF_REG_9, 1),
12225 
12226 			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
12227 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
12228 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
12229 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
12230 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
12231 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12232 			BPF_EXIT_INSN(),
12233 
12234 			/* subprog 2 */
12235 			/* if arg2 == 1 do *arg1 = 0 */
12236 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
12237 			/* fetch map_value_ptr from the stack of this function */
12238 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
12239 			/* write into map value */
12240 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12241 
12242 			/* if arg4 == 1 do *arg3 = 0 */
12243 			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
12244 			/* fetch map_value_ptr from the stack of this function */
12245 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
12246 			/* write into map value */
12247 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12248 			BPF_EXIT_INSN(),
12249 		},
12250 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12251 		.fixup_map_hash_8b = { 12, 22 },
12252 		.result = ACCEPT,
12253 	},
12254 	{
12255 		"calls: two calls that receive map_value_ptr_or_null via arg. test2",
12256 		.insns = {
12257 			/* main prog */
12258 			/* pass fp-16, fp-8 into a function */
12259 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
12260 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
12261 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12262 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
12263 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12264 			BPF_MOV64_IMM(BPF_REG_0, 0),
12265 			BPF_EXIT_INSN(),
12266 
12267 			/* subprog 1 */
12268 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12269 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
12270 			/* 1st lookup from map */
12271 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12272 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12273 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12274 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12275 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12276 				     BPF_FUNC_map_lookup_elem),
12277 			/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
12278 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12279 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12280 			BPF_MOV64_IMM(BPF_REG_8, 0),
12281 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12282 			BPF_MOV64_IMM(BPF_REG_8, 1),
12283 
12284 			/* 2nd lookup from map */
12285 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12286 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12287 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12288 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12289 				     BPF_FUNC_map_lookup_elem),
12290 			/* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
12291 			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
12292 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12293 			BPF_MOV64_IMM(BPF_REG_9, 0),
12294 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12295 			BPF_MOV64_IMM(BPF_REG_9, 1),
12296 
12297 			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
12298 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
12299 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
12300 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
12301 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
12302 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12303 			BPF_EXIT_INSN(),
12304 
12305 			/* subprog 2 */
12306 			/* if arg2 == 1 do *arg1 = 0 */
12307 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
12308 			/* fetch map_value_ptr from the stack of this function */
12309 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
12310 			/* write into map value */
12311 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12312 
12313 			/* if arg4 == 0 do *arg3 = 0 */
12314 			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
12315 			/* fetch map_value_ptr from the stack of this function */
12316 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
12317 			/* write into map value */
12318 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12319 			BPF_EXIT_INSN(),
12320 		},
12321 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12322 		.fixup_map_hash_8b = { 12, 22 },
12323 		.result = REJECT,
12324 		.errstr = "R0 invalid mem access 'inv'",
12325 	},
12326 	{
12327 		"calls: pkt_ptr spill into caller stack",
12328 		.insns = {
12329 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12330 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12331 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12332 			BPF_EXIT_INSN(),
12333 
12334 			/* subprog 1 */
12335 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12336 				    offsetof(struct __sk_buff, data)),
12337 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12338 				    offsetof(struct __sk_buff, data_end)),
12339 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12340 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12341 			/* spill unchecked pkt_ptr into stack of caller */
12342 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12343 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
12344 			/* now the pkt range is verified, read pkt_ptr from stack */
12345 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
12346 			/* write 4 bytes into packet */
12347 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12348 			BPF_EXIT_INSN(),
12349 		},
12350 		.result = ACCEPT,
12351 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12352 		.retval = POINTER_VALUE,
12353 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12354 	},
12355 	{
12356 		"calls: pkt_ptr spill into caller stack 2",
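		/* The caller writes through the spilled pkt_ptr on both
		 * paths, including the one where the callee's range check
		 * failed, so the packet access is rejected.
		 */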
12357 		.insns = {
12358 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12359 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12360 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12361 			/* Marking is still kept, but it is not safe in all cases. */
12362 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12363 			BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
12364 			BPF_EXIT_INSN(),
12365 
12366 			/* subprog 1 */
12367 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12368 				    offsetof(struct __sk_buff, data)),
12369 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12370 				    offsetof(struct __sk_buff, data_end)),
12371 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12372 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12373 			/* spill unchecked pkt_ptr into stack of caller */
12374 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12375 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
12376 			/* now the pkt range is verified, read pkt_ptr from stack */
12377 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
12378 			/* write 4 bytes into packet */
12379 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12380 			BPF_EXIT_INSN(),
12381 		},
12382 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12383 		.errstr = "invalid access to packet",
12384 		.result = REJECT,
12385 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12386 	},
12387 	{
12388 		"calls: pkt_ptr spill into caller stack 3",
12389 		.insns = {
12390 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12391 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12392 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
12393 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
12394 			/* Marking is still kept and safe here. */
12395 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12396 			BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
12397 			BPF_EXIT_INSN(),
12398 
12399 			/* subprog 1 */
12400 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12401 				    offsetof(struct __sk_buff, data)),
12402 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12403 				    offsetof(struct __sk_buff, data_end)),
12404 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12405 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12406 			/* spill unchecked pkt_ptr into stack of caller */
12407 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12408 			BPF_MOV64_IMM(BPF_REG_5, 0),
12409 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
12410 			BPF_MOV64_IMM(BPF_REG_5, 1),
12411 			/* now the pkt range is verified, read pkt_ptr from stack */
12412 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
12413 			/* write 4 bytes into packet */
12414 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12415 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12416 			BPF_EXIT_INSN(),
12417 		},
12418 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12419 		.result = ACCEPT,
12420 		.retval = 1,
12421 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12422 	},
12423 	{
12424 		"calls: pkt_ptr spill into caller stack 4",
12425 		.insns = {
12426 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12427 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12428 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
12429 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
12430 			/* Check marking propagated. */
12431 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12432 			BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
12433 			BPF_EXIT_INSN(),
12434 
12435 			/* subprog 1 */
12436 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12437 				    offsetof(struct __sk_buff, data)),
12438 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12439 				    offsetof(struct __sk_buff, data_end)),
12440 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12441 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12442 			/* spill unchecked pkt_ptr into stack of caller */
12443 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12444 			BPF_MOV64_IMM(BPF_REG_5, 0),
12445 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
12446 			BPF_MOV64_IMM(BPF_REG_5, 1),
12447 			/* don't read back pkt_ptr from stack here */
12448 			/* write 4 bytes into packet */
12449 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12450 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12451 			BPF_EXIT_INSN(),
12452 		},
12453 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12454 		.result = ACCEPT,
12455 		.retval = 1,
12456 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12457 	},
12458 	{
12459 		"calls: pkt_ptr spill into caller stack 5",
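		/* fp-8 first holds the spilled ctx pointer and is only
		 * overwritten with a checked pkt_ptr on one path, so the
		 * caller's load sees two different pointer types at the
		 * same insn.
		 */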
12460 		.insns = {
12461 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12462 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12463 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
12464 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12465 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12466 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
12467 			BPF_EXIT_INSN(),
12468 
12469 			/* subprog 1 */
12470 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12471 				    offsetof(struct __sk_buff, data)),
12472 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12473 				    offsetof(struct __sk_buff, data_end)),
12474 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12475 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12476 			BPF_MOV64_IMM(BPF_REG_5, 0),
12477 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
12478 			/* spill checked pkt_ptr into stack of caller */
12479 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12480 			BPF_MOV64_IMM(BPF_REG_5, 1),
12481 			/* don't read back pkt_ptr from stack here */
12482 			/* write 4 bytes into packet */
12483 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12484 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12485 			BPF_EXIT_INSN(),
12486 		},
12487 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12488 		.errstr = "same insn cannot be used with different",
12489 		.result = REJECT,
12490 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12491 	},
12492 	{
12493 		"calls: pkt_ptr spill into caller stack 6",
12494 		.insns = {
12495 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12496 				    offsetof(struct __sk_buff, data_end)),
12497 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12498 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12499 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12500 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12501 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12502 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
12503 			BPF_EXIT_INSN(),
12504 
12505 			/* subprog 1 */
12506 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12507 				    offsetof(struct __sk_buff, data)),
12508 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12509 				    offsetof(struct __sk_buff, data_end)),
12510 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12511 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12512 			BPF_MOV64_IMM(BPF_REG_5, 0),
12513 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
12514 			/* spill checked pkt_ptr into stack of caller */
12515 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12516 			BPF_MOV64_IMM(BPF_REG_5, 1),
12517 			/* don't read back pkt_ptr from stack here */
12518 			/* write 4 bytes into packet */
12519 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12520 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12521 			BPF_EXIT_INSN(),
12522 		},
12523 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12524 		.errstr = "R4 invalid mem access",
12525 		.result = REJECT,
12526 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12527 	},
12528 	{
12529 		"calls: pkt_ptr spill into caller stack 7",
12530 		.insns = {
12531 			BPF_MOV64_IMM(BPF_REG_2, 0),
12532 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12533 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12534 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12535 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12536 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12537 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
12538 			BPF_EXIT_INSN(),
12539 
12540 			/* subprog 1 */
12541 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12542 				    offsetof(struct __sk_buff, data)),
12543 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12544 				    offsetof(struct __sk_buff, data_end)),
12545 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12546 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12547 			BPF_MOV64_IMM(BPF_REG_5, 0),
12548 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
12549 			/* spill checked pkt_ptr into stack of caller */
12550 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12551 			BPF_MOV64_IMM(BPF_REG_5, 1),
12552 			/* don't read back pkt_ptr from stack here */
12553 			/* write 4 bytes into packet */
12554 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12555 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12556 			BPF_EXIT_INSN(),
12557 		},
12558 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12559 		.errstr = "R4 invalid mem access",
12560 		.result = REJECT,
12561 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12562 	},
12563 	{
12564 		"calls: pkt_ptr spill into caller stack 8",
12565 		.insns = {
12566 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12567 				    offsetof(struct __sk_buff, data)),
12568 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12569 				    offsetof(struct __sk_buff, data_end)),
12570 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12571 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12572 			BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
12573 			BPF_EXIT_INSN(),
12574 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12575 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12576 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12577 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12578 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12579 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
12580 			BPF_EXIT_INSN(),
12581 
12582 			/* subprog 1 */
12583 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12584 				    offsetof(struct __sk_buff, data)),
12585 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12586 				    offsetof(struct __sk_buff, data_end)),
12587 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12588 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12589 			BPF_MOV64_IMM(BPF_REG_5, 0),
12590 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
12591 			/* spill checked pkt_ptr into stack of caller */
12592 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12593 			BPF_MOV64_IMM(BPF_REG_5, 1),
12594 			/* don't read back pkt_ptr from stack here */
12595 			/* write 4 bytes into packet */
12596 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12597 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12598 			BPF_EXIT_INSN(),
12599 		},
12600 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12601 		.result = ACCEPT,
12602 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12603 	},
12604 	{
12605 		"calls: pkt_ptr spill into caller stack 9",
12606 		.insns = {
12607 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12608 				    offsetof(struct __sk_buff, data)),
12609 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12610 				    offsetof(struct __sk_buff, data_end)),
12611 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12612 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12613 			BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
12614 			BPF_EXIT_INSN(),
12615 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12616 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12617 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12618 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12619 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12620 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
12621 			BPF_EXIT_INSN(),
12622 
12623 			/* subprog 1 */
12624 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12625 				    offsetof(struct __sk_buff, data)),
12626 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12627 				    offsetof(struct __sk_buff, data_end)),
12628 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12629 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12630 			BPF_MOV64_IMM(BPF_REG_5, 0),
12631 			/* spill unchecked pkt_ptr into stack of caller */
12632 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12633 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
12634 			BPF_MOV64_IMM(BPF_REG_5, 1),
12635 			/* don't read back pkt_ptr from stack here */
12636 			/* write 4 bytes into packet */
12637 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12638 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12639 			BPF_EXIT_INSN(),
12640 		},
12641 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12642 		.errstr = "invalid access to packet",
12643 		.result = REJECT,
12644 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12645 	},
12646 	{
12647 		"calls: caller stack init to zero or map_value_or_null",
12648 		.insns = {
12649 			BPF_MOV64_IMM(BPF_REG_0, 0),
12650 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
12651 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12652 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12653 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
12654 			/* fetch map_value_or_null or const_zero from stack */
12655 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
12656 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
12657 			/* store into map_value */
12658 			BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
12659 			BPF_EXIT_INSN(),
12660 
12661 			/* subprog 1 */
12662 			/* if (ctx == 0) return; */
12663 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
12664 			/* else bpf_map_lookup() and *(fp - 8) = r0 */
12665 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
12666 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12667 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12668 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12669 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12670 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12671 				     BPF_FUNC_map_lookup_elem),
12672 			/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
12673 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12674 			BPF_EXIT_INSN(),
12675 		},
12676 		.fixup_map_hash_8b = { 13 },
12677 		.result = ACCEPT,
12678 		.prog_type = BPF_PROG_TYPE_XDP,
12679 	},
12680 	{
12681 		"calls: stack init to zero and pruning",
12682 		.insns = {
12683 			/* first make allocated_stack 16 bytes */
12684 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
12685 			/* now fork the execution such that the false branch
12686 			 * of the JGT insn is verified second and skips the zero
12687 			 * init of the fp-8 stack slot. If stack liveness marking
12688 			 * misses live_read marks from the map_lookup call
12689 			 * processing, then pruning will incorrectly assume
12690 			 * that the fp-8 stack slot was unused in the fall-through
12691 			 * branch and will incorrectly accept the program.
12692 			 */
12693 			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
12694 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12695 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
12696 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12697 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12698 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12699 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12700 				     BPF_FUNC_map_lookup_elem),
12701 			BPF_EXIT_INSN(),
12702 		},
12703 		.fixup_map_hash_48b = { 6 },
12704 		.errstr = "invalid indirect read from stack off -8+0 size 8",
12705 		.result = REJECT,
12706 		.prog_type = BPF_PROG_TYPE_XDP,
12707 	},
12708 	{
12709 		"calls: two calls returning different map pointers for lookup (hash, array)",
12710 		.insns = {
12711 			/* main prog */
12712 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
12713 			BPF_CALL_REL(11),
12714 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12715 			BPF_CALL_REL(12),
12716 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
12717 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12718 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12719 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12720 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12721 				     BPF_FUNC_map_lookup_elem),
12722 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
12723 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
12724 				   offsetof(struct test_val, foo)),
12725 			BPF_MOV64_IMM(BPF_REG_0, 1),
12726 			BPF_EXIT_INSN(),
12727 			/* subprog 1 */
12728 			BPF_LD_MAP_FD(BPF_REG_0, 0),
12729 			BPF_EXIT_INSN(),
12730 			/* subprog 2 */
12731 			BPF_LD_MAP_FD(BPF_REG_0, 0),
12732 			BPF_EXIT_INSN(),
12733 		},
12734 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12735 		.fixup_map_hash_48b = { 13 },
12736 		.fixup_map_array_48b = { 16 },
12737 		.result = ACCEPT,
12738 		.retval = 1,
12739 	},
12740 	{
12741 		"calls: two calls returning different map pointers for lookup (hash, map in map)",
12742 		.insns = {
12743 			/* main prog */
12744 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
12745 			BPF_CALL_REL(11),
12746 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12747 			BPF_CALL_REL(12),
12748 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
12749 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12750 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12751 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12752 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12753 				     BPF_FUNC_map_lookup_elem),
12754 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
12755 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
12756 				   offsetof(struct test_val, foo)),
12757 			BPF_MOV64_IMM(BPF_REG_0, 1),
12758 			BPF_EXIT_INSN(),
12759 			/* subprog 1 */
12760 			BPF_LD_MAP_FD(BPF_REG_0, 0),
12761 			BPF_EXIT_INSN(),
12762 			/* subprog 2 */
12763 			BPF_LD_MAP_FD(BPF_REG_0, 0),
12764 			BPF_EXIT_INSN(),
12765 		},
12766 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12767 		.fixup_map_in_map = { 16 },
12768 		.fixup_map_array_48b = { 13 },
12769 		.result = REJECT,
12770 		.errstr = "R0 invalid mem access 'map_ptr'",
12771 	},
12772 	{
12773 		"cond: two branches returning different map pointers for lookup (tail, tail)",
12774 		.insns = {
12775 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
12776 				    offsetof(struct __sk_buff, mark)),
12777 			BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 3),
12778 			BPF_LD_MAP_FD(BPF_REG_2, 0),
12779 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12780 			BPF_LD_MAP_FD(BPF_REG_2, 0),
12781 			BPF_MOV64_IMM(BPF_REG_3, 7),
12782 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12783 				     BPF_FUNC_tail_call),
12784 			BPF_MOV64_IMM(BPF_REG_0, 1),
12785 			BPF_EXIT_INSN(),
12786 		},
12787 		.fixup_prog1 = { 5 },
12788 		.fixup_prog2 = { 2 },
12789 		.result_unpriv = REJECT,
12790 		.errstr_unpriv = "tail_call abusing map_ptr",
12791 		.result = ACCEPT,
12792 		.retval = 42,
12793 	},
12794 	{
12795 		"cond: two branches returning same map pointers for lookup (tail, tail)",
12796 		.insns = {
12797 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
12798 				    offsetof(struct __sk_buff, mark)),
12799 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 3),
12800 			BPF_LD_MAP_FD(BPF_REG_2, 0),
12801 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12802 			BPF_LD_MAP_FD(BPF_REG_2, 0),
12803 			BPF_MOV64_IMM(BPF_REG_3, 7),
12804 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12805 				     BPF_FUNC_tail_call),
12806 			BPF_MOV64_IMM(BPF_REG_0, 1),
12807 			BPF_EXIT_INSN(),
12808 		},
12809 		.fixup_prog2 = { 2, 5 },
12810 		.result_unpriv = ACCEPT,
12811 		.result = ACCEPT,
12812 		.retval = 42,
12813 	},
12814 	{
12815 		"search pruning: all branches should be verified (nop operation)",
12816 		.insns = {
12817 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12818 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12819 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
12820 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12821 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
12822 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
12823 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
12824 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
12825 			BPF_MOV64_IMM(BPF_REG_4, 0),
12826 			BPF_JMP_A(1),
12827 			BPF_MOV64_IMM(BPF_REG_4, 1),
12828 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
12829 			BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
12830 			BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
12831 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_5, 0, 2),
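			/* the path where fp-16 holds 1 falls through to a
			 * store through scalar r6, which must be rejected
			 */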
12832 			BPF_MOV64_IMM(BPF_REG_6, 0),
12833 			BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead),
12834 			BPF_EXIT_INSN(),
12835 		},
12836 		.fixup_map_hash_8b = { 3 },
12837 		.errstr = "R6 invalid mem access 'inv'",
12838 		.result = REJECT,
12839 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
12840 	},
12841 	{
12842 		"search pruning: all branches should be verified (invalid stack access)",
12843 		.insns = {
12844 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12845 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12846 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
12847 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12848 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
12849 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
12850 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
12851 			BPF_MOV64_IMM(BPF_REG_4, 0),
12852 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
12853 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
12854 			BPF_JMP_A(1),
12855 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -24),
12856 			BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
12857 			BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
12858 			BPF_EXIT_INSN(),
12859 		},
12860 		.fixup_map_hash_8b = { 3 },
12861 		.errstr = "invalid read from stack off -16+0 size 8",
12862 		.result = REJECT,
12863 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
12864 	},
12865 	{
12866 		"jit: lsh, rsh, arsh by 1",
12867 		.insns = {
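			/* shift 0xff up to 0x3fc, back down to 0xff, then
			 * arsh to 0x7f; any mismatch exits with r0 = 1
			 */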
12868 			BPF_MOV64_IMM(BPF_REG_0, 1),
12869 			BPF_MOV64_IMM(BPF_REG_1, 0xff),
12870 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 1),
12871 			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 1),
12872 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x3fc, 1),
12873 			BPF_EXIT_INSN(),
12874 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 1),
12875 			BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 1),
12876 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0xff, 1),
12877 			BPF_EXIT_INSN(),
12878 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 1),
12879 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x7f, 1),
12880 			BPF_EXIT_INSN(),
12881 			BPF_MOV64_IMM(BPF_REG_0, 2),
12882 			BPF_EXIT_INSN(),
12883 		},
12884 		.result = ACCEPT,
12885 		.retval = 2,
12886 	},
12887 	{
12888 		"jit: mov32 for ldimm64, 1",
12889 		.insns = {
12890 			BPF_MOV64_IMM(BPF_REG_0, 2),
12891 			BPF_LD_IMM64(BPF_REG_1, 0xfeffffffffffffffULL),
12892 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32),
12893 			BPF_LD_IMM64(BPF_REG_2, 0xfeffffffULL),
12894 			BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
12895 			BPF_MOV64_IMM(BPF_REG_0, 1),
12896 			BPF_EXIT_INSN(),
12897 		},
12898 		.result = ACCEPT,
12899 		.retval = 2,
12900 	},
12901 	{
12902 		"jit: mov32 for ldimm64, 2",
12903 		.insns = {
12904 			BPF_MOV64_IMM(BPF_REG_0, 1),
12905 			BPF_LD_IMM64(BPF_REG_1, 0x1ffffffffULL),
12906 			BPF_LD_IMM64(BPF_REG_2, 0xffffffffULL),
12907 			BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
12908 			BPF_MOV64_IMM(BPF_REG_0, 2),
12909 			BPF_EXIT_INSN(),
12910 		},
12911 		.result = ACCEPT,
12912 		.retval = 2,
12913 	},
12914 	{
12915 		"jit: various mul tests",
12916 		.insns = {
12917 			BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
12918 			BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
12919 			BPF_LD_IMM64(BPF_REG_1, 0xefefefULL),
12920 			BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
12921 			BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
12922 			BPF_MOV64_IMM(BPF_REG_0, 1),
12923 			BPF_EXIT_INSN(),
12924 			BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
12925 			BPF_ALU64_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
12926 			BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
12927 			BPF_MOV64_IMM(BPF_REG_0, 1),
12928 			BPF_EXIT_INSN(),
12929 			BPF_MOV32_REG(BPF_REG_2, BPF_REG_2),
12930 			BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
12931 			BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
12932 			BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
12933 			BPF_MOV64_IMM(BPF_REG_0, 1),
12934 			BPF_EXIT_INSN(),
12935 			BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
12936 			BPF_ALU32_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
12937 			BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
12938 			BPF_MOV64_IMM(BPF_REG_0, 1),
12939 			BPF_EXIT_INSN(),
12940 			BPF_LD_IMM64(BPF_REG_0, 0x952a7bbcULL),
12941 			BPF_LD_IMM64(BPF_REG_1, 0xfefefeULL),
12942 			BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
12943 			BPF_ALU32_REG(BPF_MUL, BPF_REG_2, BPF_REG_1),
12944 			BPF_JMP_REG(BPF_JEQ, BPF_REG_2, BPF_REG_0, 2),
12945 			BPF_MOV64_IMM(BPF_REG_0, 1),
12946 			BPF_EXIT_INSN(),
12947 			BPF_MOV64_IMM(BPF_REG_0, 2),
12948 			BPF_EXIT_INSN(),
12949 		},
12950 		.result = ACCEPT,
12951 		.retval = 2,
12952 	},
12953 	{
12954 		"xadd/w check unaligned stack",
12955 		.insns = {
12956 			BPF_MOV64_IMM(BPF_REG_0, 1),
12957 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
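			/* word-sized xadd at fp-7 is not 4-byte aligned */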
12958 			BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -7),
12959 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
12960 			BPF_EXIT_INSN(),
12961 		},
12962 		.result = REJECT,
12963 		.errstr = "misaligned stack access off",
12964 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12965 	},
12966 	{
12967 		"xadd/w check unaligned map",
12968 		.insns = {
12969 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12970 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12971 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12972 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12973 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12974 				     BPF_FUNC_map_lookup_elem),
12975 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
12976 			BPF_EXIT_INSN(),
12977 			BPF_MOV64_IMM(BPF_REG_1, 1),
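			/* word-sized xadd at value + 3 is not 4-byte aligned */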
12978 			BPF_STX_XADD(BPF_W, BPF_REG_0, BPF_REG_1, 3),
12979 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3),
12980 			BPF_EXIT_INSN(),
12981 		},
12982 		.fixup_map_hash_8b = { 3 },
12983 		.result = REJECT,
12984 		.errstr = "misaligned value access off",
12985 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12986 	},
12987 	{
12988 		"xadd/w check unaligned pkt",
12989 		.insns = {
12990 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12991 				    offsetof(struct xdp_md, data)),
12992 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12993 				    offsetof(struct xdp_md, data_end)),
12994 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
12995 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
12996 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 2),
12997 			BPF_MOV64_IMM(BPF_REG_0, 99),
12998 			BPF_JMP_IMM(BPF_JA, 0, 0, 6),
12999 			BPF_MOV64_IMM(BPF_REG_0, 1),
13000 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
13001 			BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0),
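			/* xadd into packet memory is rejected regardless of alignment */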
13002 			BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 1),
13003 			BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 2),
13004 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1),
13005 			BPF_EXIT_INSN(),
13006 		},
13007 		.result = REJECT,
13008 		.errstr = "BPF_XADD stores into R2 pkt is not allowed",
13009 		.prog_type = BPF_PROG_TYPE_XDP,
13010 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
13011 	},
13012 	{
13013 		"xadd/w check whether src/dst got mangled, 1",
13014 		.insns = {
13015 			BPF_MOV64_IMM(BPF_REG_0, 1),
13016 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13017 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
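			/* two xadds bump fp-8 from 1 to 3; src r0 and dst r10 must not be clobbered */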
13018 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
13019 			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
13020 			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
13021 			BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
13022 			BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
13023 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
13024 			BPF_EXIT_INSN(),
13025 			BPF_MOV64_IMM(BPF_REG_0, 42),
13026 			BPF_EXIT_INSN(),
13027 		},
13028 		.result = ACCEPT,
13029 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13030 		.retval = 3,
13031 	},
13032 	{
13033 		"xadd/w check whether src/dst got mangled, 2",
13034 		.insns = {
13035 			BPF_MOV64_IMM(BPF_REG_0, 1),
13036 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13037 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
13038 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
13039 			BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
13040 			BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
13041 			BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
13042 			BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
13043 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
13044 			BPF_EXIT_INSN(),
13045 			BPF_MOV64_IMM(BPF_REG_0, 42),
13046 			BPF_EXIT_INSN(),
13047 		},
13048 		.result = ACCEPT,
13049 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13050 		.retval = 3,
13051 	},
13052 	{
13053 		"bpf_get_stack return R0 within range",
13054 		.insns = {
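			/* bounds-check the return value of the first
			 * bpf_get_stack() and use it to derive the buffer
			 * and size of a second call into the same map value
			 */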
13055 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13056 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
13057 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
13058 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
13059 			BPF_LD_MAP_FD(BPF_REG_1, 0),
13060 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13061 				     BPF_FUNC_map_lookup_elem),
13062 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 28),
13063 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
13064 			BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)),
13065 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13066 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
13067 			BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)),
13068 			BPF_MOV64_IMM(BPF_REG_4, 256),
13069 			BPF_EMIT_CALL(BPF_FUNC_get_stack),
13070 			BPF_MOV64_IMM(BPF_REG_1, 0),
13071 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
13072 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32),
13073 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_8, 32),
13074 			BPF_JMP_REG(BPF_JSLT, BPF_REG_1, BPF_REG_8, 16),
13075 			BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8),
13076 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
13077 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_8),
13078 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_9),
13079 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
13080 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 32),
13081 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
13082 			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_1),
13083 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13084 			BPF_MOV64_IMM(BPF_REG_5, sizeof(struct test_val)),
13085 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_5),
13086 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 4),
13087 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13088 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_9),
13089 			BPF_MOV64_IMM(BPF_REG_4, 0),
13090 			BPF_EMIT_CALL(BPF_FUNC_get_stack),
13091 			BPF_EXIT_INSN(),
13092 		},
13093 		.fixup_map_hash_48b = { 4 },
13094 		.result = ACCEPT,
13095 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
13096 	},
13097 	{
13098 		"ld_abs: invalid op 1",
13099 		.insns = {
13100 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13101 			BPF_LD_ABS(BPF_DW, 0),
13102 			BPF_EXIT_INSN(),
13103 		},
13104 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13105 		.result = REJECT,
13106 		.errstr = "unknown opcode",
13107 	},
13108 	{
13109 		"ld_abs: invalid op 2",
13110 		.insns = {
13111 			BPF_MOV32_IMM(BPF_REG_0, 256),
13112 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13113 			BPF_LD_IND(BPF_DW, BPF_REG_0, 0),
13114 			BPF_EXIT_INSN(),
13115 		},
13116 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13117 		.result = REJECT,
13118 		.errstr = "unknown opcode",
13119 	},
13120 	{
13121 		"ld_abs: nmap reduced",
13122 		.insns = {
13123 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13124 			BPF_LD_ABS(BPF_H, 12),
13125 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 28),
13126 			BPF_LD_ABS(BPF_H, 12),
13127 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 26),
13128 			BPF_MOV32_IMM(BPF_REG_0, 18),
13129 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -64),
13130 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -64),
13131 			BPF_LD_IND(BPF_W, BPF_REG_7, 14),
13132 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -60),
13133 			BPF_MOV32_IMM(BPF_REG_0, 280971478),
13134 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
13135 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
13136 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -60),
13137 			BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
13138 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 15),
13139 			BPF_LD_ABS(BPF_H, 12),
13140 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 13),
13141 			BPF_MOV32_IMM(BPF_REG_0, 22),
13142 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
13143 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
13144 			BPF_LD_IND(BPF_H, BPF_REG_7, 14),
13145 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -52),
13146 			BPF_MOV32_IMM(BPF_REG_0, 17366),
13147 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -48),
13148 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -48),
13149 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -52),
13150 			BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
13151 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
13152 			BPF_MOV32_IMM(BPF_REG_0, 256),
13153 			BPF_EXIT_INSN(),
13154 			BPF_MOV32_IMM(BPF_REG_0, 0),
13155 			BPF_EXIT_INSN(),
13156 		},
13157 		.data = {
13158 			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0,
13159 			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
13160 			0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
13161 		},
13162 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13163 		.result = ACCEPT,
13164 		.retval = 256,
13165 	},
13166 	{
13167 		"ld_abs: div + abs, test 1",
13168 		.insns = {
13169 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
13170 			BPF_LD_ABS(BPF_B, 3),
13171 			BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
13172 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
13173 			BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
13174 			BPF_LD_ABS(BPF_B, 4),
13175 			BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
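			/* r8 = data[3] / 2 + data[4] = 70, so offset -70 reads data[0] = 10 */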
13176 			BPF_LD_IND(BPF_B, BPF_REG_8, -70),
13177 			BPF_EXIT_INSN(),
13178 		},
13179 		.data = {
13180 			10, 20, 30, 40, 50,
13181 		},
13182 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13183 		.result = ACCEPT,
13184 		.retval = 10,
13185 	},
13186 	{
13187 		"ld_abs: div + abs, test 2",
13188 		.insns = {
13189 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
13190 			BPF_LD_ABS(BPF_B, 3),
13191 			BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
13192 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
13193 			BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
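			/* ld_abs beyond the 5-byte packet aborts the program with return 0 */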
13194 			BPF_LD_ABS(BPF_B, 128),
13195 			BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
13196 			BPF_LD_IND(BPF_B, BPF_REG_8, -70),
13197 			BPF_EXIT_INSN(),
13198 		},
13199 		.data = {
13200 			10, 20, 30, 40, 50,
13201 		},
13202 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13203 		.result = ACCEPT,
13204 		.retval = 0,
13205 	},
13206 	{
13207 		"ld_abs: div + abs, test 3",
13208 		.insns = {
13209 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
13210 			BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
13211 			BPF_LD_ABS(BPF_B, 3),
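			/* runtime division by zero yields 0 rather than trapping */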
13212 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
13213 			BPF_EXIT_INSN(),
13214 		},
13215 		.data = {
13216 			10, 20, 30, 40, 50,
13217 		},
13218 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13219 		.result = ACCEPT,
13220 		.retval = 0,
13221 	},
13222 	{
13223 		"ld_abs: div + abs, test 4",
13224 		.insns = {
13225 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
13226 			BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
13227 			BPF_LD_ABS(BPF_B, 256),
13228 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
13229 			BPF_EXIT_INSN(),
13230 		},
13231 		.data = {
13232 			10, 20, 30, 40, 50,
13233 		},
13234 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13235 		.result = ACCEPT,
13236 		.retval = 0,
13237 	},
13238 	{
13239 		"ld_abs: vlan + abs, test 1",
13240 		.insns = { },
13241 		.data = {
13242 			0x34,
13243 		},
13244 		.fill_helper = bpf_fill_ld_abs_vlan_push_pop,
13245 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13246 		.result = ACCEPT,
13247 		.retval = 0xbef,
13248 	},
13249 	{
13250 		"ld_abs: vlan + abs, test 2",
13251 		.insns = {
13252 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13253 			BPF_LD_ABS(BPF_B, 0),
13254 			BPF_LD_ABS(BPF_H, 0),
13255 			BPF_LD_ABS(BPF_W, 0),
13256 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
13257 			BPF_MOV64_IMM(BPF_REG_6, 0),
13258 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13259 			BPF_MOV64_IMM(BPF_REG_2, 1),
13260 			BPF_MOV64_IMM(BPF_REG_3, 2),
13261 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13262 				     BPF_FUNC_skb_vlan_push),
13263 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
13264 			BPF_LD_ABS(BPF_B, 0),
13265 			BPF_LD_ABS(BPF_H, 0),
13266 			BPF_LD_ABS(BPF_W, 0),
13267 			BPF_MOV64_IMM(BPF_REG_0, 42),
13268 			BPF_EXIT_INSN(),
13269 		},
13270 		.data = {
13271 			0x34,
13272 		},
13273 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13274 		.result = ACCEPT,
13275 		.retval = 42,
13276 	},
13277 	{
13278 		"ld_abs: jump around ld_abs",
13279 		.insns = { },
13280 		.data = {
13281 			10, 11,
13282 		},
13283 		.fill_helper = bpf_fill_jump_around_ld_abs,
13284 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13285 		.result = ACCEPT,
13286 		.retval = 10,
13287 	},
13288 	{
13289 		"ld_dw: xor semi-random 64 bit imms, test 1",
13290 		.insns = { },
13291 		.data = { },
13292 		.fill_helper = bpf_fill_rand_ld_dw,
13293 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13294 		.result = ACCEPT,
13295 		.retval = 4090,
13296 	},
13297 	{
13298 		"ld_dw: xor semi-random 64 bit imms, test 2",
13299 		.insns = { },
13300 		.data = { },
13301 		.fill_helper = bpf_fill_rand_ld_dw,
13302 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13303 		.result = ACCEPT,
13304 		.retval = 2047,
13305 	},
13306 	{
13307 		"ld_dw: xor semi-random 64 bit imms, test 3",
13308 		.insns = { },
13309 		.data = { },
13310 		.fill_helper = bpf_fill_rand_ld_dw,
13311 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13312 		.result = ACCEPT,
13313 		.retval = 511,
13314 	},
13315 	{
13316 		"ld_dw: xor semi-random 64 bit imms, test 4",
13317 		.insns = { },
13318 		.data = { },
13319 		.fill_helper = bpf_fill_rand_ld_dw,
13320 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13321 		.result = ACCEPT,
13322 		.retval = 5,
13323 	},
13324 	{
13325 		"pass unmodified ctx pointer to helper",
13326 		.insns = {
13327 			BPF_MOV64_IMM(BPF_REG_2, 0),
13328 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13329 				     BPF_FUNC_csum_update),
13330 			BPF_MOV64_IMM(BPF_REG_0, 0),
13331 			BPF_EXIT_INSN(),
13332 		},
13333 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13334 		.result = ACCEPT,
13335 	},
13336 	{
13337 		"reference tracking: leak potential reference",
13338 		.insns = {
13339 			BPF_SK_LOOKUP,
13340 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), /* leak reference */
13341 			BPF_EXIT_INSN(),
13342 		},
13343 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13344 		.errstr = "Unreleased reference",
13345 		.result = REJECT,
13346 	},
13347 	{
13348 		"reference tracking: leak potential reference on stack",
13349 		.insns = {
13350 			BPF_SK_LOOKUP,
13351 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13352 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13353 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
13354 			BPF_MOV64_IMM(BPF_REG_0, 0),
13355 			BPF_EXIT_INSN(),
13356 		},
13357 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13358 		.errstr = "Unreleased reference",
13359 		.result = REJECT,
13360 	},
13361 	{
13362 		"reference tracking: leak potential reference on stack 2",
13363 		.insns = {
13364 			BPF_SK_LOOKUP,
13365 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13366 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13367 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
13368 			BPF_MOV64_IMM(BPF_REG_0, 0),
13369 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
13370 			BPF_EXIT_INSN(),
13371 		},
13372 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13373 		.errstr = "Unreleased reference",
13374 		.result = REJECT,
13375 	},
13376 	{
13377 		"reference tracking: zero potential reference",
13378 		.insns = {
13379 			BPF_SK_LOOKUP,
13380 			BPF_MOV64_IMM(BPF_REG_0, 0), /* leak reference */
13381 			BPF_EXIT_INSN(),
13382 		},
13383 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13384 		.errstr = "Unreleased reference",
13385 		.result = REJECT,
13386 	},
13387 	{
13388 		"reference tracking: copy and zero potential references",
13389 		.insns = {
13390 			BPF_SK_LOOKUP,
13391 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
13392 			BPF_MOV64_IMM(BPF_REG_0, 0),
13393 			BPF_MOV64_IMM(BPF_REG_7, 0), /* leak reference */
13394 			BPF_EXIT_INSN(),
13395 		},
13396 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13397 		.errstr = "Unreleased reference",
13398 		.result = REJECT,
13399 	},
13400 	{
13401 		"reference tracking: release reference without check",
13402 		.insns = {
13403 			BPF_SK_LOOKUP,
13404 			/* reference in r0 may be NULL */
13405 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13406 			BPF_MOV64_IMM(BPF_REG_2, 0),
13407 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13408 			BPF_EXIT_INSN(),
13409 		},
13410 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13411 		.errstr = "type=sock_or_null expected=sock",
13412 		.result = REJECT,
13413 	},
13414 	{
13415 		"reference tracking: release reference",
13416 		.insns = {
13417 			BPF_SK_LOOKUP,
13418 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13419 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13420 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13421 			BPF_EXIT_INSN(),
13422 		},
13423 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13424 		.result = ACCEPT,
13425 	},
13426 	{
13427 		"reference tracking: release reference 2",
13428 		.insns = {
13429 			BPF_SK_LOOKUP,
13430 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13431 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
13432 			BPF_EXIT_INSN(),
13433 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13434 			BPF_EXIT_INSN(),
13435 		},
13436 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13437 		.result = ACCEPT,
13438 	},
13439 	{
13440 		"reference tracking: release reference twice",
13441 		.insns = {
13442 			BPF_SK_LOOKUP,
13443 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13444 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13445 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13446 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13447 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13448 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13449 			BPF_EXIT_INSN(),
13450 		},
13451 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13452 		.errstr = "type=inv expected=sock",
13453 		.result = REJECT,
13454 	},
13455 	{
13456 		"reference tracking: release reference twice inside branch",
13457 		.insns = {
13458 			BPF_SK_LOOKUP,
13459 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13460 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13461 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), /* goto end */
13462 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13463 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13464 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13465 			BPF_EXIT_INSN(),
13466 		},
13467 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13468 		.errstr = "type=inv expected=sock",
13469 		.result = REJECT,
13470 	},
13471 	{
13472 		"reference tracking: alloc, check, free in one subbranch",
13473 		.insns = {
13474 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13475 				    offsetof(struct __sk_buff, data)),
13476 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13477 				    offsetof(struct __sk_buff, data_end)),
13478 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
13479 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
13480 			/* if (offsetof(skb, mark) > data_len) exit; */
13481 			BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
13482 			BPF_EXIT_INSN(),
13483 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
13484 				    offsetof(struct __sk_buff, mark)),
13485 			BPF_SK_LOOKUP,
13486 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 1), /* mark == 0? */
13487 			/* Leak reference in R0 */
13488 			BPF_EXIT_INSN(),
13489 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
13490 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13491 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13492 			BPF_EXIT_INSN(),
13493 		},
13494 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13495 		.errstr = "Unreleased reference",
13496 		.result = REJECT,
13497 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
13498 	},
13499 	{
13500 		"reference tracking: alloc, check, free in both subbranches",
13501 		.insns = {
13502 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13503 				    offsetof(struct __sk_buff, data)),
13504 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13505 				    offsetof(struct __sk_buff, data_end)),
13506 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
13507 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
13508 			/* if (offsetof(skb, mark) > data_len) exit; */
13509 			BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
13510 			BPF_EXIT_INSN(),
13511 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
13512 				    offsetof(struct __sk_buff, mark)),
13513 			BPF_SK_LOOKUP,
13514 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 4), /* mark == 0? */
13515 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
13516 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13517 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13518 			BPF_EXIT_INSN(),
13519 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
13520 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13521 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13522 			BPF_EXIT_INSN(),
13523 		},
13524 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13525 		.result = ACCEPT,
13526 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
13527 	},
13528 	{
13529 		"reference tracking in call: free reference in subprog",
13530 		.insns = {
13531 			BPF_SK_LOOKUP,
13532 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
13533 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
13534 			BPF_MOV64_IMM(BPF_REG_0, 0),
13535 			BPF_EXIT_INSN(),
13536 
13537 			/* subprog 1 */
13538 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
13539 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
13540 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13541 			BPF_EXIT_INSN(),
13542 		},
13543 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13544 		.result = ACCEPT,
13545 	},
13546 	{
13547 		"pass modified ctx pointer to helper, 1",
13548 		.insns = {
13549 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
13550 			BPF_MOV64_IMM(BPF_REG_2, 0),
13551 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13552 				     BPF_FUNC_csum_update),
13553 			BPF_MOV64_IMM(BPF_REG_0, 0),
13554 			BPF_EXIT_INSN(),
13555 		},
13556 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13557 		.result = REJECT,
13558 		.errstr = "dereference of modified ctx ptr",
13559 	},
13560 	{
13561 		"pass modified ctx pointer to helper, 2",
13562 		.insns = {
13563 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
13564 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13565 				     BPF_FUNC_get_socket_cookie),
13566 			BPF_MOV64_IMM(BPF_REG_0, 0),
13567 			BPF_EXIT_INSN(),
13568 		},
13569 		.result_unpriv = REJECT,
13570 		.result = REJECT,
13571 		.errstr_unpriv = "dereference of modified ctx ptr",
13572 		.errstr = "dereference of modified ctx ptr",
13573 	},
13574 	{
13575 		"pass modified ctx pointer to helper, 3",
13576 		.insns = {
13577 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 0),
13578 			BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 4),
13579 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
13580 			BPF_MOV64_IMM(BPF_REG_2, 0),
13581 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13582 				     BPF_FUNC_csum_update),
13583 			BPF_MOV64_IMM(BPF_REG_0, 0),
13584 			BPF_EXIT_INSN(),
13585 		},
13586 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13587 		.result = REJECT,
13588 		.errstr = "variable ctx access var_off=(0x0; 0x4)",
13589 	},
13590 	{
13591 		"mov64 src == dst",
13592 		.insns = {
13593 			BPF_MOV64_IMM(BPF_REG_2, 0),
13594 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_2),
13595 			/* Check bounds are OK */
13596 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
13597 			BPF_MOV64_IMM(BPF_REG_0, 0),
13598 			BPF_EXIT_INSN(),
13599 		},
13600 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13601 		.result = ACCEPT,
13602 	},
13603 	{
13604 		"mov64 src != dst",
13605 		.insns = {
13606 			BPF_MOV64_IMM(BPF_REG_3, 0),
13607 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
13608 			/* Check bounds are OK */
13609 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
13610 			BPF_MOV64_IMM(BPF_REG_0, 0),
13611 			BPF_EXIT_INSN(),
13612 		},
13613 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13614 		.result = ACCEPT,
13615 	},
13616 	{
13617 		"reference tracking in call: free reference in subprog and outside",
13618 		.insns = {
13619 			BPF_SK_LOOKUP,
13620 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
13621 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13622 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
13623 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13624 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13625 			BPF_EXIT_INSN(),
13626 
13627 			/* subprog 1 */
13628 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
13629 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
13630 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13631 			BPF_EXIT_INSN(),
13632 		},
13633 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13634 		.errstr = "type=inv expected=sock",
13635 		.result = REJECT,
13636 	},
13637 	{
13638 		"reference tracking in call: alloc & leak reference in subprog",
13639 		.insns = {
13640 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13641 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13642 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
13643 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13644 			BPF_MOV64_IMM(BPF_REG_0, 0),
13645 			BPF_EXIT_INSN(),
13646 
13647 			/* subprog 1 */
13648 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_4),
13649 			BPF_SK_LOOKUP,
13650 			/* spill unchecked sk_ptr into stack of caller */
13651 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
13652 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13653 			BPF_EXIT_INSN(),
13654 		},
13655 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13656 		.errstr = "Unreleased reference",
13657 		.result = REJECT,
13658 	},
13659 	{
13660 		"reference tracking in call: alloc in subprog, release outside",
13661 		.insns = {
13662 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13663 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
13664 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13665 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13666 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13667 			BPF_EXIT_INSN(),
13668 
13669 			/* subprog 1 */
13670 			BPF_SK_LOOKUP,
13671 			BPF_EXIT_INSN(), /* return sk */
13672 		},
13673 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13674 		.retval = POINTER_VALUE,
13675 		.result = ACCEPT,
13676 	},
13677 	{
13678 		"reference tracking in call: sk_ptr leak into caller stack",
13679 		.insns = {
13680 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13681 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13682 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
13683 			BPF_MOV64_IMM(BPF_REG_0, 0),
13684 			BPF_EXIT_INSN(),
13685 
13686 			/* subprog 1 */
13687 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
13688 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
13689 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
13690 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
13691 			/* spill unchecked sk_ptr into stack of caller */
13692 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
13693 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
13694 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
13695 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
13696 			BPF_EXIT_INSN(),
13697 
13698 			/* subprog 2 */
13699 			BPF_SK_LOOKUP,
13700 			BPF_EXIT_INSN(),
13701 		},
13702 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13703 		.errstr = "Unreleased reference",
13704 		.result = REJECT,
13705 	},
13706 	{
13707 		"reference tracking in call: sk_ptr spill into caller stack",
13708 		.insns = {
13709 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13710 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13711 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
13712 			BPF_MOV64_IMM(BPF_REG_0, 0),
13713 			BPF_EXIT_INSN(),
13714 
13715 			/* subprog 1 */
13716 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
13717 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
13718 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
13719 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
13720 			/* spill unchecked sk_ptr into stack of caller */
13721 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
13722 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
13723 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
13724 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
13725 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
13726 			/* now the sk_ptr is verified, free the reference */
13727 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_4, 0),
13728 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13729 			BPF_EXIT_INSN(),
13730 
13731 			/* subprog 2 */
13732 			BPF_SK_LOOKUP,
13733 			BPF_EXIT_INSN(),
13734 		},
13735 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13736 		.result = ACCEPT,
13737 	},
13738 	{
13739 		"reference tracking: allow LD_ABS",
13740 		.insns = {
13741 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13742 			BPF_SK_LOOKUP,
13743 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13744 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13745 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13746 			BPF_LD_ABS(BPF_B, 0),
13747 			BPF_LD_ABS(BPF_H, 0),
13748 			BPF_LD_ABS(BPF_W, 0),
13749 			BPF_EXIT_INSN(),
13750 		},
13751 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13752 		.result = ACCEPT,
13753 	},
13754 	{
13755 		"reference tracking: forbid LD_ABS while holding reference",
13756 		.insns = {
13757 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13758 			BPF_SK_LOOKUP,
13759 			BPF_LD_ABS(BPF_B, 0),
13760 			BPF_LD_ABS(BPF_H, 0),
13761 			BPF_LD_ABS(BPF_W, 0),
13762 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13763 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13764 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13765 			BPF_EXIT_INSN(),
13766 		},
13767 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13768 		.errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
13769 		.result = REJECT,
13770 	},
13771 	{
13772 		"reference tracking: allow LD_IND",
13773 		.insns = {
13774 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13775 			BPF_SK_LOOKUP,
13776 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13777 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13778 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13779 			BPF_MOV64_IMM(BPF_REG_7, 1),
13780 			BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
13781 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
13782 			BPF_EXIT_INSN(),
13783 		},
13784 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13785 		.result = ACCEPT,
13786 		.retval = 1,
13787 	},
13788 	{
13789 		"reference tracking: forbid LD_IND while holding reference",
13790 		.insns = {
13791 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13792 			BPF_SK_LOOKUP,
13793 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
13794 			BPF_MOV64_IMM(BPF_REG_7, 1),
13795 			BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
13796 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
13797 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_4),
13798 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
13799 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13800 			BPF_EXIT_INSN(),
13801 		},
13802 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13803 		.errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
13804 		.result = REJECT,
13805 	},
13806 	{
13807 		"reference tracking: check reference or tail call",
13808 		.insns = {
13809 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
13810 			BPF_SK_LOOKUP,
13811 			/* if (sk) bpf_sk_release() */
13812 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13813 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 7),
13814 			/* bpf_tail_call() */
13815 			BPF_MOV64_IMM(BPF_REG_3, 2),
13816 			BPF_LD_MAP_FD(BPF_REG_2, 0),
13817 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13818 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13819 				     BPF_FUNC_tail_call),
13820 			BPF_MOV64_IMM(BPF_REG_0, 0),
13821 			BPF_EXIT_INSN(),
13822 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13823 			BPF_EXIT_INSN(),
13824 		},
13825 		.fixup_prog1 = { 17 },
13826 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13827 		.result = ACCEPT,
13828 	},
13829 	{
13830 		"reference tracking: release reference then tail call",
13831 		.insns = {
13832 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
13833 			BPF_SK_LOOKUP,
13834 			/* if (sk) bpf_sk_release() */
13835 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13836 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
13837 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13838 			/* bpf_tail_call() */
13839 			BPF_MOV64_IMM(BPF_REG_3, 2),
13840 			BPF_LD_MAP_FD(BPF_REG_2, 0),
13841 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13842 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13843 				     BPF_FUNC_tail_call),
13844 			BPF_MOV64_IMM(BPF_REG_0, 0),
13845 			BPF_EXIT_INSN(),
13846 		},
13847 		.fixup_prog1 = { 18 },
13848 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13849 		.result = ACCEPT,
13850 	},
13851 	{
13852 		"reference tracking: leak possible reference over tail call",
13853 		.insns = {
13854 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
13855 			/* Look up socket and store in REG_6 */
13856 			BPF_SK_LOOKUP,
13857 			/* bpf_tail_call() */
13858 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13859 			BPF_MOV64_IMM(BPF_REG_3, 2),
13860 			BPF_LD_MAP_FD(BPF_REG_2, 0),
13861 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13862 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13863 				     BPF_FUNC_tail_call),
13864 			BPF_MOV64_IMM(BPF_REG_0, 0),
13865 			/* if (sk) bpf_sk_release() */
13866 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13867 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
13868 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13869 			BPF_EXIT_INSN(),
13870 		},
13871 		.fixup_prog1 = { 16 },
13872 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13873 		.errstr = "tail_call would lead to reference leak",
13874 		.result = REJECT,
13875 	},
13876 	{
13877 		"reference tracking: leak checked reference over tail call",
13878 		.insns = {
13879 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
13880 			/* Look up socket and store in REG_6 */
13881 			BPF_SK_LOOKUP,
13882 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13883 			/* if (!sk) goto end */
13884 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
13885 			/* bpf_tail_call() */
13886 			BPF_MOV64_IMM(BPF_REG_3, 0),
13887 			BPF_LD_MAP_FD(BPF_REG_2, 0),
13888 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13889 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13890 				     BPF_FUNC_tail_call),
13891 			BPF_MOV64_IMM(BPF_REG_0, 0),
13892 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13893 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13894 			BPF_EXIT_INSN(),
13895 		},
13896 		.fixup_prog1 = { 17 },
13897 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13898 		.errstr = "tail_call would lead to reference leak",
13899 		.result = REJECT,
13900 	},
13901 	{
13902 		"reference tracking: mangle and release sock_or_null",
13903 		.insns = {
13904 			BPF_SK_LOOKUP,
13905 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13906 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
13907 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13908 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13909 			BPF_EXIT_INSN(),
13910 		},
13911 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13912 		.errstr = "R1 pointer arithmetic on sock_or_null prohibited",
13913 		.result = REJECT,
13914 	},
13915 	{
13916 		"reference tracking: mangle and release sock",
13917 		.insns = {
13918 			BPF_SK_LOOKUP,
13919 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13920 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
13921 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
13922 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13923 			BPF_EXIT_INSN(),
13924 		},
13925 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13926 		.errstr = "R1 pointer arithmetic on sock prohibited",
13927 		.result = REJECT,
13928 	},
13929 	{
13930 		"reference tracking: access member",
13931 		.insns = {
13932 			BPF_SK_LOOKUP,
13933 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13934 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
13935 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
13936 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13937 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13938 			BPF_EXIT_INSN(),
13939 		},
13940 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13941 		.result = ACCEPT,
13942 	},
13943 	{
13944 		"reference tracking: write to member",
13945 		.insns = {
13946 			BPF_SK_LOOKUP,
13947 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13948 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
13949 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13950 			BPF_LD_IMM64(BPF_REG_2, 42),
13951 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_2,
13952 				    offsetof(struct bpf_sock, mark)),
13953 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13954 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13955 			BPF_LD_IMM64(BPF_REG_0, 0),
13956 			BPF_EXIT_INSN(),
13957 		},
13958 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13959 		.errstr = "cannot write into socket",
13960 		.result = REJECT,
13961 	},
13962 	{
13963 		"reference tracking: invalid 64-bit access of member",
13964 		.insns = {
13965 			BPF_SK_LOOKUP,
13966 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13967 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
13968 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
13969 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13970 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13971 			BPF_EXIT_INSN(),
13972 		},
13973 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13974 		.errstr = "invalid bpf_sock access off=0 size=8",
13975 		.result = REJECT,
13976 	},
13977 	{
13978 		"reference tracking: access after release",
13979 		.insns = {
13980 			BPF_SK_LOOKUP,
13981 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13982 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
13983 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13984 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
13985 			BPF_EXIT_INSN(),
13986 		},
13987 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13988 		.errstr = "!read_ok",
13989 		.result = REJECT,
13990 	},
13991 	{
13992 		"reference tracking: direct access for lookup",
13993 		.insns = {
13994 			/* Check that the packet is at least 64B long */
13995 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13996 				    offsetof(struct __sk_buff, data)),
13997 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13998 				    offsetof(struct __sk_buff, data_end)),
13999 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
14000 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
14001 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
14002 			/* sk = sk_lookup_tcp(ctx, skb->data, ...) */
14003 			BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),
14004 			BPF_MOV64_IMM(BPF_REG_4, 0),
14005 			BPF_MOV64_IMM(BPF_REG_5, 0),
14006 			BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp),
14007 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
14008 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
14009 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
14010 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
14011 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
14012 			BPF_EXIT_INSN(),
14013 		},
14014 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14015 		.result = ACCEPT,
14016 	},
14017 	{
14018 		"calls: ctx read at start of subprog",
14019 		.insns = {
14020 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
14021 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
14022 			BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0),
14023 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
14024 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
14025 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
14026 			BPF_EXIT_INSN(),
14027 			BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
14028 			BPF_MOV64_IMM(BPF_REG_0, 0),
14029 			BPF_EXIT_INSN(),
14030 		},
14031 		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
14032 		.errstr_unpriv = "function calls to other bpf functions are allowed for root only",
14033 		.result_unpriv = REJECT,
14034 		.result = ACCEPT,
14035 	},
14036 	{
14037 		"check wire_len is not readable by sockets",
14038 		.insns = {
14039 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
14040 				    offsetof(struct __sk_buff, wire_len)),
14041 			BPF_EXIT_INSN(),
14042 		},
14043 		.errstr = "invalid bpf_context access",
14044 		.result = REJECT,
14045 	},
14046 	{
14047 		"check wire_len is readable by tc classifier",
14048 		.insns = {
14049 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
14050 				    offsetof(struct __sk_buff, wire_len)),
14051 			BPF_EXIT_INSN(),
14052 		},
14053 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14054 		.result = ACCEPT,
14055 	},
14056 	{
14057 		"check wire_len is not writable by tc classifier",
14058 		.insns = {
14059 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
14060 				    offsetof(struct __sk_buff, wire_len)),
14061 			BPF_EXIT_INSN(),
14062 		},
14063 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14064 		.errstr = "invalid bpf_context access",
14065 		.errstr_unpriv = "R1 leaks addr",
14066 		.result = REJECT,
14067 	},
14068 };
14069 
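/* Determine the length of a test program by scanning backwards from
 * MAX_INSNS for the last non-zero instruction.
 */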
14070 static int probe_filter_length(const struct bpf_insn *fp)
14071 {
14072 	int len;
14073 
14074 	for (len = MAX_INSNS - 1; len > 0; --len)
14075 		if (fp[len].code != 0 || fp[len].imm != 0)
14076 			break;
14077 	return len + 1;
14078 }
14079 
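/* Create a single map of the given type; hash maps are created without
 * preallocated elements.
 */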
14080 static int create_map(uint32_t type, uint32_t size_key,
14081 		      uint32_t size_value, uint32_t max_elem)
14082 {
14083 	int fd;
14084 
14085 	fd = bpf_create_map(type, size_key, size_value, max_elem,
14086 			    type == BPF_MAP_TYPE_HASH ? BPF_F_NO_PREALLOC : 0);
14087 	if (fd < 0)
14088 		printf("Failed to create map '%s'!\n", strerror(errno));
14089 
14090 	return fd;
14091 }
14092 
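/* Minimal dummy program that just returns 42; used to populate prog arrays. */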
14093 static int create_prog_dummy1(enum bpf_prog_type prog_type)
14094 {
14095 	struct bpf_insn prog[] = {
14096 		BPF_MOV64_IMM(BPF_REG_0, 42),
14097 		BPF_EXIT_INSN(),
14098 	};
14099 
14100 	return bpf_load_program(prog_type, prog,
14101 				ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
14102 }
14103 
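/* Dummy program that tail-calls slot idx of prog array mfd and returns 41
 * if the tail call falls through.
 */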
14104 static int create_prog_dummy2(enum bpf_prog_type prog_type, int mfd, int idx)
14105 {
14106 	struct bpf_insn prog[] = {
14107 		BPF_MOV64_IMM(BPF_REG_3, idx),
14108 		BPF_LD_MAP_FD(BPF_REG_2, mfd),
14109 		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
14110 			     BPF_FUNC_tail_call),
14111 		BPF_MOV64_IMM(BPF_REG_0, 41),
14112 		BPF_EXIT_INSN(),
14113 	};
14114 
14115 	return bpf_load_program(prog_type, prog,
14116 				ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
14117 }
14118 
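/* Build a prog array for the tail call tests: dummy1 is stored at p1key and
 * dummy2 at key 1.
 */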
14119 static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem,
14120 			     int p1key)
14121 {
14122 	int p2key = 1;
14123 	int mfd, p1fd, p2fd;
14124 
14125 	mfd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
14126 			     sizeof(int), max_elem, 0);
14127 	if (mfd < 0) {
14128 		printf("Failed to create prog array '%s'!\n", strerror(errno));
14129 		return -1;
14130 	}
14131 
14132 	p1fd = create_prog_dummy1(prog_type);
14133 	p2fd = create_prog_dummy2(prog_type, mfd, p2key);
14134 	if (p1fd < 0 || p2fd < 0)
14135 		goto out;
14136 	if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0)
14137 		goto out;
14138 	if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0)
14139 		goto out;
14140 	close(p2fd);
14141 	close(p1fd);
14142 
14143 	return mfd;
14144 out:
14145 	close(p2fd);
14146 	close(p1fd);
14147 	close(mfd);
14148 	return -1;
14149 }
14150 
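/* Create an ARRAY_OF_MAPS whose inner map template is a single-element
 * int array.
 */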
14151 static int create_map_in_map(void)
14152 {
14153 	int inner_map_fd, outer_map_fd;
14154 
14155 	inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
14156 				      sizeof(int), 1, 0);
14157 	if (inner_map_fd < 0) {
14158 		printf("Failed to create array '%s'!\n", strerror(errno));
14159 		return inner_map_fd;
14160 	}
14161 
14162 	outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
14163 					     sizeof(int), inner_map_fd, 1, 0);
14164 	if (outer_map_fd < 0)
14165 		printf("Failed to create array of maps '%s'!\n",
14166 		       strerror(errno));
14167 
14168 	close(inner_map_fd);
14169 
14170 	return outer_map_fd;
14171 }
14172 
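/* Create a cgroup (or per-cpu cgroup) storage map with TEST_DATA_LEN sized
 * values.
 */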
14173 static int create_cgroup_storage(bool percpu)
14174 {
14175 	enum bpf_map_type type = percpu ? BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE :
14176 		BPF_MAP_TYPE_CGROUP_STORAGE;
14177 	int fd;
14178 
14179 	fd = bpf_create_map(type, sizeof(struct bpf_cgroup_storage_key),
14180 			    TEST_DATA_LEN, 0, 0);
14181 	if (fd < 0)
14182 		printf("Failed to create cgroup storage '%s'!\n",
14183 		       strerror(errno));
14184 
14185 	return fd;
14186 }
14187 
14188 static char bpf_vlog[UINT_MAX >> 8];
14189 
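/* Patch the test program: replace the zero placeholders left by
 * BPF_LD_MAP_FD(reg, 0) at the offsets listed in the per-test fixup arrays
 * with fds of freshly created maps and prog arrays.
 */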
14190 static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
14191 			  struct bpf_insn *prog, int *map_fds)
14192 {
14193 	int *fixup_map_hash_8b = test->fixup_map_hash_8b;
14194 	int *fixup_map_hash_48b = test->fixup_map_hash_48b;
14195 	int *fixup_map_hash_16b = test->fixup_map_hash_16b;
14196 	int *fixup_map_array_48b = test->fixup_map_array_48b;
14197 	int *fixup_map_sockmap = test->fixup_map_sockmap;
14198 	int *fixup_map_sockhash = test->fixup_map_sockhash;
14199 	int *fixup_map_xskmap = test->fixup_map_xskmap;
14200 	int *fixup_map_stacktrace = test->fixup_map_stacktrace;
14201 	int *fixup_prog1 = test->fixup_prog1;
14202 	int *fixup_prog2 = test->fixup_prog2;
14203 	int *fixup_map_in_map = test->fixup_map_in_map;
14204 	int *fixup_cgroup_storage = test->fixup_cgroup_storage;
14205 	int *fixup_percpu_cgroup_storage = test->fixup_percpu_cgroup_storage;
14206 
14207 	if (test->fill_helper)
14208 		test->fill_helper(test);
14209 
14210 	/* Allocating HTs with 1 elem is fine here, since we only exercise
14211 	 * the verifier and never do a runtime lookup, so the only thing
14212 	 * that really matters in this case is the value size.
14213 	 */
14214 	if (*fixup_map_hash_8b) {
14215 		map_fds[0] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
14216 					sizeof(long long), 1);
14217 		do {
14218 			prog[*fixup_map_hash_8b].imm = map_fds[0];
14219 			fixup_map_hash_8b++;
14220 		} while (*fixup_map_hash_8b);
14221 	}
14222 
14223 	if (*fixup_map_hash_48b) {
14224 		map_fds[1] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
14225 					sizeof(struct test_val), 1);
14226 		do {
14227 			prog[*fixup_map_hash_48b].imm = map_fds[1];
14228 			fixup_map_hash_48b++;
14229 		} while (*fixup_map_hash_48b);
14230 	}
14231 
14232 	if (*fixup_map_hash_16b) {
14233 		map_fds[2] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
14234 					sizeof(struct other_val), 1);
14235 		do {
14236 			prog[*fixup_map_hash_16b].imm = map_fds[2];
14237 			fixup_map_hash_16b++;
14238 		} while (*fixup_map_hash_16b);
14239 	}
14240 
14241 	if (*fixup_map_array_48b) {
14242 		map_fds[3] = create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
14243 					sizeof(struct test_val), 1);
14244 		do {
14245 			prog[*fixup_map_array_48b].imm = map_fds[3];
14246 			fixup_map_array_48b++;
14247 		} while (*fixup_map_array_48b);
14248 	}
14249 
14250 	if (*fixup_prog1) {
14251 		map_fds[4] = create_prog_array(prog_type, 4, 0);
14252 		do {
14253 			prog[*fixup_prog1].imm = map_fds[4];
14254 			fixup_prog1++;
14255 		} while (*fixup_prog1);
14256 	}
14257 
14258 	if (*fixup_prog2) {
14259 		map_fds[5] = create_prog_array(prog_type, 8, 7);
14260 		do {
14261 			prog[*fixup_prog2].imm = map_fds[5];
14262 			fixup_prog2++;
14263 		} while (*fixup_prog2);
14264 	}
14265 
14266 	if (*fixup_map_in_map) {
14267 		map_fds[6] = create_map_in_map();
14268 		do {
14269 			prog[*fixup_map_in_map].imm = map_fds[6];
14270 			fixup_map_in_map++;
14271 		} while (*fixup_map_in_map);
14272 	}
14273 
14274 	if (*fixup_cgroup_storage) {
14275 		map_fds[7] = create_cgroup_storage(false);
14276 		do {
14277 			prog[*fixup_cgroup_storage].imm = map_fds[7];
14278 			fixup_cgroup_storage++;
14279 		} while (*fixup_cgroup_storage);
14280 	}
14281 
14282 	if (*fixup_percpu_cgroup_storage) {
14283 		map_fds[8] = create_cgroup_storage(true);
14284 		do {
14285 			prog[*fixup_percpu_cgroup_storage].imm = map_fds[8];
14286 			fixup_percpu_cgroup_storage++;
14287 		} while (*fixup_percpu_cgroup_storage);
14288 	}
14289 	if (*fixup_map_sockmap) {
14290 		map_fds[9] = create_map(BPF_MAP_TYPE_SOCKMAP, sizeof(int),
14291 					sizeof(int), 1);
14292 		do {
14293 			prog[*fixup_map_sockmap].imm = map_fds[9];
14294 			fixup_map_sockmap++;
14295 		} while (*fixup_map_sockmap);
14296 	}
14297 	if (*fixup_map_sockhash) {
14298 		map_fds[10] = create_map(BPF_MAP_TYPE_SOCKHASH, sizeof(int),
14299 					sizeof(int), 1);
14300 		do {
14301 			prog[*fixup_map_sockhash].imm = map_fds[10];
14302 			fixup_map_sockhash++;
14303 		} while (*fixup_map_sockhash);
14304 	}
14305 	if (*fixup_map_xskmap) {
14306 		map_fds[11] = create_map(BPF_MAP_TYPE_XSKMAP, sizeof(int),
14307 					sizeof(int), 1);
14308 		do {
14309 			prog[*fixup_map_xskmap].imm = map_fds[11];
14310 			fixup_map_xskmap++;
14311 		} while (*fixup_map_xskmap);
14312 	}
14313 	if (*fixup_map_stacktrace) {
14314 		map_fds[12] = create_map(BPF_MAP_TYPE_STACK_TRACE, sizeof(u32),
14315 					 sizeof(u64), 1);
14316 		do {
14317 			prog[*fixup_map_stacktrace].imm = map_fds[12];
14318 			fixup_map_stacktrace++;
14319 		} while (*fixup_map_stacktrace);
14320 	}
14321 }
14322 
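/* Raises or drops CAP_SYS_ADMIN in the effective capability set, so the
 * unprivileged pass can temporarily regain privileges, e.g. around
 * bpf_prog_test_run().
 */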
14323 static int set_admin(bool admin)
14324 {
14325 	cap_t caps;
14326 	const cap_value_t cap_val = CAP_SYS_ADMIN;
14327 	int ret = -1;
14328 
14329 	caps = cap_get_proc();
14330 	if (!caps) {
14331 		perror("cap_get_proc");
14332 		return -1;
14333 	}
14334 	if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
14335 				admin ? CAP_SET : CAP_CLEAR)) {
14336 		perror("cap_set_flag");
14337 		goto out;
14338 	}
14339 	if (cap_set_proc(caps)) {
14340 		perror("cap_set_proc");
14341 		goto out;
14342 	}
14343 	ret = 0;
14344 out:
14345 	if (cap_free(caps))
14346 		perror("cap_free");
14347 	return ret;
14348 }
14349 
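/* Runs a single test case: applies the map fixups, loads the program,
 * compares the outcome (and verifier log) against the expected result,
 * and, if the program loaded, executes it once via bpf_prog_test_run()
 * and checks the return value.
 */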
14350 static void do_test_single(struct bpf_test *test, bool unpriv,
14351 			   int *passes, int *errors)
14352 {
14353 	int fd_prog, expected_ret, alignment_prevented_execution;
14354 	int prog_len, prog_type = test->prog_type;
14355 	struct bpf_insn *prog = test->insns;
14356 	int map_fds[MAX_NR_MAPS];
14357 	const char *expected_err;
14358 	uint32_t expected_val;
14359 	uint32_t retval;
14360 	__u32 pflags;
14361 	int i, err;
14362 
14363 	for (i = 0; i < MAX_NR_MAPS; i++)
14364 		map_fds[i] = -1;
14365 
14366 	if (!prog_type)
14367 		prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
14368 	do_test_fixup(test, prog_type, prog, map_fds);
14369 	prog_len = probe_filter_length(prog);
14370 
14371 	pflags = 0;
14372 	if (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT)
14373 		pflags |= BPF_F_STRICT_ALIGNMENT;
14374 	if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
14375 		pflags |= BPF_F_ANY_ALIGNMENT;
14376 	fd_prog = bpf_verify_program(prog_type, prog, prog_len, pflags,
14377 				     "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);
14378 
14379 	expected_ret = unpriv && test->result_unpriv != UNDEF ?
14380 		       test->result_unpriv : test->result;
14381 	expected_err = unpriv && test->errstr_unpriv ?
14382 		       test->errstr_unpriv : test->errstr;
14383 	expected_val = unpriv && test->retval_unpriv ?
14384 		       test->retval_unpriv : test->retval;
14385 
14386 	alignment_prevented_execution = 0;
14387 
14388 	if (expected_ret == ACCEPT) {
14389 		if (fd_prog < 0) {
14390 			printf("FAIL\nFailed to load prog '%s'!\n",
14391 			       strerror(errno));
14392 			goto fail_log;
14393 		}
14394 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14395 		if (fd_prog >= 0 &&
14396 		    (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)) {
14397 			alignment_prevented_execution = 1;
14398 			goto test_ok;
14399 		}
14400 #endif
14401 	} else {
14402 		if (fd_prog >= 0) {
14403 			printf("FAIL\nUnexpected success loading program!\n");
14404 			goto fail_log;
14405 		}
14406 		if (!strstr(bpf_vlog, expected_err)) {
14407 			printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n",
14408 			      expected_err, bpf_vlog);
14409 			goto fail_log;
14410 		}
14411 	}
14412 
14413 	if (fd_prog >= 0) {
14414 		__u8 tmp[TEST_DATA_LEN << 2];
14415 		__u32 size_tmp = sizeof(tmp);
14416 
14417 		if (unpriv)
14418 			set_admin(true);
14419 		err = bpf_prog_test_run(fd_prog, 1, test->data,
14420 					sizeof(test->data), tmp, &size_tmp,
14421 					&retval, NULL);
14422 		if (unpriv)
14423 			set_admin(false);
14424 		if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) {
14425 			printf("Unexpected bpf_prog_test_run error\n");
14426 			goto fail_log;
14427 		}
14428 		if (!err && retval != expected_val &&
14429 		    expected_val != POINTER_VALUE) {
14430 			printf("FAIL retval %d != %d\n", retval, expected_val);
14431 			goto fail_log;
14432 		}
14433 	}
14434 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14435 test_ok:
14436 #endif
14437 	(*passes)++;
14438 	printf("OK%s\n", alignment_prevented_execution ?
14439 	       " (NOTE: not executed due to unknown alignment)" : "");
14440 close_fds:
14441 	close(fd_prog);
14442 	for (i = 0; i < MAX_NR_MAPS; i++)
14443 		close(map_fds[i]);
14444 	sched_yield();
14445 	return;
14446 fail_log:
14447 	(*errors)++;
14448 	printf("%s", bpf_vlog);
14449 	goto close_fds;
14450 }
14451 
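/* Returns true if CAP_SYS_ADMIN is present in the effective capability
 * set of the current process.
 */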
14452 static bool is_admin(void)
14453 {
14454 	cap_t caps;
14455 	cap_flag_value_t sysadmin = CAP_CLEAR;
14456 	const cap_value_t cap_val = CAP_SYS_ADMIN;
14457 
14458 #ifdef CAP_IS_SUPPORTED
14459 	if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
14460 		perror("CAP_IS_SUPPORTED");
14461 		return false;
14462 	}
14463 #endif
14464 	caps = cap_get_proc();
14465 	if (!caps) {
14466 		perror("cap_get_proc");
14467 		return false;
14468 	}
14469 	if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
14470 		perror("cap_get_flag");
14471 	if (cap_free(caps))
14472 		perror("cap_free");
14473 	return (sysadmin == CAP_SET);
14474 }
14475 
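/* Reads the kernel.unprivileged_bpf_disabled sysctl; if the file cannot
 * be opened, unprivileged BPF is conservatively treated as disabled.
 */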
14476 static void get_unpriv_disabled(void)
14477 {
14478 	char buf[2];
14479 	FILE *fd;
14480 
14481 	fd = fopen("/proc/sys/"UNPRIV_SYSCTL, "r");
14482 	if (!fd) {
14483 		perror("fopen /proc/sys/"UNPRIV_SYSCTL);
14484 		unpriv_disabled = true;
14485 		return;
14486 	}
14487 	if (fgets(buf, 2, fd) == buf && atoi(buf))
14488 		unpriv_disabled = true;
14489 	fclose(fd);
14490 }
14491 
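/* Only socket filter and cgroup/skb programs (or tests that leave the
 * program type unset) can be loaded without privileges, so only those
 * get an unprivileged "/u" run.
 */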
14492 static bool test_as_unpriv(struct bpf_test *test)
14493 {
14494 	return !test->prog_type ||
14495 	       test->prog_type == BPF_PROG_TYPE_SOCKET_FILTER ||
14496 	       test->prog_type == BPF_PROG_TYPE_CGROUP_SKB;
14497 }
14498 
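/* Runs tests[from..to): each case is attempted once unprivileged
 * ("#N/u", where supported and not disabled by sysctl) and once
 * privileged ("#N/p").
 */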
14499 static int do_test(bool unpriv, unsigned int from, unsigned int to)
14500 {
14501 	int i, passes = 0, errors = 0, skips = 0;
14502 
14503 	for (i = from; i < to; i++) {
14504 		struct bpf_test *test = &tests[i];
14505 
14506 		/* We skip the unprivileged run right away for program types
14507 		 * that are not supported by non-root.
14508 		 */
14509 		if (test_as_unpriv(test) && unpriv_disabled) {
14510 			printf("#%d/u %s SKIP\n", i, test->descr);
14511 			skips++;
14512 		} else if (test_as_unpriv(test)) {
14513 			if (!unpriv)
14514 				set_admin(false);
14515 			printf("#%d/u %s ", i, test->descr);
14516 			do_test_single(test, true, &passes, &errors);
14517 			if (!unpriv)
14518 				set_admin(true);
14519 		}
14520 
14521 		if (unpriv) {
14522 			printf("#%d/p %s SKIP\n", i, test->descr);
14523 			skips++;
14524 		} else {
14525 			printf("#%d/p %s ", i, test->descr);
14526 			do_test_single(test, false, &passes, &errors);
14527 		}
14528 	}
14529 
14530 	printf("Summary: %d PASSED, %d SKIPPED, %d FAILED\n", passes,
14531 	       skips, errors);
14532 	return errors ? EXIT_FAILURE : EXIT_SUCCESS;
14533 }
14534 
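/* Usage (illustrative):
 *   ./test_verifier           run all tests
 *   ./test_verifier 5         run test #5 only
 *   ./test_verifier 5 10      run tests #5 through #10 (inclusive)
 */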
14535 int main(int argc, char **argv)
14536 {
14537 	unsigned int from = 0, to = ARRAY_SIZE(tests);
14538 	bool unpriv = !is_admin();
14539 
14540 	if (argc == 3) {
14541 		unsigned int l = atoi(argv[argc - 2]);
14542 		unsigned int u = atoi(argv[argc - 1]);
14543 
14544 		if (l < to && u < to) {
14545 			from = l;
14546 			to   = u + 1;
14547 		}
14548 	} else if (argc == 2) {
14549 		unsigned int t = atoi(argv[argc - 1]);
14550 
14551 		if (t < to) {
14552 			from = t;
14553 			to   = t + 1;
14554 		}
14555 	}
14556 
14557 	get_unpriv_disabled();
14558 	if (unpriv && unpriv_disabled) {
14559 		printf("Cannot run as unprivileged user with sysctl %s.\n",
14560 		       UNPRIV_SYSCTL);
14561 		return EXIT_FAILURE;
14562 	}
14563 
14564 	bpf_semi_rand_init();
14565 	return do_test(unpriv, from, to);
14566 }
14567