/*
 * Testsuite for eBPF verifier
 *
 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */

#include <endian.h>
#include <asm/types.h>
#include <linux/types.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <stddef.h>
#include <stdbool.h>
#include <sched.h>

#include <sys/capability.h>
#include <sys/resource.h>

#include <linux/unistd.h>
#include <linux/filter.h>
#include <linux/bpf_perf_event.h>
#include <linux/bpf.h>

#include <bpf/bpf.h>

#ifdef HAVE_GENHDR
# include "autoconf.h"
#else
# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
#  define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
# endif
#endif

#include "../../../include/linux/filter.h"

#ifndef ARRAY_SIZE
# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif

#define MAX_INSNS 512
#define MAX_FIXUPS 8
#define MAX_NR_MAPS 4

#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)
#define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1)

struct bpf_test {
	const char *descr;
	struct bpf_insn insns[MAX_INSNS];
	int fixup_map1[MAX_FIXUPS];
	int fixup_map2[MAX_FIXUPS];
	int fixup_prog[MAX_FIXUPS];
	int fixup_map_in_map[MAX_FIXUPS];
	const char *errstr;
	const char *errstr_unpriv;
	enum {
		UNDEF,
		ACCEPT,
		REJECT
	} result, result_unpriv;
	enum bpf_prog_type prog_type;
	uint8_t flags;
};

/* Note we want this to be 64 bit aligned so that the end of our array
 * is actually the end of the structure.
 */
#define MAX_ENTRIES 11

struct test_val {
	unsigned int index;
	int foo[MAX_ENTRIES];
};
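
/* The note above says the end of foo[] should coincide with the end of the
 * structure. A minimal, purely illustrative sketch of how that layout
 * assumption could be checked at compile time; this assertion is an
 * editorial addition, not part of the original suite:
 */
_Static_assert(sizeof(struct test_val) ==
	       offsetof(struct test_val, foo) + MAX_ENTRIES * sizeof(int),
	       "struct test_val must end exactly after foo[]");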

static struct bpf_test tests[] = {
	{
		"add+sub+mul",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, 1),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
			BPF_MOV64_IMM(BPF_REG_2, 3),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"unreachable",
		.insns = {
			BPF_EXIT_INSN(),
			BPF_EXIT_INSN(),
		},
		.errstr = "unreachable",
		.result = REJECT,
	},
	{
		"unreachable2",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "unreachable",
		.result = REJECT,
	},
	{
		"out of range jump",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "jump out of range",
		.result = REJECT,
	},
	{
		"out of range jump2",
		.insns = {
			BPF_JMP_IMM(BPF_JA, 0, 0, -2),
			BPF_EXIT_INSN(),
		},
		.errstr = "jump out of range",
		.result = REJECT,
	},
	{
		"test1 ld_imm64",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_MOV64_IMM(BPF_REG_0, 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_LD_IMM insn",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"test2 ld_imm64",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_LD_IMM insn",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"test3 ld_imm64",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 0),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_LD_IMM64(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test4 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test5 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
	{
		"test6 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
			BPF_RAW_INSN(0, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"test7 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
			BPF_RAW_INSN(0, 0, 0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"test8 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
			BPF_RAW_INSN(0, 0, 0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "uses reserved fields",
		.result = REJECT,
	},
	{
		"test9 ld_imm64",
		.insns = {
			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
			BPF_RAW_INSN(0, 0, 0, 1, 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_ld_imm64 insn",
		.result = REJECT,
	},
"test10 ld_imm64", 235 .insns = { 236 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1), 237 BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1), 238 BPF_EXIT_INSN(), 239 }, 240 .errstr = "invalid bpf_ld_imm64 insn", 241 .result = REJECT, 242 }, 243 { 244 "test11 ld_imm64", 245 .insns = { 246 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1), 247 BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1), 248 BPF_EXIT_INSN(), 249 }, 250 .errstr = "invalid bpf_ld_imm64 insn", 251 .result = REJECT, 252 }, 253 { 254 "test12 ld_imm64", 255 .insns = { 256 BPF_MOV64_IMM(BPF_REG_1, 0), 257 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1), 258 BPF_RAW_INSN(0, 0, 0, 0, 1), 259 BPF_EXIT_INSN(), 260 }, 261 .errstr = "not pointing to valid bpf_map", 262 .result = REJECT, 263 }, 264 { 265 "test13 ld_imm64", 266 .insns = { 267 BPF_MOV64_IMM(BPF_REG_1, 0), 268 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1), 269 BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1), 270 BPF_EXIT_INSN(), 271 }, 272 .errstr = "invalid bpf_ld_imm64 insn", 273 .result = REJECT, 274 }, 275 { 276 "no bpf_exit", 277 .insns = { 278 BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2), 279 }, 280 .errstr = "jump out of range", 281 .result = REJECT, 282 }, 283 { 284 "loop (back-edge)", 285 .insns = { 286 BPF_JMP_IMM(BPF_JA, 0, 0, -1), 287 BPF_EXIT_INSN(), 288 }, 289 .errstr = "back-edge", 290 .result = REJECT, 291 }, 292 { 293 "loop2 (back-edge)", 294 .insns = { 295 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 296 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), 297 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0), 298 BPF_JMP_IMM(BPF_JA, 0, 0, -4), 299 BPF_EXIT_INSN(), 300 }, 301 .errstr = "back-edge", 302 .result = REJECT, 303 }, 304 { 305 "conditional loop", 306 .insns = { 307 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 308 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), 309 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0), 310 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3), 311 BPF_EXIT_INSN(), 312 }, 313 .errstr = "back-edge", 314 .result = REJECT, 315 }, 316 { 317 "read uninitialized register", 318 .insns = { 319 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 320 BPF_EXIT_INSN(), 321 }, 322 .errstr = "R2 !read_ok", 323 .result = REJECT, 324 }, 325 { 326 "read invalid register", 327 .insns = { 328 BPF_MOV64_REG(BPF_REG_0, -1), 329 BPF_EXIT_INSN(), 330 }, 331 .errstr = "R15 is invalid", 332 .result = REJECT, 333 }, 334 { 335 "program doesn't init R0 before exit", 336 .insns = { 337 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1), 338 BPF_EXIT_INSN(), 339 }, 340 .errstr = "R0 !read_ok", 341 .result = REJECT, 342 }, 343 { 344 "program doesn't init R0 before exit in all branches", 345 .insns = { 346 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2), 347 BPF_MOV64_IMM(BPF_REG_0, 1), 348 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2), 349 BPF_EXIT_INSN(), 350 }, 351 .errstr = "R0 !read_ok", 352 .errstr_unpriv = "R1 pointer comparison", 353 .result = REJECT, 354 }, 355 { 356 "stack out of bounds", 357 .insns = { 358 BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0), 359 BPF_EXIT_INSN(), 360 }, 361 .errstr = "invalid stack", 362 .result = REJECT, 363 }, 364 { 365 "invalid call insn1", 366 .insns = { 367 BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0), 368 BPF_EXIT_INSN(), 369 }, 370 .errstr = "BPF_CALL uses reserved", 371 .result = REJECT, 372 }, 373 { 374 "invalid call insn2", 375 .insns = { 376 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0), 377 BPF_EXIT_INSN(), 378 }, 379 .errstr = "BPF_CALL uses reserved", 380 .result = REJECT, 381 }, 382 { 383 "invalid function call", 384 .insns = { 385 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567), 386 BPF_EXIT_INSN(), 387 }, 388 .errstr = 
"invalid func unknown#1234567", 389 .result = REJECT, 390 }, 391 { 392 "uninitialized stack1", 393 .insns = { 394 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 395 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 396 BPF_LD_MAP_FD(BPF_REG_1, 0), 397 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 398 BPF_FUNC_map_lookup_elem), 399 BPF_EXIT_INSN(), 400 }, 401 .fixup_map1 = { 2 }, 402 .errstr = "invalid indirect read from stack", 403 .result = REJECT, 404 }, 405 { 406 "uninitialized stack2", 407 .insns = { 408 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 409 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8), 410 BPF_EXIT_INSN(), 411 }, 412 .errstr = "invalid read from stack", 413 .result = REJECT, 414 }, 415 { 416 "invalid fp arithmetic", 417 /* If this gets ever changed, make sure JITs can deal with it. */ 418 .insns = { 419 BPF_MOV64_IMM(BPF_REG_0, 0), 420 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 421 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8), 422 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), 423 BPF_EXIT_INSN(), 424 }, 425 .errstr_unpriv = "R1 subtraction from stack pointer", 426 .result_unpriv = REJECT, 427 .errstr = "R1 invalid mem access", 428 .result = REJECT, 429 }, 430 { 431 "non-invalid fp arithmetic", 432 .insns = { 433 BPF_MOV64_IMM(BPF_REG_0, 0), 434 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), 435 BPF_EXIT_INSN(), 436 }, 437 .result = ACCEPT, 438 }, 439 { 440 "invalid argument register", 441 .insns = { 442 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 443 BPF_FUNC_get_cgroup_classid), 444 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 445 BPF_FUNC_get_cgroup_classid), 446 BPF_EXIT_INSN(), 447 }, 448 .errstr = "R1 !read_ok", 449 .result = REJECT, 450 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 451 }, 452 { 453 "non-invalid argument register", 454 .insns = { 455 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1), 456 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 457 BPF_FUNC_get_cgroup_classid), 458 BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6), 459 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 460 BPF_FUNC_get_cgroup_classid), 461 BPF_EXIT_INSN(), 462 }, 463 .result = ACCEPT, 464 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 465 }, 466 { 467 "check valid spill/fill", 468 .insns = { 469 /* spill R1(ctx) into stack */ 470 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), 471 /* fill it back into R2 */ 472 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8), 473 /* should be able to access R0 = *(R2 + 8) */ 474 /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */ 475 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 476 BPF_EXIT_INSN(), 477 }, 478 .errstr_unpriv = "R0 leaks addr", 479 .result = ACCEPT, 480 .result_unpriv = REJECT, 481 }, 482 { 483 "check valid spill/fill, skb mark", 484 .insns = { 485 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1), 486 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8), 487 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), 488 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 489 offsetof(struct __sk_buff, mark)), 490 BPF_EXIT_INSN(), 491 }, 492 .result = ACCEPT, 493 .result_unpriv = ACCEPT, 494 }, 495 { 496 "check corrupted spill/fill", 497 .insns = { 498 /* spill R1(ctx) into stack */ 499 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), 500 /* mess up with R1 pointer on stack */ 501 BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23), 502 /* fill back into R0 should fail */ 503 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), 504 BPF_EXIT_INSN(), 505 }, 506 .errstr_unpriv = "attempt to corrupt spilled", 507 .errstr = "corrupted spill", 508 .result = REJECT, 509 }, 510 { 511 "invalid src register in STX", 512 .insns = { 513 BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1), 514 
	{
		"invalid dst register in STX",
		.insns = {
			BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R14 is invalid",
		.result = REJECT,
	},
	{
		"invalid dst register in ST",
		.insns = {
			BPF_ST_MEM(BPF_B, 14, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R14 is invalid",
		.result = REJECT,
	},
	{
		"invalid src register in LDX",
		.insns = {
			BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R12 is invalid",
		.result = REJECT,
	},
	{
		"invalid dst register in LDX",
		.insns = {
			BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R11 is invalid",
		.result = REJECT,
	},
	{
		"junk insn",
		.insns = {
			BPF_RAW_INSN(0, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_LD_IMM",
		.result = REJECT,
	},
	{
		"junk insn2",
		.insns = {
			BPF_RAW_INSN(1, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_LDX uses reserved fields",
		.result = REJECT,
	},
	{
		"junk insn3",
		.insns = {
			BPF_RAW_INSN(-1, 0, 0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_ALU opcode f0",
		.result = REJECT,
	},
	{
		"junk insn4",
		.insns = {
			BPF_RAW_INSN(-1, -1, -1, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid BPF_ALU opcode f0",
		.result = REJECT,
	},
	{
		"junk insn5",
		.insns = {
			BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
			BPF_EXIT_INSN(),
		},
		.errstr = "BPF_ALU uses reserved fields",
		.result = REJECT,
	},
	{
		"misaligned read from stack",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned stack access",
		.result = REJECT,
		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
	},
	{
		"invalid map_fd for function call",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_delete_elem),
			BPF_EXIT_INSN(),
		},
		.errstr = "fd 0 is not pointing to valid bpf_map",
		.result = REJECT,
	},
	{
		"don't check return value before access",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr = "R0 invalid mem access 'map_value_or_null'",
		.result = REJECT,
	},
	{
		"access memory with incorrect alignment",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr = "misaligned value access",
		.result = REJECT,
		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
	},
	{
		"sometimes access memory with incorrect alignment",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
			BPF_EXIT_INSN(),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },
		.errstr = "R0 invalid mem access",
		.errstr_unpriv = "R0 leaks addr",
		.result = REJECT,
		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
	},
	{
		"jump test 1",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"jump test 2",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 14),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 8),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 5),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"jump test 3",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 19),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
			BPF_JMP_IMM(BPF_JA, 0, 0, 15),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
			BPF_JMP_IMM(BPF_JA, 0, 0, 7),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
			BPF_JMP_IMM(BPF_JA, 0, 0, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_delete_elem),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 24 },
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"jump test 4",
		.insns = {
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"jump test 5",
		.insns = {
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr_unpriv = "R1 pointer comparison",
		.result_unpriv = REJECT,
		.result = ACCEPT,
	},
	{
		"access skb fields ok",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, len)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, mark)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, pkt_type)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, queue_mapping)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, protocol)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, vlan_present)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, vlan_tci)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, napi_id)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"access skb fields bad1",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"access skb fields bad2",
		.insns = {
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, pkt_type)),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 4 },
		.errstr = "different pointers",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"access skb fields bad3",
		.insns = {
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, pkt_type)),
			BPF_EXIT_INSN(),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_JMP_IMM(BPF_JA, 0, 0, -12),
		},
		.fixup_map1 = { 6 },
		.errstr = "different pointers",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"access skb fields bad4",
		.insns = {
			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, len)),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
			BPF_JMP_IMM(BPF_JA, 0, 0, -13),
		},
		.fixup_map1 = { 7 },
		.errstr = "different pointers",
		.errstr_unpriv = "R1 pointer comparison",
		.result = REJECT,
	},
	{
		"invalid access __sk_buff family",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, family)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"invalid access __sk_buff remote_ip4",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, remote_ip4)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"invalid access __sk_buff local_ip4",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, local_ip4)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"invalid access __sk_buff remote_ip6",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, remote_ip6)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"invalid access __sk_buff local_ip6",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, local_ip6)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"invalid access __sk_buff remote_port",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, remote_port)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"invalid access __sk_buff local_port",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, local_port)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"valid access __sk_buff family",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, family)),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
	{
		"valid access __sk_buff remote_ip4",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, remote_ip4)),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
	{
		"valid access __sk_buff local_ip4",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, local_ip4)),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
	{
		"valid access __sk_buff remote_ip6",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, remote_ip6[0])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, remote_ip6[1])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, remote_ip6[2])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, remote_ip6[3])),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
	{
		"valid access __sk_buff local_ip6",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, local_ip6[0])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, local_ip6[1])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, local_ip6[2])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, local_ip6[3])),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
	{
		"valid access __sk_buff remote_port",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, remote_port)),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
	{
		"valid access __sk_buff local_port",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, local_port)),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
	{
		"invalid access of tc_classid for SK_SKB",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, tc_classid)),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
		.errstr = "invalid bpf_context access",
	},
	{
		"invalid access of skb->mark for SK_SKB",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, mark)),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
		.errstr = "invalid bpf_context access",
	},
	{
		"check skb->mark is not writeable by SK_SKB",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, offsetof(struct __sk_buff, mark)),
			BPF_EXIT_INSN(),
		},
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
		.errstr = "invalid bpf_context access",
	},
	{
		"check skb->tc_index is writeable by SK_SKB",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, offsetof(struct __sk_buff, tc_index)),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
	{
		"check skb->priority is writeable by SK_SKB",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, offsetof(struct __sk_buff, priority)),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
	{
		"direct packet read for SK_SKB",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
	{
		"direct packet write for SK_SKB",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct __sk_buff, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct __sk_buff, data_end)),
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SK_SKB,
	},
1212 "overlapping checks for direct packet access SK_SKB", 1213 .insns = { 1214 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 1215 offsetof(struct __sk_buff, data)), 1216 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 1217 offsetof(struct __sk_buff, data_end)), 1218 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 1219 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 1220 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4), 1221 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), 1222 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), 1223 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), 1224 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6), 1225 BPF_MOV64_IMM(BPF_REG_0, 0), 1226 BPF_EXIT_INSN(), 1227 }, 1228 .result = ACCEPT, 1229 .prog_type = BPF_PROG_TYPE_SK_SKB, 1230 }, 1231 { 1232 "check skb->mark is not writeable by sockets", 1233 .insns = { 1234 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 1235 offsetof(struct __sk_buff, mark)), 1236 BPF_EXIT_INSN(), 1237 }, 1238 .errstr = "invalid bpf_context access", 1239 .errstr_unpriv = "R1 leaks addr", 1240 .result = REJECT, 1241 }, 1242 { 1243 "check skb->tc_index is not writeable by sockets", 1244 .insns = { 1245 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 1246 offsetof(struct __sk_buff, tc_index)), 1247 BPF_EXIT_INSN(), 1248 }, 1249 .errstr = "invalid bpf_context access", 1250 .errstr_unpriv = "R1 leaks addr", 1251 .result = REJECT, 1252 }, 1253 { 1254 "check cb access: byte", 1255 .insns = { 1256 BPF_MOV64_IMM(BPF_REG_0, 0), 1257 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 1258 offsetof(struct __sk_buff, cb[0])), 1259 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 1260 offsetof(struct __sk_buff, cb[0]) + 1), 1261 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 1262 offsetof(struct __sk_buff, cb[0]) + 2), 1263 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 1264 offsetof(struct __sk_buff, cb[0]) + 3), 1265 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 1266 offsetof(struct __sk_buff, cb[1])), 1267 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 1268 offsetof(struct __sk_buff, cb[1]) + 1), 1269 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 1270 offsetof(struct __sk_buff, cb[1]) + 2), 1271 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 1272 offsetof(struct __sk_buff, cb[1]) + 3), 1273 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 1274 offsetof(struct __sk_buff, cb[2])), 1275 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 1276 offsetof(struct __sk_buff, cb[2]) + 1), 1277 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 1278 offsetof(struct __sk_buff, cb[2]) + 2), 1279 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 1280 offsetof(struct __sk_buff, cb[2]) + 3), 1281 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 1282 offsetof(struct __sk_buff, cb[3])), 1283 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 1284 offsetof(struct __sk_buff, cb[3]) + 1), 1285 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 1286 offsetof(struct __sk_buff, cb[3]) + 2), 1287 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 1288 offsetof(struct __sk_buff, cb[3]) + 3), 1289 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 1290 offsetof(struct __sk_buff, cb[4])), 1291 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 1292 offsetof(struct __sk_buff, cb[4]) + 1), 1293 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 1294 offsetof(struct __sk_buff, cb[4]) + 2), 1295 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 1296 offsetof(struct __sk_buff, cb[4]) + 3), 1297 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1298 offsetof(struct __sk_buff, cb[0])), 1299 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1300 offsetof(struct __sk_buff, cb[0]) + 1), 1301 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1302 offsetof(struct __sk_buff, cb[0]) + 2), 1303 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1304 offsetof(struct 
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, cb[0]) + 3),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, cb[1])),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, cb[1]) + 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, cb[1]) + 2),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, cb[1]) + 3),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, cb[2])),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, cb[2]) + 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, cb[2]) + 2),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, cb[2]) + 3),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, cb[3])),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, cb[3]) + 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, cb[3]) + 2),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, cb[3]) + 3),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, cb[4])),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, cb[4]) + 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, cb[4]) + 2),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, cb[4]) + 3),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"__sk_buff->hash, offset 0, byte store not permitted",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, offsetof(struct __sk_buff, hash)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"__sk_buff->tc_index, offset 3, byte store not permitted",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, offsetof(struct __sk_buff, tc_index) + 3),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check skb->hash byte load permitted",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, hash)),
#else
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, hash) + 3),
#endif
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"check skb->hash byte load not permitted 1",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, hash) + 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check skb->hash byte load not permitted 2",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, hash) + 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check skb->hash byte load not permitted 3",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, hash) + 3),
#else
			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, hash)),
#endif
			BPF_EXIT_INSN(),
		},
"invalid bpf_context access", 1414 .result = REJECT, 1415 }, 1416 { 1417 "check cb access: byte, wrong type", 1418 .insns = { 1419 BPF_MOV64_IMM(BPF_REG_0, 0), 1420 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 1421 offsetof(struct __sk_buff, cb[0])), 1422 BPF_EXIT_INSN(), 1423 }, 1424 .errstr = "invalid bpf_context access", 1425 .result = REJECT, 1426 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, 1427 }, 1428 { 1429 "check cb access: half", 1430 .insns = { 1431 BPF_MOV64_IMM(BPF_REG_0, 0), 1432 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, 1433 offsetof(struct __sk_buff, cb[0])), 1434 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, 1435 offsetof(struct __sk_buff, cb[0]) + 2), 1436 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, 1437 offsetof(struct __sk_buff, cb[1])), 1438 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, 1439 offsetof(struct __sk_buff, cb[1]) + 2), 1440 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, 1441 offsetof(struct __sk_buff, cb[2])), 1442 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, 1443 offsetof(struct __sk_buff, cb[2]) + 2), 1444 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, 1445 offsetof(struct __sk_buff, cb[3])), 1446 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, 1447 offsetof(struct __sk_buff, cb[3]) + 2), 1448 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, 1449 offsetof(struct __sk_buff, cb[4])), 1450 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, 1451 offsetof(struct __sk_buff, cb[4]) + 2), 1452 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 1453 offsetof(struct __sk_buff, cb[0])), 1454 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 1455 offsetof(struct __sk_buff, cb[0]) + 2), 1456 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 1457 offsetof(struct __sk_buff, cb[1])), 1458 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 1459 offsetof(struct __sk_buff, cb[1]) + 2), 1460 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 1461 offsetof(struct __sk_buff, cb[2])), 1462 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 1463 offsetof(struct __sk_buff, cb[2]) + 2), 1464 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 1465 offsetof(struct __sk_buff, cb[3])), 1466 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 1467 offsetof(struct __sk_buff, cb[3]) + 2), 1468 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 1469 offsetof(struct __sk_buff, cb[4])), 1470 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 1471 offsetof(struct __sk_buff, cb[4]) + 2), 1472 BPF_EXIT_INSN(), 1473 }, 1474 .result = ACCEPT, 1475 }, 1476 { 1477 "check cb access: half, unaligned", 1478 .insns = { 1479 BPF_MOV64_IMM(BPF_REG_0, 0), 1480 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, 1481 offsetof(struct __sk_buff, cb[0]) + 1), 1482 BPF_EXIT_INSN(), 1483 }, 1484 .errstr = "misaligned context access", 1485 .result = REJECT, 1486 .flags = F_LOAD_WITH_STRICT_ALIGNMENT, 1487 }, 1488 { 1489 "check __sk_buff->hash, offset 0, half store not permitted", 1490 .insns = { 1491 BPF_MOV64_IMM(BPF_REG_0, 0), 1492 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, 1493 offsetof(struct __sk_buff, hash)), 1494 BPF_EXIT_INSN(), 1495 }, 1496 .errstr = "invalid bpf_context access", 1497 .result = REJECT, 1498 }, 1499 { 1500 "check __sk_buff->tc_index, offset 2, half store not permitted", 1501 .insns = { 1502 BPF_MOV64_IMM(BPF_REG_0, 0), 1503 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, 1504 offsetof(struct __sk_buff, tc_index) + 2), 1505 BPF_EXIT_INSN(), 1506 }, 1507 .errstr = "invalid bpf_context access", 1508 .result = REJECT, 1509 }, 1510 { 1511 "check skb->hash half load permitted", 1512 .insns = { 1513 BPF_MOV64_IMM(BPF_REG_0, 0), 1514 #if __BYTE_ORDER == __LITTLE_ENDIAN 1515 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 1516 offsetof(struct __sk_buff, hash)), 1517 #else 1518 
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, hash) + 2),
#endif
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"check skb->hash half load not permitted",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, hash) + 2),
#else
			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, hash)),
#endif
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check cb access: half, wrong type",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, offsetof(struct __sk_buff, cb[0])),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
	},
	{
		"check cb access: word",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, offsetof(struct __sk_buff, cb[0])),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, offsetof(struct __sk_buff, cb[1])),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, offsetof(struct __sk_buff, cb[2])),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, offsetof(struct __sk_buff, cb[3])),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, offsetof(struct __sk_buff, cb[4])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, cb[0])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, cb[1])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, cb[2])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, cb[3])),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, cb[4])),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"check cb access: word, unaligned 1",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, offsetof(struct __sk_buff, cb[0]) + 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned context access",
		.result = REJECT,
		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
	},
	{
		"check cb access: word, unaligned 2",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, offsetof(struct __sk_buff, cb[4]) + 1),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned context access",
		.result = REJECT,
		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
	},
	{
		"check cb access: word, unaligned 3",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, offsetof(struct __sk_buff, cb[4]) + 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned context access",
		.result = REJECT,
		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
	},
	{
		"check cb access: word, unaligned 4",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, offsetof(struct __sk_buff, cb[4]) + 3),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned context access",
		.result = REJECT,
		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
	},
	{
		"check cb access: double",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, offsetof(struct __sk_buff, cb[0])),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, offsetof(struct __sk_buff, cb[2])),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, cb[0])),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, cb[2])),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
	{
		"check cb access: double, unaligned 1",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, offsetof(struct __sk_buff, cb[1])),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned context access",
		.result = REJECT,
		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
	},
	{
		"check cb access: double, unaligned 2",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, offsetof(struct __sk_buff, cb[3])),
			BPF_EXIT_INSN(),
		},
		.errstr = "misaligned context access",
		.result = REJECT,
		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
	},
	{
		"check cb access: double, oob 1",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, offsetof(struct __sk_buff, cb[4])),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check cb access: double, oob 2",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, cb[4])),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check __sk_buff->ifindex dw store not permitted",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, offsetof(struct __sk_buff, ifindex)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check __sk_buff->ifindex dw load not permitted",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, ifindex)),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
	},
	{
		"check cb access: double, wrong type",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, offsetof(struct __sk_buff, cb[0])),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
	},
	{
		"check out of range skb->cb access",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, cb[0]) + 256),
			BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.errstr_unpriv = "",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SCHED_ACT,
	},
	{
		"write skb fields from socket prog",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, cb[4])),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, mark)),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, tc_index)),
			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, cb[0])),
			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, cb[2])),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.errstr_unpriv = "R1 leaks addr",
		.result_unpriv = REJECT,
	},
"write skb fields from tc_cls_act prog", 1760 .insns = { 1761 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 1762 offsetof(struct __sk_buff, cb[0])), 1763 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 1764 offsetof(struct __sk_buff, mark)), 1765 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 1766 offsetof(struct __sk_buff, tc_index)), 1767 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 1768 offsetof(struct __sk_buff, tc_index)), 1769 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 1770 offsetof(struct __sk_buff, cb[3])), 1771 BPF_EXIT_INSN(), 1772 }, 1773 .errstr_unpriv = "", 1774 .result_unpriv = REJECT, 1775 .result = ACCEPT, 1776 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 1777 }, 1778 { 1779 "PTR_TO_STACK store/load", 1780 .insns = { 1781 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 1782 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10), 1783 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c), 1784 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2), 1785 BPF_EXIT_INSN(), 1786 }, 1787 .result = ACCEPT, 1788 }, 1789 { 1790 "PTR_TO_STACK store/load - bad alignment on off", 1791 .insns = { 1792 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 1793 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 1794 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c), 1795 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2), 1796 BPF_EXIT_INSN(), 1797 }, 1798 .result = REJECT, 1799 .errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8", 1800 .flags = F_LOAD_WITH_STRICT_ALIGNMENT, 1801 }, 1802 { 1803 "PTR_TO_STACK store/load - bad alignment on reg", 1804 .insns = { 1805 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 1806 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10), 1807 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c), 1808 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8), 1809 BPF_EXIT_INSN(), 1810 }, 1811 .result = REJECT, 1812 .errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8", 1813 .flags = F_LOAD_WITH_STRICT_ALIGNMENT, 1814 }, 1815 { 1816 "PTR_TO_STACK store/load - out of bounds low", 1817 .insns = { 1818 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 1819 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000), 1820 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c), 1821 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8), 1822 BPF_EXIT_INSN(), 1823 }, 1824 .result = REJECT, 1825 .errstr = "invalid stack off=-79992 size=8", 1826 }, 1827 { 1828 "PTR_TO_STACK store/load - out of bounds high", 1829 .insns = { 1830 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 1831 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 1832 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c), 1833 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8), 1834 BPF_EXIT_INSN(), 1835 }, 1836 .result = REJECT, 1837 .errstr = "invalid stack off=0 size=8", 1838 }, 1839 { 1840 "unpriv: return pointer", 1841 .insns = { 1842 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10), 1843 BPF_EXIT_INSN(), 1844 }, 1845 .result = ACCEPT, 1846 .result_unpriv = REJECT, 1847 .errstr_unpriv = "R0 leaks addr", 1848 }, 1849 { 1850 "unpriv: add const to pointer", 1851 .insns = { 1852 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), 1853 BPF_MOV64_IMM(BPF_REG_0, 0), 1854 BPF_EXIT_INSN(), 1855 }, 1856 .result = ACCEPT, 1857 }, 1858 { 1859 "unpriv: add pointer to pointer", 1860 .insns = { 1861 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10), 1862 BPF_MOV64_IMM(BPF_REG_0, 0), 1863 BPF_EXIT_INSN(), 1864 }, 1865 .result = ACCEPT, 1866 .result_unpriv = REJECT, 1867 .errstr_unpriv = "R1 pointer += pointer", 1868 }, 1869 { 1870 "unpriv: neg pointer", 1871 .insns = { 1872 BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0), 1873 BPF_MOV64_IMM(BPF_REG_0, 0), 1874 BPF_EXIT_INSN(), 1875 }, 1876 .result = ACCEPT, 1877 .result_unpriv = REJECT, 1878 .errstr_unpriv = "R1 pointer 
arithmetic", 1879 }, 1880 { 1881 "unpriv: cmp pointer with const", 1882 .insns = { 1883 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0), 1884 BPF_MOV64_IMM(BPF_REG_0, 0), 1885 BPF_EXIT_INSN(), 1886 }, 1887 .result = ACCEPT, 1888 .result_unpriv = REJECT, 1889 .errstr_unpriv = "R1 pointer comparison", 1890 }, 1891 { 1892 "unpriv: cmp pointer with pointer", 1893 .insns = { 1894 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0), 1895 BPF_MOV64_IMM(BPF_REG_0, 0), 1896 BPF_EXIT_INSN(), 1897 }, 1898 .result = ACCEPT, 1899 .result_unpriv = REJECT, 1900 .errstr_unpriv = "R10 pointer comparison", 1901 }, 1902 { 1903 "unpriv: check that printk is disallowed", 1904 .insns = { 1905 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 1906 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 1907 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 1908 BPF_MOV64_IMM(BPF_REG_2, 8), 1909 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1), 1910 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1911 BPF_FUNC_trace_printk), 1912 BPF_MOV64_IMM(BPF_REG_0, 0), 1913 BPF_EXIT_INSN(), 1914 }, 1915 .errstr_unpriv = "unknown func bpf_trace_printk#6", 1916 .result_unpriv = REJECT, 1917 .result = ACCEPT, 1918 }, 1919 { 1920 "unpriv: pass pointer to helper function", 1921 .insns = { 1922 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 1923 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1924 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 1925 BPF_LD_MAP_FD(BPF_REG_1, 0), 1926 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2), 1927 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), 1928 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1929 BPF_FUNC_map_update_elem), 1930 BPF_MOV64_IMM(BPF_REG_0, 0), 1931 BPF_EXIT_INSN(), 1932 }, 1933 .fixup_map1 = { 3 }, 1934 .errstr_unpriv = "R4 leaks addr", 1935 .result_unpriv = REJECT, 1936 .result = ACCEPT, 1937 }, 1938 { 1939 "unpriv: indirectly pass pointer on stack to helper function", 1940 .insns = { 1941 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8), 1942 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 1943 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 1944 BPF_LD_MAP_FD(BPF_REG_1, 0), 1945 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1946 BPF_FUNC_map_lookup_elem), 1947 BPF_MOV64_IMM(BPF_REG_0, 0), 1948 BPF_EXIT_INSN(), 1949 }, 1950 .fixup_map1 = { 3 }, 1951 .errstr = "invalid indirect read from stack off -8+0 size 8", 1952 .result = REJECT, 1953 }, 1954 { 1955 "unpriv: mangle pointer on stack 1", 1956 .insns = { 1957 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8), 1958 BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0), 1959 BPF_MOV64_IMM(BPF_REG_0, 0), 1960 BPF_EXIT_INSN(), 1961 }, 1962 .errstr_unpriv = "attempt to corrupt spilled", 1963 .result_unpriv = REJECT, 1964 .result = ACCEPT, 1965 }, 1966 { 1967 "unpriv: mangle pointer on stack 2", 1968 .insns = { 1969 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8), 1970 BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0), 1971 BPF_MOV64_IMM(BPF_REG_0, 0), 1972 BPF_EXIT_INSN(), 1973 }, 1974 .errstr_unpriv = "attempt to corrupt spilled", 1975 .result_unpriv = REJECT, 1976 .result = ACCEPT, 1977 }, 1978 { 1979 "unpriv: read pointer from stack in small chunks", 1980 .insns = { 1981 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8), 1982 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8), 1983 BPF_MOV64_IMM(BPF_REG_0, 0), 1984 BPF_EXIT_INSN(), 1985 }, 1986 .errstr = "invalid size", 1987 .result = REJECT, 1988 }, 1989 { 1990 "unpriv: write pointer into ctx", 1991 .insns = { 1992 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0), 1993 BPF_MOV64_IMM(BPF_REG_0, 0), 1994 BPF_EXIT_INSN(), 1995 }, 1996 .errstr_unpriv = "R1 leaks addr", 1997 .result_unpriv = REJECT, 1998 .errstr = "invalid bpf_context access", 1999 .result = REJECT, 2000 
}, 2001 { 2002 "unpriv: spill/fill of ctx", 2003 .insns = { 2004 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 2005 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), 2006 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), 2007 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), 2008 BPF_MOV64_IMM(BPF_REG_0, 0), 2009 BPF_EXIT_INSN(), 2010 }, 2011 .result = ACCEPT, 2012 }, 2013 { 2014 "unpriv: spill/fill of ctx 2", 2015 .insns = { 2016 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 2017 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), 2018 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), 2019 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), 2020 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2021 BPF_FUNC_get_hash_recalc), 2022 BPF_EXIT_INSN(), 2023 }, 2024 .result = ACCEPT, 2025 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2026 }, 2027 { 2028 "unpriv: spill/fill of ctx 3", 2029 .insns = { 2030 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 2031 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), 2032 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), 2033 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0), 2034 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), 2035 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2036 BPF_FUNC_get_hash_recalc), 2037 BPF_EXIT_INSN(), 2038 }, 2039 .result = REJECT, 2040 .errstr = "R1 type=fp expected=ctx", 2041 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2042 }, 2043 { 2044 "unpriv: spill/fill of ctx 4", 2045 .insns = { 2046 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 2047 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), 2048 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), 2049 BPF_MOV64_IMM(BPF_REG_0, 1), 2050 BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10, 2051 BPF_REG_0, -8, 0), 2052 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), 2053 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2054 BPF_FUNC_get_hash_recalc), 2055 BPF_EXIT_INSN(), 2056 }, 2057 .result = REJECT, 2058 .errstr = "R1 type=inv expected=ctx", 2059 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2060 }, 2061 { 2062 "unpriv: spill/fill of different pointers stx", 2063 .insns = { 2064 BPF_MOV64_IMM(BPF_REG_3, 42), 2065 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 2066 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), 2067 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3), 2068 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 2069 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), 2070 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0), 2071 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), 2072 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), 2073 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), 2074 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3, 2075 offsetof(struct __sk_buff, mark)), 2076 BPF_MOV64_IMM(BPF_REG_0, 0), 2077 BPF_EXIT_INSN(), 2078 }, 2079 .result = REJECT, 2080 .errstr = "same insn cannot be used with different pointers", 2081 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2082 }, 2083 { 2084 "unpriv: spill/fill of different pointers ldx", 2085 .insns = { 2086 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 2087 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), 2088 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3), 2089 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 2090 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 2091 -(__s32)offsetof(struct bpf_perf_event_data, 2092 sample_period) - 8), 2093 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0), 2094 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), 2095 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), 2096 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), 2097 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 2098 offsetof(struct bpf_perf_event_data, 2099 sample_period)), 2100 BPF_MOV64_IMM(BPF_REG_0, 0), 2101 BPF_EXIT_INSN(), 2102 }, 2103 .result = REJECT, 2104 .errstr = "same insn 
cannot be used with different pointers", 2105 .prog_type = BPF_PROG_TYPE_PERF_EVENT, 2106 }, 2107 { 2108 "unpriv: write pointer into map elem value", 2109 .insns = { 2110 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 2111 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 2112 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 2113 BPF_LD_MAP_FD(BPF_REG_1, 0), 2114 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2115 BPF_FUNC_map_lookup_elem), 2116 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), 2117 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), 2118 BPF_EXIT_INSN(), 2119 }, 2120 .fixup_map1 = { 3 }, 2121 .errstr_unpriv = "R0 leaks addr", 2122 .result_unpriv = REJECT, 2123 .result = ACCEPT, 2124 }, 2125 { 2126 "unpriv: partial copy of pointer", 2127 .insns = { 2128 BPF_MOV32_REG(BPF_REG_1, BPF_REG_10), 2129 BPF_MOV64_IMM(BPF_REG_0, 0), 2130 BPF_EXIT_INSN(), 2131 }, 2132 .errstr_unpriv = "R10 partial copy", 2133 .result_unpriv = REJECT, 2134 .result = ACCEPT, 2135 }, 2136 { 2137 "unpriv: pass pointer to tail_call", 2138 .insns = { 2139 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1), 2140 BPF_LD_MAP_FD(BPF_REG_2, 0), 2141 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2142 BPF_FUNC_tail_call), 2143 BPF_MOV64_IMM(BPF_REG_0, 0), 2144 BPF_EXIT_INSN(), 2145 }, 2146 .fixup_prog = { 1 }, 2147 .errstr_unpriv = "R3 leaks addr into helper", 2148 .result_unpriv = REJECT, 2149 .result = ACCEPT, 2150 }, 2151 { 2152 "unpriv: cmp map pointer with zero", 2153 .insns = { 2154 BPF_MOV64_IMM(BPF_REG_1, 0), 2155 BPF_LD_MAP_FD(BPF_REG_1, 0), 2156 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0), 2157 BPF_MOV64_IMM(BPF_REG_0, 0), 2158 BPF_EXIT_INSN(), 2159 }, 2160 .fixup_map1 = { 1 }, 2161 .errstr_unpriv = "R1 pointer comparison", 2162 .result_unpriv = REJECT, 2163 .result = ACCEPT, 2164 }, 2165 { 2166 "unpriv: write into frame pointer", 2167 .insns = { 2168 BPF_MOV64_REG(BPF_REG_10, BPF_REG_1), 2169 BPF_MOV64_IMM(BPF_REG_0, 0), 2170 BPF_EXIT_INSN(), 2171 }, 2172 .errstr = "frame pointer is read only", 2173 .result = REJECT, 2174 }, 2175 { 2176 "unpriv: spill/fill frame pointer", 2177 .insns = { 2178 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 2179 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), 2180 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0), 2181 BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0), 2182 BPF_MOV64_IMM(BPF_REG_0, 0), 2183 BPF_EXIT_INSN(), 2184 }, 2185 .errstr = "frame pointer is read only", 2186 .result = REJECT, 2187 }, 2188 { 2189 "unpriv: cmp of frame pointer", 2190 .insns = { 2191 BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0), 2192 BPF_MOV64_IMM(BPF_REG_0, 0), 2193 BPF_EXIT_INSN(), 2194 }, 2195 .errstr_unpriv = "R10 pointer comparison", 2196 .result_unpriv = REJECT, 2197 .result = ACCEPT, 2198 }, 2199 { 2200 "unpriv: adding of fp", 2201 .insns = { 2202 BPF_MOV64_IMM(BPF_REG_0, 0), 2203 BPF_MOV64_IMM(BPF_REG_1, 0), 2204 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10), 2205 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8), 2206 BPF_EXIT_INSN(), 2207 }, 2208 .result = ACCEPT, 2209 }, 2210 { 2211 "unpriv: cmp of stack pointer", 2212 .insns = { 2213 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 2214 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 2215 BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0), 2216 BPF_MOV64_IMM(BPF_REG_0, 0), 2217 BPF_EXIT_INSN(), 2218 }, 2219 .errstr_unpriv = "R2 pointer comparison", 2220 .result_unpriv = REJECT, 2221 .result = ACCEPT, 2222 }, 2223 { 2224 "stack pointer arithmetic", 2225 .insns = { 2226 BPF_MOV64_IMM(BPF_REG_1, 4), 2227 BPF_JMP_IMM(BPF_JA, 0, 0, 0), 2228 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10), 2229 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10), 2230 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 
-10), 2231 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7), 2232 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1), 2233 BPF_ST_MEM(0, BPF_REG_2, 4, 0), 2234 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7), 2235 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8), 2236 BPF_ST_MEM(0, BPF_REG_2, 4, 0), 2237 BPF_MOV64_IMM(BPF_REG_0, 0), 2238 BPF_EXIT_INSN(), 2239 }, 2240 .result = ACCEPT, 2241 }, 2242 { 2243 "raw_stack: no skb_load_bytes", 2244 .insns = { 2245 BPF_MOV64_IMM(BPF_REG_2, 4), 2246 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 2247 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), 2248 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 2249 BPF_MOV64_IMM(BPF_REG_4, 8), 2250 /* Call to skb_load_bytes() omitted. */ 2251 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 2252 BPF_EXIT_INSN(), 2253 }, 2254 .result = REJECT, 2255 .errstr = "invalid read from stack off -8+0 size 8", 2256 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2257 }, 2258 { 2259 "raw_stack: skb_load_bytes, negative len", 2260 .insns = { 2261 BPF_MOV64_IMM(BPF_REG_2, 4), 2262 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 2263 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), 2264 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 2265 BPF_MOV64_IMM(BPF_REG_4, -8), 2266 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2267 BPF_FUNC_skb_load_bytes), 2268 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 2269 BPF_EXIT_INSN(), 2270 }, 2271 .result = REJECT, 2272 .errstr = "R4 min value is negative", 2273 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2274 }, 2275 { 2276 "raw_stack: skb_load_bytes, negative len 2", 2277 .insns = { 2278 BPF_MOV64_IMM(BPF_REG_2, 4), 2279 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 2280 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), 2281 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 2282 BPF_MOV64_IMM(BPF_REG_4, ~0), 2283 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2284 BPF_FUNC_skb_load_bytes), 2285 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 2286 BPF_EXIT_INSN(), 2287 }, 2288 .result = REJECT, 2289 .errstr = "R4 min value is negative", 2290 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2291 }, 2292 { 2293 "raw_stack: skb_load_bytes, zero len", 2294 .insns = { 2295 BPF_MOV64_IMM(BPF_REG_2, 4), 2296 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 2297 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), 2298 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 2299 BPF_MOV64_IMM(BPF_REG_4, 0), 2300 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2301 BPF_FUNC_skb_load_bytes), 2302 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 2303 BPF_EXIT_INSN(), 2304 }, 2305 .result = REJECT, 2306 .errstr = "invalid stack type R3", 2307 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2308 }, 2309 { 2310 "raw_stack: skb_load_bytes, no init", 2311 .insns = { 2312 BPF_MOV64_IMM(BPF_REG_2, 4), 2313 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 2314 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), 2315 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 2316 BPF_MOV64_IMM(BPF_REG_4, 8), 2317 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2318 BPF_FUNC_skb_load_bytes), 2319 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 2320 BPF_EXIT_INSN(), 2321 }, 2322 .result = ACCEPT, 2323 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2324 }, 2325 { 2326 "raw_stack: skb_load_bytes, init", 2327 .insns = { 2328 BPF_MOV64_IMM(BPF_REG_2, 4), 2329 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 2330 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), 2331 BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe), 2332 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 2333 BPF_MOV64_IMM(BPF_REG_4, 8), 2334 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2335 BPF_FUNC_skb_load_bytes), 2336 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 2337 BPF_EXIT_INSN(), 2338 }, 2339 .result = ACCEPT, 2340 .prog_type = 
BPF_PROG_TYPE_SCHED_CLS, 2341 }, 2342 { 2343 "raw_stack: skb_load_bytes, spilled regs around bounds", 2344 .insns = { 2345 BPF_MOV64_IMM(BPF_REG_2, 4), 2346 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 2347 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16), 2348 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8), 2349 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8), 2350 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 2351 BPF_MOV64_IMM(BPF_REG_4, 8), 2352 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2353 BPF_FUNC_skb_load_bytes), 2354 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8), 2355 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8), 2356 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 2357 offsetof(struct __sk_buff, mark)), 2358 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2, 2359 offsetof(struct __sk_buff, priority)), 2360 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), 2361 BPF_EXIT_INSN(), 2362 }, 2363 .result = ACCEPT, 2364 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2365 }, 2366 { 2367 "raw_stack: skb_load_bytes, spilled regs corruption", 2368 .insns = { 2369 BPF_MOV64_IMM(BPF_REG_2, 4), 2370 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 2371 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), 2372 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), 2373 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 2374 BPF_MOV64_IMM(BPF_REG_4, 8), 2375 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2376 BPF_FUNC_skb_load_bytes), 2377 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 2378 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 2379 offsetof(struct __sk_buff, mark)), 2380 BPF_EXIT_INSN(), 2381 }, 2382 .result = REJECT, 2383 .errstr = "R0 invalid mem access 'inv'", 2384 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2385 }, 2386 { 2387 "raw_stack: skb_load_bytes, spilled regs corruption 2", 2388 .insns = { 2389 BPF_MOV64_IMM(BPF_REG_2, 4), 2390 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 2391 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16), 2392 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8), 2393 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), 2394 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8), 2395 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 2396 BPF_MOV64_IMM(BPF_REG_4, 8), 2397 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2398 BPF_FUNC_skb_load_bytes), 2399 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8), 2400 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8), 2401 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0), 2402 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 2403 offsetof(struct __sk_buff, mark)), 2404 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2, 2405 offsetof(struct __sk_buff, priority)), 2406 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), 2407 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3, 2408 offsetof(struct __sk_buff, pkt_type)), 2409 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3), 2410 BPF_EXIT_INSN(), 2411 }, 2412 .result = REJECT, 2413 .errstr = "R3 invalid mem access 'inv'", 2414 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2415 }, 2416 { 2417 "raw_stack: skb_load_bytes, spilled regs + data", 2418 .insns = { 2419 BPF_MOV64_IMM(BPF_REG_2, 4), 2420 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 2421 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16), 2422 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8), 2423 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), 2424 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8), 2425 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 2426 BPF_MOV64_IMM(BPF_REG_4, 8), 2427 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2428 BPF_FUNC_skb_load_bytes), 2429 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8), 2430 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8), 2431 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0), 2432 BPF_LDX_MEM(BPF_W, BPF_REG_0, 
BPF_REG_0, 2433 offsetof(struct __sk_buff, mark)), 2434 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2, 2435 offsetof(struct __sk_buff, priority)), 2436 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), 2437 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3), 2438 BPF_EXIT_INSN(), 2439 }, 2440 .result = ACCEPT, 2441 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2442 }, 2443 { 2444 "raw_stack: skb_load_bytes, invalid access 1", 2445 .insns = { 2446 BPF_MOV64_IMM(BPF_REG_2, 4), 2447 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 2448 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513), 2449 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 2450 BPF_MOV64_IMM(BPF_REG_4, 8), 2451 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2452 BPF_FUNC_skb_load_bytes), 2453 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 2454 BPF_EXIT_INSN(), 2455 }, 2456 .result = REJECT, 2457 .errstr = "invalid stack type R3 off=-513 access_size=8", 2458 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2459 }, 2460 { 2461 "raw_stack: skb_load_bytes, invalid access 2", 2462 .insns = { 2463 BPF_MOV64_IMM(BPF_REG_2, 4), 2464 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 2465 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1), 2466 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 2467 BPF_MOV64_IMM(BPF_REG_4, 8), 2468 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2469 BPF_FUNC_skb_load_bytes), 2470 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 2471 BPF_EXIT_INSN(), 2472 }, 2473 .result = REJECT, 2474 .errstr = "invalid stack type R3 off=-1 access_size=8", 2475 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2476 }, 2477 { 2478 "raw_stack: skb_load_bytes, invalid access 3", 2479 .insns = { 2480 BPF_MOV64_IMM(BPF_REG_2, 4), 2481 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 2482 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff), 2483 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 2484 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff), 2485 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2486 BPF_FUNC_skb_load_bytes), 2487 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 2488 BPF_EXIT_INSN(), 2489 }, 2490 .result = REJECT, 2491 .errstr = "R4 min value is negative", 2492 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2493 }, 2494 { 2495 "raw_stack: skb_load_bytes, invalid access 4", 2496 .insns = { 2497 BPF_MOV64_IMM(BPF_REG_2, 4), 2498 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 2499 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1), 2500 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 2501 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff), 2502 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2503 BPF_FUNC_skb_load_bytes), 2504 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 2505 BPF_EXIT_INSN(), 2506 }, 2507 .result = REJECT, 2508 .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'", 2509 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2510 }, 2511 { 2512 "raw_stack: skb_load_bytes, invalid access 5", 2513 .insns = { 2514 BPF_MOV64_IMM(BPF_REG_2, 4), 2515 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 2516 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512), 2517 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 2518 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff), 2519 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2520 BPF_FUNC_skb_load_bytes), 2521 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 2522 BPF_EXIT_INSN(), 2523 }, 2524 .result = REJECT, 2525 .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'", 2526 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2527 }, 2528 { 2529 "raw_stack: skb_load_bytes, invalid access 6", 2530 .insns = { 2531 BPF_MOV64_IMM(BPF_REG_2, 4), 2532 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 2533 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512), 2534 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 2535 
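	/* The surrounding "invalid access" cases probe the bounds on a raw
	 * stack buffer handed to skb_load_bytes(): the buffer must lie
	 * entirely inside the 512-byte stack frame and the length must be a
	 * known, strictly positive value, otherwise the call is rejected.
	 */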
BPF_MOV64_IMM(BPF_REG_4, 0), 2536 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2537 BPF_FUNC_skb_load_bytes), 2538 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 2539 BPF_EXIT_INSN(), 2540 }, 2541 .result = REJECT, 2542 .errstr = "invalid stack type R3 off=-512 access_size=0", 2543 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2544 }, 2545 { 2546 "raw_stack: skb_load_bytes, large access", 2547 .insns = { 2548 BPF_MOV64_IMM(BPF_REG_2, 4), 2549 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), 2550 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512), 2551 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 2552 BPF_MOV64_IMM(BPF_REG_4, 512), 2553 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2554 BPF_FUNC_skb_load_bytes), 2555 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 2556 BPF_EXIT_INSN(), 2557 }, 2558 .result = ACCEPT, 2559 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2560 }, 2561 { 2562 "direct packet access: test1", 2563 .insns = { 2564 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2565 offsetof(struct __sk_buff, data)), 2566 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2567 offsetof(struct __sk_buff, data_end)), 2568 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 2569 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 2570 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), 2571 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), 2572 BPF_MOV64_IMM(BPF_REG_0, 0), 2573 BPF_EXIT_INSN(), 2574 }, 2575 .result = ACCEPT, 2576 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2577 }, 2578 { 2579 "direct packet access: test2", 2580 .insns = { 2581 BPF_MOV64_IMM(BPF_REG_0, 1), 2582 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, 2583 offsetof(struct __sk_buff, data_end)), 2584 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2585 offsetof(struct __sk_buff, data)), 2586 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3), 2587 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14), 2588 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15), 2589 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7), 2590 BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12), 2591 BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14), 2592 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2593 offsetof(struct __sk_buff, data)), 2594 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4), 2595 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1), 2596 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49), 2597 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49), 2598 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2), 2599 BPF_MOV64_REG(BPF_REG_2, BPF_REG_3), 2600 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8), 2601 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 2602 offsetof(struct __sk_buff, data_end)), 2603 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1), 2604 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4), 2605 BPF_MOV64_IMM(BPF_REG_0, 0), 2606 BPF_EXIT_INSN(), 2607 }, 2608 .result = ACCEPT, 2609 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2610 }, 2611 { 2612 "direct packet access: test3", 2613 .insns = { 2614 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2615 offsetof(struct __sk_buff, data)), 2616 BPF_MOV64_IMM(BPF_REG_0, 0), 2617 BPF_EXIT_INSN(), 2618 }, 2619 .errstr = "invalid bpf_context access off=76", 2620 .result = REJECT, 2621 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, 2622 }, 2623 { 2624 "direct packet access: test4 (write)", 2625 .insns = { 2626 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2627 offsetof(struct __sk_buff, data)), 2628 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2629 offsetof(struct __sk_buff, data_end)), 2630 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 2631 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 2632 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), 2633 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), 2634 BPF_MOV64_IMM(BPF_REG_0, 0), 2635 BPF_EXIT_INSN(), 2636 }, 2637 .result = ACCEPT, 2638 
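	/* The "direct packet access" tests check that loads and stores via
	 * skb->data are only accepted on paths where the program has already
	 * compared the end of the access against skb->data_end; accesses
	 * without such a check, or past the checked range, are rejected.
	 */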
.prog_type = BPF_PROG_TYPE_SCHED_CLS, 2639 }, 2640 { 2641 "direct packet access: test5 (pkt_end >= reg, good access)", 2642 .insns = { 2643 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2644 offsetof(struct __sk_buff, data)), 2645 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2646 offsetof(struct __sk_buff, data_end)), 2647 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 2648 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 2649 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2), 2650 BPF_MOV64_IMM(BPF_REG_0, 1), 2651 BPF_EXIT_INSN(), 2652 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), 2653 BPF_MOV64_IMM(BPF_REG_0, 0), 2654 BPF_EXIT_INSN(), 2655 }, 2656 .result = ACCEPT, 2657 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2658 }, 2659 { 2660 "direct packet access: test6 (pkt_end >= reg, bad access)", 2661 .insns = { 2662 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2663 offsetof(struct __sk_buff, data)), 2664 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2665 offsetof(struct __sk_buff, data_end)), 2666 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 2667 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 2668 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3), 2669 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), 2670 BPF_MOV64_IMM(BPF_REG_0, 1), 2671 BPF_EXIT_INSN(), 2672 BPF_MOV64_IMM(BPF_REG_0, 0), 2673 BPF_EXIT_INSN(), 2674 }, 2675 .errstr = "invalid access to packet", 2676 .result = REJECT, 2677 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2678 }, 2679 { 2680 "direct packet access: test7 (pkt_end >= reg, both accesses)", 2681 .insns = { 2682 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2683 offsetof(struct __sk_buff, data)), 2684 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2685 offsetof(struct __sk_buff, data_end)), 2686 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 2687 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 2688 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3), 2689 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), 2690 BPF_MOV64_IMM(BPF_REG_0, 1), 2691 BPF_EXIT_INSN(), 2692 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), 2693 BPF_MOV64_IMM(BPF_REG_0, 0), 2694 BPF_EXIT_INSN(), 2695 }, 2696 .errstr = "invalid access to packet", 2697 .result = REJECT, 2698 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2699 }, 2700 { 2701 "direct packet access: test8 (double test, variant 1)", 2702 .insns = { 2703 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2704 offsetof(struct __sk_buff, data)), 2705 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2706 offsetof(struct __sk_buff, data_end)), 2707 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 2708 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 2709 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4), 2710 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), 2711 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), 2712 BPF_MOV64_IMM(BPF_REG_0, 1), 2713 BPF_EXIT_INSN(), 2714 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), 2715 BPF_MOV64_IMM(BPF_REG_0, 0), 2716 BPF_EXIT_INSN(), 2717 }, 2718 .result = ACCEPT, 2719 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2720 }, 2721 { 2722 "direct packet access: test9 (double test, variant 2)", 2723 .insns = { 2724 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2725 offsetof(struct __sk_buff, data)), 2726 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2727 offsetof(struct __sk_buff, data_end)), 2728 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 2729 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 2730 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2), 2731 BPF_MOV64_IMM(BPF_REG_0, 1), 2732 BPF_EXIT_INSN(), 2733 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), 2734 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), 2735 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), 2736 BPF_MOV64_IMM(BPF_REG_0, 0), 2737 BPF_EXIT_INSN(), 2738 }, 2739 .result = 
ACCEPT, 2740 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2741 }, 2742 { 2743 "direct packet access: test10 (write invalid)", 2744 .insns = { 2745 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2746 offsetof(struct __sk_buff, data)), 2747 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2748 offsetof(struct __sk_buff, data_end)), 2749 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 2750 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 2751 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2), 2752 BPF_MOV64_IMM(BPF_REG_0, 0), 2753 BPF_EXIT_INSN(), 2754 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), 2755 BPF_MOV64_IMM(BPF_REG_0, 0), 2756 BPF_EXIT_INSN(), 2757 }, 2758 .errstr = "invalid access to packet", 2759 .result = REJECT, 2760 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2761 }, 2762 { 2763 "direct packet access: test11 (shift, good access)", 2764 .insns = { 2765 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2766 offsetof(struct __sk_buff, data)), 2767 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2768 offsetof(struct __sk_buff, data_end)), 2769 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 2770 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22), 2771 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8), 2772 BPF_MOV64_IMM(BPF_REG_3, 144), 2773 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3), 2774 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23), 2775 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3), 2776 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2), 2777 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5), 2778 BPF_MOV64_IMM(BPF_REG_0, 1), 2779 BPF_EXIT_INSN(), 2780 BPF_MOV64_IMM(BPF_REG_0, 0), 2781 BPF_EXIT_INSN(), 2782 }, 2783 .result = ACCEPT, 2784 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2785 }, 2786 { 2787 "direct packet access: test12 (and, good access)", 2788 .insns = { 2789 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2790 offsetof(struct __sk_buff, data)), 2791 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2792 offsetof(struct __sk_buff, data_end)), 2793 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 2794 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22), 2795 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8), 2796 BPF_MOV64_IMM(BPF_REG_3, 144), 2797 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3), 2798 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23), 2799 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15), 2800 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2), 2801 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5), 2802 BPF_MOV64_IMM(BPF_REG_0, 1), 2803 BPF_EXIT_INSN(), 2804 BPF_MOV64_IMM(BPF_REG_0, 0), 2805 BPF_EXIT_INSN(), 2806 }, 2807 .result = ACCEPT, 2808 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2809 }, 2810 { 2811 "direct packet access: test13 (branches, good access)", 2812 .insns = { 2813 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2814 offsetof(struct __sk_buff, data)), 2815 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2816 offsetof(struct __sk_buff, data_end)), 2817 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 2818 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22), 2819 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13), 2820 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2821 offsetof(struct __sk_buff, mark)), 2822 BPF_MOV64_IMM(BPF_REG_4, 1), 2823 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2), 2824 BPF_MOV64_IMM(BPF_REG_3, 14), 2825 BPF_JMP_IMM(BPF_JA, 0, 0, 1), 2826 BPF_MOV64_IMM(BPF_REG_3, 24), 2827 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3), 2828 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23), 2829 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15), 2830 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2), 2831 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5), 2832 BPF_MOV64_IMM(BPF_REG_0, 1), 2833 BPF_EXIT_INSN(), 2834 BPF_MOV64_IMM(BPF_REG_0, 0), 2835 BPF_EXIT_INSN(), 2836 }, 2837 .result = ACCEPT, 2838 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2839 }, 2840 { 2841 "direct packet 
access: test14 (pkt_ptr += 0, CONST_IMM, good access)", 2842 .insns = { 2843 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2844 offsetof(struct __sk_buff, data)), 2845 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2846 offsetof(struct __sk_buff, data_end)), 2847 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 2848 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22), 2849 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7), 2850 BPF_MOV64_IMM(BPF_REG_5, 12), 2851 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4), 2852 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2), 2853 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5), 2854 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0), 2855 BPF_MOV64_IMM(BPF_REG_0, 1), 2856 BPF_EXIT_INSN(), 2857 BPF_MOV64_IMM(BPF_REG_0, 0), 2858 BPF_EXIT_INSN(), 2859 }, 2860 .result = ACCEPT, 2861 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2862 }, 2863 { 2864 "direct packet access: test15 (spill with xadd)", 2865 .insns = { 2866 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2867 offsetof(struct __sk_buff, data)), 2868 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2869 offsetof(struct __sk_buff, data_end)), 2870 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 2871 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 2872 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8), 2873 BPF_MOV64_IMM(BPF_REG_5, 4096), 2874 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), 2875 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), 2876 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), 2877 BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0), 2878 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0), 2879 BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0), 2880 BPF_MOV64_IMM(BPF_REG_0, 0), 2881 BPF_EXIT_INSN(), 2882 }, 2883 .errstr = "R2 invalid mem access 'inv'", 2884 .result = REJECT, 2885 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2886 }, 2887 { 2888 "direct packet access: test16 (arith on data_end)", 2889 .insns = { 2890 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2891 offsetof(struct __sk_buff, data)), 2892 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2893 offsetof(struct __sk_buff, data_end)), 2894 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 2895 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 2896 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16), 2897 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), 2898 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), 2899 BPF_MOV64_IMM(BPF_REG_0, 0), 2900 BPF_EXIT_INSN(), 2901 }, 2902 .errstr = "invalid access to packet", 2903 .result = REJECT, 2904 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2905 }, 2906 { 2907 "direct packet access: test17 (pruning, alignment)", 2908 .insns = { 2909 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2910 offsetof(struct __sk_buff, data)), 2911 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2912 offsetof(struct __sk_buff, data_end)), 2913 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, 2914 offsetof(struct __sk_buff, mark)), 2915 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 2916 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14), 2917 BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4), 2918 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), 2919 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4), 2920 BPF_MOV64_IMM(BPF_REG_0, 0), 2921 BPF_EXIT_INSN(), 2922 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), 2923 BPF_JMP_A(-6), 2924 }, 2925 .errstr = "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4", 2926 .result = REJECT, 2927 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2928 .flags = F_LOAD_WITH_STRICT_ALIGNMENT, 2929 }, 2930 { 2931 "direct packet access: test18 (imm += pkt_ptr, 1)", 2932 .insns = { 2933 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2934 offsetof(struct __sk_buff, data)), 2935 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2936 offsetof(struct __sk_buff, data_end)), 2937 BPF_MOV64_IMM(BPF_REG_0, 
8), 2938 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), 2939 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), 2940 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), 2941 BPF_MOV64_IMM(BPF_REG_0, 0), 2942 BPF_EXIT_INSN(), 2943 }, 2944 .result = ACCEPT, 2945 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2946 }, 2947 { 2948 "direct packet access: test19 (imm += pkt_ptr, 2)", 2949 .insns = { 2950 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2951 offsetof(struct __sk_buff, data)), 2952 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2953 offsetof(struct __sk_buff, data_end)), 2954 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 2955 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 2956 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3), 2957 BPF_MOV64_IMM(BPF_REG_4, 4), 2958 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2), 2959 BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0), 2960 BPF_MOV64_IMM(BPF_REG_0, 0), 2961 BPF_EXIT_INSN(), 2962 }, 2963 .result = ACCEPT, 2964 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2965 }, 2966 { 2967 "direct packet access: test20 (x += pkt_ptr, 1)", 2968 .insns = { 2969 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2970 offsetof(struct __sk_buff, data)), 2971 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2972 offsetof(struct __sk_buff, data_end)), 2973 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff), 2974 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), 2975 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), 2976 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0x7fff), 2977 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), 2978 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2), 2979 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4), 2980 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1), 2981 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1), 2982 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0), 2983 BPF_MOV64_IMM(BPF_REG_0, 0), 2984 BPF_EXIT_INSN(), 2985 }, 2986 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2987 .result = ACCEPT, 2988 }, 2989 { 2990 "direct packet access: test21 (x += pkt_ptr, 2)", 2991 .insns = { 2992 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2993 offsetof(struct __sk_buff, data)), 2994 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2995 offsetof(struct __sk_buff, data_end)), 2996 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 2997 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 2998 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9), 2999 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff), 3000 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8), 3001 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), 3002 BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0x7fff), 3003 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2), 3004 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4), 3005 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1), 3006 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1), 3007 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0), 3008 BPF_MOV64_IMM(BPF_REG_0, 0), 3009 BPF_EXIT_INSN(), 3010 }, 3011 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 3012 .result = ACCEPT, 3013 }, 3014 { 3015 "direct packet access: test22 (x += pkt_ptr, 3)", 3016 .insns = { 3017 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 3018 offsetof(struct __sk_buff, data)), 3019 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 3020 offsetof(struct __sk_buff, data_end)), 3021 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 3022 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 3023 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8), 3024 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16), 3025 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16), 3026 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11), 3027 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8), 3028 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff), 3029 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8), 3030 BPF_LDX_MEM(BPF_DW, BPF_REG_4, 
BPF_REG_10, -8), 3031 BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49), 3032 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2), 3033 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4), 3034 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2), 3035 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2), 3036 BPF_MOV64_IMM(BPF_REG_2, 1), 3037 BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0), 3038 BPF_MOV64_IMM(BPF_REG_0, 0), 3039 BPF_EXIT_INSN(), 3040 }, 3041 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 3042 .result = ACCEPT, 3043 }, 3044 { 3045 "direct packet access: test23 (x += pkt_ptr, 4)", 3046 .insns = { 3047 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 3048 offsetof(struct __sk_buff, data)), 3049 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 3050 offsetof(struct __sk_buff, data_end)), 3051 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff), 3052 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), 3053 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), 3054 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff), 3055 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), 3056 BPF_MOV64_IMM(BPF_REG_0, 31), 3057 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4), 3058 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), 3059 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0), 3060 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1), 3061 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), 3062 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0), 3063 BPF_MOV64_IMM(BPF_REG_0, 0), 3064 BPF_EXIT_INSN(), 3065 }, 3066 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 3067 .result = REJECT, 3068 .errstr = "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)", 3069 }, 3070 { 3071 "direct packet access: test24 (x += pkt_ptr, 5)", 3072 .insns = { 3073 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 3074 offsetof(struct __sk_buff, data)), 3075 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 3076 offsetof(struct __sk_buff, data_end)), 3077 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff), 3078 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), 3079 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), 3080 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff), 3081 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), 3082 BPF_MOV64_IMM(BPF_REG_0, 64), 3083 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4), 3084 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), 3085 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0), 3086 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7fff - 1), 3087 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), 3088 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0), 3089 BPF_MOV64_IMM(BPF_REG_0, 0), 3090 BPF_EXIT_INSN(), 3091 }, 3092 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 3093 .result = ACCEPT, 3094 }, 3095 { 3096 "direct packet access: test25 (marking on <, good access)", 3097 .insns = { 3098 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 3099 offsetof(struct __sk_buff, data)), 3100 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 3101 offsetof(struct __sk_buff, data_end)), 3102 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 3103 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 3104 BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 2), 3105 BPF_MOV64_IMM(BPF_REG_0, 0), 3106 BPF_EXIT_INSN(), 3107 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), 3108 BPF_JMP_IMM(BPF_JA, 0, 0, -4), 3109 }, 3110 .result = ACCEPT, 3111 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 3112 }, 3113 { 3114 "direct packet access: test26 (marking on <, bad access)", 3115 .insns = { 3116 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 3117 offsetof(struct __sk_buff, data)), 3118 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 3119 offsetof(struct __sk_buff, data_end)), 3120 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 3121 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 3122 BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 3), 3123 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), 
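	/* Tests 25-28 make sure the packet range is also learned from JLT and
	 * JLE comparisons, in both branch directions: the load is only
	 * accepted on the branch where the comparison proves the checked
	 * pointer stays below data_end.
	 */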
3124 BPF_MOV64_IMM(BPF_REG_0, 0), 3125 BPF_EXIT_INSN(), 3126 BPF_JMP_IMM(BPF_JA, 0, 0, -3), 3127 }, 3128 .result = REJECT, 3129 .errstr = "invalid access to packet", 3130 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 3131 }, 3132 { 3133 "direct packet access: test27 (marking on <=, good access)", 3134 .insns = { 3135 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 3136 offsetof(struct __sk_buff, data)), 3137 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 3138 offsetof(struct __sk_buff, data_end)), 3139 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 3140 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 3141 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 1), 3142 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), 3143 BPF_MOV64_IMM(BPF_REG_0, 1), 3144 BPF_EXIT_INSN(), 3145 }, 3146 .result = ACCEPT, 3147 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 3148 }, 3149 { 3150 "direct packet access: test28 (marking on <=, bad access)", 3151 .insns = { 3152 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 3153 offsetof(struct __sk_buff, data)), 3154 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 3155 offsetof(struct __sk_buff, data_end)), 3156 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 3157 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 3158 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 2), 3159 BPF_MOV64_IMM(BPF_REG_0, 1), 3160 BPF_EXIT_INSN(), 3161 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), 3162 BPF_JMP_IMM(BPF_JA, 0, 0, -4), 3163 }, 3164 .result = REJECT, 3165 .errstr = "invalid access to packet", 3166 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 3167 }, 3168 { 3169 "helper access to packet: test1, valid packet_ptr range", 3170 .insns = { 3171 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 3172 offsetof(struct xdp_md, data)), 3173 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 3174 offsetof(struct xdp_md, data_end)), 3175 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), 3176 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), 3177 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5), 3178 BPF_LD_MAP_FD(BPF_REG_1, 0), 3179 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2), 3180 BPF_MOV64_IMM(BPF_REG_4, 0), 3181 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3182 BPF_FUNC_map_update_elem), 3183 BPF_MOV64_IMM(BPF_REG_0, 0), 3184 BPF_EXIT_INSN(), 3185 }, 3186 .fixup_map1 = { 5 }, 3187 .result_unpriv = ACCEPT, 3188 .result = ACCEPT, 3189 .prog_type = BPF_PROG_TYPE_XDP, 3190 }, 3191 { 3192 "helper access to packet: test2, unchecked packet_ptr", 3193 .insns = { 3194 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 3195 offsetof(struct xdp_md, data)), 3196 BPF_LD_MAP_FD(BPF_REG_1, 0), 3197 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3198 BPF_FUNC_map_lookup_elem), 3199 BPF_MOV64_IMM(BPF_REG_0, 0), 3200 BPF_EXIT_INSN(), 3201 }, 3202 .fixup_map1 = { 1 }, 3203 .result = REJECT, 3204 .errstr = "invalid access to packet", 3205 .prog_type = BPF_PROG_TYPE_XDP, 3206 }, 3207 { 3208 "helper access to packet: test3, variable add", 3209 .insns = { 3210 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 3211 offsetof(struct xdp_md, data)), 3212 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 3213 offsetof(struct xdp_md, data_end)), 3214 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), 3215 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8), 3216 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10), 3217 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0), 3218 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), 3219 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5), 3220 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4), 3221 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8), 3222 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4), 3223 BPF_LD_MAP_FD(BPF_REG_1, 0), 3224 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4), 3225 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3226 BPF_FUNC_map_lookup_elem), 3227 
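	/* The "helper access to packet" tests pass packet pointers as map
	 * helper arguments: this is only allowed when the verified packet
	 * range covers the bytes the helper will read, so unchecked pointers
	 * and too-short ranges are rejected.
	 */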
BPF_MOV64_IMM(BPF_REG_0, 0), 3228 BPF_EXIT_INSN(), 3229 }, 3230 .fixup_map1 = { 11 }, 3231 .result = ACCEPT, 3232 .prog_type = BPF_PROG_TYPE_XDP, 3233 }, 3234 { 3235 "helper access to packet: test4, packet_ptr with bad range", 3236 .insns = { 3237 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 3238 offsetof(struct xdp_md, data)), 3239 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 3240 offsetof(struct xdp_md, data_end)), 3241 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), 3242 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), 3243 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2), 3244 BPF_MOV64_IMM(BPF_REG_0, 0), 3245 BPF_EXIT_INSN(), 3246 BPF_LD_MAP_FD(BPF_REG_1, 0), 3247 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3248 BPF_FUNC_map_lookup_elem), 3249 BPF_MOV64_IMM(BPF_REG_0, 0), 3250 BPF_EXIT_INSN(), 3251 }, 3252 .fixup_map1 = { 7 }, 3253 .result = REJECT, 3254 .errstr = "invalid access to packet", 3255 .prog_type = BPF_PROG_TYPE_XDP, 3256 }, 3257 { 3258 "helper access to packet: test5, packet_ptr with too short range", 3259 .insns = { 3260 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 3261 offsetof(struct xdp_md, data)), 3262 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 3263 offsetof(struct xdp_md, data_end)), 3264 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), 3265 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), 3266 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7), 3267 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3), 3268 BPF_LD_MAP_FD(BPF_REG_1, 0), 3269 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3270 BPF_FUNC_map_lookup_elem), 3271 BPF_MOV64_IMM(BPF_REG_0, 0), 3272 BPF_EXIT_INSN(), 3273 }, 3274 .fixup_map1 = { 6 }, 3275 .result = REJECT, 3276 .errstr = "invalid access to packet", 3277 .prog_type = BPF_PROG_TYPE_XDP, 3278 }, 3279 { 3280 "helper access to packet: test6, cls valid packet_ptr range", 3281 .insns = { 3282 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 3283 offsetof(struct __sk_buff, data)), 3284 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 3285 offsetof(struct __sk_buff, data_end)), 3286 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), 3287 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), 3288 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5), 3289 BPF_LD_MAP_FD(BPF_REG_1, 0), 3290 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2), 3291 BPF_MOV64_IMM(BPF_REG_4, 0), 3292 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3293 BPF_FUNC_map_update_elem), 3294 BPF_MOV64_IMM(BPF_REG_0, 0), 3295 BPF_EXIT_INSN(), 3296 }, 3297 .fixup_map1 = { 5 }, 3298 .result = ACCEPT, 3299 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 3300 }, 3301 { 3302 "helper access to packet: test7, cls unchecked packet_ptr", 3303 .insns = { 3304 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 3305 offsetof(struct __sk_buff, data)), 3306 BPF_LD_MAP_FD(BPF_REG_1, 0), 3307 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3308 BPF_FUNC_map_lookup_elem), 3309 BPF_MOV64_IMM(BPF_REG_0, 0), 3310 BPF_EXIT_INSN(), 3311 }, 3312 .fixup_map1 = { 1 }, 3313 .result = REJECT, 3314 .errstr = "invalid access to packet", 3315 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 3316 }, 3317 { 3318 "helper access to packet: test8, cls variable add", 3319 .insns = { 3320 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 3321 offsetof(struct __sk_buff, data)), 3322 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 3323 offsetof(struct __sk_buff, data_end)), 3324 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), 3325 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8), 3326 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10), 3327 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0), 3328 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), 3329 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5), 3330 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4), 3331 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8), 
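	/* Tests 6-10 repeat the packet-pointer-to-helper checks for
	 * BPF_PROG_TYPE_SCHED_CLS, where the packet is reached through
	 * __sk_buff data/data_end rather than xdp_md.
	 */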
3332 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4), 3333 BPF_LD_MAP_FD(BPF_REG_1, 0), 3334 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4), 3335 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3336 BPF_FUNC_map_lookup_elem), 3337 BPF_MOV64_IMM(BPF_REG_0, 0), 3338 BPF_EXIT_INSN(), 3339 }, 3340 .fixup_map1 = { 11 }, 3341 .result = ACCEPT, 3342 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 3343 }, 3344 { 3345 "helper access to packet: test9, cls packet_ptr with bad range", 3346 .insns = { 3347 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 3348 offsetof(struct __sk_buff, data)), 3349 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 3350 offsetof(struct __sk_buff, data_end)), 3351 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), 3352 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), 3353 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2), 3354 BPF_MOV64_IMM(BPF_REG_0, 0), 3355 BPF_EXIT_INSN(), 3356 BPF_LD_MAP_FD(BPF_REG_1, 0), 3357 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3358 BPF_FUNC_map_lookup_elem), 3359 BPF_MOV64_IMM(BPF_REG_0, 0), 3360 BPF_EXIT_INSN(), 3361 }, 3362 .fixup_map1 = { 7 }, 3363 .result = REJECT, 3364 .errstr = "invalid access to packet", 3365 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 3366 }, 3367 { 3368 "helper access to packet: test10, cls packet_ptr with too short range", 3369 .insns = { 3370 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 3371 offsetof(struct __sk_buff, data)), 3372 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 3373 offsetof(struct __sk_buff, data_end)), 3374 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), 3375 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), 3376 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7), 3377 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3), 3378 BPF_LD_MAP_FD(BPF_REG_1, 0), 3379 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3380 BPF_FUNC_map_lookup_elem), 3381 BPF_MOV64_IMM(BPF_REG_0, 0), 3382 BPF_EXIT_INSN(), 3383 }, 3384 .fixup_map1 = { 6 }, 3385 .result = REJECT, 3386 .errstr = "invalid access to packet", 3387 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 3388 }, 3389 { 3390 "helper access to packet: test11, cls unsuitable helper 1", 3391 .insns = { 3392 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, 3393 offsetof(struct __sk_buff, data)), 3394 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, 3395 offsetof(struct __sk_buff, data_end)), 3396 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), 3397 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 3398 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7), 3399 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4), 3400 BPF_MOV64_IMM(BPF_REG_2, 0), 3401 BPF_MOV64_IMM(BPF_REG_4, 42), 3402 BPF_MOV64_IMM(BPF_REG_5, 0), 3403 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3404 BPF_FUNC_skb_store_bytes), 3405 BPF_MOV64_IMM(BPF_REG_0, 0), 3406 BPF_EXIT_INSN(), 3407 }, 3408 .result = REJECT, 3409 .errstr = "helper access to the packet", 3410 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 3411 }, 3412 { 3413 "helper access to packet: test12, cls unsuitable helper 2", 3414 .insns = { 3415 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, 3416 offsetof(struct __sk_buff, data)), 3417 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, 3418 offsetof(struct __sk_buff, data_end)), 3419 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 3420 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8), 3421 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3), 3422 BPF_MOV64_IMM(BPF_REG_2, 0), 3423 BPF_MOV64_IMM(BPF_REG_4, 4), 3424 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3425 BPF_FUNC_skb_load_bytes), 3426 BPF_MOV64_IMM(BPF_REG_0, 0), 3427 BPF_EXIT_INSN(), 3428 }, 3429 .result = REJECT, 3430 .errstr = "helper access to the packet", 3431 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 3432 }, 3433 { 3434 "helper access to packet: test13, cls helper ok", 3435 .insns = { 3436 
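	/* Tests 11 and 12 above hand a packet pointer to skb_store_bytes()
	 * and skb_load_bytes(), which are not allowed to access the packet
	 * directly and fail with "helper access to the packet"; the
	 * csum_diff() tests starting here show a helper that may read a
	 * bounds-checked packet area.
	 */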
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, 3437 offsetof(struct __sk_buff, data)), 3438 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, 3439 offsetof(struct __sk_buff, data_end)), 3440 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), 3441 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 3442 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), 3443 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), 3444 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 3445 BPF_MOV64_IMM(BPF_REG_2, 4), 3446 BPF_MOV64_IMM(BPF_REG_3, 0), 3447 BPF_MOV64_IMM(BPF_REG_4, 0), 3448 BPF_MOV64_IMM(BPF_REG_5, 0), 3449 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3450 BPF_FUNC_csum_diff), 3451 BPF_MOV64_IMM(BPF_REG_0, 0), 3452 BPF_EXIT_INSN(), 3453 }, 3454 .result = ACCEPT, 3455 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 3456 }, 3457 { 3458 "helper access to packet: test14, cls helper ok sub", 3459 .insns = { 3460 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, 3461 offsetof(struct __sk_buff, data)), 3462 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, 3463 offsetof(struct __sk_buff, data_end)), 3464 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), 3465 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 3466 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), 3467 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), 3468 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4), 3469 BPF_MOV64_IMM(BPF_REG_2, 4), 3470 BPF_MOV64_IMM(BPF_REG_3, 0), 3471 BPF_MOV64_IMM(BPF_REG_4, 0), 3472 BPF_MOV64_IMM(BPF_REG_5, 0), 3473 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3474 BPF_FUNC_csum_diff), 3475 BPF_MOV64_IMM(BPF_REG_0, 0), 3476 BPF_EXIT_INSN(), 3477 }, 3478 .result = ACCEPT, 3479 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 3480 }, 3481 { 3482 "helper access to packet: test15, cls helper fail sub", 3483 .insns = { 3484 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, 3485 offsetof(struct __sk_buff, data)), 3486 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, 3487 offsetof(struct __sk_buff, data_end)), 3488 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), 3489 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 3490 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), 3491 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), 3492 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 12), 3493 BPF_MOV64_IMM(BPF_REG_2, 4), 3494 BPF_MOV64_IMM(BPF_REG_3, 0), 3495 BPF_MOV64_IMM(BPF_REG_4, 0), 3496 BPF_MOV64_IMM(BPF_REG_5, 0), 3497 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3498 BPF_FUNC_csum_diff), 3499 BPF_MOV64_IMM(BPF_REG_0, 0), 3500 BPF_EXIT_INSN(), 3501 }, 3502 .result = REJECT, 3503 .errstr = "invalid access to packet", 3504 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 3505 }, 3506 { 3507 "helper access to packet: test16, cls helper fail range 1", 3508 .insns = { 3509 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, 3510 offsetof(struct __sk_buff, data)), 3511 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, 3512 offsetof(struct __sk_buff, data_end)), 3513 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), 3514 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 3515 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), 3516 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), 3517 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 3518 BPF_MOV64_IMM(BPF_REG_2, 8), 3519 BPF_MOV64_IMM(BPF_REG_3, 0), 3520 BPF_MOV64_IMM(BPF_REG_4, 0), 3521 BPF_MOV64_IMM(BPF_REG_5, 0), 3522 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3523 BPF_FUNC_csum_diff), 3524 BPF_MOV64_IMM(BPF_REG_0, 0), 3525 BPF_EXIT_INSN(), 3526 }, 3527 .result = REJECT, 3528 .errstr = "invalid access to packet", 3529 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 3530 }, 3531 { 3532 "helper access to packet: test17, cls helper fail range 2", 3533 .insns = { 3534 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, 3535 offsetof(struct __sk_buff, data)), 3536 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, 3537 
offsetof(struct __sk_buff, data_end)), 3538 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), 3539 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 3540 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), 3541 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), 3542 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 3543 BPF_MOV64_IMM(BPF_REG_2, -9), 3544 BPF_MOV64_IMM(BPF_REG_3, 0), 3545 BPF_MOV64_IMM(BPF_REG_4, 0), 3546 BPF_MOV64_IMM(BPF_REG_5, 0), 3547 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3548 BPF_FUNC_csum_diff), 3549 BPF_MOV64_IMM(BPF_REG_0, 0), 3550 BPF_EXIT_INSN(), 3551 }, 3552 .result = REJECT, 3553 .errstr = "R2 min value is negative", 3554 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 3555 }, 3556 { 3557 "helper access to packet: test18, cls helper fail range 3", 3558 .insns = { 3559 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, 3560 offsetof(struct __sk_buff, data)), 3561 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, 3562 offsetof(struct __sk_buff, data_end)), 3563 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), 3564 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 3565 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), 3566 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), 3567 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 3568 BPF_MOV64_IMM(BPF_REG_2, ~0), 3569 BPF_MOV64_IMM(BPF_REG_3, 0), 3570 BPF_MOV64_IMM(BPF_REG_4, 0), 3571 BPF_MOV64_IMM(BPF_REG_5, 0), 3572 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3573 BPF_FUNC_csum_diff), 3574 BPF_MOV64_IMM(BPF_REG_0, 0), 3575 BPF_EXIT_INSN(), 3576 }, 3577 .result = REJECT, 3578 .errstr = "R2 min value is negative", 3579 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 3580 }, 3581 { 3582 "helper access to packet: test19, cls helper range zero", 3583 .insns = { 3584 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, 3585 offsetof(struct __sk_buff, data)), 3586 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, 3587 offsetof(struct __sk_buff, data_end)), 3588 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), 3589 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 3590 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), 3591 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), 3592 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 3593 BPF_MOV64_IMM(BPF_REG_2, 0), 3594 BPF_MOV64_IMM(BPF_REG_3, 0), 3595 BPF_MOV64_IMM(BPF_REG_4, 0), 3596 BPF_MOV64_IMM(BPF_REG_5, 0), 3597 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3598 BPF_FUNC_csum_diff), 3599 BPF_MOV64_IMM(BPF_REG_0, 0), 3600 BPF_EXIT_INSN(), 3601 }, 3602 .result = ACCEPT, 3603 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 3604 }, 3605 { 3606 "helper access to packet: test20, pkt end as input", 3607 .insns = { 3608 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, 3609 offsetof(struct __sk_buff, data)), 3610 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, 3611 offsetof(struct __sk_buff, data_end)), 3612 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), 3613 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 3614 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), 3615 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), 3616 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), 3617 BPF_MOV64_IMM(BPF_REG_2, 4), 3618 BPF_MOV64_IMM(BPF_REG_3, 0), 3619 BPF_MOV64_IMM(BPF_REG_4, 0), 3620 BPF_MOV64_IMM(BPF_REG_5, 0), 3621 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3622 BPF_FUNC_csum_diff), 3623 BPF_MOV64_IMM(BPF_REG_0, 0), 3624 BPF_EXIT_INSN(), 3625 }, 3626 .result = REJECT, 3627 .errstr = "R1 type=pkt_end expected=fp", 3628 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 3629 }, 3630 { 3631 "helper access to packet: test21, wrong reg", 3632 .insns = { 3633 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, 3634 offsetof(struct __sk_buff, data)), 3635 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, 3636 offsetof(struct __sk_buff, data_end)), 3637 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), 3638 BPF_MOV64_REG(BPF_REG_1, 
BPF_REG_6), 3639 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), 3640 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), 3641 BPF_MOV64_IMM(BPF_REG_2, 4), 3642 BPF_MOV64_IMM(BPF_REG_3, 0), 3643 BPF_MOV64_IMM(BPF_REG_4, 0), 3644 BPF_MOV64_IMM(BPF_REG_5, 0), 3645 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3646 BPF_FUNC_csum_diff), 3647 BPF_MOV64_IMM(BPF_REG_0, 0), 3648 BPF_EXIT_INSN(), 3649 }, 3650 .result = REJECT, 3651 .errstr = "invalid access to packet", 3652 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 3653 }, 3654 { 3655 "valid map access into an array with a constant", 3656 .insns = { 3657 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 3658 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 3659 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 3660 BPF_LD_MAP_FD(BPF_REG_1, 0), 3661 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3662 BPF_FUNC_map_lookup_elem), 3663 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), 3664 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 3665 offsetof(struct test_val, foo)), 3666 BPF_EXIT_INSN(), 3667 }, 3668 .fixup_map2 = { 3 }, 3669 .errstr_unpriv = "R0 leaks addr", 3670 .result_unpriv = REJECT, 3671 .result = ACCEPT, 3672 }, 3673 { 3674 "valid map access into an array with a register", 3675 .insns = { 3676 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 3677 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 3678 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 3679 BPF_LD_MAP_FD(BPF_REG_1, 0), 3680 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3681 BPF_FUNC_map_lookup_elem), 3682 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 3683 BPF_MOV64_IMM(BPF_REG_1, 4), 3684 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2), 3685 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 3686 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 3687 offsetof(struct test_val, foo)), 3688 BPF_EXIT_INSN(), 3689 }, 3690 .fixup_map2 = { 3 }, 3691 .errstr_unpriv = "R0 leaks addr", 3692 .result_unpriv = REJECT, 3693 .result = ACCEPT, 3694 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 3695 }, 3696 { 3697 "valid map access into an array with a variable", 3698 .insns = { 3699 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 3700 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 3701 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 3702 BPF_LD_MAP_FD(BPF_REG_1, 0), 3703 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3704 BPF_FUNC_map_lookup_elem), 3705 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), 3706 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), 3707 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3), 3708 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2), 3709 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 3710 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 3711 offsetof(struct test_val, foo)), 3712 BPF_EXIT_INSN(), 3713 }, 3714 .fixup_map2 = { 3 }, 3715 .errstr_unpriv = "R0 leaks addr", 3716 .result_unpriv = REJECT, 3717 .result = ACCEPT, 3718 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 3719 }, 3720 { 3721 "valid map access into an array with a signed variable", 3722 .insns = { 3723 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 3724 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 3725 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 3726 BPF_LD_MAP_FD(BPF_REG_1, 0), 3727 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3728 BPF_FUNC_map_lookup_elem), 3729 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), 3730 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), 3731 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1), 3732 BPF_MOV32_IMM(BPF_REG_1, 0), 3733 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES), 3734 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1), 3735 BPF_MOV32_IMM(BPF_REG_1, 0), 3736 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2), 3737 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 3738 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 3739 offsetof(struct test_val, foo)), 3740 BPF_EXIT_INSN(), 
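	/* Map value accesses: after a successful map_lookup_elem() the offset
	 * into the value must be proven to stay within value_size.  The
	 * "valid" cases use a constant, a constant held in a register, an
	 * upper-bounded variable and a signed-range-checked variable; the
	 * "invalid" cases that follow each break one of those checks.
	 */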
3741 }, 3742 .fixup_map2 = { 3 }, 3743 .errstr_unpriv = "R0 leaks addr", 3744 .result_unpriv = REJECT, 3745 .result = ACCEPT, 3746 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 3747 }, 3748 { 3749 "invalid map access into an array with a constant", 3750 .insns = { 3751 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 3752 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 3753 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 3754 BPF_LD_MAP_FD(BPF_REG_1, 0), 3755 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3756 BPF_FUNC_map_lookup_elem), 3757 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), 3758 BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2, 3759 offsetof(struct test_val, foo)), 3760 BPF_EXIT_INSN(), 3761 }, 3762 .fixup_map2 = { 3 }, 3763 .errstr = "invalid access to map value, value_size=48 off=48 size=8", 3764 .result = REJECT, 3765 }, 3766 { 3767 "invalid map access into an array with a register", 3768 .insns = { 3769 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 3770 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 3771 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 3772 BPF_LD_MAP_FD(BPF_REG_1, 0), 3773 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3774 BPF_FUNC_map_lookup_elem), 3775 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 3776 BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1), 3777 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2), 3778 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 3779 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 3780 offsetof(struct test_val, foo)), 3781 BPF_EXIT_INSN(), 3782 }, 3783 .fixup_map2 = { 3 }, 3784 .errstr = "R0 min value is outside of the array range", 3785 .result = REJECT, 3786 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 3787 }, 3788 { 3789 "invalid map access into an array with a variable", 3790 .insns = { 3791 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 3792 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 3793 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 3794 BPF_LD_MAP_FD(BPF_REG_1, 0), 3795 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3796 BPF_FUNC_map_lookup_elem), 3797 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 3798 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), 3799 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2), 3800 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 3801 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 3802 offsetof(struct test_val, foo)), 3803 BPF_EXIT_INSN(), 3804 }, 3805 .fixup_map2 = { 3 }, 3806 .errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map", 3807 .result = REJECT, 3808 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 3809 }, 3810 { 3811 "invalid map access into an array with no floor check", 3812 .insns = { 3813 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 3814 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 3815 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 3816 BPF_LD_MAP_FD(BPF_REG_1, 0), 3817 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3818 BPF_FUNC_map_lookup_elem), 3819 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), 3820 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), 3821 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES), 3822 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1), 3823 BPF_MOV32_IMM(BPF_REG_1, 0), 3824 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2), 3825 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 3826 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 3827 offsetof(struct test_val, foo)), 3828 BPF_EXIT_INSN(), 3829 }, 3830 .fixup_map2 = { 3 }, 3831 .errstr_unpriv = "R0 leaks addr", 3832 .errstr = "R0 unbounded memory access", 3833 .result_unpriv = REJECT, 3834 .result = REJECT, 3835 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 3836 }, 3837 { 3838 "invalid map access into an array with a invalid max check", 3839 .insns = { 3840 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 3841 
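			/* fp-8 was just zeroed to act as the lookup key; the same
			 * prologue (key on the stack, map fd patched in through
			 * .fixup_map2, then a call to map_lookup_elem) is shared
			 * by all of the array-access tests in this block.
			 */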
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
			BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
			BPF_MOV32_IMM(BPF_REG_1, 0),
			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
				   offsetof(struct test_val, foo)),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3 },
		.errstr_unpriv = "R0 leaks addr",
		.errstr = "invalid access to map value, value_size=48 off=44 size=8",
		.result_unpriv = REJECT,
		.result = REJECT,
		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
	},
	{
		"invalid map access into an array with an invalid max check",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
				    offsetof(struct test_val, foo)),
			BPF_EXIT_INSN(),
		},
		.fixup_map2 = { 3, 11 },
		.errstr_unpriv = "R0 pointer += pointer",
		.errstr = "R0 invalid mem access 'inv'",
		.result_unpriv = REJECT,
		.result = REJECT,
		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
	},
	{
		"multiple registers share map_lookup_elem result",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, 10),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 4 },
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS
	},
	{
		"alu ops on ptr_to_map_value_or_null, 1",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, 10),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 4 },
		.errstr = "R4 invalid mem access",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS
	},
	{
		"alu ops on ptr_to_map_value_or_null, 2",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_1, 10),
			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
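			/* As in the other "alu ops on ptr_to_map_value_or_null"
			 * tests, R0 is copied to R4 and modified before the NULL
			 * check, so the later store through R4 is expected to be
			 * rejected with "R4 invalid mem access".
			 */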
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 3942 BPF_LD_MAP_FD(BPF_REG_1, 0), 3943 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3944 BPF_FUNC_map_lookup_elem), 3945 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), 3946 BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1), 3947 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), 3948 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0), 3949 BPF_EXIT_INSN(), 3950 }, 3951 .fixup_map1 = { 4 }, 3952 .errstr = "R4 invalid mem access", 3953 .result = REJECT, 3954 .prog_type = BPF_PROG_TYPE_SCHED_CLS 3955 }, 3956 { 3957 "alu ops on ptr_to_map_value_or_null, 3", 3958 .insns = { 3959 BPF_MOV64_IMM(BPF_REG_1, 10), 3960 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), 3961 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 3962 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 3963 BPF_LD_MAP_FD(BPF_REG_1, 0), 3964 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3965 BPF_FUNC_map_lookup_elem), 3966 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), 3967 BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1), 3968 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), 3969 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0), 3970 BPF_EXIT_INSN(), 3971 }, 3972 .fixup_map1 = { 4 }, 3973 .errstr = "R4 invalid mem access", 3974 .result = REJECT, 3975 .prog_type = BPF_PROG_TYPE_SCHED_CLS 3976 }, 3977 { 3978 "invalid memory access with multiple map_lookup_elem calls", 3979 .insns = { 3980 BPF_MOV64_IMM(BPF_REG_1, 10), 3981 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), 3982 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 3983 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 3984 BPF_LD_MAP_FD(BPF_REG_1, 0), 3985 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1), 3986 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), 3987 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3988 BPF_FUNC_map_lookup_elem), 3989 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), 3990 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), 3991 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7), 3992 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3993 BPF_FUNC_map_lookup_elem), 3994 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), 3995 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0), 3996 BPF_EXIT_INSN(), 3997 }, 3998 .fixup_map1 = { 4 }, 3999 .result = REJECT, 4000 .errstr = "R4 !read_ok", 4001 .prog_type = BPF_PROG_TYPE_SCHED_CLS 4002 }, 4003 { 4004 "valid indirect map_lookup_elem access with 2nd lookup in branch", 4005 .insns = { 4006 BPF_MOV64_IMM(BPF_REG_1, 10), 4007 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), 4008 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4009 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4010 BPF_LD_MAP_FD(BPF_REG_1, 0), 4011 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1), 4012 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), 4013 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 4014 BPF_FUNC_map_lookup_elem), 4015 BPF_MOV64_IMM(BPF_REG_2, 10), 4016 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3), 4017 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), 4018 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7), 4019 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 4020 BPF_FUNC_map_lookup_elem), 4021 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), 4022 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), 4023 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0), 4024 BPF_EXIT_INSN(), 4025 }, 4026 .fixup_map1 = { 4 }, 4027 .result = ACCEPT, 4028 .prog_type = BPF_PROG_TYPE_SCHED_CLS 4029 }, 4030 { 4031 "invalid map access from else condition", 4032 .insns = { 4033 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 4034 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4035 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4036 BPF_LD_MAP_FD(BPF_REG_1, 0), 4037 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 4038 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), 4039 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), 4040 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1), 4041 BPF_ALU64_IMM(BPF_ADD, 
BPF_REG_1, 1), 4042 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2), 4043 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 4044 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), 4045 BPF_EXIT_INSN(), 4046 }, 4047 .fixup_map2 = { 3 }, 4048 .errstr = "R0 unbounded memory access", 4049 .result = REJECT, 4050 .errstr_unpriv = "R0 leaks addr", 4051 .result_unpriv = REJECT, 4052 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 4053 }, 4054 { 4055 "constant register |= constant should keep constant type", 4056 .insns = { 4057 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 4058 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48), 4059 BPF_MOV64_IMM(BPF_REG_2, 34), 4060 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13), 4061 BPF_MOV64_IMM(BPF_REG_3, 0), 4062 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4063 BPF_EXIT_INSN(), 4064 }, 4065 .result = ACCEPT, 4066 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4067 }, 4068 { 4069 "constant register |= constant should not bypass stack boundary checks", 4070 .insns = { 4071 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 4072 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48), 4073 BPF_MOV64_IMM(BPF_REG_2, 34), 4074 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24), 4075 BPF_MOV64_IMM(BPF_REG_3, 0), 4076 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4077 BPF_EXIT_INSN(), 4078 }, 4079 .errstr = "invalid stack type R1 off=-48 access_size=58", 4080 .result = REJECT, 4081 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4082 }, 4083 { 4084 "constant register |= constant register should keep constant type", 4085 .insns = { 4086 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 4087 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48), 4088 BPF_MOV64_IMM(BPF_REG_2, 34), 4089 BPF_MOV64_IMM(BPF_REG_4, 13), 4090 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4), 4091 BPF_MOV64_IMM(BPF_REG_3, 0), 4092 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4093 BPF_EXIT_INSN(), 4094 }, 4095 .result = ACCEPT, 4096 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4097 }, 4098 { 4099 "constant register |= constant register should not bypass stack boundary checks", 4100 .insns = { 4101 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 4102 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48), 4103 BPF_MOV64_IMM(BPF_REG_2, 34), 4104 BPF_MOV64_IMM(BPF_REG_4, 24), 4105 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4), 4106 BPF_MOV64_IMM(BPF_REG_3, 0), 4107 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4108 BPF_EXIT_INSN(), 4109 }, 4110 .errstr = "invalid stack type R1 off=-48 access_size=58", 4111 .result = REJECT, 4112 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4113 }, 4114 { 4115 "invalid direct packet write for LWT_IN", 4116 .insns = { 4117 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4118 offsetof(struct __sk_buff, data)), 4119 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 4120 offsetof(struct __sk_buff, data_end)), 4121 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 4122 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 4123 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), 4124 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), 4125 BPF_MOV64_IMM(BPF_REG_0, 0), 4126 BPF_EXIT_INSN(), 4127 }, 4128 .errstr = "cannot write into packet", 4129 .result = REJECT, 4130 .prog_type = BPF_PROG_TYPE_LWT_IN, 4131 }, 4132 { 4133 "invalid direct packet write for LWT_OUT", 4134 .insns = { 4135 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4136 offsetof(struct __sk_buff, data)), 4137 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 4138 offsetof(struct __sk_buff, data_end)), 4139 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 4140 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 4141 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), 4142 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), 4143 BPF_MOV64_IMM(BPF_REG_0, 0), 4144 BPF_EXIT_INSN(), 4145 }, 4146 .errstr = "cannot 
write into packet", 4147 .result = REJECT, 4148 .prog_type = BPF_PROG_TYPE_LWT_OUT, 4149 }, 4150 { 4151 "direct packet write for LWT_XMIT", 4152 .insns = { 4153 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4154 offsetof(struct __sk_buff, data)), 4155 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 4156 offsetof(struct __sk_buff, data_end)), 4157 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 4158 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 4159 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), 4160 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), 4161 BPF_MOV64_IMM(BPF_REG_0, 0), 4162 BPF_EXIT_INSN(), 4163 }, 4164 .result = ACCEPT, 4165 .prog_type = BPF_PROG_TYPE_LWT_XMIT, 4166 }, 4167 { 4168 "direct packet read for LWT_IN", 4169 .insns = { 4170 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4171 offsetof(struct __sk_buff, data)), 4172 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 4173 offsetof(struct __sk_buff, data_end)), 4174 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 4175 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 4176 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), 4177 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), 4178 BPF_MOV64_IMM(BPF_REG_0, 0), 4179 BPF_EXIT_INSN(), 4180 }, 4181 .result = ACCEPT, 4182 .prog_type = BPF_PROG_TYPE_LWT_IN, 4183 }, 4184 { 4185 "direct packet read for LWT_OUT", 4186 .insns = { 4187 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4188 offsetof(struct __sk_buff, data)), 4189 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 4190 offsetof(struct __sk_buff, data_end)), 4191 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 4192 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 4193 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), 4194 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), 4195 BPF_MOV64_IMM(BPF_REG_0, 0), 4196 BPF_EXIT_INSN(), 4197 }, 4198 .result = ACCEPT, 4199 .prog_type = BPF_PROG_TYPE_LWT_OUT, 4200 }, 4201 { 4202 "direct packet read for LWT_XMIT", 4203 .insns = { 4204 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4205 offsetof(struct __sk_buff, data)), 4206 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 4207 offsetof(struct __sk_buff, data_end)), 4208 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 4209 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 4210 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), 4211 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), 4212 BPF_MOV64_IMM(BPF_REG_0, 0), 4213 BPF_EXIT_INSN(), 4214 }, 4215 .result = ACCEPT, 4216 .prog_type = BPF_PROG_TYPE_LWT_XMIT, 4217 }, 4218 { 4219 "overlapping checks for direct packet access", 4220 .insns = { 4221 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4222 offsetof(struct __sk_buff, data)), 4223 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 4224 offsetof(struct __sk_buff, data_end)), 4225 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 4226 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 4227 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4), 4228 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), 4229 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), 4230 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), 4231 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6), 4232 BPF_MOV64_IMM(BPF_REG_0, 0), 4233 BPF_EXIT_INSN(), 4234 }, 4235 .result = ACCEPT, 4236 .prog_type = BPF_PROG_TYPE_LWT_XMIT, 4237 }, 4238 { 4239 "invalid access of tc_classid for LWT_IN", 4240 .insns = { 4241 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 4242 offsetof(struct __sk_buff, tc_classid)), 4243 BPF_EXIT_INSN(), 4244 }, 4245 .result = REJECT, 4246 .errstr = "invalid bpf_context access", 4247 }, 4248 { 4249 "invalid access of tc_classid for LWT_OUT", 4250 .insns = { 4251 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 4252 offsetof(struct __sk_buff, tc_classid)), 4253 BPF_EXIT_INSN(), 4254 }, 4255 .result = REJECT, 4256 .errstr = 
"invalid bpf_context access", 4257 }, 4258 { 4259 "invalid access of tc_classid for LWT_XMIT", 4260 .insns = { 4261 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 4262 offsetof(struct __sk_buff, tc_classid)), 4263 BPF_EXIT_INSN(), 4264 }, 4265 .result = REJECT, 4266 .errstr = "invalid bpf_context access", 4267 }, 4268 { 4269 "leak pointer into ctx 1", 4270 .insns = { 4271 BPF_MOV64_IMM(BPF_REG_0, 0), 4272 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 4273 offsetof(struct __sk_buff, cb[0])), 4274 BPF_LD_MAP_FD(BPF_REG_2, 0), 4275 BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2, 4276 offsetof(struct __sk_buff, cb[0])), 4277 BPF_EXIT_INSN(), 4278 }, 4279 .fixup_map1 = { 2 }, 4280 .errstr_unpriv = "R2 leaks addr into mem", 4281 .result_unpriv = REJECT, 4282 .result = ACCEPT, 4283 }, 4284 { 4285 "leak pointer into ctx 2", 4286 .insns = { 4287 BPF_MOV64_IMM(BPF_REG_0, 0), 4288 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 4289 offsetof(struct __sk_buff, cb[0])), 4290 BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10, 4291 offsetof(struct __sk_buff, cb[0])), 4292 BPF_EXIT_INSN(), 4293 }, 4294 .errstr_unpriv = "R10 leaks addr into mem", 4295 .result_unpriv = REJECT, 4296 .result = ACCEPT, 4297 }, 4298 { 4299 "leak pointer into ctx 3", 4300 .insns = { 4301 BPF_MOV64_IMM(BPF_REG_0, 0), 4302 BPF_LD_MAP_FD(BPF_REG_2, 0), 4303 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 4304 offsetof(struct __sk_buff, cb[0])), 4305 BPF_EXIT_INSN(), 4306 }, 4307 .fixup_map1 = { 1 }, 4308 .errstr_unpriv = "R2 leaks addr into ctx", 4309 .result_unpriv = REJECT, 4310 .result = ACCEPT, 4311 }, 4312 { 4313 "leak pointer into map val", 4314 .insns = { 4315 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 4316 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 4317 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4318 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4319 BPF_LD_MAP_FD(BPF_REG_1, 0), 4320 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 4321 BPF_FUNC_map_lookup_elem), 4322 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), 4323 BPF_MOV64_IMM(BPF_REG_3, 0), 4324 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0), 4325 BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 4326 BPF_MOV64_IMM(BPF_REG_0, 0), 4327 BPF_EXIT_INSN(), 4328 }, 4329 .fixup_map1 = { 4 }, 4330 .errstr_unpriv = "R6 leaks addr into mem", 4331 .result_unpriv = REJECT, 4332 .result = ACCEPT, 4333 }, 4334 { 4335 "helper access to map: full range", 4336 .insns = { 4337 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4338 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4339 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4340 BPF_LD_MAP_FD(BPF_REG_1, 0), 4341 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4342 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 4343 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4344 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)), 4345 BPF_MOV64_IMM(BPF_REG_3, 0), 4346 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4347 BPF_EXIT_INSN(), 4348 }, 4349 .fixup_map2 = { 3 }, 4350 .result = ACCEPT, 4351 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4352 }, 4353 { 4354 "helper access to map: partial range", 4355 .insns = { 4356 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4357 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4358 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4359 BPF_LD_MAP_FD(BPF_REG_1, 0), 4360 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4361 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 4362 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4363 BPF_MOV64_IMM(BPF_REG_2, 8), 4364 BPF_MOV64_IMM(BPF_REG_3, 0), 4365 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4366 BPF_EXIT_INSN(), 4367 }, 4368 .fixup_map2 = { 3 }, 4369 .result = ACCEPT, 4370 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4371 }, 4372 { 4373 "helper access to map: empty range", 
4374 .insns = { 4375 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4376 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4377 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4378 BPF_LD_MAP_FD(BPF_REG_1, 0), 4379 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4380 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 4381 BPF_MOV64_IMM(BPF_REG_1, 0), 4382 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), 4383 BPF_MOV64_IMM(BPF_REG_3, 0), 4384 BPF_EMIT_CALL(BPF_FUNC_probe_write_user), 4385 BPF_EXIT_INSN(), 4386 }, 4387 .fixup_map2 = { 3 }, 4388 .errstr = "invalid access to map value, value_size=48 off=0 size=0", 4389 .result = REJECT, 4390 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4391 }, 4392 { 4393 "helper access to map: out-of-bound range", 4394 .insns = { 4395 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4396 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4397 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4398 BPF_LD_MAP_FD(BPF_REG_1, 0), 4399 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4400 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 4401 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4402 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8), 4403 BPF_MOV64_IMM(BPF_REG_3, 0), 4404 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4405 BPF_EXIT_INSN(), 4406 }, 4407 .fixup_map2 = { 3 }, 4408 .errstr = "invalid access to map value, value_size=48 off=0 size=56", 4409 .result = REJECT, 4410 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4411 }, 4412 { 4413 "helper access to map: negative range", 4414 .insns = { 4415 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4416 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4417 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4418 BPF_LD_MAP_FD(BPF_REG_1, 0), 4419 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4420 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 4421 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4422 BPF_MOV64_IMM(BPF_REG_2, -8), 4423 BPF_MOV64_IMM(BPF_REG_3, 0), 4424 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4425 BPF_EXIT_INSN(), 4426 }, 4427 .fixup_map2 = { 3 }, 4428 .errstr = "R2 min value is negative", 4429 .result = REJECT, 4430 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4431 }, 4432 { 4433 "helper access to adjusted map (via const imm): full range", 4434 .insns = { 4435 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4436 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4437 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4438 BPF_LD_MAP_FD(BPF_REG_1, 0), 4439 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4440 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), 4441 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4442 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4443 offsetof(struct test_val, foo)), 4444 BPF_MOV64_IMM(BPF_REG_2, 4445 sizeof(struct test_val) - 4446 offsetof(struct test_val, foo)), 4447 BPF_MOV64_IMM(BPF_REG_3, 0), 4448 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4449 BPF_EXIT_INSN(), 4450 }, 4451 .fixup_map2 = { 3 }, 4452 .result = ACCEPT, 4453 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4454 }, 4455 { 4456 "helper access to adjusted map (via const imm): partial range", 4457 .insns = { 4458 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4459 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4460 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4461 BPF_LD_MAP_FD(BPF_REG_1, 0), 4462 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4463 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), 4464 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4465 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4466 offsetof(struct test_val, foo)), 4467 BPF_MOV64_IMM(BPF_REG_2, 8), 4468 BPF_MOV64_IMM(BPF_REG_3, 0), 4469 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4470 BPF_EXIT_INSN(), 4471 }, 4472 .fixup_map2 = { 3 }, 4473 .result = ACCEPT, 4474 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4475 }, 4476 { 4477 "helper access to adjusted map (via const imm): empty range", 4478 .insns = { 4479 
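			/* The "adjusted map" tests advance the value pointer by
			 * offsetof(struct test_val, foo) first (via an immediate,
			 * a constant register, or a value loaded from the map),
			 * so the size passed to the helper has to fit in the
			 * space remaining after that offset.
			 */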
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4480 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4481 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4482 BPF_LD_MAP_FD(BPF_REG_1, 0), 4483 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4484 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), 4485 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4486 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4487 offsetof(struct test_val, foo)), 4488 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1), 4489 BPF_MOV64_IMM(BPF_REG_1, 0), 4490 BPF_MOV64_IMM(BPF_REG_3, 0), 4491 BPF_EMIT_CALL(BPF_FUNC_probe_write_user), 4492 BPF_EXIT_INSN(), 4493 }, 4494 .fixup_map2 = { 3 }, 4495 .errstr = "invalid access to map value, value_size=48 off=4 size=0", 4496 .result = REJECT, 4497 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4498 }, 4499 { 4500 "helper access to adjusted map (via const imm): out-of-bound range", 4501 .insns = { 4502 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4503 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4504 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4505 BPF_LD_MAP_FD(BPF_REG_1, 0), 4506 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4507 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), 4508 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4509 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4510 offsetof(struct test_val, foo)), 4511 BPF_MOV64_IMM(BPF_REG_2, 4512 sizeof(struct test_val) - 4513 offsetof(struct test_val, foo) + 8), 4514 BPF_MOV64_IMM(BPF_REG_3, 0), 4515 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4516 BPF_EXIT_INSN(), 4517 }, 4518 .fixup_map2 = { 3 }, 4519 .errstr = "invalid access to map value, value_size=48 off=4 size=52", 4520 .result = REJECT, 4521 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4522 }, 4523 { 4524 "helper access to adjusted map (via const imm): negative range (> adjustment)", 4525 .insns = { 4526 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4527 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4528 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4529 BPF_LD_MAP_FD(BPF_REG_1, 0), 4530 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4531 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), 4532 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4533 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4534 offsetof(struct test_val, foo)), 4535 BPF_MOV64_IMM(BPF_REG_2, -8), 4536 BPF_MOV64_IMM(BPF_REG_3, 0), 4537 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4538 BPF_EXIT_INSN(), 4539 }, 4540 .fixup_map2 = { 3 }, 4541 .errstr = "R2 min value is negative", 4542 .result = REJECT, 4543 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4544 }, 4545 { 4546 "helper access to adjusted map (via const imm): negative range (< adjustment)", 4547 .insns = { 4548 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4549 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4550 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4551 BPF_LD_MAP_FD(BPF_REG_1, 0), 4552 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4553 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), 4554 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4555 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4556 offsetof(struct test_val, foo)), 4557 BPF_MOV64_IMM(BPF_REG_2, -1), 4558 BPF_MOV64_IMM(BPF_REG_3, 0), 4559 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4560 BPF_EXIT_INSN(), 4561 }, 4562 .fixup_map2 = { 3 }, 4563 .errstr = "R2 min value is negative", 4564 .result = REJECT, 4565 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4566 }, 4567 { 4568 "helper access to adjusted map (via const reg): full range", 4569 .insns = { 4570 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4571 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4572 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4573 BPF_LD_MAP_FD(BPF_REG_1, 0), 4574 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4575 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), 4576 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4577 BPF_MOV64_IMM(BPF_REG_3, 4578 offsetof(struct test_val, 
foo)), 4579 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), 4580 BPF_MOV64_IMM(BPF_REG_2, 4581 sizeof(struct test_val) - 4582 offsetof(struct test_val, foo)), 4583 BPF_MOV64_IMM(BPF_REG_3, 0), 4584 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4585 BPF_EXIT_INSN(), 4586 }, 4587 .fixup_map2 = { 3 }, 4588 .result = ACCEPT, 4589 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4590 }, 4591 { 4592 "helper access to adjusted map (via const reg): partial range", 4593 .insns = { 4594 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4595 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4596 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4597 BPF_LD_MAP_FD(BPF_REG_1, 0), 4598 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4599 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), 4600 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4601 BPF_MOV64_IMM(BPF_REG_3, 4602 offsetof(struct test_val, foo)), 4603 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), 4604 BPF_MOV64_IMM(BPF_REG_2, 8), 4605 BPF_MOV64_IMM(BPF_REG_3, 0), 4606 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4607 BPF_EXIT_INSN(), 4608 }, 4609 .fixup_map2 = { 3 }, 4610 .result = ACCEPT, 4611 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4612 }, 4613 { 4614 "helper access to adjusted map (via const reg): empty range", 4615 .insns = { 4616 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4617 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4618 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4619 BPF_LD_MAP_FD(BPF_REG_1, 0), 4620 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4621 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), 4622 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4623 BPF_MOV64_IMM(BPF_REG_3, 0), 4624 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), 4625 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1), 4626 BPF_MOV64_IMM(BPF_REG_1, 0), 4627 BPF_MOV64_IMM(BPF_REG_3, 0), 4628 BPF_EMIT_CALL(BPF_FUNC_probe_write_user), 4629 BPF_EXIT_INSN(), 4630 }, 4631 .fixup_map2 = { 3 }, 4632 .errstr = "R2 min value is outside of the array range", 4633 .result = REJECT, 4634 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4635 }, 4636 { 4637 "helper access to adjusted map (via const reg): out-of-bound range", 4638 .insns = { 4639 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4640 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4641 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4642 BPF_LD_MAP_FD(BPF_REG_1, 0), 4643 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4644 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), 4645 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4646 BPF_MOV64_IMM(BPF_REG_3, 4647 offsetof(struct test_val, foo)), 4648 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), 4649 BPF_MOV64_IMM(BPF_REG_2, 4650 sizeof(struct test_val) - 4651 offsetof(struct test_val, foo) + 8), 4652 BPF_MOV64_IMM(BPF_REG_3, 0), 4653 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4654 BPF_EXIT_INSN(), 4655 }, 4656 .fixup_map2 = { 3 }, 4657 .errstr = "invalid access to map value, value_size=48 off=4 size=52", 4658 .result = REJECT, 4659 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4660 }, 4661 { 4662 "helper access to adjusted map (via const reg): negative range (> adjustment)", 4663 .insns = { 4664 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4665 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4666 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4667 BPF_LD_MAP_FD(BPF_REG_1, 0), 4668 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4669 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), 4670 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4671 BPF_MOV64_IMM(BPF_REG_3, 4672 offsetof(struct test_val, foo)), 4673 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), 4674 BPF_MOV64_IMM(BPF_REG_2, -8), 4675 BPF_MOV64_IMM(BPF_REG_3, 0), 4676 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4677 BPF_EXIT_INSN(), 4678 }, 4679 .fixup_map2 = { 3 }, 4680 .errstr = "R2 min value is negative", 
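		/* A negative size can never be proven safe, so the "negative
		 * range" variants fail on R2's minimum value regardless of how
		 * far the value pointer was advanced beforehand.
		 */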
4681 .result = REJECT, 4682 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4683 }, 4684 { 4685 "helper access to adjusted map (via const reg): negative range (< adjustment)", 4686 .insns = { 4687 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4688 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4689 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4690 BPF_LD_MAP_FD(BPF_REG_1, 0), 4691 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4692 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), 4693 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4694 BPF_MOV64_IMM(BPF_REG_3, 4695 offsetof(struct test_val, foo)), 4696 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), 4697 BPF_MOV64_IMM(BPF_REG_2, -1), 4698 BPF_MOV64_IMM(BPF_REG_3, 0), 4699 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4700 BPF_EXIT_INSN(), 4701 }, 4702 .fixup_map2 = { 3 }, 4703 .errstr = "R2 min value is negative", 4704 .result = REJECT, 4705 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4706 }, 4707 { 4708 "helper access to adjusted map (via variable): full range", 4709 .insns = { 4710 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4711 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4712 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4713 BPF_LD_MAP_FD(BPF_REG_1, 0), 4714 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4715 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), 4716 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4717 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), 4718 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 4719 offsetof(struct test_val, foo), 4), 4720 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), 4721 BPF_MOV64_IMM(BPF_REG_2, 4722 sizeof(struct test_val) - 4723 offsetof(struct test_val, foo)), 4724 BPF_MOV64_IMM(BPF_REG_3, 0), 4725 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4726 BPF_EXIT_INSN(), 4727 }, 4728 .fixup_map2 = { 3 }, 4729 .result = ACCEPT, 4730 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4731 }, 4732 { 4733 "helper access to adjusted map (via variable): partial range", 4734 .insns = { 4735 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4736 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4737 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4738 BPF_LD_MAP_FD(BPF_REG_1, 0), 4739 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4740 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), 4741 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4742 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), 4743 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 4744 offsetof(struct test_val, foo), 4), 4745 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), 4746 BPF_MOV64_IMM(BPF_REG_2, 8), 4747 BPF_MOV64_IMM(BPF_REG_3, 0), 4748 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4749 BPF_EXIT_INSN(), 4750 }, 4751 .fixup_map2 = { 3 }, 4752 .result = ACCEPT, 4753 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4754 }, 4755 { 4756 "helper access to adjusted map (via variable): empty range", 4757 .insns = { 4758 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4759 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4760 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4761 BPF_LD_MAP_FD(BPF_REG_1, 0), 4762 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4763 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), 4764 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4765 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), 4766 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 4767 offsetof(struct test_val, foo), 4), 4768 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), 4769 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1), 4770 BPF_MOV64_IMM(BPF_REG_1, 0), 4771 BPF_MOV64_IMM(BPF_REG_3, 0), 4772 BPF_EMIT_CALL(BPF_FUNC_probe_write_user), 4773 BPF_EXIT_INSN(), 4774 }, 4775 .fixup_map2 = { 3 }, 4776 .errstr = "R2 min value is outside of the array range", 4777 .result = REJECT, 4778 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4779 }, 4780 { 4781 "helper access to adjusted map (via variable): no max check", 4782 .insns = { 
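			/* No upper bound is ever placed on the value loaded into
			 * R3 below before it is added to the map value pointer,
			 * so the resulting access range stays unbounded and the
			 * program is expected to be rejected.
			 */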
4783 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4784 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4785 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4786 BPF_LD_MAP_FD(BPF_REG_1, 0), 4787 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4788 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), 4789 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4790 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), 4791 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), 4792 BPF_MOV64_IMM(BPF_REG_2, 1), 4793 BPF_MOV64_IMM(BPF_REG_3, 0), 4794 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4795 BPF_EXIT_INSN(), 4796 }, 4797 .fixup_map2 = { 3 }, 4798 .errstr = "R1 unbounded memory access", 4799 .result = REJECT, 4800 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4801 }, 4802 { 4803 "helper access to adjusted map (via variable): wrong max check", 4804 .insns = { 4805 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4806 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4807 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4808 BPF_LD_MAP_FD(BPF_REG_1, 0), 4809 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4810 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), 4811 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4812 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), 4813 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 4814 offsetof(struct test_val, foo), 4), 4815 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), 4816 BPF_MOV64_IMM(BPF_REG_2, 4817 sizeof(struct test_val) - 4818 offsetof(struct test_val, foo) + 1), 4819 BPF_MOV64_IMM(BPF_REG_3, 0), 4820 BPF_EMIT_CALL(BPF_FUNC_probe_read), 4821 BPF_EXIT_INSN(), 4822 }, 4823 .fixup_map2 = { 3 }, 4824 .errstr = "invalid access to map value, value_size=48 off=4 size=45", 4825 .result = REJECT, 4826 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4827 }, 4828 { 4829 "helper access to map: bounds check using <, good access", 4830 .insns = { 4831 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4832 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4833 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4834 BPF_LD_MAP_FD(BPF_REG_1, 0), 4835 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4836 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 4837 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4838 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), 4839 BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 2), 4840 BPF_MOV64_IMM(BPF_REG_0, 0), 4841 BPF_EXIT_INSN(), 4842 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), 4843 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0), 4844 BPF_MOV64_IMM(BPF_REG_0, 0), 4845 BPF_EXIT_INSN(), 4846 }, 4847 .fixup_map2 = { 3 }, 4848 .result = ACCEPT, 4849 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4850 }, 4851 { 4852 "helper access to map: bounds check using <, bad access", 4853 .insns = { 4854 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4855 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4856 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4857 BPF_LD_MAP_FD(BPF_REG_1, 0), 4858 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4859 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 4860 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4861 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), 4862 BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 4), 4863 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), 4864 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0), 4865 BPF_MOV64_IMM(BPF_REG_0, 0), 4866 BPF_EXIT_INSN(), 4867 BPF_MOV64_IMM(BPF_REG_0, 0), 4868 BPF_EXIT_INSN(), 4869 }, 4870 .fixup_map2 = { 3 }, 4871 .result = REJECT, 4872 .errstr = "R1 unbounded memory access", 4873 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4874 }, 4875 { 4876 "helper access to map: bounds check using <=, good access", 4877 .insns = { 4878 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4879 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4880 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4881 BPF_LD_MAP_FD(BPF_REG_1, 0), 4882 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 
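			/* The "bounds check using <", "<=", "s<" and "s<=" tests
			 * all load an index from the map value and branch on it
			 * before using it as a byte offset into the value.  As a
			 * rough C sketch (illustrative only, not part of the test
			 * table):
			 *
			 *	val = bpf_map_lookup_elem(map, &key);
			 *	if (!val)
			 *		return 0;
			 *	idx = *(u32 *)val;
			 *	if (idx < 32)
			 *		((u8 *)val)[idx] = 0;
			 *
			 * The "bad access" variants either perform the store on
			 * the unbounded side of the branch or load the index as
			 * a 64-bit value whose signed minimum stays unknown.
			 */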
4883 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 4884 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4885 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), 4886 BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 2), 4887 BPF_MOV64_IMM(BPF_REG_0, 0), 4888 BPF_EXIT_INSN(), 4889 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), 4890 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0), 4891 BPF_MOV64_IMM(BPF_REG_0, 0), 4892 BPF_EXIT_INSN(), 4893 }, 4894 .fixup_map2 = { 3 }, 4895 .result = ACCEPT, 4896 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4897 }, 4898 { 4899 "helper access to map: bounds check using <=, bad access", 4900 .insns = { 4901 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4902 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4903 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4904 BPF_LD_MAP_FD(BPF_REG_1, 0), 4905 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4906 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 4907 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4908 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), 4909 BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 4), 4910 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), 4911 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0), 4912 BPF_MOV64_IMM(BPF_REG_0, 0), 4913 BPF_EXIT_INSN(), 4914 BPF_MOV64_IMM(BPF_REG_0, 0), 4915 BPF_EXIT_INSN(), 4916 }, 4917 .fixup_map2 = { 3 }, 4918 .result = REJECT, 4919 .errstr = "R1 unbounded memory access", 4920 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4921 }, 4922 { 4923 "helper access to map: bounds check using s<, good access", 4924 .insns = { 4925 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4926 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4927 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4928 BPF_LD_MAP_FD(BPF_REG_1, 0), 4929 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4930 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 4931 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4932 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), 4933 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2), 4934 BPF_MOV64_IMM(BPF_REG_0, 0), 4935 BPF_EXIT_INSN(), 4936 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 0, -3), 4937 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), 4938 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0), 4939 BPF_MOV64_IMM(BPF_REG_0, 0), 4940 BPF_EXIT_INSN(), 4941 }, 4942 .fixup_map2 = { 3 }, 4943 .result = ACCEPT, 4944 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4945 }, 4946 { 4947 "helper access to map: bounds check using s<, good access 2", 4948 .insns = { 4949 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4950 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4951 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4952 BPF_LD_MAP_FD(BPF_REG_1, 0), 4953 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4954 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 4955 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4956 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), 4957 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2), 4958 BPF_MOV64_IMM(BPF_REG_0, 0), 4959 BPF_EXIT_INSN(), 4960 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3), 4961 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), 4962 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0), 4963 BPF_MOV64_IMM(BPF_REG_0, 0), 4964 BPF_EXIT_INSN(), 4965 }, 4966 .fixup_map2 = { 3 }, 4967 .result = ACCEPT, 4968 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4969 }, 4970 { 4971 "helper access to map: bounds check using s<, bad access", 4972 .insns = { 4973 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4974 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4975 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4976 BPF_LD_MAP_FD(BPF_REG_1, 0), 4977 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4978 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 4979 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4980 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0), 4981 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2), 4982 BPF_MOV64_IMM(BPF_REG_0, 0), 4983 BPF_EXIT_INSN(), 4984 
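			/* Unlike the BPF_W loads in the "good access" variants,
			 * R3 was loaded with BPF_DW here, so its signed minimum
			 * is not known to be non-negative and the checks in this
			 * test are not sufficient.
			 */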
BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3), 4985 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), 4986 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0), 4987 BPF_MOV64_IMM(BPF_REG_0, 0), 4988 BPF_EXIT_INSN(), 4989 }, 4990 .fixup_map2 = { 3 }, 4991 .result = REJECT, 4992 .errstr = "R1 min value is negative", 4993 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4994 }, 4995 { 4996 "helper access to map: bounds check using s<=, good access", 4997 .insns = { 4998 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4999 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 5000 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 5001 BPF_LD_MAP_FD(BPF_REG_1, 0), 5002 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 5003 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 5004 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 5005 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), 5006 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2), 5007 BPF_MOV64_IMM(BPF_REG_0, 0), 5008 BPF_EXIT_INSN(), 5009 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0, -3), 5010 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), 5011 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0), 5012 BPF_MOV64_IMM(BPF_REG_0, 0), 5013 BPF_EXIT_INSN(), 5014 }, 5015 .fixup_map2 = { 3 }, 5016 .result = ACCEPT, 5017 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 5018 }, 5019 { 5020 "helper access to map: bounds check using s<=, good access 2", 5021 .insns = { 5022 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 5023 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 5024 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 5025 BPF_LD_MAP_FD(BPF_REG_1, 0), 5026 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 5027 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 5028 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 5029 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), 5030 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2), 5031 BPF_MOV64_IMM(BPF_REG_0, 0), 5032 BPF_EXIT_INSN(), 5033 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3), 5034 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), 5035 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0), 5036 BPF_MOV64_IMM(BPF_REG_0, 0), 5037 BPF_EXIT_INSN(), 5038 }, 5039 .fixup_map2 = { 3 }, 5040 .result = ACCEPT, 5041 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 5042 }, 5043 { 5044 "helper access to map: bounds check using s<=, bad access", 5045 .insns = { 5046 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 5047 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 5048 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 5049 BPF_LD_MAP_FD(BPF_REG_1, 0), 5050 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 5051 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 5052 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 5053 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0), 5054 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2), 5055 BPF_MOV64_IMM(BPF_REG_0, 0), 5056 BPF_EXIT_INSN(), 5057 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3), 5058 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), 5059 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0), 5060 BPF_MOV64_IMM(BPF_REG_0, 0), 5061 BPF_EXIT_INSN(), 5062 }, 5063 .fixup_map2 = { 3 }, 5064 .result = REJECT, 5065 .errstr = "R1 min value is negative", 5066 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 5067 }, 5068 { 5069 "map element value is preserved across register spilling", 5070 .insns = { 5071 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 5072 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 5073 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 5074 BPF_LD_MAP_FD(BPF_REG_1, 0), 5075 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 5076 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), 5077 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42), 5078 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 5079 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184), 5080 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), 5081 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0), 5082 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42), 5083 BPF_EXIT_INSN(), 5084 }, 
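		/* The register spilling tests store the map value pointer to the
		 * stack and read it back, checking that the verifier keeps the
		 * pointer type (and, in the "or null" case, the result of the
		 * NULL check) across the spill and fill.
		 */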
5085 .fixup_map2 = { 3 }, 5086 .errstr_unpriv = "R0 leaks addr", 5087 .result = ACCEPT, 5088 .result_unpriv = REJECT, 5089 }, 5090 { 5091 "map element value or null is marked on register spilling", 5092 .insns = { 5093 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 5094 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 5095 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 5096 BPF_LD_MAP_FD(BPF_REG_1, 0), 5097 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 5098 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 5099 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152), 5100 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), 5101 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), 5102 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0), 5103 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42), 5104 BPF_EXIT_INSN(), 5105 }, 5106 .fixup_map2 = { 3 }, 5107 .errstr_unpriv = "R0 leaks addr", 5108 .result = ACCEPT, 5109 .result_unpriv = REJECT, 5110 }, 5111 { 5112 "map element value store of cleared call register", 5113 .insns = { 5114 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 5115 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 5116 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 5117 BPF_LD_MAP_FD(BPF_REG_1, 0), 5118 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 5119 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), 5120 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0), 5121 BPF_EXIT_INSN(), 5122 }, 5123 .fixup_map2 = { 3 }, 5124 .errstr_unpriv = "R1 !read_ok", 5125 .errstr = "R1 !read_ok", 5126 .result = REJECT, 5127 .result_unpriv = REJECT, 5128 }, 5129 { 5130 "map element value with unaligned store", 5131 .insns = { 5132 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 5133 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 5134 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 5135 BPF_LD_MAP_FD(BPF_REG_1, 0), 5136 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 5137 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17), 5138 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3), 5139 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42), 5140 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43), 5141 BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44), 5142 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), 5143 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32), 5144 BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33), 5145 BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34), 5146 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5), 5147 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22), 5148 BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23), 5149 BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24), 5150 BPF_MOV64_REG(BPF_REG_7, BPF_REG_8), 5151 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3), 5152 BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22), 5153 BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23), 5154 BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24), 5155 BPF_EXIT_INSN(), 5156 }, 5157 .fixup_map2 = { 3 }, 5158 .errstr_unpriv = "R0 leaks addr", 5159 .result = ACCEPT, 5160 .result_unpriv = REJECT, 5161 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 5162 }, 5163 { 5164 "map element value with unaligned load", 5165 .insns = { 5166 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 5167 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 5168 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 5169 BPF_LD_MAP_FD(BPF_REG_1, 0), 5170 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 5171 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), 5172 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), 5173 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9), 5174 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3), 5175 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), 5176 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2), 5177 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), 5178 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0), 5179 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2), 5180 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5), 5181 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), 5182 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4), 5183 
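			/* The unaligned load/store tests above deliberately access
			 * the value at unaligned offsets (the pointer is advanced
			 * by 3 or 5 first), which is why they carry the
			 * F_NEEDS_EFFICIENT_UNALIGNED_ACCESS flag.
			 */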
BPF_EXIT_INSN(), 5184 }, 5185 .fixup_map2 = { 3 }, 5186 .errstr_unpriv = "R0 leaks addr", 5187 .result = ACCEPT, 5188 .result_unpriv = REJECT, 5189 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 5190 }, 5191 { 5192 "map element value illegal alu op, 1", 5193 .insns = { 5194 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 5195 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 5196 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 5197 BPF_LD_MAP_FD(BPF_REG_1, 0), 5198 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 5199 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), 5200 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8), 5201 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22), 5202 BPF_EXIT_INSN(), 5203 }, 5204 .fixup_map2 = { 3 }, 5205 .errstr_unpriv = "R0 bitwise operator &= on pointer", 5206 .errstr = "invalid mem access 'inv'", 5207 .result = REJECT, 5208 .result_unpriv = REJECT, 5209 }, 5210 { 5211 "map element value illegal alu op, 2", 5212 .insns = { 5213 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 5214 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 5215 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 5216 BPF_LD_MAP_FD(BPF_REG_1, 0), 5217 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 5218 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), 5219 BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0), 5220 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22), 5221 BPF_EXIT_INSN(), 5222 }, 5223 .fixup_map2 = { 3 }, 5224 .errstr_unpriv = "R0 32-bit pointer arithmetic prohibited", 5225 .errstr = "invalid mem access 'inv'", 5226 .result = REJECT, 5227 .result_unpriv = REJECT, 5228 }, 5229 { 5230 "map element value illegal alu op, 3", 5231 .insns = { 5232 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 5233 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 5234 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 5235 BPF_LD_MAP_FD(BPF_REG_1, 0), 5236 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 5237 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), 5238 BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42), 5239 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22), 5240 BPF_EXIT_INSN(), 5241 }, 5242 .fixup_map2 = { 3 }, 5243 .errstr_unpriv = "R0 pointer arithmetic with /= operator", 5244 .errstr = "invalid mem access 'inv'", 5245 .result = REJECT, 5246 .result_unpriv = REJECT, 5247 }, 5248 { 5249 "map element value illegal alu op, 4", 5250 .insns = { 5251 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 5252 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 5253 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 5254 BPF_LD_MAP_FD(BPF_REG_1, 0), 5255 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 5256 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), 5257 BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64), 5258 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22), 5259 BPF_EXIT_INSN(), 5260 }, 5261 .fixup_map2 = { 3 }, 5262 .errstr_unpriv = "R0 pointer arithmetic prohibited", 5263 .errstr = "invalid mem access 'inv'", 5264 .result = REJECT, 5265 .result_unpriv = REJECT, 5266 }, 5267 { 5268 "map element value illegal alu op, 5", 5269 .insns = { 5270 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 5271 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 5272 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 5273 BPF_LD_MAP_FD(BPF_REG_1, 0), 5274 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 5275 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), 5276 BPF_MOV64_IMM(BPF_REG_3, 4096), 5277 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 5278 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 5279 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0), 5280 BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0), 5281 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0), 5282 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22), 5283 BPF_EXIT_INSN(), 5284 }, 5285 .fixup_map2 = { 3 }, 5286 .errstr = "R0 invalid mem access 'inv'", 5287 .result = REJECT, 5288 }, 5289 { 5290 "map element value is preserved across register 
spilling", 5291 .insns = { 5292 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 5293 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 5294 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 5295 BPF_LD_MAP_FD(BPF_REG_1, 0), 5296 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 5297 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), 5298 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5299 offsetof(struct test_val, foo)), 5300 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42), 5301 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 5302 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184), 5303 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), 5304 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0), 5305 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42), 5306 BPF_EXIT_INSN(), 5307 }, 5308 .fixup_map2 = { 3 }, 5309 .errstr_unpriv = "R0 leaks addr", 5310 .result = ACCEPT, 5311 .result_unpriv = REJECT, 5312 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 5313 }, 5314 { 5315 "helper access to variable memory: stack, bitwise AND + JMP, correct bounds", 5316 .insns = { 5317 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 5318 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), 5319 BPF_MOV64_IMM(BPF_REG_0, 0), 5320 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64), 5321 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56), 5322 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48), 5323 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40), 5324 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32), 5325 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24), 5326 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), 5327 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), 5328 BPF_MOV64_IMM(BPF_REG_2, 16), 5329 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), 5330 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), 5331 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64), 5332 BPF_MOV64_IMM(BPF_REG_4, 0), 5333 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2), 5334 BPF_MOV64_IMM(BPF_REG_3, 0), 5335 BPF_EMIT_CALL(BPF_FUNC_probe_read), 5336 BPF_MOV64_IMM(BPF_REG_0, 0), 5337 BPF_EXIT_INSN(), 5338 }, 5339 .result = ACCEPT, 5340 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 5341 }, 5342 { 5343 "helper access to variable memory: stack, bitwise AND, zero included", 5344 .insns = { 5345 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 5346 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), 5347 BPF_MOV64_IMM(BPF_REG_2, 16), 5348 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), 5349 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), 5350 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64), 5351 BPF_MOV64_IMM(BPF_REG_3, 0), 5352 BPF_EMIT_CALL(BPF_FUNC_probe_read), 5353 BPF_EXIT_INSN(), 5354 }, 5355 .errstr = "invalid indirect read from stack off -64+0 size 64", 5356 .result = REJECT, 5357 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 5358 }, 5359 { 5360 "helper access to variable memory: stack, bitwise AND + JMP, wrong max", 5361 .insns = { 5362 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 5363 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), 5364 BPF_MOV64_IMM(BPF_REG_2, 16), 5365 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), 5366 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), 5367 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65), 5368 BPF_MOV64_IMM(BPF_REG_4, 0), 5369 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2), 5370 BPF_MOV64_IMM(BPF_REG_3, 0), 5371 BPF_EMIT_CALL(BPF_FUNC_probe_read), 5372 BPF_MOV64_IMM(BPF_REG_0, 0), 5373 BPF_EXIT_INSN(), 5374 }, 5375 .errstr = "invalid stack type R1 off=-64 access_size=65", 5376 .result = REJECT, 5377 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 5378 }, 5379 { 5380 "helper access to variable memory: stack, JMP, correct bounds", 5381 .insns = { 5382 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 5383 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), 5384 
BPF_MOV64_IMM(BPF_REG_0, 0), 5385 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64), 5386 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56), 5387 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48), 5388 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40), 5389 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32), 5390 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24), 5391 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), 5392 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), 5393 BPF_MOV64_IMM(BPF_REG_2, 16), 5394 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), 5395 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), 5396 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4), 5397 BPF_MOV64_IMM(BPF_REG_4, 0), 5398 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2), 5399 BPF_MOV64_IMM(BPF_REG_3, 0), 5400 BPF_EMIT_CALL(BPF_FUNC_probe_read), 5401 BPF_MOV64_IMM(BPF_REG_0, 0), 5402 BPF_EXIT_INSN(), 5403 }, 5404 .result = ACCEPT, 5405 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 5406 }, 5407 { 5408 "helper access to variable memory: stack, JMP (signed), correct bounds", 5409 .insns = { 5410 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 5411 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), 5412 BPF_MOV64_IMM(BPF_REG_0, 0), 5413 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64), 5414 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56), 5415 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48), 5416 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40), 5417 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32), 5418 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24), 5419 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), 5420 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), 5421 BPF_MOV64_IMM(BPF_REG_2, 16), 5422 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), 5423 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), 5424 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4), 5425 BPF_MOV64_IMM(BPF_REG_4, 0), 5426 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2), 5427 BPF_MOV64_IMM(BPF_REG_3, 0), 5428 BPF_EMIT_CALL(BPF_FUNC_probe_read), 5429 BPF_MOV64_IMM(BPF_REG_0, 0), 5430 BPF_EXIT_INSN(), 5431 }, 5432 .result = ACCEPT, 5433 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 5434 }, 5435 { 5436 "helper access to variable memory: stack, JMP, bounds + offset", 5437 .insns = { 5438 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 5439 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), 5440 BPF_MOV64_IMM(BPF_REG_2, 16), 5441 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), 5442 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), 5443 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5), 5444 BPF_MOV64_IMM(BPF_REG_4, 0), 5445 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3), 5446 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), 5447 BPF_MOV64_IMM(BPF_REG_3, 0), 5448 BPF_EMIT_CALL(BPF_FUNC_probe_read), 5449 BPF_MOV64_IMM(BPF_REG_0, 0), 5450 BPF_EXIT_INSN(), 5451 }, 5452 .errstr = "invalid stack type R1 off=-64 access_size=65", 5453 .result = REJECT, 5454 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 5455 }, 5456 { 5457 "helper access to variable memory: stack, JMP, wrong max", 5458 .insns = { 5459 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 5460 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), 5461 BPF_MOV64_IMM(BPF_REG_2, 16), 5462 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), 5463 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), 5464 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4), 5465 BPF_MOV64_IMM(BPF_REG_4, 0), 5466 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2), 5467 BPF_MOV64_IMM(BPF_REG_3, 0), 5468 BPF_EMIT_CALL(BPF_FUNC_probe_read), 5469 BPF_MOV64_IMM(BPF_REG_0, 0), 5470 BPF_EXIT_INSN(), 5471 }, 5472 .errstr = "invalid stack type R1 off=-64 access_size=65", 5473 .result = REJECT, 5474 
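		/* access_size=65 in the message is the largest length R2 can
		 * still hold after the JGT check above: one byte past the
		 * 64-byte window that starts at fp-64.
		 */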
.prog_type = BPF_PROG_TYPE_TRACEPOINT, 5475 }, 5476 { 5477 "helper access to variable memory: stack, JMP, no max check", 5478 .insns = { 5479 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 5480 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), 5481 BPF_MOV64_IMM(BPF_REG_2, 16), 5482 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), 5483 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), 5484 BPF_MOV64_IMM(BPF_REG_4, 0), 5485 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2), 5486 BPF_MOV64_IMM(BPF_REG_3, 0), 5487 BPF_EMIT_CALL(BPF_FUNC_probe_read), 5488 BPF_MOV64_IMM(BPF_REG_0, 0), 5489 BPF_EXIT_INSN(), 5490 }, 5491 /* because max wasn't checked, signed min is negative */ 5492 .errstr = "R2 min value is negative, either use unsigned or 'var &= const'", 5493 .result = REJECT, 5494 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 5495 }, 5496 { 5497 "helper access to variable memory: stack, JMP, no min check", 5498 .insns = { 5499 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 5500 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), 5501 BPF_MOV64_IMM(BPF_REG_2, 16), 5502 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), 5503 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), 5504 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3), 5505 BPF_MOV64_IMM(BPF_REG_3, 0), 5506 BPF_EMIT_CALL(BPF_FUNC_probe_read), 5507 BPF_MOV64_IMM(BPF_REG_0, 0), 5508 BPF_EXIT_INSN(), 5509 }, 5510 .errstr = "invalid indirect read from stack off -64+0 size 64", 5511 .result = REJECT, 5512 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 5513 }, 5514 { 5515 "helper access to variable memory: stack, JMP (signed), no min check", 5516 .insns = { 5517 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 5518 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), 5519 BPF_MOV64_IMM(BPF_REG_2, 16), 5520 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), 5521 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), 5522 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3), 5523 BPF_MOV64_IMM(BPF_REG_3, 0), 5524 BPF_EMIT_CALL(BPF_FUNC_probe_read), 5525 BPF_MOV64_IMM(BPF_REG_0, 0), 5526 BPF_EXIT_INSN(), 5527 }, 5528 .errstr = "R2 min value is negative", 5529 .result = REJECT, 5530 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 5531 }, 5532 { 5533 "helper access to variable memory: map, JMP, correct bounds", 5534 .insns = { 5535 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 5536 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 5537 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 5538 BPF_LD_MAP_FD(BPF_REG_1, 0), 5539 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 5540 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10), 5541 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 5542 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)), 5543 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), 5544 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), 5545 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 5546 sizeof(struct test_val), 4), 5547 BPF_MOV64_IMM(BPF_REG_4, 0), 5548 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2), 5549 BPF_MOV64_IMM(BPF_REG_3, 0), 5550 BPF_EMIT_CALL(BPF_FUNC_probe_read), 5551 BPF_MOV64_IMM(BPF_REG_0, 0), 5552 BPF_EXIT_INSN(), 5553 }, 5554 .fixup_map2 = { 3 }, 5555 .result = ACCEPT, 5556 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 5557 }, 5558 { 5559 "helper access to variable memory: map, JMP, wrong max", 5560 .insns = { 5561 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 5562 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 5563 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 5564 BPF_LD_MAP_FD(BPF_REG_1, 0), 5565 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 5566 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10), 5567 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 5568 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)), 5569 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), 5570 
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), 5571 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 5572 sizeof(struct test_val) + 1, 4), 5573 BPF_MOV64_IMM(BPF_REG_4, 0), 5574 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2), 5575 BPF_MOV64_IMM(BPF_REG_3, 0), 5576 BPF_EMIT_CALL(BPF_FUNC_probe_read), 5577 BPF_MOV64_IMM(BPF_REG_0, 0), 5578 BPF_EXIT_INSN(), 5579 }, 5580 .fixup_map2 = { 3 }, 5581 .errstr = "invalid access to map value, value_size=48 off=0 size=49", 5582 .result = REJECT, 5583 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 5584 }, 5585 { 5586 "helper access to variable memory: map adjusted, JMP, correct bounds", 5587 .insns = { 5588 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 5589 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 5590 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 5591 BPF_LD_MAP_FD(BPF_REG_1, 0), 5592 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 5593 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), 5594 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 5595 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20), 5596 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)), 5597 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), 5598 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), 5599 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 5600 sizeof(struct test_val) - 20, 4), 5601 BPF_MOV64_IMM(BPF_REG_4, 0), 5602 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2), 5603 BPF_MOV64_IMM(BPF_REG_3, 0), 5604 BPF_EMIT_CALL(BPF_FUNC_probe_read), 5605 BPF_MOV64_IMM(BPF_REG_0, 0), 5606 BPF_EXIT_INSN(), 5607 }, 5608 .fixup_map2 = { 3 }, 5609 .result = ACCEPT, 5610 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 5611 }, 5612 { 5613 "helper access to variable memory: map adjusted, JMP, wrong max", 5614 .insns = { 5615 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 5616 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 5617 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 5618 BPF_LD_MAP_FD(BPF_REG_1, 0), 5619 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 5620 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), 5621 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 5622 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20), 5623 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)), 5624 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), 5625 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), 5626 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 5627 sizeof(struct test_val) - 19, 4), 5628 BPF_MOV64_IMM(BPF_REG_4, 0), 5629 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2), 5630 BPF_MOV64_IMM(BPF_REG_3, 0), 5631 BPF_EMIT_CALL(BPF_FUNC_probe_read), 5632 BPF_MOV64_IMM(BPF_REG_0, 0), 5633 BPF_EXIT_INSN(), 5634 }, 5635 .fixup_map2 = { 3 }, 5636 .errstr = "R1 min value is outside of the array range", 5637 .result = REJECT, 5638 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 5639 }, 5640 { 5641 "helper access to variable memory: size = 0 allowed on NULL", 5642 .insns = { 5643 BPF_MOV64_IMM(BPF_REG_1, 0), 5644 BPF_MOV64_IMM(BPF_REG_2, 0), 5645 BPF_MOV64_IMM(BPF_REG_3, 0), 5646 BPF_MOV64_IMM(BPF_REG_4, 0), 5647 BPF_MOV64_IMM(BPF_REG_5, 0), 5648 BPF_EMIT_CALL(BPF_FUNC_csum_diff), 5649 BPF_EXIT_INSN(), 5650 }, 5651 .result = ACCEPT, 5652 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 5653 }, 5654 { 5655 "helper access to variable memory: size > 0 not allowed on NULL", 5656 .insns = { 5657 BPF_MOV64_IMM(BPF_REG_1, 0), 5658 BPF_MOV64_IMM(BPF_REG_2, 0), 5659 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), 5660 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), 5661 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64), 5662 BPF_MOV64_IMM(BPF_REG_3, 0), 5663 BPF_MOV64_IMM(BPF_REG_4, 0), 5664 BPF_MOV64_IMM(BPF_REG_5, 0), 5665 BPF_EMIT_CALL(BPF_FUNC_csum_diff), 5666 BPF_EXIT_INSN(), 5667 }, 5668 .errstr = "R1 type=inv expected=fp", 5669 .result = 
REJECT, 5670 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 5671 }, 5672 { 5673 "helper access to variable memory: size = 0 allowed on != NULL stack pointer", 5674 .insns = { 5675 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 5676 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 5677 BPF_MOV64_IMM(BPF_REG_2, 0), 5678 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0), 5679 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8), 5680 BPF_MOV64_IMM(BPF_REG_3, 0), 5681 BPF_MOV64_IMM(BPF_REG_4, 0), 5682 BPF_MOV64_IMM(BPF_REG_5, 0), 5683 BPF_EMIT_CALL(BPF_FUNC_csum_diff), 5684 BPF_EXIT_INSN(), 5685 }, 5686 .result = ACCEPT, 5687 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 5688 }, 5689 { 5690 "helper access to variable memory: size = 0 allowed on != NULL map pointer", 5691 .insns = { 5692 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 5693 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 5694 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 5695 BPF_LD_MAP_FD(BPF_REG_1, 0), 5696 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 5697 BPF_FUNC_map_lookup_elem), 5698 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), 5699 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 5700 BPF_MOV64_IMM(BPF_REG_2, 0), 5701 BPF_MOV64_IMM(BPF_REG_3, 0), 5702 BPF_MOV64_IMM(BPF_REG_4, 0), 5703 BPF_MOV64_IMM(BPF_REG_5, 0), 5704 BPF_EMIT_CALL(BPF_FUNC_csum_diff), 5705 BPF_EXIT_INSN(), 5706 }, 5707 .fixup_map1 = { 3 }, 5708 .result = ACCEPT, 5709 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 5710 }, 5711 { 5712 "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer", 5713 .insns = { 5714 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 5715 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 5716 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 5717 BPF_LD_MAP_FD(BPF_REG_1, 0), 5718 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 5719 BPF_FUNC_map_lookup_elem), 5720 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), 5721 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0), 5722 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 7), 5723 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 5724 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 5725 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0), 5726 BPF_MOV64_IMM(BPF_REG_3, 0), 5727 BPF_MOV64_IMM(BPF_REG_4, 0), 5728 BPF_MOV64_IMM(BPF_REG_5, 0), 5729 BPF_EMIT_CALL(BPF_FUNC_csum_diff), 5730 BPF_EXIT_INSN(), 5731 }, 5732 .fixup_map1 = { 3 }, 5733 .result = ACCEPT, 5734 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 5735 }, 5736 { 5737 "helper access to variable memory: size possible = 0 allowed on != NULL map pointer", 5738 .insns = { 5739 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 5740 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 5741 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 5742 BPF_LD_MAP_FD(BPF_REG_1, 0), 5743 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 5744 BPF_FUNC_map_lookup_elem), 5745 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), 5746 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 5747 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0), 5748 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4), 5749 BPF_MOV64_IMM(BPF_REG_3, 0), 5750 BPF_MOV64_IMM(BPF_REG_4, 0), 5751 BPF_MOV64_IMM(BPF_REG_5, 0), 5752 BPF_EMIT_CALL(BPF_FUNC_csum_diff), 5753 BPF_EXIT_INSN(), 5754 }, 5755 .fixup_map1 = { 3 }, 5756 .result = ACCEPT, 5757 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 5758 }, 5759 { 5760 "helper access to variable memory: size possible = 0 allowed on != NULL packet pointer", 5761 .insns = { 5762 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, 5763 offsetof(struct __sk_buff, data)), 5764 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 5765 offsetof(struct __sk_buff, data_end)), 5766 BPF_MOV64_REG(BPF_REG_0, BPF_REG_6), 5767 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 5768 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7), 5769 BPF_MOV64_REG(BPF_REG_1, 
BPF_REG_6), 5770 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 0), 5771 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4), 5772 BPF_MOV64_IMM(BPF_REG_3, 0), 5773 BPF_MOV64_IMM(BPF_REG_4, 0), 5774 BPF_MOV64_IMM(BPF_REG_5, 0), 5775 BPF_EMIT_CALL(BPF_FUNC_csum_diff), 5776 BPF_EXIT_INSN(), 5777 }, 5778 .result = ACCEPT, 5779 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 5780 }, 5781 { 5782 "helper access to variable memory: 8 bytes leak", 5783 .insns = { 5784 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 5785 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), 5786 BPF_MOV64_IMM(BPF_REG_0, 0), 5787 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64), 5788 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56), 5789 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48), 5790 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40), 5791 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24), 5792 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), 5793 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), 5794 BPF_MOV64_IMM(BPF_REG_2, 0), 5795 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), 5796 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), 5797 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63), 5798 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), 5799 BPF_MOV64_IMM(BPF_REG_3, 0), 5800 BPF_EMIT_CALL(BPF_FUNC_probe_read), 5801 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), 5802 BPF_EXIT_INSN(), 5803 }, 5804 .errstr = "invalid indirect read from stack off -64+32 size 64", 5805 .result = REJECT, 5806 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 5807 }, 5808 { 5809 "helper access to variable memory: 8 bytes no leak (init memory)", 5810 .insns = { 5811 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 5812 BPF_MOV64_IMM(BPF_REG_0, 0), 5813 BPF_MOV64_IMM(BPF_REG_0, 0), 5814 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64), 5815 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56), 5816 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48), 5817 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40), 5818 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32), 5819 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24), 5820 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), 5821 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), 5822 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), 5823 BPF_MOV64_IMM(BPF_REG_2, 0), 5824 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32), 5825 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32), 5826 BPF_MOV64_IMM(BPF_REG_3, 0), 5827 BPF_EMIT_CALL(BPF_FUNC_probe_read), 5828 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), 5829 BPF_EXIT_INSN(), 5830 }, 5831 .result = ACCEPT, 5832 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 5833 }, 5834 { 5835 "invalid and of negative number", 5836 .insns = { 5837 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 5838 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 5839 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 5840 BPF_LD_MAP_FD(BPF_REG_1, 0), 5841 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 5842 BPF_FUNC_map_lookup_elem), 5843 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 5844 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), 5845 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4), 5846 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2), 5847 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 5848 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 5849 offsetof(struct test_val, foo)), 5850 BPF_EXIT_INSN(), 5851 }, 5852 .fixup_map2 = { 3 }, 5853 .errstr = "R0 max value is outside of the array range", 5854 .result = REJECT, 5855 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 5856 }, 5857 { 5858 "invalid range check", 5859 .insns = { 5860 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 5861 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 5862 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 5863 BPF_LD_MAP_FD(BPF_REG_1, 0), 5864 
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 5865 BPF_FUNC_map_lookup_elem), 5866 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12), 5867 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), 5868 BPF_MOV64_IMM(BPF_REG_9, 1), 5869 BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2), 5870 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1), 5871 BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1), 5872 BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1), 5873 BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1), 5874 BPF_MOV32_IMM(BPF_REG_3, 1), 5875 BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9), 5876 BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000), 5877 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3), 5878 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0), 5879 BPF_MOV64_REG(BPF_REG_0, 0), 5880 BPF_EXIT_INSN(), 5881 }, 5882 .fixup_map2 = { 3 }, 5883 .errstr = "R0 max value is outside of the array range", 5884 .result = REJECT, 5885 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 5886 }, 5887 { 5888 "map in map access", 5889 .insns = { 5890 BPF_ST_MEM(0, BPF_REG_10, -4, 0), 5891 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 5892 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), 5893 BPF_LD_MAP_FD(BPF_REG_1, 0), 5894 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 5895 BPF_FUNC_map_lookup_elem), 5896 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), 5897 BPF_ST_MEM(0, BPF_REG_10, -4, 0), 5898 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 5899 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), 5900 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 5901 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 5902 BPF_FUNC_map_lookup_elem), 5903 BPF_MOV64_REG(BPF_REG_0, 0), 5904 BPF_EXIT_INSN(), 5905 }, 5906 .fixup_map_in_map = { 3 }, 5907 .result = ACCEPT, 5908 }, 5909 { 5910 "invalid inner map pointer", 5911 .insns = { 5912 BPF_ST_MEM(0, BPF_REG_10, -4, 0), 5913 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 5914 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), 5915 BPF_LD_MAP_FD(BPF_REG_1, 0), 5916 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 5917 BPF_FUNC_map_lookup_elem), 5918 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), 5919 BPF_ST_MEM(0, BPF_REG_10, -4, 0), 5920 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 5921 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), 5922 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 5923 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), 5924 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 5925 BPF_FUNC_map_lookup_elem), 5926 BPF_MOV64_REG(BPF_REG_0, 0), 5927 BPF_EXIT_INSN(), 5928 }, 5929 .fixup_map_in_map = { 3 }, 5930 .errstr = "R1 type=inv expected=map_ptr", 5931 .errstr_unpriv = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited", 5932 .result = REJECT, 5933 }, 5934 { 5935 "forgot null checking on the inner map pointer", 5936 .insns = { 5937 BPF_ST_MEM(0, BPF_REG_10, -4, 0), 5938 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 5939 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), 5940 BPF_LD_MAP_FD(BPF_REG_1, 0), 5941 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 5942 BPF_FUNC_map_lookup_elem), 5943 BPF_ST_MEM(0, BPF_REG_10, -4, 0), 5944 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 5945 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), 5946 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 5947 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 5948 BPF_FUNC_map_lookup_elem), 5949 BPF_MOV64_REG(BPF_REG_0, 0), 5950 BPF_EXIT_INSN(), 5951 }, 5952 .fixup_map_in_map = { 3 }, 5953 .errstr = "R1 type=map_value_or_null expected=map_ptr", 5954 .result = REJECT, 5955 }, 5956 { 5957 "ld_abs: check calling conv, r1", 5958 .insns = { 5959 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 5960 BPF_MOV64_IMM(BPF_REG_1, 0), 5961 BPF_LD_ABS(BPF_W, -0x200000), 5962 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), 5963 BPF_EXIT_INSN(), 5964 }, 5965 .errstr = "R1 !read_ok", 5966 .result = REJECT, 5967 }, 5968 { 5969 
"ld_abs: check calling conv, r2", 5970 .insns = { 5971 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 5972 BPF_MOV64_IMM(BPF_REG_2, 0), 5973 BPF_LD_ABS(BPF_W, -0x200000), 5974 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 5975 BPF_EXIT_INSN(), 5976 }, 5977 .errstr = "R2 !read_ok", 5978 .result = REJECT, 5979 }, 5980 { 5981 "ld_abs: check calling conv, r3", 5982 .insns = { 5983 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 5984 BPF_MOV64_IMM(BPF_REG_3, 0), 5985 BPF_LD_ABS(BPF_W, -0x200000), 5986 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3), 5987 BPF_EXIT_INSN(), 5988 }, 5989 .errstr = "R3 !read_ok", 5990 .result = REJECT, 5991 }, 5992 { 5993 "ld_abs: check calling conv, r4", 5994 .insns = { 5995 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 5996 BPF_MOV64_IMM(BPF_REG_4, 0), 5997 BPF_LD_ABS(BPF_W, -0x200000), 5998 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4), 5999 BPF_EXIT_INSN(), 6000 }, 6001 .errstr = "R4 !read_ok", 6002 .result = REJECT, 6003 }, 6004 { 6005 "ld_abs: check calling conv, r5", 6006 .insns = { 6007 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 6008 BPF_MOV64_IMM(BPF_REG_5, 0), 6009 BPF_LD_ABS(BPF_W, -0x200000), 6010 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), 6011 BPF_EXIT_INSN(), 6012 }, 6013 .errstr = "R5 !read_ok", 6014 .result = REJECT, 6015 }, 6016 { 6017 "ld_abs: check calling conv, r7", 6018 .insns = { 6019 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 6020 BPF_MOV64_IMM(BPF_REG_7, 0), 6021 BPF_LD_ABS(BPF_W, -0x200000), 6022 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7), 6023 BPF_EXIT_INSN(), 6024 }, 6025 .result = ACCEPT, 6026 }, 6027 { 6028 "ld_ind: check calling conv, r1", 6029 .insns = { 6030 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 6031 BPF_MOV64_IMM(BPF_REG_1, 1), 6032 BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000), 6033 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), 6034 BPF_EXIT_INSN(), 6035 }, 6036 .errstr = "R1 !read_ok", 6037 .result = REJECT, 6038 }, 6039 { 6040 "ld_ind: check calling conv, r2", 6041 .insns = { 6042 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 6043 BPF_MOV64_IMM(BPF_REG_2, 1), 6044 BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000), 6045 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 6046 BPF_EXIT_INSN(), 6047 }, 6048 .errstr = "R2 !read_ok", 6049 .result = REJECT, 6050 }, 6051 { 6052 "ld_ind: check calling conv, r3", 6053 .insns = { 6054 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 6055 BPF_MOV64_IMM(BPF_REG_3, 1), 6056 BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000), 6057 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3), 6058 BPF_EXIT_INSN(), 6059 }, 6060 .errstr = "R3 !read_ok", 6061 .result = REJECT, 6062 }, 6063 { 6064 "ld_ind: check calling conv, r4", 6065 .insns = { 6066 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 6067 BPF_MOV64_IMM(BPF_REG_4, 1), 6068 BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000), 6069 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4), 6070 BPF_EXIT_INSN(), 6071 }, 6072 .errstr = "R4 !read_ok", 6073 .result = REJECT, 6074 }, 6075 { 6076 "ld_ind: check calling conv, r5", 6077 .insns = { 6078 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 6079 BPF_MOV64_IMM(BPF_REG_5, 1), 6080 BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000), 6081 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), 6082 BPF_EXIT_INSN(), 6083 }, 6084 .errstr = "R5 !read_ok", 6085 .result = REJECT, 6086 }, 6087 { 6088 "ld_ind: check calling conv, r7", 6089 .insns = { 6090 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 6091 BPF_MOV64_IMM(BPF_REG_7, 1), 6092 BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000), 6093 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7), 6094 BPF_EXIT_INSN(), 6095 }, 6096 .result = ACCEPT, 6097 }, 6098 { 6099 "check bpf_perf_event_data->sample_period byte load permitted", 6100 .insns = { 6101 BPF_MOV64_IMM(BPF_REG_0, 0), 6102 #if __BYTE_ORDER == __LITTLE_ENDIAN 6103 BPF_LDX_MEM(BPF_B, 
BPF_REG_0, BPF_REG_1, 6104 offsetof(struct bpf_perf_event_data, sample_period)), 6105 #else 6106 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 6107 offsetof(struct bpf_perf_event_data, sample_period) + 7), 6108 #endif 6109 BPF_EXIT_INSN(), 6110 }, 6111 .result = ACCEPT, 6112 .prog_type = BPF_PROG_TYPE_PERF_EVENT, 6113 }, 6114 { 6115 "check bpf_perf_event_data->sample_period half load permitted", 6116 .insns = { 6117 BPF_MOV64_IMM(BPF_REG_0, 0), 6118 #if __BYTE_ORDER == __LITTLE_ENDIAN 6119 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 6120 offsetof(struct bpf_perf_event_data, sample_period)), 6121 #else 6122 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 6123 offsetof(struct bpf_perf_event_data, sample_period) + 6), 6124 #endif 6125 BPF_EXIT_INSN(), 6126 }, 6127 .result = ACCEPT, 6128 .prog_type = BPF_PROG_TYPE_PERF_EVENT, 6129 }, 6130 { 6131 "check bpf_perf_event_data->sample_period word load permitted", 6132 .insns = { 6133 BPF_MOV64_IMM(BPF_REG_0, 0), 6134 #if __BYTE_ORDER == __LITTLE_ENDIAN 6135 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 6136 offsetof(struct bpf_perf_event_data, sample_period)), 6137 #else 6138 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 6139 offsetof(struct bpf_perf_event_data, sample_period) + 4), 6140 #endif 6141 BPF_EXIT_INSN(), 6142 }, 6143 .result = ACCEPT, 6144 .prog_type = BPF_PROG_TYPE_PERF_EVENT, 6145 }, 6146 { 6147 "check bpf_perf_event_data->sample_period dword load permitted", 6148 .insns = { 6149 BPF_MOV64_IMM(BPF_REG_0, 0), 6150 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 6151 offsetof(struct bpf_perf_event_data, sample_period)), 6152 BPF_EXIT_INSN(), 6153 }, 6154 .result = ACCEPT, 6155 .prog_type = BPF_PROG_TYPE_PERF_EVENT, 6156 }, 6157 { 6158 "check skb->data half load not permitted", 6159 .insns = { 6160 BPF_MOV64_IMM(BPF_REG_0, 0), 6161 #if __BYTE_ORDER == __LITTLE_ENDIAN 6162 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 6163 offsetof(struct __sk_buff, data)), 6164 #else 6165 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 6166 offsetof(struct __sk_buff, data) + 2), 6167 #endif 6168 BPF_EXIT_INSN(), 6169 }, 6170 .result = REJECT, 6171 .errstr = "invalid bpf_context access", 6172 }, 6173 { 6174 "check skb->tc_classid half load not permitted for lwt prog", 6175 .insns = { 6176 BPF_MOV64_IMM(BPF_REG_0, 0), 6177 #if __BYTE_ORDER == __LITTLE_ENDIAN 6178 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 6179 offsetof(struct __sk_buff, tc_classid)), 6180 #else 6181 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 6182 offsetof(struct __sk_buff, tc_classid) + 2), 6183 #endif 6184 BPF_EXIT_INSN(), 6185 }, 6186 .result = REJECT, 6187 .errstr = "invalid bpf_context access", 6188 .prog_type = BPF_PROG_TYPE_LWT_IN, 6189 }, 6190 { 6191 "bounds checks mixing signed and unsigned, positive bounds", 6192 .insns = { 6193 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 6194 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 6195 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 6196 BPF_LD_MAP_FD(BPF_REG_1, 0), 6197 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 6198 BPF_FUNC_map_lookup_elem), 6199 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), 6200 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), 6201 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), 6202 BPF_MOV64_IMM(BPF_REG_2, 2), 6203 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3), 6204 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2), 6205 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 6206 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), 6207 BPF_MOV64_IMM(BPF_REG_0, 0), 6208 BPF_EXIT_INSN(), 6209 }, 6210 .fixup_map1 = { 3 }, 6211 .errstr = "R0 min value is negative", 6212 .result = REJECT, 6213 }, 6214 { 6215 "bounds checks mixing signed 
and unsigned", 6216 .insns = { 6217 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 6218 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 6219 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 6220 BPF_LD_MAP_FD(BPF_REG_1, 0), 6221 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 6222 BPF_FUNC_map_lookup_elem), 6223 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), 6224 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), 6225 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), 6226 BPF_MOV64_IMM(BPF_REG_2, -1), 6227 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3), 6228 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2), 6229 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 6230 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), 6231 BPF_MOV64_IMM(BPF_REG_0, 0), 6232 BPF_EXIT_INSN(), 6233 }, 6234 .fixup_map1 = { 3 }, 6235 .errstr = "R0 min value is negative", 6236 .result = REJECT, 6237 }, 6238 { 6239 "bounds checks mixing signed and unsigned, variant 2", 6240 .insns = { 6241 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 6242 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 6243 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 6244 BPF_LD_MAP_FD(BPF_REG_1, 0), 6245 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 6246 BPF_FUNC_map_lookup_elem), 6247 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), 6248 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), 6249 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), 6250 BPF_MOV64_IMM(BPF_REG_2, -1), 6251 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5), 6252 BPF_MOV64_IMM(BPF_REG_8, 0), 6253 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1), 6254 BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2), 6255 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8), 6256 BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0), 6257 BPF_MOV64_IMM(BPF_REG_0, 0), 6258 BPF_EXIT_INSN(), 6259 }, 6260 .fixup_map1 = { 3 }, 6261 .errstr = "R8 invalid mem access 'inv'", 6262 .result = REJECT, 6263 }, 6264 { 6265 "bounds checks mixing signed and unsigned, variant 3", 6266 .insns = { 6267 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 6268 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 6269 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 6270 BPF_LD_MAP_FD(BPF_REG_1, 0), 6271 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 6272 BPF_FUNC_map_lookup_elem), 6273 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), 6274 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), 6275 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), 6276 BPF_MOV64_IMM(BPF_REG_2, -1), 6277 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4), 6278 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1), 6279 BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2), 6280 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8), 6281 BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0), 6282 BPF_MOV64_IMM(BPF_REG_0, 0), 6283 BPF_EXIT_INSN(), 6284 }, 6285 .fixup_map1 = { 3 }, 6286 .errstr = "R8 invalid mem access 'inv'", 6287 .result = REJECT, 6288 }, 6289 { 6290 "bounds checks mixing signed and unsigned, variant 4", 6291 .insns = { 6292 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 6293 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 6294 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 6295 BPF_LD_MAP_FD(BPF_REG_1, 0), 6296 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 6297 BPF_FUNC_map_lookup_elem), 6298 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), 6299 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), 6300 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), 6301 BPF_MOV64_IMM(BPF_REG_2, 1), 6302 BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), 6303 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2), 6304 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 6305 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), 6306 BPF_MOV64_IMM(BPF_REG_0, 0), 6307 BPF_EXIT_INSN(), 6308 }, 6309 .fixup_map1 = { 3 }, 6310 .result = ACCEPT, 6311 }, 6312 { 6313 "bounds checks mixing signed and unsigned, variant 5", 6314 .insns = { 
6315 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 6316 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 6317 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 6318 BPF_LD_MAP_FD(BPF_REG_1, 0), 6319 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 6320 BPF_FUNC_map_lookup_elem), 6321 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), 6322 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), 6323 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), 6324 BPF_MOV64_IMM(BPF_REG_2, -1), 6325 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5), 6326 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4), 6327 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4), 6328 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), 6329 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), 6330 BPF_MOV64_IMM(BPF_REG_0, 0), 6331 BPF_EXIT_INSN(), 6332 }, 6333 .fixup_map1 = { 3 }, 6334 .errstr = "R0 min value is negative", 6335 .result = REJECT, 6336 }, 6337 { 6338 "bounds checks mixing signed and unsigned, variant 6", 6339 .insns = { 6340 BPF_MOV64_IMM(BPF_REG_2, 0), 6341 BPF_MOV64_REG(BPF_REG_3, BPF_REG_10), 6342 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512), 6343 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), 6344 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16), 6345 BPF_MOV64_IMM(BPF_REG_6, -1), 6346 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5), 6347 BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4), 6348 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1), 6349 BPF_MOV64_IMM(BPF_REG_5, 0), 6350 BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0), 6351 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 6352 BPF_FUNC_skb_load_bytes), 6353 BPF_MOV64_IMM(BPF_REG_0, 0), 6354 BPF_EXIT_INSN(), 6355 }, 6356 .errstr = "R4 min value is negative, either use unsigned", 6357 .result = REJECT, 6358 }, 6359 { 6360 "bounds checks mixing signed and unsigned, variant 7", 6361 .insns = { 6362 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 6363 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 6364 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 6365 BPF_LD_MAP_FD(BPF_REG_1, 0), 6366 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 6367 BPF_FUNC_map_lookup_elem), 6368 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), 6369 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), 6370 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), 6371 BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024), 6372 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3), 6373 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2), 6374 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 6375 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), 6376 BPF_MOV64_IMM(BPF_REG_0, 0), 6377 BPF_EXIT_INSN(), 6378 }, 6379 .fixup_map1 = { 3 }, 6380 .result = ACCEPT, 6381 }, 6382 { 6383 "bounds checks mixing signed and unsigned, variant 8", 6384 .insns = { 6385 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 6386 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 6387 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 6388 BPF_LD_MAP_FD(BPF_REG_1, 0), 6389 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 6390 BPF_FUNC_map_lookup_elem), 6391 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), 6392 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), 6393 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), 6394 BPF_MOV64_IMM(BPF_REG_2, -1), 6395 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2), 6396 BPF_MOV64_IMM(BPF_REG_0, 0), 6397 BPF_EXIT_INSN(), 6398 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2), 6399 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 6400 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), 6401 BPF_MOV64_IMM(BPF_REG_0, 0), 6402 BPF_EXIT_INSN(), 6403 }, 6404 .fixup_map1 = { 3 }, 6405 .errstr = "R0 min value is negative", 6406 .result = REJECT, 6407 }, 6408 { 6409 "bounds checks mixing signed and unsigned, variant 9", 6410 .insns = { 6411 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 6412 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 6413 
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 6414 BPF_LD_MAP_FD(BPF_REG_1, 0), 6415 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 6416 BPF_FUNC_map_lookup_elem), 6417 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10), 6418 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), 6419 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), 6420 BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL), 6421 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2), 6422 BPF_MOV64_IMM(BPF_REG_0, 0), 6423 BPF_EXIT_INSN(), 6424 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2), 6425 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 6426 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), 6427 BPF_MOV64_IMM(BPF_REG_0, 0), 6428 BPF_EXIT_INSN(), 6429 }, 6430 .fixup_map1 = { 3 }, 6431 .result = ACCEPT, 6432 }, 6433 { 6434 "bounds checks mixing signed and unsigned, variant 10", 6435 .insns = { 6436 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 6437 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 6438 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 6439 BPF_LD_MAP_FD(BPF_REG_1, 0), 6440 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 6441 BPF_FUNC_map_lookup_elem), 6442 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), 6443 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), 6444 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), 6445 BPF_MOV64_IMM(BPF_REG_2, 0), 6446 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2), 6447 BPF_MOV64_IMM(BPF_REG_0, 0), 6448 BPF_EXIT_INSN(), 6449 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2), 6450 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 6451 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), 6452 BPF_MOV64_IMM(BPF_REG_0, 0), 6453 BPF_EXIT_INSN(), 6454 }, 6455 .fixup_map1 = { 3 }, 6456 .errstr = "R0 min value is negative", 6457 .result = REJECT, 6458 }, 6459 { 6460 "bounds checks mixing signed and unsigned, variant 11", 6461 .insns = { 6462 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 6463 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 6464 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 6465 BPF_LD_MAP_FD(BPF_REG_1, 0), 6466 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 6467 BPF_FUNC_map_lookup_elem), 6468 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), 6469 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), 6470 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), 6471 BPF_MOV64_IMM(BPF_REG_2, -1), 6472 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2), 6473 /* Dead branch. 
*/ 6474 BPF_MOV64_IMM(BPF_REG_0, 0), 6475 BPF_EXIT_INSN(), 6476 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2), 6477 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 6478 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), 6479 BPF_MOV64_IMM(BPF_REG_0, 0), 6480 BPF_EXIT_INSN(), 6481 }, 6482 .fixup_map1 = { 3 }, 6483 .errstr = "R0 min value is negative", 6484 .result = REJECT, 6485 }, 6486 { 6487 "bounds checks mixing signed and unsigned, variant 12", 6488 .insns = { 6489 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 6490 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 6491 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 6492 BPF_LD_MAP_FD(BPF_REG_1, 0), 6493 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 6494 BPF_FUNC_map_lookup_elem), 6495 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), 6496 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), 6497 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), 6498 BPF_MOV64_IMM(BPF_REG_2, -6), 6499 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2), 6500 BPF_MOV64_IMM(BPF_REG_0, 0), 6501 BPF_EXIT_INSN(), 6502 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2), 6503 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 6504 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), 6505 BPF_MOV64_IMM(BPF_REG_0, 0), 6506 BPF_EXIT_INSN(), 6507 }, 6508 .fixup_map1 = { 3 }, 6509 .errstr = "R0 min value is negative", 6510 .result = REJECT, 6511 }, 6512 { 6513 "bounds checks mixing signed and unsigned, variant 13", 6514 .insns = { 6515 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 6516 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 6517 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 6518 BPF_LD_MAP_FD(BPF_REG_1, 0), 6519 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 6520 BPF_FUNC_map_lookup_elem), 6521 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), 6522 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), 6523 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), 6524 BPF_MOV64_IMM(BPF_REG_2, 2), 6525 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2), 6526 BPF_MOV64_IMM(BPF_REG_7, 1), 6527 BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2), 6528 BPF_MOV64_IMM(BPF_REG_0, 0), 6529 BPF_EXIT_INSN(), 6530 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1), 6531 BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2), 6532 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7), 6533 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), 6534 BPF_MOV64_IMM(BPF_REG_0, 0), 6535 BPF_EXIT_INSN(), 6536 }, 6537 .fixup_map1 = { 3 }, 6538 .errstr = "R0 min value is negative", 6539 .result = REJECT, 6540 }, 6541 { 6542 "bounds checks mixing signed and unsigned, variant 14", 6543 .insns = { 6544 BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1, 6545 offsetof(struct __sk_buff, mark)), 6546 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 6547 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 6548 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 6549 BPF_LD_MAP_FD(BPF_REG_1, 0), 6550 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 6551 BPF_FUNC_map_lookup_elem), 6552 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), 6553 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), 6554 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), 6555 BPF_MOV64_IMM(BPF_REG_2, -1), 6556 BPF_MOV64_IMM(BPF_REG_8, 2), 6557 BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6), 6558 BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3), 6559 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2), 6560 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 6561 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), 6562 BPF_MOV64_IMM(BPF_REG_0, 0), 6563 BPF_EXIT_INSN(), 6564 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3), 6565 BPF_JMP_IMM(BPF_JA, 0, 0, -7), 6566 }, 6567 .fixup_map1 = { 4 }, 6568 .errstr = "R0 min value is negative", 6569 .result = REJECT, 6570 }, 6571 { 6572 "bounds checks mixing signed and unsigned, variant 15", 6573 .insns = { 6574 BPF_ST_MEM(BPF_DW, 
BPF_REG_10, -8, 0), 6575 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 6576 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 6577 BPF_LD_MAP_FD(BPF_REG_1, 0), 6578 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 6579 BPF_FUNC_map_lookup_elem), 6580 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 6581 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), 6582 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), 6583 BPF_MOV64_IMM(BPF_REG_2, -6), 6584 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2), 6585 BPF_MOV64_IMM(BPF_REG_0, 0), 6586 BPF_EXIT_INSN(), 6587 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 6588 BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2), 6589 BPF_MOV64_IMM(BPF_REG_0, 0), 6590 BPF_EXIT_INSN(), 6591 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), 6592 BPF_MOV64_IMM(BPF_REG_0, 0), 6593 BPF_EXIT_INSN(), 6594 }, 6595 .fixup_map1 = { 3 }, 6596 .errstr_unpriv = "R0 pointer comparison prohibited", 6597 .errstr = "R0 min value is negative", 6598 .result = REJECT, 6599 .result_unpriv = REJECT, 6600 }, 6601 { 6602 "subtraction bounds (map value) variant 1", 6603 .insns = { 6604 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 6605 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 6606 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 6607 BPF_LD_MAP_FD(BPF_REG_1, 0), 6608 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 6609 BPF_FUNC_map_lookup_elem), 6610 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), 6611 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), 6612 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7), 6613 BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1), 6614 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5), 6615 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3), 6616 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56), 6617 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 6618 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), 6619 BPF_EXIT_INSN(), 6620 BPF_MOV64_IMM(BPF_REG_0, 0), 6621 BPF_EXIT_INSN(), 6622 }, 6623 .fixup_map1 = { 3 }, 6624 .errstr = "R0 max value is outside of the array range", 6625 .result = REJECT, 6626 }, 6627 { 6628 "subtraction bounds (map value) variant 2", 6629 .insns = { 6630 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 6631 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 6632 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 6633 BPF_LD_MAP_FD(BPF_REG_1, 0), 6634 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 6635 BPF_FUNC_map_lookup_elem), 6636 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), 6637 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), 6638 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 6), 6639 BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1), 6640 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 4), 6641 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3), 6642 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 6643 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), 6644 BPF_EXIT_INSN(), 6645 BPF_MOV64_IMM(BPF_REG_0, 0), 6646 BPF_EXIT_INSN(), 6647 }, 6648 .fixup_map1 = { 3 }, 6649 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", 6650 .result = REJECT, 6651 }, 6652 { 6653 "variable-offset ctx access", 6654 .insns = { 6655 /* Get an unknown value */ 6656 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), 6657 /* Make it small and 4-byte aligned */ 6658 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4), 6659 /* add it to skb. 
We now have either &skb->len or 6660 * &skb->pkt_type, but we don't know which 6661 */ 6662 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2), 6663 /* dereference it */ 6664 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), 6665 BPF_EXIT_INSN(), 6666 }, 6667 .errstr = "variable ctx access var_off=(0x0; 0x4)", 6668 .result = REJECT, 6669 .prog_type = BPF_PROG_TYPE_LWT_IN, 6670 }, 6671 { 6672 "variable-offset stack access", 6673 .insns = { 6674 /* Fill the top 8 bytes of the stack */ 6675 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 6676 /* Get an unknown value */ 6677 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), 6678 /* Make it small and 4-byte aligned */ 6679 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4), 6680 BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8), 6681 /* add it to fp. We now have either fp-4 or fp-8, but 6682 * we don't know which 6683 */ 6684 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10), 6685 /* dereference it */ 6686 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0), 6687 BPF_EXIT_INSN(), 6688 }, 6689 .errstr = "variable stack access var_off=(0xfffffffffffffff8; 0x4)", 6690 .result = REJECT, 6691 .prog_type = BPF_PROG_TYPE_LWT_IN, 6692 }, 6693 { 6694 "liveness pruning and write screening", 6695 .insns = { 6696 /* Get an unknown value */ 6697 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), 6698 /* branch conditions teach us nothing about R2 */ 6699 BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1), 6700 BPF_MOV64_IMM(BPF_REG_0, 0), 6701 BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1), 6702 BPF_MOV64_IMM(BPF_REG_0, 0), 6703 BPF_EXIT_INSN(), 6704 }, 6705 .errstr = "R0 !read_ok", 6706 .result = REJECT, 6707 .prog_type = BPF_PROG_TYPE_LWT_IN, 6708 }, 6709 { 6710 "varlen_map_value_access pruning", 6711 .insns = { 6712 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 6713 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 6714 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 6715 BPF_LD_MAP_FD(BPF_REG_1, 0), 6716 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 6717 BPF_FUNC_map_lookup_elem), 6718 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), 6719 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), 6720 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES), 6721 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1), 6722 BPF_MOV32_IMM(BPF_REG_1, 0), 6723 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2), 6724 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 6725 BPF_JMP_IMM(BPF_JA, 0, 0, 0), 6726 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 6727 offsetof(struct test_val, foo)), 6728 BPF_EXIT_INSN(), 6729 }, 6730 .fixup_map2 = { 3 }, 6731 .errstr_unpriv = "R0 leaks addr", 6732 .errstr = "R0 unbounded memory access", 6733 .result_unpriv = REJECT, 6734 .result = REJECT, 6735 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 6736 }, 6737 { 6738 "invalid 64-bit BPF_END", 6739 .insns = { 6740 BPF_MOV32_IMM(BPF_REG_0, 0), 6741 { 6742 .code = BPF_ALU64 | BPF_END | BPF_TO_LE, 6743 .dst_reg = BPF_REG_0, 6744 .src_reg = 0, 6745 .off = 0, 6746 .imm = 32, 6747 }, 6748 BPF_EXIT_INSN(), 6749 }, 6750 .errstr = "BPF_END uses reserved fields", 6751 .result = REJECT, 6752 }, 6753 { 6754 "meta access, test1", 6755 .insns = { 6756 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 6757 offsetof(struct xdp_md, data_meta)), 6758 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 6759 offsetof(struct xdp_md, data)), 6760 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 6761 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 6762 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), 6763 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), 6764 BPF_MOV64_IMM(BPF_REG_0, 0), 6765 BPF_EXIT_INSN(), 6766 }, 6767 .result = ACCEPT, 6768 .prog_type = BPF_PROG_TYPE_XDP, 6769 }, 6770 { 6771 "meta access, test2", 6772 .insns = { 6773 
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 6774 offsetof(struct xdp_md, data_meta)), 6775 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 6776 offsetof(struct xdp_md, data)), 6777 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 6778 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 8), 6779 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), 6780 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8), 6781 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1), 6782 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), 6783 BPF_MOV64_IMM(BPF_REG_0, 0), 6784 BPF_EXIT_INSN(), 6785 }, 6786 .result = REJECT, 6787 .errstr = "invalid access to packet, off=-8", 6788 .prog_type = BPF_PROG_TYPE_XDP, 6789 }, 6790 { 6791 "meta access, test3", 6792 .insns = { 6793 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 6794 offsetof(struct xdp_md, data_meta)), 6795 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 6796 offsetof(struct xdp_md, data_end)), 6797 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), 6798 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 6799 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), 6800 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), 6801 BPF_MOV64_IMM(BPF_REG_0, 0), 6802 BPF_EXIT_INSN(), 6803 }, 6804 .result = REJECT, 6805 .errstr = "invalid access to packet", 6806 .prog_type = BPF_PROG_TYPE_XDP, 6807 }, 6808 { 6809 "meta access, test4", 6810 .insns = { 6811 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 6812 offsetof(struct xdp_md, data_meta)), 6813 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 6814 offsetof(struct xdp_md, data_end)), 6815 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, 6816 offsetof(struct xdp_md, data)), 6817 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4), 6818 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 6819 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), 6820 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), 6821 BPF_MOV64_IMM(BPF_REG_0, 0), 6822 BPF_EXIT_INSN(), 6823 }, 6824 .result = REJECT, 6825 .errstr = "invalid access to packet", 6826 .prog_type = BPF_PROG_TYPE_XDP, 6827 }, 6828 { 6829 "meta access, test5", 6830 .insns = { 6831 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 6832 offsetof(struct xdp_md, data_meta)), 6833 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, 6834 offsetof(struct xdp_md, data)), 6835 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3), 6836 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 6837 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_4, 3), 6838 BPF_MOV64_IMM(BPF_REG_2, -8), 6839 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 6840 BPF_FUNC_xdp_adjust_meta), 6841 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0), 6842 BPF_MOV64_IMM(BPF_REG_0, 0), 6843 BPF_EXIT_INSN(), 6844 }, 6845 .result = REJECT, 6846 .errstr = "R3 !read_ok", 6847 .prog_type = BPF_PROG_TYPE_XDP, 6848 }, 6849 { 6850 "meta access, test6", 6851 .insns = { 6852 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 6853 offsetof(struct xdp_md, data_meta)), 6854 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 6855 offsetof(struct xdp_md, data)), 6856 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3), 6857 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 6858 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), 6859 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8), 6860 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 1), 6861 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), 6862 BPF_MOV64_IMM(BPF_REG_0, 0), 6863 BPF_EXIT_INSN(), 6864 }, 6865 .result = REJECT, 6866 .errstr = "invalid access to packet", 6867 .prog_type = BPF_PROG_TYPE_XDP, 6868 }, 6869 { 6870 "meta access, test7", 6871 .insns = { 6872 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 6873 offsetof(struct xdp_md, data_meta)), 6874 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 6875 offsetof(struct xdp_md, data)), 6876 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3), 6877 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), 6878 
BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), 6879 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8), 6880 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1), 6881 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), 6882 BPF_MOV64_IMM(BPF_REG_0, 0), 6883 BPF_EXIT_INSN(), 6884 }, 6885 .result = ACCEPT, 6886 .prog_type = BPF_PROG_TYPE_XDP, 6887 }, 6888 { 6889 "meta access, test8", 6890 .insns = { 6891 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 6892 offsetof(struct xdp_md, data_meta)), 6893 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 6894 offsetof(struct xdp_md, data)), 6895 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), 6896 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF), 6897 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1), 6898 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), 6899 BPF_MOV64_IMM(BPF_REG_0, 0), 6900 BPF_EXIT_INSN(), 6901 }, 6902 .result = ACCEPT, 6903 .prog_type = BPF_PROG_TYPE_XDP, 6904 }, 6905 { 6906 "meta access, test9", 6907 .insns = { 6908 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 6909 offsetof(struct xdp_md, data_meta)), 6910 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 6911 offsetof(struct xdp_md, data)), 6912 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), 6913 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF), 6914 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1), 6915 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1), 6916 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), 6917 BPF_MOV64_IMM(BPF_REG_0, 0), 6918 BPF_EXIT_INSN(), 6919 }, 6920 .result = REJECT, 6921 .errstr = "invalid access to packet", 6922 .prog_type = BPF_PROG_TYPE_XDP, 6923 }, 6924 { 6925 "meta access, test10", 6926 .insns = { 6927 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 6928 offsetof(struct xdp_md, data_meta)), 6929 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 6930 offsetof(struct xdp_md, data)), 6931 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, 6932 offsetof(struct xdp_md, data_end)), 6933 BPF_MOV64_IMM(BPF_REG_5, 42), 6934 BPF_MOV64_IMM(BPF_REG_6, 24), 6935 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8), 6936 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8), 6937 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8), 6938 BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6), 6939 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_5), 6940 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3), 6941 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2), 6942 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8), 6943 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_5, 1), 6944 BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), 6945 BPF_MOV64_IMM(BPF_REG_0, 0), 6946 BPF_EXIT_INSN(), 6947 }, 6948 .result = REJECT, 6949 .errstr = "invalid access to packet", 6950 .prog_type = BPF_PROG_TYPE_XDP, 6951 }, 6952 { 6953 "meta access, test11", 6954 .insns = { 6955 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 6956 offsetof(struct xdp_md, data_meta)), 6957 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 6958 offsetof(struct xdp_md, data)), 6959 BPF_MOV64_IMM(BPF_REG_5, 42), 6960 BPF_MOV64_IMM(BPF_REG_6, 24), 6961 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8), 6962 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8), 6963 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8), 6964 BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6), 6965 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_5), 6966 BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), 6967 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2), 6968 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8), 6969 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_3, 1), 6970 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_5, 0), 6971 BPF_MOV64_IMM(BPF_REG_0, 0), 6972 BPF_EXIT_INSN(), 6973 }, 6974 .result = ACCEPT, 6975 .prog_type = BPF_PROG_TYPE_XDP, 6976 }, 6977 { 6978 "meta access, test12", 6979 .insns = { 6980 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 6981 
offsetof(struct xdp_md, data_meta)), 6982 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 6983 offsetof(struct xdp_md, data)), 6984 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, 6985 offsetof(struct xdp_md, data_end)), 6986 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3), 6987 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16), 6988 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 5), 6989 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0), 6990 BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), 6991 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16), 6992 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 1), 6993 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), 6994 BPF_MOV64_IMM(BPF_REG_0, 0), 6995 BPF_EXIT_INSN(), 6996 }, 6997 .result = ACCEPT, 6998 .prog_type = BPF_PROG_TYPE_XDP, 6999 }, 7000 { 7001 "arithmetic ops make PTR_TO_CTX unusable", 7002 .insns = { 7003 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7004 offsetof(struct __sk_buff, data) - 7005 offsetof(struct __sk_buff, mark)), 7006 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 7007 offsetof(struct __sk_buff, mark)), 7008 BPF_EXIT_INSN(), 7009 }, 7010 .errstr = "dereference of modified ctx ptr R1 off=68+8, ctx+const is allowed, ctx+const+const is not", 7011 .result = REJECT, 7012 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 7013 }, 7014 { 7015 "XDP pkt read, pkt_end mangling, bad access 1", 7016 .insns = { 7017 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 7018 offsetof(struct xdp_md, data)), 7019 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 7020 offsetof(struct xdp_md, data_end)), 7021 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), 7022 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), 7023 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8), 7024 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), 7025 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), 7026 BPF_MOV64_IMM(BPF_REG_0, 0), 7027 BPF_EXIT_INSN(), 7028 }, 7029 .errstr = "R1 offset is outside of the packet", 7030 .result = REJECT, 7031 .prog_type = BPF_PROG_TYPE_XDP, 7032 }, 7033 { 7034 "XDP pkt read, pkt_end mangling, bad access 2", 7035 .insns = { 7036 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 7037 offsetof(struct xdp_md, data)), 7038 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 7039 offsetof(struct xdp_md, data_end)), 7040 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), 7041 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), 7042 BPF_ALU64_IMM(BPF_SUB, BPF_REG_3, 8), 7043 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), 7044 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), 7045 BPF_MOV64_IMM(BPF_REG_0, 0), 7046 BPF_EXIT_INSN(), 7047 }, 7048 .errstr = "R1 offset is outside of the packet", 7049 .result = REJECT, 7050 .prog_type = BPF_PROG_TYPE_XDP, 7051 }, 7052 { 7053 "XDP pkt read, pkt_data' > pkt_end, good access", 7054 .insns = { 7055 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 7056 offsetof(struct xdp_md, data)), 7057 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 7058 offsetof(struct xdp_md, data_end)), 7059 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), 7060 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), 7061 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), 7062 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), 7063 BPF_MOV64_IMM(BPF_REG_0, 0), 7064 BPF_EXIT_INSN(), 7065 }, 7066 .result = ACCEPT, 7067 .prog_type = BPF_PROG_TYPE_XDP, 7068 }, 7069 { 7070 "XDP pkt read, pkt_data' > pkt_end, bad access 1", 7071 .insns = { 7072 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 7073 offsetof(struct xdp_md, data)), 7074 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 7075 offsetof(struct xdp_md, data_end)), 7076 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), 7077 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), 7078 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), 7079 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4), 7080 
BPF_MOV64_IMM(BPF_REG_0, 0), 7081 BPF_EXIT_INSN(), 7082 }, 7083 .errstr = "R1 offset is outside of the packet", 7084 .result = REJECT, 7085 .prog_type = BPF_PROG_TYPE_XDP, 7086 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 7087 }, 7088 { 7089 "XDP pkt read, pkt_data' > pkt_end, bad access 2", 7090 .insns = { 7091 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 7092 offsetof(struct xdp_md, data)), 7093 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 7094 offsetof(struct xdp_md, data_end)), 7095 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), 7096 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), 7097 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0), 7098 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), 7099 BPF_MOV64_IMM(BPF_REG_0, 0), 7100 BPF_EXIT_INSN(), 7101 }, 7102 .errstr = "R1 offset is outside of the packet", 7103 .result = REJECT, 7104 .prog_type = BPF_PROG_TYPE_XDP, 7105 }, 7106 { 7107 "XDP pkt read, pkt_end > pkt_data', good access", 7108 .insns = { 7109 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 7110 offsetof(struct xdp_md, data)), 7111 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 7112 offsetof(struct xdp_md, data_end)), 7113 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), 7114 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), 7115 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), 7116 BPF_JMP_IMM(BPF_JA, 0, 0, 1), 7117 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5), 7118 BPF_MOV64_IMM(BPF_REG_0, 0), 7119 BPF_EXIT_INSN(), 7120 }, 7121 .result = ACCEPT, 7122 .prog_type = BPF_PROG_TYPE_XDP, 7123 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 7124 }, 7125 { 7126 "XDP pkt read, pkt_end > pkt_data', bad access 1", 7127 .insns = { 7128 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 7129 offsetof(struct xdp_md, data)), 7130 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 7131 offsetof(struct xdp_md, data_end)), 7132 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), 7133 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), 7134 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), 7135 BPF_JMP_IMM(BPF_JA, 0, 0, 1), 7136 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), 7137 BPF_MOV64_IMM(BPF_REG_0, 0), 7138 BPF_EXIT_INSN(), 7139 }, 7140 .errstr = "R1 offset is outside of the packet", 7141 .result = REJECT, 7142 .prog_type = BPF_PROG_TYPE_XDP, 7143 }, 7144 { 7145 "XDP pkt read, pkt_end > pkt_data', bad access 2", 7146 .insns = { 7147 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 7148 offsetof(struct xdp_md, data)), 7149 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 7150 offsetof(struct xdp_md, data_end)), 7151 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), 7152 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), 7153 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), 7154 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), 7155 BPF_MOV64_IMM(BPF_REG_0, 0), 7156 BPF_EXIT_INSN(), 7157 }, 7158 .errstr = "R1 offset is outside of the packet", 7159 .result = REJECT, 7160 .prog_type = BPF_PROG_TYPE_XDP, 7161 }, 7162 { 7163 "XDP pkt read, pkt_data' < pkt_end, good access", 7164 .insns = { 7165 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 7166 offsetof(struct xdp_md, data)), 7167 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 7168 offsetof(struct xdp_md, data_end)), 7169 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), 7170 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), 7171 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), 7172 BPF_JMP_IMM(BPF_JA, 0, 0, 1), 7173 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5), 7174 BPF_MOV64_IMM(BPF_REG_0, 0), 7175 BPF_EXIT_INSN(), 7176 }, 7177 .result = ACCEPT, 7178 .prog_type = BPF_PROG_TYPE_XDP, 7179 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 7180 }, 7181 { 7182 "XDP pkt read, pkt_data' < pkt_end, bad access 1", 7183 .insns = { 7184 BPF_LDX_MEM(BPF_W, BPF_REG_2, 
	{
		"XDP pkt read, pkt_data' < pkt_end, bad access 1",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data_end)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R1 offset is outside of the packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		"XDP pkt read, pkt_data' < pkt_end, bad access 2",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data_end)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R1 offset is outside of the packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		"XDP pkt read, pkt_end < pkt_data', good access",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data_end)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		"XDP pkt read, pkt_end < pkt_data', bad access 1",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data_end)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R1 offset is outside of the packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_XDP,
		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
	},
	{
		"XDP pkt read, pkt_end < pkt_data', bad access 2",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data_end)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R1 offset is outside of the packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		"XDP pkt read, pkt_data' >= pkt_end, good access",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data_end)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_XDP,
		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
	},
	{
		"XDP pkt read, pkt_data' >= pkt_end, bad access 1",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data_end)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R1 offset is outside of the packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		"XDP pkt read, pkt_data' >= pkt_end, bad access 2",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data_end)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R1 offset is outside of the packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_XDP,
		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
	},
	{
		"XDP pkt read, pkt_end >= pkt_data', good access",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data_end)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		"XDP pkt read, pkt_end >= pkt_data', bad access 1",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data_end)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R1 offset is outside of the packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_XDP,
		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
	},
	{
		"XDP pkt read, pkt_end >= pkt_data', bad access 2",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data_end)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R1 offset is outside of the packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		"XDP pkt read, pkt_data' <= pkt_end, good access",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data_end)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		"XDP pkt read, pkt_data' <= pkt_end, bad access 1",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data_end)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R1 offset is outside of the packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_XDP,
		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
	},
	{
		"XDP pkt read, pkt_data' <= pkt_end, bad access 2",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data_end)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R1 offset is outside of the packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		"XDP pkt read, pkt_end <= pkt_data', good access",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data_end)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_XDP,
		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
	},
	{
		"XDP pkt read, pkt_end <= pkt_data', bad access 1",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data_end)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R1 offset is outside of the packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		"XDP pkt read, pkt_end <= pkt_data', bad access 2",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data_end)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R1 offset is outside of the packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_XDP,
		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
	},
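	/* The tests below mirror the pkt_data/pkt_end range checks above,
	 * but for the xdp_md data_meta/data pointer pair, so the verifier's
	 * packet bounds tracking is also exercised for metadata pointers.
	 */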
	{
		"XDP pkt read, pkt_meta' > pkt_data, good access",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data_meta)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		"XDP pkt read, pkt_meta' > pkt_data, bad access 1",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data_meta)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R1 offset is outside of the packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_XDP,
		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
	},
	{
		"XDP pkt read, pkt_meta' > pkt_data, bad access 2",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data_meta)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R1 offset is outside of the packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		"XDP pkt read, pkt_data > pkt_meta', good access",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data_meta)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_XDP,
		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
	},
	{
		"XDP pkt read, pkt_data > pkt_meta', bad access 1",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data_meta)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R1 offset is outside of the packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		"XDP pkt read, pkt_data > pkt_meta', bad access 2",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data_meta)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R1 offset is outside of the packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		"XDP pkt read, pkt_meta' < pkt_data, good access",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data_meta)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_XDP,
		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
	},
	{
		"XDP pkt read, pkt_meta' < pkt_data, bad access 1",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data_meta)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R1 offset is outside of the packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		"XDP pkt read, pkt_meta' < pkt_data, bad access 2",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data_meta)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R1 offset is outside of the packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		"XDP pkt read, pkt_data < pkt_meta', good access",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data_meta)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		"XDP pkt read, pkt_data < pkt_meta', bad access 1",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data_meta)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R1 offset is outside of the packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_XDP,
		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
	},
	{
		"XDP pkt read, pkt_data < pkt_meta', bad access 2",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data_meta)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R1 offset is outside of the packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		"XDP pkt read, pkt_meta' >= pkt_data, good access",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data_meta)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_XDP,
		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
	},
	{
		"XDP pkt read, pkt_meta' >= pkt_data, bad access 1",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data_meta)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R1 offset is outside of the packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		"XDP pkt read, pkt_meta' >= pkt_data, bad access 2",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data_meta)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R1 offset is outside of the packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_XDP,
		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
	},
	{
		"XDP pkt read, pkt_data >= pkt_meta', good access",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data_meta)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		"XDP pkt read, pkt_data >= pkt_meta', bad access 1",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data_meta)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R1 offset is outside of the packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_XDP,
		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
	},
	{
		"XDP pkt read, pkt_data >= pkt_meta', bad access 2",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data_meta)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R1 offset is outside of the packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		"XDP pkt read, pkt_meta' <= pkt_data, good access",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data_meta)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		"XDP pkt read, pkt_meta' <= pkt_data, bad access 1",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data_meta)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R1 offset is outside of the packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_XDP,
		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
	},
	{
		"XDP pkt read, pkt_meta' <= pkt_data, bad access 2",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data_meta)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R1 offset is outside of the packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		"XDP pkt read, pkt_data <= pkt_meta', good access",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data_meta)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_XDP,
		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
	},
	{
		"XDP pkt read, pkt_data <= pkt_meta', bad access 1",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data_meta)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R1 offset is outside of the packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		"XDP pkt read, pkt_data <= pkt_meta', bad access 2",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
				    offsetof(struct xdp_md, data_meta)),
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
				    offsetof(struct xdp_md, data)),
			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R1 offset is outside of the packet",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_XDP,
		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
	},
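	/* The "bpf_exit with invalid return code" tests below check that the
	 * verifier tracks the possible range of R0 at BPF_EXIT for program
	 * types with a restricted return value; BPF_PROG_TYPE_CGROUP_SOCK is
	 * expected to return either 0 or 1.  The (value; mask) pair in the
	 * expected error strings reflects the verifier's tnum tracking of R0.
	 */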
	{
		"bpf_exit with invalid return code. test1",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
			BPF_EXIT_INSN(),
		},
		.errstr = "R0 has value (0x0; 0xffffffff)",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
	},
	{
		"bpf_exit with invalid return code. test2",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
	},
	{
		"bpf_exit with invalid return code. test3",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 3),
			BPF_EXIT_INSN(),
		},
		.errstr = "R0 has value (0x0; 0x3)",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
	},
	{
		"bpf_exit with invalid return code. test4",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 1),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
	},
	{
		"bpf_exit with invalid return code. test5",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 2),
			BPF_EXIT_INSN(),
		},
		.errstr = "R0 has value (0x2; 0x0)",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
	},
	{
		"bpf_exit with invalid return code. test6",
		.insns = {
			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
			BPF_EXIT_INSN(),
		},
		.errstr = "R0 is not a known value (ctx)",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
	},
	{
		"bpf_exit with invalid return code. test7",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4),
			BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_2),
			BPF_EXIT_INSN(),
		},
		.errstr = "R0 has unknown scalar value",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
	},
};

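/* The test programs above are declared as fixed-size MAX_INSNS arrays, so
 * determine the real program length by scanning backwards for the last
 * instruction that is not an all-zero filler entry.
 */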
static int probe_filter_length(const struct bpf_insn *fp)
{
	int len;

	for (len = MAX_INSNS - 1; len > 0; --len)
		if (fp[len].code != 0 || fp[len].imm != 0)
			break;
	return len + 1;
}

static int create_map(uint32_t size_value, uint32_t max_elem)
{
	int fd;

	fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
			    size_value, max_elem, BPF_F_NO_PREALLOC);
	if (fd < 0)
		printf("Failed to create hash map '%s'!\n", strerror(errno));

	return fd;
}

static int create_prog_array(void)
{
	int fd;

	fd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
			    sizeof(int), 4, 0);
	if (fd < 0)
		printf("Failed to create prog array '%s'!\n", strerror(errno));

	return fd;
}

static int create_map_in_map(void)
{
	int inner_map_fd, outer_map_fd;

	inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
				      sizeof(int), 1, 0);
	if (inner_map_fd < 0) {
		printf("Failed to create array '%s'!\n", strerror(errno));
		return inner_map_fd;
	}

	outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
					     sizeof(int), inner_map_fd, 1, 0);
	if (outer_map_fd < 0)
		printf("Failed to create array of maps '%s'!\n",
		       strerror(errno));

	close(inner_map_fd);

	return outer_map_fd;
}

static char bpf_vlog[32768];

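/* Patch map file descriptors into the test program before loading it: each
 * fixup_* array in struct bpf_test lists the instruction indexes whose imm
 * field must be replaced with the fd of a freshly created map of the
 * matching kind (hash map, prog array or map-in-map).  For example, a test
 * with .fixup_map1 = { 3 } gets the hash map fd written into insns[3].imm.
 */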
static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
			  int *map_fds)
{
	int *fixup_map1 = test->fixup_map1;
	int *fixup_map2 = test->fixup_map2;
	int *fixup_prog = test->fixup_prog;
	int *fixup_map_in_map = test->fixup_map_in_map;

	/* Allocating HTs with 1 elem is fine here, since we only exercise
	 * the verifier and never do a runtime lookup, so the only thing
	 * that really matters is the value size in this case.
	 */
	if (*fixup_map1) {
		map_fds[0] = create_map(sizeof(long long), 1);
		do {
			prog[*fixup_map1].imm = map_fds[0];
			fixup_map1++;
		} while (*fixup_map1);
	}

	if (*fixup_map2) {
		map_fds[1] = create_map(sizeof(struct test_val), 1);
		do {
			prog[*fixup_map2].imm = map_fds[1];
			fixup_map2++;
		} while (*fixup_map2);
	}

	if (*fixup_prog) {
		map_fds[2] = create_prog_array();
		do {
			prog[*fixup_prog].imm = map_fds[2];
			fixup_prog++;
		} while (*fixup_prog);
	}

	if (*fixup_map_in_map) {
		map_fds[3] = create_map_in_map();
		do {
			prog[*fixup_map_in_map].imm = map_fds[3];
			fixup_map_in_map++;
		} while (*fixup_map_in_map);
	}
}

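/* Run a single test case: create any maps it needs, load the program via
 * bpf_verify_program() (with strict alignment if F_LOAD_WITH_STRICT_ALIGNMENT
 * is set) and compare the outcome against the expected result and error
 * string, using the *_unpriv variants when running without CAP_SYS_ADMIN.
 * Rejections caused only by "Unknown alignment." on architectures without
 * efficient unaligned access are still counted as OK.
 */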
static void do_test_single(struct bpf_test *test, bool unpriv,
			   int *passes, int *errors)
{
	int fd_prog, expected_ret, reject_from_alignment;
	struct bpf_insn *prog = test->insns;
	int prog_len = probe_filter_length(prog);
	int prog_type = test->prog_type;
	int map_fds[MAX_NR_MAPS];
	const char *expected_err;
	int i;

	for (i = 0; i < MAX_NR_MAPS; i++)
		map_fds[i] = -1;

	do_test_fixup(test, prog, map_fds);

	fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
				     prog, prog_len,
				     test->flags & F_LOAD_WITH_STRICT_ALIGNMENT,
				     "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);

	expected_ret = unpriv && test->result_unpriv != UNDEF ?
		       test->result_unpriv : test->result;
	expected_err = unpriv && test->errstr_unpriv ?
		       test->errstr_unpriv : test->errstr;

	reject_from_alignment = fd_prog < 0 &&
				(test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
				strstr(bpf_vlog, "Unknown alignment.");
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (reject_from_alignment) {
		printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",
		       strerror(errno));
		goto fail_log;
	}
#endif
	if (expected_ret == ACCEPT) {
		if (fd_prog < 0 && !reject_from_alignment) {
			printf("FAIL\nFailed to load prog '%s'!\n",
			       strerror(errno));
			goto fail_log;
		}
	} else {
		if (fd_prog >= 0) {
			printf("FAIL\nUnexpected success to load!\n");
			goto fail_log;
		}
		if (!strstr(bpf_vlog, expected_err) && !reject_from_alignment) {
			printf("FAIL\nUnexpected error message!\n");
			goto fail_log;
		}
	}

	(*passes)++;
	printf("OK%s\n", reject_from_alignment ?
	       " (NOTE: reject due to unknown alignment)" : "");
close_fds:
	close(fd_prog);
	for (i = 0; i < MAX_NR_MAPS; i++)
		close(map_fds[i]);
	sched_yield();
	return;
fail_log:
	(*errors)++;
	printf("%s", bpf_vlog);
	goto close_fds;
}

static bool is_admin(void)
{
	cap_t caps;
	cap_flag_value_t sysadmin = CAP_CLEAR;
	const cap_value_t cap_val = CAP_SYS_ADMIN;

#ifdef CAP_IS_SUPPORTED
	if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
		perror("cap_get_flag");
		return false;
	}
#endif
	caps = cap_get_proc();
	if (!caps) {
		perror("cap_get_proc");
		return false;
	}
	if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
		perror("cap_get_flag");
	if (cap_free(caps))
		perror("cap_free");
	return (sysadmin == CAP_SET);
}

static int set_admin(bool admin)
{
	cap_t caps;
	const cap_value_t cap_val = CAP_SYS_ADMIN;
	int ret = -1;

	caps = cap_get_proc();
	if (!caps) {
		perror("cap_get_proc");
		return -1;
	}
	if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
			 admin ? CAP_SET : CAP_CLEAR)) {
		perror("cap_set_flag");
		goto out;
	}
	if (cap_set_proc(caps)) {
		perror("cap_set_proc");
		goto out;
	}
	ret = 0;
out:
	if (cap_free(caps))
		perror("cap_free");
	return ret;
}

static int do_test(bool unpriv, unsigned int from, unsigned int to)
{
	int i, passes = 0, errors = 0;

	for (i = from; i < to; i++) {
		struct bpf_test *test = &tests[i];

		/* We skip the unprivileged run right away for program
		 * types that are not supported by non-root.
		 */
		if (!test->prog_type) {
			if (!unpriv)
				set_admin(false);
			printf("#%d/u %s ", i, test->descr);
			do_test_single(test, true, &passes, &errors);
			if (!unpriv)
				set_admin(true);
		}

		if (!unpriv) {
			printf("#%d/p %s ", i, test->descr);
			do_test_single(test, false, &passes, &errors);
		}
	}

	printf("Summary: %d PASSED, %d FAILED\n", passes, errors);
	return errors ? EXIT_FAILURE : EXIT_SUCCESS;
}

int main(int argc, char **argv)
{
	struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
	struct rlimit rlim = { 1 << 20, 1 << 20 };
	unsigned int from = 0, to = ARRAY_SIZE(tests);
	bool unpriv = !is_admin();

	if (argc == 3) {
		unsigned int l = atoi(argv[argc - 2]);
		unsigned int u = atoi(argv[argc - 1]);

		if (l < to && u < to) {
			from = l;
			to = u + 1;
		}
	} else if (argc == 2) {
		unsigned int t = atoi(argv[argc - 1]);

		if (t < to) {
			from = t;
			to = t + 1;
		}
	}

	setrlimit(RLIMIT_MEMLOCK, unpriv ? &rlim : &rinf);
	return do_test(unpriv, from, to);
}