/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <net/netlink.h>
#include <linux/file.h>
#include <linux/vmalloc.h>

/* bpf_check() is a static code analyzer that walks eBPF program
 * instruction by instruction and updates register/stack state.
 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
 *
 * The first pass is depth-first-search to check that the program is a DAG.
 * It rejects the following programs:
 * - larger than BPF_MAXINSNS insns
 * - if loop is present (detected via back-edge)
 * - unreachable insns exist (shouldn't be a forest. program = one function)
 * - out of bounds or malformed jumps
 * The second pass is all possible path descent from the 1st insn.
 * Since it's analyzing all paths through the program, the length of the
 * analysis is limited to 32k insn, which may be hit even if total number of
 * insn is less than 4K, but there are too many branches that change stack/regs.
 * Number of 'branches to be analyzed' is limited to 1k
 *
 * On entry to each instruction, each register has a type, and the instruction
 * changes the types of the registers depending on instruction semantics.
 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
 * copied to R1.
 *
 * All registers are 64-bit.
 * R0 - return register
 * R1-R5 argument passing registers
 * R6-R9 callee saved registers
 * R10 - frame pointer read-only
 *
 * At the start of BPF program the register R1 contains a pointer to bpf_context
 * and has type PTR_TO_CTX.
 *
 * Verifier tracks arithmetic operations on pointers in case:
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
 * 1st insn copies R10 (which has FRAME_PTR) type into R1
 * and 2nd arithmetic instruction is pattern matched to recognize
 * that it wants to construct a pointer to some element within stack.
 * So after 2nd insn, the register R1 has type PTR_TO_STACK
 * (and -20 constant is saved for further stack bounds checking).
 * Meaning that this reg is a pointer to stack plus known immediate constant.
 *
 * Most of the time the registers have UNKNOWN_VALUE type, which
 * means the register has some value, but it's not a valid pointer.
 * (like pointer plus pointer becomes UNKNOWN_VALUE type)
 *
 * When verifier sees load or store instructions the type of base register
 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, FRAME_PTR. These are three pointer
 * types recognized by check_mem_access() function.
 *
 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
 * and the range of [ptr, ptr + map's value_size) is accessible.
 *
 * registers used to pass values to function calls are checked against
 * function argument constraints.
 *
 * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
 * It means that the register type passed to this function must be
 * PTR_TO_STACK and it will be used inside the function as
 * 'pointer to map element key'
 *
 * For example the argument constraints for bpf_map_lookup_elem():
 *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
 *   .arg1_type = ARG_CONST_MAP_PTR,
 *   .arg2_type = ARG_PTR_TO_MAP_KEY,
 *
 * ret_type says that this function returns 'pointer to map elem value or null'
 * function expects 1st argument to be a const pointer to 'struct bpf_map' and
 * 2nd argument should be a pointer to stack, which will be used inside
 * the helper function as a pointer to map element key.
 *
 * On the kernel side the helper function looks like:
 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 * {
 *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
 *    void *key = (void *) (unsigned long) r2;
 *    void *value;
 *
 *    here kernel can access 'key' and 'map' pointers safely, knowing that
 *    [key, key + map->key_size) bytes are valid and were initialized on
 *    the stack of eBPF program.
 * }
 *
 * Corresponding eBPF program may look like:
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 * here verifier looks at prototype of map_lookup_elem() and sees:
 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
 *
 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
 * and were initialized prior to this call.
 * If it's ok, then verifier allows this BPF_CALL insn and looks at
 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
 * returns either pointer to map value or NULL.
 *
 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
 * insn, the register holding that pointer in the true branch changes state to
 * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
 * branch. See check_cond_jmp_op().
 *
 * After the call R0 is set to return type of the function and registers R1-R5
 * are set to NOT_INIT to indicate that they are no longer readable.
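 *
 * For illustration (this sequence is not part of the original comment,
 * just a sketch using the uapi insn macros), a typical NULL check on the
 * lookup result may look like:
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 *    BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), // R0 is PTR_TO_MAP_VALUE_OR_NULL
 *    BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),  // ok: R0 is PTR_TO_MAP_VALUE here
 *    BPF_MOV64_IMM(BPF_REG_0, 0),
 *    BPF_EXIT_INSN(),
 * the store is only reached when R0 != NULL, so check_mem_access() sees
 * PTR_TO_MAP_VALUE and allows accesses within [R0, R0 + value_size).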
 */

struct reg_state {
	enum bpf_reg_type type;
	union {
		/* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */
		s64 imm;

		/* valid when type == PTR_TO_PACKET* */
		struct {
			u32 id;
			u16 off;
			u16 range;
		};

		/* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
		 *   PTR_TO_MAP_VALUE_OR_NULL
		 */
		struct bpf_map *map_ptr;
	};
};

enum bpf_stack_slot_type {
	STACK_INVALID,    /* nothing was stored in this stack slot */
	STACK_SPILL,      /* register spilled into stack */
	STACK_MISC	  /* BPF program wrote some data into this slot */
};

#define BPF_REG_SIZE 8	/* size of eBPF register in bytes */

/* state of the program:
 * type of all registers and stack info
 */
struct verifier_state {
	struct reg_state regs[MAX_BPF_REG];
	u8 stack_slot_type[MAX_BPF_STACK];
	struct reg_state spilled_regs[MAX_BPF_STACK / BPF_REG_SIZE];
};

/* linked list of verifier states used to prune search */
struct verifier_state_list {
	struct verifier_state state;
	struct verifier_state_list *next;
};

/* verifier_state + insn_idx are pushed to stack when branch is encountered */
struct verifier_stack_elem {
	/* verifier state is 'st'
	 * before processing instruction 'insn_idx'
	 * and after processing instruction 'prev_insn_idx'
	 */
	struct verifier_state st;
	int insn_idx;
	int prev_insn_idx;
	struct verifier_stack_elem *next;
};

#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */

/* single container for all structs
 * one verifier_env per bpf_check() call
 */
struct verifier_env {
	struct bpf_prog *prog;		/* eBPF program being verified */
	struct verifier_stack_elem *head; /* stack of verifier states to be processed */
	int stack_size;			/* number of states to be processed */
	struct verifier_state cur_state; /* current verifier state */
	struct verifier_state_list **explored_states; /* search pruning optimization */
	struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of maps used by eBPF program */
	u32 used_map_cnt;		/* number of used maps */
	u32 id_gen;			/* used to generate unique reg IDs */
	bool allow_ptr_leaks;
};

#define BPF_COMPLEXITY_LIMIT_INSNS	65536
#define BPF_COMPLEXITY_LIMIT_STACK	1024

struct bpf_call_arg_meta {
	struct bpf_map *map_ptr;
	bool raw_mode;
	int regno;
	int access_size;
};

/* verbose verifier prints what it's seeing
 * bpf_check() is called under lock, so no race to access these global vars
 */
static u32 log_level, log_size, log_len;
static char *log_buf;

static DEFINE_MUTEX(bpf_verifier_lock);

/* log_level controls verbosity level of eBPF verifier.
 * verbose() is used to dump the verification trace to the log, so the user
 * can figure out what's wrong with the program
 */
static __printf(1, 2) void verbose(const char *fmt, ...)
{
	va_list args;

	if (log_level == 0 || log_len >= log_size - 1)
		return;

	va_start(args, fmt);
	log_len += vscnprintf(log_buf + log_len, log_size - log_len, fmt, args);
	va_end(args);
}

/* string representation of 'enum bpf_reg_type' */
static const char * const reg_type_str[] = {
	[NOT_INIT]		= "?",
	[UNKNOWN_VALUE]		= "inv",
	[PTR_TO_CTX]		= "ctx",
	[CONST_PTR_TO_MAP]	= "map_ptr",
	[PTR_TO_MAP_VALUE]	= "map_value",
	[PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
	[FRAME_PTR]		= "fp",
	[PTR_TO_STACK]		= "fp",
	[CONST_IMM]		= "imm",
	[PTR_TO_PACKET]		= "pkt",
	[PTR_TO_PACKET_END]	= "pkt_end",
};

static void print_verifier_state(struct verifier_state *state)
{
	struct reg_state *reg;
	enum bpf_reg_type t;
	int i;

	for (i = 0; i < MAX_BPF_REG; i++) {
		reg = &state->regs[i];
		t = reg->type;
		if (t == NOT_INIT)
			continue;
		verbose(" R%d=%s", i, reg_type_str[t]);
		if (t == CONST_IMM || t == PTR_TO_STACK)
			verbose("%lld", reg->imm);
		else if (t == PTR_TO_PACKET)
			verbose("(id=%d,off=%d,r=%d)",
				reg->id, reg->off, reg->range);
		else if (t == UNKNOWN_VALUE && reg->imm)
			verbose("%lld", reg->imm);
		else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE ||
			 t == PTR_TO_MAP_VALUE_OR_NULL)
			verbose("(ks=%d,vs=%d)",
				reg->map_ptr->key_size,
				reg->map_ptr->value_size);
	}
	for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
		if (state->stack_slot_type[i] == STACK_SPILL)
			verbose(" fp%d=%s", -MAX_BPF_STACK + i,
				reg_type_str[state->spilled_regs[i / BPF_REG_SIZE].type]);
	}
	verbose("\n");
}

static const char *const bpf_class_string[] = {
	[BPF_LD]    = "ld",
	[BPF_LDX]   = "ldx",
	[BPF_ST]    = "st",
	[BPF_STX]   = "stx",
	[BPF_ALU]   = "alu",
	[BPF_JMP]   = "jmp",
	[BPF_RET]   = "BUG",
	[BPF_ALU64] = "alu64",
};

static const char *const bpf_alu_string[16] = {
	[BPF_ADD >> 4]  = "+=",
	[BPF_SUB >> 4]  = "-=",
	[BPF_MUL >> 4]  = "*=",
	[BPF_DIV >> 4]  = "/=",
	[BPF_OR  >> 4]  = "|=",
	[BPF_AND >> 4]  = "&=",
	[BPF_LSH >> 4]  = "<<=",
	[BPF_RSH >> 4]  = ">>=",
	[BPF_NEG >> 4]  = "neg",
	[BPF_MOD >> 4]  = "%=",
	[BPF_XOR >> 4]  = "^=",
	[BPF_MOV >> 4]  = "=",
	[BPF_ARSH >> 4] = "s>>=",
	[BPF_END >> 4]  = "endian",
};

static const char *const bpf_ldst_string[] = {
	[BPF_W >> 3]  = "u32",
	[BPF_H >> 3]  = "u16",
	[BPF_B >> 3]  = "u8",
	[BPF_DW >> 3] = "u64",
};

static const char *const bpf_jmp_string[16] = {
	[BPF_JA >> 4]   = "jmp",
	[BPF_JEQ >> 4]  = "==",
	[BPF_JGT >> 4]  = ">",
	[BPF_JGE >> 4]  = ">=",
	[BPF_JSET >> 4] = "&",
	[BPF_JNE >> 4]  = "!=",
	[BPF_JSGT >> 4] = "s>",
	[BPF_JSGE >> 4] = "s>=",
	[BPF_CALL >> 4] = "call",
	[BPF_EXIT >> 4] = "exit",
};

static void print_bpf_insn(struct bpf_insn *insn)
{
	u8 class = BPF_CLASS(insn->code);

	if (class == BPF_ALU || class == BPF_ALU64) {
		if (BPF_SRC(insn->code) == BPF_X)
			verbose("(%02x) %sr%d %s %sr%d\n",
				insn->code, class == BPF_ALU ? "(u32) " : "",
				insn->dst_reg,
				bpf_alu_string[BPF_OP(insn->code) >> 4],
				class == BPF_ALU ? "(u32) " : "",
				insn->src_reg);
		else
			verbose("(%02x) %sr%d %s %s%d\n",
				insn->code, class == BPF_ALU ? "(u32) " : "",
				insn->dst_reg,
				bpf_alu_string[BPF_OP(insn->code) >> 4],
				class == BPF_ALU ?
"(u32) " : "", 349 insn->imm); 350 } else if (class == BPF_STX) { 351 if (BPF_MODE(insn->code) == BPF_MEM) 352 verbose("(%02x) *(%s *)(r%d %+d) = r%d\n", 353 insn->code, 354 bpf_ldst_string[BPF_SIZE(insn->code) >> 3], 355 insn->dst_reg, 356 insn->off, insn->src_reg); 357 else if (BPF_MODE(insn->code) == BPF_XADD) 358 verbose("(%02x) lock *(%s *)(r%d %+d) += r%d\n", 359 insn->code, 360 bpf_ldst_string[BPF_SIZE(insn->code) >> 3], 361 insn->dst_reg, insn->off, 362 insn->src_reg); 363 else 364 verbose("BUG_%02x\n", insn->code); 365 } else if (class == BPF_ST) { 366 if (BPF_MODE(insn->code) != BPF_MEM) { 367 verbose("BUG_st_%02x\n", insn->code); 368 return; 369 } 370 verbose("(%02x) *(%s *)(r%d %+d) = %d\n", 371 insn->code, 372 bpf_ldst_string[BPF_SIZE(insn->code) >> 3], 373 insn->dst_reg, 374 insn->off, insn->imm); 375 } else if (class == BPF_LDX) { 376 if (BPF_MODE(insn->code) != BPF_MEM) { 377 verbose("BUG_ldx_%02x\n", insn->code); 378 return; 379 } 380 verbose("(%02x) r%d = *(%s *)(r%d %+d)\n", 381 insn->code, insn->dst_reg, 382 bpf_ldst_string[BPF_SIZE(insn->code) >> 3], 383 insn->src_reg, insn->off); 384 } else if (class == BPF_LD) { 385 if (BPF_MODE(insn->code) == BPF_ABS) { 386 verbose("(%02x) r0 = *(%s *)skb[%d]\n", 387 insn->code, 388 bpf_ldst_string[BPF_SIZE(insn->code) >> 3], 389 insn->imm); 390 } else if (BPF_MODE(insn->code) == BPF_IND) { 391 verbose("(%02x) r0 = *(%s *)skb[r%d + %d]\n", 392 insn->code, 393 bpf_ldst_string[BPF_SIZE(insn->code) >> 3], 394 insn->src_reg, insn->imm); 395 } else if (BPF_MODE(insn->code) == BPF_IMM) { 396 verbose("(%02x) r%d = 0x%x\n", 397 insn->code, insn->dst_reg, insn->imm); 398 } else { 399 verbose("BUG_ld_%02x\n", insn->code); 400 return; 401 } 402 } else if (class == BPF_JMP) { 403 u8 opcode = BPF_OP(insn->code); 404 405 if (opcode == BPF_CALL) { 406 verbose("(%02x) call %d\n", insn->code, insn->imm); 407 } else if (insn->code == (BPF_JMP | BPF_JA)) { 408 verbose("(%02x) goto pc%+d\n", 409 insn->code, insn->off); 410 } else if (insn->code == (BPF_JMP | BPF_EXIT)) { 411 verbose("(%02x) exit\n", insn->code); 412 } else if (BPF_SRC(insn->code) == BPF_X) { 413 verbose("(%02x) if r%d %s r%d goto pc%+d\n", 414 insn->code, insn->dst_reg, 415 bpf_jmp_string[BPF_OP(insn->code) >> 4], 416 insn->src_reg, insn->off); 417 } else { 418 verbose("(%02x) if r%d %s 0x%x goto pc%+d\n", 419 insn->code, insn->dst_reg, 420 bpf_jmp_string[BPF_OP(insn->code) >> 4], 421 insn->imm, insn->off); 422 } 423 } else { 424 verbose("(%02x) %s\n", insn->code, bpf_class_string[class]); 425 } 426 } 427 428 static int pop_stack(struct verifier_env *env, int *prev_insn_idx) 429 { 430 struct verifier_stack_elem *elem; 431 int insn_idx; 432 433 if (env->head == NULL) 434 return -1; 435 436 memcpy(&env->cur_state, &env->head->st, sizeof(env->cur_state)); 437 insn_idx = env->head->insn_idx; 438 if (prev_insn_idx) 439 *prev_insn_idx = env->head->prev_insn_idx; 440 elem = env->head->next; 441 kfree(env->head); 442 env->head = elem; 443 env->stack_size--; 444 return insn_idx; 445 } 446 447 static struct verifier_state *push_stack(struct verifier_env *env, int insn_idx, 448 int prev_insn_idx) 449 { 450 struct verifier_stack_elem *elem; 451 452 elem = kmalloc(sizeof(struct verifier_stack_elem), GFP_KERNEL); 453 if (!elem) 454 goto err; 455 456 memcpy(&elem->st, &env->cur_state, sizeof(env->cur_state)); 457 elem->insn_idx = insn_idx; 458 elem->prev_insn_idx = prev_insn_idx; 459 elem->next = env->head; 460 env->head = elem; 461 env->stack_size++; 462 if (env->stack_size > 
	    BPF_COMPLEXITY_LIMIT_STACK) {
		verbose("BPF program is too complex\n");
		goto err;
	}
	return &elem->st;
err:
	/* pop all elements and return */
	while (pop_stack(env, NULL) >= 0);
	return NULL;
}

#define CALLER_SAVED_REGS 6
static const int caller_saved[CALLER_SAVED_REGS] = {
	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
};

static void init_reg_state(struct reg_state *regs)
{
	int i;

	for (i = 0; i < MAX_BPF_REG; i++) {
		regs[i].type = NOT_INIT;
		regs[i].imm = 0;
	}

	/* frame pointer */
	regs[BPF_REG_FP].type = FRAME_PTR;

	/* 1st arg to a function */
	regs[BPF_REG_1].type = PTR_TO_CTX;
}

static void mark_reg_unknown_value(struct reg_state *regs, u32 regno)
{
	BUG_ON(regno >= MAX_BPF_REG);
	regs[regno].type = UNKNOWN_VALUE;
	regs[regno].imm = 0;
}

enum reg_arg_type {
	SRC_OP,		/* register is used as source operand */
	DST_OP,		/* register is used as destination operand */
	DST_OP_NO_MARK	/* same as above, check only, don't mark */
};

static int check_reg_arg(struct reg_state *regs, u32 regno,
			 enum reg_arg_type t)
{
	if (regno >= MAX_BPF_REG) {
		verbose("R%d is invalid\n", regno);
		return -EINVAL;
	}

	if (t == SRC_OP) {
		/* check whether register used as source operand can be read */
		if (regs[regno].type == NOT_INIT) {
			verbose("R%d !read_ok\n", regno);
			return -EACCES;
		}
	} else {
		/* check whether register used as dest operand can be written to */
		if (regno == BPF_REG_FP) {
			verbose("frame pointer is read only\n");
			return -EACCES;
		}
		if (t == DST_OP)
			mark_reg_unknown_value(regs, regno);
	}
	return 0;
}

static int bpf_size_to_bytes(int bpf_size)
{
	if (bpf_size == BPF_W)
		return 4;
	else if (bpf_size == BPF_H)
		return 2;
	else if (bpf_size == BPF_B)
		return 1;
	else if (bpf_size == BPF_DW)
		return 8;
	else
		return -EINVAL;
}

static bool is_spillable_regtype(enum bpf_reg_type type)
{
	switch (type) {
	case PTR_TO_MAP_VALUE:
	case PTR_TO_MAP_VALUE_OR_NULL:
	case PTR_TO_STACK:
	case PTR_TO_CTX:
	case PTR_TO_PACKET:
	case PTR_TO_PACKET_END:
	case FRAME_PTR:
	case CONST_PTR_TO_MAP:
		return true;
	default:
		return false;
	}
}

/* check_stack_read/write functions track spill/fill of registers,
 * stack boundary and alignment are checked in check_mem_access()
 */
static int check_stack_write(struct verifier_state *state, int off, int size,
			     int value_regno)
{
	int i;
	/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
	 * so it's aligned access and [off, off + size) are within stack limits
	 */

	if (value_regno >= 0 &&
	    is_spillable_regtype(state->regs[value_regno].type)) {

		/* register containing pointer is being spilled into stack */
		if (size != BPF_REG_SIZE) {
			verbose("invalid size of register spill\n");
			return -EACCES;
		}

		/* save register state */
		state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] =
			state->regs[value_regno];

		for (i = 0; i < BPF_REG_SIZE; i++)
			state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_SPILL;
	} else {
		/* regular write of data into stack */
		state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] =
			(struct reg_state) {};

		for (i = 0; i < size; i++)
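			/* mark each written byte as initialized data */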
			state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_MISC;
	}
	return 0;
}

static int check_stack_read(struct verifier_state *state, int off, int size,
			    int value_regno)
{
	u8 *slot_type;
	int i;

	slot_type = &state->stack_slot_type[MAX_BPF_STACK + off];

	if (slot_type[0] == STACK_SPILL) {
		if (size != BPF_REG_SIZE) {
			verbose("invalid size of register spill\n");
			return -EACCES;
		}
		for (i = 1; i < BPF_REG_SIZE; i++) {
			if (slot_type[i] != STACK_SPILL) {
				verbose("corrupted spill memory\n");
				return -EACCES;
			}
		}

		if (value_regno >= 0)
			/* restore register state from stack */
			state->regs[value_regno] =
				state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE];
		return 0;
	} else {
		for (i = 0; i < size; i++) {
			if (slot_type[i] != STACK_MISC) {
				verbose("invalid read from stack off %d+%d size %d\n",
					off, i, size);
				return -EACCES;
			}
		}
		if (value_regno >= 0)
			/* have read misc data from the stack */
			mark_reg_unknown_value(state->regs, value_regno);
		return 0;
	}
}

/* check read/write into map element returned by bpf_map_lookup_elem() */
static int check_map_access(struct verifier_env *env, u32 regno, int off,
			    int size)
{
	struct bpf_map *map = env->cur_state.regs[regno].map_ptr;

	if (off < 0 || off + size > map->value_size) {
		verbose("invalid access to map value, value_size=%d off=%d size=%d\n",
			map->value_size, off, size);
		return -EACCES;
	}
	return 0;
}

#define MAX_PACKET_OFF 0xffff

static bool may_write_pkt_data(enum bpf_prog_type type)
{
	switch (type) {
	case BPF_PROG_TYPE_XDP:
		return true;
	default:
		return false;
	}
}

static int check_packet_access(struct verifier_env *env, u32 regno, int off,
			       int size)
{
	struct reg_state *regs = env->cur_state.regs;
	struct reg_state *reg = &regs[regno];

	off += reg->off;
	if (off < 0 || off + size > reg->range) {
		verbose("invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
			off, size, regno, reg->id, reg->off, reg->range);
		return -EACCES;
	}
	return 0;
}

/* check access to 'struct bpf_context' fields */
static int check_ctx_access(struct verifier_env *env, int off, int size,
			    enum bpf_access_type t, enum bpf_reg_type *reg_type)
{
	if (env->prog->aux->ops->is_valid_access &&
	    env->prog->aux->ops->is_valid_access(off, size, t, reg_type)) {
		/* remember the offset of last byte accessed in ctx */
		if (env->prog->aux->max_ctx_offset < off + size)
			env->prog->aux->max_ctx_offset = off + size;
		return 0;
	}

	verbose("invalid bpf_context access off=%d size=%d\n", off, size);
	return -EACCES;
}

static bool is_pointer_value(struct verifier_env *env, int regno)
{
	if (env->allow_ptr_leaks)
		return false;

	switch (env->cur_state.regs[regno].type) {
	case UNKNOWN_VALUE:
	case CONST_IMM:
		return false;
	default:
		return true;
	}
}

static int check_ptr_alignment(struct verifier_env *env, struct reg_state *reg,
			       int off, int size)
{
	if (reg->type != PTR_TO_PACKET) {
		if (off % size != 0) {
			verbose("misaligned access off %d size %d\n", off, size);
			return -EACCES;
		} else {
			return 0;
		}
	}

	switch (env->prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
	case BPF_PROG_TYPE_XDP:
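		/* these program types may access packet data with variable
		 * offsets, so their alignment is validated below
		 */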
		break;
	default:
		verbose("verifier is misconfigured\n");
		return -EACCES;
	}

	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
		/* misaligned access to packet is ok on x86,arm,arm64 */
		return 0;

	if (reg->id && size != 1) {
		verbose("Unknown packet alignment. Only byte-sized access allowed\n");
		return -EACCES;
	}

	/* skb->data is NET_IP_ALIGN-ed */
	if ((NET_IP_ALIGN + reg->off + off) % size != 0) {
		verbose("misaligned packet access off %d+%d+%d size %d\n",
			NET_IP_ALIGN, reg->off, off, size);
		return -EACCES;
	}
	return 0;
}

/* check whether memory at (regno + off) is accessible for t = (read | write)
 * if t==write, value_regno is a register which value is stored into memory
 * if t==read, value_regno is a register which will receive the value from memory
 * if t==write && value_regno==-1, some unknown value is stored into memory
 * if t==read && value_regno==-1, don't care what we read from memory
 */
static int check_mem_access(struct verifier_env *env, u32 regno, int off,
			    int bpf_size, enum bpf_access_type t,
			    int value_regno)
{
	struct verifier_state *state = &env->cur_state;
	struct reg_state *reg = &state->regs[regno];
	int size, err = 0;

	if (reg->type == PTR_TO_STACK)
		off += reg->imm;

	size = bpf_size_to_bytes(bpf_size);
	if (size < 0)
		return size;

	err = check_ptr_alignment(env, reg, off, size);
	if (err)
		return err;

	if (reg->type == PTR_TO_MAP_VALUE) {
		if (t == BPF_WRITE && value_regno >= 0 &&
		    is_pointer_value(env, value_regno)) {
			verbose("R%d leaks addr into map\n", value_regno);
			return -EACCES;
		}
		err = check_map_access(env, regno, off, size);
		if (!err && t == BPF_READ && value_regno >= 0)
			mark_reg_unknown_value(state->regs, value_regno);

	} else if (reg->type == PTR_TO_CTX) {
		enum bpf_reg_type reg_type = UNKNOWN_VALUE;

		if (t == BPF_WRITE && value_regno >= 0 &&
		    is_pointer_value(env, value_regno)) {
			verbose("R%d leaks addr into ctx\n", value_regno);
			return -EACCES;
		}
		err = check_ctx_access(env, off, size, t, &reg_type);
		if (!err && t == BPF_READ && value_regno >= 0) {
			mark_reg_unknown_value(state->regs, value_regno);
			if (env->allow_ptr_leaks)
				/* note that reg.[id|off|range] == 0 */
				state->regs[value_regno].type = reg_type;
		}

	} else if (reg->type == FRAME_PTR || reg->type == PTR_TO_STACK) {
		if (off >= 0 || off < -MAX_BPF_STACK) {
			verbose("invalid stack off=%d size=%d\n", off, size);
			return -EACCES;
		}
		if (t == BPF_WRITE) {
			if (!env->allow_ptr_leaks &&
			    state->stack_slot_type[MAX_BPF_STACK + off] == STACK_SPILL &&
			    size != BPF_REG_SIZE) {
				verbose("attempt to corrupt spilled pointer on stack\n");
				return -EACCES;
			}
			err = check_stack_write(state, off, size, value_regno);
		} else {
			err = check_stack_read(state, off, size, value_regno);
		}
	} else if (state->regs[regno].type == PTR_TO_PACKET) {
		if (t == BPF_WRITE && !may_write_pkt_data(env->prog->type)) {
			verbose("cannot write into packet\n");
			return -EACCES;
		}
		if (t == BPF_WRITE && value_regno >= 0 &&
		    is_pointer_value(env, value_regno)) {
			verbose("R%d leaks addr into packet\n", value_regno);
			return -EACCES;
		}
		err = check_packet_access(env, regno, off, size);
		if (!err && t == BPF_READ && value_regno >= 0)
			mark_reg_unknown_value(state->regs, value_regno);
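		/* note: data loaded from the packet is treated as a plain
		 * scalar (UNKNOWN_VALUE), never as a pointer
		 */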
	} else {
		verbose("R%d invalid mem access '%s'\n",
			regno, reg_type_str[reg->type]);
		return -EACCES;
	}

	if (!err && size <= 2 && value_regno >= 0 && env->allow_ptr_leaks &&
	    state->regs[value_regno].type == UNKNOWN_VALUE) {
		/* 1 or 2 byte load zero-extends, determine the number of
		 * zero upper bits. Not doing it for 4 byte load, since
		 * such values cannot be added to ptr_to_packet anyway.
		 */
		state->regs[value_regno].imm = 64 - size * 8;
	}
	return err;
}

static int check_xadd(struct verifier_env *env, struct bpf_insn *insn)
{
	struct reg_state *regs = env->cur_state.regs;
	int err;

	if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) ||
	    insn->imm != 0) {
		verbose("BPF_XADD uses reserved fields\n");
		return -EINVAL;
	}

	/* check src1 operand */
	err = check_reg_arg(regs, insn->src_reg, SRC_OP);
	if (err)
		return err;

	/* check src2 operand */
	err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
	if (err)
		return err;

	/* check whether atomic_add can read the memory */
	err = check_mem_access(env, insn->dst_reg, insn->off,
			       BPF_SIZE(insn->code), BPF_READ, -1);
	if (err)
		return err;

	/* check whether atomic_add can write into the same memory */
	return check_mem_access(env, insn->dst_reg, insn->off,
				BPF_SIZE(insn->code), BPF_WRITE, -1);
}

/* when register 'regno' is passed into function that will read 'access_size'
 * bytes from that pointer, make sure that it's within stack boundary
 * and all elements of stack are initialized
 */
static int check_stack_boundary(struct verifier_env *env, int regno,
				int access_size, bool zero_size_allowed,
				struct bpf_call_arg_meta *meta)
{
	struct verifier_state *state = &env->cur_state;
	struct reg_state *regs = state->regs;
	int off, i;

	if (regs[regno].type != PTR_TO_STACK) {
		if (zero_size_allowed && access_size == 0 &&
		    regs[regno].type == CONST_IMM &&
		    regs[regno].imm == 0)
			return 0;

		verbose("R%d type=%s expected=%s\n", regno,
			reg_type_str[regs[regno].type],
			reg_type_str[PTR_TO_STACK]);
		return -EACCES;
	}

	off = regs[regno].imm;
	if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
	    access_size <= 0) {
		verbose("invalid stack type R%d off=%d access_size=%d\n",
			regno, off, access_size);
		return -EACCES;
	}

	if (meta && meta->raw_mode) {
		meta->access_size = access_size;
		meta->regno = regno;
		return 0;
	}

	for (i = 0; i < access_size; i++) {
		if (state->stack_slot_type[MAX_BPF_STACK + off + i] != STACK_MISC) {
			verbose("invalid indirect read from stack off %d+%d size %d\n",
				off, i, access_size);
			return -EACCES;
		}
	}
	return 0;
}

static int check_func_arg(struct verifier_env *env, u32 regno,
			  enum bpf_arg_type arg_type,
			  struct bpf_call_arg_meta *meta)
{
	struct reg_state *regs = env->cur_state.regs, *reg = &regs[regno];
	enum bpf_reg_type expected_type, type = reg->type;
	int err = 0;

	if (arg_type == ARG_DONTCARE)
		return 0;

	if (type == NOT_INIT) {
		verbose("R%d !read_ok\n", regno);
		return -EACCES;
	}

	if (arg_type == ARG_ANYTHING) {
		if (is_pointer_value(env, regno)) {
			verbose("R%d leaks addr into helper function\n", regno);
			return -EACCES;
		}
		return 0;
	}

	if (type == PTR_TO_PACKET &&
	    !may_write_pkt_data(env->prog->type)) {
		verbose("helper access to the packet is not allowed for clsact\n");
		return -EACCES;
	}

	if (arg_type == ARG_PTR_TO_MAP_KEY ||
	    arg_type == ARG_PTR_TO_MAP_VALUE) {
		expected_type = PTR_TO_STACK;
		if (type != PTR_TO_PACKET && type != expected_type)
			goto err_type;
	} else if (arg_type == ARG_CONST_STACK_SIZE ||
		   arg_type == ARG_CONST_STACK_SIZE_OR_ZERO) {
		expected_type = CONST_IMM;
		if (type != expected_type)
			goto err_type;
	} else if (arg_type == ARG_CONST_MAP_PTR) {
		expected_type = CONST_PTR_TO_MAP;
		if (type != expected_type)
			goto err_type;
	} else if (arg_type == ARG_PTR_TO_CTX) {
		expected_type = PTR_TO_CTX;
		if (type != expected_type)
			goto err_type;
	} else if (arg_type == ARG_PTR_TO_STACK ||
		   arg_type == ARG_PTR_TO_RAW_STACK) {
		expected_type = PTR_TO_STACK;
		/* One exception here. In case function allows for NULL to be
		 * passed in as argument, it's a CONST_IMM type. Final test
		 * happens during stack boundary checking.
		 */
		if (type == CONST_IMM && reg->imm == 0)
			/* final test in check_stack_boundary() */;
		else if (type != PTR_TO_PACKET && type != expected_type)
			goto err_type;
		meta->raw_mode = arg_type == ARG_PTR_TO_RAW_STACK;
	} else {
		verbose("unsupported arg_type %d\n", arg_type);
		return -EFAULT;
	}

	if (arg_type == ARG_CONST_MAP_PTR) {
		/* bpf_map_xxx(map_ptr) call: remember that map_ptr */
		meta->map_ptr = reg->map_ptr;
	} else if (arg_type == ARG_PTR_TO_MAP_KEY) {
		/* bpf_map_xxx(..., map_ptr, ..., key) call:
		 * check that [key, key + map->key_size) are within
		 * stack limits and initialized
		 */
		if (!meta->map_ptr) {
			/* in function declaration map_ptr must come before
			 * map_key, so that it's verified and known before
			 * we have to check map_key here. Otherwise it means
			 * that kernel subsystem misconfigured verifier
			 */
			verbose("invalid map_ptr to access map->key\n");
			return -EACCES;
		}
		if (type == PTR_TO_PACKET)
			err = check_packet_access(env, regno, 0,
						  meta->map_ptr->key_size);
		else
			err = check_stack_boundary(env, regno,
						   meta->map_ptr->key_size,
						   false, NULL);
	} else if (arg_type == ARG_PTR_TO_MAP_VALUE) {
		/* bpf_map_xxx(..., map_ptr, ..., value) call:
		 * check [value, value + map->value_size) validity
		 */
		if (!meta->map_ptr) {
			/* kernel subsystem misconfigured verifier */
			verbose("invalid map_ptr to access map->value\n");
			return -EACCES;
		}
		if (type == PTR_TO_PACKET)
			err = check_packet_access(env, regno, 0,
						  meta->map_ptr->value_size);
		else
			err = check_stack_boundary(env, regno,
						   meta->map_ptr->value_size,
						   false, NULL);
	} else if (arg_type == ARG_CONST_STACK_SIZE ||
		   arg_type == ARG_CONST_STACK_SIZE_OR_ZERO) {
		bool zero_size_allowed = (arg_type == ARG_CONST_STACK_SIZE_OR_ZERO);

		/* bpf_xxx(..., buf, len) call will access 'len' bytes
		 * from stack pointer 'buf'.
		 * Check it
		 * note: regno == len, regno - 1 == buf
		 */
		if (regno == 0) {
			/* kernel subsystem misconfigured verifier */
			verbose("ARG_CONST_STACK_SIZE cannot be first argument\n");
			return -EACCES;
		}
		if (regs[regno - 1].type == PTR_TO_PACKET)
			err = check_packet_access(env, regno - 1, 0, reg->imm);
		else
			err = check_stack_boundary(env, regno - 1, reg->imm,
						   zero_size_allowed, meta);
	}

	return err;
err_type:
	verbose("R%d type=%s expected=%s\n", regno,
		reg_type_str[type], reg_type_str[expected_type]);
	return -EACCES;
}

static int check_map_func_compatibility(struct bpf_map *map, int func_id)
{
	if (!map)
		return 0;

	/* We need a two way check, first is from map perspective ... */
	switch (map->map_type) {
	case BPF_MAP_TYPE_PROG_ARRAY:
		if (func_id != BPF_FUNC_tail_call)
			goto error;
		break;
	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
		if (func_id != BPF_FUNC_perf_event_read &&
		    func_id != BPF_FUNC_perf_event_output)
			goto error;
		break;
	case BPF_MAP_TYPE_STACK_TRACE:
		if (func_id != BPF_FUNC_get_stackid)
			goto error;
		break;
	case BPF_MAP_TYPE_CGROUP_ARRAY:
		if (func_id != BPF_FUNC_skb_under_cgroup &&
		    func_id != BPF_FUNC_current_task_under_cgroup)
			goto error;
		break;
	default:
		break;
	}

	/* ... and second from the function itself. */
	switch (func_id) {
	case BPF_FUNC_tail_call:
		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
			goto error;
		break;
	case BPF_FUNC_perf_event_read:
	case BPF_FUNC_perf_event_output:
		if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
			goto error;
		break;
	case BPF_FUNC_get_stackid:
		if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
			goto error;
		break;
	case BPF_FUNC_current_task_under_cgroup:
	case BPF_FUNC_skb_under_cgroup:
		if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
			goto error;
		break;
	default:
		break;
	}

	return 0;
error:
	verbose("cannot pass map_type %d into func %d\n",
		map->map_type, func_id);
	return -EINVAL;
}

static int check_raw_mode(const struct bpf_func_proto *fn)
{
	int count = 0;

	if (fn->arg1_type == ARG_PTR_TO_RAW_STACK)
		count++;
	if (fn->arg2_type == ARG_PTR_TO_RAW_STACK)
		count++;
	if (fn->arg3_type == ARG_PTR_TO_RAW_STACK)
		count++;
	if (fn->arg4_type == ARG_PTR_TO_RAW_STACK)
		count++;
	if (fn->arg5_type == ARG_PTR_TO_RAW_STACK)
		count++;

	return count > 1 ?
	    -EINVAL : 0;
}

static void clear_all_pkt_pointers(struct verifier_env *env)
{
	struct verifier_state *state = &env->cur_state;
	struct reg_state *regs = state->regs, *reg;
	int i;

	for (i = 0; i < MAX_BPF_REG; i++)
		if (regs[i].type == PTR_TO_PACKET ||
		    regs[i].type == PTR_TO_PACKET_END)
			mark_reg_unknown_value(regs, i);

	for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
		if (state->stack_slot_type[i] != STACK_SPILL)
			continue;
		reg = &state->spilled_regs[i / BPF_REG_SIZE];
		if (reg->type != PTR_TO_PACKET &&
		    reg->type != PTR_TO_PACKET_END)
			continue;
		reg->type = UNKNOWN_VALUE;
		reg->imm = 0;
	}
}

static int check_call(struct verifier_env *env, int func_id)
{
	struct verifier_state *state = &env->cur_state;
	const struct bpf_func_proto *fn = NULL;
	struct reg_state *regs = state->regs;
	struct reg_state *reg;
	struct bpf_call_arg_meta meta;
	bool changes_data;
	int i, err;

	/* find function prototype */
	if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
		verbose("invalid func %d\n", func_id);
		return -EINVAL;
	}

	if (env->prog->aux->ops->get_func_proto)
		fn = env->prog->aux->ops->get_func_proto(func_id);

	if (!fn) {
		verbose("unknown func %d\n", func_id);
		return -EINVAL;
	}

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	if (!env->prog->gpl_compatible && fn->gpl_only) {
		verbose("cannot call GPL only function from proprietary program\n");
		return -EINVAL;
	}

	changes_data = bpf_helper_changes_skb_data(fn->func);

	memset(&meta, 0, sizeof(meta));

	/* We only support one arg being in raw mode at the moment, which
	 * is sufficient for the helper functions we have right now.
	 */
	err = check_raw_mode(fn);
	if (err) {
		verbose("kernel subsystem misconfigured func %d\n", func_id);
		return err;
	}

	/* check args */
	err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta);
	if (err)
		return err;
	err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta);
	if (err)
		return err;
	err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta);
	if (err)
		return err;
	err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &meta);
	if (err)
		return err;
	err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &meta);
	if (err)
		return err;

	/* Mark slots with STACK_MISC in case of raw mode, stack offset
	 * is inferred from register state.
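	 * (illustrative note, not in the original comment: raw-mode helpers
	 * such as bpf_probe_read(), whose arg1 is ARG_PTR_TO_RAW_STACK, do
	 * not require the stack buffer to be initialized before the call,
	 * so the loop below marks the destination bytes as written instead)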
	 */
	for (i = 0; i < meta.access_size; i++) {
		err = check_mem_access(env, meta.regno, i, BPF_B, BPF_WRITE, -1);
		if (err)
			return err;
	}

	/* reset caller saved regs */
	for (i = 0; i < CALLER_SAVED_REGS; i++) {
		reg = regs + caller_saved[i];
		reg->type = NOT_INIT;
		reg->imm = 0;
	}

	/* update return register */
	if (fn->ret_type == RET_INTEGER) {
		regs[BPF_REG_0].type = UNKNOWN_VALUE;
	} else if (fn->ret_type == RET_VOID) {
		regs[BPF_REG_0].type = NOT_INIT;
	} else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) {
		regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
		/* remember map_ptr, so that check_map_access()
		 * can check 'value_size' boundary of memory access
		 * to map element returned from bpf_map_lookup_elem()
		 */
		if (meta.map_ptr == NULL) {
			verbose("kernel subsystem misconfigured verifier\n");
			return -EINVAL;
		}
		regs[BPF_REG_0].map_ptr = meta.map_ptr;
	} else {
		verbose("unknown return type %d of func %d\n",
			fn->ret_type, func_id);
		return -EINVAL;
	}

	err = check_map_func_compatibility(meta.map_ptr, func_id);
	if (err)
		return err;

	if (changes_data)
		clear_all_pkt_pointers(env);
	return 0;
}

static int check_packet_ptr_add(struct verifier_env *env, struct bpf_insn *insn)
{
	struct reg_state *regs = env->cur_state.regs;
	struct reg_state *dst_reg = &regs[insn->dst_reg];
	struct reg_state *src_reg = &regs[insn->src_reg];
	struct reg_state tmp_reg;
	s32 imm;

	if (BPF_SRC(insn->code) == BPF_K) {
		/* pkt_ptr += imm */
		imm = insn->imm;

add_imm:
		if (imm <= 0) {
			verbose("addition of negative constant to packet pointer is not allowed\n");
			return -EACCES;
		}
		if (imm >= MAX_PACKET_OFF ||
		    imm + dst_reg->off >= MAX_PACKET_OFF) {
			verbose("constant %d is too large to add to packet pointer\n",
				imm);
			return -EACCES;
		}
		/* a constant was added to pkt_ptr.
		 * Remember it while keeping the same 'id'
		 */
		dst_reg->off += imm;
	} else {
		if (src_reg->type == PTR_TO_PACKET) {
			/* R6=pkt(id=0,off=0,r=62) R7=imm22; r7 += r6 */
			tmp_reg = *dst_reg;  /* save r7 state */
			*dst_reg = *src_reg; /* copy pkt_ptr state r6 into r7 */
			src_reg = &tmp_reg;  /* pretend it's src_reg state */
			/* if the checks below reject it, the copy won't matter,
			 * since we're rejecting the whole program.
			 * If all ok,
			 * then imm22 state will be added to r7
			 * and r7 will be pkt(id=0,off=22,r=62) while
			 * r6 will stay as pkt(id=0,off=0,r=62)
			 */
		}

		if (src_reg->type == CONST_IMM) {
			/* pkt_ptr += reg where reg is known constant */
			imm = src_reg->imm;
			goto add_imm;
		}
		/* disallow pkt_ptr += reg
		 * if reg is not unknown_value with guaranteed zero upper bits
		 * otherwise pkt_ptr may overflow and addition will become
		 * subtraction which is not allowed
		 */
		if (src_reg->type != UNKNOWN_VALUE) {
			verbose("cannot add '%s' to ptr_to_packet\n",
				reg_type_str[src_reg->type]);
			return -EACCES;
		}
		if (src_reg->imm < 48) {
			verbose("cannot add integer value with %lld upper zero bits to ptr_to_packet\n",
				src_reg->imm);
			return -EACCES;
		}
		/* dst_reg stays as pkt_ptr type and since some positive
		 * integer value was added to the pointer, increment its 'id'
		 */
		dst_reg->id = ++env->id_gen;

		/* something was added to pkt_ptr, set range and off to zero */
		dst_reg->off = 0;
		dst_reg->range = 0;
	}
	return 0;
}

static int evaluate_reg_alu(struct verifier_env *env, struct bpf_insn *insn)
{
	struct reg_state *regs = env->cur_state.regs;
	struct reg_state *dst_reg = &regs[insn->dst_reg];
	u8 opcode = BPF_OP(insn->code);
	s64 imm_log2;

	/* for type == UNKNOWN_VALUE:
	 * imm > 0 -> number of zero upper bits
	 * imm == 0 -> don't track which is the same as all bits can be non-zero
	 */

	if (BPF_SRC(insn->code) == BPF_X) {
		struct reg_state *src_reg = &regs[insn->src_reg];

		if (src_reg->type == UNKNOWN_VALUE && src_reg->imm > 0 &&
		    dst_reg->imm && opcode == BPF_ADD) {
			/* dreg += sreg
			 * where both have zero upper bits. Adding them
			 * can only result in making one more bit non-zero
			 * in the larger value.
			 * Ex. 0xffff (imm=48) + 1 (imm=63) = 0x10000 (imm=47)
			 *     0xffff (imm=48) + 0xffff = 0x1fffe (imm=47)
			 */
			dst_reg->imm = min(dst_reg->imm, src_reg->imm);
			dst_reg->imm--;
			return 0;
		}
		if (src_reg->type == CONST_IMM && src_reg->imm > 0 &&
		    dst_reg->imm && opcode == BPF_ADD) {
			/* dreg += sreg
			 * where dreg has zero upper bits and sreg is const.
			 * Adding them can only result in making one more bit
			 * non-zero in the larger value.
			 */
			imm_log2 = __ilog2_u64((long long)src_reg->imm);
			dst_reg->imm = min(dst_reg->imm, 63 - imm_log2);
			dst_reg->imm--;
			return 0;
		}
		/* all other cases are not supported yet, just mark dst_reg */
		dst_reg->imm = 0;
		return 0;
	}

	/* sign extend 32-bit imm into 64-bit to make sure that
	 * negative values occupy bit 63. Note ilog2() would have
	 * been incorrect, since sizeof(insn->imm) == 4
	 */
	imm_log2 = __ilog2_u64((long long)insn->imm);

	if (dst_reg->imm && opcode == BPF_LSH) {
		/* reg <<= imm
		 * if reg was a result of 2 byte load, then its imm == 48
		 * which means that upper 48 bits are zero and shifting this reg
		 * left by 4 would mean that upper 44 bits are still zero
		 */
		dst_reg->imm -= insn->imm;
	} else if (dst_reg->imm && opcode == BPF_MUL) {
		/* reg *= imm
		 * if multiplying by 14 subtract 4
		 * This is conservative calculation of upper zero bits.
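		 * e.g. a value with imm=48 (fits in 16 bits) multiplied by 14
		 * (imm_log2 == 3) fits in 16 + 4 = 20 bits, so imm becomes
		 * 48 - 4 = 44.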
		 * It's not trying to special case insn->imm == 1 or 0 cases
		 */
		dst_reg->imm -= imm_log2 + 1;
	} else if (opcode == BPF_AND) {
		/* reg &= imm */
		dst_reg->imm = 63 - imm_log2;
	} else if (dst_reg->imm && opcode == BPF_ADD) {
		/* reg += imm */
		dst_reg->imm = min(dst_reg->imm, 63 - imm_log2);
		dst_reg->imm--;
	} else if (opcode == BPF_RSH) {
		/* reg >>= imm
		 * which means that after right shift, upper bits will be zero
		 * note that verifier already checked that
		 * 0 <= imm < 64 for shift insn
		 */
		dst_reg->imm += insn->imm;
		if (unlikely(dst_reg->imm > 64))
			/* some dumb code did:
			 * r2 = *(u32 *)mem;
			 * r2 >>= 32;
			 * and all bits are zero now */
			dst_reg->imm = 64;
	} else {
		/* all other alu ops, means that we don't know what will
		 * happen to the value, mark it with unknown number of zero bits
		 */
		dst_reg->imm = 0;
	}

	if (dst_reg->imm < 0) {
		/* all 64 bits of the register can contain non-zero bits
		 * and such value cannot be added to ptr_to_packet, since it
		 * may overflow, mark it as unknown to avoid further eval
		 */
		dst_reg->imm = 0;
	}
	return 0;
}

static int evaluate_reg_imm_alu(struct verifier_env *env, struct bpf_insn *insn)
{
	struct reg_state *regs = env->cur_state.regs;
	struct reg_state *dst_reg = &regs[insn->dst_reg];
	struct reg_state *src_reg = &regs[insn->src_reg];
	u8 opcode = BPF_OP(insn->code);

	/* dst_reg->type == CONST_IMM here, simulate execution of 'add' insn.
	 * Don't care about overflow or negative values, just add them
	 */
	if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_K)
		dst_reg->imm += insn->imm;
	else if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_X &&
		 src_reg->type == CONST_IMM)
		dst_reg->imm += src_reg->imm;
	else
		mark_reg_unknown_value(regs, insn->dst_reg);
	return 0;
}

/* check validity of 32-bit and 64-bit arithmetic operations */
static int check_alu_op(struct verifier_env *env, struct bpf_insn *insn)
{
	struct reg_state *regs = env->cur_state.regs, *dst_reg;
	u8 opcode = BPF_OP(insn->code);
	int err;

	if (opcode == BPF_END || opcode == BPF_NEG) {
		if (opcode == BPF_NEG) {
			if (BPF_SRC(insn->code) != 0 ||
			    insn->src_reg != BPF_REG_0 ||
			    insn->off != 0 || insn->imm != 0) {
				verbose("BPF_NEG uses reserved fields\n");
				return -EINVAL;
			}
		} else {
			if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
			    (insn->imm != 16 && insn->imm != 32 && insn->imm != 64)) {
				verbose("BPF_END uses reserved fields\n");
				return -EINVAL;
			}
		}

		/* check src operand */
		err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
		if (err)
			return err;

		if (is_pointer_value(env, insn->dst_reg)) {
			verbose("R%d pointer arithmetic prohibited\n",
				insn->dst_reg);
			return -EACCES;
		}

		/* check dest operand */
		err = check_reg_arg(regs, insn->dst_reg, DST_OP);
		if (err)
			return err;

	} else if (opcode == BPF_MOV) {

		if (BPF_SRC(insn->code) == BPF_X) {
			if (insn->imm != 0 || insn->off != 0) {
				verbose("BPF_MOV uses reserved fields\n");
				return -EINVAL;
			}

			/* check src operand */
			err = check_reg_arg(regs, insn->src_reg, SRC_OP);
			if (err)
				return err;
		} else {
			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
verbose("BPF_MOV uses reserved fields\n"); 1517 return -EINVAL; 1518 } 1519 } 1520 1521 /* check dest operand */ 1522 err = check_reg_arg(regs, insn->dst_reg, DST_OP); 1523 if (err) 1524 return err; 1525 1526 if (BPF_SRC(insn->code) == BPF_X) { 1527 if (BPF_CLASS(insn->code) == BPF_ALU64) { 1528 /* case: R1 = R2 1529 * copy register state to dest reg 1530 */ 1531 regs[insn->dst_reg] = regs[insn->src_reg]; 1532 } else { 1533 if (is_pointer_value(env, insn->src_reg)) { 1534 verbose("R%d partial copy of pointer\n", 1535 insn->src_reg); 1536 return -EACCES; 1537 } 1538 regs[insn->dst_reg].type = UNKNOWN_VALUE; 1539 regs[insn->dst_reg].map_ptr = NULL; 1540 } 1541 } else { 1542 /* case: R = imm 1543 * remember the value we stored into this reg 1544 */ 1545 regs[insn->dst_reg].type = CONST_IMM; 1546 regs[insn->dst_reg].imm = insn->imm; 1547 } 1548 1549 } else if (opcode > BPF_END) { 1550 verbose("invalid BPF_ALU opcode %x\n", opcode); 1551 return -EINVAL; 1552 1553 } else { /* all other ALU ops: and, sub, xor, add, ... */ 1554 1555 if (BPF_SRC(insn->code) == BPF_X) { 1556 if (insn->imm != 0 || insn->off != 0) { 1557 verbose("BPF_ALU uses reserved fields\n"); 1558 return -EINVAL; 1559 } 1560 /* check src1 operand */ 1561 err = check_reg_arg(regs, insn->src_reg, SRC_OP); 1562 if (err) 1563 return err; 1564 } else { 1565 if (insn->src_reg != BPF_REG_0 || insn->off != 0) { 1566 verbose("BPF_ALU uses reserved fields\n"); 1567 return -EINVAL; 1568 } 1569 } 1570 1571 /* check src2 operand */ 1572 err = check_reg_arg(regs, insn->dst_reg, SRC_OP); 1573 if (err) 1574 return err; 1575 1576 if ((opcode == BPF_MOD || opcode == BPF_DIV) && 1577 BPF_SRC(insn->code) == BPF_K && insn->imm == 0) { 1578 verbose("div by zero\n"); 1579 return -EINVAL; 1580 } 1581 1582 if ((opcode == BPF_LSH || opcode == BPF_RSH || 1583 opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { 1584 int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 
				   64 : 32;

			if (insn->imm < 0 || insn->imm >= size) {
				verbose("invalid shift %d\n", insn->imm);
				return -EINVAL;
			}
		}

		/* check dest operand */
		err = check_reg_arg(regs, insn->dst_reg, DST_OP_NO_MARK);
		if (err)
			return err;

		dst_reg = &regs[insn->dst_reg];

		/* pattern match 'bpf_add Rx, imm' instruction */
		if (opcode == BPF_ADD && BPF_CLASS(insn->code) == BPF_ALU64 &&
		    dst_reg->type == FRAME_PTR && BPF_SRC(insn->code) == BPF_K) {
			dst_reg->type = PTR_TO_STACK;
			dst_reg->imm = insn->imm;
			return 0;
		} else if (opcode == BPF_ADD &&
			   BPF_CLASS(insn->code) == BPF_ALU64 &&
			   (dst_reg->type == PTR_TO_PACKET ||
			    (BPF_SRC(insn->code) == BPF_X &&
			     regs[insn->src_reg].type == PTR_TO_PACKET))) {
			/* ptr_to_packet += K|X */
			return check_packet_ptr_add(env, insn);
		} else if (BPF_CLASS(insn->code) == BPF_ALU64 &&
			   dst_reg->type == UNKNOWN_VALUE &&
			   env->allow_ptr_leaks) {
			/* unknown += K|X */
			return evaluate_reg_alu(env, insn);
		} else if (BPF_CLASS(insn->code) == BPF_ALU64 &&
			   dst_reg->type == CONST_IMM &&
			   env->allow_ptr_leaks) {
			/* reg_imm += K|X */
			return evaluate_reg_imm_alu(env, insn);
		} else if (is_pointer_value(env, insn->dst_reg)) {
			verbose("R%d pointer arithmetic prohibited\n",
				insn->dst_reg);
			return -EACCES;
		} else if (BPF_SRC(insn->code) == BPF_X &&
			   is_pointer_value(env, insn->src_reg)) {
			verbose("R%d pointer arithmetic prohibited\n",
				insn->src_reg);
			return -EACCES;
		}

		/* mark dest operand */
		mark_reg_unknown_value(regs, insn->dst_reg);
	}

	return 0;
}

static void find_good_pkt_pointers(struct verifier_state *state,
				   const struct reg_state *dst_reg)
{
	struct reg_state *regs = state->regs, *reg;
	int i;

	/* LLVM can generate two kinds of checks:
	 *
	 * Type 1:
	 *
	 *   r2 = r3;
	 *   r2 += 8;
	 *   if (r2 > pkt_end) goto <handle exception>
	 *   <access okay>
	 *
	 *   Where:
	 *     r2 == dst_reg, pkt_end == src_reg
	 *     r2=pkt(id=n,off=8,r=0)
	 *     r3=pkt(id=n,off=0,r=0)
	 *
	 * Type 2:
	 *
	 *   r2 = r3;
	 *   r2 += 8;
	 *   if (pkt_end >= r2) goto <access okay>
	 *   <handle exception>
	 *
	 *   Where:
	 *     pkt_end == dst_reg, r2 == src_reg
	 *     r2=pkt(id=n,off=8,r=0)
	 *     r3=pkt(id=n,off=0,r=0)
	 *
	 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
	 * so that range of bytes [r3, r3 + 8) is safe to access.
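	 *
	 * For illustration (not part of the original comment), once the
	 * Type 1 check has succeeded, an access such as
	 *   r1 = *(u8 *)(r3 + 7);   // ok: 7 + 1 <= r3->range (8)
	 * passes check_packet_access(), while (r3 + 8) would be rejected.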
	 */
	for (i = 0; i < MAX_BPF_REG; i++)
		if (regs[i].type == PTR_TO_PACKET && regs[i].id == dst_reg->id)
			regs[i].range = dst_reg->off;

	for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
		if (state->stack_slot_type[i] != STACK_SPILL)
			continue;
		reg = &state->spilled_regs[i / BPF_REG_SIZE];
		if (reg->type == PTR_TO_PACKET && reg->id == dst_reg->id)
			reg->range = dst_reg->off;
	}
}

static int check_cond_jmp_op(struct verifier_env *env,
			     struct bpf_insn *insn, int *insn_idx)
{
	struct verifier_state *other_branch, *this_branch = &env->cur_state;
	struct reg_state *regs = this_branch->regs, *dst_reg;
	u8 opcode = BPF_OP(insn->code);
	int err;

	if (opcode > BPF_EXIT) {
		verbose("invalid BPF_JMP opcode %x\n", opcode);
		return -EINVAL;
	}

	if (BPF_SRC(insn->code) == BPF_X) {
		if (insn->imm != 0) {
			verbose("BPF_JMP uses reserved fields\n");
			return -EINVAL;
		}

		/* check src1 operand */
		err = check_reg_arg(regs, insn->src_reg, SRC_OP);
		if (err)
			return err;

		if (is_pointer_value(env, insn->src_reg)) {
			verbose("R%d pointer comparison prohibited\n",
				insn->src_reg);
			return -EACCES;
		}
	} else {
		if (insn->src_reg != BPF_REG_0) {
			verbose("BPF_JMP uses reserved fields\n");
			return -EINVAL;
		}
	}

	/* check src2 operand */
	err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
	if (err)
		return err;

	dst_reg = &regs[insn->dst_reg];

	/* detect if R == 0 where R was initialized to zero earlier */
	if (BPF_SRC(insn->code) == BPF_K &&
	    (opcode == BPF_JEQ || opcode == BPF_JNE) &&
	    dst_reg->type == CONST_IMM && dst_reg->imm == insn->imm) {
		if (opcode == BPF_JEQ) {
			/* if (imm == imm) goto pc+off;
			 * only follow the goto, ignore fall-through
			 */
			*insn_idx += insn->off;
			return 0;
		} else {
			/* if (imm != imm) goto pc+off;
			 * only follow fall-through branch, since
			 * that's where the program will go
			 */
			return 0;
		}
	}

	other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx);
	if (!other_branch)
		return -EFAULT;

	/* detect if R == 0 where R is returned value from bpf_map_lookup_elem() */
	if (BPF_SRC(insn->code) == BPF_K &&
	    insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
	    dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
		if (opcode == BPF_JEQ) {
			/* next fallthrough insn can access memory via
			 * this register
			 */
			regs[insn->dst_reg].type = PTR_TO_MAP_VALUE;
			/* branch target cannot access it, since reg == 0 */
			mark_reg_unknown_value(other_branch->regs,
					       insn->dst_reg);
		} else {
			other_branch->regs[insn->dst_reg].type = PTR_TO_MAP_VALUE;
			mark_reg_unknown_value(regs, insn->dst_reg);
		}
	} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT &&
		   dst_reg->type == PTR_TO_PACKET &&
		   regs[insn->src_reg].type == PTR_TO_PACKET_END) {
		find_good_pkt_pointers(this_branch, dst_reg);
	} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGE &&
		   dst_reg->type == PTR_TO_PACKET_END &&
		   regs[insn->src_reg].type == PTR_TO_PACKET) {
		find_good_pkt_pointers(other_branch, &regs[insn->src_reg]);
	} else if (is_pointer_value(env, insn->dst_reg)) {
		verbose("R%d pointer comparison prohibited\n", insn->dst_reg);
		return -EACCES;
	}
	if (log_level)
		print_verifier_state(this_branch);
	return 0;
}

/* return the map pointer stored inside BPF_LD_IMM64 instruction */
static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn)
{
	u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32;

	return (struct bpf_map *) (unsigned long) imm64;
}

/* verify BPF_LD_IMM64 instruction */
static int check_ld_imm(struct verifier_env *env, struct bpf_insn *insn)
{
	struct reg_state *regs = env->cur_state.regs;
	int err;

	if (BPF_SIZE(insn->code) != BPF_DW) {
		verbose("invalid BPF_LD_IMM insn\n");
		return -EINVAL;
	}
	if (insn->off != 0) {
		verbose("BPF_LD_IMM64 uses reserved fields\n");
		return -EINVAL;
	}

	err = check_reg_arg(regs, insn->dst_reg, DST_OP);
	if (err)
		return err;

	if (insn->src_reg == 0)
		/* generic move 64-bit immediate into a register */
		return 0;

	/* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */
	BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD);

	regs[insn->dst_reg].type = CONST_PTR_TO_MAP;
	regs[insn->dst_reg].map_ptr = ld_imm64_to_map_ptr(insn);
	return 0;
}

static bool may_access_skb(enum bpf_prog_type type)
{
	switch (type) {
	case BPF_PROG_TYPE_SOCKET_FILTER:
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		return true;
	default:
		return false;
	}
}

/* verify safety of LD_ABS|LD_IND instructions:
 * - they can only appear in the programs where ctx == skb
 * - since they are wrappers of function calls, they scratch R1-R5 registers,
 *   preserve R6-R9, and store return value into R0
 *
 * Implicit input:
 *   ctx == skb == R6 == CTX
 *
 * Explicit input:
 *   SRC == any register
 *   IMM == 32-bit immediate
 *
 * Output:
 *   R0 - 8/16/32-bit skb data converted to cpu endianness
 */
static int check_ld_abs(struct verifier_env *env, struct bpf_insn *insn)
{
	struct reg_state *regs = env->cur_state.regs;
	u8 mode = BPF_MODE(insn->code);
	struct reg_state *reg;
	int i, err;

	if (!may_access_skb(env->prog->type)) {
		verbose("BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
		return -EINVAL;
	}

	if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
	    BPF_SIZE(insn->code) == BPF_DW ||
	    (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
		verbose("BPF_LD_[ABS|IND] uses reserved fields\n");
		return -EINVAL;
	}

	/* check whether implicit source operand (register R6) is readable */
	err = check_reg_arg(regs, BPF_REG_6, SRC_OP);
	if (err)
		return err;

	if (regs[BPF_REG_6].type != PTR_TO_CTX) {
		verbose("at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
		return -EINVAL;
	}

	if (mode == BPF_IND) {
		/* check explicit source operand */
		err = check_reg_arg(regs, insn->src_reg, SRC_OP);
		if (err)
			return err;
	}

	/* reset caller saved regs to unreadable */
	for (i = 0; i < CALLER_SAVED_REGS; i++) {
		reg = regs + caller_saved[i];
		reg->type = NOT_INIT;
		reg->imm = 0;
	}

	/* mark destination R0 register as readable, since it contains
	 * the value fetched from the packet
	 */
	regs[BPF_REG_0].type = UNKNOWN_VALUE;
	return 0;
}

/* non-recursive DFS pseudo code
 * 1 procedure
/* non-recursive DFS pseudo code
 * 1  procedure DFS-iterative(G,v):
 * 2      label v as discovered
 * 3      let S be a stack
 * 4      S.push(v)
 * 5      while S is not empty
 * 6            t <- S.pop()
 * 7            if t is what we're looking for:
 * 8                return t
 * 9            for all edges e in G.adjacentEdges(t) do
 * 10               if edge e is already labelled
 * 11                   continue with the next edge
 * 12               w <- G.adjacentVertex(t,e)
 * 13               if vertex w is not discovered and not explored
 * 14                   label e as tree-edge
 * 15                   label w as discovered
 * 16                   S.push(w)
 * 17                   continue at 5
 * 18               else if vertex w is discovered
 * 19                   label e as back-edge
 * 20               else
 * 21                   // vertex w is explored
 * 22                   label e as forward- or cross-edge
 * 23           label t as explored
 * 24           S.pop()
 *
 * convention:
 * 0x10 - discovered
 * 0x11 - discovered and fall-through edge labelled
 * 0x12 - discovered and fall-through and branch edges labelled
 * 0x20 - explored
 */

enum {
        DISCOVERED = 0x10,
        EXPLORED = 0x20,
        FALLTHROUGH = 1,
        BRANCH = 2,
};

#define STATE_LIST_MARK ((struct verifier_state_list *) -1L)

static int *insn_stack; /* stack of insns to process */
static int cur_stack;   /* current stack index */
static int *insn_state;

/* t, w, e - match pseudo-code above:
 * t - index of current instruction
 * w - next instruction
 * e - edge
 */
static int push_insn(int t, int w, int e, struct verifier_env *env)
{
        if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
                return 0;

        if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
                return 0;

        if (w < 0 || w >= env->prog->len) {
                verbose("jump out of range from insn %d to %d\n", t, w);
                return -EINVAL;
        }

        if (e == BRANCH)
                /* mark branch target for state pruning */
                env->explored_states[w] = STATE_LIST_MARK;

        if (insn_state[w] == 0) {
                /* tree-edge */
                insn_state[t] = DISCOVERED | e;
                insn_state[w] = DISCOVERED;
                if (cur_stack >= env->prog->len)
                        return -E2BIG;
                insn_stack[cur_stack++] = w;
                return 1;
        } else if ((insn_state[w] & 0xF0) == DISCOVERED) {
                verbose("back-edge from insn %d to %d\n", t, w);
                return -EINVAL;
        } else if (insn_state[w] == EXPLORED) {
                /* forward- or cross-edge */
                insn_state[t] = DISCOVERED | e;
        } else {
                verbose("insn state internal bug\n");
                return -EFAULT;
        }
        return 0;
}
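/* Worked example of the encoding above (illustrative): for a conditional
 * jump at insn t with both successors unvisited, insn_state[t] evolves as
 *
 *   0x00 -> DISCOVERED (0x10)               when t is first pushed
 *        -> DISCOVERED | FALLTHROUGH (0x11) after push_insn(t, t+1, FALLTHROUGH)
 *        -> DISCOVERED | BRANCH (0x12)      after push_insn(t, t+off+1, BRANCH)
 *        -> EXPLORED (0x20)                 once t is finally popped
 *
 * which is why the two "insn_state[t] >= ..." checks at the top of
 * push_insn() can tell whether a given edge of insn t was already walked.
 */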
/* non-recursive depth-first-search to detect loops in BPF program
 * loop == back-edge in directed graph
 */
static int check_cfg(struct verifier_env *env)
{
        struct bpf_insn *insns = env->prog->insnsi;
        int insn_cnt = env->prog->len;
        int ret = 0;
        int i, t;

        insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
        if (!insn_state)
                return -ENOMEM;

        insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
        if (!insn_stack) {
                kfree(insn_state);
                return -ENOMEM;
        }

        insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
        insn_stack[0] = 0; /* 0 is the first instruction */
        cur_stack = 1;

peek_stack:
        if (cur_stack == 0)
                goto check_state;
        t = insn_stack[cur_stack - 1];

        if (BPF_CLASS(insns[t].code) == BPF_JMP) {
                u8 opcode = BPF_OP(insns[t].code);

                if (opcode == BPF_EXIT) {
                        goto mark_explored;
                } else if (opcode == BPF_CALL) {
                        ret = push_insn(t, t + 1, FALLTHROUGH, env);
                        if (ret == 1)
                                goto peek_stack;
                        else if (ret < 0)
                                goto err_free;
                        if (t + 1 < insn_cnt)
                                env->explored_states[t + 1] = STATE_LIST_MARK;
                } else if (opcode == BPF_JA) {
                        if (BPF_SRC(insns[t].code) != BPF_K) {
                                ret = -EINVAL;
                                goto err_free;
                        }
                        /* unconditional jump with single edge */
                        ret = push_insn(t, t + insns[t].off + 1,
                                        FALLTHROUGH, env);
                        if (ret == 1)
                                goto peek_stack;
                        else if (ret < 0)
                                goto err_free;
                        /* tell verifier to check for equivalent states
                         * after every call and jump
                         */
                        if (t + 1 < insn_cnt)
                                env->explored_states[t + 1] = STATE_LIST_MARK;
                } else {
                        /* conditional jump with two edges */
                        ret = push_insn(t, t + 1, FALLTHROUGH, env);
                        if (ret == 1)
                                goto peek_stack;
                        else if (ret < 0)
                                goto err_free;

                        ret = push_insn(t, t + insns[t].off + 1, BRANCH, env);
                        if (ret == 1)
                                goto peek_stack;
                        else if (ret < 0)
                                goto err_free;
                }
        } else {
                /* all other non-branch instructions with single
                 * fall-through edge
                 */
                ret = push_insn(t, t + 1, FALLTHROUGH, env);
                if (ret == 1)
                        goto peek_stack;
                else if (ret < 0)
                        goto err_free;
        }

mark_explored:
        insn_state[t] = EXPLORED;
        if (cur_stack-- <= 0) {
                verbose("pop stack internal bug\n");
                ret = -EFAULT;
                goto err_free;
        }
        goto peek_stack;

check_state:
        for (i = 0; i < insn_cnt; i++) {
                if (insn_state[i] != EXPLORED) {
                        verbose("unreachable insn %d\n", i);
                        ret = -EINVAL;
                        goto err_free;
                }
        }
        ret = 0; /* cfg looks good */

err_free:
        kfree(insn_state);
        kfree(insn_stack);
        return ret;
}
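/* Example rejected here (an illustrative sketch): the smallest loop
 *
 *   0: BPF_MOV64_IMM(BPF_REG_0, 0),
 *   1: BPF_JMP_IMM(BPF_JA, 0, 0, -2),  // goto insn 0
 *   2: BPF_EXIT_INSN(),
 *
 * While insn 0 is still on the DFS stack as DISCOVERED, the BPF_JA edge
 * from insn 1 back to insn 0 hits the (insn_state[w] & 0xF0) == DISCOVERED
 * case in push_insn() and fails with "back-edge from insn 1 to 0".
 * Insn 2 would also be reported as unreachable if the walk got that far.
 */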
/* the following conditions reduce the number of explored insns
 * from ~140k to ~80k for ultra large programs that use a lot of ptr_to_packet
 */
static bool compare_ptrs_to_packet(struct reg_state *old, struct reg_state *cur)
{
        if (old->id != cur->id)
                return false;

        /* old ptr_to_packet is more conservative, since it allows smaller
         * range. Ex:
         * old(off=0,r=10) is equal to cur(off=0,r=20), because
         * old(off=0,r=10) means that with range=10 the verifier proceeded
         * further and found no issues with the program. Now we're in the same
         * spot with cur(off=0,r=20), so we're safe too, since anything further
         * will only be looking at most 10 bytes after this pointer.
         */
        if (old->off == cur->off && old->range < cur->range)
                return true;

        /* old(off=20,r=10) is equal to cur(off=22,r=22 or 5 or 0)
         * since both cannot be used for packet access and safe(old)
         * pointer has smaller off, which could be used for further
         * 'if (ptr > data_end)' check
         * Ex:
         * old(off=20,r=10) and cur(off=22,r=22) and cur(off=22,r=0) mean
         * that we cannot access the packet.
         * The safe range is:
         * [ptr, ptr + range - off)
         * so whenever off >= range, it means no safe bytes from this pointer.
         * When comparing old->off <= cur->off, it means that older code
         * went with smaller offset and that offset was later
         * used to figure out the safe range after 'if (ptr > data_end)' check
         * Say, 'old' state was explored like:
         * ... R3(off=0, r=0)
         * R4 = R3 + 20
         * ... now R4(off=20,r=0)  <-- here
         * if (R4 > data_end)
         * ... R4(off=20,r=20), R3(off=0,r=20) and R3 can be used to access.
         * ... the code further went all the way to bpf_exit.
         * Now the 'cur' state at the mark 'here' has R4(off=30,r=0).
         * old_R4(off=20,r=0) equal to cur_R4(off=30,r=0), since if the verifier
         * goes further, such cur_R4 will give larger safe packet range after
         * 'if (R4 > data_end)' and all further insns were already good with r=20,
         * so they will be good with r=30 and we can prune the search.
         */
        if (old->off <= cur->off &&
            old->off >= old->range && cur->off >= cur->range)
                return true;

        return false;
}

/* compare two verifier states
 *
 * all states stored in state_list are known to be valid, since
 * verifier reached 'bpf_exit' instruction through them
 *
 * this function is called when the verifier explores different branches of
 * execution popped from the state stack. If it sees an old state that has
 * more strict register state and more strict stack state then this execution
 * branch doesn't need to be explored further, since verifier already
 * concluded that more strict state leads to valid finish.
 *
 * Therefore two states are equivalent if register state is more conservative
 * and explored stack state is more conservative than the current one.
 * Example:
 *       explored                   current
 * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
 * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
 *
 * In other words if current stack state (one being explored) has more
 * valid slots than old one that already passed validation, it means
 * the verifier can stop exploring and conclude that current state is valid too
 *
 * Similarly with registers. If explored state has register type as invalid
 * whereas register type in current state is meaningful, it means that
 * the current state will reach 'bpf_exit' instruction safely
 */
static bool states_equal(struct verifier_state *old, struct verifier_state *cur)
{
        struct reg_state *rold, *rcur;
        int i;

        for (i = 0; i < MAX_BPF_REG; i++) {
                rold = &old->regs[i];
                rcur = &cur->regs[i];

                if (memcmp(rold, rcur, sizeof(*rold)) == 0)
                        continue;

                if (rold->type == NOT_INIT ||
                    (rold->type == UNKNOWN_VALUE && rcur->type != NOT_INIT))
                        continue;

                if (rold->type == PTR_TO_PACKET && rcur->type == PTR_TO_PACKET &&
                    compare_ptrs_to_packet(rold, rcur))
                        continue;

                return false;
        }

        for (i = 0; i < MAX_BPF_STACK; i++) {
                if (old->stack_slot_type[i] == STACK_INVALID)
                        continue;
                if (old->stack_slot_type[i] != cur->stack_slot_type[i])
                        /* Ex: old explored (safe) state has STACK_SPILL in
                         * this stack slot, but current has STACK_MISC ->
                         * these verifier states are not equivalent,
                         * return false to continue verification of this path
                         */
                        return false;
                if (i % BPF_REG_SIZE)
                        continue;
                if (memcmp(&old->spilled_regs[i / BPF_REG_SIZE],
                           &cur->spilled_regs[i / BPF_REG_SIZE],
                           sizeof(old->spilled_regs[0])))
                        /* when explored and current stack slot types are
                         * the same, check that stored pointers types
                         * are the same as well.
                         * Ex: explored safe path could have stored
                         * (struct reg_state) {.type = PTR_TO_STACK, .imm = -8}
                         * but current path has stored:
                         * (struct reg_state) {.type = PTR_TO_STACK, .imm = -16}
                         * such verifier states are not equivalent.
                         * return false to continue verification of this path
                         */
                        return false;
                else
                        continue;
        }
        return true;
}

static int is_state_visited(struct verifier_env *env, int insn_idx)
{
        struct verifier_state_list *new_sl;
        struct verifier_state_list *sl;

        sl = env->explored_states[insn_idx];
        if (!sl)
                /* this 'insn_idx' instruction wasn't marked, so we will not
                 * be doing state search here
                 */
                return 0;

        while (sl != STATE_LIST_MARK) {
                if (states_equal(&sl->state, &env->cur_state))
                        /* reached equivalent register/stack state,
                         * prune the search
                         */
                        return 1;
                sl = sl->next;
        }

        /* there were no equivalent states, remember current one.
         * technically the current state is not proven to be safe yet,
         * but it will either reach bpf_exit (which means it's safe) or
         * it will be rejected. Since there are no loops, we won't be
         * seeing this 'insn_idx' instruction again on the way to bpf_exit
         */
        new_sl = kmalloc(sizeof(struct verifier_state_list), GFP_USER);
        if (!new_sl)
                return -ENOMEM;

        /* add new state to the head of linked list */
        memcpy(&new_sl->state, &env->cur_state, sizeof(env->cur_state));
        new_sl->next = env->explored_states[insn_idx];
        env->explored_states[insn_idx] = new_sl;
        return 0;
}
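/* Pruning in action (an illustrative sketch; ctx offset 0 is assumed to be
 * the 32-bit skb->len field of 'struct __sk_buff'). Insn 3 is a branch
 * target, so check_cfg() marked explored_states[3]:
 *
 *   0: BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), // R0 = UNKNOWN_VALUE
 *   1: BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 100, 1),     // if R0 > 100 goto 3
 *   2: BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),        // R0 stays UNKNOWN_VALUE
 *   3: BPF_EXIT_INSN(),
 *
 * The fall-through path is walked first and records its state at insn 3
 * before reaching bpf_exit. When the branch path later arrives at insn 3,
 * states_equal() sees old R0 == UNKNOWN_VALUE while cur R0 is initialized,
 * treats the states as equivalent and prunes the search ("3: safe").
 */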
static int do_check(struct verifier_env *env)
{
        struct verifier_state *state = &env->cur_state;
        struct bpf_insn *insns = env->prog->insnsi;
        struct reg_state *regs = state->regs;
        int insn_cnt = env->prog->len;
        int insn_idx, prev_insn_idx = 0;
        int insn_processed = 0;
        bool do_print_state = false;

        init_reg_state(regs);
        insn_idx = 0;
        for (;;) {
                struct bpf_insn *insn;
                u8 class;
                int err;

                if (insn_idx >= insn_cnt) {
                        verbose("invalid insn idx %d insn_cnt %d\n",
                                insn_idx, insn_cnt);
                        return -EFAULT;
                }

                insn = &insns[insn_idx];
                class = BPF_CLASS(insn->code);

                if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
                        verbose("BPF program is too large. Processed %d insn\n",
                                insn_processed);
                        return -E2BIG;
                }

                err = is_state_visited(env, insn_idx);
                if (err < 0)
                        return err;
                if (err == 1) {
                        /* found equivalent state, can prune the search */
                        if (log_level) {
                                if (do_print_state)
                                        verbose("\nfrom %d to %d: safe\n",
                                                prev_insn_idx, insn_idx);
                                else
                                        verbose("%d: safe\n", insn_idx);
                        }
                        goto process_bpf_exit;
                }

                if (log_level && do_print_state) {
                        verbose("\nfrom %d to %d:", prev_insn_idx, insn_idx);
                        print_verifier_state(&env->cur_state);
                        do_print_state = false;
                }

                if (log_level) {
                        verbose("%d: ", insn_idx);
                        print_bpf_insn(insn);
                }

                if (class == BPF_ALU || class == BPF_ALU64) {
                        err = check_alu_op(env, insn);
                        if (err)
                                return err;

                } else if (class == BPF_LDX) {
                        enum bpf_reg_type src_reg_type;

                        /* check for reserved fields is already done */

                        /* check src operand */
                        err = check_reg_arg(regs, insn->src_reg, SRC_OP);
                        if (err)
                                return err;

                        err = check_reg_arg(regs, insn->dst_reg, DST_OP_NO_MARK);
                        if (err)
                                return err;

                        src_reg_type = regs[insn->src_reg].type;

                        /* check that memory (src_reg + off) is readable,
                         * the state of dst_reg will be updated by this func
                         */
                        err = check_mem_access(env, insn->src_reg, insn->off,
                                               BPF_SIZE(insn->code), BPF_READ,
                                               insn->dst_reg);
                        if (err)
                                return err;

                        if (BPF_SIZE(insn->code) != BPF_W &&
                            BPF_SIZE(insn->code) != BPF_DW) {
                                insn_idx++;
                                continue;
                        }

                        if (insn->imm == 0) {
                                /* saw a valid insn
                                 * dst_reg = *(u32 *)(src_reg + off)
                                 * use reserved 'imm' field to mark this insn
                                 */
                                insn->imm = src_reg_type;

                        } else if (src_reg_type != insn->imm &&
                                   (src_reg_type == PTR_TO_CTX ||
                                    insn->imm == PTR_TO_CTX)) {
                                /* An abuser program is trying to use the same insn
                                 * dst_reg = *(u32*) (src_reg + off)
                                 * with different pointer types:
                                 * src_reg == ctx in one branch and
                                 * src_reg == stack|map in some other branch.
                                 * Reject it.
                                 */
                                verbose("same insn cannot be used with different pointers\n");
                                return -EINVAL;
                        }
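                        /* Illustrative program that trips the check above
                         * (a sketch; ctx offset 0 is assumed to be the 32-bit
                         * skb->len field of 'struct __sk_buff'):
                         *
                         *   0: BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
                         *   1: BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),         // R2 = ctx
                         *   2: BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 0),
                         *   3: BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 100, 2),     // goto 6
                         *   4: BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
                         *   5: BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),       // R2 = fp-8
                         *   6: BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
                         *   7: BPF_EXIT_INSN(),
                         *
                         * The fall-through path reaches insn 6 first with
                         * R2 == PTR_TO_STACK and marks insn->imm accordingly;
                         * when the branch path arrives at the same insn with
                         * R2 == PTR_TO_CTX, the check above rejects the program.
                         */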

                } else if (class == BPF_STX) {
                        enum bpf_reg_type dst_reg_type;

                        if (BPF_MODE(insn->code) == BPF_XADD) {
                                err = check_xadd(env, insn);
                                if (err)
                                        return err;
                                insn_idx++;
                                continue;
                        }

                        /* check src1 operand */
                        err = check_reg_arg(regs, insn->src_reg, SRC_OP);
                        if (err)
                                return err;
                        /* check src2 operand */
                        err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
                        if (err)
                                return err;

                        dst_reg_type = regs[insn->dst_reg].type;

                        /* check that memory (dst_reg + off) is writeable */
                        err = check_mem_access(env, insn->dst_reg, insn->off,
                                               BPF_SIZE(insn->code), BPF_WRITE,
                                               insn->src_reg);
                        if (err)
                                return err;

                        if (insn->imm == 0) {
                                insn->imm = dst_reg_type;
                        } else if (dst_reg_type != insn->imm &&
                                   (dst_reg_type == PTR_TO_CTX ||
                                    insn->imm == PTR_TO_CTX)) {
                                verbose("same insn cannot be used with different pointers\n");
                                return -EINVAL;
                        }

                } else if (class == BPF_ST) {
                        if (BPF_MODE(insn->code) != BPF_MEM ||
                            insn->src_reg != BPF_REG_0) {
                                verbose("BPF_ST uses reserved fields\n");
                                return -EINVAL;
                        }
                        /* check src operand */
                        err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
                        if (err)
                                return err;

                        /* check that memory (dst_reg + off) is writeable */
                        err = check_mem_access(env, insn->dst_reg, insn->off,
                                               BPF_SIZE(insn->code), BPF_WRITE,
                                               -1);
                        if (err)
                                return err;

                } else if (class == BPF_JMP) {
                        u8 opcode = BPF_OP(insn->code);

                        if (opcode == BPF_CALL) {
                                if (BPF_SRC(insn->code) != BPF_K ||
                                    insn->off != 0 ||
                                    insn->src_reg != BPF_REG_0 ||
                                    insn->dst_reg != BPF_REG_0) {
                                        verbose("BPF_CALL uses reserved fields\n");
                                        return -EINVAL;
                                }

                                err = check_call(env, insn->imm);
                                if (err)
                                        return err;

                        } else if (opcode == BPF_JA) {
                                if (BPF_SRC(insn->code) != BPF_K ||
                                    insn->imm != 0 ||
                                    insn->src_reg != BPF_REG_0 ||
                                    insn->dst_reg != BPF_REG_0) {
                                        verbose("BPF_JA uses reserved fields\n");
                                        return -EINVAL;
                                }

                                insn_idx += insn->off + 1;
                                continue;

                        } else if (opcode == BPF_EXIT) {
                                if (BPF_SRC(insn->code) != BPF_K ||
                                    insn->imm != 0 ||
                                    insn->src_reg != BPF_REG_0 ||
                                    insn->dst_reg != BPF_REG_0) {
                                        verbose("BPF_EXIT uses reserved fields\n");
                                        return -EINVAL;
                                }

                                /* eBPF calling convention is such that R0 is used
                                 * to return the value from eBPF program.
                                 * Make sure that it's readable at this time
                                 * of bpf_exit, which means that program wrote
                                 * something into it earlier
                                 */
                                err = check_reg_arg(regs, BPF_REG_0, SRC_OP);
                                if (err)
                                        return err;

                                if (is_pointer_value(env, BPF_REG_0)) {
                                        verbose("R0 leaks addr as return value\n");
                                        return -EACCES;
                                }

process_bpf_exit:
                                insn_idx = pop_stack(env, &prev_insn_idx);
                                if (insn_idx < 0) {
                                        break;
                                } else {
                                        do_print_state = true;
                                        continue;
                                }
                        } else {
                                err = check_cond_jmp_op(env, insn, &insn_idx);
                                if (err)
                                        return err;
                        }
                } else if (class == BPF_LD) {
                        u8 mode = BPF_MODE(insn->code);

                        if (mode == BPF_ABS || mode == BPF_IND) {
                                err = check_ld_abs(env, insn);
                                if (err)
                                        return err;

                        } else if (mode == BPF_IMM) {
                                err = check_ld_imm(env, insn);
                                if (err)
                                        return err;

                                insn_idx++;
                        } else {
                                verbose("invalid BPF_LD mode\n");
                                return -EINVAL;
                        }
                } else {
                        verbose("unknown insn class %d\n", class);
                        return -EINVAL;
                }

                insn_idx++;
        }

        verbose("processed %d insns\n", insn_processed);
        return 0;
}

static int check_map_prog_compatibility(struct bpf_map *map,
                                        struct bpf_prog *prog)
{
        if (prog->type == BPF_PROG_TYPE_PERF_EVENT &&
            (map->map_type == BPF_MAP_TYPE_HASH ||
             map->map_type == BPF_MAP_TYPE_PERCPU_HASH) &&
            (map->map_flags & BPF_F_NO_PREALLOC)) {
                verbose("perf_event programs can only use preallocated hash map\n");
                return -EINVAL;
        }
        return 0;
}
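/* Illustrative user-space sketch of a combination this check rejects
 * (field names follow union bpf_attr; the sizes are assumptions):
 *
 *   union bpf_attr map_attr = {
 *           .map_type    = BPF_MAP_TYPE_HASH,
 *           .key_size    = 4,
 *           .value_size  = 8,
 *           .max_entries = 1024,
 *           .map_flags   = BPF_F_NO_PREALLOC,
 *   };
 *
 * Referencing the resulting map fd from a BPF_PROG_TYPE_PERF_EVENT program
 * makes replace_map_fd_with_map_ptr() below fail with -EINVAL via this
 * check, since such programs can run in contexts where the dynamic element
 * allocation done by a non-preallocated hash map is unsafe.
 */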
/* look for pseudo eBPF instructions that access map FDs and
 * replace them with actual map pointers
 */
static int replace_map_fd_with_map_ptr(struct verifier_env *env)
{
        struct bpf_insn *insn = env->prog->insnsi;
        int insn_cnt = env->prog->len;
        int i, j, err;

        for (i = 0; i < insn_cnt; i++, insn++) {
                if (BPF_CLASS(insn->code) == BPF_LDX &&
                    (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
                        verbose("BPF_LDX uses reserved fields\n");
                        return -EINVAL;
                }

                if (BPF_CLASS(insn->code) == BPF_STX &&
                    ((BPF_MODE(insn->code) != BPF_MEM &&
                      BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) {
                        verbose("BPF_STX uses reserved fields\n");
                        return -EINVAL;
                }

                if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
                        struct bpf_map *map;
                        struct fd f;

                        if (i == insn_cnt - 1 || insn[1].code != 0 ||
                            insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
                            insn[1].off != 0) {
                                verbose("invalid bpf_ld_imm64 insn\n");
                                return -EINVAL;
                        }

                        if (insn->src_reg == 0)
                                /* valid generic load 64-bit imm */
                                goto next_insn;

                        if (insn->src_reg != BPF_PSEUDO_MAP_FD) {
                                verbose("unrecognized bpf_ld_imm64 insn\n");
                                return -EINVAL;
                        }

                        f = fdget(insn->imm);
                        map = __bpf_map_get(f);
                        if (IS_ERR(map)) {
                                verbose("fd %d is not pointing to valid bpf_map\n",
                                        insn->imm);
                                return PTR_ERR(map);
                        }

                        err = check_map_prog_compatibility(map, env->prog);
                        if (err) {
                                fdput(f);
                                return err;
                        }

                        /* store map pointer inside BPF_LD_IMM64 instruction */
                        insn[0].imm = (u32) (unsigned long) map;
                        insn[1].imm = ((u64) (unsigned long) map) >> 32;

                        /* check whether we recorded this map already */
                        for (j = 0; j < env->used_map_cnt; j++)
                                if (env->used_maps[j] == map) {
                                        fdput(f);
                                        goto next_insn;
                                }

                        if (env->used_map_cnt >= MAX_USED_MAPS) {
                                fdput(f);
                                return -E2BIG;
                        }

                        /* hold the map. If the program is rejected by verifier,
                         * the map will be released by release_maps() or it
                         * will be used by the valid program until it's unloaded
                         * and all maps are released in free_bpf_prog_info()
                         */
                        map = bpf_map_inc(map, false);
                        if (IS_ERR(map)) {
                                fdput(f);
                                return PTR_ERR(map);
                        }
                        env->used_maps[env->used_map_cnt++] = map;

                        fdput(f);
next_insn:
                        insn++;
                        i++;
                }
        }

        /* now all pseudo BPF_LD_IMM64 instructions load valid
         * 'struct bpf_map *' into a register instead of user map_fd.
         * These pointers will be used later by verifier to validate map access.
         */
        return 0;
}
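/* The pseudo instruction handled above is what BPF_LD_MAP_FD() emits
 * (an illustrative sketch; 'map_fd' is a user-supplied file descriptor):
 *
 *   BPF_LD_MAP_FD(BPF_REG_1, map_fd),
 *
 * which expands to a two-insn BPF_LD | BPF_DW | BPF_IMM pair with
 * src_reg == BPF_PSEUDO_MAP_FD and the fd in insn[0].imm. After this pass
 * insn[0].imm/insn[1].imm hold the low/high 32 bits of the map pointer,
 * which ld_imm64_to_map_ptr() reassembles during do_check().
 */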
/* drop refcnt of maps used by the rejected program */
static void release_maps(struct verifier_env *env)
{
        int i;

        for (i = 0; i < env->used_map_cnt; i++)
                bpf_map_put(env->used_maps[i]);
}

/* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
static void convert_pseudo_ld_imm64(struct verifier_env *env)
{
        struct bpf_insn *insn = env->prog->insnsi;
        int insn_cnt = env->prog->len;
        int i;

        for (i = 0; i < insn_cnt; i++, insn++)
                if (insn->code == (BPF_LD | BPF_IMM | BPF_DW))
                        insn->src_reg = 0;
}

/* convert load instructions that access fields of 'struct __sk_buff'
 * into a sequence of instructions that access fields of 'struct sk_buff'
 */
static int convert_ctx_accesses(struct verifier_env *env)
{
        struct bpf_insn *insn = env->prog->insnsi;
        int insn_cnt = env->prog->len;
        struct bpf_insn insn_buf[16];
        struct bpf_prog *new_prog;
        enum bpf_access_type type;
        int i;

        if (!env->prog->aux->ops->convert_ctx_access)
                return 0;

        for (i = 0; i < insn_cnt; i++, insn++) {
                u32 insn_delta, cnt;

                if (insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
                    insn->code == (BPF_LDX | BPF_MEM | BPF_DW))
                        type = BPF_READ;
                else if (insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
                         insn->code == (BPF_STX | BPF_MEM | BPF_DW))
                        type = BPF_WRITE;
                else
                        continue;

                if (insn->imm != PTR_TO_CTX) {
                        /* clear internal mark */
                        insn->imm = 0;
                        continue;
                }

                cnt = env->prog->aux->ops->
                        convert_ctx_access(type, insn->dst_reg, insn->src_reg,
                                           insn->off, insn_buf, env->prog);
                if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
                        verbose("bpf verifier is misconfigured\n");
                        return -EINVAL;
                }

                new_prog = bpf_patch_insn_single(env->prog, i, insn_buf, cnt);
                if (!new_prog)
                        return -ENOMEM;

                insn_delta = cnt - 1;

                /* keep walking new program and skip insns we just inserted */
                env->prog = new_prog;
                insn = new_prog->insnsi + i + insn_delta;

                insn_cnt += insn_delta;
                i += insn_delta;
        }

        return 0;
}

static void free_states(struct verifier_env *env)
{
        struct verifier_state_list *sl, *sln;
        int i;

        if (!env->explored_states)
                return;

        for (i = 0; i < env->prog->len; i++) {
                sl = env->explored_states[i];

                if (sl)
                        while (sl != STATE_LIST_MARK) {
                                sln = sl->next;
                                kfree(sl);
                                sl = sln;
                        }
        }

        kfree(env->explored_states);
}

int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
{
        char __user *log_ubuf = NULL;
        struct verifier_env *env;
        int ret = -EINVAL;

        if ((*prog)->len <= 0 || (*prog)->len > BPF_MAXINSNS)
                return -E2BIG;

        /* 'struct verifier_env' can be global, but since it's not small,
         * allocate/free it every time bpf_check() is called
         */
        env = kzalloc(sizeof(struct verifier_env), GFP_KERNEL);
        if (!env)
                return -ENOMEM;

        env->prog = *prog;

        /* grab the mutex to protect the few globals used by verifier */
        mutex_lock(&bpf_verifier_lock);

        if (attr->log_level || attr->log_buf || attr->log_size) {
                /* user requested verbose verifier output
                 * and supplied buffer to store the verification trace
                 */
                log_level = attr->log_level;
                log_ubuf = (char __user *) (unsigned long) attr->log_buf;
                log_size = attr->log_size;
                log_len = 0;

                ret = -EINVAL;
                /* log_* values have to be sane */
                if (log_size < 128 || log_size > UINT_MAX >> 8 ||
                    log_level == 0 || log_ubuf == NULL)
                        goto free_env;

                ret = -ENOMEM;
                log_buf = vmalloc(log_size);
                if (!log_buf)
                        goto free_env;
        } else {
                log_level = 0;
        }

        ret = replace_map_fd_with_map_ptr(env);
        if (ret < 0)
                goto skip_full_check;

        env->explored_states = kcalloc(env->prog->len,
                                       sizeof(struct verifier_state_list *),
                                       GFP_USER);
        ret = -ENOMEM;
        if (!env->explored_states)
                goto skip_full_check;

        ret = check_cfg(env);
        if (ret < 0)
                goto skip_full_check;

        env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);

        ret = do_check(env);

skip_full_check:
        while (pop_stack(env, NULL) >= 0);
        free_states(env);

        if (ret == 0)
                /* program is valid, convert *(u32*)(ctx + off) accesses */
                ret = convert_ctx_accesses(env);

        if (log_level && log_len >= log_size - 1) {
                BUG_ON(log_len >= log_size);
                /* verifier log exceeded user supplied buffer */
                ret = -ENOSPC;
                /* fall through to return what was recorded */
        }

        /* copy verifier log back to user space including trailing zero */
        if (log_level && copy_to_user(log_ubuf, log_buf, log_len + 1) != 0) {
                ret = -EFAULT;
                goto free_log_buf;
        }

        if (ret == 0 && env->used_map_cnt) {
                /* if program passed verifier, update used_maps in bpf_prog_info */
                env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
                                                          sizeof(env->used_maps[0]),
                                                          GFP_KERNEL);

                if (!env->prog->aux->used_maps) {
                        ret = -ENOMEM;
                        goto free_log_buf;
                }

                memcpy(env->prog->aux->used_maps, env->used_maps,
                       sizeof(env->used_maps[0]) * env->used_map_cnt);
                env->prog->aux->used_map_cnt = env->used_map_cnt;

                /* program is valid. Convert pseudo bpf_ld_imm64 into generic
                 * bpf_ld_imm64 instructions
                 */
                convert_pseudo_ld_imm64(env);
        }

free_log_buf:
        if (log_level)
                vfree(log_buf);
free_env:
        if (!env->prog->aux->used_maps)
                /* if we didn't copy map pointers into bpf_prog_info, release
                 * them now. Otherwise free_bpf_prog_info() will release them.
                 */
                release_maps(env);
        *prog = env->prog;
        kfree(env);
        mutex_unlock(&bpf_verifier_lock);
        return ret;
}
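/* Illustrative user-space sketch of requesting the verifier log that
 * bpf_check() validates above (field names follow union bpf_attr; the
 * buffer size is an assumption, it only has to be >= 128 and fit in
 * UINT_MAX >> 8):
 *
 *   char log_buf[65536];
 *
 *   union bpf_attr attr = {
 *           .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
 *           .insns     = (__u64) (unsigned long) prog_insns,
 *           .insn_cnt  = prog_len,
 *           .license   = (__u64) (unsigned long) "GPL",
 *           .log_buf   = (__u64) (unsigned long) log_buf,
 *           .log_size  = sizeof(log_buf),
 *           .log_level = 1,
 *   };
 *
 * On rejection the syscall returns an error and log_buf contains the
 * verifier trace; if the trace outgrows the buffer the result is -ENOSPC.
 */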