1 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com 2 * Copyright (c) 2016 Facebook 3 * 4 * This program is free software; you can redistribute it and/or 5 * modify it under the terms of version 2 of the GNU General Public 6 * License as published by the Free Software Foundation. 7 * 8 * This program is distributed in the hope that it will be useful, but 9 * WITHOUT ANY WARRANTY; without even the implied warranty of 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 * General Public License for more details. 12 */ 13 #include <linux/kernel.h> 14 #include <linux/types.h> 15 #include <linux/slab.h> 16 #include <linux/bpf.h> 17 #include <linux/bpf_verifier.h> 18 #include <linux/filter.h> 19 #include <net/netlink.h> 20 #include <linux/file.h> 21 #include <linux/vmalloc.h> 22 #include <linux/stringify.h> 23 24 /* bpf_check() is a static code analyzer that walks eBPF program 25 * instruction by instruction and updates register/stack state. 26 * All paths of conditional branches are analyzed until 'bpf_exit' insn. 27 * 28 * The first pass is depth-first-search to check that the program is a DAG. 29 * It rejects the following programs: 30 * - larger than BPF_MAXINSNS insns 31 * - if loop is present (detected via back-edge) 32 * - unreachable insns exist (shouldn't be a forest. program = one function) 33 * - out of bounds or malformed jumps 34 * The second pass is all possible path descent from the 1st insn. 35 * Since it's analyzing all pathes through the program, the length of the 36 * analysis is limited to 32k insn, which may be hit even if total number of 37 * insn is less then 4K, but there are too many branches that change stack/regs. 38 * Number of 'branches to be analyzed' is limited to 1k 39 * 40 * On entry to each instruction, each register has a type, and the instruction 41 * changes the types of the registers depending on instruction semantics. 42 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is 43 * copied to R1. 44 * 45 * All registers are 64-bit. 46 * R0 - return register 47 * R1-R5 argument passing registers 48 * R6-R9 callee saved registers 49 * R10 - frame pointer read-only 50 * 51 * At the start of BPF program the register R1 contains a pointer to bpf_context 52 * and has type PTR_TO_CTX. 53 * 54 * Verifier tracks arithmetic operations on pointers in case: 55 * BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 56 * BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20), 57 * 1st insn copies R10 (which has FRAME_PTR) type into R1 58 * and 2nd arithmetic instruction is pattern matched to recognize 59 * that it wants to construct a pointer to some element within stack. 60 * So after 2nd insn, the register R1 has type PTR_TO_STACK 61 * (and -20 constant is saved for further stack bounds checking). 62 * Meaning that this reg is a pointer to stack plus known immediate constant. 63 * 64 * Most of the time the registers have UNKNOWN_VALUE type, which 65 * means the register has some value, but it's not a valid pointer. 66 * (like pointer plus pointer becomes UNKNOWN_VALUE type) 67 * 68 * When verifier sees load or store instructions the type of base register 69 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, FRAME_PTR. These are three pointer 70 * types recognized by check_mem_access() function. 71 * 72 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value' 73 * and the range of [ptr, ptr + map's value_size) is accessible. 74 * 75 * registers used to pass values to function calls are checked against 76 * function argument constraints. 77 * 78 * ARG_PTR_TO_MAP_KEY is one of such argument constraints. 79 * It means that the register type passed to this function must be 80 * PTR_TO_STACK and it will be used inside the function as 81 * 'pointer to map element key' 82 * 83 * For example the argument constraints for bpf_map_lookup_elem(): 84 * .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL, 85 * .arg1_type = ARG_CONST_MAP_PTR, 86 * .arg2_type = ARG_PTR_TO_MAP_KEY, 87 * 88 * ret_type says that this function returns 'pointer to map elem value or null' 89 * function expects 1st argument to be a const pointer to 'struct bpf_map' and 90 * 2nd argument should be a pointer to stack, which will be used inside 91 * the helper function as a pointer to map element key. 92 * 93 * On the kernel side the helper function looks like: 94 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) 95 * { 96 * struct bpf_map *map = (struct bpf_map *) (unsigned long) r1; 97 * void *key = (void *) (unsigned long) r2; 98 * void *value; 99 * 100 * here kernel can access 'key' and 'map' pointers safely, knowing that 101 * [key, key + map->key_size) bytes are valid and were initialized on 102 * the stack of eBPF program. 103 * } 104 * 105 * Corresponding eBPF program may look like: 106 * BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), // after this insn R2 type is FRAME_PTR 107 * BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK 108 * BPF_LD_MAP_FD(BPF_REG_1, map_fd), // after this insn R1 type is CONST_PTR_TO_MAP 109 * BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 110 * here verifier looks at prototype of map_lookup_elem() and sees: 111 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok, 112 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes 113 * 114 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far, 115 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits 116 * and were initialized prior to this call. 117 * If it's ok, then verifier allows this BPF_CALL insn and looks at 118 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets 119 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function 120 * returns ether pointer to map value or NULL. 121 * 122 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off' 123 * insn, the register holding that pointer in the true branch changes state to 124 * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false 125 * branch. See check_cond_jmp_op(). 126 * 127 * After the call R0 is set to return type of the function and registers R1-R5 128 * are set to NOT_INIT to indicate that they are no longer readable. 129 */ 130 131 /* verifier_state + insn_idx are pushed to stack when branch is encountered */ 132 struct bpf_verifier_stack_elem { 133 /* verifer state is 'st' 134 * before processing instruction 'insn_idx' 135 * and after processing instruction 'prev_insn_idx' 136 */ 137 struct bpf_verifier_state st; 138 int insn_idx; 139 int prev_insn_idx; 140 struct bpf_verifier_stack_elem *next; 141 }; 142 143 #define BPF_COMPLEXITY_LIMIT_INSNS 65536 144 #define BPF_COMPLEXITY_LIMIT_STACK 1024 145 146 struct bpf_call_arg_meta { 147 struct bpf_map *map_ptr; 148 bool raw_mode; 149 bool pkt_access; 150 int regno; 151 int access_size; 152 }; 153 154 /* verbose verifier prints what it's seeing 155 * bpf_check() is called under lock, so no race to access these global vars 156 */ 157 static u32 log_level, log_size, log_len; 158 static char *log_buf; 159 160 static DEFINE_MUTEX(bpf_verifier_lock); 161 162 /* log_level controls verbosity level of eBPF verifier. 163 * verbose() is used to dump the verification trace to the log, so the user 164 * can figure out what's wrong with the program 165 */ 166 static __printf(1, 2) void verbose(const char *fmt, ...) 167 { 168 va_list args; 169 170 if (log_level == 0 || log_len >= log_size - 1) 171 return; 172 173 va_start(args, fmt); 174 log_len += vscnprintf(log_buf + log_len, log_size - log_len, fmt, args); 175 va_end(args); 176 } 177 178 /* string representation of 'enum bpf_reg_type' */ 179 static const char * const reg_type_str[] = { 180 [NOT_INIT] = "?", 181 [UNKNOWN_VALUE] = "inv", 182 [PTR_TO_CTX] = "ctx", 183 [CONST_PTR_TO_MAP] = "map_ptr", 184 [PTR_TO_MAP_VALUE] = "map_value", 185 [PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null", 186 [PTR_TO_MAP_VALUE_ADJ] = "map_value_adj", 187 [FRAME_PTR] = "fp", 188 [PTR_TO_STACK] = "fp", 189 [CONST_IMM] = "imm", 190 [PTR_TO_PACKET] = "pkt", 191 [PTR_TO_PACKET_END] = "pkt_end", 192 }; 193 194 #define __BPF_FUNC_STR_FN(x) [BPF_FUNC_ ## x] = __stringify(bpf_ ## x) 195 static const char * const func_id_str[] = { 196 __BPF_FUNC_MAPPER(__BPF_FUNC_STR_FN) 197 }; 198 #undef __BPF_FUNC_STR_FN 199 200 static const char *func_id_name(int id) 201 { 202 BUILD_BUG_ON(ARRAY_SIZE(func_id_str) != __BPF_FUNC_MAX_ID); 203 204 if (id >= 0 && id < __BPF_FUNC_MAX_ID && func_id_str[id]) 205 return func_id_str[id]; 206 else 207 return "unknown"; 208 } 209 210 static void print_verifier_state(struct bpf_verifier_state *state) 211 { 212 struct bpf_reg_state *reg; 213 enum bpf_reg_type t; 214 int i; 215 216 for (i = 0; i < MAX_BPF_REG; i++) { 217 reg = &state->regs[i]; 218 t = reg->type; 219 if (t == NOT_INIT) 220 continue; 221 verbose(" R%d=%s", i, reg_type_str[t]); 222 if (t == CONST_IMM || t == PTR_TO_STACK) 223 verbose("%lld", reg->imm); 224 else if (t == PTR_TO_PACKET) 225 verbose("(id=%d,off=%d,r=%d)", 226 reg->id, reg->off, reg->range); 227 else if (t == UNKNOWN_VALUE && reg->imm) 228 verbose("%lld", reg->imm); 229 else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE || 230 t == PTR_TO_MAP_VALUE_OR_NULL || 231 t == PTR_TO_MAP_VALUE_ADJ) 232 verbose("(ks=%d,vs=%d,id=%u)", 233 reg->map_ptr->key_size, 234 reg->map_ptr->value_size, 235 reg->id); 236 if (reg->min_value != BPF_REGISTER_MIN_RANGE) 237 verbose(",min_value=%lld", 238 (long long)reg->min_value); 239 if (reg->max_value != BPF_REGISTER_MAX_RANGE) 240 verbose(",max_value=%llu", 241 (unsigned long long)reg->max_value); 242 } 243 for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { 244 if (state->stack_slot_type[i] == STACK_SPILL) 245 verbose(" fp%d=%s", -MAX_BPF_STACK + i, 246 reg_type_str[state->spilled_regs[i / BPF_REG_SIZE].type]); 247 } 248 verbose("\n"); 249 } 250 251 static const char *const bpf_class_string[] = { 252 [BPF_LD] = "ld", 253 [BPF_LDX] = "ldx", 254 [BPF_ST] = "st", 255 [BPF_STX] = "stx", 256 [BPF_ALU] = "alu", 257 [BPF_JMP] = "jmp", 258 [BPF_RET] = "BUG", 259 [BPF_ALU64] = "alu64", 260 }; 261 262 static const char *const bpf_alu_string[16] = { 263 [BPF_ADD >> 4] = "+=", 264 [BPF_SUB >> 4] = "-=", 265 [BPF_MUL >> 4] = "*=", 266 [BPF_DIV >> 4] = "/=", 267 [BPF_OR >> 4] = "|=", 268 [BPF_AND >> 4] = "&=", 269 [BPF_LSH >> 4] = "<<=", 270 [BPF_RSH >> 4] = ">>=", 271 [BPF_NEG >> 4] = "neg", 272 [BPF_MOD >> 4] = "%=", 273 [BPF_XOR >> 4] = "^=", 274 [BPF_MOV >> 4] = "=", 275 [BPF_ARSH >> 4] = "s>>=", 276 [BPF_END >> 4] = "endian", 277 }; 278 279 static const char *const bpf_ldst_string[] = { 280 [BPF_W >> 3] = "u32", 281 [BPF_H >> 3] = "u16", 282 [BPF_B >> 3] = "u8", 283 [BPF_DW >> 3] = "u64", 284 }; 285 286 static const char *const bpf_jmp_string[16] = { 287 [BPF_JA >> 4] = "jmp", 288 [BPF_JEQ >> 4] = "==", 289 [BPF_JGT >> 4] = ">", 290 [BPF_JGE >> 4] = ">=", 291 [BPF_JSET >> 4] = "&", 292 [BPF_JNE >> 4] = "!=", 293 [BPF_JSGT >> 4] = "s>", 294 [BPF_JSGE >> 4] = "s>=", 295 [BPF_CALL >> 4] = "call", 296 [BPF_EXIT >> 4] = "exit", 297 }; 298 299 static void print_bpf_insn(struct bpf_insn *insn) 300 { 301 u8 class = BPF_CLASS(insn->code); 302 303 if (class == BPF_ALU || class == BPF_ALU64) { 304 if (BPF_SRC(insn->code) == BPF_X) 305 verbose("(%02x) %sr%d %s %sr%d\n", 306 insn->code, class == BPF_ALU ? "(u32) " : "", 307 insn->dst_reg, 308 bpf_alu_string[BPF_OP(insn->code) >> 4], 309 class == BPF_ALU ? "(u32) " : "", 310 insn->src_reg); 311 else 312 verbose("(%02x) %sr%d %s %s%d\n", 313 insn->code, class == BPF_ALU ? "(u32) " : "", 314 insn->dst_reg, 315 bpf_alu_string[BPF_OP(insn->code) >> 4], 316 class == BPF_ALU ? "(u32) " : "", 317 insn->imm); 318 } else if (class == BPF_STX) { 319 if (BPF_MODE(insn->code) == BPF_MEM) 320 verbose("(%02x) *(%s *)(r%d %+d) = r%d\n", 321 insn->code, 322 bpf_ldst_string[BPF_SIZE(insn->code) >> 3], 323 insn->dst_reg, 324 insn->off, insn->src_reg); 325 else if (BPF_MODE(insn->code) == BPF_XADD) 326 verbose("(%02x) lock *(%s *)(r%d %+d) += r%d\n", 327 insn->code, 328 bpf_ldst_string[BPF_SIZE(insn->code) >> 3], 329 insn->dst_reg, insn->off, 330 insn->src_reg); 331 else 332 verbose("BUG_%02x\n", insn->code); 333 } else if (class == BPF_ST) { 334 if (BPF_MODE(insn->code) != BPF_MEM) { 335 verbose("BUG_st_%02x\n", insn->code); 336 return; 337 } 338 verbose("(%02x) *(%s *)(r%d %+d) = %d\n", 339 insn->code, 340 bpf_ldst_string[BPF_SIZE(insn->code) >> 3], 341 insn->dst_reg, 342 insn->off, insn->imm); 343 } else if (class == BPF_LDX) { 344 if (BPF_MODE(insn->code) != BPF_MEM) { 345 verbose("BUG_ldx_%02x\n", insn->code); 346 return; 347 } 348 verbose("(%02x) r%d = *(%s *)(r%d %+d)\n", 349 insn->code, insn->dst_reg, 350 bpf_ldst_string[BPF_SIZE(insn->code) >> 3], 351 insn->src_reg, insn->off); 352 } else if (class == BPF_LD) { 353 if (BPF_MODE(insn->code) == BPF_ABS) { 354 verbose("(%02x) r0 = *(%s *)skb[%d]\n", 355 insn->code, 356 bpf_ldst_string[BPF_SIZE(insn->code) >> 3], 357 insn->imm); 358 } else if (BPF_MODE(insn->code) == BPF_IND) { 359 verbose("(%02x) r0 = *(%s *)skb[r%d + %d]\n", 360 insn->code, 361 bpf_ldst_string[BPF_SIZE(insn->code) >> 3], 362 insn->src_reg, insn->imm); 363 } else if (BPF_MODE(insn->code) == BPF_IMM) { 364 verbose("(%02x) r%d = 0x%x\n", 365 insn->code, insn->dst_reg, insn->imm); 366 } else { 367 verbose("BUG_ld_%02x\n", insn->code); 368 return; 369 } 370 } else if (class == BPF_JMP) { 371 u8 opcode = BPF_OP(insn->code); 372 373 if (opcode == BPF_CALL) { 374 verbose("(%02x) call %s#%d\n", insn->code, 375 func_id_name(insn->imm), insn->imm); 376 } else if (insn->code == (BPF_JMP | BPF_JA)) { 377 verbose("(%02x) goto pc%+d\n", 378 insn->code, insn->off); 379 } else if (insn->code == (BPF_JMP | BPF_EXIT)) { 380 verbose("(%02x) exit\n", insn->code); 381 } else if (BPF_SRC(insn->code) == BPF_X) { 382 verbose("(%02x) if r%d %s r%d goto pc%+d\n", 383 insn->code, insn->dst_reg, 384 bpf_jmp_string[BPF_OP(insn->code) >> 4], 385 insn->src_reg, insn->off); 386 } else { 387 verbose("(%02x) if r%d %s 0x%x goto pc%+d\n", 388 insn->code, insn->dst_reg, 389 bpf_jmp_string[BPF_OP(insn->code) >> 4], 390 insn->imm, insn->off); 391 } 392 } else { 393 verbose("(%02x) %s\n", insn->code, bpf_class_string[class]); 394 } 395 } 396 397 static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx) 398 { 399 struct bpf_verifier_stack_elem *elem; 400 int insn_idx; 401 402 if (env->head == NULL) 403 return -1; 404 405 memcpy(&env->cur_state, &env->head->st, sizeof(env->cur_state)); 406 insn_idx = env->head->insn_idx; 407 if (prev_insn_idx) 408 *prev_insn_idx = env->head->prev_insn_idx; 409 elem = env->head->next; 410 kfree(env->head); 411 env->head = elem; 412 env->stack_size--; 413 return insn_idx; 414 } 415 416 static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env, 417 int insn_idx, int prev_insn_idx) 418 { 419 struct bpf_verifier_stack_elem *elem; 420 421 elem = kmalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL); 422 if (!elem) 423 goto err; 424 425 memcpy(&elem->st, &env->cur_state, sizeof(env->cur_state)); 426 elem->insn_idx = insn_idx; 427 elem->prev_insn_idx = prev_insn_idx; 428 elem->next = env->head; 429 env->head = elem; 430 env->stack_size++; 431 if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) { 432 verbose("BPF program is too complex\n"); 433 goto err; 434 } 435 return &elem->st; 436 err: 437 /* pop all elements and return */ 438 while (pop_stack(env, NULL) >= 0); 439 return NULL; 440 } 441 442 #define CALLER_SAVED_REGS 6 443 static const int caller_saved[CALLER_SAVED_REGS] = { 444 BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5 445 }; 446 447 static void init_reg_state(struct bpf_reg_state *regs) 448 { 449 int i; 450 451 for (i = 0; i < MAX_BPF_REG; i++) { 452 regs[i].type = NOT_INIT; 453 regs[i].imm = 0; 454 regs[i].min_value = BPF_REGISTER_MIN_RANGE; 455 regs[i].max_value = BPF_REGISTER_MAX_RANGE; 456 } 457 458 /* frame pointer */ 459 regs[BPF_REG_FP].type = FRAME_PTR; 460 461 /* 1st arg to a function */ 462 regs[BPF_REG_1].type = PTR_TO_CTX; 463 } 464 465 static void __mark_reg_unknown_value(struct bpf_reg_state *regs, u32 regno) 466 { 467 regs[regno].type = UNKNOWN_VALUE; 468 regs[regno].id = 0; 469 regs[regno].imm = 0; 470 } 471 472 static void mark_reg_unknown_value(struct bpf_reg_state *regs, u32 regno) 473 { 474 BUG_ON(regno >= MAX_BPF_REG); 475 __mark_reg_unknown_value(regs, regno); 476 } 477 478 static void reset_reg_range_values(struct bpf_reg_state *regs, u32 regno) 479 { 480 regs[regno].min_value = BPF_REGISTER_MIN_RANGE; 481 regs[regno].max_value = BPF_REGISTER_MAX_RANGE; 482 } 483 484 enum reg_arg_type { 485 SRC_OP, /* register is used as source operand */ 486 DST_OP, /* register is used as destination operand */ 487 DST_OP_NO_MARK /* same as above, check only, don't mark */ 488 }; 489 490 static int check_reg_arg(struct bpf_reg_state *regs, u32 regno, 491 enum reg_arg_type t) 492 { 493 if (regno >= MAX_BPF_REG) { 494 verbose("R%d is invalid\n", regno); 495 return -EINVAL; 496 } 497 498 if (t == SRC_OP) { 499 /* check whether register used as source operand can be read */ 500 if (regs[regno].type == NOT_INIT) { 501 verbose("R%d !read_ok\n", regno); 502 return -EACCES; 503 } 504 } else { 505 /* check whether register used as dest operand can be written to */ 506 if (regno == BPF_REG_FP) { 507 verbose("frame pointer is read only\n"); 508 return -EACCES; 509 } 510 if (t == DST_OP) 511 mark_reg_unknown_value(regs, regno); 512 } 513 return 0; 514 } 515 516 static int bpf_size_to_bytes(int bpf_size) 517 { 518 if (bpf_size == BPF_W) 519 return 4; 520 else if (bpf_size == BPF_H) 521 return 2; 522 else if (bpf_size == BPF_B) 523 return 1; 524 else if (bpf_size == BPF_DW) 525 return 8; 526 else 527 return -EINVAL; 528 } 529 530 static bool is_spillable_regtype(enum bpf_reg_type type) 531 { 532 switch (type) { 533 case PTR_TO_MAP_VALUE: 534 case PTR_TO_MAP_VALUE_OR_NULL: 535 case PTR_TO_STACK: 536 case PTR_TO_CTX: 537 case PTR_TO_PACKET: 538 case PTR_TO_PACKET_END: 539 case FRAME_PTR: 540 case CONST_PTR_TO_MAP: 541 return true; 542 default: 543 return false; 544 } 545 } 546 547 /* check_stack_read/write functions track spill/fill of registers, 548 * stack boundary and alignment are checked in check_mem_access() 549 */ 550 static int check_stack_write(struct bpf_verifier_state *state, int off, 551 int size, int value_regno) 552 { 553 int i; 554 /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0, 555 * so it's aligned access and [off, off + size) are within stack limits 556 */ 557 558 if (value_regno >= 0 && 559 is_spillable_regtype(state->regs[value_regno].type)) { 560 561 /* register containing pointer is being spilled into stack */ 562 if (size != BPF_REG_SIZE) { 563 verbose("invalid size of register spill\n"); 564 return -EACCES; 565 } 566 567 /* save register state */ 568 state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] = 569 state->regs[value_regno]; 570 571 for (i = 0; i < BPF_REG_SIZE; i++) 572 state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_SPILL; 573 } else { 574 /* regular write of data into stack */ 575 state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] = 576 (struct bpf_reg_state) {}; 577 578 for (i = 0; i < size; i++) 579 state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_MISC; 580 } 581 return 0; 582 } 583 584 static int check_stack_read(struct bpf_verifier_state *state, int off, int size, 585 int value_regno) 586 { 587 u8 *slot_type; 588 int i; 589 590 slot_type = &state->stack_slot_type[MAX_BPF_STACK + off]; 591 592 if (slot_type[0] == STACK_SPILL) { 593 if (size != BPF_REG_SIZE) { 594 verbose("invalid size of register spill\n"); 595 return -EACCES; 596 } 597 for (i = 1; i < BPF_REG_SIZE; i++) { 598 if (slot_type[i] != STACK_SPILL) { 599 verbose("corrupted spill memory\n"); 600 return -EACCES; 601 } 602 } 603 604 if (value_regno >= 0) 605 /* restore register state from stack */ 606 state->regs[value_regno] = 607 state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE]; 608 return 0; 609 } else { 610 for (i = 0; i < size; i++) { 611 if (slot_type[i] != STACK_MISC) { 612 verbose("invalid read from stack off %d+%d size %d\n", 613 off, i, size); 614 return -EACCES; 615 } 616 } 617 if (value_regno >= 0) 618 /* have read misc data from the stack */ 619 mark_reg_unknown_value(state->regs, value_regno); 620 return 0; 621 } 622 } 623 624 /* check read/write into map element returned by bpf_map_lookup_elem() */ 625 static int check_map_access(struct bpf_verifier_env *env, u32 regno, int off, 626 int size) 627 { 628 struct bpf_map *map = env->cur_state.regs[regno].map_ptr; 629 630 if (off < 0 || off + size > map->value_size) { 631 verbose("invalid access to map value, value_size=%d off=%d size=%d\n", 632 map->value_size, off, size); 633 return -EACCES; 634 } 635 return 0; 636 } 637 638 #define MAX_PACKET_OFF 0xffff 639 640 static bool may_access_direct_pkt_data(struct bpf_verifier_env *env, 641 const struct bpf_call_arg_meta *meta, 642 enum bpf_access_type t) 643 { 644 switch (env->prog->type) { 645 case BPF_PROG_TYPE_LWT_IN: 646 case BPF_PROG_TYPE_LWT_OUT: 647 /* dst_input() and dst_output() can't write for now */ 648 if (t == BPF_WRITE) 649 return false; 650 case BPF_PROG_TYPE_SCHED_CLS: 651 case BPF_PROG_TYPE_SCHED_ACT: 652 case BPF_PROG_TYPE_XDP: 653 case BPF_PROG_TYPE_LWT_XMIT: 654 if (meta) 655 return meta->pkt_access; 656 657 env->seen_direct_write = true; 658 return true; 659 default: 660 return false; 661 } 662 } 663 664 static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, 665 int size) 666 { 667 struct bpf_reg_state *regs = env->cur_state.regs; 668 struct bpf_reg_state *reg = ®s[regno]; 669 670 off += reg->off; 671 if (off < 0 || size <= 0 || off + size > reg->range) { 672 verbose("invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n", 673 off, size, regno, reg->id, reg->off, reg->range); 674 return -EACCES; 675 } 676 return 0; 677 } 678 679 /* check access to 'struct bpf_context' fields */ 680 static int check_ctx_access(struct bpf_verifier_env *env, int off, int size, 681 enum bpf_access_type t, enum bpf_reg_type *reg_type) 682 { 683 /* for analyzer ctx accesses are already validated and converted */ 684 if (env->analyzer_ops) 685 return 0; 686 687 if (env->prog->aux->ops->is_valid_access && 688 env->prog->aux->ops->is_valid_access(off, size, t, reg_type)) { 689 /* remember the offset of last byte accessed in ctx */ 690 if (env->prog->aux->max_ctx_offset < off + size) 691 env->prog->aux->max_ctx_offset = off + size; 692 return 0; 693 } 694 695 verbose("invalid bpf_context access off=%d size=%d\n", off, size); 696 return -EACCES; 697 } 698 699 static bool is_pointer_value(struct bpf_verifier_env *env, int regno) 700 { 701 if (env->allow_ptr_leaks) 702 return false; 703 704 switch (env->cur_state.regs[regno].type) { 705 case UNKNOWN_VALUE: 706 case CONST_IMM: 707 return false; 708 default: 709 return true; 710 } 711 } 712 713 static int check_ptr_alignment(struct bpf_verifier_env *env, 714 struct bpf_reg_state *reg, int off, int size) 715 { 716 if (reg->type != PTR_TO_PACKET && reg->type != PTR_TO_MAP_VALUE_ADJ) { 717 if (off % size != 0) { 718 verbose("misaligned access off %d size %d\n", 719 off, size); 720 return -EACCES; 721 } else { 722 return 0; 723 } 724 } 725 726 if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) 727 /* misaligned access to packet is ok on x86,arm,arm64 */ 728 return 0; 729 730 if (reg->id && size != 1) { 731 verbose("Unknown packet alignment. Only byte-sized access allowed\n"); 732 return -EACCES; 733 } 734 735 /* skb->data is NET_IP_ALIGN-ed */ 736 if (reg->type == PTR_TO_PACKET && 737 (NET_IP_ALIGN + reg->off + off) % size != 0) { 738 verbose("misaligned packet access off %d+%d+%d size %d\n", 739 NET_IP_ALIGN, reg->off, off, size); 740 return -EACCES; 741 } 742 return 0; 743 } 744 745 /* check whether memory at (regno + off) is accessible for t = (read | write) 746 * if t==write, value_regno is a register which value is stored into memory 747 * if t==read, value_regno is a register which will receive the value from memory 748 * if t==write && value_regno==-1, some unknown value is stored into memory 749 * if t==read && value_regno==-1, don't care what we read from memory 750 */ 751 static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off, 752 int bpf_size, enum bpf_access_type t, 753 int value_regno) 754 { 755 struct bpf_verifier_state *state = &env->cur_state; 756 struct bpf_reg_state *reg = &state->regs[regno]; 757 int size, err = 0; 758 759 if (reg->type == PTR_TO_STACK) 760 off += reg->imm; 761 762 size = bpf_size_to_bytes(bpf_size); 763 if (size < 0) 764 return size; 765 766 err = check_ptr_alignment(env, reg, off, size); 767 if (err) 768 return err; 769 770 if (reg->type == PTR_TO_MAP_VALUE || 771 reg->type == PTR_TO_MAP_VALUE_ADJ) { 772 if (t == BPF_WRITE && value_regno >= 0 && 773 is_pointer_value(env, value_regno)) { 774 verbose("R%d leaks addr into map\n", value_regno); 775 return -EACCES; 776 } 777 778 /* If we adjusted the register to this map value at all then we 779 * need to change off and size to min_value and max_value 780 * respectively to make sure our theoretical access will be 781 * safe. 782 */ 783 if (reg->type == PTR_TO_MAP_VALUE_ADJ) { 784 if (log_level) 785 print_verifier_state(state); 786 env->varlen_map_value_access = true; 787 /* The minimum value is only important with signed 788 * comparisons where we can't assume the floor of a 789 * value is 0. If we are using signed variables for our 790 * index'es we need to make sure that whatever we use 791 * will have a set floor within our range. 792 */ 793 if (reg->min_value < 0) { 794 verbose("R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", 795 regno); 796 return -EACCES; 797 } 798 err = check_map_access(env, regno, reg->min_value + off, 799 size); 800 if (err) { 801 verbose("R%d min value is outside of the array range\n", 802 regno); 803 return err; 804 } 805 806 /* If we haven't set a max value then we need to bail 807 * since we can't be sure we won't do bad things. 808 */ 809 if (reg->max_value == BPF_REGISTER_MAX_RANGE) { 810 verbose("R%d unbounded memory access, make sure to bounds check any array access into a map\n", 811 regno); 812 return -EACCES; 813 } 814 off += reg->max_value; 815 } 816 err = check_map_access(env, regno, off, size); 817 if (!err && t == BPF_READ && value_regno >= 0) 818 mark_reg_unknown_value(state->regs, value_regno); 819 820 } else if (reg->type == PTR_TO_CTX) { 821 enum bpf_reg_type reg_type = UNKNOWN_VALUE; 822 823 if (t == BPF_WRITE && value_regno >= 0 && 824 is_pointer_value(env, value_regno)) { 825 verbose("R%d leaks addr into ctx\n", value_regno); 826 return -EACCES; 827 } 828 err = check_ctx_access(env, off, size, t, ®_type); 829 if (!err && t == BPF_READ && value_regno >= 0) { 830 mark_reg_unknown_value(state->regs, value_regno); 831 /* note that reg.[id|off|range] == 0 */ 832 state->regs[value_regno].type = reg_type; 833 } 834 835 } else if (reg->type == FRAME_PTR || reg->type == PTR_TO_STACK) { 836 if (off >= 0 || off < -MAX_BPF_STACK) { 837 verbose("invalid stack off=%d size=%d\n", off, size); 838 return -EACCES; 839 } 840 if (t == BPF_WRITE) { 841 if (!env->allow_ptr_leaks && 842 state->stack_slot_type[MAX_BPF_STACK + off] == STACK_SPILL && 843 size != BPF_REG_SIZE) { 844 verbose("attempt to corrupt spilled pointer on stack\n"); 845 return -EACCES; 846 } 847 err = check_stack_write(state, off, size, value_regno); 848 } else { 849 err = check_stack_read(state, off, size, value_regno); 850 } 851 } else if (state->regs[regno].type == PTR_TO_PACKET) { 852 if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) { 853 verbose("cannot write into packet\n"); 854 return -EACCES; 855 } 856 if (t == BPF_WRITE && value_regno >= 0 && 857 is_pointer_value(env, value_regno)) { 858 verbose("R%d leaks addr into packet\n", value_regno); 859 return -EACCES; 860 } 861 err = check_packet_access(env, regno, off, size); 862 if (!err && t == BPF_READ && value_regno >= 0) 863 mark_reg_unknown_value(state->regs, value_regno); 864 } else { 865 verbose("R%d invalid mem access '%s'\n", 866 regno, reg_type_str[reg->type]); 867 return -EACCES; 868 } 869 870 if (!err && size <= 2 && value_regno >= 0 && env->allow_ptr_leaks && 871 state->regs[value_regno].type == UNKNOWN_VALUE) { 872 /* 1 or 2 byte load zero-extends, determine the number of 873 * zero upper bits. Not doing it fo 4 byte load, since 874 * such values cannot be added to ptr_to_packet anyway. 875 */ 876 state->regs[value_regno].imm = 64 - size * 8; 877 } 878 return err; 879 } 880 881 static int check_xadd(struct bpf_verifier_env *env, struct bpf_insn *insn) 882 { 883 struct bpf_reg_state *regs = env->cur_state.regs; 884 int err; 885 886 if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) || 887 insn->imm != 0) { 888 verbose("BPF_XADD uses reserved fields\n"); 889 return -EINVAL; 890 } 891 892 /* check src1 operand */ 893 err = check_reg_arg(regs, insn->src_reg, SRC_OP); 894 if (err) 895 return err; 896 897 /* check src2 operand */ 898 err = check_reg_arg(regs, insn->dst_reg, SRC_OP); 899 if (err) 900 return err; 901 902 /* check whether atomic_add can read the memory */ 903 err = check_mem_access(env, insn->dst_reg, insn->off, 904 BPF_SIZE(insn->code), BPF_READ, -1); 905 if (err) 906 return err; 907 908 /* check whether atomic_add can write into the same memory */ 909 return check_mem_access(env, insn->dst_reg, insn->off, 910 BPF_SIZE(insn->code), BPF_WRITE, -1); 911 } 912 913 /* when register 'regno' is passed into function that will read 'access_size' 914 * bytes from that pointer, make sure that it's within stack boundary 915 * and all elements of stack are initialized 916 */ 917 static int check_stack_boundary(struct bpf_verifier_env *env, int regno, 918 int access_size, bool zero_size_allowed, 919 struct bpf_call_arg_meta *meta) 920 { 921 struct bpf_verifier_state *state = &env->cur_state; 922 struct bpf_reg_state *regs = state->regs; 923 int off, i; 924 925 if (regs[regno].type != PTR_TO_STACK) { 926 if (zero_size_allowed && access_size == 0 && 927 regs[regno].type == CONST_IMM && 928 regs[regno].imm == 0) 929 return 0; 930 931 verbose("R%d type=%s expected=%s\n", regno, 932 reg_type_str[regs[regno].type], 933 reg_type_str[PTR_TO_STACK]); 934 return -EACCES; 935 } 936 937 off = regs[regno].imm; 938 if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 || 939 access_size <= 0) { 940 verbose("invalid stack type R%d off=%d access_size=%d\n", 941 regno, off, access_size); 942 return -EACCES; 943 } 944 945 if (meta && meta->raw_mode) { 946 meta->access_size = access_size; 947 meta->regno = regno; 948 return 0; 949 } 950 951 for (i = 0; i < access_size; i++) { 952 if (state->stack_slot_type[MAX_BPF_STACK + off + i] != STACK_MISC) { 953 verbose("invalid indirect read from stack off %d+%d size %d\n", 954 off, i, access_size); 955 return -EACCES; 956 } 957 } 958 return 0; 959 } 960 961 static int check_func_arg(struct bpf_verifier_env *env, u32 regno, 962 enum bpf_arg_type arg_type, 963 struct bpf_call_arg_meta *meta) 964 { 965 struct bpf_reg_state *regs = env->cur_state.regs, *reg = ®s[regno]; 966 enum bpf_reg_type expected_type, type = reg->type; 967 int err = 0; 968 969 if (arg_type == ARG_DONTCARE) 970 return 0; 971 972 if (type == NOT_INIT) { 973 verbose("R%d !read_ok\n", regno); 974 return -EACCES; 975 } 976 977 if (arg_type == ARG_ANYTHING) { 978 if (is_pointer_value(env, regno)) { 979 verbose("R%d leaks addr into helper function\n", regno); 980 return -EACCES; 981 } 982 return 0; 983 } 984 985 if (type == PTR_TO_PACKET && 986 !may_access_direct_pkt_data(env, meta, BPF_READ)) { 987 verbose("helper access to the packet is not allowed\n"); 988 return -EACCES; 989 } 990 991 if (arg_type == ARG_PTR_TO_MAP_KEY || 992 arg_type == ARG_PTR_TO_MAP_VALUE) { 993 expected_type = PTR_TO_STACK; 994 if (type != PTR_TO_PACKET && type != expected_type) 995 goto err_type; 996 } else if (arg_type == ARG_CONST_STACK_SIZE || 997 arg_type == ARG_CONST_STACK_SIZE_OR_ZERO) { 998 expected_type = CONST_IMM; 999 if (type != expected_type) 1000 goto err_type; 1001 } else if (arg_type == ARG_CONST_MAP_PTR) { 1002 expected_type = CONST_PTR_TO_MAP; 1003 if (type != expected_type) 1004 goto err_type; 1005 } else if (arg_type == ARG_PTR_TO_CTX) { 1006 expected_type = PTR_TO_CTX; 1007 if (type != expected_type) 1008 goto err_type; 1009 } else if (arg_type == ARG_PTR_TO_STACK || 1010 arg_type == ARG_PTR_TO_RAW_STACK) { 1011 expected_type = PTR_TO_STACK; 1012 /* One exception here. In case function allows for NULL to be 1013 * passed in as argument, it's a CONST_IMM type. Final test 1014 * happens during stack boundary checking. 1015 */ 1016 if (type == CONST_IMM && reg->imm == 0) 1017 /* final test in check_stack_boundary() */; 1018 else if (type != PTR_TO_PACKET && type != expected_type) 1019 goto err_type; 1020 meta->raw_mode = arg_type == ARG_PTR_TO_RAW_STACK; 1021 } else { 1022 verbose("unsupported arg_type %d\n", arg_type); 1023 return -EFAULT; 1024 } 1025 1026 if (arg_type == ARG_CONST_MAP_PTR) { 1027 /* bpf_map_xxx(map_ptr) call: remember that map_ptr */ 1028 meta->map_ptr = reg->map_ptr; 1029 } else if (arg_type == ARG_PTR_TO_MAP_KEY) { 1030 /* bpf_map_xxx(..., map_ptr, ..., key) call: 1031 * check that [key, key + map->key_size) are within 1032 * stack limits and initialized 1033 */ 1034 if (!meta->map_ptr) { 1035 /* in function declaration map_ptr must come before 1036 * map_key, so that it's verified and known before 1037 * we have to check map_key here. Otherwise it means 1038 * that kernel subsystem misconfigured verifier 1039 */ 1040 verbose("invalid map_ptr to access map->key\n"); 1041 return -EACCES; 1042 } 1043 if (type == PTR_TO_PACKET) 1044 err = check_packet_access(env, regno, 0, 1045 meta->map_ptr->key_size); 1046 else 1047 err = check_stack_boundary(env, regno, 1048 meta->map_ptr->key_size, 1049 false, NULL); 1050 } else if (arg_type == ARG_PTR_TO_MAP_VALUE) { 1051 /* bpf_map_xxx(..., map_ptr, ..., value) call: 1052 * check [value, value + map->value_size) validity 1053 */ 1054 if (!meta->map_ptr) { 1055 /* kernel subsystem misconfigured verifier */ 1056 verbose("invalid map_ptr to access map->value\n"); 1057 return -EACCES; 1058 } 1059 if (type == PTR_TO_PACKET) 1060 err = check_packet_access(env, regno, 0, 1061 meta->map_ptr->value_size); 1062 else 1063 err = check_stack_boundary(env, regno, 1064 meta->map_ptr->value_size, 1065 false, NULL); 1066 } else if (arg_type == ARG_CONST_STACK_SIZE || 1067 arg_type == ARG_CONST_STACK_SIZE_OR_ZERO) { 1068 bool zero_size_allowed = (arg_type == ARG_CONST_STACK_SIZE_OR_ZERO); 1069 1070 /* bpf_xxx(..., buf, len) call will access 'len' bytes 1071 * from stack pointer 'buf'. Check it 1072 * note: regno == len, regno - 1 == buf 1073 */ 1074 if (regno == 0) { 1075 /* kernel subsystem misconfigured verifier */ 1076 verbose("ARG_CONST_STACK_SIZE cannot be first argument\n"); 1077 return -EACCES; 1078 } 1079 if (regs[regno - 1].type == PTR_TO_PACKET) 1080 err = check_packet_access(env, regno - 1, 0, reg->imm); 1081 else 1082 err = check_stack_boundary(env, regno - 1, reg->imm, 1083 zero_size_allowed, meta); 1084 } 1085 1086 return err; 1087 err_type: 1088 verbose("R%d type=%s expected=%s\n", regno, 1089 reg_type_str[type], reg_type_str[expected_type]); 1090 return -EACCES; 1091 } 1092 1093 static int check_map_func_compatibility(struct bpf_map *map, int func_id) 1094 { 1095 if (!map) 1096 return 0; 1097 1098 /* We need a two way check, first is from map perspective ... */ 1099 switch (map->map_type) { 1100 case BPF_MAP_TYPE_PROG_ARRAY: 1101 if (func_id != BPF_FUNC_tail_call) 1102 goto error; 1103 break; 1104 case BPF_MAP_TYPE_PERF_EVENT_ARRAY: 1105 if (func_id != BPF_FUNC_perf_event_read && 1106 func_id != BPF_FUNC_perf_event_output) 1107 goto error; 1108 break; 1109 case BPF_MAP_TYPE_STACK_TRACE: 1110 if (func_id != BPF_FUNC_get_stackid) 1111 goto error; 1112 break; 1113 case BPF_MAP_TYPE_CGROUP_ARRAY: 1114 if (func_id != BPF_FUNC_skb_under_cgroup && 1115 func_id != BPF_FUNC_current_task_under_cgroup) 1116 goto error; 1117 break; 1118 default: 1119 break; 1120 } 1121 1122 /* ... and second from the function itself. */ 1123 switch (func_id) { 1124 case BPF_FUNC_tail_call: 1125 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY) 1126 goto error; 1127 break; 1128 case BPF_FUNC_perf_event_read: 1129 case BPF_FUNC_perf_event_output: 1130 if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) 1131 goto error; 1132 break; 1133 case BPF_FUNC_get_stackid: 1134 if (map->map_type != BPF_MAP_TYPE_STACK_TRACE) 1135 goto error; 1136 break; 1137 case BPF_FUNC_current_task_under_cgroup: 1138 case BPF_FUNC_skb_under_cgroup: 1139 if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY) 1140 goto error; 1141 break; 1142 default: 1143 break; 1144 } 1145 1146 return 0; 1147 error: 1148 verbose("cannot pass map_type %d into func %s#%d\n", 1149 map->map_type, func_id_name(func_id), func_id); 1150 return -EINVAL; 1151 } 1152 1153 static int check_raw_mode(const struct bpf_func_proto *fn) 1154 { 1155 int count = 0; 1156 1157 if (fn->arg1_type == ARG_PTR_TO_RAW_STACK) 1158 count++; 1159 if (fn->arg2_type == ARG_PTR_TO_RAW_STACK) 1160 count++; 1161 if (fn->arg3_type == ARG_PTR_TO_RAW_STACK) 1162 count++; 1163 if (fn->arg4_type == ARG_PTR_TO_RAW_STACK) 1164 count++; 1165 if (fn->arg5_type == ARG_PTR_TO_RAW_STACK) 1166 count++; 1167 1168 return count > 1 ? -EINVAL : 0; 1169 } 1170 1171 static void clear_all_pkt_pointers(struct bpf_verifier_env *env) 1172 { 1173 struct bpf_verifier_state *state = &env->cur_state; 1174 struct bpf_reg_state *regs = state->regs, *reg; 1175 int i; 1176 1177 for (i = 0; i < MAX_BPF_REG; i++) 1178 if (regs[i].type == PTR_TO_PACKET || 1179 regs[i].type == PTR_TO_PACKET_END) 1180 mark_reg_unknown_value(regs, i); 1181 1182 for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { 1183 if (state->stack_slot_type[i] != STACK_SPILL) 1184 continue; 1185 reg = &state->spilled_regs[i / BPF_REG_SIZE]; 1186 if (reg->type != PTR_TO_PACKET && 1187 reg->type != PTR_TO_PACKET_END) 1188 continue; 1189 reg->type = UNKNOWN_VALUE; 1190 reg->imm = 0; 1191 } 1192 } 1193 1194 static int check_call(struct bpf_verifier_env *env, int func_id) 1195 { 1196 struct bpf_verifier_state *state = &env->cur_state; 1197 const struct bpf_func_proto *fn = NULL; 1198 struct bpf_reg_state *regs = state->regs; 1199 struct bpf_reg_state *reg; 1200 struct bpf_call_arg_meta meta; 1201 bool changes_data; 1202 int i, err; 1203 1204 /* find function prototype */ 1205 if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) { 1206 verbose("invalid func %s#%d\n", func_id_name(func_id), func_id); 1207 return -EINVAL; 1208 } 1209 1210 if (env->prog->aux->ops->get_func_proto) 1211 fn = env->prog->aux->ops->get_func_proto(func_id); 1212 1213 if (!fn) { 1214 verbose("unknown func %s#%d\n", func_id_name(func_id), func_id); 1215 return -EINVAL; 1216 } 1217 1218 /* eBPF programs must be GPL compatible to use GPL-ed functions */ 1219 if (!env->prog->gpl_compatible && fn->gpl_only) { 1220 verbose("cannot call GPL only function from proprietary program\n"); 1221 return -EINVAL; 1222 } 1223 1224 changes_data = bpf_helper_changes_pkt_data(fn->func); 1225 1226 memset(&meta, 0, sizeof(meta)); 1227 meta.pkt_access = fn->pkt_access; 1228 1229 /* We only support one arg being in raw mode at the moment, which 1230 * is sufficient for the helper functions we have right now. 1231 */ 1232 err = check_raw_mode(fn); 1233 if (err) { 1234 verbose("kernel subsystem misconfigured func %s#%d\n", 1235 func_id_name(func_id), func_id); 1236 return err; 1237 } 1238 1239 /* check args */ 1240 err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta); 1241 if (err) 1242 return err; 1243 err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta); 1244 if (err) 1245 return err; 1246 err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta); 1247 if (err) 1248 return err; 1249 err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &meta); 1250 if (err) 1251 return err; 1252 err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &meta); 1253 if (err) 1254 return err; 1255 1256 /* Mark slots with STACK_MISC in case of raw mode, stack offset 1257 * is inferred from register state. 1258 */ 1259 for (i = 0; i < meta.access_size; i++) { 1260 err = check_mem_access(env, meta.regno, i, BPF_B, BPF_WRITE, -1); 1261 if (err) 1262 return err; 1263 } 1264 1265 /* reset caller saved regs */ 1266 for (i = 0; i < CALLER_SAVED_REGS; i++) { 1267 reg = regs + caller_saved[i]; 1268 reg->type = NOT_INIT; 1269 reg->imm = 0; 1270 } 1271 1272 /* update return register */ 1273 if (fn->ret_type == RET_INTEGER) { 1274 regs[BPF_REG_0].type = UNKNOWN_VALUE; 1275 } else if (fn->ret_type == RET_VOID) { 1276 regs[BPF_REG_0].type = NOT_INIT; 1277 } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) { 1278 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL; 1279 regs[BPF_REG_0].max_value = regs[BPF_REG_0].min_value = 0; 1280 /* remember map_ptr, so that check_map_access() 1281 * can check 'value_size' boundary of memory access 1282 * to map element returned from bpf_map_lookup_elem() 1283 */ 1284 if (meta.map_ptr == NULL) { 1285 verbose("kernel subsystem misconfigured verifier\n"); 1286 return -EINVAL; 1287 } 1288 regs[BPF_REG_0].map_ptr = meta.map_ptr; 1289 regs[BPF_REG_0].id = ++env->id_gen; 1290 } else { 1291 verbose("unknown return type %d of func %s#%d\n", 1292 fn->ret_type, func_id_name(func_id), func_id); 1293 return -EINVAL; 1294 } 1295 1296 err = check_map_func_compatibility(meta.map_ptr, func_id); 1297 if (err) 1298 return err; 1299 1300 if (changes_data) 1301 clear_all_pkt_pointers(env); 1302 return 0; 1303 } 1304 1305 static int check_packet_ptr_add(struct bpf_verifier_env *env, 1306 struct bpf_insn *insn) 1307 { 1308 struct bpf_reg_state *regs = env->cur_state.regs; 1309 struct bpf_reg_state *dst_reg = ®s[insn->dst_reg]; 1310 struct bpf_reg_state *src_reg = ®s[insn->src_reg]; 1311 struct bpf_reg_state tmp_reg; 1312 s32 imm; 1313 1314 if (BPF_SRC(insn->code) == BPF_K) { 1315 /* pkt_ptr += imm */ 1316 imm = insn->imm; 1317 1318 add_imm: 1319 if (imm <= 0) { 1320 verbose("addition of negative constant to packet pointer is not allowed\n"); 1321 return -EACCES; 1322 } 1323 if (imm >= MAX_PACKET_OFF || 1324 imm + dst_reg->off >= MAX_PACKET_OFF) { 1325 verbose("constant %d is too large to add to packet pointer\n", 1326 imm); 1327 return -EACCES; 1328 } 1329 /* a constant was added to pkt_ptr. 1330 * Remember it while keeping the same 'id' 1331 */ 1332 dst_reg->off += imm; 1333 } else { 1334 if (src_reg->type == PTR_TO_PACKET) { 1335 /* R6=pkt(id=0,off=0,r=62) R7=imm22; r7 += r6 */ 1336 tmp_reg = *dst_reg; /* save r7 state */ 1337 *dst_reg = *src_reg; /* copy pkt_ptr state r6 into r7 */ 1338 src_reg = &tmp_reg; /* pretend it's src_reg state */ 1339 /* if the checks below reject it, the copy won't matter, 1340 * since we're rejecting the whole program. If all ok, 1341 * then imm22 state will be added to r7 1342 * and r7 will be pkt(id=0,off=22,r=62) while 1343 * r6 will stay as pkt(id=0,off=0,r=62) 1344 */ 1345 } 1346 1347 if (src_reg->type == CONST_IMM) { 1348 /* pkt_ptr += reg where reg is known constant */ 1349 imm = src_reg->imm; 1350 goto add_imm; 1351 } 1352 /* disallow pkt_ptr += reg 1353 * if reg is not uknown_value with guaranteed zero upper bits 1354 * otherwise pkt_ptr may overflow and addition will become 1355 * subtraction which is not allowed 1356 */ 1357 if (src_reg->type != UNKNOWN_VALUE) { 1358 verbose("cannot add '%s' to ptr_to_packet\n", 1359 reg_type_str[src_reg->type]); 1360 return -EACCES; 1361 } 1362 if (src_reg->imm < 48) { 1363 verbose("cannot add integer value with %lld upper zero bits to ptr_to_packet\n", 1364 src_reg->imm); 1365 return -EACCES; 1366 } 1367 /* dst_reg stays as pkt_ptr type and since some positive 1368 * integer value was added to the pointer, increment its 'id' 1369 */ 1370 dst_reg->id = ++env->id_gen; 1371 1372 /* something was added to pkt_ptr, set range and off to zero */ 1373 dst_reg->off = 0; 1374 dst_reg->range = 0; 1375 } 1376 return 0; 1377 } 1378 1379 static int evaluate_reg_alu(struct bpf_verifier_env *env, struct bpf_insn *insn) 1380 { 1381 struct bpf_reg_state *regs = env->cur_state.regs; 1382 struct bpf_reg_state *dst_reg = ®s[insn->dst_reg]; 1383 u8 opcode = BPF_OP(insn->code); 1384 s64 imm_log2; 1385 1386 /* for type == UNKNOWN_VALUE: 1387 * imm > 0 -> number of zero upper bits 1388 * imm == 0 -> don't track which is the same as all bits can be non-zero 1389 */ 1390 1391 if (BPF_SRC(insn->code) == BPF_X) { 1392 struct bpf_reg_state *src_reg = ®s[insn->src_reg]; 1393 1394 if (src_reg->type == UNKNOWN_VALUE && src_reg->imm > 0 && 1395 dst_reg->imm && opcode == BPF_ADD) { 1396 /* dreg += sreg 1397 * where both have zero upper bits. Adding them 1398 * can only result making one more bit non-zero 1399 * in the larger value. 1400 * Ex. 0xffff (imm=48) + 1 (imm=63) = 0x10000 (imm=47) 1401 * 0xffff (imm=48) + 0xffff = 0x1fffe (imm=47) 1402 */ 1403 dst_reg->imm = min(dst_reg->imm, src_reg->imm); 1404 dst_reg->imm--; 1405 return 0; 1406 } 1407 if (src_reg->type == CONST_IMM && src_reg->imm > 0 && 1408 dst_reg->imm && opcode == BPF_ADD) { 1409 /* dreg += sreg 1410 * where dreg has zero upper bits and sreg is const. 1411 * Adding them can only result making one more bit 1412 * non-zero in the larger value. 1413 */ 1414 imm_log2 = __ilog2_u64((long long)src_reg->imm); 1415 dst_reg->imm = min(dst_reg->imm, 63 - imm_log2); 1416 dst_reg->imm--; 1417 return 0; 1418 } 1419 /* all other cases non supported yet, just mark dst_reg */ 1420 dst_reg->imm = 0; 1421 return 0; 1422 } 1423 1424 /* sign extend 32-bit imm into 64-bit to make sure that 1425 * negative values occupy bit 63. Note ilog2() would have 1426 * been incorrect, since sizeof(insn->imm) == 4 1427 */ 1428 imm_log2 = __ilog2_u64((long long)insn->imm); 1429 1430 if (dst_reg->imm && opcode == BPF_LSH) { 1431 /* reg <<= imm 1432 * if reg was a result of 2 byte load, then its imm == 48 1433 * which means that upper 48 bits are zero and shifting this reg 1434 * left by 4 would mean that upper 44 bits are still zero 1435 */ 1436 dst_reg->imm -= insn->imm; 1437 } else if (dst_reg->imm && opcode == BPF_MUL) { 1438 /* reg *= imm 1439 * if multiplying by 14 subtract 4 1440 * This is conservative calculation of upper zero bits. 1441 * It's not trying to special case insn->imm == 1 or 0 cases 1442 */ 1443 dst_reg->imm -= imm_log2 + 1; 1444 } else if (opcode == BPF_AND) { 1445 /* reg &= imm */ 1446 dst_reg->imm = 63 - imm_log2; 1447 } else if (dst_reg->imm && opcode == BPF_ADD) { 1448 /* reg += imm */ 1449 dst_reg->imm = min(dst_reg->imm, 63 - imm_log2); 1450 dst_reg->imm--; 1451 } else if (opcode == BPF_RSH) { 1452 /* reg >>= imm 1453 * which means that after right shift, upper bits will be zero 1454 * note that verifier already checked that 1455 * 0 <= imm < 64 for shift insn 1456 */ 1457 dst_reg->imm += insn->imm; 1458 if (unlikely(dst_reg->imm > 64)) 1459 /* some dumb code did: 1460 * r2 = *(u32 *)mem; 1461 * r2 >>= 32; 1462 * and all bits are zero now */ 1463 dst_reg->imm = 64; 1464 } else { 1465 /* all other alu ops, means that we don't know what will 1466 * happen to the value, mark it with unknown number of zero bits 1467 */ 1468 dst_reg->imm = 0; 1469 } 1470 1471 if (dst_reg->imm < 0) { 1472 /* all 64 bits of the register can contain non-zero bits 1473 * and such value cannot be added to ptr_to_packet, since it 1474 * may overflow, mark it as unknown to avoid further eval 1475 */ 1476 dst_reg->imm = 0; 1477 } 1478 return 0; 1479 } 1480 1481 static int evaluate_reg_imm_alu(struct bpf_verifier_env *env, 1482 struct bpf_insn *insn) 1483 { 1484 struct bpf_reg_state *regs = env->cur_state.regs; 1485 struct bpf_reg_state *dst_reg = ®s[insn->dst_reg]; 1486 struct bpf_reg_state *src_reg = ®s[insn->src_reg]; 1487 u8 opcode = BPF_OP(insn->code); 1488 1489 /* dst_reg->type == CONST_IMM here, simulate execution of 'add'/'or' 1490 * insn. Don't care about overflow or negative values, just add them 1491 */ 1492 if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_K) 1493 dst_reg->imm += insn->imm; 1494 else if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_X && 1495 src_reg->type == CONST_IMM) 1496 dst_reg->imm += src_reg->imm; 1497 else if (opcode == BPF_OR && BPF_SRC(insn->code) == BPF_K) 1498 dst_reg->imm |= insn->imm; 1499 else if (opcode == BPF_OR && BPF_SRC(insn->code) == BPF_X && 1500 src_reg->type == CONST_IMM) 1501 dst_reg->imm |= src_reg->imm; 1502 else 1503 mark_reg_unknown_value(regs, insn->dst_reg); 1504 return 0; 1505 } 1506 1507 static void check_reg_overflow(struct bpf_reg_state *reg) 1508 { 1509 if (reg->max_value > BPF_REGISTER_MAX_RANGE) 1510 reg->max_value = BPF_REGISTER_MAX_RANGE; 1511 if (reg->min_value < BPF_REGISTER_MIN_RANGE || 1512 reg->min_value > BPF_REGISTER_MAX_RANGE) 1513 reg->min_value = BPF_REGISTER_MIN_RANGE; 1514 } 1515 1516 static void adjust_reg_min_max_vals(struct bpf_verifier_env *env, 1517 struct bpf_insn *insn) 1518 { 1519 struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg; 1520 s64 min_val = BPF_REGISTER_MIN_RANGE; 1521 u64 max_val = BPF_REGISTER_MAX_RANGE; 1522 u8 opcode = BPF_OP(insn->code); 1523 1524 dst_reg = ®s[insn->dst_reg]; 1525 if (BPF_SRC(insn->code) == BPF_X) { 1526 check_reg_overflow(®s[insn->src_reg]); 1527 min_val = regs[insn->src_reg].min_value; 1528 max_val = regs[insn->src_reg].max_value; 1529 1530 /* If the source register is a random pointer then the 1531 * min_value/max_value values represent the range of the known 1532 * accesses into that value, not the actual min/max value of the 1533 * register itself. In this case we have to reset the reg range 1534 * values so we know it is not safe to look at. 1535 */ 1536 if (regs[insn->src_reg].type != CONST_IMM && 1537 regs[insn->src_reg].type != UNKNOWN_VALUE) { 1538 min_val = BPF_REGISTER_MIN_RANGE; 1539 max_val = BPF_REGISTER_MAX_RANGE; 1540 } 1541 } else if (insn->imm < BPF_REGISTER_MAX_RANGE && 1542 (s64)insn->imm > BPF_REGISTER_MIN_RANGE) { 1543 min_val = max_val = insn->imm; 1544 } 1545 1546 /* We don't know anything about what was done to this register, mark it 1547 * as unknown. 1548 */ 1549 if (min_val == BPF_REGISTER_MIN_RANGE && 1550 max_val == BPF_REGISTER_MAX_RANGE) { 1551 reset_reg_range_values(regs, insn->dst_reg); 1552 return; 1553 } 1554 1555 /* If one of our values was at the end of our ranges then we can't just 1556 * do our normal operations to the register, we need to set the values 1557 * to the min/max since they are undefined. 1558 */ 1559 if (min_val == BPF_REGISTER_MIN_RANGE) 1560 dst_reg->min_value = BPF_REGISTER_MIN_RANGE; 1561 if (max_val == BPF_REGISTER_MAX_RANGE) 1562 dst_reg->max_value = BPF_REGISTER_MAX_RANGE; 1563 1564 switch (opcode) { 1565 case BPF_ADD: 1566 if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) 1567 dst_reg->min_value += min_val; 1568 if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) 1569 dst_reg->max_value += max_val; 1570 break; 1571 case BPF_SUB: 1572 if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) 1573 dst_reg->min_value -= min_val; 1574 if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) 1575 dst_reg->max_value -= max_val; 1576 break; 1577 case BPF_MUL: 1578 if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) 1579 dst_reg->min_value *= min_val; 1580 if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) 1581 dst_reg->max_value *= max_val; 1582 break; 1583 case BPF_AND: 1584 /* Disallow AND'ing of negative numbers, ain't nobody got time 1585 * for that. Otherwise the minimum is 0 and the max is the max 1586 * value we could AND against. 1587 */ 1588 if (min_val < 0) 1589 dst_reg->min_value = BPF_REGISTER_MIN_RANGE; 1590 else 1591 dst_reg->min_value = 0; 1592 dst_reg->max_value = max_val; 1593 break; 1594 case BPF_LSH: 1595 /* Gotta have special overflow logic here, if we're shifting 1596 * more than MAX_RANGE then just assume we have an invalid 1597 * range. 1598 */ 1599 if (min_val > ilog2(BPF_REGISTER_MAX_RANGE)) 1600 dst_reg->min_value = BPF_REGISTER_MIN_RANGE; 1601 else if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) 1602 dst_reg->min_value <<= min_val; 1603 1604 if (max_val > ilog2(BPF_REGISTER_MAX_RANGE)) 1605 dst_reg->max_value = BPF_REGISTER_MAX_RANGE; 1606 else if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) 1607 dst_reg->max_value <<= max_val; 1608 break; 1609 case BPF_RSH: 1610 /* RSH by a negative number is undefined, and the BPF_RSH is an 1611 * unsigned shift, so make the appropriate casts. 1612 */ 1613 if (min_val < 0 || dst_reg->min_value < 0) 1614 dst_reg->min_value = BPF_REGISTER_MIN_RANGE; 1615 else 1616 dst_reg->min_value = 1617 (u64)(dst_reg->min_value) >> min_val; 1618 if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) 1619 dst_reg->max_value >>= max_val; 1620 break; 1621 default: 1622 reset_reg_range_values(regs, insn->dst_reg); 1623 break; 1624 } 1625 1626 check_reg_overflow(dst_reg); 1627 } 1628 1629 /* check validity of 32-bit and 64-bit arithmetic operations */ 1630 static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) 1631 { 1632 struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg; 1633 u8 opcode = BPF_OP(insn->code); 1634 int err; 1635 1636 if (opcode == BPF_END || opcode == BPF_NEG) { 1637 if (opcode == BPF_NEG) { 1638 if (BPF_SRC(insn->code) != 0 || 1639 insn->src_reg != BPF_REG_0 || 1640 insn->off != 0 || insn->imm != 0) { 1641 verbose("BPF_NEG uses reserved fields\n"); 1642 return -EINVAL; 1643 } 1644 } else { 1645 if (insn->src_reg != BPF_REG_0 || insn->off != 0 || 1646 (insn->imm != 16 && insn->imm != 32 && insn->imm != 64)) { 1647 verbose("BPF_END uses reserved fields\n"); 1648 return -EINVAL; 1649 } 1650 } 1651 1652 /* check src operand */ 1653 err = check_reg_arg(regs, insn->dst_reg, SRC_OP); 1654 if (err) 1655 return err; 1656 1657 if (is_pointer_value(env, insn->dst_reg)) { 1658 verbose("R%d pointer arithmetic prohibited\n", 1659 insn->dst_reg); 1660 return -EACCES; 1661 } 1662 1663 /* check dest operand */ 1664 err = check_reg_arg(regs, insn->dst_reg, DST_OP); 1665 if (err) 1666 return err; 1667 1668 } else if (opcode == BPF_MOV) { 1669 1670 if (BPF_SRC(insn->code) == BPF_X) { 1671 if (insn->imm != 0 || insn->off != 0) { 1672 verbose("BPF_MOV uses reserved fields\n"); 1673 return -EINVAL; 1674 } 1675 1676 /* check src operand */ 1677 err = check_reg_arg(regs, insn->src_reg, SRC_OP); 1678 if (err) 1679 return err; 1680 } else { 1681 if (insn->src_reg != BPF_REG_0 || insn->off != 0) { 1682 verbose("BPF_MOV uses reserved fields\n"); 1683 return -EINVAL; 1684 } 1685 } 1686 1687 /* check dest operand */ 1688 err = check_reg_arg(regs, insn->dst_reg, DST_OP); 1689 if (err) 1690 return err; 1691 1692 /* we are setting our register to something new, we need to 1693 * reset its range values. 1694 */ 1695 reset_reg_range_values(regs, insn->dst_reg); 1696 1697 if (BPF_SRC(insn->code) == BPF_X) { 1698 if (BPF_CLASS(insn->code) == BPF_ALU64) { 1699 /* case: R1 = R2 1700 * copy register state to dest reg 1701 */ 1702 regs[insn->dst_reg] = regs[insn->src_reg]; 1703 } else { 1704 if (is_pointer_value(env, insn->src_reg)) { 1705 verbose("R%d partial copy of pointer\n", 1706 insn->src_reg); 1707 return -EACCES; 1708 } 1709 mark_reg_unknown_value(regs, insn->dst_reg); 1710 } 1711 } else { 1712 /* case: R = imm 1713 * remember the value we stored into this reg 1714 */ 1715 regs[insn->dst_reg].type = CONST_IMM; 1716 regs[insn->dst_reg].imm = insn->imm; 1717 regs[insn->dst_reg].max_value = insn->imm; 1718 regs[insn->dst_reg].min_value = insn->imm; 1719 } 1720 1721 } else if (opcode > BPF_END) { 1722 verbose("invalid BPF_ALU opcode %x\n", opcode); 1723 return -EINVAL; 1724 1725 } else { /* all other ALU ops: and, sub, xor, add, ... */ 1726 1727 if (BPF_SRC(insn->code) == BPF_X) { 1728 if (insn->imm != 0 || insn->off != 0) { 1729 verbose("BPF_ALU uses reserved fields\n"); 1730 return -EINVAL; 1731 } 1732 /* check src1 operand */ 1733 err = check_reg_arg(regs, insn->src_reg, SRC_OP); 1734 if (err) 1735 return err; 1736 } else { 1737 if (insn->src_reg != BPF_REG_0 || insn->off != 0) { 1738 verbose("BPF_ALU uses reserved fields\n"); 1739 return -EINVAL; 1740 } 1741 } 1742 1743 /* check src2 operand */ 1744 err = check_reg_arg(regs, insn->dst_reg, SRC_OP); 1745 if (err) 1746 return err; 1747 1748 if ((opcode == BPF_MOD || opcode == BPF_DIV) && 1749 BPF_SRC(insn->code) == BPF_K && insn->imm == 0) { 1750 verbose("div by zero\n"); 1751 return -EINVAL; 1752 } 1753 1754 if ((opcode == BPF_LSH || opcode == BPF_RSH || 1755 opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { 1756 int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32; 1757 1758 if (insn->imm < 0 || insn->imm >= size) { 1759 verbose("invalid shift %d\n", insn->imm); 1760 return -EINVAL; 1761 } 1762 } 1763 1764 /* check dest operand */ 1765 err = check_reg_arg(regs, insn->dst_reg, DST_OP_NO_MARK); 1766 if (err) 1767 return err; 1768 1769 dst_reg = ®s[insn->dst_reg]; 1770 1771 /* first we want to adjust our ranges. */ 1772 adjust_reg_min_max_vals(env, insn); 1773 1774 /* pattern match 'bpf_add Rx, imm' instruction */ 1775 if (opcode == BPF_ADD && BPF_CLASS(insn->code) == BPF_ALU64 && 1776 dst_reg->type == FRAME_PTR && BPF_SRC(insn->code) == BPF_K) { 1777 dst_reg->type = PTR_TO_STACK; 1778 dst_reg->imm = insn->imm; 1779 return 0; 1780 } else if (opcode == BPF_ADD && 1781 BPF_CLASS(insn->code) == BPF_ALU64 && 1782 (dst_reg->type == PTR_TO_PACKET || 1783 (BPF_SRC(insn->code) == BPF_X && 1784 regs[insn->src_reg].type == PTR_TO_PACKET))) { 1785 /* ptr_to_packet += K|X */ 1786 return check_packet_ptr_add(env, insn); 1787 } else if (BPF_CLASS(insn->code) == BPF_ALU64 && 1788 dst_reg->type == UNKNOWN_VALUE && 1789 env->allow_ptr_leaks) { 1790 /* unknown += K|X */ 1791 return evaluate_reg_alu(env, insn); 1792 } else if (BPF_CLASS(insn->code) == BPF_ALU64 && 1793 dst_reg->type == CONST_IMM && 1794 env->allow_ptr_leaks) { 1795 /* reg_imm += K|X */ 1796 return evaluate_reg_imm_alu(env, insn); 1797 } else if (is_pointer_value(env, insn->dst_reg)) { 1798 verbose("R%d pointer arithmetic prohibited\n", 1799 insn->dst_reg); 1800 return -EACCES; 1801 } else if (BPF_SRC(insn->code) == BPF_X && 1802 is_pointer_value(env, insn->src_reg)) { 1803 verbose("R%d pointer arithmetic prohibited\n", 1804 insn->src_reg); 1805 return -EACCES; 1806 } 1807 1808 /* If we did pointer math on a map value then just set it to our 1809 * PTR_TO_MAP_VALUE_ADJ type so we can deal with any stores or 1810 * loads to this register appropriately, otherwise just mark the 1811 * register as unknown. 1812 */ 1813 if (env->allow_ptr_leaks && 1814 (dst_reg->type == PTR_TO_MAP_VALUE || 1815 dst_reg->type == PTR_TO_MAP_VALUE_ADJ)) 1816 dst_reg->type = PTR_TO_MAP_VALUE_ADJ; 1817 else 1818 mark_reg_unknown_value(regs, insn->dst_reg); 1819 } 1820 1821 return 0; 1822 } 1823 1824 static void find_good_pkt_pointers(struct bpf_verifier_state *state, 1825 struct bpf_reg_state *dst_reg) 1826 { 1827 struct bpf_reg_state *regs = state->regs, *reg; 1828 int i; 1829 1830 /* LLVM can generate two kind of checks: 1831 * 1832 * Type 1: 1833 * 1834 * r2 = r3; 1835 * r2 += 8; 1836 * if (r2 > pkt_end) goto <handle exception> 1837 * <access okay> 1838 * 1839 * Where: 1840 * r2 == dst_reg, pkt_end == src_reg 1841 * r2=pkt(id=n,off=8,r=0) 1842 * r3=pkt(id=n,off=0,r=0) 1843 * 1844 * Type 2: 1845 * 1846 * r2 = r3; 1847 * r2 += 8; 1848 * if (pkt_end >= r2) goto <access okay> 1849 * <handle exception> 1850 * 1851 * Where: 1852 * pkt_end == dst_reg, r2 == src_reg 1853 * r2=pkt(id=n,off=8,r=0) 1854 * r3=pkt(id=n,off=0,r=0) 1855 * 1856 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8) 1857 * so that range of bytes [r3, r3 + 8) is safe to access. 1858 */ 1859 1860 for (i = 0; i < MAX_BPF_REG; i++) 1861 if (regs[i].type == PTR_TO_PACKET && regs[i].id == dst_reg->id) 1862 regs[i].range = dst_reg->off; 1863 1864 for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { 1865 if (state->stack_slot_type[i] != STACK_SPILL) 1866 continue; 1867 reg = &state->spilled_regs[i / BPF_REG_SIZE]; 1868 if (reg->type == PTR_TO_PACKET && reg->id == dst_reg->id) 1869 reg->range = dst_reg->off; 1870 } 1871 } 1872 1873 /* Adjusts the register min/max values in the case that the dst_reg is the 1874 * variable register that we are working on, and src_reg is a constant or we're 1875 * simply doing a BPF_K check. 1876 */ 1877 static void reg_set_min_max(struct bpf_reg_state *true_reg, 1878 struct bpf_reg_state *false_reg, u64 val, 1879 u8 opcode) 1880 { 1881 switch (opcode) { 1882 case BPF_JEQ: 1883 /* If this is false then we know nothing Jon Snow, but if it is 1884 * true then we know for sure. 1885 */ 1886 true_reg->max_value = true_reg->min_value = val; 1887 break; 1888 case BPF_JNE: 1889 /* If this is true we know nothing Jon Snow, but if it is false 1890 * we know the value for sure; 1891 */ 1892 false_reg->max_value = false_reg->min_value = val; 1893 break; 1894 case BPF_JGT: 1895 /* Unsigned comparison, the minimum value is 0. */ 1896 false_reg->min_value = 0; 1897 case BPF_JSGT: 1898 /* If this is false then we know the maximum val is val, 1899 * otherwise we know the min val is val+1. 1900 */ 1901 false_reg->max_value = val; 1902 true_reg->min_value = val + 1; 1903 break; 1904 case BPF_JGE: 1905 /* Unsigned comparison, the minimum value is 0. */ 1906 false_reg->min_value = 0; 1907 case BPF_JSGE: 1908 /* If this is false then we know the maximum value is val - 1, 1909 * otherwise we know the mimimum value is val. 1910 */ 1911 false_reg->max_value = val - 1; 1912 true_reg->min_value = val; 1913 break; 1914 default: 1915 break; 1916 } 1917 1918 check_reg_overflow(false_reg); 1919 check_reg_overflow(true_reg); 1920 } 1921 1922 /* Same as above, but for the case that dst_reg is a CONST_IMM reg and src_reg 1923 * is the variable reg. 1924 */ 1925 static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, 1926 struct bpf_reg_state *false_reg, u64 val, 1927 u8 opcode) 1928 { 1929 switch (opcode) { 1930 case BPF_JEQ: 1931 /* If this is false then we know nothing Jon Snow, but if it is 1932 * true then we know for sure. 1933 */ 1934 true_reg->max_value = true_reg->min_value = val; 1935 break; 1936 case BPF_JNE: 1937 /* If this is true we know nothing Jon Snow, but if it is false 1938 * we know the value for sure; 1939 */ 1940 false_reg->max_value = false_reg->min_value = val; 1941 break; 1942 case BPF_JGT: 1943 /* Unsigned comparison, the minimum value is 0. */ 1944 true_reg->min_value = 0; 1945 case BPF_JSGT: 1946 /* 1947 * If this is false, then the val is <= the register, if it is 1948 * true the register <= to the val. 1949 */ 1950 false_reg->min_value = val; 1951 true_reg->max_value = val - 1; 1952 break; 1953 case BPF_JGE: 1954 /* Unsigned comparison, the minimum value is 0. */ 1955 true_reg->min_value = 0; 1956 case BPF_JSGE: 1957 /* If this is false then constant < register, if it is true then 1958 * the register < constant. 1959 */ 1960 false_reg->min_value = val + 1; 1961 true_reg->max_value = val; 1962 break; 1963 default: 1964 break; 1965 } 1966 1967 check_reg_overflow(false_reg); 1968 check_reg_overflow(true_reg); 1969 } 1970 1971 static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id, 1972 enum bpf_reg_type type) 1973 { 1974 struct bpf_reg_state *reg = ®s[regno]; 1975 1976 if (reg->type == PTR_TO_MAP_VALUE_OR_NULL && reg->id == id) { 1977 reg->type = type; 1978 /* We don't need id from this point onwards anymore, thus we 1979 * should better reset it, so that state pruning has chances 1980 * to take effect. 1981 */ 1982 reg->id = 0; 1983 if (type == UNKNOWN_VALUE) 1984 __mark_reg_unknown_value(regs, regno); 1985 } 1986 } 1987 1988 /* The logic is similar to find_good_pkt_pointers(), both could eventually 1989 * be folded together at some point. 1990 */ 1991 static void mark_map_regs(struct bpf_verifier_state *state, u32 regno, 1992 enum bpf_reg_type type) 1993 { 1994 struct bpf_reg_state *regs = state->regs; 1995 u32 id = regs[regno].id; 1996 int i; 1997 1998 for (i = 0; i < MAX_BPF_REG; i++) 1999 mark_map_reg(regs, i, id, type); 2000 2001 for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { 2002 if (state->stack_slot_type[i] != STACK_SPILL) 2003 continue; 2004 mark_map_reg(state->spilled_regs, i / BPF_REG_SIZE, id, type); 2005 } 2006 } 2007 2008 static int check_cond_jmp_op(struct bpf_verifier_env *env, 2009 struct bpf_insn *insn, int *insn_idx) 2010 { 2011 struct bpf_verifier_state *other_branch, *this_branch = &env->cur_state; 2012 struct bpf_reg_state *regs = this_branch->regs, *dst_reg; 2013 u8 opcode = BPF_OP(insn->code); 2014 int err; 2015 2016 if (opcode > BPF_EXIT) { 2017 verbose("invalid BPF_JMP opcode %x\n", opcode); 2018 return -EINVAL; 2019 } 2020 2021 if (BPF_SRC(insn->code) == BPF_X) { 2022 if (insn->imm != 0) { 2023 verbose("BPF_JMP uses reserved fields\n"); 2024 return -EINVAL; 2025 } 2026 2027 /* check src1 operand */ 2028 err = check_reg_arg(regs, insn->src_reg, SRC_OP); 2029 if (err) 2030 return err; 2031 2032 if (is_pointer_value(env, insn->src_reg)) { 2033 verbose("R%d pointer comparison prohibited\n", 2034 insn->src_reg); 2035 return -EACCES; 2036 } 2037 } else { 2038 if (insn->src_reg != BPF_REG_0) { 2039 verbose("BPF_JMP uses reserved fields\n"); 2040 return -EINVAL; 2041 } 2042 } 2043 2044 /* check src2 operand */ 2045 err = check_reg_arg(regs, insn->dst_reg, SRC_OP); 2046 if (err) 2047 return err; 2048 2049 dst_reg = ®s[insn->dst_reg]; 2050 2051 /* detect if R == 0 where R was initialized to zero earlier */ 2052 if (BPF_SRC(insn->code) == BPF_K && 2053 (opcode == BPF_JEQ || opcode == BPF_JNE) && 2054 dst_reg->type == CONST_IMM && dst_reg->imm == insn->imm) { 2055 if (opcode == BPF_JEQ) { 2056 /* if (imm == imm) goto pc+off; 2057 * only follow the goto, ignore fall-through 2058 */ 2059 *insn_idx += insn->off; 2060 return 0; 2061 } else { 2062 /* if (imm != imm) goto pc+off; 2063 * only follow fall-through branch, since 2064 * that's where the program will go 2065 */ 2066 return 0; 2067 } 2068 } 2069 2070 other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx); 2071 if (!other_branch) 2072 return -EFAULT; 2073 2074 /* detect if we are comparing against a constant value so we can adjust 2075 * our min/max values for our dst register. 2076 */ 2077 if (BPF_SRC(insn->code) == BPF_X) { 2078 if (regs[insn->src_reg].type == CONST_IMM) 2079 reg_set_min_max(&other_branch->regs[insn->dst_reg], 2080 dst_reg, regs[insn->src_reg].imm, 2081 opcode); 2082 else if (dst_reg->type == CONST_IMM) 2083 reg_set_min_max_inv(&other_branch->regs[insn->src_reg], 2084 ®s[insn->src_reg], dst_reg->imm, 2085 opcode); 2086 } else { 2087 reg_set_min_max(&other_branch->regs[insn->dst_reg], 2088 dst_reg, insn->imm, opcode); 2089 } 2090 2091 /* detect if R == 0 where R is returned from bpf_map_lookup_elem() */ 2092 if (BPF_SRC(insn->code) == BPF_K && 2093 insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) && 2094 dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) { 2095 /* Mark all identical map registers in each branch as either 2096 * safe or unknown depending R == 0 or R != 0 conditional. 2097 */ 2098 mark_map_regs(this_branch, insn->dst_reg, 2099 opcode == BPF_JEQ ? PTR_TO_MAP_VALUE : UNKNOWN_VALUE); 2100 mark_map_regs(other_branch, insn->dst_reg, 2101 opcode == BPF_JEQ ? UNKNOWN_VALUE : PTR_TO_MAP_VALUE); 2102 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT && 2103 dst_reg->type == PTR_TO_PACKET && 2104 regs[insn->src_reg].type == PTR_TO_PACKET_END) { 2105 find_good_pkt_pointers(this_branch, dst_reg); 2106 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGE && 2107 dst_reg->type == PTR_TO_PACKET_END && 2108 regs[insn->src_reg].type == PTR_TO_PACKET) { 2109 find_good_pkt_pointers(other_branch, ®s[insn->src_reg]); 2110 } else if (is_pointer_value(env, insn->dst_reg)) { 2111 verbose("R%d pointer comparison prohibited\n", insn->dst_reg); 2112 return -EACCES; 2113 } 2114 if (log_level) 2115 print_verifier_state(this_branch); 2116 return 0; 2117 } 2118 2119 /* return the map pointer stored inside BPF_LD_IMM64 instruction */ 2120 static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn) 2121 { 2122 u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32; 2123 2124 return (struct bpf_map *) (unsigned long) imm64; 2125 } 2126 2127 /* verify BPF_LD_IMM64 instruction */ 2128 static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn) 2129 { 2130 struct bpf_reg_state *regs = env->cur_state.regs; 2131 int err; 2132 2133 if (BPF_SIZE(insn->code) != BPF_DW) { 2134 verbose("invalid BPF_LD_IMM insn\n"); 2135 return -EINVAL; 2136 } 2137 if (insn->off != 0) { 2138 verbose("BPF_LD_IMM64 uses reserved fields\n"); 2139 return -EINVAL; 2140 } 2141 2142 err = check_reg_arg(regs, insn->dst_reg, DST_OP); 2143 if (err) 2144 return err; 2145 2146 if (insn->src_reg == 0) { 2147 /* generic move 64-bit immediate into a register, 2148 * only analyzer needs to collect the ld_imm value. 2149 */ 2150 u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm; 2151 2152 if (!env->analyzer_ops) 2153 return 0; 2154 2155 regs[insn->dst_reg].type = CONST_IMM; 2156 regs[insn->dst_reg].imm = imm; 2157 return 0; 2158 } 2159 2160 /* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */ 2161 BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD); 2162 2163 regs[insn->dst_reg].type = CONST_PTR_TO_MAP; 2164 regs[insn->dst_reg].map_ptr = ld_imm64_to_map_ptr(insn); 2165 return 0; 2166 } 2167 2168 static bool may_access_skb(enum bpf_prog_type type) 2169 { 2170 switch (type) { 2171 case BPF_PROG_TYPE_SOCKET_FILTER: 2172 case BPF_PROG_TYPE_SCHED_CLS: 2173 case BPF_PROG_TYPE_SCHED_ACT: 2174 return true; 2175 default: 2176 return false; 2177 } 2178 } 2179 2180 /* verify safety of LD_ABS|LD_IND instructions: 2181 * - they can only appear in the programs where ctx == skb 2182 * - since they are wrappers of function calls, they scratch R1-R5 registers, 2183 * preserve R6-R9, and store return value into R0 2184 * 2185 * Implicit input: 2186 * ctx == skb == R6 == CTX 2187 * 2188 * Explicit input: 2189 * SRC == any register 2190 * IMM == 32-bit immediate 2191 * 2192 * Output: 2193 * R0 - 8/16/32-bit skb data converted to cpu endianness 2194 */ 2195 static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) 2196 { 2197 struct bpf_reg_state *regs = env->cur_state.regs; 2198 u8 mode = BPF_MODE(insn->code); 2199 struct bpf_reg_state *reg; 2200 int i, err; 2201 2202 if (!may_access_skb(env->prog->type)) { 2203 verbose("BPF_LD_[ABS|IND] instructions not allowed for this program type\n"); 2204 return -EINVAL; 2205 } 2206 2207 if (insn->dst_reg != BPF_REG_0 || insn->off != 0 || 2208 BPF_SIZE(insn->code) == BPF_DW || 2209 (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) { 2210 verbose("BPF_LD_[ABS|IND] uses reserved fields\n"); 2211 return -EINVAL; 2212 } 2213 2214 /* check whether implicit source operand (register R6) is readable */ 2215 err = check_reg_arg(regs, BPF_REG_6, SRC_OP); 2216 if (err) 2217 return err; 2218 2219 if (regs[BPF_REG_6].type != PTR_TO_CTX) { 2220 verbose("at the time of BPF_LD_ABS|IND R6 != pointer to skb\n"); 2221 return -EINVAL; 2222 } 2223 2224 if (mode == BPF_IND) { 2225 /* check explicit source operand */ 2226 err = check_reg_arg(regs, insn->src_reg, SRC_OP); 2227 if (err) 2228 return err; 2229 } 2230 2231 /* reset caller saved regs to unreadable */ 2232 for (i = 0; i < CALLER_SAVED_REGS; i++) { 2233 reg = regs + caller_saved[i]; 2234 reg->type = NOT_INIT; 2235 reg->imm = 0; 2236 } 2237 2238 /* mark destination R0 register as readable, since it contains 2239 * the value fetched from the packet 2240 */ 2241 regs[BPF_REG_0].type = UNKNOWN_VALUE; 2242 return 0; 2243 } 2244 2245 /* non-recursive DFS pseudo code 2246 * 1 procedure DFS-iterative(G,v): 2247 * 2 label v as discovered 2248 * 3 let S be a stack 2249 * 4 S.push(v) 2250 * 5 while S is not empty 2251 * 6 t <- S.pop() 2252 * 7 if t is what we're looking for: 2253 * 8 return t 2254 * 9 for all edges e in G.adjacentEdges(t) do 2255 * 10 if edge e is already labelled 2256 * 11 continue with the next edge 2257 * 12 w <- G.adjacentVertex(t,e) 2258 * 13 if vertex w is not discovered and not explored 2259 * 14 label e as tree-edge 2260 * 15 label w as discovered 2261 * 16 S.push(w) 2262 * 17 continue at 5 2263 * 18 else if vertex w is discovered 2264 * 19 label e as back-edge 2265 * 20 else 2266 * 21 // vertex w is explored 2267 * 22 label e as forward- or cross-edge 2268 * 23 label t as explored 2269 * 24 S.pop() 2270 * 2271 * convention: 2272 * 0x10 - discovered 2273 * 0x11 - discovered and fall-through edge labelled 2274 * 0x12 - discovered and fall-through and branch edges labelled 2275 * 0x20 - explored 2276 */ 2277 2278 enum { 2279 DISCOVERED = 0x10, 2280 EXPLORED = 0x20, 2281 FALLTHROUGH = 1, 2282 BRANCH = 2, 2283 }; 2284 2285 #define STATE_LIST_MARK ((struct bpf_verifier_state_list *) -1L) 2286 2287 static int *insn_stack; /* stack of insns to process */ 2288 static int cur_stack; /* current stack index */ 2289 static int *insn_state; 2290 2291 /* t, w, e - match pseudo-code above: 2292 * t - index of current instruction 2293 * w - next instruction 2294 * e - edge 2295 */ 2296 static int push_insn(int t, int w, int e, struct bpf_verifier_env *env) 2297 { 2298 if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH)) 2299 return 0; 2300 2301 if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH)) 2302 return 0; 2303 2304 if (w < 0 || w >= env->prog->len) { 2305 verbose("jump out of range from insn %d to %d\n", t, w); 2306 return -EINVAL; 2307 } 2308 2309 if (e == BRANCH) 2310 /* mark branch target for state pruning */ 2311 env->explored_states[w] = STATE_LIST_MARK; 2312 2313 if (insn_state[w] == 0) { 2314 /* tree-edge */ 2315 insn_state[t] = DISCOVERED | e; 2316 insn_state[w] = DISCOVERED; 2317 if (cur_stack >= env->prog->len) 2318 return -E2BIG; 2319 insn_stack[cur_stack++] = w; 2320 return 1; 2321 } else if ((insn_state[w] & 0xF0) == DISCOVERED) { 2322 verbose("back-edge from insn %d to %d\n", t, w); 2323 return -EINVAL; 2324 } else if (insn_state[w] == EXPLORED) { 2325 /* forward- or cross-edge */ 2326 insn_state[t] = DISCOVERED | e; 2327 } else { 2328 verbose("insn state internal bug\n"); 2329 return -EFAULT; 2330 } 2331 return 0; 2332 } 2333 2334 /* non-recursive depth-first-search to detect loops in BPF program 2335 * loop == back-edge in directed graph 2336 */ 2337 static int check_cfg(struct bpf_verifier_env *env) 2338 { 2339 struct bpf_insn *insns = env->prog->insnsi; 2340 int insn_cnt = env->prog->len; 2341 int ret = 0; 2342 int i, t; 2343 2344 insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL); 2345 if (!insn_state) 2346 return -ENOMEM; 2347 2348 insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL); 2349 if (!insn_stack) { 2350 kfree(insn_state); 2351 return -ENOMEM; 2352 } 2353 2354 insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */ 2355 insn_stack[0] = 0; /* 0 is the first instruction */ 2356 cur_stack = 1; 2357 2358 peek_stack: 2359 if (cur_stack == 0) 2360 goto check_state; 2361 t = insn_stack[cur_stack - 1]; 2362 2363 if (BPF_CLASS(insns[t].code) == BPF_JMP) { 2364 u8 opcode = BPF_OP(insns[t].code); 2365 2366 if (opcode == BPF_EXIT) { 2367 goto mark_explored; 2368 } else if (opcode == BPF_CALL) { 2369 ret = push_insn(t, t + 1, FALLTHROUGH, env); 2370 if (ret == 1) 2371 goto peek_stack; 2372 else if (ret < 0) 2373 goto err_free; 2374 if (t + 1 < insn_cnt) 2375 env->explored_states[t + 1] = STATE_LIST_MARK; 2376 } else if (opcode == BPF_JA) { 2377 if (BPF_SRC(insns[t].code) != BPF_K) { 2378 ret = -EINVAL; 2379 goto err_free; 2380 } 2381 /* unconditional jump with single edge */ 2382 ret = push_insn(t, t + insns[t].off + 1, 2383 FALLTHROUGH, env); 2384 if (ret == 1) 2385 goto peek_stack; 2386 else if (ret < 0) 2387 goto err_free; 2388 /* tell verifier to check for equivalent states 2389 * after every call and jump 2390 */ 2391 if (t + 1 < insn_cnt) 2392 env->explored_states[t + 1] = STATE_LIST_MARK; 2393 } else { 2394 /* conditional jump with two edges */ 2395 ret = push_insn(t, t + 1, FALLTHROUGH, env); 2396 if (ret == 1) 2397 goto peek_stack; 2398 else if (ret < 0) 2399 goto err_free; 2400 2401 ret = push_insn(t, t + insns[t].off + 1, BRANCH, env); 2402 if (ret == 1) 2403 goto peek_stack; 2404 else if (ret < 0) 2405 goto err_free; 2406 } 2407 } else { 2408 /* all other non-branch instructions with single 2409 * fall-through edge 2410 */ 2411 ret = push_insn(t, t + 1, FALLTHROUGH, env); 2412 if (ret == 1) 2413 goto peek_stack; 2414 else if (ret < 0) 2415 goto err_free; 2416 } 2417 2418 mark_explored: 2419 insn_state[t] = EXPLORED; 2420 if (cur_stack-- <= 0) { 2421 verbose("pop stack internal bug\n"); 2422 ret = -EFAULT; 2423 goto err_free; 2424 } 2425 goto peek_stack; 2426 2427 check_state: 2428 for (i = 0; i < insn_cnt; i++) { 2429 if (insn_state[i] != EXPLORED) { 2430 verbose("unreachable insn %d\n", i); 2431 ret = -EINVAL; 2432 goto err_free; 2433 } 2434 } 2435 ret = 0; /* cfg looks good */ 2436 2437 err_free: 2438 kfree(insn_state); 2439 kfree(insn_stack); 2440 return ret; 2441 } 2442 2443 /* the following conditions reduce the number of explored insns 2444 * from ~140k to ~80k for ultra large programs that use a lot of ptr_to_packet 2445 */ 2446 static bool compare_ptrs_to_packet(struct bpf_reg_state *old, 2447 struct bpf_reg_state *cur) 2448 { 2449 if (old->id != cur->id) 2450 return false; 2451 2452 /* old ptr_to_packet is more conservative, since it allows smaller 2453 * range. Ex: 2454 * old(off=0,r=10) is equal to cur(off=0,r=20), because 2455 * old(off=0,r=10) means that with range=10 the verifier proceeded 2456 * further and found no issues with the program. Now we're in the same 2457 * spot with cur(off=0,r=20), so we're safe too, since anything further 2458 * will only be looking at most 10 bytes after this pointer. 2459 */ 2460 if (old->off == cur->off && old->range < cur->range) 2461 return true; 2462 2463 /* old(off=20,r=10) is equal to cur(off=22,re=22 or 5 or 0) 2464 * since both cannot be used for packet access and safe(old) 2465 * pointer has smaller off that could be used for further 2466 * 'if (ptr > data_end)' check 2467 * Ex: 2468 * old(off=20,r=10) and cur(off=22,r=22) and cur(off=22,r=0) mean 2469 * that we cannot access the packet. 2470 * The safe range is: 2471 * [ptr, ptr + range - off) 2472 * so whenever off >=range, it means no safe bytes from this pointer. 2473 * When comparing old->off <= cur->off, it means that older code 2474 * went with smaller offset and that offset was later 2475 * used to figure out the safe range after 'if (ptr > data_end)' check 2476 * Say, 'old' state was explored like: 2477 * ... R3(off=0, r=0) 2478 * R4 = R3 + 20 2479 * ... now R4(off=20,r=0) <-- here 2480 * if (R4 > data_end) 2481 * ... R4(off=20,r=20), R3(off=0,r=20) and R3 can be used to access. 2482 * ... the code further went all the way to bpf_exit. 2483 * Now the 'cur' state at the mark 'here' has R4(off=30,r=0). 2484 * old_R4(off=20,r=0) equal to cur_R4(off=30,r=0), since if the verifier 2485 * goes further, such cur_R4 will give larger safe packet range after 2486 * 'if (R4 > data_end)' and all further insn were already good with r=20, 2487 * so they will be good with r=30 and we can prune the search. 2488 */ 2489 if (old->off <= cur->off && 2490 old->off >= old->range && cur->off >= cur->range) 2491 return true; 2492 2493 return false; 2494 } 2495 2496 /* compare two verifier states 2497 * 2498 * all states stored in state_list are known to be valid, since 2499 * verifier reached 'bpf_exit' instruction through them 2500 * 2501 * this function is called when verifier exploring different branches of 2502 * execution popped from the state stack. If it sees an old state that has 2503 * more strict register state and more strict stack state then this execution 2504 * branch doesn't need to be explored further, since verifier already 2505 * concluded that more strict state leads to valid finish. 2506 * 2507 * Therefore two states are equivalent if register state is more conservative 2508 * and explored stack state is more conservative than the current one. 2509 * Example: 2510 * explored current 2511 * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC) 2512 * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC) 2513 * 2514 * In other words if current stack state (one being explored) has more 2515 * valid slots than old one that already passed validation, it means 2516 * the verifier can stop exploring and conclude that current state is valid too 2517 * 2518 * Similarly with registers. If explored state has register type as invalid 2519 * whereas register type in current state is meaningful, it means that 2520 * the current state will reach 'bpf_exit' instruction safely 2521 */ 2522 static bool states_equal(struct bpf_verifier_env *env, 2523 struct bpf_verifier_state *old, 2524 struct bpf_verifier_state *cur) 2525 { 2526 bool varlen_map_access = env->varlen_map_value_access; 2527 struct bpf_reg_state *rold, *rcur; 2528 int i; 2529 2530 for (i = 0; i < MAX_BPF_REG; i++) { 2531 rold = &old->regs[i]; 2532 rcur = &cur->regs[i]; 2533 2534 if (memcmp(rold, rcur, sizeof(*rold)) == 0) 2535 continue; 2536 2537 /* If the ranges were not the same, but everything else was and 2538 * we didn't do a variable access into a map then we are a-ok. 2539 */ 2540 if (!varlen_map_access && 2541 memcmp(rold, rcur, offsetofend(struct bpf_reg_state, id)) == 0) 2542 continue; 2543 2544 /* If we didn't map access then again we don't care about the 2545 * mismatched range values and it's ok if our old type was 2546 * UNKNOWN and we didn't go to a NOT_INIT'ed reg. 2547 */ 2548 if (rold->type == NOT_INIT || 2549 (!varlen_map_access && rold->type == UNKNOWN_VALUE && 2550 rcur->type != NOT_INIT)) 2551 continue; 2552 2553 if (rold->type == PTR_TO_PACKET && rcur->type == PTR_TO_PACKET && 2554 compare_ptrs_to_packet(rold, rcur)) 2555 continue; 2556 2557 return false; 2558 } 2559 2560 for (i = 0; i < MAX_BPF_STACK; i++) { 2561 if (old->stack_slot_type[i] == STACK_INVALID) 2562 continue; 2563 if (old->stack_slot_type[i] != cur->stack_slot_type[i]) 2564 /* Ex: old explored (safe) state has STACK_SPILL in 2565 * this stack slot, but current has has STACK_MISC -> 2566 * this verifier states are not equivalent, 2567 * return false to continue verification of this path 2568 */ 2569 return false; 2570 if (i % BPF_REG_SIZE) 2571 continue; 2572 if (memcmp(&old->spilled_regs[i / BPF_REG_SIZE], 2573 &cur->spilled_regs[i / BPF_REG_SIZE], 2574 sizeof(old->spilled_regs[0]))) 2575 /* when explored and current stack slot types are 2576 * the same, check that stored pointers types 2577 * are the same as well. 2578 * Ex: explored safe path could have stored 2579 * (bpf_reg_state) {.type = PTR_TO_STACK, .imm = -8} 2580 * but current path has stored: 2581 * (bpf_reg_state) {.type = PTR_TO_STACK, .imm = -16} 2582 * such verifier states are not equivalent. 2583 * return false to continue verification of this path 2584 */ 2585 return false; 2586 else 2587 continue; 2588 } 2589 return true; 2590 } 2591 2592 static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) 2593 { 2594 struct bpf_verifier_state_list *new_sl; 2595 struct bpf_verifier_state_list *sl; 2596 2597 sl = env->explored_states[insn_idx]; 2598 if (!sl) 2599 /* this 'insn_idx' instruction wasn't marked, so we will not 2600 * be doing state search here 2601 */ 2602 return 0; 2603 2604 while (sl != STATE_LIST_MARK) { 2605 if (states_equal(env, &sl->state, &env->cur_state)) 2606 /* reached equivalent register/stack state, 2607 * prune the search 2608 */ 2609 return 1; 2610 sl = sl->next; 2611 } 2612 2613 /* there were no equivalent states, remember current one. 2614 * technically the current state is not proven to be safe yet, 2615 * but it will either reach bpf_exit (which means it's safe) or 2616 * it will be rejected. Since there are no loops, we won't be 2617 * seeing this 'insn_idx' instruction again on the way to bpf_exit 2618 */ 2619 new_sl = kmalloc(sizeof(struct bpf_verifier_state_list), GFP_USER); 2620 if (!new_sl) 2621 return -ENOMEM; 2622 2623 /* add new state to the head of linked list */ 2624 memcpy(&new_sl->state, &env->cur_state, sizeof(env->cur_state)); 2625 new_sl->next = env->explored_states[insn_idx]; 2626 env->explored_states[insn_idx] = new_sl; 2627 return 0; 2628 } 2629 2630 static int ext_analyzer_insn_hook(struct bpf_verifier_env *env, 2631 int insn_idx, int prev_insn_idx) 2632 { 2633 if (!env->analyzer_ops || !env->analyzer_ops->insn_hook) 2634 return 0; 2635 2636 return env->analyzer_ops->insn_hook(env, insn_idx, prev_insn_idx); 2637 } 2638 2639 static int do_check(struct bpf_verifier_env *env) 2640 { 2641 struct bpf_verifier_state *state = &env->cur_state; 2642 struct bpf_insn *insns = env->prog->insnsi; 2643 struct bpf_reg_state *regs = state->regs; 2644 int insn_cnt = env->prog->len; 2645 int insn_idx, prev_insn_idx = 0; 2646 int insn_processed = 0; 2647 bool do_print_state = false; 2648 2649 init_reg_state(regs); 2650 insn_idx = 0; 2651 env->varlen_map_value_access = false; 2652 for (;;) { 2653 struct bpf_insn *insn; 2654 u8 class; 2655 int err; 2656 2657 if (insn_idx >= insn_cnt) { 2658 verbose("invalid insn idx %d insn_cnt %d\n", 2659 insn_idx, insn_cnt); 2660 return -EFAULT; 2661 } 2662 2663 insn = &insns[insn_idx]; 2664 class = BPF_CLASS(insn->code); 2665 2666 if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) { 2667 verbose("BPF program is too large. Proccessed %d insn\n", 2668 insn_processed); 2669 return -E2BIG; 2670 } 2671 2672 err = is_state_visited(env, insn_idx); 2673 if (err < 0) 2674 return err; 2675 if (err == 1) { 2676 /* found equivalent state, can prune the search */ 2677 if (log_level) { 2678 if (do_print_state) 2679 verbose("\nfrom %d to %d: safe\n", 2680 prev_insn_idx, insn_idx); 2681 else 2682 verbose("%d: safe\n", insn_idx); 2683 } 2684 goto process_bpf_exit; 2685 } 2686 2687 if (log_level && do_print_state) { 2688 verbose("\nfrom %d to %d:", prev_insn_idx, insn_idx); 2689 print_verifier_state(&env->cur_state); 2690 do_print_state = false; 2691 } 2692 2693 if (log_level) { 2694 verbose("%d: ", insn_idx); 2695 print_bpf_insn(insn); 2696 } 2697 2698 err = ext_analyzer_insn_hook(env, insn_idx, prev_insn_idx); 2699 if (err) 2700 return err; 2701 2702 if (class == BPF_ALU || class == BPF_ALU64) { 2703 err = check_alu_op(env, insn); 2704 if (err) 2705 return err; 2706 2707 } else if (class == BPF_LDX) { 2708 enum bpf_reg_type *prev_src_type, src_reg_type; 2709 2710 /* check for reserved fields is already done */ 2711 2712 /* check src operand */ 2713 err = check_reg_arg(regs, insn->src_reg, SRC_OP); 2714 if (err) 2715 return err; 2716 2717 err = check_reg_arg(regs, insn->dst_reg, DST_OP_NO_MARK); 2718 if (err) 2719 return err; 2720 2721 src_reg_type = regs[insn->src_reg].type; 2722 2723 /* check that memory (src_reg + off) is readable, 2724 * the state of dst_reg will be updated by this func 2725 */ 2726 err = check_mem_access(env, insn->src_reg, insn->off, 2727 BPF_SIZE(insn->code), BPF_READ, 2728 insn->dst_reg); 2729 if (err) 2730 return err; 2731 2732 reset_reg_range_values(regs, insn->dst_reg); 2733 if (BPF_SIZE(insn->code) != BPF_W && 2734 BPF_SIZE(insn->code) != BPF_DW) { 2735 insn_idx++; 2736 continue; 2737 } 2738 2739 prev_src_type = &env->insn_aux_data[insn_idx].ptr_type; 2740 2741 if (*prev_src_type == NOT_INIT) { 2742 /* saw a valid insn 2743 * dst_reg = *(u32 *)(src_reg + off) 2744 * save type to validate intersecting paths 2745 */ 2746 *prev_src_type = src_reg_type; 2747 2748 } else if (src_reg_type != *prev_src_type && 2749 (src_reg_type == PTR_TO_CTX || 2750 *prev_src_type == PTR_TO_CTX)) { 2751 /* ABuser program is trying to use the same insn 2752 * dst_reg = *(u32*) (src_reg + off) 2753 * with different pointer types: 2754 * src_reg == ctx in one branch and 2755 * src_reg == stack|map in some other branch. 2756 * Reject it. 2757 */ 2758 verbose("same insn cannot be used with different pointers\n"); 2759 return -EINVAL; 2760 } 2761 2762 } else if (class == BPF_STX) { 2763 enum bpf_reg_type *prev_dst_type, dst_reg_type; 2764 2765 if (BPF_MODE(insn->code) == BPF_XADD) { 2766 err = check_xadd(env, insn); 2767 if (err) 2768 return err; 2769 insn_idx++; 2770 continue; 2771 } 2772 2773 /* check src1 operand */ 2774 err = check_reg_arg(regs, insn->src_reg, SRC_OP); 2775 if (err) 2776 return err; 2777 /* check src2 operand */ 2778 err = check_reg_arg(regs, insn->dst_reg, SRC_OP); 2779 if (err) 2780 return err; 2781 2782 dst_reg_type = regs[insn->dst_reg].type; 2783 2784 /* check that memory (dst_reg + off) is writeable */ 2785 err = check_mem_access(env, insn->dst_reg, insn->off, 2786 BPF_SIZE(insn->code), BPF_WRITE, 2787 insn->src_reg); 2788 if (err) 2789 return err; 2790 2791 prev_dst_type = &env->insn_aux_data[insn_idx].ptr_type; 2792 2793 if (*prev_dst_type == NOT_INIT) { 2794 *prev_dst_type = dst_reg_type; 2795 } else if (dst_reg_type != *prev_dst_type && 2796 (dst_reg_type == PTR_TO_CTX || 2797 *prev_dst_type == PTR_TO_CTX)) { 2798 verbose("same insn cannot be used with different pointers\n"); 2799 return -EINVAL; 2800 } 2801 2802 } else if (class == BPF_ST) { 2803 if (BPF_MODE(insn->code) != BPF_MEM || 2804 insn->src_reg != BPF_REG_0) { 2805 verbose("BPF_ST uses reserved fields\n"); 2806 return -EINVAL; 2807 } 2808 /* check src operand */ 2809 err = check_reg_arg(regs, insn->dst_reg, SRC_OP); 2810 if (err) 2811 return err; 2812 2813 /* check that memory (dst_reg + off) is writeable */ 2814 err = check_mem_access(env, insn->dst_reg, insn->off, 2815 BPF_SIZE(insn->code), BPF_WRITE, 2816 -1); 2817 if (err) 2818 return err; 2819 2820 } else if (class == BPF_JMP) { 2821 u8 opcode = BPF_OP(insn->code); 2822 2823 if (opcode == BPF_CALL) { 2824 if (BPF_SRC(insn->code) != BPF_K || 2825 insn->off != 0 || 2826 insn->src_reg != BPF_REG_0 || 2827 insn->dst_reg != BPF_REG_0) { 2828 verbose("BPF_CALL uses reserved fields\n"); 2829 return -EINVAL; 2830 } 2831 2832 err = check_call(env, insn->imm); 2833 if (err) 2834 return err; 2835 2836 } else if (opcode == BPF_JA) { 2837 if (BPF_SRC(insn->code) != BPF_K || 2838 insn->imm != 0 || 2839 insn->src_reg != BPF_REG_0 || 2840 insn->dst_reg != BPF_REG_0) { 2841 verbose("BPF_JA uses reserved fields\n"); 2842 return -EINVAL; 2843 } 2844 2845 insn_idx += insn->off + 1; 2846 continue; 2847 2848 } else if (opcode == BPF_EXIT) { 2849 if (BPF_SRC(insn->code) != BPF_K || 2850 insn->imm != 0 || 2851 insn->src_reg != BPF_REG_0 || 2852 insn->dst_reg != BPF_REG_0) { 2853 verbose("BPF_EXIT uses reserved fields\n"); 2854 return -EINVAL; 2855 } 2856 2857 /* eBPF calling convetion is such that R0 is used 2858 * to return the value from eBPF program. 2859 * Make sure that it's readable at this time 2860 * of bpf_exit, which means that program wrote 2861 * something into it earlier 2862 */ 2863 err = check_reg_arg(regs, BPF_REG_0, SRC_OP); 2864 if (err) 2865 return err; 2866 2867 if (is_pointer_value(env, BPF_REG_0)) { 2868 verbose("R0 leaks addr as return value\n"); 2869 return -EACCES; 2870 } 2871 2872 process_bpf_exit: 2873 insn_idx = pop_stack(env, &prev_insn_idx); 2874 if (insn_idx < 0) { 2875 break; 2876 } else { 2877 do_print_state = true; 2878 continue; 2879 } 2880 } else { 2881 err = check_cond_jmp_op(env, insn, &insn_idx); 2882 if (err) 2883 return err; 2884 } 2885 } else if (class == BPF_LD) { 2886 u8 mode = BPF_MODE(insn->code); 2887 2888 if (mode == BPF_ABS || mode == BPF_IND) { 2889 err = check_ld_abs(env, insn); 2890 if (err) 2891 return err; 2892 2893 } else if (mode == BPF_IMM) { 2894 err = check_ld_imm(env, insn); 2895 if (err) 2896 return err; 2897 2898 insn_idx++; 2899 } else { 2900 verbose("invalid BPF_LD mode\n"); 2901 return -EINVAL; 2902 } 2903 reset_reg_range_values(regs, insn->dst_reg); 2904 } else { 2905 verbose("unknown insn class %d\n", class); 2906 return -EINVAL; 2907 } 2908 2909 insn_idx++; 2910 } 2911 2912 verbose("processed %d insns\n", insn_processed); 2913 return 0; 2914 } 2915 2916 static int check_map_prog_compatibility(struct bpf_map *map, 2917 struct bpf_prog *prog) 2918 2919 { 2920 if (prog->type == BPF_PROG_TYPE_PERF_EVENT && 2921 (map->map_type == BPF_MAP_TYPE_HASH || 2922 map->map_type == BPF_MAP_TYPE_PERCPU_HASH) && 2923 (map->map_flags & BPF_F_NO_PREALLOC)) { 2924 verbose("perf_event programs can only use preallocated hash map\n"); 2925 return -EINVAL; 2926 } 2927 return 0; 2928 } 2929 2930 /* look for pseudo eBPF instructions that access map FDs and 2931 * replace them with actual map pointers 2932 */ 2933 static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env) 2934 { 2935 struct bpf_insn *insn = env->prog->insnsi; 2936 int insn_cnt = env->prog->len; 2937 int i, j, err; 2938 2939 err = bpf_prog_calc_digest(env->prog); 2940 if (err) 2941 return err; 2942 2943 for (i = 0; i < insn_cnt; i++, insn++) { 2944 if (BPF_CLASS(insn->code) == BPF_LDX && 2945 (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) { 2946 verbose("BPF_LDX uses reserved fields\n"); 2947 return -EINVAL; 2948 } 2949 2950 if (BPF_CLASS(insn->code) == BPF_STX && 2951 ((BPF_MODE(insn->code) != BPF_MEM && 2952 BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) { 2953 verbose("BPF_STX uses reserved fields\n"); 2954 return -EINVAL; 2955 } 2956 2957 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) { 2958 struct bpf_map *map; 2959 struct fd f; 2960 2961 if (i == insn_cnt - 1 || insn[1].code != 0 || 2962 insn[1].dst_reg != 0 || insn[1].src_reg != 0 || 2963 insn[1].off != 0) { 2964 verbose("invalid bpf_ld_imm64 insn\n"); 2965 return -EINVAL; 2966 } 2967 2968 if (insn->src_reg == 0) 2969 /* valid generic load 64-bit imm */ 2970 goto next_insn; 2971 2972 if (insn->src_reg != BPF_PSEUDO_MAP_FD) { 2973 verbose("unrecognized bpf_ld_imm64 insn\n"); 2974 return -EINVAL; 2975 } 2976 2977 f = fdget(insn->imm); 2978 map = __bpf_map_get(f); 2979 if (IS_ERR(map)) { 2980 verbose("fd %d is not pointing to valid bpf_map\n", 2981 insn->imm); 2982 return PTR_ERR(map); 2983 } 2984 2985 err = check_map_prog_compatibility(map, env->prog); 2986 if (err) { 2987 fdput(f); 2988 return err; 2989 } 2990 2991 /* store map pointer inside BPF_LD_IMM64 instruction */ 2992 insn[0].imm = (u32) (unsigned long) map; 2993 insn[1].imm = ((u64) (unsigned long) map) >> 32; 2994 2995 /* check whether we recorded this map already */ 2996 for (j = 0; j < env->used_map_cnt; j++) 2997 if (env->used_maps[j] == map) { 2998 fdput(f); 2999 goto next_insn; 3000 } 3001 3002 if (env->used_map_cnt >= MAX_USED_MAPS) { 3003 fdput(f); 3004 return -E2BIG; 3005 } 3006 3007 /* hold the map. If the program is rejected by verifier, 3008 * the map will be released by release_maps() or it 3009 * will be used by the valid program until it's unloaded 3010 * and all maps are released in free_bpf_prog_info() 3011 */ 3012 map = bpf_map_inc(map, false); 3013 if (IS_ERR(map)) { 3014 fdput(f); 3015 return PTR_ERR(map); 3016 } 3017 env->used_maps[env->used_map_cnt++] = map; 3018 3019 fdput(f); 3020 next_insn: 3021 insn++; 3022 i++; 3023 } 3024 } 3025 3026 /* now all pseudo BPF_LD_IMM64 instructions load valid 3027 * 'struct bpf_map *' into a register instead of user map_fd. 3028 * These pointers will be used later by verifier to validate map access. 3029 */ 3030 return 0; 3031 } 3032 3033 /* drop refcnt of maps used by the rejected program */ 3034 static void release_maps(struct bpf_verifier_env *env) 3035 { 3036 int i; 3037 3038 for (i = 0; i < env->used_map_cnt; i++) 3039 bpf_map_put(env->used_maps[i]); 3040 } 3041 3042 /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */ 3043 static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env) 3044 { 3045 struct bpf_insn *insn = env->prog->insnsi; 3046 int insn_cnt = env->prog->len; 3047 int i; 3048 3049 for (i = 0; i < insn_cnt; i++, insn++) 3050 if (insn->code == (BPF_LD | BPF_IMM | BPF_DW)) 3051 insn->src_reg = 0; 3052 } 3053 3054 /* convert load instructions that access fields of 'struct __sk_buff' 3055 * into sequence of instructions that access fields of 'struct sk_buff' 3056 */ 3057 static int convert_ctx_accesses(struct bpf_verifier_env *env) 3058 { 3059 const struct bpf_verifier_ops *ops = env->prog->aux->ops; 3060 const int insn_cnt = env->prog->len; 3061 struct bpf_insn insn_buf[16], *insn; 3062 struct bpf_prog *new_prog; 3063 enum bpf_access_type type; 3064 int i, cnt, delta = 0; 3065 3066 if (ops->gen_prologue) { 3067 cnt = ops->gen_prologue(insn_buf, env->seen_direct_write, 3068 env->prog); 3069 if (cnt >= ARRAY_SIZE(insn_buf)) { 3070 verbose("bpf verifier is misconfigured\n"); 3071 return -EINVAL; 3072 } else if (cnt) { 3073 new_prog = bpf_patch_insn_single(env->prog, 0, 3074 insn_buf, cnt); 3075 if (!new_prog) 3076 return -ENOMEM; 3077 env->prog = new_prog; 3078 delta += cnt - 1; 3079 } 3080 } 3081 3082 if (!ops->convert_ctx_access) 3083 return 0; 3084 3085 insn = env->prog->insnsi + delta; 3086 3087 for (i = 0; i < insn_cnt; i++, insn++) { 3088 if (insn->code == (BPF_LDX | BPF_MEM | BPF_W) || 3089 insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) 3090 type = BPF_READ; 3091 else if (insn->code == (BPF_STX | BPF_MEM | BPF_W) || 3092 insn->code == (BPF_STX | BPF_MEM | BPF_DW)) 3093 type = BPF_WRITE; 3094 else 3095 continue; 3096 3097 if (env->insn_aux_data[i].ptr_type != PTR_TO_CTX) 3098 continue; 3099 3100 cnt = ops->convert_ctx_access(type, insn->dst_reg, insn->src_reg, 3101 insn->off, insn_buf, env->prog); 3102 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) { 3103 verbose("bpf verifier is misconfigured\n"); 3104 return -EINVAL; 3105 } 3106 3107 new_prog = bpf_patch_insn_single(env->prog, i + delta, insn_buf, 3108 cnt); 3109 if (!new_prog) 3110 return -ENOMEM; 3111 3112 delta += cnt - 1; 3113 3114 /* keep walking new program and skip insns we just inserted */ 3115 env->prog = new_prog; 3116 insn = new_prog->insnsi + i + delta; 3117 } 3118 3119 return 0; 3120 } 3121 3122 static void free_states(struct bpf_verifier_env *env) 3123 { 3124 struct bpf_verifier_state_list *sl, *sln; 3125 int i; 3126 3127 if (!env->explored_states) 3128 return; 3129 3130 for (i = 0; i < env->prog->len; i++) { 3131 sl = env->explored_states[i]; 3132 3133 if (sl) 3134 while (sl != STATE_LIST_MARK) { 3135 sln = sl->next; 3136 kfree(sl); 3137 sl = sln; 3138 } 3139 } 3140 3141 kfree(env->explored_states); 3142 } 3143 3144 int bpf_check(struct bpf_prog **prog, union bpf_attr *attr) 3145 { 3146 char __user *log_ubuf = NULL; 3147 struct bpf_verifier_env *env; 3148 int ret = -EINVAL; 3149 3150 /* 'struct bpf_verifier_env' can be global, but since it's not small, 3151 * allocate/free it every time bpf_check() is called 3152 */ 3153 env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL); 3154 if (!env) 3155 return -ENOMEM; 3156 3157 env->insn_aux_data = vzalloc(sizeof(struct bpf_insn_aux_data) * 3158 (*prog)->len); 3159 ret = -ENOMEM; 3160 if (!env->insn_aux_data) 3161 goto err_free_env; 3162 env->prog = *prog; 3163 3164 /* grab the mutex to protect few globals used by verifier */ 3165 mutex_lock(&bpf_verifier_lock); 3166 3167 if (attr->log_level || attr->log_buf || attr->log_size) { 3168 /* user requested verbose verifier output 3169 * and supplied buffer to store the verification trace 3170 */ 3171 log_level = attr->log_level; 3172 log_ubuf = (char __user *) (unsigned long) attr->log_buf; 3173 log_size = attr->log_size; 3174 log_len = 0; 3175 3176 ret = -EINVAL; 3177 /* log_* values have to be sane */ 3178 if (log_size < 128 || log_size > UINT_MAX >> 8 || 3179 log_level == 0 || log_ubuf == NULL) 3180 goto err_unlock; 3181 3182 ret = -ENOMEM; 3183 log_buf = vmalloc(log_size); 3184 if (!log_buf) 3185 goto err_unlock; 3186 } else { 3187 log_level = 0; 3188 } 3189 3190 ret = replace_map_fd_with_map_ptr(env); 3191 if (ret < 0) 3192 goto skip_full_check; 3193 3194 env->explored_states = kcalloc(env->prog->len, 3195 sizeof(struct bpf_verifier_state_list *), 3196 GFP_USER); 3197 ret = -ENOMEM; 3198 if (!env->explored_states) 3199 goto skip_full_check; 3200 3201 ret = check_cfg(env); 3202 if (ret < 0) 3203 goto skip_full_check; 3204 3205 env->allow_ptr_leaks = capable(CAP_SYS_ADMIN); 3206 3207 ret = do_check(env); 3208 3209 skip_full_check: 3210 while (pop_stack(env, NULL) >= 0); 3211 free_states(env); 3212 3213 if (ret == 0) 3214 /* program is valid, convert *(u32*)(ctx + off) accesses */ 3215 ret = convert_ctx_accesses(env); 3216 3217 if (log_level && log_len >= log_size - 1) { 3218 BUG_ON(log_len >= log_size); 3219 /* verifier log exceeded user supplied buffer */ 3220 ret = -ENOSPC; 3221 /* fall through to return what was recorded */ 3222 } 3223 3224 /* copy verifier log back to user space including trailing zero */ 3225 if (log_level && copy_to_user(log_ubuf, log_buf, log_len + 1) != 0) { 3226 ret = -EFAULT; 3227 goto free_log_buf; 3228 } 3229 3230 if (ret == 0 && env->used_map_cnt) { 3231 /* if program passed verifier, update used_maps in bpf_prog_info */ 3232 env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt, 3233 sizeof(env->used_maps[0]), 3234 GFP_KERNEL); 3235 3236 if (!env->prog->aux->used_maps) { 3237 ret = -ENOMEM; 3238 goto free_log_buf; 3239 } 3240 3241 memcpy(env->prog->aux->used_maps, env->used_maps, 3242 sizeof(env->used_maps[0]) * env->used_map_cnt); 3243 env->prog->aux->used_map_cnt = env->used_map_cnt; 3244 3245 /* program is valid. Convert pseudo bpf_ld_imm64 into generic 3246 * bpf_ld_imm64 instructions 3247 */ 3248 convert_pseudo_ld_imm64(env); 3249 } 3250 3251 free_log_buf: 3252 if (log_level) 3253 vfree(log_buf); 3254 if (!env->prog->aux->used_maps) 3255 /* if we didn't copy map pointers into bpf_prog_info, release 3256 * them now. Otherwise free_bpf_prog_info() will release them. 3257 */ 3258 release_maps(env); 3259 *prog = env->prog; 3260 err_unlock: 3261 mutex_unlock(&bpf_verifier_lock); 3262 vfree(env->insn_aux_data); 3263 err_free_env: 3264 kfree(env); 3265 return ret; 3266 } 3267 3268 int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops, 3269 void *priv) 3270 { 3271 struct bpf_verifier_env *env; 3272 int ret; 3273 3274 env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL); 3275 if (!env) 3276 return -ENOMEM; 3277 3278 env->insn_aux_data = vzalloc(sizeof(struct bpf_insn_aux_data) * 3279 prog->len); 3280 ret = -ENOMEM; 3281 if (!env->insn_aux_data) 3282 goto err_free_env; 3283 env->prog = prog; 3284 env->analyzer_ops = ops; 3285 env->analyzer_priv = priv; 3286 3287 /* grab the mutex to protect few globals used by verifier */ 3288 mutex_lock(&bpf_verifier_lock); 3289 3290 log_level = 0; 3291 3292 env->explored_states = kcalloc(env->prog->len, 3293 sizeof(struct bpf_verifier_state_list *), 3294 GFP_KERNEL); 3295 ret = -ENOMEM; 3296 if (!env->explored_states) 3297 goto skip_full_check; 3298 3299 ret = check_cfg(env); 3300 if (ret < 0) 3301 goto skip_full_check; 3302 3303 env->allow_ptr_leaks = capable(CAP_SYS_ADMIN); 3304 3305 ret = do_check(env); 3306 3307 skip_full_check: 3308 while (pop_stack(env, NULL) >= 0); 3309 free_states(env); 3310 3311 mutex_unlock(&bpf_verifier_lock); 3312 vfree(env->insn_aux_data); 3313 err_free_env: 3314 kfree(env); 3315 return ret; 3316 } 3317 EXPORT_SYMBOL_GPL(bpf_analyzer); 3318