1 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com 2 * Copyright (c) 2016 Facebook 3 * 4 * This program is free software; you can redistribute it and/or 5 * modify it under the terms of version 2 of the GNU General Public 6 * License as published by the Free Software Foundation. 7 * 8 * This program is distributed in the hope that it will be useful, but 9 * WITHOUT ANY WARRANTY; without even the implied warranty of 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 * General Public License for more details. 12 */ 13 #include <linux/kernel.h> 14 #include <linux/types.h> 15 #include <linux/slab.h> 16 #include <linux/bpf.h> 17 #include <linux/bpf_verifier.h> 18 #include <linux/filter.h> 19 #include <net/netlink.h> 20 #include <linux/file.h> 21 #include <linux/vmalloc.h> 22 #include <linux/stringify.h> 23 24 /* bpf_check() is a static code analyzer that walks eBPF program 25 * instruction by instruction and updates register/stack state. 26 * All paths of conditional branches are analyzed until 'bpf_exit' insn. 27 * 28 * The first pass is depth-first-search to check that the program is a DAG. 29 * It rejects the following programs: 30 * - larger than BPF_MAXINSNS insns 31 * - if loop is present (detected via back-edge) 32 * - unreachable insns exist (shouldn't be a forest. program = one function) 33 * - out of bounds or malformed jumps 34 * The second pass is all possible path descent from the 1st insn. 35 * Since it's analyzing all pathes through the program, the length of the 36 * analysis is limited to 64k insn, which may be hit even if total number of 37 * insn is less then 4K, but there are too many branches that change stack/regs. 38 * Number of 'branches to be analyzed' is limited to 1k 39 * 40 * On entry to each instruction, each register has a type, and the instruction 41 * changes the types of the registers depending on instruction semantics. 42 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is 43 * copied to R1. 44 * 45 * All registers are 64-bit. 46 * R0 - return register 47 * R1-R5 argument passing registers 48 * R6-R9 callee saved registers 49 * R10 - frame pointer read-only 50 * 51 * At the start of BPF program the register R1 contains a pointer to bpf_context 52 * and has type PTR_TO_CTX. 53 * 54 * Verifier tracks arithmetic operations on pointers in case: 55 * BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 56 * BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20), 57 * 1st insn copies R10 (which has FRAME_PTR) type into R1 58 * and 2nd arithmetic instruction is pattern matched to recognize 59 * that it wants to construct a pointer to some element within stack. 60 * So after 2nd insn, the register R1 has type PTR_TO_STACK 61 * (and -20 constant is saved for further stack bounds checking). 62 * Meaning that this reg is a pointer to stack plus known immediate constant. 63 * 64 * Most of the time the registers have UNKNOWN_VALUE type, which 65 * means the register has some value, but it's not a valid pointer. 66 * (like pointer plus pointer becomes UNKNOWN_VALUE type) 67 * 68 * When verifier sees load or store instructions the type of base register 69 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, FRAME_PTR. These are three pointer 70 * types recognized by check_mem_access() function. 71 * 72 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value' 73 * and the range of [ptr, ptr + map's value_size) is accessible. 74 * 75 * registers used to pass values to function calls are checked against 76 * function argument constraints. 
77 * 78 * ARG_PTR_TO_MAP_KEY is one of such argument constraints. 79 * It means that the register type passed to this function must be 80 * PTR_TO_STACK and it will be used inside the function as 81 * 'pointer to map element key' 82 * 83 * For example the argument constraints for bpf_map_lookup_elem(): 84 * .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL, 85 * .arg1_type = ARG_CONST_MAP_PTR, 86 * .arg2_type = ARG_PTR_TO_MAP_KEY, 87 * 88 * ret_type says that this function returns 'pointer to map elem value or null' 89 * function expects 1st argument to be a const pointer to 'struct bpf_map' and 90 * 2nd argument should be a pointer to stack, which will be used inside 91 * the helper function as a pointer to map element key. 92 * 93 * On the kernel side the helper function looks like: 94 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) 95 * { 96 * struct bpf_map *map = (struct bpf_map *) (unsigned long) r1; 97 * void *key = (void *) (unsigned long) r2; 98 * void *value; 99 * 100 * here kernel can access 'key' and 'map' pointers safely, knowing that 101 * [key, key + map->key_size) bytes are valid and were initialized on 102 * the stack of eBPF program. 103 * } 104 * 105 * Corresponding eBPF program may look like: 106 * BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), // after this insn R2 type is FRAME_PTR 107 * BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK 108 * BPF_LD_MAP_FD(BPF_REG_1, map_fd), // after this insn R1 type is CONST_PTR_TO_MAP 109 * BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 110 * here verifier looks at prototype of map_lookup_elem() and sees: 111 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok, 112 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes 113 * 114 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far, 115 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits 116 * and were initialized prior to this call. 117 * If it's ok, then verifier allows this BPF_CALL insn and looks at 118 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets 119 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function 120 * returns ether pointer to map value or NULL. 121 * 122 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off' 123 * insn, the register holding that pointer in the true branch changes state to 124 * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false 125 * branch. See check_cond_jmp_op(). 126 * 127 * After the call R0 is set to return type of the function and registers R1-R5 128 * are set to NOT_INIT to indicate that they are no longer readable. 
129 */ 130 131 /* verifier_state + insn_idx are pushed to stack when branch is encountered */ 132 struct bpf_verifier_stack_elem { 133 /* verifer state is 'st' 134 * before processing instruction 'insn_idx' 135 * and after processing instruction 'prev_insn_idx' 136 */ 137 struct bpf_verifier_state st; 138 int insn_idx; 139 int prev_insn_idx; 140 struct bpf_verifier_stack_elem *next; 141 }; 142 143 #define BPF_COMPLEXITY_LIMIT_INSNS 98304 144 #define BPF_COMPLEXITY_LIMIT_STACK 1024 145 146 #define BPF_MAP_PTR_POISON ((void *)0xeB9F + POISON_POINTER_DELTA) 147 148 struct bpf_call_arg_meta { 149 struct bpf_map *map_ptr; 150 bool raw_mode; 151 bool pkt_access; 152 int regno; 153 int access_size; 154 }; 155 156 /* verbose verifier prints what it's seeing 157 * bpf_check() is called under lock, so no race to access these global vars 158 */ 159 static u32 log_level, log_size, log_len; 160 static char *log_buf; 161 162 static DEFINE_MUTEX(bpf_verifier_lock); 163 164 /* log_level controls verbosity level of eBPF verifier. 165 * verbose() is used to dump the verification trace to the log, so the user 166 * can figure out what's wrong with the program 167 */ 168 static __printf(1, 2) void verbose(const char *fmt, ...) 169 { 170 va_list args; 171 172 if (log_level == 0 || log_len >= log_size - 1) 173 return; 174 175 va_start(args, fmt); 176 log_len += vscnprintf(log_buf + log_len, log_size - log_len, fmt, args); 177 va_end(args); 178 } 179 180 /* string representation of 'enum bpf_reg_type' */ 181 static const char * const reg_type_str[] = { 182 [NOT_INIT] = "?", 183 [UNKNOWN_VALUE] = "inv", 184 [PTR_TO_CTX] = "ctx", 185 [CONST_PTR_TO_MAP] = "map_ptr", 186 [PTR_TO_MAP_VALUE] = "map_value", 187 [PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null", 188 [PTR_TO_MAP_VALUE_ADJ] = "map_value_adj", 189 [FRAME_PTR] = "fp", 190 [PTR_TO_STACK] = "fp", 191 [CONST_IMM] = "imm", 192 [PTR_TO_PACKET] = "pkt", 193 [PTR_TO_PACKET_END] = "pkt_end", 194 }; 195 196 #define __BPF_FUNC_STR_FN(x) [BPF_FUNC_ ## x] = __stringify(bpf_ ## x) 197 static const char * const func_id_str[] = { 198 __BPF_FUNC_MAPPER(__BPF_FUNC_STR_FN) 199 }; 200 #undef __BPF_FUNC_STR_FN 201 202 static const char *func_id_name(int id) 203 { 204 BUILD_BUG_ON(ARRAY_SIZE(func_id_str) != __BPF_FUNC_MAX_ID); 205 206 if (id >= 0 && id < __BPF_FUNC_MAX_ID && func_id_str[id]) 207 return func_id_str[id]; 208 else 209 return "unknown"; 210 } 211 212 static void print_verifier_state(struct bpf_verifier_state *state) 213 { 214 struct bpf_reg_state *reg; 215 enum bpf_reg_type t; 216 int i; 217 218 for (i = 0; i < MAX_BPF_REG; i++) { 219 reg = &state->regs[i]; 220 t = reg->type; 221 if (t == NOT_INIT) 222 continue; 223 verbose(" R%d=%s", i, reg_type_str[t]); 224 if (t == CONST_IMM || t == PTR_TO_STACK) 225 verbose("%lld", reg->imm); 226 else if (t == PTR_TO_PACKET) 227 verbose("(id=%d,off=%d,r=%d)", 228 reg->id, reg->off, reg->range); 229 else if (t == UNKNOWN_VALUE && reg->imm) 230 verbose("%lld", reg->imm); 231 else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE || 232 t == PTR_TO_MAP_VALUE_OR_NULL || 233 t == PTR_TO_MAP_VALUE_ADJ) 234 verbose("(ks=%d,vs=%d,id=%u)", 235 reg->map_ptr->key_size, 236 reg->map_ptr->value_size, 237 reg->id); 238 if (reg->min_value != BPF_REGISTER_MIN_RANGE) 239 verbose(",min_value=%lld", 240 (long long)reg->min_value); 241 if (reg->max_value != BPF_REGISTER_MAX_RANGE) 242 verbose(",max_value=%llu", 243 (unsigned long long)reg->max_value); 244 if (reg->min_align) 245 verbose(",min_align=%u", reg->min_align); 246 if (reg->aux_off) 247 
verbose(",aux_off=%u", reg->aux_off); 248 if (reg->aux_off_align) 249 verbose(",aux_off_align=%u", reg->aux_off_align); 250 } 251 for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { 252 if (state->stack_slot_type[i] == STACK_SPILL) 253 verbose(" fp%d=%s", -MAX_BPF_STACK + i, 254 reg_type_str[state->spilled_regs[i / BPF_REG_SIZE].type]); 255 } 256 verbose("\n"); 257 } 258 259 static const char *const bpf_class_string[] = { 260 [BPF_LD] = "ld", 261 [BPF_LDX] = "ldx", 262 [BPF_ST] = "st", 263 [BPF_STX] = "stx", 264 [BPF_ALU] = "alu", 265 [BPF_JMP] = "jmp", 266 [BPF_RET] = "BUG", 267 [BPF_ALU64] = "alu64", 268 }; 269 270 static const char *const bpf_alu_string[16] = { 271 [BPF_ADD >> 4] = "+=", 272 [BPF_SUB >> 4] = "-=", 273 [BPF_MUL >> 4] = "*=", 274 [BPF_DIV >> 4] = "/=", 275 [BPF_OR >> 4] = "|=", 276 [BPF_AND >> 4] = "&=", 277 [BPF_LSH >> 4] = "<<=", 278 [BPF_RSH >> 4] = ">>=", 279 [BPF_NEG >> 4] = "neg", 280 [BPF_MOD >> 4] = "%=", 281 [BPF_XOR >> 4] = "^=", 282 [BPF_MOV >> 4] = "=", 283 [BPF_ARSH >> 4] = "s>>=", 284 [BPF_END >> 4] = "endian", 285 }; 286 287 static const char *const bpf_ldst_string[] = { 288 [BPF_W >> 3] = "u32", 289 [BPF_H >> 3] = "u16", 290 [BPF_B >> 3] = "u8", 291 [BPF_DW >> 3] = "u64", 292 }; 293 294 static const char *const bpf_jmp_string[16] = { 295 [BPF_JA >> 4] = "jmp", 296 [BPF_JEQ >> 4] = "==", 297 [BPF_JGT >> 4] = ">", 298 [BPF_JGE >> 4] = ">=", 299 [BPF_JSET >> 4] = "&", 300 [BPF_JNE >> 4] = "!=", 301 [BPF_JSGT >> 4] = "s>", 302 [BPF_JSGE >> 4] = "s>=", 303 [BPF_CALL >> 4] = "call", 304 [BPF_EXIT >> 4] = "exit", 305 }; 306 307 static void print_bpf_insn(const struct bpf_verifier_env *env, 308 const struct bpf_insn *insn) 309 { 310 u8 class = BPF_CLASS(insn->code); 311 312 if (class == BPF_ALU || class == BPF_ALU64) { 313 if (BPF_SRC(insn->code) == BPF_X) 314 verbose("(%02x) %sr%d %s %sr%d\n", 315 insn->code, class == BPF_ALU ? "(u32) " : "", 316 insn->dst_reg, 317 bpf_alu_string[BPF_OP(insn->code) >> 4], 318 class == BPF_ALU ? "(u32) " : "", 319 insn->src_reg); 320 else 321 verbose("(%02x) %sr%d %s %s%d\n", 322 insn->code, class == BPF_ALU ? "(u32) " : "", 323 insn->dst_reg, 324 bpf_alu_string[BPF_OP(insn->code) >> 4], 325 class == BPF_ALU ? 
"(u32) " : "", 326 insn->imm); 327 } else if (class == BPF_STX) { 328 if (BPF_MODE(insn->code) == BPF_MEM) 329 verbose("(%02x) *(%s *)(r%d %+d) = r%d\n", 330 insn->code, 331 bpf_ldst_string[BPF_SIZE(insn->code) >> 3], 332 insn->dst_reg, 333 insn->off, insn->src_reg); 334 else if (BPF_MODE(insn->code) == BPF_XADD) 335 verbose("(%02x) lock *(%s *)(r%d %+d) += r%d\n", 336 insn->code, 337 bpf_ldst_string[BPF_SIZE(insn->code) >> 3], 338 insn->dst_reg, insn->off, 339 insn->src_reg); 340 else 341 verbose("BUG_%02x\n", insn->code); 342 } else if (class == BPF_ST) { 343 if (BPF_MODE(insn->code) != BPF_MEM) { 344 verbose("BUG_st_%02x\n", insn->code); 345 return; 346 } 347 verbose("(%02x) *(%s *)(r%d %+d) = %d\n", 348 insn->code, 349 bpf_ldst_string[BPF_SIZE(insn->code) >> 3], 350 insn->dst_reg, 351 insn->off, insn->imm); 352 } else if (class == BPF_LDX) { 353 if (BPF_MODE(insn->code) != BPF_MEM) { 354 verbose("BUG_ldx_%02x\n", insn->code); 355 return; 356 } 357 verbose("(%02x) r%d = *(%s *)(r%d %+d)\n", 358 insn->code, insn->dst_reg, 359 bpf_ldst_string[BPF_SIZE(insn->code) >> 3], 360 insn->src_reg, insn->off); 361 } else if (class == BPF_LD) { 362 if (BPF_MODE(insn->code) == BPF_ABS) { 363 verbose("(%02x) r0 = *(%s *)skb[%d]\n", 364 insn->code, 365 bpf_ldst_string[BPF_SIZE(insn->code) >> 3], 366 insn->imm); 367 } else if (BPF_MODE(insn->code) == BPF_IND) { 368 verbose("(%02x) r0 = *(%s *)skb[r%d + %d]\n", 369 insn->code, 370 bpf_ldst_string[BPF_SIZE(insn->code) >> 3], 371 insn->src_reg, insn->imm); 372 } else if (BPF_MODE(insn->code) == BPF_IMM && 373 BPF_SIZE(insn->code) == BPF_DW) { 374 /* At this point, we already made sure that the second 375 * part of the ldimm64 insn is accessible. 376 */ 377 u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm; 378 bool map_ptr = insn->src_reg == BPF_PSEUDO_MAP_FD; 379 380 if (map_ptr && !env->allow_ptr_leaks) 381 imm = 0; 382 383 verbose("(%02x) r%d = 0x%llx\n", insn->code, 384 insn->dst_reg, (unsigned long long)imm); 385 } else { 386 verbose("BUG_ld_%02x\n", insn->code); 387 return; 388 } 389 } else if (class == BPF_JMP) { 390 u8 opcode = BPF_OP(insn->code); 391 392 if (opcode == BPF_CALL) { 393 verbose("(%02x) call %s#%d\n", insn->code, 394 func_id_name(insn->imm), insn->imm); 395 } else if (insn->code == (BPF_JMP | BPF_JA)) { 396 verbose("(%02x) goto pc%+d\n", 397 insn->code, insn->off); 398 } else if (insn->code == (BPF_JMP | BPF_EXIT)) { 399 verbose("(%02x) exit\n", insn->code); 400 } else if (BPF_SRC(insn->code) == BPF_X) { 401 verbose("(%02x) if r%d %s r%d goto pc%+d\n", 402 insn->code, insn->dst_reg, 403 bpf_jmp_string[BPF_OP(insn->code) >> 4], 404 insn->src_reg, insn->off); 405 } else { 406 verbose("(%02x) if r%d %s 0x%x goto pc%+d\n", 407 insn->code, insn->dst_reg, 408 bpf_jmp_string[BPF_OP(insn->code) >> 4], 409 insn->imm, insn->off); 410 } 411 } else { 412 verbose("(%02x) %s\n", insn->code, bpf_class_string[class]); 413 } 414 } 415 416 static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx) 417 { 418 struct bpf_verifier_stack_elem *elem; 419 int insn_idx; 420 421 if (env->head == NULL) 422 return -1; 423 424 memcpy(&env->cur_state, &env->head->st, sizeof(env->cur_state)); 425 insn_idx = env->head->insn_idx; 426 if (prev_insn_idx) 427 *prev_insn_idx = env->head->prev_insn_idx; 428 elem = env->head->next; 429 kfree(env->head); 430 env->head = elem; 431 env->stack_size--; 432 return insn_idx; 433 } 434 435 static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env, 436 int insn_idx, int prev_insn_idx) 437 { 438 
struct bpf_verifier_stack_elem *elem; 439 440 elem = kmalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL); 441 if (!elem) 442 goto err; 443 444 memcpy(&elem->st, &env->cur_state, sizeof(env->cur_state)); 445 elem->insn_idx = insn_idx; 446 elem->prev_insn_idx = prev_insn_idx; 447 elem->next = env->head; 448 env->head = elem; 449 env->stack_size++; 450 if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) { 451 verbose("BPF program is too complex\n"); 452 goto err; 453 } 454 return &elem->st; 455 err: 456 /* pop all elements and return */ 457 while (pop_stack(env, NULL) >= 0); 458 return NULL; 459 } 460 461 #define CALLER_SAVED_REGS 6 462 static const int caller_saved[CALLER_SAVED_REGS] = { 463 BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5 464 }; 465 466 static void mark_reg_not_init(struct bpf_reg_state *regs, u32 regno) 467 { 468 BUG_ON(regno >= MAX_BPF_REG); 469 470 memset(®s[regno], 0, sizeof(regs[regno])); 471 regs[regno].type = NOT_INIT; 472 regs[regno].min_value = BPF_REGISTER_MIN_RANGE; 473 regs[regno].max_value = BPF_REGISTER_MAX_RANGE; 474 } 475 476 static void init_reg_state(struct bpf_reg_state *regs) 477 { 478 int i; 479 480 for (i = 0; i < MAX_BPF_REG; i++) 481 mark_reg_not_init(regs, i); 482 483 /* frame pointer */ 484 regs[BPF_REG_FP].type = FRAME_PTR; 485 486 /* 1st arg to a function */ 487 regs[BPF_REG_1].type = PTR_TO_CTX; 488 } 489 490 static void __mark_reg_unknown_value(struct bpf_reg_state *regs, u32 regno) 491 { 492 regs[regno].type = UNKNOWN_VALUE; 493 regs[regno].id = 0; 494 regs[regno].imm = 0; 495 } 496 497 static void mark_reg_unknown_value(struct bpf_reg_state *regs, u32 regno) 498 { 499 BUG_ON(regno >= MAX_BPF_REG); 500 __mark_reg_unknown_value(regs, regno); 501 } 502 503 static void reset_reg_range_values(struct bpf_reg_state *regs, u32 regno) 504 { 505 regs[regno].min_value = BPF_REGISTER_MIN_RANGE; 506 regs[regno].max_value = BPF_REGISTER_MAX_RANGE; 507 regs[regno].min_align = 0; 508 } 509 510 static void mark_reg_unknown_value_and_range(struct bpf_reg_state *regs, 511 u32 regno) 512 { 513 mark_reg_unknown_value(regs, regno); 514 reset_reg_range_values(regs, regno); 515 } 516 517 enum reg_arg_type { 518 SRC_OP, /* register is used as source operand */ 519 DST_OP, /* register is used as destination operand */ 520 DST_OP_NO_MARK /* same as above, check only, don't mark */ 521 }; 522 523 static int check_reg_arg(struct bpf_reg_state *regs, u32 regno, 524 enum reg_arg_type t) 525 { 526 if (regno >= MAX_BPF_REG) { 527 verbose("R%d is invalid\n", regno); 528 return -EINVAL; 529 } 530 531 if (t == SRC_OP) { 532 /* check whether register used as source operand can be read */ 533 if (regs[regno].type == NOT_INIT) { 534 verbose("R%d !read_ok\n", regno); 535 return -EACCES; 536 } 537 } else { 538 /* check whether register used as dest operand can be written to */ 539 if (regno == BPF_REG_FP) { 540 verbose("frame pointer is read only\n"); 541 return -EACCES; 542 } 543 if (t == DST_OP) 544 mark_reg_unknown_value(regs, regno); 545 } 546 return 0; 547 } 548 549 static int bpf_size_to_bytes(int bpf_size) 550 { 551 if (bpf_size == BPF_W) 552 return 4; 553 else if (bpf_size == BPF_H) 554 return 2; 555 else if (bpf_size == BPF_B) 556 return 1; 557 else if (bpf_size == BPF_DW) 558 return 8; 559 else 560 return -EINVAL; 561 } 562 563 static bool is_spillable_regtype(enum bpf_reg_type type) 564 { 565 switch (type) { 566 case PTR_TO_MAP_VALUE: 567 case PTR_TO_MAP_VALUE_OR_NULL: 568 case PTR_TO_MAP_VALUE_ADJ: 569 case PTR_TO_STACK: 570 case PTR_TO_CTX: 571 
case PTR_TO_PACKET: 572 case PTR_TO_PACKET_END: 573 case FRAME_PTR: 574 case CONST_PTR_TO_MAP: 575 return true; 576 default: 577 return false; 578 } 579 } 580 581 /* check_stack_read/write functions track spill/fill of registers, 582 * stack boundary and alignment are checked in check_mem_access() 583 */ 584 static int check_stack_write(struct bpf_verifier_state *state, int off, 585 int size, int value_regno) 586 { 587 int i; 588 /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0, 589 * so it's aligned access and [off, off + size) are within stack limits 590 */ 591 592 if (value_regno >= 0 && 593 is_spillable_regtype(state->regs[value_regno].type)) { 594 595 /* register containing pointer is being spilled into stack */ 596 if (size != BPF_REG_SIZE) { 597 verbose("invalid size of register spill\n"); 598 return -EACCES; 599 } 600 601 /* save register state */ 602 state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] = 603 state->regs[value_regno]; 604 605 for (i = 0; i < BPF_REG_SIZE; i++) 606 state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_SPILL; 607 } else { 608 /* regular write of data into stack */ 609 state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] = 610 (struct bpf_reg_state) {}; 611 612 for (i = 0; i < size; i++) 613 state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_MISC; 614 } 615 return 0; 616 } 617 618 static int check_stack_read(struct bpf_verifier_state *state, int off, int size, 619 int value_regno) 620 { 621 u8 *slot_type; 622 int i; 623 624 slot_type = &state->stack_slot_type[MAX_BPF_STACK + off]; 625 626 if (slot_type[0] == STACK_SPILL) { 627 if (size != BPF_REG_SIZE) { 628 verbose("invalid size of register spill\n"); 629 return -EACCES; 630 } 631 for (i = 1; i < BPF_REG_SIZE; i++) { 632 if (slot_type[i] != STACK_SPILL) { 633 verbose("corrupted spill memory\n"); 634 return -EACCES; 635 } 636 } 637 638 if (value_regno >= 0) 639 /* restore register state from stack */ 640 state->regs[value_regno] = 641 state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE]; 642 return 0; 643 } else { 644 for (i = 0; i < size; i++) { 645 if (slot_type[i] != STACK_MISC) { 646 verbose("invalid read from stack off %d+%d size %d\n", 647 off, i, size); 648 return -EACCES; 649 } 650 } 651 if (value_regno >= 0) 652 /* have read misc data from the stack */ 653 mark_reg_unknown_value_and_range(state->regs, 654 value_regno); 655 return 0; 656 } 657 } 658 659 /* check read/write into map element returned by bpf_map_lookup_elem() */ 660 static int check_map_access(struct bpf_verifier_env *env, u32 regno, int off, 661 int size) 662 { 663 struct bpf_map *map = env->cur_state.regs[regno].map_ptr; 664 665 if (off < 0 || size <= 0 || off + size > map->value_size) { 666 verbose("invalid access to map value, value_size=%d off=%d size=%d\n", 667 map->value_size, off, size); 668 return -EACCES; 669 } 670 return 0; 671 } 672 673 /* check read/write into an adjusted map element */ 674 static int check_map_access_adj(struct bpf_verifier_env *env, u32 regno, 675 int off, int size) 676 { 677 struct bpf_verifier_state *state = &env->cur_state; 678 struct bpf_reg_state *reg = &state->regs[regno]; 679 int err; 680 681 /* We adjusted the register to this map value, so we 682 * need to change off and size to min_value and max_value 683 * respectively to make sure our theoretical access will be 684 * safe. 
685 */ 686 if (log_level) 687 print_verifier_state(state); 688 env->varlen_map_value_access = true; 689 /* The minimum value is only important with signed 690 * comparisons where we can't assume the floor of a 691 * value is 0. If we are using signed variables for our 692 * index'es we need to make sure that whatever we use 693 * will have a set floor within our range. 694 */ 695 if (reg->min_value < 0) { 696 verbose("R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", 697 regno); 698 return -EACCES; 699 } 700 err = check_map_access(env, regno, reg->min_value + off, size); 701 if (err) { 702 verbose("R%d min value is outside of the array range\n", 703 regno); 704 return err; 705 } 706 707 /* If we haven't set a max value then we need to bail 708 * since we can't be sure we won't do bad things. 709 */ 710 if (reg->max_value == BPF_REGISTER_MAX_RANGE) { 711 verbose("R%d unbounded memory access, make sure to bounds check any array access into a map\n", 712 regno); 713 return -EACCES; 714 } 715 return check_map_access(env, regno, reg->max_value + off, size); 716 } 717 718 #define MAX_PACKET_OFF 0xffff 719 720 static bool may_access_direct_pkt_data(struct bpf_verifier_env *env, 721 const struct bpf_call_arg_meta *meta, 722 enum bpf_access_type t) 723 { 724 switch (env->prog->type) { 725 case BPF_PROG_TYPE_LWT_IN: 726 case BPF_PROG_TYPE_LWT_OUT: 727 /* dst_input() and dst_output() can't write for now */ 728 if (t == BPF_WRITE) 729 return false; 730 /* fallthrough */ 731 case BPF_PROG_TYPE_SCHED_CLS: 732 case BPF_PROG_TYPE_SCHED_ACT: 733 case BPF_PROG_TYPE_XDP: 734 case BPF_PROG_TYPE_LWT_XMIT: 735 if (meta) 736 return meta->pkt_access; 737 738 env->seen_direct_write = true; 739 return true; 740 default: 741 return false; 742 } 743 } 744 745 static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, 746 int size) 747 { 748 struct bpf_reg_state *regs = env->cur_state.regs; 749 struct bpf_reg_state *reg = ®s[regno]; 750 751 off += reg->off; 752 if (off < 0 || size <= 0 || off + size > reg->range) { 753 verbose("invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n", 754 off, size, regno, reg->id, reg->off, reg->range); 755 return -EACCES; 756 } 757 return 0; 758 } 759 760 /* check access to 'struct bpf_context' fields */ 761 static int check_ctx_access(struct bpf_verifier_env *env, int off, int size, 762 enum bpf_access_type t, enum bpf_reg_type *reg_type) 763 { 764 /* for analyzer ctx accesses are already validated and converted */ 765 if (env->analyzer_ops) 766 return 0; 767 768 if (env->prog->aux->ops->is_valid_access && 769 env->prog->aux->ops->is_valid_access(off, size, t, reg_type)) { 770 /* remember the offset of last byte accessed in ctx */ 771 if (env->prog->aux->max_ctx_offset < off + size) 772 env->prog->aux->max_ctx_offset = off + size; 773 return 0; 774 } 775 776 verbose("invalid bpf_context access off=%d size=%d\n", off, size); 777 return -EACCES; 778 } 779 780 static bool is_pointer_value(struct bpf_verifier_env *env, int regno) 781 { 782 if (env->allow_ptr_leaks) 783 return false; 784 785 switch (env->cur_state.regs[regno].type) { 786 case UNKNOWN_VALUE: 787 case CONST_IMM: 788 return false; 789 default: 790 return true; 791 } 792 } 793 794 static int check_pkt_ptr_alignment(const struct bpf_reg_state *reg, 795 int off, int size, bool strict) 796 { 797 int ip_align; 798 int reg_off; 799 800 /* Byte size accesses are always allowed. 
*/ 801 if (!strict || size == 1) 802 return 0; 803 804 reg_off = reg->off; 805 if (reg->id) { 806 if (reg->aux_off_align % size) { 807 verbose("Packet access is only %u byte aligned, %d byte access not allowed\n", 808 reg->aux_off_align, size); 809 return -EACCES; 810 } 811 reg_off += reg->aux_off; 812 } 813 814 /* For platforms that do not have a Kconfig enabling 815 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of 816 * NET_IP_ALIGN is universally set to '2'. And on platforms 817 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get 818 * to this code only in strict mode where we want to emulate 819 * the NET_IP_ALIGN==2 checking. Therefore use an 820 * unconditional IP align value of '2'. 821 */ 822 ip_align = 2; 823 if ((ip_align + reg_off + off) % size != 0) { 824 verbose("misaligned packet access off %d+%d+%d size %d\n", 825 ip_align, reg_off, off, size); 826 return -EACCES; 827 } 828 829 return 0; 830 } 831 832 static int check_val_ptr_alignment(const struct bpf_reg_state *reg, 833 int size, bool strict) 834 { 835 if (strict && size != 1) { 836 verbose("Unknown alignment. Only byte-sized access allowed in value access.\n"); 837 return -EACCES; 838 } 839 840 return 0; 841 } 842 843 static int check_ptr_alignment(struct bpf_verifier_env *env, 844 const struct bpf_reg_state *reg, 845 int off, int size) 846 { 847 bool strict = env->strict_alignment; 848 849 switch (reg->type) { 850 case PTR_TO_PACKET: 851 return check_pkt_ptr_alignment(reg, off, size, strict); 852 case PTR_TO_MAP_VALUE_ADJ: 853 return check_val_ptr_alignment(reg, size, strict); 854 default: 855 if (off % size != 0) { 856 verbose("misaligned access off %d size %d\n", 857 off, size); 858 return -EACCES; 859 } 860 861 return 0; 862 } 863 } 864 865 /* check whether memory at (regno + off) is accessible for t = (read | write) 866 * if t==write, value_regno is a register which value is stored into memory 867 * if t==read, value_regno is a register which will receive the value from memory 868 * if t==write && value_regno==-1, some unknown value is stored into memory 869 * if t==read && value_regno==-1, don't care what we read from memory 870 */ 871 static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off, 872 int bpf_size, enum bpf_access_type t, 873 int value_regno) 874 { 875 struct bpf_verifier_state *state = &env->cur_state; 876 struct bpf_reg_state *reg = &state->regs[regno]; 877 int size, err = 0; 878 879 if (reg->type == PTR_TO_STACK) 880 off += reg->imm; 881 882 size = bpf_size_to_bytes(bpf_size); 883 if (size < 0) 884 return size; 885 886 err = check_ptr_alignment(env, reg, off, size); 887 if (err) 888 return err; 889 890 if (reg->type == PTR_TO_MAP_VALUE || 891 reg->type == PTR_TO_MAP_VALUE_ADJ) { 892 if (t == BPF_WRITE && value_regno >= 0 && 893 is_pointer_value(env, value_regno)) { 894 verbose("R%d leaks addr into map\n", value_regno); 895 return -EACCES; 896 } 897 898 if (reg->type == PTR_TO_MAP_VALUE_ADJ) 899 err = check_map_access_adj(env, regno, off, size); 900 else 901 err = check_map_access(env, regno, off, size); 902 if (!err && t == BPF_READ && value_regno >= 0) 903 mark_reg_unknown_value_and_range(state->regs, 904 value_regno); 905 906 } else if (reg->type == PTR_TO_CTX) { 907 enum bpf_reg_type reg_type = UNKNOWN_VALUE; 908 909 if (t == BPF_WRITE && value_regno >= 0 && 910 is_pointer_value(env, value_regno)) { 911 verbose("R%d leaks addr into ctx\n", value_regno); 912 return -EACCES; 913 } 914 err = check_ctx_access(env, off, size, t, ®_type); 915 if (!err && t == BPF_READ && 
value_regno >= 0) { 916 mark_reg_unknown_value_and_range(state->regs, 917 value_regno); 918 /* note that reg.[id|off|range] == 0 */ 919 state->regs[value_regno].type = reg_type; 920 state->regs[value_regno].aux_off = 0; 921 state->regs[value_regno].aux_off_align = 0; 922 } 923 924 } else if (reg->type == FRAME_PTR || reg->type == PTR_TO_STACK) { 925 if (off >= 0 || off < -MAX_BPF_STACK) { 926 verbose("invalid stack off=%d size=%d\n", off, size); 927 return -EACCES; 928 } 929 if (t == BPF_WRITE) { 930 if (!env->allow_ptr_leaks && 931 state->stack_slot_type[MAX_BPF_STACK + off] == STACK_SPILL && 932 size != BPF_REG_SIZE) { 933 verbose("attempt to corrupt spilled pointer on stack\n"); 934 return -EACCES; 935 } 936 err = check_stack_write(state, off, size, value_regno); 937 } else { 938 err = check_stack_read(state, off, size, value_regno); 939 } 940 } else if (state->regs[regno].type == PTR_TO_PACKET) { 941 if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) { 942 verbose("cannot write into packet\n"); 943 return -EACCES; 944 } 945 if (t == BPF_WRITE && value_regno >= 0 && 946 is_pointer_value(env, value_regno)) { 947 verbose("R%d leaks addr into packet\n", value_regno); 948 return -EACCES; 949 } 950 err = check_packet_access(env, regno, off, size); 951 if (!err && t == BPF_READ && value_regno >= 0) 952 mark_reg_unknown_value_and_range(state->regs, 953 value_regno); 954 } else { 955 verbose("R%d invalid mem access '%s'\n", 956 regno, reg_type_str[reg->type]); 957 return -EACCES; 958 } 959 960 if (!err && size <= 2 && value_regno >= 0 && env->allow_ptr_leaks && 961 state->regs[value_regno].type == UNKNOWN_VALUE) { 962 /* 1 or 2 byte load zero-extends, determine the number of 963 * zero upper bits. Not doing it fo 4 byte load, since 964 * such values cannot be added to ptr_to_packet anyway. 
965 */ 966 state->regs[value_regno].imm = 64 - size * 8; 967 } 968 return err; 969 } 970 971 static int check_xadd(struct bpf_verifier_env *env, struct bpf_insn *insn) 972 { 973 struct bpf_reg_state *regs = env->cur_state.regs; 974 int err; 975 976 if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) || 977 insn->imm != 0) { 978 verbose("BPF_XADD uses reserved fields\n"); 979 return -EINVAL; 980 } 981 982 /* check src1 operand */ 983 err = check_reg_arg(regs, insn->src_reg, SRC_OP); 984 if (err) 985 return err; 986 987 /* check src2 operand */ 988 err = check_reg_arg(regs, insn->dst_reg, SRC_OP); 989 if (err) 990 return err; 991 992 if (is_pointer_value(env, insn->src_reg)) { 993 verbose("R%d leaks addr into mem\n", insn->src_reg); 994 return -EACCES; 995 } 996 997 /* check whether atomic_add can read the memory */ 998 err = check_mem_access(env, insn->dst_reg, insn->off, 999 BPF_SIZE(insn->code), BPF_READ, -1); 1000 if (err) 1001 return err; 1002 1003 /* check whether atomic_add can write into the same memory */ 1004 return check_mem_access(env, insn->dst_reg, insn->off, 1005 BPF_SIZE(insn->code), BPF_WRITE, -1); 1006 } 1007 1008 /* when register 'regno' is passed into function that will read 'access_size' 1009 * bytes from that pointer, make sure that it's within stack boundary 1010 * and all elements of stack are initialized 1011 */ 1012 static int check_stack_boundary(struct bpf_verifier_env *env, int regno, 1013 int access_size, bool zero_size_allowed, 1014 struct bpf_call_arg_meta *meta) 1015 { 1016 struct bpf_verifier_state *state = &env->cur_state; 1017 struct bpf_reg_state *regs = state->regs; 1018 int off, i; 1019 1020 if (regs[regno].type != PTR_TO_STACK) { 1021 if (zero_size_allowed && access_size == 0 && 1022 regs[regno].type == CONST_IMM && 1023 regs[regno].imm == 0) 1024 return 0; 1025 1026 verbose("R%d type=%s expected=%s\n", regno, 1027 reg_type_str[regs[regno].type], 1028 reg_type_str[PTR_TO_STACK]); 1029 return -EACCES; 1030 } 1031 1032 off = regs[regno].imm; 1033 if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 || 1034 access_size <= 0) { 1035 verbose("invalid stack type R%d off=%d access_size=%d\n", 1036 regno, off, access_size); 1037 return -EACCES; 1038 } 1039 1040 if (meta && meta->raw_mode) { 1041 meta->access_size = access_size; 1042 meta->regno = regno; 1043 return 0; 1044 } 1045 1046 for (i = 0; i < access_size; i++) { 1047 if (state->stack_slot_type[MAX_BPF_STACK + off + i] != STACK_MISC) { 1048 verbose("invalid indirect read from stack off %d+%d size %d\n", 1049 off, i, access_size); 1050 return -EACCES; 1051 } 1052 } 1053 return 0; 1054 } 1055 1056 static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, 1057 int access_size, bool zero_size_allowed, 1058 struct bpf_call_arg_meta *meta) 1059 { 1060 struct bpf_reg_state *regs = env->cur_state.regs; 1061 1062 switch (regs[regno].type) { 1063 case PTR_TO_PACKET: 1064 return check_packet_access(env, regno, 0, access_size); 1065 case PTR_TO_MAP_VALUE: 1066 return check_map_access(env, regno, 0, access_size); 1067 case PTR_TO_MAP_VALUE_ADJ: 1068 return check_map_access_adj(env, regno, 0, access_size); 1069 default: /* const_imm|ptr_to_stack or invalid ptr */ 1070 return check_stack_boundary(env, regno, access_size, 1071 zero_size_allowed, meta); 1072 } 1073 } 1074 1075 static int check_func_arg(struct bpf_verifier_env *env, u32 regno, 1076 enum bpf_arg_type arg_type, 1077 struct bpf_call_arg_meta *meta) 1078 { 1079 struct bpf_reg_state *regs = env->cur_state.regs, 
*reg = ®s[regno]; 1080 enum bpf_reg_type expected_type, type = reg->type; 1081 int err = 0; 1082 1083 if (arg_type == ARG_DONTCARE) 1084 return 0; 1085 1086 if (type == NOT_INIT) { 1087 verbose("R%d !read_ok\n", regno); 1088 return -EACCES; 1089 } 1090 1091 if (arg_type == ARG_ANYTHING) { 1092 if (is_pointer_value(env, regno)) { 1093 verbose("R%d leaks addr into helper function\n", regno); 1094 return -EACCES; 1095 } 1096 return 0; 1097 } 1098 1099 if (type == PTR_TO_PACKET && 1100 !may_access_direct_pkt_data(env, meta, BPF_READ)) { 1101 verbose("helper access to the packet is not allowed\n"); 1102 return -EACCES; 1103 } 1104 1105 if (arg_type == ARG_PTR_TO_MAP_KEY || 1106 arg_type == ARG_PTR_TO_MAP_VALUE) { 1107 expected_type = PTR_TO_STACK; 1108 if (type != PTR_TO_PACKET && type != expected_type) 1109 goto err_type; 1110 } else if (arg_type == ARG_CONST_SIZE || 1111 arg_type == ARG_CONST_SIZE_OR_ZERO) { 1112 expected_type = CONST_IMM; 1113 /* One exception. Allow UNKNOWN_VALUE registers when the 1114 * boundaries are known and don't cause unsafe memory accesses 1115 */ 1116 if (type != UNKNOWN_VALUE && type != expected_type) 1117 goto err_type; 1118 } else if (arg_type == ARG_CONST_MAP_PTR) { 1119 expected_type = CONST_PTR_TO_MAP; 1120 if (type != expected_type) 1121 goto err_type; 1122 } else if (arg_type == ARG_PTR_TO_CTX) { 1123 expected_type = PTR_TO_CTX; 1124 if (type != expected_type) 1125 goto err_type; 1126 } else if (arg_type == ARG_PTR_TO_MEM || 1127 arg_type == ARG_PTR_TO_UNINIT_MEM) { 1128 expected_type = PTR_TO_STACK; 1129 /* One exception here. In case function allows for NULL to be 1130 * passed in as argument, it's a CONST_IMM type. Final test 1131 * happens during stack boundary checking. 1132 */ 1133 if (type == CONST_IMM && reg->imm == 0) 1134 /* final test in check_stack_boundary() */; 1135 else if (type != PTR_TO_PACKET && type != PTR_TO_MAP_VALUE && 1136 type != PTR_TO_MAP_VALUE_ADJ && type != expected_type) 1137 goto err_type; 1138 meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM; 1139 } else { 1140 verbose("unsupported arg_type %d\n", arg_type); 1141 return -EFAULT; 1142 } 1143 1144 if (arg_type == ARG_CONST_MAP_PTR) { 1145 /* bpf_map_xxx(map_ptr) call: remember that map_ptr */ 1146 meta->map_ptr = reg->map_ptr; 1147 } else if (arg_type == ARG_PTR_TO_MAP_KEY) { 1148 /* bpf_map_xxx(..., map_ptr, ..., key) call: 1149 * check that [key, key + map->key_size) are within 1150 * stack limits and initialized 1151 */ 1152 if (!meta->map_ptr) { 1153 /* in function declaration map_ptr must come before 1154 * map_key, so that it's verified and known before 1155 * we have to check map_key here. 
Otherwise it means 1156 * that kernel subsystem misconfigured verifier 1157 */ 1158 verbose("invalid map_ptr to access map->key\n"); 1159 return -EACCES; 1160 } 1161 if (type == PTR_TO_PACKET) 1162 err = check_packet_access(env, regno, 0, 1163 meta->map_ptr->key_size); 1164 else 1165 err = check_stack_boundary(env, regno, 1166 meta->map_ptr->key_size, 1167 false, NULL); 1168 } else if (arg_type == ARG_PTR_TO_MAP_VALUE) { 1169 /* bpf_map_xxx(..., map_ptr, ..., value) call: 1170 * check [value, value + map->value_size) validity 1171 */ 1172 if (!meta->map_ptr) { 1173 /* kernel subsystem misconfigured verifier */ 1174 verbose("invalid map_ptr to access map->value\n"); 1175 return -EACCES; 1176 } 1177 if (type == PTR_TO_PACKET) 1178 err = check_packet_access(env, regno, 0, 1179 meta->map_ptr->value_size); 1180 else 1181 err = check_stack_boundary(env, regno, 1182 meta->map_ptr->value_size, 1183 false, NULL); 1184 } else if (arg_type == ARG_CONST_SIZE || 1185 arg_type == ARG_CONST_SIZE_OR_ZERO) { 1186 bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO); 1187 1188 /* bpf_xxx(..., buf, len) call will access 'len' bytes 1189 * from stack pointer 'buf'. Check it 1190 * note: regno == len, regno - 1 == buf 1191 */ 1192 if (regno == 0) { 1193 /* kernel subsystem misconfigured verifier */ 1194 verbose("ARG_CONST_SIZE cannot be first argument\n"); 1195 return -EACCES; 1196 } 1197 1198 /* If the register is UNKNOWN_VALUE, the access check happens 1199 * using its boundaries. Otherwise, just use its imm 1200 */ 1201 if (type == UNKNOWN_VALUE) { 1202 /* For unprivileged variable accesses, disable raw 1203 * mode so that the program is required to 1204 * initialize all the memory that the helper could 1205 * just partially fill up. 1206 */ 1207 meta = NULL; 1208 1209 if (reg->min_value < 0) { 1210 verbose("R%d min value is negative, either use unsigned or 'var &= const'\n", 1211 regno); 1212 return -EACCES; 1213 } 1214 1215 if (reg->min_value == 0) { 1216 err = check_helper_mem_access(env, regno - 1, 0, 1217 zero_size_allowed, 1218 meta); 1219 if (err) 1220 return err; 1221 } 1222 1223 if (reg->max_value == BPF_REGISTER_MAX_RANGE) { 1224 verbose("R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n", 1225 regno); 1226 return -EACCES; 1227 } 1228 err = check_helper_mem_access(env, regno - 1, 1229 reg->max_value, 1230 zero_size_allowed, meta); 1231 if (err) 1232 return err; 1233 } else { 1234 /* register is CONST_IMM */ 1235 err = check_helper_mem_access(env, regno - 1, reg->imm, 1236 zero_size_allowed, meta); 1237 } 1238 } 1239 1240 return err; 1241 err_type: 1242 verbose("R%d type=%s expected=%s\n", regno, 1243 reg_type_str[type], reg_type_str[expected_type]); 1244 return -EACCES; 1245 } 1246 1247 static int check_map_func_compatibility(struct bpf_map *map, int func_id) 1248 { 1249 if (!map) 1250 return 0; 1251 1252 /* We need a two way check, first is from map perspective ... 
*/ 1253 switch (map->map_type) { 1254 case BPF_MAP_TYPE_PROG_ARRAY: 1255 if (func_id != BPF_FUNC_tail_call) 1256 goto error; 1257 break; 1258 case BPF_MAP_TYPE_PERF_EVENT_ARRAY: 1259 if (func_id != BPF_FUNC_perf_event_read && 1260 func_id != BPF_FUNC_perf_event_output) 1261 goto error; 1262 break; 1263 case BPF_MAP_TYPE_STACK_TRACE: 1264 if (func_id != BPF_FUNC_get_stackid) 1265 goto error; 1266 break; 1267 case BPF_MAP_TYPE_CGROUP_ARRAY: 1268 if (func_id != BPF_FUNC_skb_under_cgroup && 1269 func_id != BPF_FUNC_current_task_under_cgroup) 1270 goto error; 1271 break; 1272 case BPF_MAP_TYPE_ARRAY_OF_MAPS: 1273 case BPF_MAP_TYPE_HASH_OF_MAPS: 1274 if (func_id != BPF_FUNC_map_lookup_elem) 1275 goto error; 1276 default: 1277 break; 1278 } 1279 1280 /* ... and second from the function itself. */ 1281 switch (func_id) { 1282 case BPF_FUNC_tail_call: 1283 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY) 1284 goto error; 1285 break; 1286 case BPF_FUNC_perf_event_read: 1287 case BPF_FUNC_perf_event_output: 1288 if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) 1289 goto error; 1290 break; 1291 case BPF_FUNC_get_stackid: 1292 if (map->map_type != BPF_MAP_TYPE_STACK_TRACE) 1293 goto error; 1294 break; 1295 case BPF_FUNC_current_task_under_cgroup: 1296 case BPF_FUNC_skb_under_cgroup: 1297 if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY) 1298 goto error; 1299 break; 1300 default: 1301 break; 1302 } 1303 1304 return 0; 1305 error: 1306 verbose("cannot pass map_type %d into func %s#%d\n", 1307 map->map_type, func_id_name(func_id), func_id); 1308 return -EINVAL; 1309 } 1310 1311 static int check_raw_mode(const struct bpf_func_proto *fn) 1312 { 1313 int count = 0; 1314 1315 if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM) 1316 count++; 1317 if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM) 1318 count++; 1319 if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM) 1320 count++; 1321 if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM) 1322 count++; 1323 if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM) 1324 count++; 1325 1326 return count > 1 ? 
-EINVAL : 0; 1327 } 1328 1329 static void clear_all_pkt_pointers(struct bpf_verifier_env *env) 1330 { 1331 struct bpf_verifier_state *state = &env->cur_state; 1332 struct bpf_reg_state *regs = state->regs, *reg; 1333 int i; 1334 1335 for (i = 0; i < MAX_BPF_REG; i++) 1336 if (regs[i].type == PTR_TO_PACKET || 1337 regs[i].type == PTR_TO_PACKET_END) 1338 mark_reg_unknown_value(regs, i); 1339 1340 for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { 1341 if (state->stack_slot_type[i] != STACK_SPILL) 1342 continue; 1343 reg = &state->spilled_regs[i / BPF_REG_SIZE]; 1344 if (reg->type != PTR_TO_PACKET && 1345 reg->type != PTR_TO_PACKET_END) 1346 continue; 1347 reg->type = UNKNOWN_VALUE; 1348 reg->imm = 0; 1349 } 1350 } 1351 1352 static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx) 1353 { 1354 struct bpf_verifier_state *state = &env->cur_state; 1355 const struct bpf_func_proto *fn = NULL; 1356 struct bpf_reg_state *regs = state->regs; 1357 struct bpf_call_arg_meta meta; 1358 bool changes_data; 1359 int i, err; 1360 1361 /* find function prototype */ 1362 if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) { 1363 verbose("invalid func %s#%d\n", func_id_name(func_id), func_id); 1364 return -EINVAL; 1365 } 1366 1367 if (env->prog->aux->ops->get_func_proto) 1368 fn = env->prog->aux->ops->get_func_proto(func_id); 1369 1370 if (!fn) { 1371 verbose("unknown func %s#%d\n", func_id_name(func_id), func_id); 1372 return -EINVAL; 1373 } 1374 1375 /* eBPF programs must be GPL compatible to use GPL-ed functions */ 1376 if (!env->prog->gpl_compatible && fn->gpl_only) { 1377 verbose("cannot call GPL only function from proprietary program\n"); 1378 return -EINVAL; 1379 } 1380 1381 changes_data = bpf_helper_changes_pkt_data(fn->func); 1382 1383 memset(&meta, 0, sizeof(meta)); 1384 meta.pkt_access = fn->pkt_access; 1385 1386 /* We only support one arg being in raw mode at the moment, which 1387 * is sufficient for the helper functions we have right now. 1388 */ 1389 err = check_raw_mode(fn); 1390 if (err) { 1391 verbose("kernel subsystem misconfigured func %s#%d\n", 1392 func_id_name(func_id), func_id); 1393 return err; 1394 } 1395 1396 /* check args */ 1397 err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta); 1398 if (err) 1399 return err; 1400 err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta); 1401 if (err) 1402 return err; 1403 err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta); 1404 if (err) 1405 return err; 1406 err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &meta); 1407 if (err) 1408 return err; 1409 err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &meta); 1410 if (err) 1411 return err; 1412 1413 /* Mark slots with STACK_MISC in case of raw mode, stack offset 1414 * is inferred from register state. 
1415 */ 1416 for (i = 0; i < meta.access_size; i++) { 1417 err = check_mem_access(env, meta.regno, i, BPF_B, BPF_WRITE, -1); 1418 if (err) 1419 return err; 1420 } 1421 1422 /* reset caller saved regs */ 1423 for (i = 0; i < CALLER_SAVED_REGS; i++) 1424 mark_reg_not_init(regs, caller_saved[i]); 1425 1426 /* update return register */ 1427 if (fn->ret_type == RET_INTEGER) { 1428 regs[BPF_REG_0].type = UNKNOWN_VALUE; 1429 } else if (fn->ret_type == RET_VOID) { 1430 regs[BPF_REG_0].type = NOT_INIT; 1431 } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) { 1432 struct bpf_insn_aux_data *insn_aux; 1433 1434 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL; 1435 regs[BPF_REG_0].max_value = regs[BPF_REG_0].min_value = 0; 1436 /* remember map_ptr, so that check_map_access() 1437 * can check 'value_size' boundary of memory access 1438 * to map element returned from bpf_map_lookup_elem() 1439 */ 1440 if (meta.map_ptr == NULL) { 1441 verbose("kernel subsystem misconfigured verifier\n"); 1442 return -EINVAL; 1443 } 1444 regs[BPF_REG_0].map_ptr = meta.map_ptr; 1445 regs[BPF_REG_0].id = ++env->id_gen; 1446 insn_aux = &env->insn_aux_data[insn_idx]; 1447 if (!insn_aux->map_ptr) 1448 insn_aux->map_ptr = meta.map_ptr; 1449 else if (insn_aux->map_ptr != meta.map_ptr) 1450 insn_aux->map_ptr = BPF_MAP_PTR_POISON; 1451 } else { 1452 verbose("unknown return type %d of func %s#%d\n", 1453 fn->ret_type, func_id_name(func_id), func_id); 1454 return -EINVAL; 1455 } 1456 1457 err = check_map_func_compatibility(meta.map_ptr, func_id); 1458 if (err) 1459 return err; 1460 1461 if (changes_data) 1462 clear_all_pkt_pointers(env); 1463 return 0; 1464 } 1465 1466 static int check_packet_ptr_add(struct bpf_verifier_env *env, 1467 struct bpf_insn *insn) 1468 { 1469 struct bpf_reg_state *regs = env->cur_state.regs; 1470 struct bpf_reg_state *dst_reg = ®s[insn->dst_reg]; 1471 struct bpf_reg_state *src_reg = ®s[insn->src_reg]; 1472 struct bpf_reg_state tmp_reg; 1473 s32 imm; 1474 1475 if (BPF_SRC(insn->code) == BPF_K) { 1476 /* pkt_ptr += imm */ 1477 imm = insn->imm; 1478 1479 add_imm: 1480 if (imm < 0) { 1481 verbose("addition of negative constant to packet pointer is not allowed\n"); 1482 return -EACCES; 1483 } 1484 if (imm >= MAX_PACKET_OFF || 1485 imm + dst_reg->off >= MAX_PACKET_OFF) { 1486 verbose("constant %d is too large to add to packet pointer\n", 1487 imm); 1488 return -EACCES; 1489 } 1490 /* a constant was added to pkt_ptr. 1491 * Remember it while keeping the same 'id' 1492 */ 1493 dst_reg->off += imm; 1494 } else { 1495 bool had_id; 1496 1497 if (src_reg->type == PTR_TO_PACKET) { 1498 /* R6=pkt(id=0,off=0,r=62) R7=imm22; r7 += r6 */ 1499 tmp_reg = *dst_reg; /* save r7 state */ 1500 *dst_reg = *src_reg; /* copy pkt_ptr state r6 into r7 */ 1501 src_reg = &tmp_reg; /* pretend it's src_reg state */ 1502 /* if the checks below reject it, the copy won't matter, 1503 * since we're rejecting the whole program. 
If all ok, 1504 * then imm22 state will be added to r7 1505 * and r7 will be pkt(id=0,off=22,r=62) while 1506 * r6 will stay as pkt(id=0,off=0,r=62) 1507 */ 1508 } 1509 1510 if (src_reg->type == CONST_IMM) { 1511 /* pkt_ptr += reg where reg is known constant */ 1512 imm = src_reg->imm; 1513 goto add_imm; 1514 } 1515 /* disallow pkt_ptr += reg 1516 * if reg is not uknown_value with guaranteed zero upper bits 1517 * otherwise pkt_ptr may overflow and addition will become 1518 * subtraction which is not allowed 1519 */ 1520 if (src_reg->type != UNKNOWN_VALUE) { 1521 verbose("cannot add '%s' to ptr_to_packet\n", 1522 reg_type_str[src_reg->type]); 1523 return -EACCES; 1524 } 1525 if (src_reg->imm < 48) { 1526 verbose("cannot add integer value with %lld upper zero bits to ptr_to_packet\n", 1527 src_reg->imm); 1528 return -EACCES; 1529 } 1530 1531 had_id = (dst_reg->id != 0); 1532 1533 /* dst_reg stays as pkt_ptr type and since some positive 1534 * integer value was added to the pointer, increment its 'id' 1535 */ 1536 dst_reg->id = ++env->id_gen; 1537 1538 /* something was added to pkt_ptr, set range to zero */ 1539 dst_reg->aux_off += dst_reg->off; 1540 dst_reg->off = 0; 1541 dst_reg->range = 0; 1542 if (had_id) 1543 dst_reg->aux_off_align = min(dst_reg->aux_off_align, 1544 src_reg->min_align); 1545 else 1546 dst_reg->aux_off_align = src_reg->min_align; 1547 } 1548 return 0; 1549 } 1550 1551 static int evaluate_reg_alu(struct bpf_verifier_env *env, struct bpf_insn *insn) 1552 { 1553 struct bpf_reg_state *regs = env->cur_state.regs; 1554 struct bpf_reg_state *dst_reg = ®s[insn->dst_reg]; 1555 u8 opcode = BPF_OP(insn->code); 1556 s64 imm_log2; 1557 1558 /* for type == UNKNOWN_VALUE: 1559 * imm > 0 -> number of zero upper bits 1560 * imm == 0 -> don't track which is the same as all bits can be non-zero 1561 */ 1562 1563 if (BPF_SRC(insn->code) == BPF_X) { 1564 struct bpf_reg_state *src_reg = ®s[insn->src_reg]; 1565 1566 if (src_reg->type == UNKNOWN_VALUE && src_reg->imm > 0 && 1567 dst_reg->imm && opcode == BPF_ADD) { 1568 /* dreg += sreg 1569 * where both have zero upper bits. Adding them 1570 * can only result making one more bit non-zero 1571 * in the larger value. 1572 * Ex. 0xffff (imm=48) + 1 (imm=63) = 0x10000 (imm=47) 1573 * 0xffff (imm=48) + 0xffff = 0x1fffe (imm=47) 1574 */ 1575 dst_reg->imm = min(dst_reg->imm, src_reg->imm); 1576 dst_reg->imm--; 1577 return 0; 1578 } 1579 if (src_reg->type == CONST_IMM && src_reg->imm > 0 && 1580 dst_reg->imm && opcode == BPF_ADD) { 1581 /* dreg += sreg 1582 * where dreg has zero upper bits and sreg is const. 1583 * Adding them can only result making one more bit 1584 * non-zero in the larger value. 1585 */ 1586 imm_log2 = __ilog2_u64((long long)src_reg->imm); 1587 dst_reg->imm = min(dst_reg->imm, 63 - imm_log2); 1588 dst_reg->imm--; 1589 return 0; 1590 } 1591 /* all other cases non supported yet, just mark dst_reg */ 1592 dst_reg->imm = 0; 1593 return 0; 1594 } 1595 1596 /* sign extend 32-bit imm into 64-bit to make sure that 1597 * negative values occupy bit 63. 
Note ilog2() would have 1598 * been incorrect, since sizeof(insn->imm) == 4 1599 */ 1600 imm_log2 = __ilog2_u64((long long)insn->imm); 1601 1602 if (dst_reg->imm && opcode == BPF_LSH) { 1603 /* reg <<= imm 1604 * if reg was a result of 2 byte load, then its imm == 48 1605 * which means that upper 48 bits are zero and shifting this reg 1606 * left by 4 would mean that upper 44 bits are still zero 1607 */ 1608 dst_reg->imm -= insn->imm; 1609 } else if (dst_reg->imm && opcode == BPF_MUL) { 1610 /* reg *= imm 1611 * if multiplying by 14 subtract 4 1612 * This is conservative calculation of upper zero bits. 1613 * It's not trying to special case insn->imm == 1 or 0 cases 1614 */ 1615 dst_reg->imm -= imm_log2 + 1; 1616 } else if (opcode == BPF_AND) { 1617 /* reg &= imm */ 1618 dst_reg->imm = 63 - imm_log2; 1619 } else if (dst_reg->imm && opcode == BPF_ADD) { 1620 /* reg += imm */ 1621 dst_reg->imm = min(dst_reg->imm, 63 - imm_log2); 1622 dst_reg->imm--; 1623 } else if (opcode == BPF_RSH) { 1624 /* reg >>= imm 1625 * which means that after right shift, upper bits will be zero 1626 * note that verifier already checked that 1627 * 0 <= imm < 64 for shift insn 1628 */ 1629 dst_reg->imm += insn->imm; 1630 if (unlikely(dst_reg->imm > 64)) 1631 /* some dumb code did: 1632 * r2 = *(u32 *)mem; 1633 * r2 >>= 32; 1634 * and all bits are zero now */ 1635 dst_reg->imm = 64; 1636 } else { 1637 /* all other alu ops, means that we don't know what will 1638 * happen to the value, mark it with unknown number of zero bits 1639 */ 1640 dst_reg->imm = 0; 1641 } 1642 1643 if (dst_reg->imm < 0) { 1644 /* all 64 bits of the register can contain non-zero bits 1645 * and such value cannot be added to ptr_to_packet, since it 1646 * may overflow, mark it as unknown to avoid further eval 1647 */ 1648 dst_reg->imm = 0; 1649 } 1650 return 0; 1651 } 1652 1653 static int evaluate_reg_imm_alu(struct bpf_verifier_env *env, 1654 struct bpf_insn *insn) 1655 { 1656 struct bpf_reg_state *regs = env->cur_state.regs; 1657 struct bpf_reg_state *dst_reg = ®s[insn->dst_reg]; 1658 struct bpf_reg_state *src_reg = ®s[insn->src_reg]; 1659 u8 opcode = BPF_OP(insn->code); 1660 u64 dst_imm = dst_reg->imm; 1661 1662 /* dst_reg->type == CONST_IMM here. Simulate execution of insns 1663 * containing ALU ops. Don't care about overflow or negative 1664 * values, just add/sub/... them; registers are in u64. 
1665 */ 1666 if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_K) { 1667 dst_imm += insn->imm; 1668 } else if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_X && 1669 src_reg->type == CONST_IMM) { 1670 dst_imm += src_reg->imm; 1671 } else if (opcode == BPF_SUB && BPF_SRC(insn->code) == BPF_K) { 1672 dst_imm -= insn->imm; 1673 } else if (opcode == BPF_SUB && BPF_SRC(insn->code) == BPF_X && 1674 src_reg->type == CONST_IMM) { 1675 dst_imm -= src_reg->imm; 1676 } else if (opcode == BPF_MUL && BPF_SRC(insn->code) == BPF_K) { 1677 dst_imm *= insn->imm; 1678 } else if (opcode == BPF_MUL && BPF_SRC(insn->code) == BPF_X && 1679 src_reg->type == CONST_IMM) { 1680 dst_imm *= src_reg->imm; 1681 } else if (opcode == BPF_OR && BPF_SRC(insn->code) == BPF_K) { 1682 dst_imm |= insn->imm; 1683 } else if (opcode == BPF_OR && BPF_SRC(insn->code) == BPF_X && 1684 src_reg->type == CONST_IMM) { 1685 dst_imm |= src_reg->imm; 1686 } else if (opcode == BPF_AND && BPF_SRC(insn->code) == BPF_K) { 1687 dst_imm &= insn->imm; 1688 } else if (opcode == BPF_AND && BPF_SRC(insn->code) == BPF_X && 1689 src_reg->type == CONST_IMM) { 1690 dst_imm &= src_reg->imm; 1691 } else if (opcode == BPF_RSH && BPF_SRC(insn->code) == BPF_K) { 1692 dst_imm >>= insn->imm; 1693 } else if (opcode == BPF_RSH && BPF_SRC(insn->code) == BPF_X && 1694 src_reg->type == CONST_IMM) { 1695 dst_imm >>= src_reg->imm; 1696 } else if (opcode == BPF_LSH && BPF_SRC(insn->code) == BPF_K) { 1697 dst_imm <<= insn->imm; 1698 } else if (opcode == BPF_LSH && BPF_SRC(insn->code) == BPF_X && 1699 src_reg->type == CONST_IMM) { 1700 dst_imm <<= src_reg->imm; 1701 } else { 1702 mark_reg_unknown_value(regs, insn->dst_reg); 1703 goto out; 1704 } 1705 1706 dst_reg->imm = dst_imm; 1707 out: 1708 return 0; 1709 } 1710 1711 static void check_reg_overflow(struct bpf_reg_state *reg) 1712 { 1713 if (reg->max_value > BPF_REGISTER_MAX_RANGE) 1714 reg->max_value = BPF_REGISTER_MAX_RANGE; 1715 if (reg->min_value < BPF_REGISTER_MIN_RANGE || 1716 reg->min_value > BPF_REGISTER_MAX_RANGE) 1717 reg->min_value = BPF_REGISTER_MIN_RANGE; 1718 } 1719 1720 static u32 calc_align(u32 imm) 1721 { 1722 if (!imm) 1723 return 1U << 31; 1724 return imm - ((imm - 1) & imm); 1725 } 1726 1727 static void adjust_reg_min_max_vals(struct bpf_verifier_env *env, 1728 struct bpf_insn *insn) 1729 { 1730 struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg; 1731 s64 min_val = BPF_REGISTER_MIN_RANGE; 1732 u64 max_val = BPF_REGISTER_MAX_RANGE; 1733 u8 opcode = BPF_OP(insn->code); 1734 u32 dst_align, src_align; 1735 1736 dst_reg = ®s[insn->dst_reg]; 1737 src_align = 0; 1738 if (BPF_SRC(insn->code) == BPF_X) { 1739 check_reg_overflow(®s[insn->src_reg]); 1740 min_val = regs[insn->src_reg].min_value; 1741 max_val = regs[insn->src_reg].max_value; 1742 1743 /* If the source register is a random pointer then the 1744 * min_value/max_value values represent the range of the known 1745 * accesses into that value, not the actual min/max value of the 1746 * register itself. In this case we have to reset the reg range 1747 * values so we know it is not safe to look at. 
1748 */ 1749 if (regs[insn->src_reg].type != CONST_IMM && 1750 regs[insn->src_reg].type != UNKNOWN_VALUE) { 1751 min_val = BPF_REGISTER_MIN_RANGE; 1752 max_val = BPF_REGISTER_MAX_RANGE; 1753 src_align = 0; 1754 } else { 1755 src_align = regs[insn->src_reg].min_align; 1756 } 1757 } else if (insn->imm < BPF_REGISTER_MAX_RANGE && 1758 (s64)insn->imm > BPF_REGISTER_MIN_RANGE) { 1759 min_val = max_val = insn->imm; 1760 src_align = calc_align(insn->imm); 1761 } 1762 1763 dst_align = dst_reg->min_align; 1764 1765 /* We don't know anything about what was done to this register, mark it 1766 * as unknown. 1767 */ 1768 if (min_val == BPF_REGISTER_MIN_RANGE && 1769 max_val == BPF_REGISTER_MAX_RANGE) { 1770 reset_reg_range_values(regs, insn->dst_reg); 1771 return; 1772 } 1773 1774 /* If one of our values was at the end of our ranges then we can't just 1775 * do our normal operations to the register, we need to set the values 1776 * to the min/max since they are undefined. 1777 */ 1778 if (min_val == BPF_REGISTER_MIN_RANGE) 1779 dst_reg->min_value = BPF_REGISTER_MIN_RANGE; 1780 if (max_val == BPF_REGISTER_MAX_RANGE) 1781 dst_reg->max_value = BPF_REGISTER_MAX_RANGE; 1782 1783 switch (opcode) { 1784 case BPF_ADD: 1785 if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) 1786 dst_reg->min_value += min_val; 1787 if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) 1788 dst_reg->max_value += max_val; 1789 dst_reg->min_align = min(src_align, dst_align); 1790 break; 1791 case BPF_SUB: 1792 if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) 1793 dst_reg->min_value -= min_val; 1794 if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) 1795 dst_reg->max_value -= max_val; 1796 dst_reg->min_align = min(src_align, dst_align); 1797 break; 1798 case BPF_MUL: 1799 if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) 1800 dst_reg->min_value *= min_val; 1801 if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) 1802 dst_reg->max_value *= max_val; 1803 dst_reg->min_align = max(src_align, dst_align); 1804 break; 1805 case BPF_AND: 1806 /* Disallow AND'ing of negative numbers, ain't nobody got time 1807 * for that. Otherwise the minimum is 0 and the max is the max 1808 * value we could AND against. 1809 */ 1810 if (min_val < 0) 1811 dst_reg->min_value = BPF_REGISTER_MIN_RANGE; 1812 else 1813 dst_reg->min_value = 0; 1814 dst_reg->max_value = max_val; 1815 dst_reg->min_align = max(src_align, dst_align); 1816 break; 1817 case BPF_LSH: 1818 /* Gotta have special overflow logic here, if we're shifting 1819 * more than MAX_RANGE then just assume we have an invalid 1820 * range. 1821 */ 1822 if (min_val > ilog2(BPF_REGISTER_MAX_RANGE)) { 1823 dst_reg->min_value = BPF_REGISTER_MIN_RANGE; 1824 dst_reg->min_align = 1; 1825 } else { 1826 if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) 1827 dst_reg->min_value <<= min_val; 1828 if (!dst_reg->min_align) 1829 dst_reg->min_align = 1; 1830 dst_reg->min_align <<= min_val; 1831 } 1832 if (max_val > ilog2(BPF_REGISTER_MAX_RANGE)) 1833 dst_reg->max_value = BPF_REGISTER_MAX_RANGE; 1834 else if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) 1835 dst_reg->max_value <<= max_val; 1836 break; 1837 case BPF_RSH: 1838 /* RSH by a negative number is undefined, and the BPF_RSH is an 1839 * unsigned shift, so make the appropriate casts. 
1840 */ 1841 if (min_val < 0 || dst_reg->min_value < 0) { 1842 dst_reg->min_value = BPF_REGISTER_MIN_RANGE; 1843 } else { 1844 dst_reg->min_value = 1845 (u64)(dst_reg->min_value) >> min_val; 1846 } 1847 if (min_val < 0) { 1848 dst_reg->min_align = 1; 1849 } else { 1850 dst_reg->min_align >>= (u64) min_val; 1851 if (!dst_reg->min_align) 1852 dst_reg->min_align = 1; 1853 } 1854 if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) 1855 dst_reg->max_value >>= max_val; 1856 break; 1857 default: 1858 reset_reg_range_values(regs, insn->dst_reg); 1859 break; 1860 } 1861 1862 check_reg_overflow(dst_reg); 1863 } 1864 1865 /* check validity of 32-bit and 64-bit arithmetic operations */ 1866 static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) 1867 { 1868 struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg; 1869 u8 opcode = BPF_OP(insn->code); 1870 int err; 1871 1872 if (opcode == BPF_END || opcode == BPF_NEG) { 1873 if (opcode == BPF_NEG) { 1874 if (BPF_SRC(insn->code) != 0 || 1875 insn->src_reg != BPF_REG_0 || 1876 insn->off != 0 || insn->imm != 0) { 1877 verbose("BPF_NEG uses reserved fields\n"); 1878 return -EINVAL; 1879 } 1880 } else { 1881 if (insn->src_reg != BPF_REG_0 || insn->off != 0 || 1882 (insn->imm != 16 && insn->imm != 32 && insn->imm != 64)) { 1883 verbose("BPF_END uses reserved fields\n"); 1884 return -EINVAL; 1885 } 1886 } 1887 1888 /* check src operand */ 1889 err = check_reg_arg(regs, insn->dst_reg, SRC_OP); 1890 if (err) 1891 return err; 1892 1893 if (is_pointer_value(env, insn->dst_reg)) { 1894 verbose("R%d pointer arithmetic prohibited\n", 1895 insn->dst_reg); 1896 return -EACCES; 1897 } 1898 1899 /* check dest operand */ 1900 err = check_reg_arg(regs, insn->dst_reg, DST_OP); 1901 if (err) 1902 return err; 1903 1904 } else if (opcode == BPF_MOV) { 1905 1906 if (BPF_SRC(insn->code) == BPF_X) { 1907 if (insn->imm != 0 || insn->off != 0) { 1908 verbose("BPF_MOV uses reserved fields\n"); 1909 return -EINVAL; 1910 } 1911 1912 /* check src operand */ 1913 err = check_reg_arg(regs, insn->src_reg, SRC_OP); 1914 if (err) 1915 return err; 1916 } else { 1917 if (insn->src_reg != BPF_REG_0 || insn->off != 0) { 1918 verbose("BPF_MOV uses reserved fields\n"); 1919 return -EINVAL; 1920 } 1921 } 1922 1923 /* check dest operand */ 1924 err = check_reg_arg(regs, insn->dst_reg, DST_OP); 1925 if (err) 1926 return err; 1927 1928 /* we are setting our register to something new, we need to 1929 * reset its range values. 1930 */ 1931 reset_reg_range_values(regs, insn->dst_reg); 1932 1933 if (BPF_SRC(insn->code) == BPF_X) { 1934 if (BPF_CLASS(insn->code) == BPF_ALU64) { 1935 /* case: R1 = R2 1936 * copy register state to dest reg 1937 */ 1938 regs[insn->dst_reg] = regs[insn->src_reg]; 1939 } else { 1940 if (is_pointer_value(env, insn->src_reg)) { 1941 verbose("R%d partial copy of pointer\n", 1942 insn->src_reg); 1943 return -EACCES; 1944 } 1945 mark_reg_unknown_value(regs, insn->dst_reg); 1946 } 1947 } else { 1948 /* case: R = imm 1949 * remember the value we stored into this reg 1950 */ 1951 regs[insn->dst_reg].type = CONST_IMM; 1952 regs[insn->dst_reg].imm = insn->imm; 1953 regs[insn->dst_reg].max_value = insn->imm; 1954 regs[insn->dst_reg].min_value = insn->imm; 1955 regs[insn->dst_reg].min_align = calc_align(insn->imm); 1956 } 1957 1958 } else if (opcode > BPF_END) { 1959 verbose("invalid BPF_ALU opcode %x\n", opcode); 1960 return -EINVAL; 1961 1962 } else { /* all other ALU ops: and, sub, xor, add, ... 
			 */

		if (BPF_SRC(insn->code) == BPF_X) {
			if (insn->imm != 0 || insn->off != 0) {
				verbose("BPF_ALU uses reserved fields\n");
				return -EINVAL;
			}
			/* check src1 operand */
			err = check_reg_arg(regs, insn->src_reg, SRC_OP);
			if (err)
				return err;
		} else {
			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
				verbose("BPF_ALU uses reserved fields\n");
				return -EINVAL;
			}
		}

		/* check src2 operand */
		err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
		if (err)
			return err;

		if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
		    BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
			verbose("div by zero\n");
			return -EINVAL;
		}

		if ((opcode == BPF_LSH || opcode == BPF_RSH ||
		     opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
			int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;

			if (insn->imm < 0 || insn->imm >= size) {
				verbose("invalid shift %d\n", insn->imm);
				return -EINVAL;
			}
		}

		/* check dest operand */
		err = check_reg_arg(regs, insn->dst_reg, DST_OP_NO_MARK);
		if (err)
			return err;

		dst_reg = &regs[insn->dst_reg];

		/* first we want to adjust our ranges. */
		adjust_reg_min_max_vals(env, insn);

		/* pattern match 'bpf_add Rx, imm' instruction */
		if (opcode == BPF_ADD && BPF_CLASS(insn->code) == BPF_ALU64 &&
		    dst_reg->type == FRAME_PTR && BPF_SRC(insn->code) == BPF_K) {
			dst_reg->type = PTR_TO_STACK;
			dst_reg->imm = insn->imm;
			return 0;
		} else if (opcode == BPF_ADD &&
			   BPF_CLASS(insn->code) == BPF_ALU64 &&
			   dst_reg->type == PTR_TO_STACK &&
			   ((BPF_SRC(insn->code) == BPF_X &&
			     regs[insn->src_reg].type == CONST_IMM) ||
			    BPF_SRC(insn->code) == BPF_K)) {
			if (BPF_SRC(insn->code) == BPF_X)
				dst_reg->imm += regs[insn->src_reg].imm;
			else
				dst_reg->imm += insn->imm;
			return 0;
		} else if (opcode == BPF_ADD &&
			   BPF_CLASS(insn->code) == BPF_ALU64 &&
			   (dst_reg->type == PTR_TO_PACKET ||
			    (BPF_SRC(insn->code) == BPF_X &&
			     regs[insn->src_reg].type == PTR_TO_PACKET))) {
			/* ptr_to_packet += K|X */
			return check_packet_ptr_add(env, insn);
		} else if (BPF_CLASS(insn->code) == BPF_ALU64 &&
			   dst_reg->type == UNKNOWN_VALUE &&
			   env->allow_ptr_leaks) {
			/* unknown += K|X */
			return evaluate_reg_alu(env, insn);
		} else if (BPF_CLASS(insn->code) == BPF_ALU64 &&
			   dst_reg->type == CONST_IMM &&
			   env->allow_ptr_leaks) {
			/* reg_imm += K|X */
			return evaluate_reg_imm_alu(env, insn);
		} else if (is_pointer_value(env, insn->dst_reg)) {
			verbose("R%d pointer arithmetic prohibited\n",
				insn->dst_reg);
			return -EACCES;
		} else if (BPF_SRC(insn->code) == BPF_X &&
			   is_pointer_value(env, insn->src_reg)) {
			verbose("R%d pointer arithmetic prohibited\n",
				insn->src_reg);
			return -EACCES;
		}

		/* If we did pointer math on a map value then just set it to our
		 * PTR_TO_MAP_VALUE_ADJ type so we can deal with any stores or
		 * loads to this register appropriately, otherwise just mark the
		 * register as unknown.
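		 * e.g. a map value pointer that had a constant added to it
		 * stays usable; later loads/stores through it are still
		 * checked against the map's value_size together with the
		 * tracked min/max offsets.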
		 */
		if (env->allow_ptr_leaks &&
		    BPF_CLASS(insn->code) == BPF_ALU64 && opcode == BPF_ADD &&
		    (dst_reg->type == PTR_TO_MAP_VALUE ||
		     dst_reg->type == PTR_TO_MAP_VALUE_ADJ))
			dst_reg->type = PTR_TO_MAP_VALUE_ADJ;
		else
			mark_reg_unknown_value(regs, insn->dst_reg);
	}

	return 0;
}

static void find_good_pkt_pointers(struct bpf_verifier_state *state,
				   struct bpf_reg_state *dst_reg)
{
	struct bpf_reg_state *regs = state->regs, *reg;
	int i;

	/* LLVM can generate two kinds of checks:
	 *
	 * Type 1:
	 *
	 *   r2 = r3;
	 *   r2 += 8;
	 *   if (r2 > pkt_end) goto <handle exception>
	 *   <access okay>
	 *
	 *   Where:
	 *     r2 == dst_reg, pkt_end == src_reg
	 *     r2=pkt(id=n,off=8,r=0)
	 *     r3=pkt(id=n,off=0,r=0)
	 *
	 * Type 2:
	 *
	 *   r2 = r3;
	 *   r2 += 8;
	 *   if (pkt_end >= r2) goto <access okay>
	 *   <handle exception>
	 *
	 *   Where:
	 *     pkt_end == dst_reg, r2 == src_reg
	 *     r2=pkt(id=n,off=8,r=0)
	 *     r3=pkt(id=n,off=0,r=0)
	 *
	 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
	 * so that range of bytes [r3, r3 + 8) is safe to access.
	 */

	for (i = 0; i < MAX_BPF_REG; i++)
		if (regs[i].type == PTR_TO_PACKET && regs[i].id == dst_reg->id)
			/* keep the maximum range already checked */
			regs[i].range = max(regs[i].range, dst_reg->off);

	for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
		if (state->stack_slot_type[i] != STACK_SPILL)
			continue;
		reg = &state->spilled_regs[i / BPF_REG_SIZE];
		if (reg->type == PTR_TO_PACKET && reg->id == dst_reg->id)
			reg->range = max(reg->range, dst_reg->off);
	}
}

/* Adjusts the register min/max values in the case that the dst_reg is the
 * variable register that we are working on, and src_reg is a constant or we're
 * simply doing a BPF_K check.
 */
static void reg_set_min_max(struct bpf_reg_state *true_reg,
			    struct bpf_reg_state *false_reg, u64 val,
			    u8 opcode)
{
	switch (opcode) {
	case BPF_JEQ:
		/* If this is false then we know nothing Jon Snow, but if it is
		 * true then we know for sure.
		 */
		true_reg->max_value = true_reg->min_value = val;
		break;
	case BPF_JNE:
		/* If this is true we know nothing Jon Snow, but if it is false
		 * we know the value for sure.
		 */
		false_reg->max_value = false_reg->min_value = val;
		break;
	case BPF_JGT:
		/* Unsigned comparison, the minimum value is 0. */
		false_reg->min_value = 0;
		/* fallthrough */
	case BPF_JSGT:
		/* If this is false then we know the maximum val is val,
		 * otherwise we know the min val is val+1.
		 */
		false_reg->max_value = val;
		true_reg->min_value = val + 1;
		break;
	case BPF_JGE:
		/* Unsigned comparison, the minimum value is 0. */
		false_reg->min_value = 0;
		/* fallthrough */
	case BPF_JSGE:
		/* If this is false then we know the maximum value is val - 1,
		 * otherwise we know the minimum value is val.
		 */
		false_reg->max_value = val - 1;
		true_reg->min_value = val;
		break;
	default:
		break;
	}

	check_reg_overflow(false_reg);
	check_reg_overflow(true_reg);
}

/* Same as above, but for the case that dst_reg is a CONST_IMM reg and src_reg
 * is the variable reg.
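 * i.e. the conditional jump reads 'if (const OP reg)', so the deduced bounds
 * are applied to the source register instead of the destination.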
 */
static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
				struct bpf_reg_state *false_reg, u64 val,
				u8 opcode)
{
	switch (opcode) {
	case BPF_JEQ:
		/* If this is false then we know nothing Jon Snow, but if it is
		 * true then we know for sure.
		 */
		true_reg->max_value = true_reg->min_value = val;
		break;
	case BPF_JNE:
		/* If this is true we know nothing Jon Snow, but if it is false
		 * we know the value for sure.
		 */
		false_reg->max_value = false_reg->min_value = val;
		break;
	case BPF_JGT:
		/* Unsigned comparison, the minimum value is 0. */
		true_reg->min_value = 0;
		/* fallthrough */
	case BPF_JSGT:
		/* If this is false, then the val is <= the register, if it is
		 * true then the register < the val.
		 */
		false_reg->min_value = val;
		true_reg->max_value = val - 1;
		break;
	case BPF_JGE:
		/* Unsigned comparison, the minimum value is 0. */
		true_reg->min_value = 0;
		/* fallthrough */
	case BPF_JSGE:
		/* If this is false then constant < register, if it is true then
		 * the register <= constant.
		 */
		false_reg->min_value = val + 1;
		true_reg->max_value = val;
		break;
	default:
		break;
	}

	check_reg_overflow(false_reg);
	check_reg_overflow(true_reg);
}

static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id,
			 enum bpf_reg_type type)
{
	struct bpf_reg_state *reg = &regs[regno];

	if (reg->type == PTR_TO_MAP_VALUE_OR_NULL && reg->id == id) {
		if (type == UNKNOWN_VALUE) {
			__mark_reg_unknown_value(regs, regno);
		} else if (reg->map_ptr->inner_map_meta) {
			reg->type = CONST_PTR_TO_MAP;
			reg->map_ptr = reg->map_ptr->inner_map_meta;
		} else {
			reg->type = type;
		}
		/* We don't need id from this point onwards anymore, thus we
		 * should better reset it, so that state pruning has chances
		 * to take effect.
		 */
		reg->id = 0;
	}
}

/* The logic is similar to find_good_pkt_pointers(), both could eventually
 * be folded together at some point.
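 * Like find_good_pkt_pointers(), it updates both the register file and any
 * spilled copies on the stack, so every alias of the checked pointer is
 * marked.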
 */
static void mark_map_regs(struct bpf_verifier_state *state, u32 regno,
			  enum bpf_reg_type type)
{
	struct bpf_reg_state *regs = state->regs;
	u32 id = regs[regno].id;
	int i;

	for (i = 0; i < MAX_BPF_REG; i++)
		mark_map_reg(regs, i, id, type);

	for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
		if (state->stack_slot_type[i] != STACK_SPILL)
			continue;
		mark_map_reg(state->spilled_regs, i / BPF_REG_SIZE, id, type);
	}
}

static int check_cond_jmp_op(struct bpf_verifier_env *env,
			     struct bpf_insn *insn, int *insn_idx)
{
	struct bpf_verifier_state *other_branch, *this_branch = &env->cur_state;
	struct bpf_reg_state *regs = this_branch->regs, *dst_reg;
	u8 opcode = BPF_OP(insn->code);
	int err;

	if (opcode > BPF_EXIT) {
		verbose("invalid BPF_JMP opcode %x\n", opcode);
		return -EINVAL;
	}

	if (BPF_SRC(insn->code) == BPF_X) {
		if (insn->imm != 0) {
			verbose("BPF_JMP uses reserved fields\n");
			return -EINVAL;
		}

		/* check src1 operand */
		err = check_reg_arg(regs, insn->src_reg, SRC_OP);
		if (err)
			return err;

		if (is_pointer_value(env, insn->src_reg)) {
			verbose("R%d pointer comparison prohibited\n",
				insn->src_reg);
			return -EACCES;
		}
	} else {
		if (insn->src_reg != BPF_REG_0) {
			verbose("BPF_JMP uses reserved fields\n");
			return -EINVAL;
		}
	}

	/* check src2 operand */
	err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
	if (err)
		return err;

	dst_reg = &regs[insn->dst_reg];

	/* detect if R == 0 where R was initialized to zero earlier */
	if (BPF_SRC(insn->code) == BPF_K &&
	    (opcode == BPF_JEQ || opcode == BPF_JNE) &&
	    dst_reg->type == CONST_IMM && dst_reg->imm == insn->imm) {
		if (opcode == BPF_JEQ) {
			/* if (imm == imm) goto pc+off;
			 * only follow the goto, ignore fall-through
			 */
			*insn_idx += insn->off;
			return 0;
		} else {
			/* if (imm != imm) goto pc+off;
			 * only follow fall-through branch, since
			 * that's where the program will go
			 */
			return 0;
		}
	}

	other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx);
	if (!other_branch)
		return -EFAULT;

	/* detect if we are comparing against a constant value so we can adjust
	 * our min/max values for our dst register.
	 */
	if (BPF_SRC(insn->code) == BPF_X) {
		if (regs[insn->src_reg].type == CONST_IMM)
			reg_set_min_max(&other_branch->regs[insn->dst_reg],
					dst_reg, regs[insn->src_reg].imm,
					opcode);
		else if (dst_reg->type == CONST_IMM)
			reg_set_min_max_inv(&other_branch->regs[insn->src_reg],
					    &regs[insn->src_reg], dst_reg->imm,
					    opcode);
	} else {
		reg_set_min_max(&other_branch->regs[insn->dst_reg],
				dst_reg, insn->imm, opcode);
	}

	/* detect if R == 0 where R is returned from bpf_map_lookup_elem() */
	if (BPF_SRC(insn->code) == BPF_K &&
	    insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
	    dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
		/* Mark all identical map registers in each branch as either
		 * safe or unknown depending on the R == 0 or R != 0 condition.
		 */
		mark_map_regs(this_branch, insn->dst_reg,
			      opcode == BPF_JEQ ? PTR_TO_MAP_VALUE : UNKNOWN_VALUE);
		mark_map_regs(other_branch, insn->dst_reg,
			      opcode == BPF_JEQ ?
			      UNKNOWN_VALUE : PTR_TO_MAP_VALUE);
	} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT &&
		   dst_reg->type == PTR_TO_PACKET &&
		   regs[insn->src_reg].type == PTR_TO_PACKET_END) {
		find_good_pkt_pointers(this_branch, dst_reg);
	} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGE &&
		   dst_reg->type == PTR_TO_PACKET_END &&
		   regs[insn->src_reg].type == PTR_TO_PACKET) {
		find_good_pkt_pointers(other_branch, &regs[insn->src_reg]);
	} else if (is_pointer_value(env, insn->dst_reg)) {
		verbose("R%d pointer comparison prohibited\n", insn->dst_reg);
		return -EACCES;
	}
	if (log_level)
		print_verifier_state(this_branch);
	return 0;
}

/* return the map pointer stored inside BPF_LD_IMM64 instruction */
static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn)
{
	u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32;

	return (struct bpf_map *) (unsigned long) imm64;
}

/* verify BPF_LD_IMM64 instruction */
static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
	struct bpf_reg_state *regs = env->cur_state.regs;
	int err;

	if (BPF_SIZE(insn->code) != BPF_DW) {
		verbose("invalid BPF_LD_IMM insn\n");
		return -EINVAL;
	}
	if (insn->off != 0) {
		verbose("BPF_LD_IMM64 uses reserved fields\n");
		return -EINVAL;
	}

	err = check_reg_arg(regs, insn->dst_reg, DST_OP);
	if (err)
		return err;

	if (insn->src_reg == 0) {
		u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;

		regs[insn->dst_reg].type = CONST_IMM;
		regs[insn->dst_reg].imm = imm;
		return 0;
	}

	/* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */
	BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD);

	regs[insn->dst_reg].type = CONST_PTR_TO_MAP;
	regs[insn->dst_reg].map_ptr = ld_imm64_to_map_ptr(insn);
	return 0;
}

static bool may_access_skb(enum bpf_prog_type type)
{
	switch (type) {
	case BPF_PROG_TYPE_SOCKET_FILTER:
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		return true;
	default:
		return false;
	}
}

/* verify safety of LD_ABS|LD_IND instructions:
 * - they can only appear in the programs where ctx == skb
 * - since they are wrappers of function calls, they scratch R1-R5 registers,
 *   preserve R6-R9, and store return value into R0
 *
 * Implicit input:
 *   ctx == skb == R6 == CTX
 *
 * Explicit input:
 *   SRC == any register
 *   IMM == 32-bit immediate
 *
 * Output:
 *   R0 - 8/16/32-bit skb data converted to cpu endianness
 */
static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
	struct bpf_reg_state *regs = env->cur_state.regs;
	u8 mode = BPF_MODE(insn->code);
	int i, err;

	if (!may_access_skb(env->prog->type)) {
		verbose("BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
		return -EINVAL;
	}

	if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
	    BPF_SIZE(insn->code) == BPF_DW ||
	    (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
		verbose("BPF_LD_[ABS|IND] uses reserved fields\n");
		return -EINVAL;
	}

	/* check whether implicit source operand (register R6) is readable */
	err = check_reg_arg(regs, BPF_REG_6, SRC_OP);
	if (err)
		return err;

	if
(regs[BPF_REG_6].type != PTR_TO_CTX) { 2472 verbose("at the time of BPF_LD_ABS|IND R6 != pointer to skb\n"); 2473 return -EINVAL; 2474 } 2475 2476 if (mode == BPF_IND) { 2477 /* check explicit source operand */ 2478 err = check_reg_arg(regs, insn->src_reg, SRC_OP); 2479 if (err) 2480 return err; 2481 } 2482 2483 /* reset caller saved regs to unreadable */ 2484 for (i = 0; i < CALLER_SAVED_REGS; i++) 2485 mark_reg_not_init(regs, caller_saved[i]); 2486 2487 /* mark destination R0 register as readable, since it contains 2488 * the value fetched from the packet 2489 */ 2490 regs[BPF_REG_0].type = UNKNOWN_VALUE; 2491 return 0; 2492 } 2493 2494 /* non-recursive DFS pseudo code 2495 * 1 procedure DFS-iterative(G,v): 2496 * 2 label v as discovered 2497 * 3 let S be a stack 2498 * 4 S.push(v) 2499 * 5 while S is not empty 2500 * 6 t <- S.pop() 2501 * 7 if t is what we're looking for: 2502 * 8 return t 2503 * 9 for all edges e in G.adjacentEdges(t) do 2504 * 10 if edge e is already labelled 2505 * 11 continue with the next edge 2506 * 12 w <- G.adjacentVertex(t,e) 2507 * 13 if vertex w is not discovered and not explored 2508 * 14 label e as tree-edge 2509 * 15 label w as discovered 2510 * 16 S.push(w) 2511 * 17 continue at 5 2512 * 18 else if vertex w is discovered 2513 * 19 label e as back-edge 2514 * 20 else 2515 * 21 // vertex w is explored 2516 * 22 label e as forward- or cross-edge 2517 * 23 label t as explored 2518 * 24 S.pop() 2519 * 2520 * convention: 2521 * 0x10 - discovered 2522 * 0x11 - discovered and fall-through edge labelled 2523 * 0x12 - discovered and fall-through and branch edges labelled 2524 * 0x20 - explored 2525 */ 2526 2527 enum { 2528 DISCOVERED = 0x10, 2529 EXPLORED = 0x20, 2530 FALLTHROUGH = 1, 2531 BRANCH = 2, 2532 }; 2533 2534 #define STATE_LIST_MARK ((struct bpf_verifier_state_list *) -1L) 2535 2536 static int *insn_stack; /* stack of insns to process */ 2537 static int cur_stack; /* current stack index */ 2538 static int *insn_state; 2539 2540 /* t, w, e - match pseudo-code above: 2541 * t - index of current instruction 2542 * w - next instruction 2543 * e - edge 2544 */ 2545 static int push_insn(int t, int w, int e, struct bpf_verifier_env *env) 2546 { 2547 if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH)) 2548 return 0; 2549 2550 if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH)) 2551 return 0; 2552 2553 if (w < 0 || w >= env->prog->len) { 2554 verbose("jump out of range from insn %d to %d\n", t, w); 2555 return -EINVAL; 2556 } 2557 2558 if (e == BRANCH) 2559 /* mark branch target for state pruning */ 2560 env->explored_states[w] = STATE_LIST_MARK; 2561 2562 if (insn_state[w] == 0) { 2563 /* tree-edge */ 2564 insn_state[t] = DISCOVERED | e; 2565 insn_state[w] = DISCOVERED; 2566 if (cur_stack >= env->prog->len) 2567 return -E2BIG; 2568 insn_stack[cur_stack++] = w; 2569 return 1; 2570 } else if ((insn_state[w] & 0xF0) == DISCOVERED) { 2571 verbose("back-edge from insn %d to %d\n", t, w); 2572 return -EINVAL; 2573 } else if (insn_state[w] == EXPLORED) { 2574 /* forward- or cross-edge */ 2575 insn_state[t] = DISCOVERED | e; 2576 } else { 2577 verbose("insn state internal bug\n"); 2578 return -EFAULT; 2579 } 2580 return 0; 2581 } 2582 2583 /* non-recursive depth-first-search to detect loops in BPF program 2584 * loop == back-edge in directed graph 2585 */ 2586 static int check_cfg(struct bpf_verifier_env *env) 2587 { 2588 struct bpf_insn *insns = env->prog->insnsi; 2589 int insn_cnt = env->prog->len; 2590 int ret = 0; 2591 int i, t; 2592 2593 
insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL); 2594 if (!insn_state) 2595 return -ENOMEM; 2596 2597 insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL); 2598 if (!insn_stack) { 2599 kfree(insn_state); 2600 return -ENOMEM; 2601 } 2602 2603 insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */ 2604 insn_stack[0] = 0; /* 0 is the first instruction */ 2605 cur_stack = 1; 2606 2607 peek_stack: 2608 if (cur_stack == 0) 2609 goto check_state; 2610 t = insn_stack[cur_stack - 1]; 2611 2612 if (BPF_CLASS(insns[t].code) == BPF_JMP) { 2613 u8 opcode = BPF_OP(insns[t].code); 2614 2615 if (opcode == BPF_EXIT) { 2616 goto mark_explored; 2617 } else if (opcode == BPF_CALL) { 2618 ret = push_insn(t, t + 1, FALLTHROUGH, env); 2619 if (ret == 1) 2620 goto peek_stack; 2621 else if (ret < 0) 2622 goto err_free; 2623 if (t + 1 < insn_cnt) 2624 env->explored_states[t + 1] = STATE_LIST_MARK; 2625 } else if (opcode == BPF_JA) { 2626 if (BPF_SRC(insns[t].code) != BPF_K) { 2627 ret = -EINVAL; 2628 goto err_free; 2629 } 2630 /* unconditional jump with single edge */ 2631 ret = push_insn(t, t + insns[t].off + 1, 2632 FALLTHROUGH, env); 2633 if (ret == 1) 2634 goto peek_stack; 2635 else if (ret < 0) 2636 goto err_free; 2637 /* tell verifier to check for equivalent states 2638 * after every call and jump 2639 */ 2640 if (t + 1 < insn_cnt) 2641 env->explored_states[t + 1] = STATE_LIST_MARK; 2642 } else { 2643 /* conditional jump with two edges */ 2644 env->explored_states[t] = STATE_LIST_MARK; 2645 ret = push_insn(t, t + 1, FALLTHROUGH, env); 2646 if (ret == 1) 2647 goto peek_stack; 2648 else if (ret < 0) 2649 goto err_free; 2650 2651 ret = push_insn(t, t + insns[t].off + 1, BRANCH, env); 2652 if (ret == 1) 2653 goto peek_stack; 2654 else if (ret < 0) 2655 goto err_free; 2656 } 2657 } else { 2658 /* all other non-branch instructions with single 2659 * fall-through edge 2660 */ 2661 ret = push_insn(t, t + 1, FALLTHROUGH, env); 2662 if (ret == 1) 2663 goto peek_stack; 2664 else if (ret < 0) 2665 goto err_free; 2666 } 2667 2668 mark_explored: 2669 insn_state[t] = EXPLORED; 2670 if (cur_stack-- <= 0) { 2671 verbose("pop stack internal bug\n"); 2672 ret = -EFAULT; 2673 goto err_free; 2674 } 2675 goto peek_stack; 2676 2677 check_state: 2678 for (i = 0; i < insn_cnt; i++) { 2679 if (insn_state[i] != EXPLORED) { 2680 verbose("unreachable insn %d\n", i); 2681 ret = -EINVAL; 2682 goto err_free; 2683 } 2684 } 2685 ret = 0; /* cfg looks good */ 2686 2687 err_free: 2688 kfree(insn_state); 2689 kfree(insn_stack); 2690 return ret; 2691 } 2692 2693 /* the following conditions reduce the number of explored insns 2694 * from ~140k to ~80k for ultra large programs that use a lot of ptr_to_packet 2695 */ 2696 static bool compare_ptrs_to_packet(struct bpf_verifier_env *env, 2697 struct bpf_reg_state *old, 2698 struct bpf_reg_state *cur) 2699 { 2700 if (old->id != cur->id) 2701 return false; 2702 2703 /* old ptr_to_packet is more conservative, since it allows smaller 2704 * range. Ex: 2705 * old(off=0,r=10) is equal to cur(off=0,r=20), because 2706 * old(off=0,r=10) means that with range=10 the verifier proceeded 2707 * further and found no issues with the program. Now we're in the same 2708 * spot with cur(off=0,r=20), so we're safe too, since anything further 2709 * will only be looking at most 10 bytes after this pointer. 
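	 * In short: if the old, already verified state got by with a smaller
	 * checked range, the current state with a larger range is at least
	 * as safe.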
2710 */ 2711 if (old->off == cur->off && old->range < cur->range) 2712 return true; 2713 2714 /* old(off=20,r=10) is equal to cur(off=22,re=22 or 5 or 0) 2715 * since both cannot be used for packet access and safe(old) 2716 * pointer has smaller off that could be used for further 2717 * 'if (ptr > data_end)' check 2718 * Ex: 2719 * old(off=20,r=10) and cur(off=22,r=22) and cur(off=22,r=0) mean 2720 * that we cannot access the packet. 2721 * The safe range is: 2722 * [ptr, ptr + range - off) 2723 * so whenever off >=range, it means no safe bytes from this pointer. 2724 * When comparing old->off <= cur->off, it means that older code 2725 * went with smaller offset and that offset was later 2726 * used to figure out the safe range after 'if (ptr > data_end)' check 2727 * Say, 'old' state was explored like: 2728 * ... R3(off=0, r=0) 2729 * R4 = R3 + 20 2730 * ... now R4(off=20,r=0) <-- here 2731 * if (R4 > data_end) 2732 * ... R4(off=20,r=20), R3(off=0,r=20) and R3 can be used to access. 2733 * ... the code further went all the way to bpf_exit. 2734 * Now the 'cur' state at the mark 'here' has R4(off=30,r=0). 2735 * old_R4(off=20,r=0) equal to cur_R4(off=30,r=0), since if the verifier 2736 * goes further, such cur_R4 will give larger safe packet range after 2737 * 'if (R4 > data_end)' and all further insn were already good with r=20, 2738 * so they will be good with r=30 and we can prune the search. 2739 */ 2740 if (!env->strict_alignment && old->off <= cur->off && 2741 old->off >= old->range && cur->off >= cur->range) 2742 return true; 2743 2744 return false; 2745 } 2746 2747 /* compare two verifier states 2748 * 2749 * all states stored in state_list are known to be valid, since 2750 * verifier reached 'bpf_exit' instruction through them 2751 * 2752 * this function is called when verifier exploring different branches of 2753 * execution popped from the state stack. If it sees an old state that has 2754 * more strict register state and more strict stack state then this execution 2755 * branch doesn't need to be explored further, since verifier already 2756 * concluded that more strict state leads to valid finish. 2757 * 2758 * Therefore two states are equivalent if register state is more conservative 2759 * and explored stack state is more conservative than the current one. 2760 * Example: 2761 * explored current 2762 * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC) 2763 * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC) 2764 * 2765 * In other words if current stack state (one being explored) has more 2766 * valid slots than old one that already passed validation, it means 2767 * the verifier can stop exploring and conclude that current state is valid too 2768 * 2769 * Similarly with registers. If explored state has register type as invalid 2770 * whereas register type in current state is meaningful, it means that 2771 * the current state will reach 'bpf_exit' instruction safely 2772 */ 2773 static bool states_equal(struct bpf_verifier_env *env, 2774 struct bpf_verifier_state *old, 2775 struct bpf_verifier_state *cur) 2776 { 2777 bool varlen_map_access = env->varlen_map_value_access; 2778 struct bpf_reg_state *rold, *rcur; 2779 int i; 2780 2781 for (i = 0; i < MAX_BPF_REG; i++) { 2782 rold = &old->regs[i]; 2783 rcur = &cur->regs[i]; 2784 2785 if (memcmp(rold, rcur, sizeof(*rold)) == 0) 2786 continue; 2787 2788 /* If the ranges were not the same, but everything else was and 2789 * we didn't do a variable access into a map then we are a-ok. 
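		 * (i.e. the registers still compare equal byte for byte up to
		 * and including the 'id' field; only the tracked bounds after
		 * it differ.)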
2790 */ 2791 if (!varlen_map_access && 2792 memcmp(rold, rcur, offsetofend(struct bpf_reg_state, id)) == 0) 2793 continue; 2794 2795 /* If we didn't map access then again we don't care about the 2796 * mismatched range values and it's ok if our old type was 2797 * UNKNOWN and we didn't go to a NOT_INIT'ed reg. 2798 */ 2799 if (rold->type == NOT_INIT || 2800 (!varlen_map_access && rold->type == UNKNOWN_VALUE && 2801 rcur->type != NOT_INIT)) 2802 continue; 2803 2804 /* Don't care about the reg->id in this case. */ 2805 if (rold->type == PTR_TO_MAP_VALUE_OR_NULL && 2806 rcur->type == PTR_TO_MAP_VALUE_OR_NULL && 2807 rold->map_ptr == rcur->map_ptr) 2808 continue; 2809 2810 if (rold->type == PTR_TO_PACKET && rcur->type == PTR_TO_PACKET && 2811 compare_ptrs_to_packet(env, rold, rcur)) 2812 continue; 2813 2814 return false; 2815 } 2816 2817 for (i = 0; i < MAX_BPF_STACK; i++) { 2818 if (old->stack_slot_type[i] == STACK_INVALID) 2819 continue; 2820 if (old->stack_slot_type[i] != cur->stack_slot_type[i]) 2821 /* Ex: old explored (safe) state has STACK_SPILL in 2822 * this stack slot, but current has has STACK_MISC -> 2823 * this verifier states are not equivalent, 2824 * return false to continue verification of this path 2825 */ 2826 return false; 2827 if (i % BPF_REG_SIZE) 2828 continue; 2829 if (memcmp(&old->spilled_regs[i / BPF_REG_SIZE], 2830 &cur->spilled_regs[i / BPF_REG_SIZE], 2831 sizeof(old->spilled_regs[0]))) 2832 /* when explored and current stack slot types are 2833 * the same, check that stored pointers types 2834 * are the same as well. 2835 * Ex: explored safe path could have stored 2836 * (bpf_reg_state) {.type = PTR_TO_STACK, .imm = -8} 2837 * but current path has stored: 2838 * (bpf_reg_state) {.type = PTR_TO_STACK, .imm = -16} 2839 * such verifier states are not equivalent. 2840 * return false to continue verification of this path 2841 */ 2842 return false; 2843 else 2844 continue; 2845 } 2846 return true; 2847 } 2848 2849 static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) 2850 { 2851 struct bpf_verifier_state_list *new_sl; 2852 struct bpf_verifier_state_list *sl; 2853 2854 sl = env->explored_states[insn_idx]; 2855 if (!sl) 2856 /* this 'insn_idx' instruction wasn't marked, so we will not 2857 * be doing state search here 2858 */ 2859 return 0; 2860 2861 while (sl != STATE_LIST_MARK) { 2862 if (states_equal(env, &sl->state, &env->cur_state)) 2863 /* reached equivalent register/stack state, 2864 * prune the search 2865 */ 2866 return 1; 2867 sl = sl->next; 2868 } 2869 2870 /* there were no equivalent states, remember current one. 2871 * technically the current state is not proven to be safe yet, 2872 * but it will either reach bpf_exit (which means it's safe) or 2873 * it will be rejected. 
Since there are no loops, we won't be 2874 * seeing this 'insn_idx' instruction again on the way to bpf_exit 2875 */ 2876 new_sl = kmalloc(sizeof(struct bpf_verifier_state_list), GFP_USER); 2877 if (!new_sl) 2878 return -ENOMEM; 2879 2880 /* add new state to the head of linked list */ 2881 memcpy(&new_sl->state, &env->cur_state, sizeof(env->cur_state)); 2882 new_sl->next = env->explored_states[insn_idx]; 2883 env->explored_states[insn_idx] = new_sl; 2884 return 0; 2885 } 2886 2887 static int ext_analyzer_insn_hook(struct bpf_verifier_env *env, 2888 int insn_idx, int prev_insn_idx) 2889 { 2890 if (!env->analyzer_ops || !env->analyzer_ops->insn_hook) 2891 return 0; 2892 2893 return env->analyzer_ops->insn_hook(env, insn_idx, prev_insn_idx); 2894 } 2895 2896 static int do_check(struct bpf_verifier_env *env) 2897 { 2898 struct bpf_verifier_state *state = &env->cur_state; 2899 struct bpf_insn *insns = env->prog->insnsi; 2900 struct bpf_reg_state *regs = state->regs; 2901 int insn_cnt = env->prog->len; 2902 int insn_idx, prev_insn_idx = 0; 2903 int insn_processed = 0; 2904 bool do_print_state = false; 2905 2906 init_reg_state(regs); 2907 insn_idx = 0; 2908 env->varlen_map_value_access = false; 2909 for (;;) { 2910 struct bpf_insn *insn; 2911 u8 class; 2912 int err; 2913 2914 if (insn_idx >= insn_cnt) { 2915 verbose("invalid insn idx %d insn_cnt %d\n", 2916 insn_idx, insn_cnt); 2917 return -EFAULT; 2918 } 2919 2920 insn = &insns[insn_idx]; 2921 class = BPF_CLASS(insn->code); 2922 2923 if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) { 2924 verbose("BPF program is too large. Processed %d insn\n", 2925 insn_processed); 2926 return -E2BIG; 2927 } 2928 2929 err = is_state_visited(env, insn_idx); 2930 if (err < 0) 2931 return err; 2932 if (err == 1) { 2933 /* found equivalent state, can prune the search */ 2934 if (log_level) { 2935 if (do_print_state) 2936 verbose("\nfrom %d to %d: safe\n", 2937 prev_insn_idx, insn_idx); 2938 else 2939 verbose("%d: safe\n", insn_idx); 2940 } 2941 goto process_bpf_exit; 2942 } 2943 2944 if (need_resched()) 2945 cond_resched(); 2946 2947 if (log_level > 1 || (log_level && do_print_state)) { 2948 if (log_level > 1) 2949 verbose("%d:", insn_idx); 2950 else 2951 verbose("\nfrom %d to %d:", 2952 prev_insn_idx, insn_idx); 2953 print_verifier_state(&env->cur_state); 2954 do_print_state = false; 2955 } 2956 2957 if (log_level) { 2958 verbose("%d: ", insn_idx); 2959 print_bpf_insn(env, insn); 2960 } 2961 2962 err = ext_analyzer_insn_hook(env, insn_idx, prev_insn_idx); 2963 if (err) 2964 return err; 2965 2966 if (class == BPF_ALU || class == BPF_ALU64) { 2967 err = check_alu_op(env, insn); 2968 if (err) 2969 return err; 2970 2971 } else if (class == BPF_LDX) { 2972 enum bpf_reg_type *prev_src_type, src_reg_type; 2973 2974 /* check for reserved fields is already done */ 2975 2976 /* check src operand */ 2977 err = check_reg_arg(regs, insn->src_reg, SRC_OP); 2978 if (err) 2979 return err; 2980 2981 err = check_reg_arg(regs, insn->dst_reg, DST_OP_NO_MARK); 2982 if (err) 2983 return err; 2984 2985 src_reg_type = regs[insn->src_reg].type; 2986 2987 /* check that memory (src_reg + off) is readable, 2988 * the state of dst_reg will be updated by this func 2989 */ 2990 err = check_mem_access(env, insn->src_reg, insn->off, 2991 BPF_SIZE(insn->code), BPF_READ, 2992 insn->dst_reg); 2993 if (err) 2994 return err; 2995 2996 if (BPF_SIZE(insn->code) != BPF_W && 2997 BPF_SIZE(insn->code) != BPF_DW) { 2998 insn_idx++; 2999 continue; 3000 } 3001 3002 prev_src_type = 
&env->insn_aux_data[insn_idx].ptr_type; 3003 3004 if (*prev_src_type == NOT_INIT) { 3005 /* saw a valid insn 3006 * dst_reg = *(u32 *)(src_reg + off) 3007 * save type to validate intersecting paths 3008 */ 3009 *prev_src_type = src_reg_type; 3010 3011 } else if (src_reg_type != *prev_src_type && 3012 (src_reg_type == PTR_TO_CTX || 3013 *prev_src_type == PTR_TO_CTX)) { 3014 /* ABuser program is trying to use the same insn 3015 * dst_reg = *(u32*) (src_reg + off) 3016 * with different pointer types: 3017 * src_reg == ctx in one branch and 3018 * src_reg == stack|map in some other branch. 3019 * Reject it. 3020 */ 3021 verbose("same insn cannot be used with different pointers\n"); 3022 return -EINVAL; 3023 } 3024 3025 } else if (class == BPF_STX) { 3026 enum bpf_reg_type *prev_dst_type, dst_reg_type; 3027 3028 if (BPF_MODE(insn->code) == BPF_XADD) { 3029 err = check_xadd(env, insn); 3030 if (err) 3031 return err; 3032 insn_idx++; 3033 continue; 3034 } 3035 3036 /* check src1 operand */ 3037 err = check_reg_arg(regs, insn->src_reg, SRC_OP); 3038 if (err) 3039 return err; 3040 /* check src2 operand */ 3041 err = check_reg_arg(regs, insn->dst_reg, SRC_OP); 3042 if (err) 3043 return err; 3044 3045 dst_reg_type = regs[insn->dst_reg].type; 3046 3047 /* check that memory (dst_reg + off) is writeable */ 3048 err = check_mem_access(env, insn->dst_reg, insn->off, 3049 BPF_SIZE(insn->code), BPF_WRITE, 3050 insn->src_reg); 3051 if (err) 3052 return err; 3053 3054 prev_dst_type = &env->insn_aux_data[insn_idx].ptr_type; 3055 3056 if (*prev_dst_type == NOT_INIT) { 3057 *prev_dst_type = dst_reg_type; 3058 } else if (dst_reg_type != *prev_dst_type && 3059 (dst_reg_type == PTR_TO_CTX || 3060 *prev_dst_type == PTR_TO_CTX)) { 3061 verbose("same insn cannot be used with different pointers\n"); 3062 return -EINVAL; 3063 } 3064 3065 } else if (class == BPF_ST) { 3066 if (BPF_MODE(insn->code) != BPF_MEM || 3067 insn->src_reg != BPF_REG_0) { 3068 verbose("BPF_ST uses reserved fields\n"); 3069 return -EINVAL; 3070 } 3071 /* check src operand */ 3072 err = check_reg_arg(regs, insn->dst_reg, SRC_OP); 3073 if (err) 3074 return err; 3075 3076 /* check that memory (dst_reg + off) is writeable */ 3077 err = check_mem_access(env, insn->dst_reg, insn->off, 3078 BPF_SIZE(insn->code), BPF_WRITE, 3079 -1); 3080 if (err) 3081 return err; 3082 3083 } else if (class == BPF_JMP) { 3084 u8 opcode = BPF_OP(insn->code); 3085 3086 if (opcode == BPF_CALL) { 3087 if (BPF_SRC(insn->code) != BPF_K || 3088 insn->off != 0 || 3089 insn->src_reg != BPF_REG_0 || 3090 insn->dst_reg != BPF_REG_0) { 3091 verbose("BPF_CALL uses reserved fields\n"); 3092 return -EINVAL; 3093 } 3094 3095 err = check_call(env, insn->imm, insn_idx); 3096 if (err) 3097 return err; 3098 3099 } else if (opcode == BPF_JA) { 3100 if (BPF_SRC(insn->code) != BPF_K || 3101 insn->imm != 0 || 3102 insn->src_reg != BPF_REG_0 || 3103 insn->dst_reg != BPF_REG_0) { 3104 verbose("BPF_JA uses reserved fields\n"); 3105 return -EINVAL; 3106 } 3107 3108 insn_idx += insn->off + 1; 3109 continue; 3110 3111 } else if (opcode == BPF_EXIT) { 3112 if (BPF_SRC(insn->code) != BPF_K || 3113 insn->imm != 0 || 3114 insn->src_reg != BPF_REG_0 || 3115 insn->dst_reg != BPF_REG_0) { 3116 verbose("BPF_EXIT uses reserved fields\n"); 3117 return -EINVAL; 3118 } 3119 3120 /* eBPF calling convetion is such that R0 is used 3121 * to return the value from eBPF program. 
3122 * Make sure that it's readable at this time 3123 * of bpf_exit, which means that program wrote 3124 * something into it earlier 3125 */ 3126 err = check_reg_arg(regs, BPF_REG_0, SRC_OP); 3127 if (err) 3128 return err; 3129 3130 if (is_pointer_value(env, BPF_REG_0)) { 3131 verbose("R0 leaks addr as return value\n"); 3132 return -EACCES; 3133 } 3134 3135 process_bpf_exit: 3136 insn_idx = pop_stack(env, &prev_insn_idx); 3137 if (insn_idx < 0) { 3138 break; 3139 } else { 3140 do_print_state = true; 3141 continue; 3142 } 3143 } else { 3144 err = check_cond_jmp_op(env, insn, &insn_idx); 3145 if (err) 3146 return err; 3147 } 3148 } else if (class == BPF_LD) { 3149 u8 mode = BPF_MODE(insn->code); 3150 3151 if (mode == BPF_ABS || mode == BPF_IND) { 3152 err = check_ld_abs(env, insn); 3153 if (err) 3154 return err; 3155 3156 } else if (mode == BPF_IMM) { 3157 err = check_ld_imm(env, insn); 3158 if (err) 3159 return err; 3160 3161 insn_idx++; 3162 } else { 3163 verbose("invalid BPF_LD mode\n"); 3164 return -EINVAL; 3165 } 3166 reset_reg_range_values(regs, insn->dst_reg); 3167 } else { 3168 verbose("unknown insn class %d\n", class); 3169 return -EINVAL; 3170 } 3171 3172 insn_idx++; 3173 } 3174 3175 verbose("processed %d insns\n", insn_processed); 3176 return 0; 3177 } 3178 3179 static int check_map_prealloc(struct bpf_map *map) 3180 { 3181 return (map->map_type != BPF_MAP_TYPE_HASH && 3182 map->map_type != BPF_MAP_TYPE_PERCPU_HASH && 3183 map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) || 3184 !(map->map_flags & BPF_F_NO_PREALLOC); 3185 } 3186 3187 static int check_map_prog_compatibility(struct bpf_map *map, 3188 struct bpf_prog *prog) 3189 3190 { 3191 /* Make sure that BPF_PROG_TYPE_PERF_EVENT programs only use 3192 * preallocated hash maps, since doing memory allocation 3193 * in overflow_handler can crash depending on where nmi got 3194 * triggered. 
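	 * (a hash map created with BPF_F_NO_PREALLOC allocates its elements
	 * at update time, which is not safe to do from NMI context.)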
3195 */ 3196 if (prog->type == BPF_PROG_TYPE_PERF_EVENT) { 3197 if (!check_map_prealloc(map)) { 3198 verbose("perf_event programs can only use preallocated hash map\n"); 3199 return -EINVAL; 3200 } 3201 if (map->inner_map_meta && 3202 !check_map_prealloc(map->inner_map_meta)) { 3203 verbose("perf_event programs can only use preallocated inner hash map\n"); 3204 return -EINVAL; 3205 } 3206 } 3207 return 0; 3208 } 3209 3210 /* look for pseudo eBPF instructions that access map FDs and 3211 * replace them with actual map pointers 3212 */ 3213 static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env) 3214 { 3215 struct bpf_insn *insn = env->prog->insnsi; 3216 int insn_cnt = env->prog->len; 3217 int i, j, err; 3218 3219 err = bpf_prog_calc_tag(env->prog); 3220 if (err) 3221 return err; 3222 3223 for (i = 0; i < insn_cnt; i++, insn++) { 3224 if (BPF_CLASS(insn->code) == BPF_LDX && 3225 (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) { 3226 verbose("BPF_LDX uses reserved fields\n"); 3227 return -EINVAL; 3228 } 3229 3230 if (BPF_CLASS(insn->code) == BPF_STX && 3231 ((BPF_MODE(insn->code) != BPF_MEM && 3232 BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) { 3233 verbose("BPF_STX uses reserved fields\n"); 3234 return -EINVAL; 3235 } 3236 3237 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) { 3238 struct bpf_map *map; 3239 struct fd f; 3240 3241 if (i == insn_cnt - 1 || insn[1].code != 0 || 3242 insn[1].dst_reg != 0 || insn[1].src_reg != 0 || 3243 insn[1].off != 0) { 3244 verbose("invalid bpf_ld_imm64 insn\n"); 3245 return -EINVAL; 3246 } 3247 3248 if (insn->src_reg == 0) 3249 /* valid generic load 64-bit imm */ 3250 goto next_insn; 3251 3252 if (insn->src_reg != BPF_PSEUDO_MAP_FD) { 3253 verbose("unrecognized bpf_ld_imm64 insn\n"); 3254 return -EINVAL; 3255 } 3256 3257 f = fdget(insn->imm); 3258 map = __bpf_map_get(f); 3259 if (IS_ERR(map)) { 3260 verbose("fd %d is not pointing to valid bpf_map\n", 3261 insn->imm); 3262 return PTR_ERR(map); 3263 } 3264 3265 err = check_map_prog_compatibility(map, env->prog); 3266 if (err) { 3267 fdput(f); 3268 return err; 3269 } 3270 3271 /* store map pointer inside BPF_LD_IMM64 instruction */ 3272 insn[0].imm = (u32) (unsigned long) map; 3273 insn[1].imm = ((u64) (unsigned long) map) >> 32; 3274 3275 /* check whether we recorded this map already */ 3276 for (j = 0; j < env->used_map_cnt; j++) 3277 if (env->used_maps[j] == map) { 3278 fdput(f); 3279 goto next_insn; 3280 } 3281 3282 if (env->used_map_cnt >= MAX_USED_MAPS) { 3283 fdput(f); 3284 return -E2BIG; 3285 } 3286 3287 /* hold the map. If the program is rejected by verifier, 3288 * the map will be released by release_maps() or it 3289 * will be used by the valid program until it's unloaded 3290 * and all maps are released in free_bpf_prog_info() 3291 */ 3292 map = bpf_map_inc(map, false); 3293 if (IS_ERR(map)) { 3294 fdput(f); 3295 return PTR_ERR(map); 3296 } 3297 env->used_maps[env->used_map_cnt++] = map; 3298 3299 fdput(f); 3300 next_insn: 3301 insn++; 3302 i++; 3303 } 3304 } 3305 3306 /* now all pseudo BPF_LD_IMM64 instructions load valid 3307 * 'struct bpf_map *' into a register instead of user map_fd. 3308 * These pointers will be used later by verifier to validate map access. 
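	 * e.g. what user space emitted as BPF_LD_MAP_FD(BPF_REG_1, map_fd)
	 * now carries the kernel's map pointer split across the two imm
	 * halves of the ld_imm64 instruction pair.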
3309 */ 3310 return 0; 3311 } 3312 3313 /* drop refcnt of maps used by the rejected program */ 3314 static void release_maps(struct bpf_verifier_env *env) 3315 { 3316 int i; 3317 3318 for (i = 0; i < env->used_map_cnt; i++) 3319 bpf_map_put(env->used_maps[i]); 3320 } 3321 3322 /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */ 3323 static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env) 3324 { 3325 struct bpf_insn *insn = env->prog->insnsi; 3326 int insn_cnt = env->prog->len; 3327 int i; 3328 3329 for (i = 0; i < insn_cnt; i++, insn++) 3330 if (insn->code == (BPF_LD | BPF_IMM | BPF_DW)) 3331 insn->src_reg = 0; 3332 } 3333 3334 /* single env->prog->insni[off] instruction was replaced with the range 3335 * insni[off, off + cnt). Adjust corresponding insn_aux_data by copying 3336 * [0, off) and [off, end) to new locations, so the patched range stays zero 3337 */ 3338 static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len, 3339 u32 off, u32 cnt) 3340 { 3341 struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data; 3342 3343 if (cnt == 1) 3344 return 0; 3345 new_data = vzalloc(sizeof(struct bpf_insn_aux_data) * prog_len); 3346 if (!new_data) 3347 return -ENOMEM; 3348 memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off); 3349 memcpy(new_data + off + cnt - 1, old_data + off, 3350 sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1)); 3351 env->insn_aux_data = new_data; 3352 vfree(old_data); 3353 return 0; 3354 } 3355 3356 static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off, 3357 const struct bpf_insn *patch, u32 len) 3358 { 3359 struct bpf_prog *new_prog; 3360 3361 new_prog = bpf_patch_insn_single(env->prog, off, patch, len); 3362 if (!new_prog) 3363 return NULL; 3364 if (adjust_insn_aux_data(env, new_prog->len, off, len)) 3365 return NULL; 3366 return new_prog; 3367 } 3368 3369 /* convert load instructions that access fields of 'struct __sk_buff' 3370 * into sequence of instructions that access fields of 'struct sk_buff' 3371 */ 3372 static int convert_ctx_accesses(struct bpf_verifier_env *env) 3373 { 3374 const struct bpf_verifier_ops *ops = env->prog->aux->ops; 3375 const int insn_cnt = env->prog->len; 3376 struct bpf_insn insn_buf[16], *insn; 3377 struct bpf_prog *new_prog; 3378 enum bpf_access_type type; 3379 int i, cnt, delta = 0; 3380 3381 if (ops->gen_prologue) { 3382 cnt = ops->gen_prologue(insn_buf, env->seen_direct_write, 3383 env->prog); 3384 if (cnt >= ARRAY_SIZE(insn_buf)) { 3385 verbose("bpf verifier is misconfigured\n"); 3386 return -EINVAL; 3387 } else if (cnt) { 3388 new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt); 3389 if (!new_prog) 3390 return -ENOMEM; 3391 3392 env->prog = new_prog; 3393 delta += cnt - 1; 3394 } 3395 } 3396 3397 if (!ops->convert_ctx_access) 3398 return 0; 3399 3400 insn = env->prog->insnsi + delta; 3401 3402 for (i = 0; i < insn_cnt; i++, insn++) { 3403 if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) || 3404 insn->code == (BPF_LDX | BPF_MEM | BPF_H) || 3405 insn->code == (BPF_LDX | BPF_MEM | BPF_W) || 3406 insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) 3407 type = BPF_READ; 3408 else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) || 3409 insn->code == (BPF_STX | BPF_MEM | BPF_H) || 3410 insn->code == (BPF_STX | BPF_MEM | BPF_W) || 3411 insn->code == (BPF_STX | BPF_MEM | BPF_DW)) 3412 type = BPF_WRITE; 3413 else 3414 continue; 3415 3416 if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX) 3417 continue; 3418 3419 cnt = ops->convert_ctx_access(type, 
insn, insn_buf, env->prog); 3420 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) { 3421 verbose("bpf verifier is misconfigured\n"); 3422 return -EINVAL; 3423 } 3424 3425 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 3426 if (!new_prog) 3427 return -ENOMEM; 3428 3429 delta += cnt - 1; 3430 3431 /* keep walking new program and skip insns we just inserted */ 3432 env->prog = new_prog; 3433 insn = new_prog->insnsi + i + delta; 3434 } 3435 3436 return 0; 3437 } 3438 3439 /* fixup insn->imm field of bpf_call instructions 3440 * and inline eligible helpers as explicit sequence of BPF instructions 3441 * 3442 * this function is called after eBPF program passed verification 3443 */ 3444 static int fixup_bpf_calls(struct bpf_verifier_env *env) 3445 { 3446 struct bpf_prog *prog = env->prog; 3447 struct bpf_insn *insn = prog->insnsi; 3448 const struct bpf_func_proto *fn; 3449 const int insn_cnt = prog->len; 3450 struct bpf_insn insn_buf[16]; 3451 struct bpf_prog *new_prog; 3452 struct bpf_map *map_ptr; 3453 int i, cnt, delta = 0; 3454 3455 for (i = 0; i < insn_cnt; i++, insn++) { 3456 if (insn->code != (BPF_JMP | BPF_CALL)) 3457 continue; 3458 3459 if (insn->imm == BPF_FUNC_get_route_realm) 3460 prog->dst_needed = 1; 3461 if (insn->imm == BPF_FUNC_get_prandom_u32) 3462 bpf_user_rnd_init_once(); 3463 if (insn->imm == BPF_FUNC_tail_call) { 3464 /* If we tail call into other programs, we 3465 * cannot make any assumptions since they can 3466 * be replaced dynamically during runtime in 3467 * the program array. 3468 */ 3469 prog->cb_access = 1; 3470 3471 /* mark bpf_tail_call as different opcode to avoid 3472 * conditional branch in the interpeter for every normal 3473 * call and to prevent accidental JITing by JIT compiler 3474 * that doesn't support bpf_tail_call yet 3475 */ 3476 insn->imm = 0; 3477 insn->code |= BPF_X; 3478 continue; 3479 } 3480 3481 if (ebpf_jit_enabled() && insn->imm == BPF_FUNC_map_lookup_elem) { 3482 map_ptr = env->insn_aux_data[i + delta].map_ptr; 3483 if (map_ptr == BPF_MAP_PTR_POISON || 3484 !map_ptr->ops->map_gen_lookup) 3485 goto patch_call_imm; 3486 3487 cnt = map_ptr->ops->map_gen_lookup(map_ptr, insn_buf); 3488 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) { 3489 verbose("bpf verifier is misconfigured\n"); 3490 return -EINVAL; 3491 } 3492 3493 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 3494 cnt); 3495 if (!new_prog) 3496 return -ENOMEM; 3497 3498 delta += cnt - 1; 3499 3500 /* keep walking new program and skip insns we just inserted */ 3501 env->prog = prog = new_prog; 3502 insn = new_prog->insnsi + i + delta; 3503 continue; 3504 } 3505 3506 patch_call_imm: 3507 fn = prog->aux->ops->get_func_proto(insn->imm); 3508 /* all functions that have prototype and verifier allowed 3509 * programs to call them, must be real in-kernel functions 3510 */ 3511 if (!fn->func) { 3512 verbose("kernel subsystem misconfigured func %s#%d\n", 3513 func_id_name(insn->imm), insn->imm); 3514 return -EFAULT; 3515 } 3516 insn->imm = fn->func - __bpf_call_base; 3517 } 3518 3519 return 0; 3520 } 3521 3522 static void free_states(struct bpf_verifier_env *env) 3523 { 3524 struct bpf_verifier_state_list *sl, *sln; 3525 int i; 3526 3527 if (!env->explored_states) 3528 return; 3529 3530 for (i = 0; i < env->prog->len; i++) { 3531 sl = env->explored_states[i]; 3532 3533 if (sl) 3534 while (sl != STATE_LIST_MARK) { 3535 sln = sl->next; 3536 kfree(sl); 3537 sl = sln; 3538 } 3539 } 3540 3541 kfree(env->explored_states); 3542 } 3543 3544 int bpf_check(struct bpf_prog **prog, union 
bpf_attr *attr) 3545 { 3546 char __user *log_ubuf = NULL; 3547 struct bpf_verifier_env *env; 3548 int ret = -EINVAL; 3549 3550 /* 'struct bpf_verifier_env' can be global, but since it's not small, 3551 * allocate/free it every time bpf_check() is called 3552 */ 3553 env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL); 3554 if (!env) 3555 return -ENOMEM; 3556 3557 env->insn_aux_data = vzalloc(sizeof(struct bpf_insn_aux_data) * 3558 (*prog)->len); 3559 ret = -ENOMEM; 3560 if (!env->insn_aux_data) 3561 goto err_free_env; 3562 env->prog = *prog; 3563 3564 /* grab the mutex to protect few globals used by verifier */ 3565 mutex_lock(&bpf_verifier_lock); 3566 3567 if (attr->log_level || attr->log_buf || attr->log_size) { 3568 /* user requested verbose verifier output 3569 * and supplied buffer to store the verification trace 3570 */ 3571 log_level = attr->log_level; 3572 log_ubuf = (char __user *) (unsigned long) attr->log_buf; 3573 log_size = attr->log_size; 3574 log_len = 0; 3575 3576 ret = -EINVAL; 3577 /* log_* values have to be sane */ 3578 if (log_size < 128 || log_size > UINT_MAX >> 8 || 3579 log_level == 0 || log_ubuf == NULL) 3580 goto err_unlock; 3581 3582 ret = -ENOMEM; 3583 log_buf = vmalloc(log_size); 3584 if (!log_buf) 3585 goto err_unlock; 3586 } else { 3587 log_level = 0; 3588 } 3589 3590 env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT); 3591 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) 3592 env->strict_alignment = true; 3593 3594 ret = replace_map_fd_with_map_ptr(env); 3595 if (ret < 0) 3596 goto skip_full_check; 3597 3598 env->explored_states = kcalloc(env->prog->len, 3599 sizeof(struct bpf_verifier_state_list *), 3600 GFP_USER); 3601 ret = -ENOMEM; 3602 if (!env->explored_states) 3603 goto skip_full_check; 3604 3605 ret = check_cfg(env); 3606 if (ret < 0) 3607 goto skip_full_check; 3608 3609 env->allow_ptr_leaks = capable(CAP_SYS_ADMIN); 3610 3611 ret = do_check(env); 3612 3613 skip_full_check: 3614 while (pop_stack(env, NULL) >= 0); 3615 free_states(env); 3616 3617 if (ret == 0) 3618 /* program is valid, convert *(u32*)(ctx + off) accesses */ 3619 ret = convert_ctx_accesses(env); 3620 3621 if (ret == 0) 3622 ret = fixup_bpf_calls(env); 3623 3624 if (log_level && log_len >= log_size - 1) { 3625 BUG_ON(log_len >= log_size); 3626 /* verifier log exceeded user supplied buffer */ 3627 ret = -ENOSPC; 3628 /* fall through to return what was recorded */ 3629 } 3630 3631 /* copy verifier log back to user space including trailing zero */ 3632 if (log_level && copy_to_user(log_ubuf, log_buf, log_len + 1) != 0) { 3633 ret = -EFAULT; 3634 goto free_log_buf; 3635 } 3636 3637 if (ret == 0 && env->used_map_cnt) { 3638 /* if program passed verifier, update used_maps in bpf_prog_info */ 3639 env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt, 3640 sizeof(env->used_maps[0]), 3641 GFP_KERNEL); 3642 3643 if (!env->prog->aux->used_maps) { 3644 ret = -ENOMEM; 3645 goto free_log_buf; 3646 } 3647 3648 memcpy(env->prog->aux->used_maps, env->used_maps, 3649 sizeof(env->used_maps[0]) * env->used_map_cnt); 3650 env->prog->aux->used_map_cnt = env->used_map_cnt; 3651 3652 /* program is valid. Convert pseudo bpf_ld_imm64 into generic 3653 * bpf_ld_imm64 instructions 3654 */ 3655 convert_pseudo_ld_imm64(env); 3656 } 3657 3658 free_log_buf: 3659 if (log_level) 3660 vfree(log_buf); 3661 if (!env->prog->aux->used_maps) 3662 /* if we didn't copy map pointers into bpf_prog_info, release 3663 * them now. Otherwise free_bpf_prog_info() will release them. 
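		 * (these are the references grabbed via bpf_map_inc() in
		 * replace_map_fd_with_map_ptr().)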
3664 */ 3665 release_maps(env); 3666 *prog = env->prog; 3667 err_unlock: 3668 mutex_unlock(&bpf_verifier_lock); 3669 vfree(env->insn_aux_data); 3670 err_free_env: 3671 kfree(env); 3672 return ret; 3673 } 3674 3675 int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops, 3676 void *priv) 3677 { 3678 struct bpf_verifier_env *env; 3679 int ret; 3680 3681 env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL); 3682 if (!env) 3683 return -ENOMEM; 3684 3685 env->insn_aux_data = vzalloc(sizeof(struct bpf_insn_aux_data) * 3686 prog->len); 3687 ret = -ENOMEM; 3688 if (!env->insn_aux_data) 3689 goto err_free_env; 3690 env->prog = prog; 3691 env->analyzer_ops = ops; 3692 env->analyzer_priv = priv; 3693 3694 /* grab the mutex to protect few globals used by verifier */ 3695 mutex_lock(&bpf_verifier_lock); 3696 3697 log_level = 0; 3698 3699 env->strict_alignment = false; 3700 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) 3701 env->strict_alignment = true; 3702 3703 env->explored_states = kcalloc(env->prog->len, 3704 sizeof(struct bpf_verifier_state_list *), 3705 GFP_KERNEL); 3706 ret = -ENOMEM; 3707 if (!env->explored_states) 3708 goto skip_full_check; 3709 3710 ret = check_cfg(env); 3711 if (ret < 0) 3712 goto skip_full_check; 3713 3714 env->allow_ptr_leaks = capable(CAP_SYS_ADMIN); 3715 3716 ret = do_check(env); 3717 3718 skip_full_check: 3719 while (pop_stack(env, NULL) >= 0); 3720 free_states(env); 3721 3722 mutex_unlock(&bpf_verifier_lock); 3723 vfree(env->insn_aux_data); 3724 err_free_env: 3725 kfree(env); 3726 return ret; 3727 } 3728 EXPORT_SYMBOL_GPL(bpf_analyzer); 3729