/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <net/netlink.h>
#include <linux/file.h>
#include <linux/vmalloc.h>

/* bpf_check() is a static code analyzer that walks eBPF program
 * instruction by instruction and updates register/stack state.
 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
 *
 * The first pass is depth-first-search to check that the program is a DAG.
 * It rejects the following programs:
 * - larger than BPF_MAXINSNS insns
 * - if loop is present (detected via back-edge)
 * - unreachable insns exist (shouldn't be a forest. program = one function)
 * - out of bounds or malformed jumps
 * The second pass is all possible path descent from the 1st insn.
 * Since it's analyzing all paths through the program, the length of the
 * analysis is limited to 32k insn, which may be hit even if total number of
 * insn is less than 4K, but there are too many branches that change stack/regs.
 * Number of 'branches to be analyzed' is limited to 1k
 *
 * On entry to each instruction, each register has a type, and the instruction
 * changes the types of the registers depending on instruction semantics.
 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
 * copied to R1.
 *
 * All registers are 64-bit.
 * R0 - return register
 * R1-R5 argument passing registers
 * R6-R9 callee saved registers
 * R10 - frame pointer read-only
 *
 * At the start of BPF program the register R1 contains a pointer to bpf_context
 * and has type PTR_TO_CTX.
 *
 * Verifier tracks arithmetic operations on pointers in case:
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
 * 1st insn copies R10 (which has FRAME_PTR) type into R1
 * and 2nd arithmetic instruction is pattern matched to recognize
 * that it wants to construct a pointer to some element within stack.
 * So after 2nd insn, the register R1 has type PTR_TO_STACK
 * (and -20 constant is saved for further stack bounds checking).
 * Meaning that this reg is a pointer to stack plus known immediate constant.
 *
 * Most of the time the registers have UNKNOWN_VALUE type, which
 * means the register has some value, but it's not a valid pointer.
 * (like pointer plus pointer becomes UNKNOWN_VALUE type)
 *
 * When verifier sees load or store instructions the type of base register
 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, FRAME_PTR. These are three pointer
 * types recognized by check_mem_access() function.
 *
 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
 * and the range of [ptr, ptr + map's value_size) is accessible.
 *
 * registers used to pass values to function calls are checked against
 * function argument constraints.
 *
 * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
 * It means that the register type passed to this function must be
 * PTR_TO_STACK and it will be used inside the function as
 * 'pointer to map element key'
 *
 * For example the argument constraints for bpf_map_lookup_elem():
 *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
 *   .arg1_type = ARG_CONST_MAP_PTR,
 *   .arg2_type = ARG_PTR_TO_MAP_KEY,
 *
 * ret_type says that this function returns 'pointer to map elem value or null'
 * function expects 1st argument to be a const pointer to 'struct bpf_map' and
 * 2nd argument should be a pointer to stack, which will be used inside
 * the helper function as a pointer to map element key.
 *
 * On the kernel side the helper function looks like:
 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 * {
 *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
 *    void *key = (void *) (unsigned long) r2;
 *    void *value;
 *
 *    here kernel can access 'key' and 'map' pointers safely, knowing that
 *    [key, key + map->key_size) bytes are valid and were initialized on
 *    the stack of eBPF program.
 * }
 *
 * Corresponding eBPF program may look like:
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 * here verifier looks at prototype of map_lookup_elem() and sees:
 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
 *
 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
 * and were initialized prior to this call.
 * If it's ok, then verifier allows this BPF_CALL insn and looks at
 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
 * returns either pointer to map value or NULL.
 *
 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
 * insn, the register holding that pointer in the true branch changes state to
 * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
 * branch. See check_cond_jmp_op().
 *
 * After the call R0 is set to return type of the function and registers R1-R5
 * are set to NOT_INIT to indicate that they are no longer readable.
 */
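
/* Editor's illustration (not part of the verifier itself): a complete
 * program exercising the checks described above, assuming a map whose
 * key_size == 4 and value_size >= 8. The per-insn register states shown
 * are the ones the verifier would infer:
 *
 *    BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),   // fp-8..fp-5 become STACK_MISC
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),   // R2 type is FRAME_PTR
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),  // R2 type is PTR_TO_STACK, imm=-8
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),       // R1 type is CONST_PTR_TO_MAP
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 *                                            // R0 type is PTR_TO_MAP_VALUE_OR_NULL
 *    BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),  // taken: R0 becomes CONST_IMM 0
 *    BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),   // fall-through: R0 is PTR_TO_MAP_VALUE
 *    BPF_MOV64_IMM(BPF_REG_0, 0),
 *    BPF_EXIT_INSN(),                        // R0 readable and not a pointer
 */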

/* types of values stored in eBPF registers */
enum bpf_reg_type {
	NOT_INIT = 0,		 /* nothing was written into register */
	UNKNOWN_VALUE,		 /* reg doesn't contain a valid pointer */
	PTR_TO_CTX,		 /* reg points to bpf_context */
	CONST_PTR_TO_MAP,	 /* reg points to struct bpf_map */
	PTR_TO_MAP_VALUE,	 /* reg points to map element value */
	PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
	FRAME_PTR,		 /* reg == frame_pointer */
	PTR_TO_STACK,		 /* reg == frame_pointer + imm */
	CONST_IMM,		 /* constant integer value */
};

struct reg_state {
	enum bpf_reg_type type;
	union {
		/* valid when type == CONST_IMM | PTR_TO_STACK */
		int imm;

		/* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
		 *   PTR_TO_MAP_VALUE_OR_NULL
		 */
		struct bpf_map *map_ptr;
	};
};

enum bpf_stack_slot_type {
	STACK_INVALID,    /* nothing was stored in this stack slot */
	STACK_SPILL,      /* register spilled into stack */
	STACK_MISC	  /* BPF program wrote some data into this slot */
};

#define BPF_REG_SIZE 8	/* size of eBPF register in bytes */

/* state of the program:
 * type of all registers and stack info
 */
struct verifier_state {
	struct reg_state regs[MAX_BPF_REG];
	u8 stack_slot_type[MAX_BPF_STACK];
	struct reg_state spilled_regs[MAX_BPF_STACK / BPF_REG_SIZE];
};

/* linked list of verifier states used to prune search */
struct verifier_state_list {
	struct verifier_state state;
	struct verifier_state_list *next;
};

/* verifier_state + insn_idx are pushed to stack when branch is encountered */
struct verifier_stack_elem {
	/* verifier state is 'st'
	 * before processing instruction 'insn_idx'
	 * and after processing instruction 'prev_insn_idx'
	 */
	struct verifier_state st;
	int insn_idx;
	int prev_insn_idx;
	struct verifier_stack_elem *next;
};

#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */

/* single container for all structs
 * one verifier_env per bpf_check() call
 */
struct verifier_env {
	struct bpf_prog *prog;		/* eBPF program being verified */
	struct verifier_stack_elem *head; /* stack of verifier states to be processed */
	int stack_size;			/* number of states to be processed */
	struct verifier_state cur_state; /* current verifier state */
	struct verifier_state_list **explored_states; /* search pruning optimization */
	struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of maps used by eBPF program */
	u32 used_map_cnt;		/* number of used maps */
	bool allow_ptr_leaks;
};

/* verbose verifier prints what it's seeing
 * bpf_check() is called under lock, so no race to access these global vars
 */
static u32 log_level, log_size, log_len;
static char *log_buf;

static DEFINE_MUTEX(bpf_verifier_lock);
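
/* Editor's note on the layout above: verifier_state tracks the stack at
 * byte granularity in stack_slot_type[] but spills at register granularity
 * in spilled_regs[]. E.g. assuming MAX_BPF_STACK == 512 (as in linux/bpf.h),
 * the byte at fp-8 is stack_slot_type[504] and an 8-byte spill covering
 * fp-8..fp-1 lives in spilled_regs[63].
 */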

/* log_level controls verbosity level of eBPF verifier.
 * verbose() is used to dump the verification trace to the log, so the user
 * can figure out what's wrong with the program
 */
static __printf(1, 2) void verbose(const char *fmt, ...)
{
	va_list args;

	if (log_level == 0 || log_len >= log_size - 1)
		return;

	va_start(args, fmt);
	log_len += vscnprintf(log_buf + log_len, log_size - log_len, fmt, args);
	va_end(args);
}

/* string representation of 'enum bpf_reg_type' */
static const char * const reg_type_str[] = {
	[NOT_INIT]		= "?",
	[UNKNOWN_VALUE]		= "inv",
	[PTR_TO_CTX]		= "ctx",
	[CONST_PTR_TO_MAP]	= "map_ptr",
	[PTR_TO_MAP_VALUE]	= "map_value",
	[PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
	[FRAME_PTR]		= "fp",
	[PTR_TO_STACK]		= "fp",
	[CONST_IMM]		= "imm",
};

static const struct {
	int map_type;
	int func_id;
} func_limit[] = {
	{BPF_MAP_TYPE_PROG_ARRAY, BPF_FUNC_tail_call},
	{BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_read},
	{BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_output},
};

static void print_verifier_state(struct verifier_env *env)
{
	enum bpf_reg_type t;
	int i;

	for (i = 0; i < MAX_BPF_REG; i++) {
		t = env->cur_state.regs[i].type;
		if (t == NOT_INIT)
			continue;
		verbose(" R%d=%s", i, reg_type_str[t]);
		if (t == CONST_IMM || t == PTR_TO_STACK)
			verbose("%d", env->cur_state.regs[i].imm);
		else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE ||
			 t == PTR_TO_MAP_VALUE_OR_NULL)
			verbose("(ks=%d,vs=%d)",
				env->cur_state.regs[i].map_ptr->key_size,
				env->cur_state.regs[i].map_ptr->value_size);
	}
	for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
		if (env->cur_state.stack_slot_type[i] == STACK_SPILL)
			verbose(" fp%d=%s", -MAX_BPF_STACK + i,
				reg_type_str[env->cur_state.spilled_regs[i / BPF_REG_SIZE].type]);
	}
	verbose("\n");
}

static const char *const bpf_class_string[] = {
	[BPF_LD]    = "ld",
	[BPF_LDX]   = "ldx",
	[BPF_ST]    = "st",
	[BPF_STX]   = "stx",
	[BPF_ALU]   = "alu",
	[BPF_JMP]   = "jmp",
	[BPF_RET]   = "BUG",
	[BPF_ALU64] = "alu64",
};

static const char *const bpf_alu_string[16] = {
	[BPF_ADD >> 4]  = "+=",
	[BPF_SUB >> 4]  = "-=",
	[BPF_MUL >> 4]  = "*=",
	[BPF_DIV >> 4]  = "/=",
	[BPF_OR  >> 4]  = "|=",
	[BPF_AND >> 4]  = "&=",
	[BPF_LSH >> 4]  = "<<=",
	[BPF_RSH >> 4]  = ">>=",
	[BPF_NEG >> 4]  = "neg",
	[BPF_MOD >> 4]  = "%=",
	[BPF_XOR >> 4]  = "^=",
	[BPF_MOV >> 4]  = "=",
	[BPF_ARSH >> 4] = "s>>=",
	[BPF_END >> 4]  = "endian",
};

static const char *const bpf_ldst_string[] = {
	[BPF_W >> 3]  = "u32",
	[BPF_H >> 3]  = "u16",
	[BPF_B >> 3]  = "u8",
	[BPF_DW >> 3] = "u64",
};

static const char *const bpf_jmp_string[16] = {
	[BPF_JA >> 4]   = "jmp",
	[BPF_JEQ >> 4]  = "==",
	[BPF_JGT >> 4]  = ">",
	[BPF_JGE >> 4]  = ">=",
	[BPF_JSET >> 4] = "&",
	[BPF_JNE >> 4]  = "!=",
	[BPF_JSGT >> 4] = "s>",
	[BPF_JSGE >> 4] = "s>=",
	[BPF_CALL >> 4] = "call",
	[BPF_EXIT >> 4] = "exit",
};
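
/* Editor's example of the log these tables produce for a trivial two-insn
 * program (exact spacing may differ):
 *    0: (b7) r0 = 0
 *    1: (95) exit
 * where (b7) is BPF_ALU64 | BPF_MOV | BPF_K and (95) is BPF_JMP | BPF_EXIT.
 */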
"(u32) " : "", 343 insn->imm); 344 } else if (class == BPF_STX) { 345 if (BPF_MODE(insn->code) == BPF_MEM) 346 verbose("(%02x) *(%s *)(r%d %+d) = r%d\n", 347 insn->code, 348 bpf_ldst_string[BPF_SIZE(insn->code) >> 3], 349 insn->dst_reg, 350 insn->off, insn->src_reg); 351 else if (BPF_MODE(insn->code) == BPF_XADD) 352 verbose("(%02x) lock *(%s *)(r%d %+d) += r%d\n", 353 insn->code, 354 bpf_ldst_string[BPF_SIZE(insn->code) >> 3], 355 insn->dst_reg, insn->off, 356 insn->src_reg); 357 else 358 verbose("BUG_%02x\n", insn->code); 359 } else if (class == BPF_ST) { 360 if (BPF_MODE(insn->code) != BPF_MEM) { 361 verbose("BUG_st_%02x\n", insn->code); 362 return; 363 } 364 verbose("(%02x) *(%s *)(r%d %+d) = %d\n", 365 insn->code, 366 bpf_ldst_string[BPF_SIZE(insn->code) >> 3], 367 insn->dst_reg, 368 insn->off, insn->imm); 369 } else if (class == BPF_LDX) { 370 if (BPF_MODE(insn->code) != BPF_MEM) { 371 verbose("BUG_ldx_%02x\n", insn->code); 372 return; 373 } 374 verbose("(%02x) r%d = *(%s *)(r%d %+d)\n", 375 insn->code, insn->dst_reg, 376 bpf_ldst_string[BPF_SIZE(insn->code) >> 3], 377 insn->src_reg, insn->off); 378 } else if (class == BPF_LD) { 379 if (BPF_MODE(insn->code) == BPF_ABS) { 380 verbose("(%02x) r0 = *(%s *)skb[%d]\n", 381 insn->code, 382 bpf_ldst_string[BPF_SIZE(insn->code) >> 3], 383 insn->imm); 384 } else if (BPF_MODE(insn->code) == BPF_IND) { 385 verbose("(%02x) r0 = *(%s *)skb[r%d + %d]\n", 386 insn->code, 387 bpf_ldst_string[BPF_SIZE(insn->code) >> 3], 388 insn->src_reg, insn->imm); 389 } else if (BPF_MODE(insn->code) == BPF_IMM) { 390 verbose("(%02x) r%d = 0x%x\n", 391 insn->code, insn->dst_reg, insn->imm); 392 } else { 393 verbose("BUG_ld_%02x\n", insn->code); 394 return; 395 } 396 } else if (class == BPF_JMP) { 397 u8 opcode = BPF_OP(insn->code); 398 399 if (opcode == BPF_CALL) { 400 verbose("(%02x) call %d\n", insn->code, insn->imm); 401 } else if (insn->code == (BPF_JMP | BPF_JA)) { 402 verbose("(%02x) goto pc%+d\n", 403 insn->code, insn->off); 404 } else if (insn->code == (BPF_JMP | BPF_EXIT)) { 405 verbose("(%02x) exit\n", insn->code); 406 } else if (BPF_SRC(insn->code) == BPF_X) { 407 verbose("(%02x) if r%d %s r%d goto pc%+d\n", 408 insn->code, insn->dst_reg, 409 bpf_jmp_string[BPF_OP(insn->code) >> 4], 410 insn->src_reg, insn->off); 411 } else { 412 verbose("(%02x) if r%d %s 0x%x goto pc%+d\n", 413 insn->code, insn->dst_reg, 414 bpf_jmp_string[BPF_OP(insn->code) >> 4], 415 insn->imm, insn->off); 416 } 417 } else { 418 verbose("(%02x) %s\n", insn->code, bpf_class_string[class]); 419 } 420 } 421 422 static int pop_stack(struct verifier_env *env, int *prev_insn_idx) 423 { 424 struct verifier_stack_elem *elem; 425 int insn_idx; 426 427 if (env->head == NULL) 428 return -1; 429 430 memcpy(&env->cur_state, &env->head->st, sizeof(env->cur_state)); 431 insn_idx = env->head->insn_idx; 432 if (prev_insn_idx) 433 *prev_insn_idx = env->head->prev_insn_idx; 434 elem = env->head->next; 435 kfree(env->head); 436 env->head = elem; 437 env->stack_size--; 438 return insn_idx; 439 } 440 441 static struct verifier_state *push_stack(struct verifier_env *env, int insn_idx, 442 int prev_insn_idx) 443 { 444 struct verifier_stack_elem *elem; 445 446 elem = kmalloc(sizeof(struct verifier_stack_elem), GFP_KERNEL); 447 if (!elem) 448 goto err; 449 450 memcpy(&elem->st, &env->cur_state, sizeof(env->cur_state)); 451 elem->insn_idx = insn_idx; 452 elem->prev_insn_idx = prev_insn_idx; 453 elem->next = env->head; 454 env->head = elem; 455 env->stack_size++; 456 if (env->stack_size > 1024) { 457 
verbose("BPF program is too complex\n"); 458 goto err; 459 } 460 return &elem->st; 461 err: 462 /* pop all elements and return */ 463 while (pop_stack(env, NULL) >= 0); 464 return NULL; 465 } 466 467 #define CALLER_SAVED_REGS 6 468 static const int caller_saved[CALLER_SAVED_REGS] = { 469 BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5 470 }; 471 472 static void init_reg_state(struct reg_state *regs) 473 { 474 int i; 475 476 for (i = 0; i < MAX_BPF_REG; i++) { 477 regs[i].type = NOT_INIT; 478 regs[i].imm = 0; 479 regs[i].map_ptr = NULL; 480 } 481 482 /* frame pointer */ 483 regs[BPF_REG_FP].type = FRAME_PTR; 484 485 /* 1st arg to a function */ 486 regs[BPF_REG_1].type = PTR_TO_CTX; 487 } 488 489 static void mark_reg_unknown_value(struct reg_state *regs, u32 regno) 490 { 491 BUG_ON(regno >= MAX_BPF_REG); 492 regs[regno].type = UNKNOWN_VALUE; 493 regs[regno].imm = 0; 494 regs[regno].map_ptr = NULL; 495 } 496 497 enum reg_arg_type { 498 SRC_OP, /* register is used as source operand */ 499 DST_OP, /* register is used as destination operand */ 500 DST_OP_NO_MARK /* same as above, check only, don't mark */ 501 }; 502 503 static int check_reg_arg(struct reg_state *regs, u32 regno, 504 enum reg_arg_type t) 505 { 506 if (regno >= MAX_BPF_REG) { 507 verbose("R%d is invalid\n", regno); 508 return -EINVAL; 509 } 510 511 if (t == SRC_OP) { 512 /* check whether register used as source operand can be read */ 513 if (regs[regno].type == NOT_INIT) { 514 verbose("R%d !read_ok\n", regno); 515 return -EACCES; 516 } 517 } else { 518 /* check whether register used as dest operand can be written to */ 519 if (regno == BPF_REG_FP) { 520 verbose("frame pointer is read only\n"); 521 return -EACCES; 522 } 523 if (t == DST_OP) 524 mark_reg_unknown_value(regs, regno); 525 } 526 return 0; 527 } 528 529 static int bpf_size_to_bytes(int bpf_size) 530 { 531 if (bpf_size == BPF_W) 532 return 4; 533 else if (bpf_size == BPF_H) 534 return 2; 535 else if (bpf_size == BPF_B) 536 return 1; 537 else if (bpf_size == BPF_DW) 538 return 8; 539 else 540 return -EINVAL; 541 } 542 543 static bool is_spillable_regtype(enum bpf_reg_type type) 544 { 545 switch (type) { 546 case PTR_TO_MAP_VALUE: 547 case PTR_TO_MAP_VALUE_OR_NULL: 548 case PTR_TO_STACK: 549 case PTR_TO_CTX: 550 case FRAME_PTR: 551 case CONST_PTR_TO_MAP: 552 return true; 553 default: 554 return false; 555 } 556 } 557 558 /* check_stack_read/write functions track spill/fill of registers, 559 * stack boundary and alignment are checked in check_mem_access() 560 */ 561 static int check_stack_write(struct verifier_state *state, int off, int size, 562 int value_regno) 563 { 564 int i; 565 /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0, 566 * so it's aligned access and [off, off + size) are within stack limits 567 */ 568 569 if (value_regno >= 0 && 570 is_spillable_regtype(state->regs[value_regno].type)) { 571 572 /* register containing pointer is being spilled into stack */ 573 if (size != BPF_REG_SIZE) { 574 verbose("invalid size of register spill\n"); 575 return -EACCES; 576 } 577 578 /* save register state */ 579 state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] = 580 state->regs[value_regno]; 581 582 for (i = 0; i < BPF_REG_SIZE; i++) 583 state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_SPILL; 584 } else { 585 /* regular write of data into stack */ 586 state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] = 587 (struct reg_state) {}; 588 589 for (i = 0; i < size; i++) 590 state->stack_slot_type[MAX_BPF_STACK + off + i] 

static int check_stack_read(struct verifier_state *state, int off, int size,
			    int value_regno)
{
	u8 *slot_type;
	int i;

	slot_type = &state->stack_slot_type[MAX_BPF_STACK + off];

	if (slot_type[0] == STACK_SPILL) {
		if (size != BPF_REG_SIZE) {
			verbose("invalid size of register spill\n");
			return -EACCES;
		}
		for (i = 1; i < BPF_REG_SIZE; i++) {
			if (slot_type[i] != STACK_SPILL) {
				verbose("corrupted spill memory\n");
				return -EACCES;
			}
		}

		if (value_regno >= 0)
			/* restore register state from stack */
			state->regs[value_regno] =
				state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE];
		return 0;
	} else {
		for (i = 0; i < size; i++) {
			if (slot_type[i] != STACK_MISC) {
				verbose("invalid read from stack off %d+%d size %d\n",
					off, i, size);
				return -EACCES;
			}
		}
		if (value_regno >= 0)
			/* have read misc data from the stack */
			mark_reg_unknown_value(state->regs, value_regno);
		return 0;
	}
}

/* check read/write into map element returned by bpf_map_lookup_elem() */
static int check_map_access(struct verifier_env *env, u32 regno, int off,
			    int size)
{
	struct bpf_map *map = env->cur_state.regs[regno].map_ptr;

	if (off < 0 || off + size > map->value_size) {
		verbose("invalid access to map value, value_size=%d off=%d size=%d\n",
			map->value_size, off, size);
		return -EACCES;
	}
	return 0;
}

/* check access to 'struct bpf_context' fields */
static int check_ctx_access(struct verifier_env *env, int off, int size,
			    enum bpf_access_type t)
{
	if (env->prog->aux->ops->is_valid_access &&
	    env->prog->aux->ops->is_valid_access(off, size, t))
		return 0;

	verbose("invalid bpf_context access off=%d size=%d\n", off, size);
	return -EACCES;
}

static bool is_pointer_value(struct verifier_env *env, int regno)
{
	if (env->allow_ptr_leaks)
		return false;

	switch (env->cur_state.regs[regno].type) {
	case UNKNOWN_VALUE:
	case CONST_IMM:
		return false;
	default:
		return true;
	}
}

/* check whether memory at (regno + off) is accessible for t = (read | write)
 * if t==write, value_regno is a register whose value is stored into memory
 * if t==read, value_regno is a register that will receive the value from memory
 * if t==write && value_regno==-1, some unknown value is stored into memory
 * if t==read && value_regno==-1, don't care what we read from memory
 */
static int check_mem_access(struct verifier_env *env, u32 regno, int off,
			    int bpf_size, enum bpf_access_type t,
			    int value_regno)
{
	struct verifier_state *state = &env->cur_state;
	int size, err = 0;

	if (state->regs[regno].type == PTR_TO_STACK)
		off += state->regs[regno].imm;

	size = bpf_size_to_bytes(bpf_size);
	if (size < 0)
		return size;

	if (off % size != 0) {
		verbose("misaligned access off %d size %d\n", off, size);
		return -EACCES;
	}

	if (state->regs[regno].type == PTR_TO_MAP_VALUE) {
		if (t == BPF_WRITE && value_regno >= 0 &&
		    is_pointer_value(env, value_regno)) {
			verbose("R%d leaks addr into map\n", value_regno);
			return -EACCES;
		}
		err = check_map_access(env, regno, off, size);
		if (!err && t == BPF_READ && value_regno >= 0)
			mark_reg_unknown_value(state->regs, value_regno);

	} else if (state->regs[regno].type == PTR_TO_CTX) {
		if (t == BPF_WRITE && value_regno >= 0 &&
		    is_pointer_value(env, value_regno)) {
			verbose("R%d leaks addr into ctx\n", value_regno);
			return -EACCES;
		}
		err = check_ctx_access(env, off, size, t);
		if (!err && t == BPF_READ && value_regno >= 0)
			mark_reg_unknown_value(state->regs, value_regno);

	} else if (state->regs[regno].type == FRAME_PTR ||
		   state->regs[regno].type == PTR_TO_STACK) {
		if (off >= 0 || off < -MAX_BPF_STACK) {
			verbose("invalid stack off=%d size=%d\n", off, size);
			return -EACCES;
		}
		if (t == BPF_WRITE) {
			if (!env->allow_ptr_leaks &&
			    state->stack_slot_type[MAX_BPF_STACK + off] == STACK_SPILL &&
			    size != BPF_REG_SIZE) {
				verbose("attempt to corrupt spilled pointer on stack\n");
				return -EACCES;
			}
			err = check_stack_write(state, off, size, value_regno);
		} else {
			err = check_stack_read(state, off, size, value_regno);
		}
	} else {
		verbose("R%d invalid mem access '%s'\n",
			regno, reg_type_str[state->regs[regno].type]);
		return -EACCES;
	}
	return err;
}

static int check_xadd(struct verifier_env *env, struct bpf_insn *insn)
{
	struct reg_state *regs = env->cur_state.regs;
	int err;

	if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) ||
	    insn->imm != 0) {
		verbose("BPF_XADD uses reserved fields\n");
		return -EINVAL;
	}

	/* check src1 operand */
	err = check_reg_arg(regs, insn->src_reg, SRC_OP);
	if (err)
		return err;

	/* check src2 operand */
	err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
	if (err)
		return err;

	/* check whether atomic_add can read the memory */
	err = check_mem_access(env, insn->dst_reg, insn->off,
			       BPF_SIZE(insn->code), BPF_READ, -1);
	if (err)
		return err;

	/* check whether atomic_add can write into the same memory */
	return check_mem_access(env, insn->dst_reg, insn->off,
				BPF_SIZE(insn->code), BPF_WRITE, -1);
}

/* when register 'regno' is passed into function that will read 'access_size'
 * bytes from that pointer, make sure that it's within stack boundary
 * and all elements of stack are initialized
 */
static int check_stack_boundary(struct verifier_env *env,
				int regno, int access_size)
{
	struct verifier_state *state = &env->cur_state;
	struct reg_state *regs = state->regs;
	int off, i;

	if (regs[regno].type != PTR_TO_STACK)
		return -EACCES;

	off = regs[regno].imm;
	if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
	    access_size <= 0) {
		verbose("invalid stack type R%d off=%d access_size=%d\n",
			regno, off, access_size);
		return -EACCES;
	}

	for (i = 0; i < access_size; i++) {
		if (state->stack_slot_type[MAX_BPF_STACK + off + i] != STACK_MISC) {
			verbose("invalid indirect read from stack off %d+%d size %d\n",
				off, i, access_size);
			return -EACCES;
		}
	}
	return 0;
}
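
/* Editor's worked example for check_stack_boundary(): with
 * regs[regno] = {PTR_TO_STACK, .imm = -8} and access_size = 8, the call
 * succeeds iff bytes fp-8..fp-1 are all STACK_MISC. imm = -4 with
 * access_size = 8 fails the off + access_size > 0 test, and any byte the
 * program never wrote (STACK_INVALID) fails the loop.
 */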

static int check_func_arg(struct verifier_env *env, u32 regno,
			  enum bpf_arg_type arg_type, struct bpf_map **mapp)
{
	struct reg_state *reg = env->cur_state.regs + regno;
	enum bpf_reg_type expected_type;
	int err = 0;

	if (arg_type == ARG_DONTCARE)
		return 0;

	if (reg->type == NOT_INIT) {
		verbose("R%d !read_ok\n", regno);
		return -EACCES;
	}

	if (arg_type == ARG_ANYTHING) {
		if (is_pointer_value(env, regno)) {
			verbose("R%d leaks addr into helper function\n",
				regno);
			return -EACCES;
		}
		return 0;
	}

	if (arg_type == ARG_PTR_TO_STACK || arg_type == ARG_PTR_TO_MAP_KEY ||
	    arg_type == ARG_PTR_TO_MAP_VALUE) {
		expected_type = PTR_TO_STACK;
	} else if (arg_type == ARG_CONST_STACK_SIZE) {
		expected_type = CONST_IMM;
	} else if (arg_type == ARG_CONST_MAP_PTR) {
		expected_type = CONST_PTR_TO_MAP;
	} else if (arg_type == ARG_PTR_TO_CTX) {
		expected_type = PTR_TO_CTX;
	} else {
		verbose("unsupported arg_type %d\n", arg_type);
		return -EFAULT;
	}

	if (reg->type != expected_type) {
		verbose("R%d type=%s expected=%s\n", regno,
			reg_type_str[reg->type], reg_type_str[expected_type]);
		return -EACCES;
	}

	if (arg_type == ARG_CONST_MAP_PTR) {
		/* bpf_map_xxx(map_ptr) call: remember that map_ptr */
		*mapp = reg->map_ptr;

	} else if (arg_type == ARG_PTR_TO_MAP_KEY) {
		/* bpf_map_xxx(..., map_ptr, ..., key) call:
		 * check that [key, key + map->key_size) are within
		 * stack limits and initialized
		 */
		if (!*mapp) {
			/* in function declaration map_ptr must come before
			 * map_key, so that it's verified and known before
			 * we have to check map_key here. Otherwise it means
			 * that kernel subsystem misconfigured verifier
			 */
			verbose("invalid map_ptr to access map->key\n");
			return -EACCES;
		}
		err = check_stack_boundary(env, regno, (*mapp)->key_size);

	} else if (arg_type == ARG_PTR_TO_MAP_VALUE) {
		/* bpf_map_xxx(..., map_ptr, ..., value) call:
		 * check [value, value + map->value_size) validity
		 */
		if (!*mapp) {
			/* kernel subsystem misconfigured verifier */
			verbose("invalid map_ptr to access map->value\n");
			return -EACCES;
		}
		err = check_stack_boundary(env, regno, (*mapp)->value_size);

	} else if (arg_type == ARG_CONST_STACK_SIZE) {
		/* bpf_xxx(..., buf, len) call will access 'len' bytes
		 * from stack pointer 'buf'. Check it
		 * note: regno == len, regno - 1 == buf
		 */
		if (regno == 0) {
			/* kernel subsystem misconfigured verifier */
			verbose("ARG_CONST_STACK_SIZE cannot be first argument\n");
			return -EACCES;
		}
		err = check_stack_boundary(env, regno - 1, reg->imm);
	}

	return err;
}
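
/* Editor's note on the func_limit[] matching below: the check is
 * one-directional in this version. E.g. BPF_FUNC_tail_call is rejected
 * unless its map argument is a BPF_MAP_TYPE_PROG_ARRAY; the table does not
 * police passing a prog array to helpers that never appear in it.
 */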

static int check_map_func_compatibility(struct bpf_map *map, int func_id)
{
	bool bool_map, bool_func;
	int i;

	if (!map)
		return 0;

	for (i = 0; i < ARRAY_SIZE(func_limit); i++) {
		bool_map = (map->map_type == func_limit[i].map_type);
		bool_func = (func_id == func_limit[i].func_id);
		/* only when the map & func pair matches can it continue;
		 * don't allow any other map type to be passed into
		 * the special func.
		 */
		if (bool_func && bool_map != bool_func)
			return -EINVAL;
	}

	return 0;
}

static int check_call(struct verifier_env *env, int func_id)
{
	struct verifier_state *state = &env->cur_state;
	const struct bpf_func_proto *fn = NULL;
	struct reg_state *regs = state->regs;
	struct bpf_map *map = NULL;
	struct reg_state *reg;
	int i, err;

	/* find function prototype */
	if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
		verbose("invalid func %d\n", func_id);
		return -EINVAL;
	}

	if (env->prog->aux->ops->get_func_proto)
		fn = env->prog->aux->ops->get_func_proto(func_id);

	if (!fn) {
		verbose("unknown func %d\n", func_id);
		return -EINVAL;
	}

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	if (!env->prog->gpl_compatible && fn->gpl_only) {
		verbose("cannot call GPL only function from proprietary program\n");
		return -EINVAL;
	}

	/* check args */
	err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &map);
	if (err)
		return err;
	err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &map);
	if (err)
		return err;
	err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &map);
	if (err)
		return err;
	err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &map);
	if (err)
		return err;
	err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &map);
	if (err)
		return err;

	/* reset caller saved regs */
	for (i = 0; i < CALLER_SAVED_REGS; i++) {
		reg = regs + caller_saved[i];
		reg->type = NOT_INIT;
		reg->imm = 0;
	}

	/* update return register */
	if (fn->ret_type == RET_INTEGER) {
		regs[BPF_REG_0].type = UNKNOWN_VALUE;
	} else if (fn->ret_type == RET_VOID) {
		regs[BPF_REG_0].type = NOT_INIT;
	} else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) {
		regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
		/* remember map_ptr, so that check_map_access()
		 * can check 'value_size' boundary of memory access
		 * to map element returned from bpf_map_lookup_elem()
		 */
		if (map == NULL) {
			verbose("kernel subsystem misconfigured verifier\n");
			return -EINVAL;
		}
		regs[BPF_REG_0].map_ptr = map;
	} else {
		verbose("unknown return type %d of func %d\n",
			fn->ret_type, func_id);
		return -EINVAL;
	}

	err = check_map_func_compatibility(map, func_id);
	if (err)
		return err;

	return 0;
}

/* check validity of 32-bit and 64-bit arithmetic operations */
static int check_alu_op(struct verifier_env *env, struct bpf_insn *insn)
{
	struct reg_state *regs = env->cur_state.regs;
	u8 opcode = BPF_OP(insn->code);
	int err;

	if (opcode == BPF_END || opcode == BPF_NEG) {
		if (opcode == BPF_NEG) {
			if (BPF_SRC(insn->code) != 0 ||
			    insn->src_reg != BPF_REG_0 ||
			    insn->off != 0 || insn->imm != 0) {
				verbose("BPF_NEG uses reserved fields\n");
				return -EINVAL;
			}
		} else {
			if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
			    (insn->imm != 16 && insn->imm != 32 && insn->imm != 64)) {
				verbose("BPF_END uses reserved fields\n");
				return -EINVAL;
			}
		}

		/* check src operand */
		err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
		if (err)
			return err;

		if (is_pointer_value(env, insn->dst_reg)) {
			verbose("R%d pointer arithmetic prohibited\n",
				insn->dst_reg);
			return -EACCES;
		}

		/* check dest operand */
		err = check_reg_arg(regs, insn->dst_reg, DST_OP);
		if (err)
			return err;

	} else if (opcode == BPF_MOV) {

		if (BPF_SRC(insn->code) == BPF_X) {
			if (insn->imm != 0 || insn->off != 0) {
				verbose("BPF_MOV uses reserved fields\n");
				return -EINVAL;
			}

			/* check src operand */
			err = check_reg_arg(regs, insn->src_reg, SRC_OP);
			if (err)
				return err;
		} else {
			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
				verbose("BPF_MOV uses reserved fields\n");
				return -EINVAL;
			}
		}

		/* check dest operand */
		err = check_reg_arg(regs, insn->dst_reg, DST_OP);
		if (err)
			return err;

		if (BPF_SRC(insn->code) == BPF_X) {
			if (BPF_CLASS(insn->code) == BPF_ALU64) {
				/* case: R1 = R2
				 * copy register state to dest reg
				 */
				regs[insn->dst_reg] = regs[insn->src_reg];
			} else {
				if (is_pointer_value(env, insn->src_reg)) {
					verbose("R%d partial copy of pointer\n",
						insn->src_reg);
					return -EACCES;
				}
				regs[insn->dst_reg].type = UNKNOWN_VALUE;
				regs[insn->dst_reg].map_ptr = NULL;
			}
		} else {
			/* case: R = imm
			 * remember the value we stored into this reg
			 */
			regs[insn->dst_reg].type = CONST_IMM;
			regs[insn->dst_reg].imm = insn->imm;
		}

	} else if (opcode > BPF_END) {
		verbose("invalid BPF_ALU opcode %x\n", opcode);
		return -EINVAL;

	} else {	/* all other ALU ops: and, sub, xor, add, ... */

		bool stack_relative = false;

		if (BPF_SRC(insn->code) == BPF_X) {
			if (insn->imm != 0 || insn->off != 0) {
				verbose("BPF_ALU uses reserved fields\n");
				return -EINVAL;
			}
			/* check src1 operand */
			err = check_reg_arg(regs, insn->src_reg, SRC_OP);
			if (err)
				return err;
		} else {
			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
				verbose("BPF_ALU uses reserved fields\n");
				return -EINVAL;
			}
		}

		/* check src2 operand */
		err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
		if (err)
			return err;

		if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
		    BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
			verbose("div by zero\n");
			return -EINVAL;
		}

		if ((opcode == BPF_LSH || opcode == BPF_RSH ||
		     opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
			int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;

			if (insn->imm < 0 || insn->imm >= size) {
				verbose("invalid shift %d\n", insn->imm);
				return -EINVAL;
			}
		}

		/* pattern match 'bpf_add Rx, imm' instruction */
		if (opcode == BPF_ADD && BPF_CLASS(insn->code) == BPF_ALU64 &&
		    regs[insn->dst_reg].type == FRAME_PTR &&
		    BPF_SRC(insn->code) == BPF_K) {
			stack_relative = true;
		} else if (is_pointer_value(env, insn->dst_reg)) {
			verbose("R%d pointer arithmetic prohibited\n",
				insn->dst_reg);
			return -EACCES;
		} else if (BPF_SRC(insn->code) == BPF_X &&
			   is_pointer_value(env, insn->src_reg)) {
			verbose("R%d pointer arithmetic prohibited\n",
				insn->src_reg);
			return -EACCES;
		}

		/* check dest operand */
		err = check_reg_arg(regs, insn->dst_reg, DST_OP);
		if (err)
			return err;

		if (stack_relative) {
			regs[insn->dst_reg].type = PTR_TO_STACK;
			regs[insn->dst_reg].imm = insn->imm;
		}
	}

	return 0;
}

static int check_cond_jmp_op(struct verifier_env *env,
			     struct bpf_insn *insn, int *insn_idx)
{
	struct reg_state *regs = env->cur_state.regs;
	struct verifier_state *other_branch;
	u8 opcode = BPF_OP(insn->code);
	int err;

	if (opcode > BPF_EXIT) {
		verbose("invalid BPF_JMP opcode %x\n", opcode);
		return -EINVAL;
	}

	if (BPF_SRC(insn->code) == BPF_X) {
		if (insn->imm != 0) {
			verbose("BPF_JMP uses reserved fields\n");
			return -EINVAL;
		}

		/* check src1 operand */
		err = check_reg_arg(regs, insn->src_reg, SRC_OP);
		if (err)
			return err;

		if (is_pointer_value(env, insn->src_reg)) {
			verbose("R%d pointer comparison prohibited\n",
				insn->src_reg);
			return -EACCES;
		}
	} else {
		if (insn->src_reg != BPF_REG_0) {
			verbose("BPF_JMP uses reserved fields\n");
			return -EINVAL;
		}
	}

	/* check src2 operand */
	err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
	if (err)
		return err;

	/* detect if R == 0 where R was initialized to zero earlier */
	if (BPF_SRC(insn->code) == BPF_K &&
	    (opcode == BPF_JEQ || opcode == BPF_JNE) &&
	    regs[insn->dst_reg].type == CONST_IMM &&
	    regs[insn->dst_reg].imm == insn->imm) {
		if (opcode == BPF_JEQ) {
			/* if (imm == imm) goto pc+off;
			 * only follow the goto, ignore fall-through
			 */
			*insn_idx += insn->off;
			return 0;
		} else {
			/* if (imm != imm) goto pc+off;
			 * only follow fall-through branch, since
			 * that's where the program will go
			 */
			return 0;
		}
	}

	other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx);
	if (!other_branch)
		return -EFAULT;

	/* detect if R == 0 where R is returned value from bpf_map_lookup_elem() */
	if (BPF_SRC(insn->code) == BPF_K &&
	    insn->imm == 0 && (opcode == BPF_JEQ ||
			       opcode == BPF_JNE) &&
	    regs[insn->dst_reg].type == PTR_TO_MAP_VALUE_OR_NULL) {
		if (opcode == BPF_JEQ) {
			/* next fallthrough insn can access memory via
			 * this register
			 */
			regs[insn->dst_reg].type = PTR_TO_MAP_VALUE;
			/* branch target cannot access it, since reg == 0 */
			other_branch->regs[insn->dst_reg].type = CONST_IMM;
			other_branch->regs[insn->dst_reg].imm = 0;
		} else {
			other_branch->regs[insn->dst_reg].type = PTR_TO_MAP_VALUE;
			regs[insn->dst_reg].type = CONST_IMM;
			regs[insn->dst_reg].imm = 0;
		}
	} else if (is_pointer_value(env, insn->dst_reg)) {
		verbose("R%d pointer comparison prohibited\n", insn->dst_reg);
		return -EACCES;
	} else if (BPF_SRC(insn->code) == BPF_K &&
		   (opcode == BPF_JEQ || opcode == BPF_JNE)) {

		if (opcode == BPF_JEQ) {
			/* detect if (R == imm) goto
			 * and in the target state recognize that R = imm
			 */
			other_branch->regs[insn->dst_reg].type = CONST_IMM;
			other_branch->regs[insn->dst_reg].imm = insn->imm;
		} else {
			/* detect if (R != imm) goto
			 * and in the fall-through state recognize that R = imm
			 */
			regs[insn->dst_reg].type = CONST_IMM;
			regs[insn->dst_reg].imm = insn->imm;
		}
	}
	if (log_level)
		print_verifier_state(env);
	return 0;
}

/* return the map pointer stored inside BPF_LD_IMM64 instruction */
static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn)
{
	u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32;

	return (struct bpf_map *) (unsigned long) imm64;
}

/* verify BPF_LD_IMM64 instruction */
static int check_ld_imm(struct verifier_env *env, struct bpf_insn *insn)
{
	struct reg_state *regs = env->cur_state.regs;
	int err;

	if (BPF_SIZE(insn->code) != BPF_DW) {
		verbose("invalid BPF_LD_IMM insn\n");
		return -EINVAL;
	}
	if (insn->off != 0) {
		verbose("BPF_LD_IMM64 uses reserved fields\n");
		return -EINVAL;
	}

	err = check_reg_arg(regs, insn->dst_reg, DST_OP);
	if (err)
		return err;

	if (insn->src_reg == 0)
		/* generic move 64-bit immediate into a register */
		return 0;

	/* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */
	BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD);

	regs[insn->dst_reg].type = CONST_PTR_TO_MAP;
	regs[insn->dst_reg].map_ptr = ld_imm64_to_map_ptr(insn);
	return 0;
}

static bool may_access_skb(enum bpf_prog_type type)
{
	switch (type) {
	case BPF_PROG_TYPE_SOCKET_FILTER:
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		return true;
	default:
		return false;
	}
}
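
/* Editor's example of the LD_ABS form verified below, for a socket filter
 * program (programs typically do BPF_MOV64_REG(BPF_REG_6, BPF_REG_1) first
 * so that R6 holds the PTR_TO_CTX this check requires):
 *    BPF_LD_ABS(BPF_H, 12),                      // R0 = ethertype; R1-R5 scratched
 *    BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x0800, 1), // keep IPv4 only
 */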
to skb\n"); 1363 return -EINVAL; 1364 } 1365 1366 if (mode == BPF_IND) { 1367 /* check explicit source operand */ 1368 err = check_reg_arg(regs, insn->src_reg, SRC_OP); 1369 if (err) 1370 return err; 1371 } 1372 1373 /* reset caller saved regs to unreadable */ 1374 for (i = 0; i < CALLER_SAVED_REGS; i++) { 1375 reg = regs + caller_saved[i]; 1376 reg->type = NOT_INIT; 1377 reg->imm = 0; 1378 } 1379 1380 /* mark destination R0 register as readable, since it contains 1381 * the value fetched from the packet 1382 */ 1383 regs[BPF_REG_0].type = UNKNOWN_VALUE; 1384 return 0; 1385 } 1386 1387 /* non-recursive DFS pseudo code 1388 * 1 procedure DFS-iterative(G,v): 1389 * 2 label v as discovered 1390 * 3 let S be a stack 1391 * 4 S.push(v) 1392 * 5 while S is not empty 1393 * 6 t <- S.pop() 1394 * 7 if t is what we're looking for: 1395 * 8 return t 1396 * 9 for all edges e in G.adjacentEdges(t) do 1397 * 10 if edge e is already labelled 1398 * 11 continue with the next edge 1399 * 12 w <- G.adjacentVertex(t,e) 1400 * 13 if vertex w is not discovered and not explored 1401 * 14 label e as tree-edge 1402 * 15 label w as discovered 1403 * 16 S.push(w) 1404 * 17 continue at 5 1405 * 18 else if vertex w is discovered 1406 * 19 label e as back-edge 1407 * 20 else 1408 * 21 // vertex w is explored 1409 * 22 label e as forward- or cross-edge 1410 * 23 label t as explored 1411 * 24 S.pop() 1412 * 1413 * convention: 1414 * 0x10 - discovered 1415 * 0x11 - discovered and fall-through edge labelled 1416 * 0x12 - discovered and fall-through and branch edges labelled 1417 * 0x20 - explored 1418 */ 1419 1420 enum { 1421 DISCOVERED = 0x10, 1422 EXPLORED = 0x20, 1423 FALLTHROUGH = 1, 1424 BRANCH = 2, 1425 }; 1426 1427 #define STATE_LIST_MARK ((struct verifier_state_list *) -1L) 1428 1429 static int *insn_stack; /* stack of insns to process */ 1430 static int cur_stack; /* current stack index */ 1431 static int *insn_state; 1432 1433 /* t, w, e - match pseudo-code above: 1434 * t - index of current instruction 1435 * w - next instruction 1436 * e - edge 1437 */ 1438 static int push_insn(int t, int w, int e, struct verifier_env *env) 1439 { 1440 if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH)) 1441 return 0; 1442 1443 if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH)) 1444 return 0; 1445 1446 if (w < 0 || w >= env->prog->len) { 1447 verbose("jump out of range from insn %d to %d\n", t, w); 1448 return -EINVAL; 1449 } 1450 1451 if (e == BRANCH) 1452 /* mark branch target for state pruning */ 1453 env->explored_states[w] = STATE_LIST_MARK; 1454 1455 if (insn_state[w] == 0) { 1456 /* tree-edge */ 1457 insn_state[t] = DISCOVERED | e; 1458 insn_state[w] = DISCOVERED; 1459 if (cur_stack >= env->prog->len) 1460 return -E2BIG; 1461 insn_stack[cur_stack++] = w; 1462 return 1; 1463 } else if ((insn_state[w] & 0xF0) == DISCOVERED) { 1464 verbose("back-edge from insn %d to %d\n", t, w); 1465 return -EINVAL; 1466 } else if (insn_state[w] == EXPLORED) { 1467 /* forward- or cross-edge */ 1468 insn_state[t] = DISCOVERED | e; 1469 } else { 1470 verbose("insn state internal bug\n"); 1471 return -EFAULT; 1472 } 1473 return 0; 1474 } 1475 1476 /* non-recursive depth-first-search to detect loops in BPF program 1477 * loop == back-edge in directed graph 1478 */ 1479 static int check_cfg(struct verifier_env *env) 1480 { 1481 struct bpf_insn *insns = env->prog->insnsi; 1482 int insn_cnt = env->prog->len; 1483 int ret = 0; 1484 int i, t; 1485 1486 insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL); 1487 if 
		return -ENOMEM;

	insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
	if (!insn_stack) {
		kfree(insn_state);
		return -ENOMEM;
	}

	insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
	insn_stack[0] = 0; /* 0 is the first instruction */
	cur_stack = 1;

peek_stack:
	if (cur_stack == 0)
		goto check_state;
	t = insn_stack[cur_stack - 1];

	if (BPF_CLASS(insns[t].code) == BPF_JMP) {
		u8 opcode = BPF_OP(insns[t].code);

		if (opcode == BPF_EXIT) {
			goto mark_explored;
		} else if (opcode == BPF_CALL) {
			ret = push_insn(t, t + 1, FALLTHROUGH, env);
			if (ret == 1)
				goto peek_stack;
			else if (ret < 0)
				goto err_free;
		} else if (opcode == BPF_JA) {
			if (BPF_SRC(insns[t].code) != BPF_K) {
				ret = -EINVAL;
				goto err_free;
			}
			/* unconditional jump with single edge */
			ret = push_insn(t, t + insns[t].off + 1,
					FALLTHROUGH, env);
			if (ret == 1)
				goto peek_stack;
			else if (ret < 0)
				goto err_free;
			/* tell verifier to check for equivalent states
			 * after every call and jump
			 */
			if (t + 1 < insn_cnt)
				env->explored_states[t + 1] = STATE_LIST_MARK;
		} else {
			/* conditional jump with two edges */
			ret = push_insn(t, t + 1, FALLTHROUGH, env);
			if (ret == 1)
				goto peek_stack;
			else if (ret < 0)
				goto err_free;

			ret = push_insn(t, t + insns[t].off + 1, BRANCH, env);
			if (ret == 1)
				goto peek_stack;
			else if (ret < 0)
				goto err_free;
		}
	} else {
		/* all other non-branch instructions with single
		 * fall-through edge
		 */
		ret = push_insn(t, t + 1, FALLTHROUGH, env);
		if (ret == 1)
			goto peek_stack;
		else if (ret < 0)
			goto err_free;
	}

mark_explored:
	insn_state[t] = EXPLORED;
	if (cur_stack-- <= 0) {
		verbose("pop stack internal bug\n");
		ret = -EFAULT;
		goto err_free;
	}
	goto peek_stack;

check_state:
	for (i = 0; i < insn_cnt; i++) {
		if (insn_state[i] != EXPLORED) {
			verbose("unreachable insn %d\n", i);
			ret = -EINVAL;
			goto err_free;
		}
	}
	ret = 0; /* cfg looks good */

err_free:
	kfree(insn_state);
	kfree(insn_stack);
	return ret;
}

/* compare two verifier states
 *
 * all states stored in state_list are known to be valid, since
 * verifier reached 'bpf_exit' instruction through them
 *
 * this function is called when the verifier explores different branches of
 * execution popped from the state stack. If it sees an old state that has
 * more strict register state and more strict stack state then this execution
 * branch doesn't need to be explored further, since verifier already
 * concluded that more strict state leads to valid finish.
 *
 * Therefore two states are equivalent if register state is more conservative
 * and explored stack state is more conservative than the current one.
 * Example:
 *        explored                   current
 * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
 * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
 *
 * In other words if current stack state (one being explored) has more
 * valid slots than old one that already passed validation, it means
 * the verifier can stop exploring and conclude that current state is valid too
 *
 * Similarly with registers. If explored state has register type as invalid
 * whereas register type in current state is meaningful, it means that
 * the current state will reach 'bpf_exit' instruction safely
 */
static bool states_equal(struct verifier_state *old, struct verifier_state *cur)
{
	int i;

	for (i = 0; i < MAX_BPF_REG; i++) {
		if (memcmp(&old->regs[i], &cur->regs[i],
			   sizeof(old->regs[0])) != 0) {
			if (old->regs[i].type == NOT_INIT ||
			    (old->regs[i].type == UNKNOWN_VALUE &&
			     cur->regs[i].type != NOT_INIT))
				continue;
			return false;
		}
	}

	for (i = 0; i < MAX_BPF_STACK; i++) {
		if (old->stack_slot_type[i] == STACK_INVALID)
			continue;
		if (old->stack_slot_type[i] != cur->stack_slot_type[i])
			/* Ex: old explored (safe) state has STACK_SPILL in
			 * this stack slot, but current has STACK_MISC ->
			 * these verifier states are not equivalent,
			 * return false to continue verification of this path
			 */
			return false;
		if (i % BPF_REG_SIZE)
			continue;
		if (memcmp(&old->spilled_regs[i / BPF_REG_SIZE],
			   &cur->spilled_regs[i / BPF_REG_SIZE],
			   sizeof(old->spilled_regs[0])))
			/* when explored and current stack slot types are
			 * the same, check that stored pointers types
			 * are the same as well.
			 * Ex: explored safe path could have stored
			 * (struct reg_state) {.type = PTR_TO_STACK, .imm = -8}
			 * but current path has stored:
			 * (struct reg_state) {.type = PTR_TO_STACK, .imm = -16}
			 * such verifier states are not equivalent.
			 * return false to continue verification of this path
			 */
			return false;
		else
			continue;
	}
	return true;
}
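
/* Editor's example of the pruning states_equal() enables: after
 *    if (c) r2 = 1; else r2 = 2;
 * the join point sees CONST_IMM 1 vs CONST_IMM 2, states_equal() returns
 * false and both paths are explored. Had the first-explored path left r2
 * as UNKNOWN_VALUE, the second path would be pruned at the join.
 */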
Since there are no loops, we won't be 1681 * seeing this 'insn_idx' instruction again on the way to bpf_exit 1682 */ 1683 new_sl = kmalloc(sizeof(struct verifier_state_list), GFP_USER); 1684 if (!new_sl) 1685 return -ENOMEM; 1686 1687 /* add new state to the head of linked list */ 1688 memcpy(&new_sl->state, &env->cur_state, sizeof(env->cur_state)); 1689 new_sl->next = env->explored_states[insn_idx]; 1690 env->explored_states[insn_idx] = new_sl; 1691 return 0; 1692 } 1693 1694 static int do_check(struct verifier_env *env) 1695 { 1696 struct verifier_state *state = &env->cur_state; 1697 struct bpf_insn *insns = env->prog->insnsi; 1698 struct reg_state *regs = state->regs; 1699 int insn_cnt = env->prog->len; 1700 int insn_idx, prev_insn_idx = 0; 1701 int insn_processed = 0; 1702 bool do_print_state = false; 1703 1704 init_reg_state(regs); 1705 insn_idx = 0; 1706 for (;;) { 1707 struct bpf_insn *insn; 1708 u8 class; 1709 int err; 1710 1711 if (insn_idx >= insn_cnt) { 1712 verbose("invalid insn idx %d insn_cnt %d\n", 1713 insn_idx, insn_cnt); 1714 return -EFAULT; 1715 } 1716 1717 insn = &insns[insn_idx]; 1718 class = BPF_CLASS(insn->code); 1719 1720 if (++insn_processed > 32768) { 1721 verbose("BPF program is too large. Proccessed %d insn\n", 1722 insn_processed); 1723 return -E2BIG; 1724 } 1725 1726 err = is_state_visited(env, insn_idx); 1727 if (err < 0) 1728 return err; 1729 if (err == 1) { 1730 /* found equivalent state, can prune the search */ 1731 if (log_level) { 1732 if (do_print_state) 1733 verbose("\nfrom %d to %d: safe\n", 1734 prev_insn_idx, insn_idx); 1735 else 1736 verbose("%d: safe\n", insn_idx); 1737 } 1738 goto process_bpf_exit; 1739 } 1740 1741 if (log_level && do_print_state) { 1742 verbose("\nfrom %d to %d:", prev_insn_idx, insn_idx); 1743 print_verifier_state(env); 1744 do_print_state = false; 1745 } 1746 1747 if (log_level) { 1748 verbose("%d: ", insn_idx); 1749 print_bpf_insn(insn); 1750 } 1751 1752 if (class == BPF_ALU || class == BPF_ALU64) { 1753 err = check_alu_op(env, insn); 1754 if (err) 1755 return err; 1756 1757 } else if (class == BPF_LDX) { 1758 enum bpf_reg_type src_reg_type; 1759 1760 /* check for reserved fields is already done */ 1761 1762 /* check src operand */ 1763 err = check_reg_arg(regs, insn->src_reg, SRC_OP); 1764 if (err) 1765 return err; 1766 1767 err = check_reg_arg(regs, insn->dst_reg, DST_OP_NO_MARK); 1768 if (err) 1769 return err; 1770 1771 src_reg_type = regs[insn->src_reg].type; 1772 1773 /* check that memory (src_reg + off) is readable, 1774 * the state of dst_reg will be updated by this func 1775 */ 1776 err = check_mem_access(env, insn->src_reg, insn->off, 1777 BPF_SIZE(insn->code), BPF_READ, 1778 insn->dst_reg); 1779 if (err) 1780 return err; 1781 1782 if (BPF_SIZE(insn->code) != BPF_W) { 1783 insn_idx++; 1784 continue; 1785 } 1786 1787 if (insn->imm == 0) { 1788 /* saw a valid insn 1789 * dst_reg = *(u32 *)(src_reg + off) 1790 * use reserved 'imm' field to mark this insn 1791 */ 1792 insn->imm = src_reg_type; 1793 1794 } else if (src_reg_type != insn->imm && 1795 (src_reg_type == PTR_TO_CTX || 1796 insn->imm == PTR_TO_CTX)) { 1797 /* ABuser program is trying to use the same insn 1798 * dst_reg = *(u32*) (src_reg + off) 1799 * with different pointer types: 1800 * src_reg == ctx in one branch and 1801 * src_reg == stack|map in some other branch. 1802 * Reject it. 

		} else if (class == BPF_STX) {
			enum bpf_reg_type dst_reg_type;

			if (BPF_MODE(insn->code) == BPF_XADD) {
				err = check_xadd(env, insn);
				if (err)
					return err;
				insn_idx++;
				continue;
			}

			/* check src1 operand */
			err = check_reg_arg(regs, insn->src_reg, SRC_OP);
			if (err)
				return err;
			/* check src2 operand */
			err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
			if (err)
				return err;

			dst_reg_type = regs[insn->dst_reg].type;

			/* check that memory (dst_reg + off) is writeable */
			err = check_mem_access(env, insn->dst_reg, insn->off,
					       BPF_SIZE(insn->code), BPF_WRITE,
					       insn->src_reg);
			if (err)
				return err;

			if (insn->imm == 0) {
				insn->imm = dst_reg_type;
			} else if (dst_reg_type != insn->imm &&
				   (dst_reg_type == PTR_TO_CTX ||
				    insn->imm == PTR_TO_CTX)) {
				verbose("same insn cannot be used with different pointers\n");
				return -EINVAL;
			}

		} else if (class == BPF_ST) {
			if (BPF_MODE(insn->code) != BPF_MEM ||
			    insn->src_reg != BPF_REG_0) {
				verbose("BPF_ST uses reserved fields\n");
				return -EINVAL;
			}
			/* check that dst_reg (the memory base) is readable */
			err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
			if (err)
				return err;

			/* check that memory (dst_reg + off) is writeable */
			err = check_mem_access(env, insn->dst_reg, insn->off,
					       BPF_SIZE(insn->code), BPF_WRITE,
					       -1);
			if (err)
				return err;

		} else if (class == BPF_JMP) {
			u8 opcode = BPF_OP(insn->code);

			if (opcode == BPF_CALL) {
				if (BPF_SRC(insn->code) != BPF_K ||
				    insn->off != 0 ||
				    insn->src_reg != BPF_REG_0 ||
				    insn->dst_reg != BPF_REG_0) {
					verbose("BPF_CALL uses reserved fields\n");
					return -EINVAL;
				}

				err = check_call(env, insn->imm);
				if (err)
					return err;

			} else if (opcode == BPF_JA) {
				if (BPF_SRC(insn->code) != BPF_K ||
				    insn->imm != 0 ||
				    insn->src_reg != BPF_REG_0 ||
				    insn->dst_reg != BPF_REG_0) {
					verbose("BPF_JA uses reserved fields\n");
					return -EINVAL;
				}

				insn_idx += insn->off + 1;
				continue;

			} else if (opcode == BPF_EXIT) {
				if (BPF_SRC(insn->code) != BPF_K ||
				    insn->imm != 0 ||
				    insn->src_reg != BPF_REG_0 ||
				    insn->dst_reg != BPF_REG_0) {
					verbose("BPF_EXIT uses reserved fields\n");
					return -EINVAL;
				}

				/* eBPF calling convention is such that R0 is
				 * used to return the value from eBPF program.
				/* eBPF calling convention is such that R0 is used
				 * to return the value from eBPF program.
				 * Make sure that it's readable at this time
				 * of bpf_exit, which means that program wrote
				 * something into it earlier
				 */
				err = check_reg_arg(regs, BPF_REG_0, SRC_OP);
				if (err)
					return err;

				if (is_pointer_value(env, BPF_REG_0)) {
					verbose("R0 leaks addr as return value\n");
					return -EACCES;
				}

process_bpf_exit:
				insn_idx = pop_stack(env, &prev_insn_idx);
				if (insn_idx < 0) {
					break;
				} else {
					do_print_state = true;
					continue;
				}
			} else {
				err = check_cond_jmp_op(env, insn, &insn_idx);
				if (err)
					return err;
			}
		} else if (class == BPF_LD) {
			u8 mode = BPF_MODE(insn->code);

			if (mode == BPF_ABS || mode == BPF_IND) {
				err = check_ld_abs(env, insn);
				if (err)
					return err;

			} else if (mode == BPF_IMM) {
				err = check_ld_imm(env, insn);
				if (err)
					return err;

				insn_idx++;
			} else {
				verbose("invalid BPF_LD mode\n");
				return -EINVAL;
			}
		} else {
			verbose("unknown insn class %d\n", class);
			return -EINVAL;
		}

		insn_idx++;
	}

	return 0;
}

/* look for pseudo eBPF instructions that access map FDs and
 * replace them with actual map pointers
 */
static int replace_map_fd_with_map_ptr(struct verifier_env *env)
{
	struct bpf_insn *insn = env->prog->insnsi;
	int insn_cnt = env->prog->len;
	int i, j;

	for (i = 0; i < insn_cnt; i++, insn++) {
		if (BPF_CLASS(insn->code) == BPF_LDX &&
		    (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
			verbose("BPF_LDX uses reserved fields\n");
			return -EINVAL;
		}

		if (BPF_CLASS(insn->code) == BPF_STX &&
		    ((BPF_MODE(insn->code) != BPF_MEM &&
		      BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) {
			verbose("BPF_STX uses reserved fields\n");
			return -EINVAL;
		}

		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
			struct bpf_map *map;
			struct fd f;

			if (i == insn_cnt - 1 || insn[1].code != 0 ||
			    insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
			    insn[1].off != 0) {
				verbose("invalid bpf_ld_imm64 insn\n");
				return -EINVAL;
			}

			if (insn->src_reg == 0)
				/* valid generic load 64-bit imm */
				goto next_insn;

			if (insn->src_reg != BPF_PSEUDO_MAP_FD) {
				verbose("unrecognized bpf_ld_imm64 insn\n");
				return -EINVAL;
			}

			f = fdget(insn->imm);
			map = __bpf_map_get(f);
			if (IS_ERR(map)) {
				verbose("fd %d is not pointing to valid bpf_map\n",
					insn->imm);
				fdput(f);
				return PTR_ERR(map);
			}

			/* store map pointer inside BPF_LD_IMM64 instruction */
			insn[0].imm = (u32) (unsigned long) map;
			insn[1].imm = ((u64) (unsigned long) map) >> 32;

			/* check whether we recorded this map already */
			for (j = 0; j < env->used_map_cnt; j++)
				if (env->used_maps[j] == map) {
					fdput(f);
					goto next_insn;
				}

			if (env->used_map_cnt >= MAX_USED_MAPS) {
				fdput(f);
				return -E2BIG;
			}

			/* remember this map */
			env->used_maps[env->used_map_cnt++] = map;

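			/* Layout reminder (matching the patching above):
			 * BPF_LD_MAP_FD(BPF_REG_1, fd) expands into two insns;
			 * before patching, insn[0].imm holds the map fd and
			 * insn[0].src_reg == BPF_PSEUDO_MAP_FD; afterwards
			 * insn[0].imm / insn[1].imm hold the low / high 32
			 * bits of the 'struct bpf_map *' respectively.
			 */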
			/* hold the map. If the program is rejected by verifier,
			 * the map will be released by release_maps() or it
			 * will be used by the valid program until it's unloaded
			 * and all maps are released in free_bpf_prog_info()
			 */
			bpf_map_inc(map, false);
			fdput(f);
next_insn:
			insn++;
			i++;
		}
	}

	/* now all pseudo BPF_LD_IMM64 instructions load valid
	 * 'struct bpf_map *' into a register instead of user map_fd.
	 * These pointers will be used later by verifier to validate map access.
	 */
	return 0;
}

/* drop refcnt of maps used by the rejected program */
static void release_maps(struct verifier_env *env)
{
	int i;

	for (i = 0; i < env->used_map_cnt; i++)
		bpf_map_put(env->used_maps[i]);
}

/* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
static void convert_pseudo_ld_imm64(struct verifier_env *env)
{
	struct bpf_insn *insn = env->prog->insnsi;
	int insn_cnt = env->prog->len;
	int i;

	for (i = 0; i < insn_cnt; i++, insn++)
		if (insn->code == (BPF_LD | BPF_IMM | BPF_DW))
			insn->src_reg = 0;
}

static void adjust_branches(struct bpf_prog *prog, int pos, int delta)
{
	struct bpf_insn *insn = prog->insnsi;
	int insn_cnt = prog->len;
	int i;

	for (i = 0; i < insn_cnt; i++, insn++) {
		if (BPF_CLASS(insn->code) != BPF_JMP ||
		    BPF_OP(insn->code) == BPF_CALL ||
		    BPF_OP(insn->code) == BPF_EXIT)
			continue;

		/* adjust offset of jmps if necessary */
		if (i < pos && i + insn->off + 1 > pos)
			insn->off += delta;
		else if (i > pos && i + insn->off + 1 < pos)
			insn->off -= delta;
	}
}

/* convert load instructions that access fields of 'struct __sk_buff'
 * into a sequence of instructions that access fields of 'struct sk_buff'
 */
static int convert_ctx_accesses(struct verifier_env *env)
{
	struct bpf_insn *insn = env->prog->insnsi;
	int insn_cnt = env->prog->len;
	struct bpf_insn insn_buf[16];
	struct bpf_prog *new_prog;
	u32 cnt;
	int i;
	enum bpf_access_type type;

	if (!env->prog->aux->ops->convert_ctx_access)
		return 0;

	for (i = 0; i < insn_cnt; i++, insn++) {
		if (insn->code == (BPF_LDX | BPF_MEM | BPF_W))
			type = BPF_READ;
		else if (insn->code == (BPF_STX | BPF_MEM | BPF_W))
			type = BPF_WRITE;
		else
			continue;

		if (insn->imm != PTR_TO_CTX) {
			/* clear internal mark */
			insn->imm = 0;
			continue;
		}

		cnt = env->prog->aux->ops->
			convert_ctx_access(type, insn->dst_reg, insn->src_reg,
					   insn->off, insn_buf, env->prog);
		if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
			verbose("bpf verifier is misconfigured\n");
			return -EINVAL;
		}

		if (cnt == 1) {
			memcpy(insn, insn_buf, sizeof(*insn));
			continue;
		}

		/* several new insns need to be inserted. Make room for them */
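		/* Worked example (illustrative numbers): if the single ctx
		 * load at position 3 expands into cnt == 3 insns, the
		 * program grows by cnt - 1 == 2 slots, every insn after
		 * position 3 shifts right by 2, and adjust_branches() below
		 * widens each forward jump spanning position 3 by 2 (and
		 * shrinks backward jumps over it by the same amount).
		 */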
		insn_cnt += cnt - 1;
		new_prog = bpf_prog_realloc(env->prog,
					    bpf_prog_size(insn_cnt),
					    GFP_USER);
		if (!new_prog)
			return -ENOMEM;

		new_prog->len = insn_cnt;

		memmove(new_prog->insnsi + i + cnt, new_prog->insnsi + i + 1,
			sizeof(*insn) * (insn_cnt - i - cnt));

		/* copy substitute insns in place of load instruction */
		memcpy(new_prog->insnsi + i, insn_buf, sizeof(*insn) * cnt);

		/* adjust branches in the whole program */
		adjust_branches(new_prog, i, cnt - 1);

		/* keep walking new program and skip insns we just inserted */
		env->prog = new_prog;
		insn = new_prog->insnsi + i + cnt - 1;
		i += cnt - 1;
	}

	return 0;
}

static void free_states(struct verifier_env *env)
{
	struct verifier_state_list *sl, *sln;
	int i;

	if (!env->explored_states)
		return;

	for (i = 0; i < env->prog->len; i++) {
		sl = env->explored_states[i];

		if (sl)
			while (sl != STATE_LIST_MARK) {
				sln = sl->next;
				kfree(sl);
				sl = sln;
			}
	}

	kfree(env->explored_states);
}

int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
{
	char __user *log_ubuf = NULL;
	struct verifier_env *env;
	int ret = -EINVAL;

	if ((*prog)->len <= 0 || (*prog)->len > BPF_MAXINSNS)
		return -E2BIG;

	/* 'struct verifier_env' can be global, but since it's not small,
	 * allocate/free it every time bpf_check() is called
	 */
	env = kzalloc(sizeof(struct verifier_env), GFP_KERNEL);
	if (!env)
		return -ENOMEM;

	env->prog = *prog;

	/* grab the mutex to protect a few globals used by verifier */
	mutex_lock(&bpf_verifier_lock);

	if (attr->log_level || attr->log_buf || attr->log_size) {
		/* user requested verbose verifier output
		 * and supplied buffer to store the verification trace
		 */
		log_level = attr->log_level;
		log_ubuf = (char __user *) (unsigned long) attr->log_buf;
		log_size = attr->log_size;
		log_len = 0;

		ret = -EINVAL;
		/* log_* values have to be sane */
		if (log_size < 128 || log_size > UINT_MAX >> 8 ||
		    log_level == 0 || log_ubuf == NULL)
			goto free_env;

		ret = -ENOMEM;
		log_buf = vmalloc(log_size);
		if (!log_buf)
			goto free_env;
	} else {
		log_level = 0;
	}

	ret = replace_map_fd_with_map_ptr(env);
	if (ret < 0)
		goto skip_full_check;

	env->explored_states = kcalloc(env->prog->len,
				       sizeof(struct verifier_state_list *),
				       GFP_USER);
	ret = -ENOMEM;
	if (!env->explored_states)
		goto skip_full_check;

	ret = check_cfg(env);
	if (ret < 0)
		goto skip_full_check;

	env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);

	ret = do_check(env);

skip_full_check:
	while (pop_stack(env, NULL) >= 0);
	free_states(env);

	if (ret == 0)
		/* program is valid, convert *(u32*)(ctx + off) accesses */
		ret = convert_ctx_accesses(env);

	if (log_level && log_len >= log_size - 1) {
		BUG_ON(log_len >= log_size);
		/* verifier log exceeded user supplied buffer */
		ret = -ENOSPC;
		/* fall through to return what was recorded */
	}

	/* copy verifier log back to user space including trailing zero */
	if (log_level && copy_to_user(log_ubuf, log_buf, log_len + 1) != 0) {
		ret = -EFAULT;
		goto free_log_buf;
	}
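
	/* A minimal user-space sketch of the log contract handled above
	 * (illustrative, not part of this file):
	 *
	 *   char log[65536];
	 *   attr.log_level = 1;
	 *   attr.log_buf = (__u64) (unsigned long) log;
	 *   attr.log_size = sizeof(log);
	 *
	 * On -ENOSPC the trace did not fit, but whatever was recorded is
	 * still copied back, NUL-terminated.
	 */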

	if (ret == 0 && env->used_map_cnt) {
		/* if program passed verifier, update used_maps in bpf_prog_info */
		env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
							  sizeof(env->used_maps[0]),
							  GFP_KERNEL);

		if (!env->prog->aux->used_maps) {
			ret = -ENOMEM;
			goto free_log_buf;
		}

		memcpy(env->prog->aux->used_maps, env->used_maps,
		       sizeof(env->used_maps[0]) * env->used_map_cnt);
		env->prog->aux->used_map_cnt = env->used_map_cnt;

		/* program is valid. Convert pseudo bpf_ld_imm64 into generic
		 * bpf_ld_imm64 instructions
		 */
		convert_pseudo_ld_imm64(env);
	}

free_log_buf:
	if (log_level)
		vfree(log_buf);
free_env:
	if (!env->prog->aux->used_maps)
		/* if we didn't copy map pointers into bpf_prog_info, release
		 * them now. Otherwise free_bpf_prog_info() will release them.
		 */
		release_maps(env);
	*prog = env->prog;
	kfree(env);
	mutex_unlock(&bpf_verifier_lock);
	return ret;
}