/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/filter.h>
#include <net/netlink.h>
#include <linux/file.h>
#include <linux/vmalloc.h>
#include <linux/stringify.h>

#include "disasm.h"

static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
#define BPF_PROG_TYPE(_id, _name) \
	[_id] = & _name ## _verifier_ops,
#define BPF_MAP_TYPE(_id, _ops)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};

/* bpf_check() is a static code analyzer that walks eBPF program
 * instruction by instruction and updates register/stack state.
 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
 *
 * The first pass is depth-first-search to check that the program is a DAG.
 * It rejects the following programs:
 * - larger than BPF_MAXINSNS insns
 * - if loop is present (detected via back-edge)
 * - unreachable insns exist (shouldn't be a forest. program = one function)
 * - out of bounds or malformed jumps
 * The second pass is all possible path descent from the 1st insn.
 * Since it's analyzing all paths through the program, the length of the
 * analysis is limited to 64k insn, which may be hit even if total number of
 * insn is less than 4K, but there are too many branches that change stack/regs.
 * Number of 'branches to be analyzed' is limited to 1k
 *
 * On entry to each instruction, each register has a type, and the instruction
 * changes the types of the registers depending on instruction semantics.
 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
 * copied to R1.
 *
 * All registers are 64-bit.
 * R0 - return register
 * R1-R5 argument passing registers
 * R6-R9 callee saved registers
 * R10 - frame pointer read-only
 *
 * At the start of BPF program the register R1 contains a pointer to bpf_context
 * and has type PTR_TO_CTX.
 *
 * Verifier tracks arithmetic operations on pointers in case:
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
 * 1st insn copies R10 (which has FRAME_PTR) type into R1
 * and 2nd arithmetic instruction is pattern matched to recognize
 * that it wants to construct a pointer to some element within stack.
 * So after 2nd insn, the register R1 has type PTR_TO_STACK
 * (and -20 constant is saved for further stack bounds checking).
 * Meaning that this reg is a pointer to stack plus known immediate constant.
 *
 * Most of the time the registers have SCALAR_VALUE type, which
 * means the register has some value, but it's not a valid pointer.
 * (like pointer plus pointer becomes SCALAR_VALUE type)
 *
 * When verifier sees load or store instructions the type of base register
 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK.
 * These are three pointer types recognized by check_mem_access() function.
 *
 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
 * and the range of [ptr, ptr + map's value_size) is accessible.
 *
 * registers used to pass values to function calls are checked against
 * function argument constraints.
 *
 * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
 * It means that the register type passed to this function must be
 * PTR_TO_STACK and it will be used inside the function as
 * 'pointer to map element key'
 *
 * For example the argument constraints for bpf_map_lookup_elem():
 *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
 *   .arg1_type = ARG_CONST_MAP_PTR,
 *   .arg2_type = ARG_PTR_TO_MAP_KEY,
 *
 * ret_type says that this function returns 'pointer to map elem value or null'
 * function expects 1st argument to be a const pointer to 'struct bpf_map' and
 * 2nd argument should be a pointer to stack, which will be used inside
 * the helper function as a pointer to map element key.
 *
 * On the kernel side the helper function looks like:
 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 * {
 *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
 *    void *key = (void *) (unsigned long) r2;
 *    void *value;
 *
 *    here kernel can access 'key' and 'map' pointers safely, knowing that
 *    [key, key + map->key_size) bytes are valid and were initialized on
 *    the stack of eBPF program.
 * }
 *
 * Corresponding eBPF program may look like:
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 * here verifier looks at prototype of map_lookup_elem() and sees:
 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
 *
 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
 * and were initialized prior to this call.
 * If it's ok, then verifier allows this BPF_CALL insn and looks at
 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
 * returns either pointer to map value or NULL.
 *
 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
 * insn, the register holding that pointer in the true branch changes state to
 * PTR_TO_MAP_VALUE and the same register changes state to a scalar known to be
 * zero (SCALAR_VALUE) in the false branch. See check_cond_jmp_op().
 *
 * After the call R0 is set to return type of the function and registers R1-R5
 * are set to NOT_INIT to indicate that they are no longer readable.
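 *
 * As an illustrative continuation of the snippet above (not taken verbatim
 * from this file), the program must null-check R0 before dereferencing it:
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 *    BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), // skip the store if R0 == NULL
 *    BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),   // here R0 type is PTR_TO_MAP_VALUE
 *    BPF_MOV64_IMM(BPF_REG_0, 0),
 *    BPF_EXIT_INSN(),
 * In the fall-through (R0 != 0) path the store is accepted because R0 has been
 * promoted to PTR_TO_MAP_VALUE; in the taken path R0 is known to be zero and
 * any dereference of it would be rejected.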
 */

/* verifier_state + insn_idx are pushed to stack when branch is encountered */
struct bpf_verifier_stack_elem {
	/* verifier state is 'st'
	 * before processing instruction 'insn_idx'
	 * and after processing instruction 'prev_insn_idx'
	 */
	struct bpf_verifier_state st;
	int insn_idx;
	int prev_insn_idx;
	struct bpf_verifier_stack_elem *next;
};

#define BPF_COMPLEXITY_LIMIT_INSNS	131072
#define BPF_COMPLEXITY_LIMIT_STACK	1024

#define BPF_MAP_PTR_POISON ((void *)0xeB9F + POISON_POINTER_DELTA)

struct bpf_call_arg_meta {
	struct bpf_map *map_ptr;
	bool raw_mode;
	bool pkt_access;
	int regno;
	int access_size;
};

static DEFINE_MUTEX(bpf_verifier_lock);

/* log_level controls verbosity level of eBPF verifier.
 * verbose() is used to dump the verification trace to the log, so the user
 * can figure out what's wrong with the program
 */
static __printf(2, 3) void verbose(struct bpf_verifier_env *env,
				   const char *fmt, ...)
{
	struct bpf_verifer_log *log = &env->log;
	unsigned int n;
	va_list args;

	if (!log->level || !log->ubuf || bpf_verifier_log_full(log))
		return;

	va_start(args, fmt);
	n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);
	va_end(args);

	WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
		  "verifier log line truncated - local buffer too short\n");

	n = min(log->len_total - log->len_used - 1, n);
	log->kbuf[n] = '\0';

	if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
		log->len_used += n;
	else
		log->ubuf = NULL;
}

static bool type_is_pkt_pointer(enum bpf_reg_type type)
{
	return type == PTR_TO_PACKET ||
	       type == PTR_TO_PACKET_META;
}

/* string representation of 'enum bpf_reg_type' */
static const char * const reg_type_str[] = {
	[NOT_INIT]		= "?",
	[SCALAR_VALUE]		= "inv",
	[PTR_TO_CTX]		= "ctx",
	[CONST_PTR_TO_MAP]	= "map_ptr",
	[PTR_TO_MAP_VALUE]	= "map_value",
	[PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
	[PTR_TO_STACK]		= "fp",
	[PTR_TO_PACKET]		= "pkt",
	[PTR_TO_PACKET_META]	= "pkt_meta",
	[PTR_TO_PACKET_END]	= "pkt_end",
};

static void print_verifier_state(struct bpf_verifier_env *env,
				 struct bpf_verifier_state *state)
{
	struct bpf_reg_state *reg;
	enum bpf_reg_type t;
	int i;

	for (i = 0; i < MAX_BPF_REG; i++) {
		reg = &state->regs[i];
		t = reg->type;
		if (t == NOT_INIT)
			continue;
		verbose(env, " R%d=%s", i, reg_type_str[t]);
		if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
		    tnum_is_const(reg->var_off)) {
			/* reg->off should be 0 for SCALAR_VALUE */
			verbose(env, "%lld", reg->var_off.value + reg->off);
		} else {
			verbose(env, "(id=%d", reg->id);
			if (t != SCALAR_VALUE)
				verbose(env, ",off=%d", reg->off);
			if (type_is_pkt_pointer(t))
				verbose(env, ",r=%d", reg->range);
			else if (t == CONST_PTR_TO_MAP ||
				 t == PTR_TO_MAP_VALUE ||
				 t == PTR_TO_MAP_VALUE_OR_NULL)
				verbose(env, ",ks=%d,vs=%d",
					reg->map_ptr->key_size,
					reg->map_ptr->value_size);
			if (tnum_is_const(reg->var_off)) {
				/* Typically an immediate SCALAR_VALUE, but
				 * could be a pointer whose offset is too big
				 * for reg->off
				 */
				verbose(env, ",imm=%llx", reg->var_off.value);
			} else {
				if (reg->smin_value != reg->umin_value &&
				    reg->smin_value != S64_MIN)
					verbose(env, ",smin_value=%lld",
						(long
long)reg->smin_value); 259 if (reg->smax_value != reg->umax_value && 260 reg->smax_value != S64_MAX) 261 verbose(env, ",smax_value=%lld", 262 (long long)reg->smax_value); 263 if (reg->umin_value != 0) 264 verbose(env, ",umin_value=%llu", 265 (unsigned long long)reg->umin_value); 266 if (reg->umax_value != U64_MAX) 267 verbose(env, ",umax_value=%llu", 268 (unsigned long long)reg->umax_value); 269 if (!tnum_is_unknown(reg->var_off)) { 270 char tn_buf[48]; 271 272 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 273 verbose(env, ",var_off=%s", tn_buf); 274 } 275 } 276 verbose(env, ")"); 277 } 278 } 279 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { 280 if (state->stack[i].slot_type[0] == STACK_SPILL) 281 verbose(env, " fp%d=%s", 282 -MAX_BPF_STACK + i * BPF_REG_SIZE, 283 reg_type_str[state->stack[i].spilled_ptr.type]); 284 } 285 verbose(env, "\n"); 286 } 287 288 static int copy_stack_state(struct bpf_verifier_state *dst, 289 const struct bpf_verifier_state *src) 290 { 291 if (!src->stack) 292 return 0; 293 if (WARN_ON_ONCE(dst->allocated_stack < src->allocated_stack)) { 294 /* internal bug, make state invalid to reject the program */ 295 memset(dst, 0, sizeof(*dst)); 296 return -EFAULT; 297 } 298 memcpy(dst->stack, src->stack, 299 sizeof(*src->stack) * (src->allocated_stack / BPF_REG_SIZE)); 300 return 0; 301 } 302 303 /* do_check() starts with zero-sized stack in struct bpf_verifier_state to 304 * make it consume minimal amount of memory. check_stack_write() access from 305 * the program calls into realloc_verifier_state() to grow the stack size. 306 * Note there is a non-zero 'parent' pointer inside bpf_verifier_state 307 * which this function copies over. It points to previous bpf_verifier_state 308 * which is never reallocated 309 */ 310 static int realloc_verifier_state(struct bpf_verifier_state *state, int size, 311 bool copy_old) 312 { 313 u32 old_size = state->allocated_stack; 314 struct bpf_stack_state *new_stack; 315 int slot = size / BPF_REG_SIZE; 316 317 if (size <= old_size || !size) { 318 if (copy_old) 319 return 0; 320 state->allocated_stack = slot * BPF_REG_SIZE; 321 if (!size && old_size) { 322 kfree(state->stack); 323 state->stack = NULL; 324 } 325 return 0; 326 } 327 new_stack = kmalloc_array(slot, sizeof(struct bpf_stack_state), 328 GFP_KERNEL); 329 if (!new_stack) 330 return -ENOMEM; 331 if (copy_old) { 332 if (state->stack) 333 memcpy(new_stack, state->stack, 334 sizeof(*new_stack) * (old_size / BPF_REG_SIZE)); 335 memset(new_stack + old_size / BPF_REG_SIZE, 0, 336 sizeof(*new_stack) * (size - old_size) / BPF_REG_SIZE); 337 } 338 state->allocated_stack = slot * BPF_REG_SIZE; 339 kfree(state->stack); 340 state->stack = new_stack; 341 return 0; 342 } 343 344 static void free_verifier_state(struct bpf_verifier_state *state, 345 bool free_self) 346 { 347 kfree(state->stack); 348 if (free_self) 349 kfree(state); 350 } 351 352 /* copy verifier state from src to dst growing dst stack space 353 * when necessary to accommodate larger src stack 354 */ 355 static int copy_verifier_state(struct bpf_verifier_state *dst, 356 const struct bpf_verifier_state *src) 357 { 358 int err; 359 360 err = realloc_verifier_state(dst, src->allocated_stack, false); 361 if (err) 362 return err; 363 memcpy(dst, src, offsetof(struct bpf_verifier_state, allocated_stack)); 364 return copy_stack_state(dst, src); 365 } 366 367 static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx, 368 int *insn_idx) 369 { 370 struct bpf_verifier_state *cur = env->cur_state; 371 struct 
bpf_verifier_stack_elem *elem, *head = env->head; 372 int err; 373 374 if (env->head == NULL) 375 return -ENOENT; 376 377 if (cur) { 378 err = copy_verifier_state(cur, &head->st); 379 if (err) 380 return err; 381 } 382 if (insn_idx) 383 *insn_idx = head->insn_idx; 384 if (prev_insn_idx) 385 *prev_insn_idx = head->prev_insn_idx; 386 elem = head->next; 387 free_verifier_state(&head->st, false); 388 kfree(head); 389 env->head = elem; 390 env->stack_size--; 391 return 0; 392 } 393 394 static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env, 395 int insn_idx, int prev_insn_idx) 396 { 397 struct bpf_verifier_state *cur = env->cur_state; 398 struct bpf_verifier_stack_elem *elem; 399 int err; 400 401 elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL); 402 if (!elem) 403 goto err; 404 405 elem->insn_idx = insn_idx; 406 elem->prev_insn_idx = prev_insn_idx; 407 elem->next = env->head; 408 env->head = elem; 409 env->stack_size++; 410 err = copy_verifier_state(&elem->st, cur); 411 if (err) 412 goto err; 413 if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) { 414 verbose(env, "BPF program is too complex\n"); 415 goto err; 416 } 417 return &elem->st; 418 err: 419 /* pop all elements and return */ 420 while (!pop_stack(env, NULL, NULL)); 421 return NULL; 422 } 423 424 #define CALLER_SAVED_REGS 6 425 static const int caller_saved[CALLER_SAVED_REGS] = { 426 BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5 427 }; 428 429 static void __mark_reg_not_init(struct bpf_reg_state *reg); 430 431 /* Mark the unknown part of a register (variable offset or scalar value) as 432 * known to have the value @imm. 433 */ 434 static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm) 435 { 436 reg->id = 0; 437 reg->var_off = tnum_const(imm); 438 reg->smin_value = (s64)imm; 439 reg->smax_value = (s64)imm; 440 reg->umin_value = imm; 441 reg->umax_value = imm; 442 } 443 444 /* Mark the 'variable offset' part of a register as zero. This should be 445 * used only on registers holding a pointer type. 446 */ 447 static void __mark_reg_known_zero(struct bpf_reg_state *reg) 448 { 449 __mark_reg_known(reg, 0); 450 } 451 452 static void mark_reg_known_zero(struct bpf_verifier_env *env, 453 struct bpf_reg_state *regs, u32 regno) 454 { 455 if (WARN_ON(regno >= MAX_BPF_REG)) { 456 verbose(env, "mark_reg_known_zero(regs, %u)\n", regno); 457 /* Something bad happened, let's kill all regs */ 458 for (regno = 0; regno < MAX_BPF_REG; regno++) 459 __mark_reg_not_init(regs + regno); 460 return; 461 } 462 __mark_reg_known_zero(regs + regno); 463 } 464 465 static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg) 466 { 467 return type_is_pkt_pointer(reg->type); 468 } 469 470 static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg) 471 { 472 return reg_is_pkt_pointer(reg) || 473 reg->type == PTR_TO_PACKET_END; 474 } 475 476 /* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */ 477 static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg, 478 enum bpf_reg_type which) 479 { 480 /* The register can already have a range from prior markings. 481 * This is fine as long as it hasn't been advanced from its 482 * origin. 
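 * For example (illustrative): a PTR_TO_PACKET loaded directly from ctx->data
 * has off == 0 and a constant var_off of 0, so it matches here; after an insn
 * like "r2 += 14" the same register no longer does.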
483 */ 484 return reg->type == which && 485 reg->id == 0 && 486 reg->off == 0 && 487 tnum_equals_const(reg->var_off, 0); 488 } 489 490 /* Attempts to improve min/max values based on var_off information */ 491 static void __update_reg_bounds(struct bpf_reg_state *reg) 492 { 493 /* min signed is max(sign bit) | min(other bits) */ 494 reg->smin_value = max_t(s64, reg->smin_value, 495 reg->var_off.value | (reg->var_off.mask & S64_MIN)); 496 /* max signed is min(sign bit) | max(other bits) */ 497 reg->smax_value = min_t(s64, reg->smax_value, 498 reg->var_off.value | (reg->var_off.mask & S64_MAX)); 499 reg->umin_value = max(reg->umin_value, reg->var_off.value); 500 reg->umax_value = min(reg->umax_value, 501 reg->var_off.value | reg->var_off.mask); 502 } 503 504 /* Uses signed min/max values to inform unsigned, and vice-versa */ 505 static void __reg_deduce_bounds(struct bpf_reg_state *reg) 506 { 507 /* Learn sign from signed bounds. 508 * If we cannot cross the sign boundary, then signed and unsigned bounds 509 * are the same, so combine. This works even in the negative case, e.g. 510 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff. 511 */ 512 if (reg->smin_value >= 0 || reg->smax_value < 0) { 513 reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value, 514 reg->umin_value); 515 reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value, 516 reg->umax_value); 517 return; 518 } 519 /* Learn sign from unsigned bounds. Signed bounds cross the sign 520 * boundary, so we must be careful. 521 */ 522 if ((s64)reg->umax_value >= 0) { 523 /* Positive. We can't learn anything from the smin, but smax 524 * is positive, hence safe. 525 */ 526 reg->smin_value = reg->umin_value; 527 reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value, 528 reg->umax_value); 529 } else if ((s64)reg->umin_value < 0) { 530 /* Negative. We can't learn anything from the smax, but smin 531 * is negative, hence safe. 532 */ 533 reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value, 534 reg->umin_value); 535 reg->smax_value = reg->umax_value; 536 } 537 } 538 539 /* Attempts to improve var_off based on unsigned min/max information */ 540 static void __reg_bound_offset(struct bpf_reg_state *reg) 541 { 542 reg->var_off = tnum_intersect(reg->var_off, 543 tnum_range(reg->umin_value, 544 reg->umax_value)); 545 } 546 547 /* Reset the min/max bounds of a register */ 548 static void __mark_reg_unbounded(struct bpf_reg_state *reg) 549 { 550 reg->smin_value = S64_MIN; 551 reg->smax_value = S64_MAX; 552 reg->umin_value = 0; 553 reg->umax_value = U64_MAX; 554 } 555 556 /* Mark a register as having a completely unknown (scalar) value. 
*/ 557 static void __mark_reg_unknown(struct bpf_reg_state *reg) 558 { 559 reg->type = SCALAR_VALUE; 560 reg->id = 0; 561 reg->off = 0; 562 reg->var_off = tnum_unknown; 563 __mark_reg_unbounded(reg); 564 } 565 566 static void mark_reg_unknown(struct bpf_verifier_env *env, 567 struct bpf_reg_state *regs, u32 regno) 568 { 569 if (WARN_ON(regno >= MAX_BPF_REG)) { 570 verbose(env, "mark_reg_unknown(regs, %u)\n", regno); 571 /* Something bad happened, let's kill all regs */ 572 for (regno = 0; regno < MAX_BPF_REG; regno++) 573 __mark_reg_not_init(regs + regno); 574 return; 575 } 576 __mark_reg_unknown(regs + regno); 577 } 578 579 static void __mark_reg_not_init(struct bpf_reg_state *reg) 580 { 581 __mark_reg_unknown(reg); 582 reg->type = NOT_INIT; 583 } 584 585 static void mark_reg_not_init(struct bpf_verifier_env *env, 586 struct bpf_reg_state *regs, u32 regno) 587 { 588 if (WARN_ON(regno >= MAX_BPF_REG)) { 589 verbose(env, "mark_reg_not_init(regs, %u)\n", regno); 590 /* Something bad happened, let's kill all regs */ 591 for (regno = 0; regno < MAX_BPF_REG; regno++) 592 __mark_reg_not_init(regs + regno); 593 return; 594 } 595 __mark_reg_not_init(regs + regno); 596 } 597 598 static void init_reg_state(struct bpf_verifier_env *env, 599 struct bpf_reg_state *regs) 600 { 601 int i; 602 603 for (i = 0; i < MAX_BPF_REG; i++) { 604 mark_reg_not_init(env, regs, i); 605 regs[i].live = REG_LIVE_NONE; 606 } 607 608 /* frame pointer */ 609 regs[BPF_REG_FP].type = PTR_TO_STACK; 610 mark_reg_known_zero(env, regs, BPF_REG_FP); 611 612 /* 1st arg to a function */ 613 regs[BPF_REG_1].type = PTR_TO_CTX; 614 mark_reg_known_zero(env, regs, BPF_REG_1); 615 } 616 617 enum reg_arg_type { 618 SRC_OP, /* register is used as source operand */ 619 DST_OP, /* register is used as destination operand */ 620 DST_OP_NO_MARK /* same as above, check only, don't mark */ 621 }; 622 623 static void mark_reg_read(const struct bpf_verifier_state *state, u32 regno) 624 { 625 struct bpf_verifier_state *parent = state->parent; 626 627 if (regno == BPF_REG_FP) 628 /* We don't need to worry about FP liveness because it's read-only */ 629 return; 630 631 while (parent) { 632 /* if read wasn't screened by an earlier write ... */ 633 if (state->regs[regno].live & REG_LIVE_WRITTEN) 634 break; 635 /* ... 
then we depend on parent's value */ 636 parent->regs[regno].live |= REG_LIVE_READ; 637 state = parent; 638 parent = state->parent; 639 } 640 } 641 642 static int check_reg_arg(struct bpf_verifier_env *env, u32 regno, 643 enum reg_arg_type t) 644 { 645 struct bpf_reg_state *regs = env->cur_state->regs; 646 647 if (regno >= MAX_BPF_REG) { 648 verbose(env, "R%d is invalid\n", regno); 649 return -EINVAL; 650 } 651 652 if (t == SRC_OP) { 653 /* check whether register used as source operand can be read */ 654 if (regs[regno].type == NOT_INIT) { 655 verbose(env, "R%d !read_ok\n", regno); 656 return -EACCES; 657 } 658 mark_reg_read(env->cur_state, regno); 659 } else { 660 /* check whether register used as dest operand can be written to */ 661 if (regno == BPF_REG_FP) { 662 verbose(env, "frame pointer is read only\n"); 663 return -EACCES; 664 } 665 regs[regno].live |= REG_LIVE_WRITTEN; 666 if (t == DST_OP) 667 mark_reg_unknown(env, regs, regno); 668 } 669 return 0; 670 } 671 672 static bool is_spillable_regtype(enum bpf_reg_type type) 673 { 674 switch (type) { 675 case PTR_TO_MAP_VALUE: 676 case PTR_TO_MAP_VALUE_OR_NULL: 677 case PTR_TO_STACK: 678 case PTR_TO_CTX: 679 case PTR_TO_PACKET: 680 case PTR_TO_PACKET_META: 681 case PTR_TO_PACKET_END: 682 case CONST_PTR_TO_MAP: 683 return true; 684 default: 685 return false; 686 } 687 } 688 689 /* check_stack_read/write functions track spill/fill of registers, 690 * stack boundary and alignment are checked in check_mem_access() 691 */ 692 static int check_stack_write(struct bpf_verifier_env *env, 693 struct bpf_verifier_state *state, int off, 694 int size, int value_regno) 695 { 696 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err; 697 698 err = realloc_verifier_state(state, round_up(slot + 1, BPF_REG_SIZE), 699 true); 700 if (err) 701 return err; 702 /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0, 703 * so it's aligned access and [off, off + size) are within stack limits 704 */ 705 if (!env->allow_ptr_leaks && 706 state->stack[spi].slot_type[0] == STACK_SPILL && 707 size != BPF_REG_SIZE) { 708 verbose(env, "attempt to corrupt spilled pointer on stack\n"); 709 return -EACCES; 710 } 711 712 if (value_regno >= 0 && 713 is_spillable_regtype(state->regs[value_regno].type)) { 714 715 /* register containing pointer is being spilled into stack */ 716 if (size != BPF_REG_SIZE) { 717 verbose(env, "invalid size of register spill\n"); 718 return -EACCES; 719 } 720 721 /* save register state */ 722 state->stack[spi].spilled_ptr = state->regs[value_regno]; 723 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; 724 725 for (i = 0; i < BPF_REG_SIZE; i++) 726 state->stack[spi].slot_type[i] = STACK_SPILL; 727 } else { 728 /* regular write of data into stack */ 729 state->stack[spi].spilled_ptr = (struct bpf_reg_state) {}; 730 731 for (i = 0; i < size; i++) 732 state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] = 733 STACK_MISC; 734 } 735 return 0; 736 } 737 738 static void mark_stack_slot_read(const struct bpf_verifier_state *state, int slot) 739 { 740 struct bpf_verifier_state *parent = state->parent; 741 742 while (parent) { 743 /* if read wasn't screened by an earlier write ... */ 744 if (state->stack[slot].spilled_ptr.live & REG_LIVE_WRITTEN) 745 break; 746 /* ... 
then we depend on parent's value */ 747 parent->stack[slot].spilled_ptr.live |= REG_LIVE_READ; 748 state = parent; 749 parent = state->parent; 750 } 751 } 752 753 static int check_stack_read(struct bpf_verifier_env *env, 754 struct bpf_verifier_state *state, int off, int size, 755 int value_regno) 756 { 757 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE; 758 u8 *stype; 759 760 if (state->allocated_stack <= slot) { 761 verbose(env, "invalid read from stack off %d+0 size %d\n", 762 off, size); 763 return -EACCES; 764 } 765 stype = state->stack[spi].slot_type; 766 767 if (stype[0] == STACK_SPILL) { 768 if (size != BPF_REG_SIZE) { 769 verbose(env, "invalid size of register spill\n"); 770 return -EACCES; 771 } 772 for (i = 1; i < BPF_REG_SIZE; i++) { 773 if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) { 774 verbose(env, "corrupted spill memory\n"); 775 return -EACCES; 776 } 777 } 778 779 if (value_regno >= 0) { 780 /* restore register state from stack */ 781 state->regs[value_regno] = state->stack[spi].spilled_ptr; 782 mark_stack_slot_read(state, spi); 783 } 784 return 0; 785 } else { 786 for (i = 0; i < size; i++) { 787 if (stype[(slot - i) % BPF_REG_SIZE] != STACK_MISC) { 788 verbose(env, "invalid read from stack off %d+%d size %d\n", 789 off, i, size); 790 return -EACCES; 791 } 792 } 793 if (value_regno >= 0) 794 /* have read misc data from the stack */ 795 mark_reg_unknown(env, state->regs, value_regno); 796 return 0; 797 } 798 } 799 800 /* check read/write into map element returned by bpf_map_lookup_elem() */ 801 static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off, 802 int size, bool zero_size_allowed) 803 { 804 struct bpf_reg_state *regs = cur_regs(env); 805 struct bpf_map *map = regs[regno].map_ptr; 806 807 if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) || 808 off + size > map->value_size) { 809 verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n", 810 map->value_size, off, size); 811 return -EACCES; 812 } 813 return 0; 814 } 815 816 /* check read/write into a map element with possible variable offset */ 817 static int check_map_access(struct bpf_verifier_env *env, u32 regno, 818 int off, int size, bool zero_size_allowed) 819 { 820 struct bpf_verifier_state *state = env->cur_state; 821 struct bpf_reg_state *reg = &state->regs[regno]; 822 int err; 823 824 /* We may have adjusted the register to this map value, so we 825 * need to try adding each of min_value and max_value to off 826 * to make sure our theoretical access will be safe. 827 */ 828 if (env->log.level) 829 print_verifier_state(env, state); 830 /* The minimum value is only important with signed 831 * comparisons where we can't assume the floor of a 832 * value is 0. If we are using signed variables for our 833 * index'es we need to make sure that whatever we use 834 * will have a set floor within our range. 835 */ 836 if (reg->smin_value < 0) { 837 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", 838 regno); 839 return -EACCES; 840 } 841 err = __check_map_access(env, regno, reg->smin_value + off, size, 842 zero_size_allowed); 843 if (err) { 844 verbose(env, "R%d min value is outside of the array range\n", 845 regno); 846 return err; 847 } 848 849 /* If we haven't set a max value then we need to bail since we can't be 850 * sure we won't do bad things. 851 * If reg->umax_value + off could overflow, treat that as unbounded too. 
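 *
 * A worked example (illustrative, not part of the original comment): with
 * value_size == 64, off == 0 and size == 8, an index register known to be in
 * [0, 48] passes both checks (0 + 8 <= 64 and 48 + 8 <= 64), while one only
 * known to be in [0, 60] fails the second check (60 + 8 > 64) and the access
 * is rejected as "max value is outside of the array range".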
 */
	if (reg->umax_value >= BPF_MAX_VAR_OFF) {
		verbose(env, "R%d unbounded memory access, make sure to bounds check any array access into a map\n",
			regno);
		return -EACCES;
	}
	err = __check_map_access(env, regno, reg->umax_value + off, size,
				 zero_size_allowed);
	if (err)
		verbose(env, "R%d max value is outside of the array range\n",
			regno);
	return err;
}

#define MAX_PACKET_OFF 0xffff

static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
				       const struct bpf_call_arg_meta *meta,
				       enum bpf_access_type t)
{
	switch (env->prog->type) {
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
		/* dst_input() and dst_output() can't write for now */
		if (t == BPF_WRITE)
			return false;
		/* fallthrough */
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
	case BPF_PROG_TYPE_XDP:
	case BPF_PROG_TYPE_LWT_XMIT:
	case BPF_PROG_TYPE_SK_SKB:
		if (meta)
			return meta->pkt_access;

		env->seen_direct_write = true;
		return true;
	default:
		return false;
	}
}

static int __check_packet_access(struct bpf_verifier_env *env, u32 regno,
				 int off, int size, bool zero_size_allowed)
{
	struct bpf_reg_state *regs = cur_regs(env);
	struct bpf_reg_state *reg = &regs[regno];

	if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) ||
	    (u64)off + size > reg->range) {
		verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
			off, size, regno, reg->id, reg->off, reg->range);
		return -EACCES;
	}
	return 0;
}

static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
			       int size, bool zero_size_allowed)
{
	struct bpf_reg_state *regs = cur_regs(env);
	struct bpf_reg_state *reg = &regs[regno];
	int err;

	/* We may have added a variable offset to the packet pointer; but any
	 * reg->range we have comes after that. We are only checking the fixed
	 * offset.
	 */

	/* We don't allow negative numbers, because we aren't tracking enough
	 * detail to prove they're safe.
	 */
	if (reg->smin_value < 0) {
		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
			regno);
		return -EACCES;
	}
	err = __check_packet_access(env, regno, off, size, zero_size_allowed);
	if (err) {
		verbose(env, "R%d offset is outside of the packet\n", regno);
		return err;
	}
	return err;
}

/* check access to 'struct bpf_context' fields. Supports fixed offsets only */
static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
			    enum bpf_access_type t, enum bpf_reg_type *reg_type)
{
	struct bpf_insn_access_aux info = {
		.reg_type = *reg_type,
	};

	if (env->ops->is_valid_access &&
	    env->ops->is_valid_access(off, size, t, &info)) {
		/* A non zero info.ctx_field_size indicates that this field is a
		 * candidate for later verifier transformation to load the whole
		 * field and then apply a mask when accessed with a narrower
		 * access than actual ctx access size. A zero info.ctx_field_size
		 * will only allow for whole field access and rejects any other
		 * type of narrower access.
953 */ 954 *reg_type = info.reg_type; 955 956 env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size; 957 /* remember the offset of last byte accessed in ctx */ 958 if (env->prog->aux->max_ctx_offset < off + size) 959 env->prog->aux->max_ctx_offset = off + size; 960 return 0; 961 } 962 963 verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size); 964 return -EACCES; 965 } 966 967 static bool __is_pointer_value(bool allow_ptr_leaks, 968 const struct bpf_reg_state *reg) 969 { 970 if (allow_ptr_leaks) 971 return false; 972 973 return reg->type != SCALAR_VALUE; 974 } 975 976 static bool is_pointer_value(struct bpf_verifier_env *env, int regno) 977 { 978 return __is_pointer_value(env->allow_ptr_leaks, cur_regs(env) + regno); 979 } 980 981 static int check_pkt_ptr_alignment(struct bpf_verifier_env *env, 982 const struct bpf_reg_state *reg, 983 int off, int size, bool strict) 984 { 985 struct tnum reg_off; 986 int ip_align; 987 988 /* Byte size accesses are always allowed. */ 989 if (!strict || size == 1) 990 return 0; 991 992 /* For platforms that do not have a Kconfig enabling 993 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of 994 * NET_IP_ALIGN is universally set to '2'. And on platforms 995 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get 996 * to this code only in strict mode where we want to emulate 997 * the NET_IP_ALIGN==2 checking. Therefore use an 998 * unconditional IP align value of '2'. 999 */ 1000 ip_align = 2; 1001 1002 reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off)); 1003 if (!tnum_is_aligned(reg_off, size)) { 1004 char tn_buf[48]; 1005 1006 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 1007 verbose(env, 1008 "misaligned packet access off %d+%s+%d+%d size %d\n", 1009 ip_align, tn_buf, reg->off, off, size); 1010 return -EACCES; 1011 } 1012 1013 return 0; 1014 } 1015 1016 static int check_generic_ptr_alignment(struct bpf_verifier_env *env, 1017 const struct bpf_reg_state *reg, 1018 const char *pointer_desc, 1019 int off, int size, bool strict) 1020 { 1021 struct tnum reg_off; 1022 1023 /* Byte size accesses are always allowed. */ 1024 if (!strict || size == 1) 1025 return 0; 1026 1027 reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off)); 1028 if (!tnum_is_aligned(reg_off, size)) { 1029 char tn_buf[48]; 1030 1031 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 1032 verbose(env, "misaligned %saccess off %s+%d+%d size %d\n", 1033 pointer_desc, tn_buf, reg->off, off, size); 1034 return -EACCES; 1035 } 1036 1037 return 0; 1038 } 1039 1040 static int check_ptr_alignment(struct bpf_verifier_env *env, 1041 const struct bpf_reg_state *reg, 1042 int off, int size) 1043 { 1044 bool strict = env->strict_alignment; 1045 const char *pointer_desc = ""; 1046 1047 switch (reg->type) { 1048 case PTR_TO_PACKET: 1049 case PTR_TO_PACKET_META: 1050 /* Special case, because of NET_IP_ALIGN. Given metadata sits 1051 * right in front, treat it the very same way. 
		 */
		return check_pkt_ptr_alignment(env, reg, off, size, strict);
	case PTR_TO_MAP_VALUE:
		pointer_desc = "value ";
		break;
	case PTR_TO_CTX:
		pointer_desc = "context ";
		break;
	case PTR_TO_STACK:
		pointer_desc = "stack ";
		break;
	default:
		break;
	}
	return check_generic_ptr_alignment(env, reg, pointer_desc, off, size,
					   strict);
}

/* check whether memory at (regno + off) is accessible for t = (read | write)
 * if t==write, value_regno is a register which value is stored into memory
 * if t==read, value_regno is a register which will receive the value from memory
 * if t==write && value_regno==-1, some unknown value is stored into memory
 * if t==read && value_regno==-1, don't care what we read from memory
 */
static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, int off,
			    int bpf_size, enum bpf_access_type t,
			    int value_regno)
{
	struct bpf_verifier_state *state = env->cur_state;
	struct bpf_reg_state *regs = cur_regs(env);
	struct bpf_reg_state *reg = regs + regno;
	int size, err = 0;

	size = bpf_size_to_bytes(bpf_size);
	if (size < 0)
		return size;

	/* alignment checks will add in reg->off themselves */
	err = check_ptr_alignment(env, reg, off, size);
	if (err)
		return err;

	/* for access checks, reg->off is just part of off */
	off += reg->off;

	if (reg->type == PTR_TO_MAP_VALUE) {
		if (t == BPF_WRITE && value_regno >= 0 &&
		    is_pointer_value(env, value_regno)) {
			verbose(env, "R%d leaks addr into map\n", value_regno);
			return -EACCES;
		}

		err = check_map_access(env, regno, off, size, false);
		if (!err && t == BPF_READ && value_regno >= 0)
			mark_reg_unknown(env, regs, value_regno);

	} else if (reg->type == PTR_TO_CTX) {
		enum bpf_reg_type reg_type = SCALAR_VALUE;

		if (t == BPF_WRITE && value_regno >= 0 &&
		    is_pointer_value(env, value_regno)) {
			verbose(env, "R%d leaks addr into ctx\n", value_regno);
			return -EACCES;
		}
		/* ctx accesses must be at a fixed offset, so that we can
		 * determine what type of data were returned.
		 */
		if (reg->off) {
			verbose(env,
				"dereference of modified ctx ptr R%d off=%d+%d, ctx+const is allowed, ctx+const+const is not\n",
				regno, reg->off, off - reg->off);
			return -EACCES;
		}
		if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
			char tn_buf[48];

			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
			verbose(env,
				"variable ctx access var_off=%s off=%d size=%d",
				tn_buf, off, size);
			return -EACCES;
		}
		err = check_ctx_access(env, insn_idx, off, size, t, &reg_type);
		if (!err && t == BPF_READ && value_regno >= 0) {
			/* ctx access returns either a scalar, or a
			 * PTR_TO_PACKET[_META,_END]. In the latter
			 * case, we know the offset is zero.
			 */
			if (reg_type == SCALAR_VALUE)
				mark_reg_unknown(env, regs, value_regno);
			else
				mark_reg_known_zero(env, regs,
						    value_regno);
			regs[value_regno].id = 0;
			regs[value_regno].off = 0;
			regs[value_regno].range = 0;
			regs[value_regno].type = reg_type;
		}

	} else if (reg->type == PTR_TO_STACK) {
		/* stack accesses must be at a fixed offset, so that we can
		 * determine what type of data were returned.
		 * See check_stack_read().
		 */
		if (!tnum_is_const(reg->var_off)) {
			char tn_buf[48];

			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
			verbose(env, "variable stack access var_off=%s off=%d size=%d",
				tn_buf, off, size);
			return -EACCES;
		}
		off += reg->var_off.value;
		if (off >= 0 || off < -MAX_BPF_STACK) {
			verbose(env, "invalid stack off=%d size=%d\n", off,
				size);
			return -EACCES;
		}

		if (env->prog->aux->stack_depth < -off)
			env->prog->aux->stack_depth = -off;

		if (t == BPF_WRITE)
			err = check_stack_write(env, state, off, size,
						value_regno);
		else
			err = check_stack_read(env, state, off, size,
					       value_regno);
	} else if (reg_is_pkt_pointer(reg)) {
		if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
			verbose(env, "cannot write into packet\n");
			return -EACCES;
		}
		if (t == BPF_WRITE && value_regno >= 0 &&
		    is_pointer_value(env, value_regno)) {
			verbose(env, "R%d leaks addr into packet\n",
				value_regno);
			return -EACCES;
		}
		err = check_packet_access(env, regno, off, size, false);
		if (!err && t == BPF_READ && value_regno >= 0)
			mark_reg_unknown(env, regs, value_regno);
	} else {
		verbose(env, "R%d invalid mem access '%s'\n", regno,
			reg_type_str[reg->type]);
		return -EACCES;
	}

	if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
	    regs[value_regno].type == SCALAR_VALUE) {
		/* b/h/w load zero-extends, mark upper bits as known 0 */
		regs[value_regno].var_off =
			tnum_cast(regs[value_regno].var_off, size);
		__update_reg_bounds(&regs[value_regno]);
	}
	return err;
}

static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
{
	int err;

	if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) ||
	    insn->imm != 0) {
		verbose(env, "BPF_XADD uses reserved fields\n");
		return -EINVAL;
	}

	/* check src1 operand */
	err = check_reg_arg(env, insn->src_reg, SRC_OP);
	if (err)
		return err;

	/* check src2 operand */
	err = check_reg_arg(env, insn->dst_reg, SRC_OP);
	if (err)
		return err;

	if (is_pointer_value(env, insn->src_reg)) {
		verbose(env, "R%d leaks addr into mem\n", insn->src_reg);
		return -EACCES;
	}

	/* check whether atomic_add can read the memory */
	err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
			       BPF_SIZE(insn->code), BPF_READ, -1);
	if (err)
		return err;

	/* check whether atomic_add can write into the same memory */
	return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
				BPF_SIZE(insn->code), BPF_WRITE, -1);
}

/* Does this register contain a constant zero? */
static bool register_is_null(struct bpf_reg_state reg)
{
	return reg.type == SCALAR_VALUE && tnum_equals_const(reg.var_off, 0);
}

/* when register 'regno' is passed into function that will read 'access_size'
 * bytes from that pointer, make sure that it's within stack boundary
 * and all elements of stack are initialized.
 * Unlike most pointer bounds-checking functions, this one doesn't take an
 * 'off' argument, so it has to add in reg->off itself.
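 *
 * For example (an illustrative case, not part of the original comment): for
 * the map key argument of bpf_map_lookup_elem() with key_size == 8 and a key
 * pointer of fp - 8, the verifier requires that [fp - 8, fp) lies inside the
 * stack and, since that helper reads the key, that all 8 bytes were written
 * by the program before the call.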
 */
static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
				int access_size, bool zero_size_allowed,
				struct bpf_call_arg_meta *meta)
{
	struct bpf_verifier_state *state = env->cur_state;
	struct bpf_reg_state *regs = state->regs;
	int off, i, slot, spi;

	if (regs[regno].type != PTR_TO_STACK) {
		/* Allow zero-byte read from NULL, regardless of pointer type */
		if (zero_size_allowed && access_size == 0 &&
		    register_is_null(regs[regno]))
			return 0;

		verbose(env, "R%d type=%s expected=%s\n", regno,
			reg_type_str[regs[regno].type],
			reg_type_str[PTR_TO_STACK]);
		return -EACCES;
	}

	/* Only allow fixed-offset stack reads */
	if (!tnum_is_const(regs[regno].var_off)) {
		char tn_buf[48];

		tnum_strn(tn_buf, sizeof(tn_buf), regs[regno].var_off);
		verbose(env, "invalid variable stack read R%d var_off=%s\n",
			regno, tn_buf);
		return -EACCES;
	}
	off = regs[regno].off + regs[regno].var_off.value;
	if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
	    access_size < 0 || (access_size == 0 && !zero_size_allowed)) {
		verbose(env, "invalid stack type R%d off=%d access_size=%d\n",
			regno, off, access_size);
		return -EACCES;
	}

	if (env->prog->aux->stack_depth < -off)
		env->prog->aux->stack_depth = -off;

	if (meta && meta->raw_mode) {
		meta->access_size = access_size;
		meta->regno = regno;
		return 0;
	}

	for (i = 0; i < access_size; i++) {
		slot = -(off + i) - 1;
		spi = slot / BPF_REG_SIZE;
		if (state->allocated_stack <= slot ||
		    state->stack[spi].slot_type[slot % BPF_REG_SIZE] !=
			STACK_MISC) {
			verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
				off, i, access_size);
			return -EACCES;
		}
	}
	return 0;
}

static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
				   int access_size, bool zero_size_allowed,
				   struct bpf_call_arg_meta *meta)
{
	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];

	switch (reg->type) {
	case PTR_TO_PACKET:
	case PTR_TO_PACKET_META:
		return check_packet_access(env, regno, reg->off, access_size,
					   zero_size_allowed);
	case PTR_TO_MAP_VALUE:
		return check_map_access(env, regno, reg->off, access_size,
					zero_size_allowed);
	default: /* scalar_value|ptr_to_stack or invalid ptr */
		return check_stack_boundary(env, regno, access_size,
					    zero_size_allowed, meta);
	}
}

static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
			  enum bpf_arg_type arg_type,
			  struct bpf_call_arg_meta *meta)
{
	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
	enum bpf_reg_type expected_type, type = reg->type;
	int err = 0;

	if (arg_type == ARG_DONTCARE)
		return 0;

	err = check_reg_arg(env, regno, SRC_OP);
	if (err)
		return err;

	if (arg_type == ARG_ANYTHING) {
		if (is_pointer_value(env, regno)) {
			verbose(env, "R%d leaks addr into helper function\n",
				regno);
			return -EACCES;
		}
		return 0;
	}

	if (type_is_pkt_pointer(type) &&
	    !may_access_direct_pkt_data(env, meta, BPF_READ)) {
		verbose(env, "helper access to the packet is not allowed\n");
		return -EACCES;
	}

	if (arg_type == ARG_PTR_TO_MAP_KEY ||
	    arg_type == ARG_PTR_TO_MAP_VALUE) {
		expected_type = PTR_TO_STACK;
		if
(!type_is_pkt_pointer(type) && 1371 type != expected_type) 1372 goto err_type; 1373 } else if (arg_type == ARG_CONST_SIZE || 1374 arg_type == ARG_CONST_SIZE_OR_ZERO) { 1375 expected_type = SCALAR_VALUE; 1376 if (type != expected_type) 1377 goto err_type; 1378 } else if (arg_type == ARG_CONST_MAP_PTR) { 1379 expected_type = CONST_PTR_TO_MAP; 1380 if (type != expected_type) 1381 goto err_type; 1382 } else if (arg_type == ARG_PTR_TO_CTX) { 1383 expected_type = PTR_TO_CTX; 1384 if (type != expected_type) 1385 goto err_type; 1386 } else if (arg_type == ARG_PTR_TO_MEM || 1387 arg_type == ARG_PTR_TO_MEM_OR_NULL || 1388 arg_type == ARG_PTR_TO_UNINIT_MEM) { 1389 expected_type = PTR_TO_STACK; 1390 /* One exception here. In case function allows for NULL to be 1391 * passed in as argument, it's a SCALAR_VALUE type. Final test 1392 * happens during stack boundary checking. 1393 */ 1394 if (register_is_null(*reg) && 1395 arg_type == ARG_PTR_TO_MEM_OR_NULL) 1396 /* final test in check_stack_boundary() */; 1397 else if (!type_is_pkt_pointer(type) && 1398 type != PTR_TO_MAP_VALUE && 1399 type != expected_type) 1400 goto err_type; 1401 meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM; 1402 } else { 1403 verbose(env, "unsupported arg_type %d\n", arg_type); 1404 return -EFAULT; 1405 } 1406 1407 if (arg_type == ARG_CONST_MAP_PTR) { 1408 /* bpf_map_xxx(map_ptr) call: remember that map_ptr */ 1409 meta->map_ptr = reg->map_ptr; 1410 } else if (arg_type == ARG_PTR_TO_MAP_KEY) { 1411 /* bpf_map_xxx(..., map_ptr, ..., key) call: 1412 * check that [key, key + map->key_size) are within 1413 * stack limits and initialized 1414 */ 1415 if (!meta->map_ptr) { 1416 /* in function declaration map_ptr must come before 1417 * map_key, so that it's verified and known before 1418 * we have to check map_key here. Otherwise it means 1419 * that kernel subsystem misconfigured verifier 1420 */ 1421 verbose(env, "invalid map_ptr to access map->key\n"); 1422 return -EACCES; 1423 } 1424 if (type_is_pkt_pointer(type)) 1425 err = check_packet_access(env, regno, reg->off, 1426 meta->map_ptr->key_size, 1427 false); 1428 else 1429 err = check_stack_boundary(env, regno, 1430 meta->map_ptr->key_size, 1431 false, NULL); 1432 } else if (arg_type == ARG_PTR_TO_MAP_VALUE) { 1433 /* bpf_map_xxx(..., map_ptr, ..., value) call: 1434 * check [value, value + map->value_size) validity 1435 */ 1436 if (!meta->map_ptr) { 1437 /* kernel subsystem misconfigured verifier */ 1438 verbose(env, "invalid map_ptr to access map->value\n"); 1439 return -EACCES; 1440 } 1441 if (type_is_pkt_pointer(type)) 1442 err = check_packet_access(env, regno, reg->off, 1443 meta->map_ptr->value_size, 1444 false); 1445 else 1446 err = check_stack_boundary(env, regno, 1447 meta->map_ptr->value_size, 1448 false, NULL); 1449 } else if (arg_type == ARG_CONST_SIZE || 1450 arg_type == ARG_CONST_SIZE_OR_ZERO) { 1451 bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO); 1452 1453 /* bpf_xxx(..., buf, len) call will access 'len' bytes 1454 * from stack pointer 'buf'. Check it 1455 * note: regno == len, regno - 1 == buf 1456 */ 1457 if (regno == 0) { 1458 /* kernel subsystem misconfigured verifier */ 1459 verbose(env, 1460 "ARG_CONST_SIZE cannot be first argument\n"); 1461 return -EACCES; 1462 } 1463 1464 /* The register is SCALAR_VALUE; the access check 1465 * happens using its boundaries. 
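 * For instance (illustrative): if the size register was range-checked with
 * "if (r2 > 64) goto out", its umax_value is 64 on the fall-through path,
 * and the verifier checks that a 64-byte access from the buffer pointer
 * would be safe.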
1466 */ 1467 1468 if (!tnum_is_const(reg->var_off)) 1469 /* For unprivileged variable accesses, disable raw 1470 * mode so that the program is required to 1471 * initialize all the memory that the helper could 1472 * just partially fill up. 1473 */ 1474 meta = NULL; 1475 1476 if (reg->smin_value < 0) { 1477 verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n", 1478 regno); 1479 return -EACCES; 1480 } 1481 1482 if (reg->umin_value == 0) { 1483 err = check_helper_mem_access(env, regno - 1, 0, 1484 zero_size_allowed, 1485 meta); 1486 if (err) 1487 return err; 1488 } 1489 1490 if (reg->umax_value >= BPF_MAX_VAR_SIZ) { 1491 verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n", 1492 regno); 1493 return -EACCES; 1494 } 1495 err = check_helper_mem_access(env, regno - 1, 1496 reg->umax_value, 1497 zero_size_allowed, meta); 1498 } 1499 1500 return err; 1501 err_type: 1502 verbose(env, "R%d type=%s expected=%s\n", regno, 1503 reg_type_str[type], reg_type_str[expected_type]); 1504 return -EACCES; 1505 } 1506 1507 static int check_map_func_compatibility(struct bpf_verifier_env *env, 1508 struct bpf_map *map, int func_id) 1509 { 1510 if (!map) 1511 return 0; 1512 1513 /* We need a two way check, first is from map perspective ... */ 1514 switch (map->map_type) { 1515 case BPF_MAP_TYPE_PROG_ARRAY: 1516 if (func_id != BPF_FUNC_tail_call) 1517 goto error; 1518 break; 1519 case BPF_MAP_TYPE_PERF_EVENT_ARRAY: 1520 if (func_id != BPF_FUNC_perf_event_read && 1521 func_id != BPF_FUNC_perf_event_output && 1522 func_id != BPF_FUNC_perf_event_read_value) 1523 goto error; 1524 break; 1525 case BPF_MAP_TYPE_STACK_TRACE: 1526 if (func_id != BPF_FUNC_get_stackid) 1527 goto error; 1528 break; 1529 case BPF_MAP_TYPE_CGROUP_ARRAY: 1530 if (func_id != BPF_FUNC_skb_under_cgroup && 1531 func_id != BPF_FUNC_current_task_under_cgroup) 1532 goto error; 1533 break; 1534 /* devmap returns a pointer to a live net_device ifindex that we cannot 1535 * allow to be modified from bpf side. So do not allow lookup elements 1536 * for now. 1537 */ 1538 case BPF_MAP_TYPE_DEVMAP: 1539 if (func_id != BPF_FUNC_redirect_map) 1540 goto error; 1541 break; 1542 /* Restrict bpf side of cpumap, open when use-cases appear */ 1543 case BPF_MAP_TYPE_CPUMAP: 1544 if (func_id != BPF_FUNC_redirect_map) 1545 goto error; 1546 break; 1547 case BPF_MAP_TYPE_ARRAY_OF_MAPS: 1548 case BPF_MAP_TYPE_HASH_OF_MAPS: 1549 if (func_id != BPF_FUNC_map_lookup_elem) 1550 goto error; 1551 break; 1552 case BPF_MAP_TYPE_SOCKMAP: 1553 if (func_id != BPF_FUNC_sk_redirect_map && 1554 func_id != BPF_FUNC_sock_map_update && 1555 func_id != BPF_FUNC_map_delete_elem) 1556 goto error; 1557 break; 1558 default: 1559 break; 1560 } 1561 1562 /* ... and second from the function itself. 
 */
	switch (func_id) {
	case BPF_FUNC_tail_call:
		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
			goto error;
		break;
	case BPF_FUNC_perf_event_read:
	case BPF_FUNC_perf_event_output:
	case BPF_FUNC_perf_event_read_value:
		if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
			goto error;
		break;
	case BPF_FUNC_get_stackid:
		if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
			goto error;
		break;
	case BPF_FUNC_current_task_under_cgroup:
	case BPF_FUNC_skb_under_cgroup:
		if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
			goto error;
		break;
	case BPF_FUNC_redirect_map:
		if (map->map_type != BPF_MAP_TYPE_DEVMAP &&
		    map->map_type != BPF_MAP_TYPE_CPUMAP)
			goto error;
		break;
	case BPF_FUNC_sk_redirect_map:
		if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
			goto error;
		break;
	case BPF_FUNC_sock_map_update:
		if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
			goto error;
		break;
	default:
		break;
	}

	return 0;
error:
	verbose(env, "cannot pass map_type %d into func %s#%d\n",
		map->map_type, func_id_name(func_id), func_id);
	return -EINVAL;
}

static int check_raw_mode(const struct bpf_func_proto *fn)
{
	int count = 0;

	if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM)
		count++;
	if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM)
		count++;
	if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM)
		count++;
	if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM)
		count++;
	if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
		count++;

	return count > 1 ? -EINVAL : 0;
}

/* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
 * are now invalid, so turn them into unknown SCALAR_VALUE.
 */
static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
{
	struct bpf_verifier_state *state = env->cur_state;
	struct bpf_reg_state *regs = state->regs, *reg;
	int i;

	for (i = 0; i < MAX_BPF_REG; i++)
		if (reg_is_pkt_pointer_any(&regs[i]))
			mark_reg_unknown(env, regs, i);

	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
		if (state->stack[i].slot_type[0] != STACK_SPILL)
			continue;
		reg = &state->stack[i].spilled_ptr;
		if (reg_is_pkt_pointer_any(reg))
			__mark_reg_unknown(reg);
	}
}

static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
{
	const struct bpf_func_proto *fn = NULL;
	struct bpf_reg_state *regs;
	struct bpf_call_arg_meta meta;
	bool changes_data;
	int i, err;

	/* find function prototype */
	if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
		verbose(env, "invalid func %s#%d\n", func_id_name(func_id),
			func_id);
		return -EINVAL;
	}

	if (env->ops->get_func_proto)
		fn = env->ops->get_func_proto(func_id);

	if (!fn) {
		verbose(env, "unknown func %s#%d\n", func_id_name(func_id),
			func_id);
		return -EINVAL;
	}

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	if (!env->prog->gpl_compatible && fn->gpl_only) {
		verbose(env, "cannot call GPL only function from proprietary program\n");
		return -EINVAL;
	}

	/* With LD_ABS/IND some JITs save/restore skb from r1.
*/ 1678 changes_data = bpf_helper_changes_pkt_data(fn->func); 1679 if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) { 1680 verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n", 1681 func_id_name(func_id), func_id); 1682 return -EINVAL; 1683 } 1684 1685 memset(&meta, 0, sizeof(meta)); 1686 meta.pkt_access = fn->pkt_access; 1687 1688 /* We only support one arg being in raw mode at the moment, which 1689 * is sufficient for the helper functions we have right now. 1690 */ 1691 err = check_raw_mode(fn); 1692 if (err) { 1693 verbose(env, "kernel subsystem misconfigured func %s#%d\n", 1694 func_id_name(func_id), func_id); 1695 return err; 1696 } 1697 1698 /* check args */ 1699 err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta); 1700 if (err) 1701 return err; 1702 err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta); 1703 if (err) 1704 return err; 1705 err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta); 1706 if (err) 1707 return err; 1708 err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &meta); 1709 if (err) 1710 return err; 1711 err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &meta); 1712 if (err) 1713 return err; 1714 1715 /* Mark slots with STACK_MISC in case of raw mode, stack offset 1716 * is inferred from register state. 1717 */ 1718 for (i = 0; i < meta.access_size; i++) { 1719 err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, BPF_WRITE, -1); 1720 if (err) 1721 return err; 1722 } 1723 1724 regs = cur_regs(env); 1725 /* reset caller saved regs */ 1726 for (i = 0; i < CALLER_SAVED_REGS; i++) { 1727 mark_reg_not_init(env, regs, caller_saved[i]); 1728 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); 1729 } 1730 1731 /* update return register (already marked as written above) */ 1732 if (fn->ret_type == RET_INTEGER) { 1733 /* sets type to SCALAR_VALUE */ 1734 mark_reg_unknown(env, regs, BPF_REG_0); 1735 } else if (fn->ret_type == RET_VOID) { 1736 regs[BPF_REG_0].type = NOT_INIT; 1737 } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) { 1738 struct bpf_insn_aux_data *insn_aux; 1739 1740 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL; 1741 /* There is no offset yet applied, variable or fixed */ 1742 mark_reg_known_zero(env, regs, BPF_REG_0); 1743 regs[BPF_REG_0].off = 0; 1744 /* remember map_ptr, so that check_map_access() 1745 * can check 'value_size' boundary of memory access 1746 * to map element returned from bpf_map_lookup_elem() 1747 */ 1748 if (meta.map_ptr == NULL) { 1749 verbose(env, 1750 "kernel subsystem misconfigured verifier\n"); 1751 return -EINVAL; 1752 } 1753 regs[BPF_REG_0].map_ptr = meta.map_ptr; 1754 regs[BPF_REG_0].id = ++env->id_gen; 1755 insn_aux = &env->insn_aux_data[insn_idx]; 1756 if (!insn_aux->map_ptr) 1757 insn_aux->map_ptr = meta.map_ptr; 1758 else if (insn_aux->map_ptr != meta.map_ptr) 1759 insn_aux->map_ptr = BPF_MAP_PTR_POISON; 1760 } else { 1761 verbose(env, "unknown return type %d of func %s#%d\n", 1762 fn->ret_type, func_id_name(func_id), func_id); 1763 return -EINVAL; 1764 } 1765 1766 err = check_map_func_compatibility(env, meta.map_ptr, func_id); 1767 if (err) 1768 return err; 1769 1770 if (changes_data) 1771 clear_all_pkt_pointers(env); 1772 return 0; 1773 } 1774 1775 static void coerce_reg_to_32(struct bpf_reg_state *reg) 1776 { 1777 /* clear high 32 bits */ 1778 reg->var_off = tnum_cast(reg->var_off, 4); 1779 /* Update bounds */ 1780 __update_reg_bounds(reg); 1781 } 1782 1783 static bool signed_add_overflows(s64 a, s64 b) 1784 { 1785 /* Do the add in u64, where overflow is well-defined */ 
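	/* e.g. (illustrative) a = S64_MAX, b = 1: res wraps to S64_MIN, which
	 * is less than a, so with b > 0 the wrap is reported as an overflow.
	 */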
1786 s64 res = (s64)((u64)a + (u64)b); 1787 1788 if (b < 0) 1789 return res > a; 1790 return res < a; 1791 } 1792 1793 static bool signed_sub_overflows(s64 a, s64 b) 1794 { 1795 /* Do the sub in u64, where overflow is well-defined */ 1796 s64 res = (s64)((u64)a - (u64)b); 1797 1798 if (b < 0) 1799 return res < a; 1800 return res > a; 1801 } 1802 1803 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off. 1804 * Caller should also handle BPF_MOV case separately. 1805 * If we return -EACCES, caller may want to try again treating pointer as a 1806 * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks. 1807 */ 1808 static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, 1809 struct bpf_insn *insn, 1810 const struct bpf_reg_state *ptr_reg, 1811 const struct bpf_reg_state *off_reg) 1812 { 1813 struct bpf_reg_state *regs = cur_regs(env), *dst_reg; 1814 bool known = tnum_is_const(off_reg->var_off); 1815 s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value, 1816 smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value; 1817 u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value, 1818 umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value; 1819 u8 opcode = BPF_OP(insn->code); 1820 u32 dst = insn->dst_reg; 1821 1822 dst_reg = ®s[dst]; 1823 1824 if (WARN_ON_ONCE(known && (smin_val != smax_val))) { 1825 print_verifier_state(env, env->cur_state); 1826 verbose(env, 1827 "verifier internal error: known but bad sbounds\n"); 1828 return -EINVAL; 1829 } 1830 if (WARN_ON_ONCE(known && (umin_val != umax_val))) { 1831 print_verifier_state(env, env->cur_state); 1832 verbose(env, 1833 "verifier internal error: known but bad ubounds\n"); 1834 return -EINVAL; 1835 } 1836 1837 if (BPF_CLASS(insn->code) != BPF_ALU64) { 1838 /* 32-bit ALU ops on pointers produce (meaningless) scalars */ 1839 if (!env->allow_ptr_leaks) 1840 verbose(env, 1841 "R%d 32-bit pointer arithmetic prohibited\n", 1842 dst); 1843 return -EACCES; 1844 } 1845 1846 if (ptr_reg->type == PTR_TO_MAP_VALUE_OR_NULL) { 1847 if (!env->allow_ptr_leaks) 1848 verbose(env, "R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n", 1849 dst); 1850 return -EACCES; 1851 } 1852 if (ptr_reg->type == CONST_PTR_TO_MAP) { 1853 if (!env->allow_ptr_leaks) 1854 verbose(env, "R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n", 1855 dst); 1856 return -EACCES; 1857 } 1858 if (ptr_reg->type == PTR_TO_PACKET_END) { 1859 if (!env->allow_ptr_leaks) 1860 verbose(env, "R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n", 1861 dst); 1862 return -EACCES; 1863 } 1864 1865 /* In case of 'scalar += pointer', dst_reg inherits pointer type and id. 1866 * The id may be overwritten later if we create a new variable offset. 1867 */ 1868 dst_reg->type = ptr_reg->type; 1869 dst_reg->id = ptr_reg->id; 1870 1871 switch (opcode) { 1872 case BPF_ADD: 1873 /* We can take a fixed offset as long as it doesn't overflow 1874 * the s32 'off' field 1875 */ 1876 if (known && (ptr_reg->off + smin_val == 1877 (s64)(s32)(ptr_reg->off + smin_val))) { 1878 /* pointer += K. Accumulate it into fixed offset */ 1879 dst_reg->smin_value = smin_ptr; 1880 dst_reg->smax_value = smax_ptr; 1881 dst_reg->umin_value = umin_ptr; 1882 dst_reg->umax_value = umax_ptr; 1883 dst_reg->var_off = ptr_reg->var_off; 1884 dst_reg->off = ptr_reg->off + smin_val; 1885 dst_reg->range = ptr_reg->range; 1886 break; 1887 } 1888 /* A new variable offset is created. 
Note that off_reg->off 1889 * == 0, since it's a scalar. 1890 * dst_reg gets the pointer type and since some positive 1891 * integer value was added to the pointer, give it a new 'id' 1892 * if it's a PTR_TO_PACKET. 1893 * this creates a new 'base' pointer, off_reg (variable) gets 1894 * added into the variable offset, and we copy the fixed offset 1895 * from ptr_reg. 1896 */ 1897 if (signed_add_overflows(smin_ptr, smin_val) || 1898 signed_add_overflows(smax_ptr, smax_val)) { 1899 dst_reg->smin_value = S64_MIN; 1900 dst_reg->smax_value = S64_MAX; 1901 } else { 1902 dst_reg->smin_value = smin_ptr + smin_val; 1903 dst_reg->smax_value = smax_ptr + smax_val; 1904 } 1905 if (umin_ptr + umin_val < umin_ptr || 1906 umax_ptr + umax_val < umax_ptr) { 1907 dst_reg->umin_value = 0; 1908 dst_reg->umax_value = U64_MAX; 1909 } else { 1910 dst_reg->umin_value = umin_ptr + umin_val; 1911 dst_reg->umax_value = umax_ptr + umax_val; 1912 } 1913 dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off); 1914 dst_reg->off = ptr_reg->off; 1915 if (reg_is_pkt_pointer(ptr_reg)) { 1916 dst_reg->id = ++env->id_gen; 1917 /* something was added to pkt_ptr, set range to zero */ 1918 dst_reg->range = 0; 1919 } 1920 break; 1921 case BPF_SUB: 1922 if (dst_reg == off_reg) { 1923 /* scalar -= pointer. Creates an unknown scalar */ 1924 if (!env->allow_ptr_leaks) 1925 verbose(env, "R%d tried to subtract pointer from scalar\n", 1926 dst); 1927 return -EACCES; 1928 } 1929 /* We don't allow subtraction from FP, because (according to 1930 * test_verifier.c test "invalid fp arithmetic", JITs might not 1931 * be able to deal with it. 1932 */ 1933 if (ptr_reg->type == PTR_TO_STACK) { 1934 if (!env->allow_ptr_leaks) 1935 verbose(env, "R%d subtraction from stack pointer prohibited\n", 1936 dst); 1937 return -EACCES; 1938 } 1939 if (known && (ptr_reg->off - smin_val == 1940 (s64)(s32)(ptr_reg->off - smin_val))) { 1941 /* pointer -= K. Subtract it from fixed offset */ 1942 dst_reg->smin_value = smin_ptr; 1943 dst_reg->smax_value = smax_ptr; 1944 dst_reg->umin_value = umin_ptr; 1945 dst_reg->umax_value = umax_ptr; 1946 dst_reg->var_off = ptr_reg->var_off; 1947 dst_reg->id = ptr_reg->id; 1948 dst_reg->off = ptr_reg->off - smin_val; 1949 dst_reg->range = ptr_reg->range; 1950 break; 1951 } 1952 /* A new variable offset is created. If the subtrahend is known 1953 * nonnegative, then any reg->range we had before is still good. 1954 */ 1955 if (signed_sub_overflows(smin_ptr, smax_val) || 1956 signed_sub_overflows(smax_ptr, smin_val)) { 1957 /* Overflow possible, we know nothing */ 1958 dst_reg->smin_value = S64_MIN; 1959 dst_reg->smax_value = S64_MAX; 1960 } else { 1961 dst_reg->smin_value = smin_ptr - smax_val; 1962 dst_reg->smax_value = smax_ptr - smin_val; 1963 } 1964 if (umin_ptr < umax_val) { 1965 /* Overflow possible, we know nothing */ 1966 dst_reg->umin_value = 0; 1967 dst_reg->umax_value = U64_MAX; 1968 } else { 1969 /* Cannot overflow (as long as bounds are consistent) */ 1970 dst_reg->umin_value = umin_ptr - umax_val; 1971 dst_reg->umax_value = umax_ptr - umin_val; 1972 } 1973 dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off); 1974 dst_reg->off = ptr_reg->off; 1975 if (reg_is_pkt_pointer(ptr_reg)) { 1976 dst_reg->id = ++env->id_gen; 1977 /* something was added to pkt_ptr, set range to zero */ 1978 if (smin_val < 0) 1979 dst_reg->range = 0; 1980 } 1981 break; 1982 case BPF_AND: 1983 case BPF_OR: 1984 case BPF_XOR: 1985 /* bitwise ops on pointers are troublesome, prohibit for now. 
1986 * (However, in principle we could allow some cases, e.g. 1987 * ptr &= ~3 which would reduce min_value by 3.) 1988 */ 1989 if (!env->allow_ptr_leaks) 1990 verbose(env, "R%d bitwise operator %s on pointer prohibited\n", 1991 dst, bpf_alu_string[opcode >> 4]); 1992 return -EACCES; 1993 default: 1994 /* other operators (e.g. MUL,LSH) produce non-pointer results */ 1995 if (!env->allow_ptr_leaks) 1996 verbose(env, "R%d pointer arithmetic with %s operator prohibited\n", 1997 dst, bpf_alu_string[opcode >> 4]); 1998 return -EACCES; 1999 } 2000 2001 __update_reg_bounds(dst_reg); 2002 __reg_deduce_bounds(dst_reg); 2003 __reg_bound_offset(dst_reg); 2004 return 0; 2005 } 2006 2007 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, 2008 struct bpf_insn *insn, 2009 struct bpf_reg_state *dst_reg, 2010 struct bpf_reg_state src_reg) 2011 { 2012 struct bpf_reg_state *regs = cur_regs(env); 2013 u8 opcode = BPF_OP(insn->code); 2014 bool src_known, dst_known; 2015 s64 smin_val, smax_val; 2016 u64 umin_val, umax_val; 2017 2018 if (BPF_CLASS(insn->code) != BPF_ALU64) { 2019 /* 32-bit ALU ops are (32,32)->64 */ 2020 coerce_reg_to_32(dst_reg); 2021 coerce_reg_to_32(&src_reg); 2022 } 2023 smin_val = src_reg.smin_value; 2024 smax_val = src_reg.smax_value; 2025 umin_val = src_reg.umin_value; 2026 umax_val = src_reg.umax_value; 2027 src_known = tnum_is_const(src_reg.var_off); 2028 dst_known = tnum_is_const(dst_reg->var_off); 2029 2030 switch (opcode) { 2031 case BPF_ADD: 2032 if (signed_add_overflows(dst_reg->smin_value, smin_val) || 2033 signed_add_overflows(dst_reg->smax_value, smax_val)) { 2034 dst_reg->smin_value = S64_MIN; 2035 dst_reg->smax_value = S64_MAX; 2036 } else { 2037 dst_reg->smin_value += smin_val; 2038 dst_reg->smax_value += smax_val; 2039 } 2040 if (dst_reg->umin_value + umin_val < umin_val || 2041 dst_reg->umax_value + umax_val < umax_val) { 2042 dst_reg->umin_value = 0; 2043 dst_reg->umax_value = U64_MAX; 2044 } else { 2045 dst_reg->umin_value += umin_val; 2046 dst_reg->umax_value += umax_val; 2047 } 2048 dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off); 2049 break; 2050 case BPF_SUB: 2051 if (signed_sub_overflows(dst_reg->smin_value, smax_val) || 2052 signed_sub_overflows(dst_reg->smax_value, smin_val)) { 2053 /* Overflow possible, we know nothing */ 2054 dst_reg->smin_value = S64_MIN; 2055 dst_reg->smax_value = S64_MAX; 2056 } else { 2057 dst_reg->smin_value -= smax_val; 2058 dst_reg->smax_value -= smin_val; 2059 } 2060 if (dst_reg->umin_value < umax_val) { 2061 /* Overflow possible, we know nothing */ 2062 dst_reg->umin_value = 0; 2063 dst_reg->umax_value = U64_MAX; 2064 } else { 2065 /* Cannot overflow (as long as bounds are consistent) */ 2066 dst_reg->umin_value -= umax_val; 2067 dst_reg->umax_value -= umin_val; 2068 } 2069 dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off); 2070 break; 2071 case BPF_MUL: 2072 dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off); 2073 if (smin_val < 0 || dst_reg->smin_value < 0) { 2074 /* Ain't nobody got time to multiply that sign */ 2075 __mark_reg_unbounded(dst_reg); 2076 __update_reg_bounds(dst_reg); 2077 break; 2078 } 2079 /* Both values are positive, so we can work with unsigned and 2080 * copy the result to signed (unless it exceeds S64_MAX). 
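 * (Concretely, the U32_MAX check below is what keeps this sound: since
 * (2^32 - 1) * (2^32 - 1) is still less than 2^64, once both umax values fit
 * in 32 bits the u64 multiply cannot wrap; the product may still exceed
 * S64_MAX, in which case the signed bounds are simply widened to
 * [S64_MIN, S64_MAX].)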
2081 */ 2082 if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) { 2083 /* Potential overflow, we know nothing */ 2084 __mark_reg_unbounded(dst_reg); 2085 /* (except what we can learn from the var_off) */ 2086 __update_reg_bounds(dst_reg); 2087 break; 2088 } 2089 dst_reg->umin_value *= umin_val; 2090 dst_reg->umax_value *= umax_val; 2091 if (dst_reg->umax_value > S64_MAX) { 2092 /* Overflow possible, we know nothing */ 2093 dst_reg->smin_value = S64_MIN; 2094 dst_reg->smax_value = S64_MAX; 2095 } else { 2096 dst_reg->smin_value = dst_reg->umin_value; 2097 dst_reg->smax_value = dst_reg->umax_value; 2098 } 2099 break; 2100 case BPF_AND: 2101 if (src_known && dst_known) { 2102 __mark_reg_known(dst_reg, dst_reg->var_off.value & 2103 src_reg.var_off.value); 2104 break; 2105 } 2106 /* We get our minimum from the var_off, since that's inherently 2107 * bitwise. Our maximum is the minimum of the operands' maxima. 2108 */ 2109 dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off); 2110 dst_reg->umin_value = dst_reg->var_off.value; 2111 dst_reg->umax_value = min(dst_reg->umax_value, umax_val); 2112 if (dst_reg->smin_value < 0 || smin_val < 0) { 2113 /* Lose signed bounds when ANDing negative numbers, 2114 * ain't nobody got time for that. 2115 */ 2116 dst_reg->smin_value = S64_MIN; 2117 dst_reg->smax_value = S64_MAX; 2118 } else { 2119 /* ANDing two positives gives a positive, so safe to 2120 * cast result into s64. 2121 */ 2122 dst_reg->smin_value = dst_reg->umin_value; 2123 dst_reg->smax_value = dst_reg->umax_value; 2124 } 2125 /* We may learn something more from the var_off */ 2126 __update_reg_bounds(dst_reg); 2127 break; 2128 case BPF_OR: 2129 if (src_known && dst_known) { 2130 __mark_reg_known(dst_reg, dst_reg->var_off.value | 2131 src_reg.var_off.value); 2132 break; 2133 } 2134 /* We get our maximum from the var_off, and our minimum is the 2135 * maximum of the operands' minima 2136 */ 2137 dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off); 2138 dst_reg->umin_value = max(dst_reg->umin_value, umin_val); 2139 dst_reg->umax_value = dst_reg->var_off.value | 2140 dst_reg->var_off.mask; 2141 if (dst_reg->smin_value < 0 || smin_val < 0) { 2142 /* Lose signed bounds when ORing negative numbers, 2143 * ain't nobody got time for that. 2144 */ 2145 dst_reg->smin_value = S64_MIN; 2146 dst_reg->smax_value = S64_MAX; 2147 } else { 2148 /* ORing two positives gives a positive, so safe to 2149 * cast result into s64. 2150 */ 2151 dst_reg->smin_value = dst_reg->umin_value; 2152 dst_reg->smax_value = dst_reg->umax_value; 2153 } 2154 /* We may learn something more from the var_off */ 2155 __update_reg_bounds(dst_reg); 2156 break; 2157 case BPF_LSH: 2158 if (umax_val > 63) { 2159 /* Shifts greater than 63 are undefined. This includes 2160 * shifts by a negative number. 
2161 */ 2162 mark_reg_unknown(env, regs, insn->dst_reg); 2163 break; 2164 } 2165 /* We lose all sign bit information (except what we can pick 2166 * up from var_off) 2167 */ 2168 dst_reg->smin_value = S64_MIN; 2169 dst_reg->smax_value = S64_MAX; 2170 /* If we might shift our top bit out, then we know nothing */ 2171 if (dst_reg->umax_value > 1ULL << (63 - umax_val)) { 2172 dst_reg->umin_value = 0; 2173 dst_reg->umax_value = U64_MAX; 2174 } else { 2175 dst_reg->umin_value <<= umin_val; 2176 dst_reg->umax_value <<= umax_val; 2177 } 2178 if (src_known) 2179 dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val); 2180 else 2181 dst_reg->var_off = tnum_lshift(tnum_unknown, umin_val); 2182 /* We may learn something more from the var_off */ 2183 __update_reg_bounds(dst_reg); 2184 break; 2185 case BPF_RSH: 2186 if (umax_val > 63) { 2187 /* Shifts greater than 63 are undefined. This includes 2188 * shifts by a negative number. 2189 */ 2190 mark_reg_unknown(env, regs, insn->dst_reg); 2191 break; 2192 } 2193 /* BPF_RSH is an unsigned shift, so make the appropriate casts */ 2194 if (dst_reg->smin_value < 0) { 2195 if (umin_val) { 2196 /* Sign bit will be cleared */ 2197 dst_reg->smin_value = 0; 2198 } else { 2199 /* Lost sign bit information */ 2200 dst_reg->smin_value = S64_MIN; 2201 dst_reg->smax_value = S64_MAX; 2202 } 2203 } else { 2204 dst_reg->smin_value = 2205 (u64)(dst_reg->smin_value) >> umax_val; 2206 } 2207 if (src_known) 2208 dst_reg->var_off = tnum_rshift(dst_reg->var_off, 2209 umin_val); 2210 else 2211 dst_reg->var_off = tnum_rshift(tnum_unknown, umin_val); 2212 dst_reg->umin_value >>= umax_val; 2213 dst_reg->umax_value >>= umin_val; 2214 /* We may learn something more from the var_off */ 2215 __update_reg_bounds(dst_reg); 2216 break; 2217 default: 2218 mark_reg_unknown(env, regs, insn->dst_reg); 2219 break; 2220 } 2221 2222 __reg_deduce_bounds(dst_reg); 2223 __reg_bound_offset(dst_reg); 2224 return 0; 2225 } 2226 2227 /* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max 2228 * and var_off. 2229 */ 2230 static int adjust_reg_min_max_vals(struct bpf_verifier_env *env, 2231 struct bpf_insn *insn) 2232 { 2233 struct bpf_reg_state *regs = cur_regs(env), *dst_reg, *src_reg; 2234 struct bpf_reg_state *ptr_reg = NULL, off_reg = {0}; 2235 u8 opcode = BPF_OP(insn->code); 2236 int rc; 2237 2238 dst_reg = ®s[insn->dst_reg]; 2239 src_reg = NULL; 2240 if (dst_reg->type != SCALAR_VALUE) 2241 ptr_reg = dst_reg; 2242 if (BPF_SRC(insn->code) == BPF_X) { 2243 src_reg = ®s[insn->src_reg]; 2244 if (src_reg->type != SCALAR_VALUE) { 2245 if (dst_reg->type != SCALAR_VALUE) { 2246 /* Combining two pointers by any ALU op yields 2247 * an arbitrary scalar. 
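 * (For example, r3 = pkt_end; r3 -= pkt_data; computes a packet length, but
 * the verifier can only track the result as an unknown SCALAR_VALUE, and
 * only privileged (allow_ptr_leaks) programs are allowed to do it at all.)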
2248 */ 2249 if (!env->allow_ptr_leaks) { 2250 verbose(env, "R%d pointer %s pointer prohibited\n", 2251 insn->dst_reg, 2252 bpf_alu_string[opcode >> 4]); 2253 return -EACCES; 2254 } 2255 mark_reg_unknown(env, regs, insn->dst_reg); 2256 return 0; 2257 } else { 2258 /* scalar += pointer 2259 * This is legal, but we have to reverse our 2260 * src/dest handling in computing the range 2261 */ 2262 rc = adjust_ptr_min_max_vals(env, insn, 2263 src_reg, dst_reg); 2264 if (rc == -EACCES && env->allow_ptr_leaks) { 2265 /* scalar += unknown scalar */ 2266 __mark_reg_unknown(&off_reg); 2267 return adjust_scalar_min_max_vals( 2268 env, insn, 2269 dst_reg, off_reg); 2270 } 2271 return rc; 2272 } 2273 } else if (ptr_reg) { 2274 /* pointer += scalar */ 2275 rc = adjust_ptr_min_max_vals(env, insn, 2276 dst_reg, src_reg); 2277 if (rc == -EACCES && env->allow_ptr_leaks) { 2278 /* unknown scalar += scalar */ 2279 __mark_reg_unknown(dst_reg); 2280 return adjust_scalar_min_max_vals( 2281 env, insn, dst_reg, *src_reg); 2282 } 2283 return rc; 2284 } 2285 } else { 2286 /* Pretend the src is a reg with a known value, since we only 2287 * need to be able to read from this state. 2288 */ 2289 off_reg.type = SCALAR_VALUE; 2290 __mark_reg_known(&off_reg, insn->imm); 2291 src_reg = &off_reg; 2292 if (ptr_reg) { /* pointer += K */ 2293 rc = adjust_ptr_min_max_vals(env, insn, 2294 ptr_reg, src_reg); 2295 if (rc == -EACCES && env->allow_ptr_leaks) { 2296 /* unknown scalar += K */ 2297 __mark_reg_unknown(dst_reg); 2298 return adjust_scalar_min_max_vals( 2299 env, insn, dst_reg, off_reg); 2300 } 2301 return rc; 2302 } 2303 } 2304 2305 /* Got here implies adding two SCALAR_VALUEs */ 2306 if (WARN_ON_ONCE(ptr_reg)) { 2307 print_verifier_state(env, env->cur_state); 2308 verbose(env, "verifier internal error: unexpected ptr_reg\n"); 2309 return -EINVAL; 2310 } 2311 if (WARN_ON(!src_reg)) { 2312 print_verifier_state(env, env->cur_state); 2313 verbose(env, "verifier internal error: no src_reg\n"); 2314 return -EINVAL; 2315 } 2316 return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg); 2317 } 2318 2319 /* check validity of 32-bit and 64-bit arithmetic operations */ 2320 static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) 2321 { 2322 struct bpf_reg_state *regs = cur_regs(env); 2323 u8 opcode = BPF_OP(insn->code); 2324 int err; 2325 2326 if (opcode == BPF_END || opcode == BPF_NEG) { 2327 if (opcode == BPF_NEG) { 2328 if (BPF_SRC(insn->code) != 0 || 2329 insn->src_reg != BPF_REG_0 || 2330 insn->off != 0 || insn->imm != 0) { 2331 verbose(env, "BPF_NEG uses reserved fields\n"); 2332 return -EINVAL; 2333 } 2334 } else { 2335 if (insn->src_reg != BPF_REG_0 || insn->off != 0 || 2336 (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) || 2337 BPF_CLASS(insn->code) == BPF_ALU64) { 2338 verbose(env, "BPF_END uses reserved fields\n"); 2339 return -EINVAL; 2340 } 2341 } 2342 2343 /* check src operand */ 2344 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 2345 if (err) 2346 return err; 2347 2348 if (is_pointer_value(env, insn->dst_reg)) { 2349 verbose(env, "R%d pointer arithmetic prohibited\n", 2350 insn->dst_reg); 2351 return -EACCES; 2352 } 2353 2354 /* check dest operand */ 2355 err = check_reg_arg(env, insn->dst_reg, DST_OP); 2356 if (err) 2357 return err; 2358 2359 } else if (opcode == BPF_MOV) { 2360 2361 if (BPF_SRC(insn->code) == BPF_X) { 2362 if (insn->imm != 0 || insn->off != 0) { 2363 verbose(env, "BPF_MOV uses reserved fields\n"); 2364 return -EINVAL; 2365 } 2366 2367 /* check src operand */ 
2368 err = check_reg_arg(env, insn->src_reg, SRC_OP); 2369 if (err) 2370 return err; 2371 } else { 2372 if (insn->src_reg != BPF_REG_0 || insn->off != 0) { 2373 verbose(env, "BPF_MOV uses reserved fields\n"); 2374 return -EINVAL; 2375 } 2376 } 2377 2378 /* check dest operand */ 2379 err = check_reg_arg(env, insn->dst_reg, DST_OP); 2380 if (err) 2381 return err; 2382 2383 if (BPF_SRC(insn->code) == BPF_X) { 2384 if (BPF_CLASS(insn->code) == BPF_ALU64) { 2385 /* case: R1 = R2 2386 * copy register state to dest reg 2387 */ 2388 regs[insn->dst_reg] = regs[insn->src_reg]; 2389 regs[insn->dst_reg].live |= REG_LIVE_WRITTEN; 2390 } else { 2391 /* R1 = (u32) R2 */ 2392 if (is_pointer_value(env, insn->src_reg)) { 2393 verbose(env, 2394 "R%d partial copy of pointer\n", 2395 insn->src_reg); 2396 return -EACCES; 2397 } 2398 mark_reg_unknown(env, regs, insn->dst_reg); 2399 /* high 32 bits are known zero. */ 2400 regs[insn->dst_reg].var_off = tnum_cast( 2401 regs[insn->dst_reg].var_off, 4); 2402 __update_reg_bounds(®s[insn->dst_reg]); 2403 } 2404 } else { 2405 /* case: R = imm 2406 * remember the value we stored into this reg 2407 */ 2408 regs[insn->dst_reg].type = SCALAR_VALUE; 2409 __mark_reg_known(regs + insn->dst_reg, insn->imm); 2410 } 2411 2412 } else if (opcode > BPF_END) { 2413 verbose(env, "invalid BPF_ALU opcode %x\n", opcode); 2414 return -EINVAL; 2415 2416 } else { /* all other ALU ops: and, sub, xor, add, ... */ 2417 2418 if (BPF_SRC(insn->code) == BPF_X) { 2419 if (insn->imm != 0 || insn->off != 0) { 2420 verbose(env, "BPF_ALU uses reserved fields\n"); 2421 return -EINVAL; 2422 } 2423 /* check src1 operand */ 2424 err = check_reg_arg(env, insn->src_reg, SRC_OP); 2425 if (err) 2426 return err; 2427 } else { 2428 if (insn->src_reg != BPF_REG_0 || insn->off != 0) { 2429 verbose(env, "BPF_ALU uses reserved fields\n"); 2430 return -EINVAL; 2431 } 2432 } 2433 2434 /* check src2 operand */ 2435 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 2436 if (err) 2437 return err; 2438 2439 if ((opcode == BPF_MOD || opcode == BPF_DIV) && 2440 BPF_SRC(insn->code) == BPF_K && insn->imm == 0) { 2441 verbose(env, "div by zero\n"); 2442 return -EINVAL; 2443 } 2444 2445 if ((opcode == BPF_LSH || opcode == BPF_RSH || 2446 opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { 2447 int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32; 2448 2449 if (insn->imm < 0 || insn->imm >= size) { 2450 verbose(env, "invalid shift %d\n", insn->imm); 2451 return -EINVAL; 2452 } 2453 } 2454 2455 /* check dest operand */ 2456 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); 2457 if (err) 2458 return err; 2459 2460 return adjust_reg_min_max_vals(env, insn); 2461 } 2462 2463 return 0; 2464 } 2465 2466 static void find_good_pkt_pointers(struct bpf_verifier_state *state, 2467 struct bpf_reg_state *dst_reg, 2468 enum bpf_reg_type type, 2469 bool range_right_open) 2470 { 2471 struct bpf_reg_state *regs = state->regs, *reg; 2472 u16 new_range; 2473 int i; 2474 2475 if (dst_reg->off < 0 || 2476 (dst_reg->off == 0 && range_right_open)) 2477 /* This doesn't give us any range */ 2478 return; 2479 2480 if (dst_reg->umax_value > MAX_PACKET_OFF || 2481 dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF) 2482 /* Risk of overflow. For instance, ptr + (1<<63) may be less 2483 * than pkt_end, but that's because it's also less than pkt. 
2484 */ 2485 return; 2486 2487 new_range = dst_reg->off; 2488 if (range_right_open) 2489 new_range--; 2490 2491 /* Examples for register markings: 2492 * 2493 * pkt_data in dst register: 2494 * 2495 * r2 = r3; 2496 * r2 += 8; 2497 * if (r2 > pkt_end) goto <handle exception> 2498 * <access okay> 2499 * 2500 * r2 = r3; 2501 * r2 += 8; 2502 * if (r2 < pkt_end) goto <access okay> 2503 * <handle exception> 2504 * 2505 * Where: 2506 * r2 == dst_reg, pkt_end == src_reg 2507 * r2=pkt(id=n,off=8,r=0) 2508 * r3=pkt(id=n,off=0,r=0) 2509 * 2510 * pkt_data in src register: 2511 * 2512 * r2 = r3; 2513 * r2 += 8; 2514 * if (pkt_end >= r2) goto <access okay> 2515 * <handle exception> 2516 * 2517 * r2 = r3; 2518 * r2 += 8; 2519 * if (pkt_end <= r2) goto <handle exception> 2520 * <access okay> 2521 * 2522 * Where: 2523 * pkt_end == dst_reg, r2 == src_reg 2524 * r2=pkt(id=n,off=8,r=0) 2525 * r3=pkt(id=n,off=0,r=0) 2526 * 2527 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8) 2528 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8) 2529 * and [r3, r3 + 8-1) respectively is safe to access depending on 2530 * the check. 2531 */ 2532 2533 /* If our ids match, then we must have the same max_value. And we 2534 * don't care about the other reg's fixed offset, since if it's too big 2535 * the range won't allow anything. 2536 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16. 2537 */ 2538 for (i = 0; i < MAX_BPF_REG; i++) 2539 if (regs[i].type == type && regs[i].id == dst_reg->id) 2540 /* keep the maximum range already checked */ 2541 regs[i].range = max(regs[i].range, new_range); 2542 2543 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { 2544 if (state->stack[i].slot_type[0] != STACK_SPILL) 2545 continue; 2546 reg = &state->stack[i].spilled_ptr; 2547 if (reg->type == type && reg->id == dst_reg->id) 2548 reg->range = max(reg->range, new_range); 2549 } 2550 } 2551 2552 /* Adjusts the register min/max values in the case that the dst_reg is the 2553 * variable register that we are working on, and src_reg is a constant or we're 2554 * simply doing a BPF_K check. 2555 * In JEQ/JNE cases we also adjust the var_off values. 2556 */ 2557 static void reg_set_min_max(struct bpf_reg_state *true_reg, 2558 struct bpf_reg_state *false_reg, u64 val, 2559 u8 opcode) 2560 { 2561 /* If the dst_reg is a pointer, we can't learn anything about its 2562 * variable offset from the compare (unless src_reg were a pointer into 2563 * the same object, but we don't bother with that. 2564 * Since false_reg and true_reg have the same type by construction, we 2565 * only need to check one of them for pointerness. 2566 */ 2567 if (__is_pointer_value(false, false_reg)) 2568 return; 2569 2570 switch (opcode) { 2571 case BPF_JEQ: 2572 /* If this is false then we know nothing Jon Snow, but if it is 2573 * true then we know for sure. 
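 * For example (illustrative): after 'if (r1 == 7) goto L;' the branch that
 * takes the jump knows r1 is exactly 7 (__mark_reg_known() below), while the
 * fall-through path only learns r1 != 7, which does not narrow its bounds.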
2574 */ 2575 __mark_reg_known(true_reg, val); 2576 break; 2577 case BPF_JNE: 2578 /* If this is true we know nothing Jon Snow, but if it is false 2579 * we know the value for sure; 2580 */ 2581 __mark_reg_known(false_reg, val); 2582 break; 2583 case BPF_JGT: 2584 false_reg->umax_value = min(false_reg->umax_value, val); 2585 true_reg->umin_value = max(true_reg->umin_value, val + 1); 2586 break; 2587 case BPF_JSGT: 2588 false_reg->smax_value = min_t(s64, false_reg->smax_value, val); 2589 true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1); 2590 break; 2591 case BPF_JLT: 2592 false_reg->umin_value = max(false_reg->umin_value, val); 2593 true_reg->umax_value = min(true_reg->umax_value, val - 1); 2594 break; 2595 case BPF_JSLT: 2596 false_reg->smin_value = max_t(s64, false_reg->smin_value, val); 2597 true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1); 2598 break; 2599 case BPF_JGE: 2600 false_reg->umax_value = min(false_reg->umax_value, val - 1); 2601 true_reg->umin_value = max(true_reg->umin_value, val); 2602 break; 2603 case BPF_JSGE: 2604 false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1); 2605 true_reg->smin_value = max_t(s64, true_reg->smin_value, val); 2606 break; 2607 case BPF_JLE: 2608 false_reg->umin_value = max(false_reg->umin_value, val + 1); 2609 true_reg->umax_value = min(true_reg->umax_value, val); 2610 break; 2611 case BPF_JSLE: 2612 false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1); 2613 true_reg->smax_value = min_t(s64, true_reg->smax_value, val); 2614 break; 2615 default: 2616 break; 2617 } 2618 2619 __reg_deduce_bounds(false_reg); 2620 __reg_deduce_bounds(true_reg); 2621 /* We might have learned some bits from the bounds. */ 2622 __reg_bound_offset(false_reg); 2623 __reg_bound_offset(true_reg); 2624 /* Intersecting with the old var_off might have improved our bounds 2625 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), 2626 * then new var_off is (0; 0x7f...fc) which improves our umax. 2627 */ 2628 __update_reg_bounds(false_reg); 2629 __update_reg_bounds(true_reg); 2630 } 2631 2632 /* Same as above, but for the case that dst_reg holds a constant and src_reg is 2633 * the variable reg. 2634 */ 2635 static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, 2636 struct bpf_reg_state *false_reg, u64 val, 2637 u8 opcode) 2638 { 2639 if (__is_pointer_value(false, false_reg)) 2640 return; 2641 2642 switch (opcode) { 2643 case BPF_JEQ: 2644 /* If this is false then we know nothing Jon Snow, but if it is 2645 * true then we know for sure. 
2646 */ 2647 __mark_reg_known(true_reg, val); 2648 break; 2649 case BPF_JNE: 2650 /* If this is true we know nothing Jon Snow, but if it is false 2651 * we know the value for sure; 2652 */ 2653 __mark_reg_known(false_reg, val); 2654 break; 2655 case BPF_JGT: 2656 true_reg->umax_value = min(true_reg->umax_value, val - 1); 2657 false_reg->umin_value = max(false_reg->umin_value, val); 2658 break; 2659 case BPF_JSGT: 2660 true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1); 2661 false_reg->smin_value = max_t(s64, false_reg->smin_value, val); 2662 break; 2663 case BPF_JLT: 2664 true_reg->umin_value = max(true_reg->umin_value, val + 1); 2665 false_reg->umax_value = min(false_reg->umax_value, val); 2666 break; 2667 case BPF_JSLT: 2668 true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1); 2669 false_reg->smax_value = min_t(s64, false_reg->smax_value, val); 2670 break; 2671 case BPF_JGE: 2672 true_reg->umax_value = min(true_reg->umax_value, val); 2673 false_reg->umin_value = max(false_reg->umin_value, val + 1); 2674 break; 2675 case BPF_JSGE: 2676 true_reg->smax_value = min_t(s64, true_reg->smax_value, val); 2677 false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1); 2678 break; 2679 case BPF_JLE: 2680 true_reg->umin_value = max(true_reg->umin_value, val); 2681 false_reg->umax_value = min(false_reg->umax_value, val - 1); 2682 break; 2683 case BPF_JSLE: 2684 true_reg->smin_value = max_t(s64, true_reg->smin_value, val); 2685 false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1); 2686 break; 2687 default: 2688 break; 2689 } 2690 2691 __reg_deduce_bounds(false_reg); 2692 __reg_deduce_bounds(true_reg); 2693 /* We might have learned some bits from the bounds. */ 2694 __reg_bound_offset(false_reg); 2695 __reg_bound_offset(true_reg); 2696 /* Intersecting with the old var_off might have improved our bounds 2697 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), 2698 * then new var_off is (0; 0x7f...fc) which improves our umax. 2699 */ 2700 __update_reg_bounds(false_reg); 2701 __update_reg_bounds(true_reg); 2702 } 2703 2704 /* Regs are known to be equal, so intersect their min/max/var_off */ 2705 static void __reg_combine_min_max(struct bpf_reg_state *src_reg, 2706 struct bpf_reg_state *dst_reg) 2707 { 2708 src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value, 2709 dst_reg->umin_value); 2710 src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value, 2711 dst_reg->umax_value); 2712 src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value, 2713 dst_reg->smin_value); 2714 src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value, 2715 dst_reg->smax_value); 2716 src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off, 2717 dst_reg->var_off); 2718 /* We might have learned new bounds from the var_off. */ 2719 __update_reg_bounds(src_reg); 2720 __update_reg_bounds(dst_reg); 2721 /* We might have learned something about the sign bit. */ 2722 __reg_deduce_bounds(src_reg); 2723 __reg_deduce_bounds(dst_reg); 2724 /* We might have learned some bits from the bounds. */ 2725 __reg_bound_offset(src_reg); 2726 __reg_bound_offset(dst_reg); 2727 /* Intersecting with the old var_off might have improved our bounds 2728 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), 2729 * then new var_off is (0; 0x7f...fc) which improves our umax. 
2730 */ 2731 __update_reg_bounds(src_reg); 2732 __update_reg_bounds(dst_reg); 2733 } 2734 2735 static void reg_combine_min_max(struct bpf_reg_state *true_src, 2736 struct bpf_reg_state *true_dst, 2737 struct bpf_reg_state *false_src, 2738 struct bpf_reg_state *false_dst, 2739 u8 opcode) 2740 { 2741 switch (opcode) { 2742 case BPF_JEQ: 2743 __reg_combine_min_max(true_src, true_dst); 2744 break; 2745 case BPF_JNE: 2746 __reg_combine_min_max(false_src, false_dst); 2747 break; 2748 } 2749 } 2750 2751 static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id, 2752 bool is_null) 2753 { 2754 struct bpf_reg_state *reg = ®s[regno]; 2755 2756 if (reg->type == PTR_TO_MAP_VALUE_OR_NULL && reg->id == id) { 2757 /* Old offset (both fixed and variable parts) should 2758 * have been known-zero, because we don't allow pointer 2759 * arithmetic on pointers that might be NULL. 2760 */ 2761 if (WARN_ON_ONCE(reg->smin_value || reg->smax_value || 2762 !tnum_equals_const(reg->var_off, 0) || 2763 reg->off)) { 2764 __mark_reg_known_zero(reg); 2765 reg->off = 0; 2766 } 2767 if (is_null) { 2768 reg->type = SCALAR_VALUE; 2769 } else if (reg->map_ptr->inner_map_meta) { 2770 reg->type = CONST_PTR_TO_MAP; 2771 reg->map_ptr = reg->map_ptr->inner_map_meta; 2772 } else { 2773 reg->type = PTR_TO_MAP_VALUE; 2774 } 2775 /* We don't need id from this point onwards anymore, thus we 2776 * should better reset it, so that state pruning has chances 2777 * to take effect. 2778 */ 2779 reg->id = 0; 2780 } 2781 } 2782 2783 /* The logic is similar to find_good_pkt_pointers(), both could eventually 2784 * be folded together at some point. 2785 */ 2786 static void mark_map_regs(struct bpf_verifier_state *state, u32 regno, 2787 bool is_null) 2788 { 2789 struct bpf_reg_state *regs = state->regs; 2790 u32 id = regs[regno].id; 2791 int i; 2792 2793 for (i = 0; i < MAX_BPF_REG; i++) 2794 mark_map_reg(regs, i, id, is_null); 2795 2796 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { 2797 if (state->stack[i].slot_type[0] != STACK_SPILL) 2798 continue; 2799 mark_map_reg(&state->stack[i].spilled_ptr, 0, id, is_null); 2800 } 2801 } 2802 2803 static bool try_match_pkt_pointers(const struct bpf_insn *insn, 2804 struct bpf_reg_state *dst_reg, 2805 struct bpf_reg_state *src_reg, 2806 struct bpf_verifier_state *this_branch, 2807 struct bpf_verifier_state *other_branch) 2808 { 2809 if (BPF_SRC(insn->code) != BPF_X) 2810 return false; 2811 2812 switch (BPF_OP(insn->code)) { 2813 case BPF_JGT: 2814 if ((dst_reg->type == PTR_TO_PACKET && 2815 src_reg->type == PTR_TO_PACKET_END) || 2816 (dst_reg->type == PTR_TO_PACKET_META && 2817 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 2818 /* pkt_data' > pkt_end, pkt_meta' > pkt_data */ 2819 find_good_pkt_pointers(this_branch, dst_reg, 2820 dst_reg->type, false); 2821 } else if ((dst_reg->type == PTR_TO_PACKET_END && 2822 src_reg->type == PTR_TO_PACKET) || 2823 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && 2824 src_reg->type == PTR_TO_PACKET_META)) { 2825 /* pkt_end > pkt_data', pkt_data > pkt_meta' */ 2826 find_good_pkt_pointers(other_branch, src_reg, 2827 src_reg->type, true); 2828 } else { 2829 return false; 2830 } 2831 break; 2832 case BPF_JLT: 2833 if ((dst_reg->type == PTR_TO_PACKET && 2834 src_reg->type == PTR_TO_PACKET_END) || 2835 (dst_reg->type == PTR_TO_PACKET_META && 2836 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 2837 /* pkt_data' < pkt_end, pkt_meta' < pkt_data */ 2838 find_good_pkt_pointers(other_branch, dst_reg, 2839 dst_reg->type, true); 2840 } else if 
		   ((dst_reg->type == PTR_TO_PACKET_END &&
		    src_reg->type == PTR_TO_PACKET) ||
		   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
		    src_reg->type == PTR_TO_PACKET_META)) {
			/* pkt_end < pkt_data', pkt_data < pkt_meta' */
			find_good_pkt_pointers(this_branch, src_reg,
					       src_reg->type, false);
		} else {
			return false;
		}
		break;
	case BPF_JGE:
		if ((dst_reg->type == PTR_TO_PACKET &&
		     src_reg->type == PTR_TO_PACKET_END) ||
		    (dst_reg->type == PTR_TO_PACKET_META &&
		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
			/* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */
			find_good_pkt_pointers(this_branch, dst_reg,
					       dst_reg->type, true);
		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
			    src_reg->type == PTR_TO_PACKET) ||
			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
			    src_reg->type == PTR_TO_PACKET_META)) {
			/* pkt_end >= pkt_data', pkt_data >= pkt_meta' */
			find_good_pkt_pointers(other_branch, src_reg,
					       src_reg->type, false);
		} else {
			return false;
		}
		break;
	case BPF_JLE:
		if ((dst_reg->type == PTR_TO_PACKET &&
		     src_reg->type == PTR_TO_PACKET_END) ||
		    (dst_reg->type == PTR_TO_PACKET_META &&
		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
			/* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */
			find_good_pkt_pointers(other_branch, dst_reg,
					       dst_reg->type, false);
		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
			    src_reg->type == PTR_TO_PACKET) ||
			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
			    src_reg->type == PTR_TO_PACKET_META)) {
			/* pkt_end <= pkt_data', pkt_data <= pkt_meta' */
			find_good_pkt_pointers(this_branch, src_reg,
					       src_reg->type, true);
		} else {
			return false;
		}
		break;
	default:
		return false;
	}

	return true;
}

static int check_cond_jmp_op(struct bpf_verifier_env *env,
			     struct bpf_insn *insn, int *insn_idx)
{
	struct bpf_verifier_state *other_branch, *this_branch = env->cur_state;
	struct bpf_reg_state *regs = this_branch->regs, *dst_reg;
	u8 opcode = BPF_OP(insn->code);
	int err;

	if (opcode > BPF_JSLE) {
		verbose(env, "invalid BPF_JMP opcode %x\n", opcode);
		return -EINVAL;
	}

	if (BPF_SRC(insn->code) == BPF_X) {
		if (insn->imm != 0) {
			verbose(env, "BPF_JMP uses reserved fields\n");
			return -EINVAL;
		}

		/* check src1 operand */
		err = check_reg_arg(env, insn->src_reg, SRC_OP);
		if (err)
			return err;

		if (is_pointer_value(env, insn->src_reg)) {
			verbose(env, "R%d pointer comparison prohibited\n",
				insn->src_reg);
			return -EACCES;
		}
	} else {
		if (insn->src_reg != BPF_REG_0) {
			verbose(env, "BPF_JMP uses reserved fields\n");
			return -EINVAL;
		}
	}

	/* check src2 operand */
	err = check_reg_arg(env, insn->dst_reg, SRC_OP);
	if (err)
		return err;

	dst_reg = &regs[insn->dst_reg];

	/* detect if R == 0 where R was initialized to zero earlier */
	if (BPF_SRC(insn->code) == BPF_K &&
	    (opcode == BPF_JEQ || opcode == BPF_JNE) &&
	    dst_reg->type == SCALAR_VALUE &&
	    tnum_equals_const(dst_reg->var_off, insn->imm)) {
		if (opcode == BPF_JEQ) {
			/* if (imm == imm) goto pc+off;
			 * only follow the goto, ignore fall-through
			 */
			*insn_idx += insn->off;
			return 0;
		} else {
			/* if (imm != imm) goto pc+off;
			 * only follow fall-through
			 * branch, since
			 * that's where the program will go
			 */
			return 0;
		}
	}

	other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx);
	if (!other_branch)
		return -EFAULT;

	/* detect if we are comparing against a constant value so we can adjust
	 * our min/max values for our dst register.
	 * this is only legit if both are scalars (or pointers to the same
	 * object, I suppose, but we don't support that right now), because
	 * otherwise the different base pointers mean the offsets aren't
	 * comparable.
	 */
	if (BPF_SRC(insn->code) == BPF_X) {
		if (dst_reg->type == SCALAR_VALUE &&
		    regs[insn->src_reg].type == SCALAR_VALUE) {
			if (tnum_is_const(regs[insn->src_reg].var_off))
				reg_set_min_max(&other_branch->regs[insn->dst_reg],
						dst_reg, regs[insn->src_reg].var_off.value,
						opcode);
			else if (tnum_is_const(dst_reg->var_off))
				reg_set_min_max_inv(&other_branch->regs[insn->src_reg],
						    &regs[insn->src_reg],
						    dst_reg->var_off.value, opcode);
			else if (opcode == BPF_JEQ || opcode == BPF_JNE)
				/* Comparing for equality, we can combine knowledge */
				reg_combine_min_max(&other_branch->regs[insn->src_reg],
						    &other_branch->regs[insn->dst_reg],
						    &regs[insn->src_reg],
						    &regs[insn->dst_reg], opcode);
		}
	} else if (dst_reg->type == SCALAR_VALUE) {
		reg_set_min_max(&other_branch->regs[insn->dst_reg],
				dst_reg, insn->imm, opcode);
	}

	/* detect if R == 0 where R is returned from bpf_map_lookup_elem() */
	if (BPF_SRC(insn->code) == BPF_K &&
	    insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
	    dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
		/* Mark all identical map registers in each branch as either
		 * safe or unknown depending on whether R == 0 or R != 0 conditional.
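		 * For example (illustrative):
		 *   r0 = bpf_map_lookup_elem(...); // PTR_TO_MAP_VALUE_OR_NULL, id == N
		 *   r6 = r0;                       // the copy shares the same id == N
		 *   if (r0 == 0) goto err;
		 * In the fall-through branch both r0 and r6 become PTR_TO_MAP_VALUE,
		 * while in the 'err' branch both are downgraded to plain scalars.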
2999 */ 3000 mark_map_regs(this_branch, insn->dst_reg, opcode == BPF_JNE); 3001 mark_map_regs(other_branch, insn->dst_reg, opcode == BPF_JEQ); 3002 } else if (!try_match_pkt_pointers(insn, dst_reg, ®s[insn->src_reg], 3003 this_branch, other_branch) && 3004 is_pointer_value(env, insn->dst_reg)) { 3005 verbose(env, "R%d pointer comparison prohibited\n", 3006 insn->dst_reg); 3007 return -EACCES; 3008 } 3009 if (env->log.level) 3010 print_verifier_state(env, this_branch); 3011 return 0; 3012 } 3013 3014 /* return the map pointer stored inside BPF_LD_IMM64 instruction */ 3015 static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn) 3016 { 3017 u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32; 3018 3019 return (struct bpf_map *) (unsigned long) imm64; 3020 } 3021 3022 /* verify BPF_LD_IMM64 instruction */ 3023 static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn) 3024 { 3025 struct bpf_reg_state *regs = cur_regs(env); 3026 int err; 3027 3028 if (BPF_SIZE(insn->code) != BPF_DW) { 3029 verbose(env, "invalid BPF_LD_IMM insn\n"); 3030 return -EINVAL; 3031 } 3032 if (insn->off != 0) { 3033 verbose(env, "BPF_LD_IMM64 uses reserved fields\n"); 3034 return -EINVAL; 3035 } 3036 3037 err = check_reg_arg(env, insn->dst_reg, DST_OP); 3038 if (err) 3039 return err; 3040 3041 if (insn->src_reg == 0) { 3042 u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm; 3043 3044 regs[insn->dst_reg].type = SCALAR_VALUE; 3045 __mark_reg_known(®s[insn->dst_reg], imm); 3046 return 0; 3047 } 3048 3049 /* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */ 3050 BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD); 3051 3052 regs[insn->dst_reg].type = CONST_PTR_TO_MAP; 3053 regs[insn->dst_reg].map_ptr = ld_imm64_to_map_ptr(insn); 3054 return 0; 3055 } 3056 3057 static bool may_access_skb(enum bpf_prog_type type) 3058 { 3059 switch (type) { 3060 case BPF_PROG_TYPE_SOCKET_FILTER: 3061 case BPF_PROG_TYPE_SCHED_CLS: 3062 case BPF_PROG_TYPE_SCHED_ACT: 3063 return true; 3064 default: 3065 return false; 3066 } 3067 } 3068 3069 /* verify safety of LD_ABS|LD_IND instructions: 3070 * - they can only appear in the programs where ctx == skb 3071 * - since they are wrappers of function calls, they scratch R1-R5 registers, 3072 * preserve R6-R9, and store return value into R0 3073 * 3074 * Implicit input: 3075 * ctx == skb == R6 == CTX 3076 * 3077 * Explicit input: 3078 * SRC == any register 3079 * IMM == 32-bit immediate 3080 * 3081 * Output: 3082 * R0 - 8/16/32-bit skb data converted to cpu endianness 3083 */ 3084 static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) 3085 { 3086 struct bpf_reg_state *regs = cur_regs(env); 3087 u8 mode = BPF_MODE(insn->code); 3088 int i, err; 3089 3090 if (!may_access_skb(env->prog->type)) { 3091 verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n"); 3092 return -EINVAL; 3093 } 3094 3095 if (insn->dst_reg != BPF_REG_0 || insn->off != 0 || 3096 BPF_SIZE(insn->code) == BPF_DW || 3097 (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) { 3098 verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n"); 3099 return -EINVAL; 3100 } 3101 3102 /* check whether implicit source operand (register R6) is readable */ 3103 err = check_reg_arg(env, BPF_REG_6, SRC_OP); 3104 if (err) 3105 return err; 3106 3107 if (regs[BPF_REG_6].type != PTR_TO_CTX) { 3108 verbose(env, 3109 "at the time of BPF_LD_ABS|IND R6 != pointer to skb\n"); 3110 return -EINVAL; 3111 } 3112 3113 if (mode == BPF_IND) { 3114 /* check explicit 
source operand */ 3115 err = check_reg_arg(env, insn->src_reg, SRC_OP); 3116 if (err) 3117 return err; 3118 } 3119 3120 /* reset caller saved regs to unreadable */ 3121 for (i = 0; i < CALLER_SAVED_REGS; i++) { 3122 mark_reg_not_init(env, regs, caller_saved[i]); 3123 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); 3124 } 3125 3126 /* mark destination R0 register as readable, since it contains 3127 * the value fetched from the packet. 3128 * Already marked as written above. 3129 */ 3130 mark_reg_unknown(env, regs, BPF_REG_0); 3131 return 0; 3132 } 3133 3134 static int check_return_code(struct bpf_verifier_env *env) 3135 { 3136 struct bpf_reg_state *reg; 3137 struct tnum range = tnum_range(0, 1); 3138 3139 switch (env->prog->type) { 3140 case BPF_PROG_TYPE_CGROUP_SKB: 3141 case BPF_PROG_TYPE_CGROUP_SOCK: 3142 case BPF_PROG_TYPE_SOCK_OPS: 3143 case BPF_PROG_TYPE_CGROUP_DEVICE: 3144 break; 3145 default: 3146 return 0; 3147 } 3148 3149 reg = cur_regs(env) + BPF_REG_0; 3150 if (reg->type != SCALAR_VALUE) { 3151 verbose(env, "At program exit the register R0 is not a known value (%s)\n", 3152 reg_type_str[reg->type]); 3153 return -EINVAL; 3154 } 3155 3156 if (!tnum_in(range, reg->var_off)) { 3157 verbose(env, "At program exit the register R0 "); 3158 if (!tnum_is_unknown(reg->var_off)) { 3159 char tn_buf[48]; 3160 3161 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 3162 verbose(env, "has value %s", tn_buf); 3163 } else { 3164 verbose(env, "has unknown scalar value"); 3165 } 3166 verbose(env, " should have been 0 or 1\n"); 3167 return -EINVAL; 3168 } 3169 return 0; 3170 } 3171 3172 /* non-recursive DFS pseudo code 3173 * 1 procedure DFS-iterative(G,v): 3174 * 2 label v as discovered 3175 * 3 let S be a stack 3176 * 4 S.push(v) 3177 * 5 while S is not empty 3178 * 6 t <- S.pop() 3179 * 7 if t is what we're looking for: 3180 * 8 return t 3181 * 9 for all edges e in G.adjacentEdges(t) do 3182 * 10 if edge e is already labelled 3183 * 11 continue with the next edge 3184 * 12 w <- G.adjacentVertex(t,e) 3185 * 13 if vertex w is not discovered and not explored 3186 * 14 label e as tree-edge 3187 * 15 label w as discovered 3188 * 16 S.push(w) 3189 * 17 continue at 5 3190 * 18 else if vertex w is discovered 3191 * 19 label e as back-edge 3192 * 20 else 3193 * 21 // vertex w is explored 3194 * 22 label e as forward- or cross-edge 3195 * 23 label t as explored 3196 * 24 S.pop() 3197 * 3198 * convention: 3199 * 0x10 - discovered 3200 * 0x11 - discovered and fall-through edge labelled 3201 * 0x12 - discovered and fall-through and branch edges labelled 3202 * 0x20 - explored 3203 */ 3204 3205 enum { 3206 DISCOVERED = 0x10, 3207 EXPLORED = 0x20, 3208 FALLTHROUGH = 1, 3209 BRANCH = 2, 3210 }; 3211 3212 #define STATE_LIST_MARK ((struct bpf_verifier_state_list *) -1L) 3213 3214 static int *insn_stack; /* stack of insns to process */ 3215 static int cur_stack; /* current stack index */ 3216 static int *insn_state; 3217 3218 /* t, w, e - match pseudo-code above: 3219 * t - index of current instruction 3220 * w - next instruction 3221 * e - edge 3222 */ 3223 static int push_insn(int t, int w, int e, struct bpf_verifier_env *env) 3224 { 3225 if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH)) 3226 return 0; 3227 3228 if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH)) 3229 return 0; 3230 3231 if (w < 0 || w >= env->prog->len) { 3232 verbose(env, "jump out of range from insn %d to %d\n", t, w); 3233 return -EINVAL; 3234 } 3235 3236 if (e == BRANCH) 3237 /* mark branch target for state 
pruning */ 3238 env->explored_states[w] = STATE_LIST_MARK; 3239 3240 if (insn_state[w] == 0) { 3241 /* tree-edge */ 3242 insn_state[t] = DISCOVERED | e; 3243 insn_state[w] = DISCOVERED; 3244 if (cur_stack >= env->prog->len) 3245 return -E2BIG; 3246 insn_stack[cur_stack++] = w; 3247 return 1; 3248 } else if ((insn_state[w] & 0xF0) == DISCOVERED) { 3249 verbose(env, "back-edge from insn %d to %d\n", t, w); 3250 return -EINVAL; 3251 } else if (insn_state[w] == EXPLORED) { 3252 /* forward- or cross-edge */ 3253 insn_state[t] = DISCOVERED | e; 3254 } else { 3255 verbose(env, "insn state internal bug\n"); 3256 return -EFAULT; 3257 } 3258 return 0; 3259 } 3260 3261 /* non-recursive depth-first-search to detect loops in BPF program 3262 * loop == back-edge in directed graph 3263 */ 3264 static int check_cfg(struct bpf_verifier_env *env) 3265 { 3266 struct bpf_insn *insns = env->prog->insnsi; 3267 int insn_cnt = env->prog->len; 3268 int ret = 0; 3269 int i, t; 3270 3271 insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL); 3272 if (!insn_state) 3273 return -ENOMEM; 3274 3275 insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL); 3276 if (!insn_stack) { 3277 kfree(insn_state); 3278 return -ENOMEM; 3279 } 3280 3281 insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */ 3282 insn_stack[0] = 0; /* 0 is the first instruction */ 3283 cur_stack = 1; 3284 3285 peek_stack: 3286 if (cur_stack == 0) 3287 goto check_state; 3288 t = insn_stack[cur_stack - 1]; 3289 3290 if (BPF_CLASS(insns[t].code) == BPF_JMP) { 3291 u8 opcode = BPF_OP(insns[t].code); 3292 3293 if (opcode == BPF_EXIT) { 3294 goto mark_explored; 3295 } else if (opcode == BPF_CALL) { 3296 ret = push_insn(t, t + 1, FALLTHROUGH, env); 3297 if (ret == 1) 3298 goto peek_stack; 3299 else if (ret < 0) 3300 goto err_free; 3301 if (t + 1 < insn_cnt) 3302 env->explored_states[t + 1] = STATE_LIST_MARK; 3303 } else if (opcode == BPF_JA) { 3304 if (BPF_SRC(insns[t].code) != BPF_K) { 3305 ret = -EINVAL; 3306 goto err_free; 3307 } 3308 /* unconditional jump with single edge */ 3309 ret = push_insn(t, t + insns[t].off + 1, 3310 FALLTHROUGH, env); 3311 if (ret == 1) 3312 goto peek_stack; 3313 else if (ret < 0) 3314 goto err_free; 3315 /* tell verifier to check for equivalent states 3316 * after every call and jump 3317 */ 3318 if (t + 1 < insn_cnt) 3319 env->explored_states[t + 1] = STATE_LIST_MARK; 3320 } else { 3321 /* conditional jump with two edges */ 3322 env->explored_states[t] = STATE_LIST_MARK; 3323 ret = push_insn(t, t + 1, FALLTHROUGH, env); 3324 if (ret == 1) 3325 goto peek_stack; 3326 else if (ret < 0) 3327 goto err_free; 3328 3329 ret = push_insn(t, t + insns[t].off + 1, BRANCH, env); 3330 if (ret == 1) 3331 goto peek_stack; 3332 else if (ret < 0) 3333 goto err_free; 3334 } 3335 } else { 3336 /* all other non-branch instructions with single 3337 * fall-through edge 3338 */ 3339 ret = push_insn(t, t + 1, FALLTHROUGH, env); 3340 if (ret == 1) 3341 goto peek_stack; 3342 else if (ret < 0) 3343 goto err_free; 3344 } 3345 3346 mark_explored: 3347 insn_state[t] = EXPLORED; 3348 if (cur_stack-- <= 0) { 3349 verbose(env, "pop stack internal bug\n"); 3350 ret = -EFAULT; 3351 goto err_free; 3352 } 3353 goto peek_stack; 3354 3355 check_state: 3356 for (i = 0; i < insn_cnt; i++) { 3357 if (insn_state[i] != EXPLORED) { 3358 verbose(env, "unreachable insn %d\n", i); 3359 ret = -EINVAL; 3360 goto err_free; 3361 } 3362 } 3363 ret = 0; /* cfg looks good */ 3364 3365 err_free: 3366 kfree(insn_state); 3367 kfree(insn_stack); 3368 return ret; 3369 } 3370 3371 
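/* The following is a minimal, self-contained sketch (illustrative only, it is
 * not part of this file and is kept under #if 0) of the edge classification
 * performed by push_insn()/check_cfg() above: nodes are marked DISCOVERED when
 * pushed and EXPLORED when fully processed, and an edge to a node that is
 * still DISCOVERED is a back-edge, i.e. a loop. The toy_* names and the
 * four-node graph are made up for the example.
 */
#if 0
#define TOY_DISCOVERED	0x10
#define TOY_EXPLORED	0x20

/* up to two successors per node, -1 means "no edge" */
static const int toy_succ[4][2] = {
	{ 1, -1 },	/* 0 -> 1 */
	{ 2, -1 },	/* 1 -> 2 */
	{ 3,  1 },	/* 2 -> 3 and 2 -> 1 (a loop: 1 -> 2 -> 1) */
	{ -1, -1 },	/* 3: exit */
};

/* returns 0 if the graph is loop-free, -1 if a back-edge was found */
static int toy_check_cfg(void)
{
	int state[4] = {0}, stack[4], top = 0;

	state[0] = TOY_DISCOVERED;
	stack[top++] = 0;
	while (top) {
		int t = stack[top - 1], e, w, pushed = 0;

		for (e = 0; e < 2 && !pushed; e++) {
			w = toy_succ[t][e];
			if (w < 0)
				continue;
			if (!state[w]) {
				/* tree-edge: descend into w first */
				state[w] = TOY_DISCOVERED;
				stack[top++] = w;
				pushed = 1;
			} else if (state[w] == TOY_DISCOVERED) {
				/* w is still on the stack: back-edge, a loop */
				return -1;
			}
			/* TOY_EXPLORED: forward- or cross-edge, nothing to do */
		}
		if (!pushed) {
			state[t] = TOY_EXPLORED;
			top--;
		}
	}
	return 0;
}
#endif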
/* check %cur's range satisfies %old's */ 3372 static bool range_within(struct bpf_reg_state *old, 3373 struct bpf_reg_state *cur) 3374 { 3375 return old->umin_value <= cur->umin_value && 3376 old->umax_value >= cur->umax_value && 3377 old->smin_value <= cur->smin_value && 3378 old->smax_value >= cur->smax_value; 3379 } 3380 3381 /* Maximum number of register states that can exist at once */ 3382 #define ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) 3383 struct idpair { 3384 u32 old; 3385 u32 cur; 3386 }; 3387 3388 /* If in the old state two registers had the same id, then they need to have 3389 * the same id in the new state as well. But that id could be different from 3390 * the old state, so we need to track the mapping from old to new ids. 3391 * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent 3392 * regs with old id 5 must also have new id 9 for the new state to be safe. But 3393 * regs with a different old id could still have new id 9, we don't care about 3394 * that. 3395 * So we look through our idmap to see if this old id has been seen before. If 3396 * so, we require the new id to match; otherwise, we add the id pair to the map. 3397 */ 3398 static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap) 3399 { 3400 unsigned int i; 3401 3402 for (i = 0; i < ID_MAP_SIZE; i++) { 3403 if (!idmap[i].old) { 3404 /* Reached an empty slot; haven't seen this id before */ 3405 idmap[i].old = old_id; 3406 idmap[i].cur = cur_id; 3407 return true; 3408 } 3409 if (idmap[i].old == old_id) 3410 return idmap[i].cur == cur_id; 3411 } 3412 /* We ran out of idmap slots, which should be impossible */ 3413 WARN_ON_ONCE(1); 3414 return false; 3415 } 3416 3417 /* Returns true if (rold safe implies rcur safe) */ 3418 static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur, 3419 struct idpair *idmap) 3420 { 3421 if (!(rold->live & REG_LIVE_READ)) 3422 /* explored state didn't use this */ 3423 return true; 3424 3425 if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, live)) == 0) 3426 return true; 3427 3428 if (rold->type == NOT_INIT) 3429 /* explored state can't have used this */ 3430 return true; 3431 if (rcur->type == NOT_INIT) 3432 return false; 3433 switch (rold->type) { 3434 case SCALAR_VALUE: 3435 if (rcur->type == SCALAR_VALUE) { 3436 /* new val must satisfy old val knowledge */ 3437 return range_within(rold, rcur) && 3438 tnum_in(rold->var_off, rcur->var_off); 3439 } else { 3440 /* if we knew anything about the old value, we're not 3441 * equal, because we can't know anything about the 3442 * scalar value of the pointer in the new value. 3443 */ 3444 return rold->umin_value == 0 && 3445 rold->umax_value == U64_MAX && 3446 rold->smin_value == S64_MIN && 3447 rold->smax_value == S64_MAX && 3448 tnum_is_unknown(rold->var_off); 3449 } 3450 case PTR_TO_MAP_VALUE: 3451 /* If the new min/max/var_off satisfy the old ones and 3452 * everything else matches, we are OK. 3453 * We don't care about the 'id' value, because nothing 3454 * uses it for PTR_TO_MAP_VALUE (only for ..._OR_NULL) 3455 */ 3456 return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 && 3457 range_within(rold, rcur) && 3458 tnum_in(rold->var_off, rcur->var_off); 3459 case PTR_TO_MAP_VALUE_OR_NULL: 3460 /* a PTR_TO_MAP_VALUE could be safe to use as a 3461 * PTR_TO_MAP_VALUE_OR_NULL into the same map. 
		 * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL-
		 * checked, doing so could have affected others with the same
		 * id, and we can't check for that because we lost the id when
		 * we converted to a PTR_TO_MAP_VALUE.
		 */
		if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL)
			return false;
		if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)))
			return false;
		/* Check our ids match any regs they're supposed to */
		return check_ids(rold->id, rcur->id, idmap);
	case PTR_TO_PACKET_META:
	case PTR_TO_PACKET:
		if (rcur->type != rold->type)
			return false;
		/* We must have at least as much range as the old ptr
		 * did, so that any accesses which were safe before are
		 * still safe. This is true even if old range < old off,
		 * since someone could have accessed through (ptr - k), or
		 * even done ptr -= k in a register, to get a safe access.
		 */
		if (rold->range > rcur->range)
			return false;
		/* If the offsets don't match, we can't trust our alignment;
		 * nor can we be sure that we won't fall out of range.
		 */
		if (rold->off != rcur->off)
			return false;
		/* id relations must be preserved */
		if (rold->id && !check_ids(rold->id, rcur->id, idmap))
			return false;
		/* new val must satisfy old val knowledge */
		return range_within(rold, rcur) &&
		       tnum_in(rold->var_off, rcur->var_off);
	case PTR_TO_CTX:
	case CONST_PTR_TO_MAP:
	case PTR_TO_STACK:
	case PTR_TO_PACKET_END:
		/* Only valid matches are exact, which memcmp() above
		 * would have accepted
		 */
	default:
		/* Don't know what's going on, just say it's not safe */
		return false;
	}

	/* Shouldn't get here; if we do, say it's not safe */
	WARN_ON_ONCE(1);
	return false;
}

static bool stacksafe(struct bpf_verifier_state *old,
		      struct bpf_verifier_state *cur,
		      struct idpair *idmap)
{
	int i, spi;

	/* if explored stack has more populated slots than current stack
	 * such stacks are not equivalent
	 */
	if (old->allocated_stack > cur->allocated_stack)
		return false;

	/* walk slots of the explored stack and ignore any additional
	 * slots in the current stack, since explored(safe) state
	 * didn't use them
	 */
	for (i = 0; i < old->allocated_stack; i++) {
		spi = i / BPF_REG_SIZE;

		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
			continue;
		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
		    cur->stack[spi].slot_type[i % BPF_REG_SIZE])
			/* Ex: old explored (safe) state has STACK_SPILL in
			 * this stack slot, but current has STACK_MISC ->
			 * these verifier states are not equivalent,
			 * return false to continue verification of this path
			 */
			return false;
		if (i % BPF_REG_SIZE)
			continue;
		if (old->stack[spi].slot_type[0] != STACK_SPILL)
			continue;
		if (!regsafe(&old->stack[spi].spilled_ptr,
			     &cur->stack[spi].spilled_ptr,
			     idmap))
			/* when explored and current stack slot are both storing
			 * spilled registers, check that stored pointer types
			 * are the same as well.
			 * Ex: explored safe path could have stored
			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
			 * but current path has stored:
			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
			 * such verifier states are not equivalent.
3557 * return false to continue verification of this path 3558 */ 3559 return false; 3560 } 3561 return true; 3562 } 3563 3564 /* compare two verifier states 3565 * 3566 * all states stored in state_list are known to be valid, since 3567 * verifier reached 'bpf_exit' instruction through them 3568 * 3569 * this function is called when verifier exploring different branches of 3570 * execution popped from the state stack. If it sees an old state that has 3571 * more strict register state and more strict stack state then this execution 3572 * branch doesn't need to be explored further, since verifier already 3573 * concluded that more strict state leads to valid finish. 3574 * 3575 * Therefore two states are equivalent if register state is more conservative 3576 * and explored stack state is more conservative than the current one. 3577 * Example: 3578 * explored current 3579 * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC) 3580 * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC) 3581 * 3582 * In other words if current stack state (one being explored) has more 3583 * valid slots than old one that already passed validation, it means 3584 * the verifier can stop exploring and conclude that current state is valid too 3585 * 3586 * Similarly with registers. If explored state has register type as invalid 3587 * whereas register type in current state is meaningful, it means that 3588 * the current state will reach 'bpf_exit' instruction safely 3589 */ 3590 static bool states_equal(struct bpf_verifier_env *env, 3591 struct bpf_verifier_state *old, 3592 struct bpf_verifier_state *cur) 3593 { 3594 struct idpair *idmap; 3595 bool ret = false; 3596 int i; 3597 3598 idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL); 3599 /* If we failed to allocate the idmap, just say it's not safe */ 3600 if (!idmap) 3601 return false; 3602 3603 for (i = 0; i < MAX_BPF_REG; i++) { 3604 if (!regsafe(&old->regs[i], &cur->regs[i], idmap)) 3605 goto out_free; 3606 } 3607 3608 if (!stacksafe(old, cur, idmap)) 3609 goto out_free; 3610 ret = true; 3611 out_free: 3612 kfree(idmap); 3613 return ret; 3614 } 3615 3616 /* A write screens off any subsequent reads; but write marks come from the 3617 * straight-line code between a state and its parent. When we arrive at a 3618 * jump target (in the first iteration of the propagate_liveness() loop), 3619 * we didn't arrive by the straight-line code, so read marks in state must 3620 * propagate to parent regardless of state's write marks. 3621 */ 3622 static bool do_propagate_liveness(const struct bpf_verifier_state *state, 3623 struct bpf_verifier_state *parent) 3624 { 3625 bool writes = parent == state->parent; /* Observe write marks */ 3626 bool touched = false; /* any changes made? */ 3627 int i; 3628 3629 if (!parent) 3630 return touched; 3631 /* Propagate read liveness of registers... */ 3632 BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG); 3633 /* We don't need to worry about FP liveness because it's read-only */ 3634 for (i = 0; i < BPF_REG_FP; i++) { 3635 if (parent->regs[i].live & REG_LIVE_READ) 3636 continue; 3637 if (writes && (state->regs[i].live & REG_LIVE_WRITTEN)) 3638 continue; 3639 if (state->regs[i].live & REG_LIVE_READ) { 3640 parent->regs[i].live |= REG_LIVE_READ; 3641 touched = true; 3642 } 3643 } 3644 /* ... 
and stack slots */ 3645 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE && 3646 i < parent->allocated_stack / BPF_REG_SIZE; i++) { 3647 if (parent->stack[i].slot_type[0] != STACK_SPILL) 3648 continue; 3649 if (state->stack[i].slot_type[0] != STACK_SPILL) 3650 continue; 3651 if (parent->stack[i].spilled_ptr.live & REG_LIVE_READ) 3652 continue; 3653 if (writes && 3654 (state->stack[i].spilled_ptr.live & REG_LIVE_WRITTEN)) 3655 continue; 3656 if (state->stack[i].spilled_ptr.live & REG_LIVE_READ) { 3657 parent->stack[i].spilled_ptr.live |= REG_LIVE_READ; 3658 touched = true; 3659 } 3660 } 3661 return touched; 3662 } 3663 3664 /* "parent" is "a state from which we reach the current state", but initially 3665 * it is not the state->parent (i.e. "the state whose straight-line code leads 3666 * to the current state"), instead it is the state that happened to arrive at 3667 * a (prunable) equivalent of the current state. See comment above 3668 * do_propagate_liveness() for consequences of this. 3669 * This function is just a more efficient way of calling mark_reg_read() or 3670 * mark_stack_slot_read() on each reg in "parent" that is read in "state", 3671 * though it requires that parent != state->parent in the call arguments. 3672 */ 3673 static void propagate_liveness(const struct bpf_verifier_state *state, 3674 struct bpf_verifier_state *parent) 3675 { 3676 while (do_propagate_liveness(state, parent)) { 3677 /* Something changed, so we need to feed those changes onward */ 3678 state = parent; 3679 parent = state->parent; 3680 } 3681 } 3682 3683 static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) 3684 { 3685 struct bpf_verifier_state_list *new_sl; 3686 struct bpf_verifier_state_list *sl; 3687 struct bpf_verifier_state *cur = env->cur_state; 3688 int i, err; 3689 3690 sl = env->explored_states[insn_idx]; 3691 if (!sl) 3692 /* this 'insn_idx' instruction wasn't marked, so we will not 3693 * be doing state search here 3694 */ 3695 return 0; 3696 3697 while (sl != STATE_LIST_MARK) { 3698 if (states_equal(env, &sl->state, cur)) { 3699 /* reached equivalent register/stack state, 3700 * prune the search. 3701 * Registers read by the continuation are read by us. 3702 * If we have any write marks in env->cur_state, they 3703 * will prevent corresponding reads in the continuation 3704 * from reaching our parent (an explored_state). Our 3705 * own state will get the read marks recorded, but 3706 * they'll be immediately forgotten as we're pruning 3707 * this state and will pop a new one. 3708 */ 3709 propagate_liveness(&sl->state, cur); 3710 return 1; 3711 } 3712 sl = sl->next; 3713 } 3714 3715 /* there were no equivalent states, remember current one. 3716 * technically the current state is not proven to be safe yet, 3717 * but it will either reach bpf_exit (which means it's safe) or 3718 * it will be rejected. 
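 * For example (illustrative only): if both arms of an 'if r1 > 10'
 * branch reconverge at this insn with equivalent register/stack state,
 * the first arrival is remembered and the second one is pruned by the
 * states_equal() loop above instead of being walked to bpf_exit again.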
Since there are no loops, we won't be 3719 * seeing this 'insn_idx' instruction again on the way to bpf_exit 3720 */ 3721 new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL); 3722 if (!new_sl) 3723 return -ENOMEM; 3724 3725 /* add new state to the head of linked list */ 3726 err = copy_verifier_state(&new_sl->state, cur); 3727 if (err) { 3728 free_verifier_state(&new_sl->state, false); 3729 kfree(new_sl); 3730 return err; 3731 } 3732 new_sl->next = env->explored_states[insn_idx]; 3733 env->explored_states[insn_idx] = new_sl; 3734 /* connect new state to parentage chain */ 3735 cur->parent = &new_sl->state; 3736 /* clear write marks in current state: the writes we did are not writes 3737 * our child did, so they don't screen off its reads from us. 3738 * (There are no read marks in current state, because reads always mark 3739 * their parent and current state never has children yet. Only 3740 * explored_states can get read marks.) 3741 */ 3742 for (i = 0; i < BPF_REG_FP; i++) 3743 cur->regs[i].live = REG_LIVE_NONE; 3744 for (i = 0; i < cur->allocated_stack / BPF_REG_SIZE; i++) 3745 if (cur->stack[i].slot_type[0] == STACK_SPILL) 3746 cur->stack[i].spilled_ptr.live = REG_LIVE_NONE; 3747 return 0; 3748 } 3749 3750 static int ext_analyzer_insn_hook(struct bpf_verifier_env *env, 3751 int insn_idx, int prev_insn_idx) 3752 { 3753 if (env->dev_ops && env->dev_ops->insn_hook) 3754 return env->dev_ops->insn_hook(env, insn_idx, prev_insn_idx); 3755 3756 return 0; 3757 } 3758 3759 static int do_check(struct bpf_verifier_env *env) 3760 { 3761 struct bpf_verifier_state *state; 3762 struct bpf_insn *insns = env->prog->insnsi; 3763 struct bpf_reg_state *regs; 3764 int insn_cnt = env->prog->len; 3765 int insn_idx, prev_insn_idx = 0; 3766 int insn_processed = 0; 3767 bool do_print_state = false; 3768 3769 state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL); 3770 if (!state) 3771 return -ENOMEM; 3772 env->cur_state = state; 3773 init_reg_state(env, state->regs); 3774 state->parent = NULL; 3775 insn_idx = 0; 3776 for (;;) { 3777 struct bpf_insn *insn; 3778 u8 class; 3779 int err; 3780 3781 if (insn_idx >= insn_cnt) { 3782 verbose(env, "invalid insn idx %d insn_cnt %d\n", 3783 insn_idx, insn_cnt); 3784 return -EFAULT; 3785 } 3786 3787 insn = &insns[insn_idx]; 3788 class = BPF_CLASS(insn->code); 3789 3790 if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) { 3791 verbose(env, 3792 "BPF program is too large. 
Processed %d insn\n", 3793 insn_processed); 3794 return -E2BIG; 3795 } 3796 3797 err = is_state_visited(env, insn_idx); 3798 if (err < 0) 3799 return err; 3800 if (err == 1) { 3801 /* found equivalent state, can prune the search */ 3802 if (env->log.level) { 3803 if (do_print_state) 3804 verbose(env, "\nfrom %d to %d: safe\n", 3805 prev_insn_idx, insn_idx); 3806 else 3807 verbose(env, "%d: safe\n", insn_idx); 3808 } 3809 goto process_bpf_exit; 3810 } 3811 3812 if (need_resched()) 3813 cond_resched(); 3814 3815 if (env->log.level > 1 || (env->log.level && do_print_state)) { 3816 if (env->log.level > 1) 3817 verbose(env, "%d:", insn_idx); 3818 else 3819 verbose(env, "\nfrom %d to %d:", 3820 prev_insn_idx, insn_idx); 3821 print_verifier_state(env, state); 3822 do_print_state = false; 3823 } 3824 3825 if (env->log.level) { 3826 verbose(env, "%d: ", insn_idx); 3827 print_bpf_insn(verbose, env, insn, 3828 env->allow_ptr_leaks); 3829 } 3830 3831 err = ext_analyzer_insn_hook(env, insn_idx, prev_insn_idx); 3832 if (err) 3833 return err; 3834 3835 regs = cur_regs(env); 3836 env->insn_aux_data[insn_idx].seen = true; 3837 if (class == BPF_ALU || class == BPF_ALU64) { 3838 err = check_alu_op(env, insn); 3839 if (err) 3840 return err; 3841 3842 } else if (class == BPF_LDX) { 3843 enum bpf_reg_type *prev_src_type, src_reg_type; 3844 3845 /* check for reserved fields is already done */ 3846 3847 /* check src operand */ 3848 err = check_reg_arg(env, insn->src_reg, SRC_OP); 3849 if (err) 3850 return err; 3851 3852 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); 3853 if (err) 3854 return err; 3855 3856 src_reg_type = regs[insn->src_reg].type; 3857 3858 /* check that memory (src_reg + off) is readable, 3859 * the state of dst_reg will be updated by this func 3860 */ 3861 err = check_mem_access(env, insn_idx, insn->src_reg, insn->off, 3862 BPF_SIZE(insn->code), BPF_READ, 3863 insn->dst_reg); 3864 if (err) 3865 return err; 3866 3867 prev_src_type = &env->insn_aux_data[insn_idx].ptr_type; 3868 3869 if (*prev_src_type == NOT_INIT) { 3870 /* saw a valid insn 3871 * dst_reg = *(u32 *)(src_reg + off) 3872 * save type to validate intersecting paths 3873 */ 3874 *prev_src_type = src_reg_type; 3875 3876 } else if (src_reg_type != *prev_src_type && 3877 (src_reg_type == PTR_TO_CTX || 3878 *prev_src_type == PTR_TO_CTX)) { 3879 /* An abusive program is trying to use the same insn 3880 * dst_reg = *(u32*) (src_reg + off) 3881 * with different pointer types: 3882 * src_reg == ctx in one branch and 3883 * src_reg == stack|map in some other branch. 3884 * Reject it.
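 * For example (illustrative only), two branches might funnel into one
 * shared load:
 *   branch A:  r2 = r1        // here r2 has type PTR_TO_CTX
 *   branch B:  r2 = r10       // here r2 is a stack pointer
 *   join:      r3 = *(u32 *)(r2 + 8)
 * Ctx accesses are later rewritten per field by convert_ctx_accesses(),
 * so no single rewrite of that load could be correct for both types.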
3885 */ 3886 verbose(env, "same insn cannot be used with different pointers\n"); 3887 return -EINVAL; 3888 } 3889 3890 } else if (class == BPF_STX) { 3891 enum bpf_reg_type *prev_dst_type, dst_reg_type; 3892 3893 if (BPF_MODE(insn->code) == BPF_XADD) { 3894 err = check_xadd(env, insn_idx, insn); 3895 if (err) 3896 return err; 3897 insn_idx++; 3898 continue; 3899 } 3900 3901 /* check src1 operand */ 3902 err = check_reg_arg(env, insn->src_reg, SRC_OP); 3903 if (err) 3904 return err; 3905 /* check src2 operand */ 3906 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 3907 if (err) 3908 return err; 3909 3910 dst_reg_type = regs[insn->dst_reg].type; 3911 3912 /* check that memory (dst_reg + off) is writeable */ 3913 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, 3914 BPF_SIZE(insn->code), BPF_WRITE, 3915 insn->src_reg); 3916 if (err) 3917 return err; 3918 3919 prev_dst_type = &env->insn_aux_data[insn_idx].ptr_type; 3920 3921 if (*prev_dst_type == NOT_INIT) { 3922 *prev_dst_type = dst_reg_type; 3923 } else if (dst_reg_type != *prev_dst_type && 3924 (dst_reg_type == PTR_TO_CTX || 3925 *prev_dst_type == PTR_TO_CTX)) { 3926 verbose(env, "same insn cannot be used with different pointers\n"); 3927 return -EINVAL; 3928 } 3929 3930 } else if (class == BPF_ST) { 3931 if (BPF_MODE(insn->code) != BPF_MEM || 3932 insn->src_reg != BPF_REG_0) { 3933 verbose(env, "BPF_ST uses reserved fields\n"); 3934 return -EINVAL; 3935 } 3936 /* check src operand */ 3937 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 3938 if (err) 3939 return err; 3940 3941 /* check that memory (dst_reg + off) is writeable */ 3942 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, 3943 BPF_SIZE(insn->code), BPF_WRITE, 3944 -1); 3945 if (err) 3946 return err; 3947 3948 } else if (class == BPF_JMP) { 3949 u8 opcode = BPF_OP(insn->code); 3950 3951 if (opcode == BPF_CALL) { 3952 if (BPF_SRC(insn->code) != BPF_K || 3953 insn->off != 0 || 3954 insn->src_reg != BPF_REG_0 || 3955 insn->dst_reg != BPF_REG_0) { 3956 verbose(env, "BPF_CALL uses reserved fields\n"); 3957 return -EINVAL; 3958 } 3959 3960 err = check_call(env, insn->imm, insn_idx); 3961 if (err) 3962 return err; 3963 3964 } else if (opcode == BPF_JA) { 3965 if (BPF_SRC(insn->code) != BPF_K || 3966 insn->imm != 0 || 3967 insn->src_reg != BPF_REG_0 || 3968 insn->dst_reg != BPF_REG_0) { 3969 verbose(env, "BPF_JA uses reserved fields\n"); 3970 return -EINVAL; 3971 } 3972 3973 insn_idx += insn->off + 1; 3974 continue; 3975 3976 } else if (opcode == BPF_EXIT) { 3977 if (BPF_SRC(insn->code) != BPF_K || 3978 insn->imm != 0 || 3979 insn->src_reg != BPF_REG_0 || 3980 insn->dst_reg != BPF_REG_0) { 3981 verbose(env, "BPF_EXIT uses reserved fields\n"); 3982 return -EINVAL; 3983 } 3984 3985 /* eBPF calling convention is such that R0 is used 3986 * to return the value from the eBPF program.
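 * For example (illustrative only), a program whose last two insns are
 *   BPF_MOV64_IMM(BPF_REG_0, 1),
 *   BPF_EXIT_INSN(),
 * returns the scalar 1 to the kernel, while exiting without ever
 * writing R0 is rejected by the check below.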
3987 * Make sure that it's readable at this time 3988 * of bpf_exit, which means that program wrote 3989 * something into it earlier 3990 */ 3991 err = check_reg_arg(env, BPF_REG_0, SRC_OP); 3992 if (err) 3993 return err; 3994 3995 if (is_pointer_value(env, BPF_REG_0)) { 3996 verbose(env, "R0 leaks addr as return value\n"); 3997 return -EACCES; 3998 } 3999 4000 err = check_return_code(env); 4001 if (err) 4002 return err; 4003 process_bpf_exit: 4004 err = pop_stack(env, &prev_insn_idx, &insn_idx); 4005 if (err < 0) { 4006 if (err != -ENOENT) 4007 return err; 4008 break; 4009 } else { 4010 do_print_state = true; 4011 continue; 4012 } 4013 } else { 4014 err = check_cond_jmp_op(env, insn, &insn_idx); 4015 if (err) 4016 return err; 4017 } 4018 } else if (class == BPF_LD) { 4019 u8 mode = BPF_MODE(insn->code); 4020 4021 if (mode == BPF_ABS || mode == BPF_IND) { 4022 err = check_ld_abs(env, insn); 4023 if (err) 4024 return err; 4025 4026 } else if (mode == BPF_IMM) { 4027 err = check_ld_imm(env, insn); 4028 if (err) 4029 return err; 4030 4031 insn_idx++; 4032 env->insn_aux_data[insn_idx].seen = true; 4033 } else { 4034 verbose(env, "invalid BPF_LD mode\n"); 4035 return -EINVAL; 4036 } 4037 } else { 4038 verbose(env, "unknown insn class %d\n", class); 4039 return -EINVAL; 4040 } 4041 4042 insn_idx++; 4043 } 4044 4045 verbose(env, "processed %d insns, stack depth %d\n", insn_processed, 4046 env->prog->aux->stack_depth); 4047 return 0; 4048 } 4049 4050 static int check_map_prealloc(struct bpf_map *map) 4051 { 4052 return (map->map_type != BPF_MAP_TYPE_HASH && 4053 map->map_type != BPF_MAP_TYPE_PERCPU_HASH && 4054 map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) || 4055 !(map->map_flags & BPF_F_NO_PREALLOC); 4056 } 4057 4058 static int check_map_prog_compatibility(struct bpf_verifier_env *env, 4059 struct bpf_map *map, 4060 struct bpf_prog *prog) 4061 4062 { 4063 /* Make sure that BPF_PROG_TYPE_PERF_EVENT programs only use 4064 * preallocated hash maps, since doing memory allocation 4065 * in overflow_handler can crash depending on where nmi got 4066 * triggered. 
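 * For example (illustrative only), a hash map created with the
 * BPF_F_NO_PREALLOC flag allocates elements at update time; doing that
 * from NMI context is unsafe, so such maps are rejected for this
 * program type by the checks below.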
4067 */ 4068 if (prog->type == BPF_PROG_TYPE_PERF_EVENT) { 4069 if (!check_map_prealloc(map)) { 4070 verbose(env, "perf_event programs can only use preallocated hash map\n"); 4071 return -EINVAL; 4072 } 4073 if (map->inner_map_meta && 4074 !check_map_prealloc(map->inner_map_meta)) { 4075 verbose(env, "perf_event programs can only use preallocated inner hash map\n"); 4076 return -EINVAL; 4077 } 4078 } 4079 return 0; 4080 } 4081 4082 /* look for pseudo eBPF instructions that access map FDs and 4083 * replace them with actual map pointers 4084 */ 4085 static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env) 4086 { 4087 struct bpf_insn *insn = env->prog->insnsi; 4088 int insn_cnt = env->prog->len; 4089 int i, j, err; 4090 4091 err = bpf_prog_calc_tag(env->prog); 4092 if (err) 4093 return err; 4094 4095 for (i = 0; i < insn_cnt; i++, insn++) { 4096 if (BPF_CLASS(insn->code) == BPF_LDX && 4097 (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) { 4098 verbose(env, "BPF_LDX uses reserved fields\n"); 4099 return -EINVAL; 4100 } 4101 4102 if (BPF_CLASS(insn->code) == BPF_STX && 4103 ((BPF_MODE(insn->code) != BPF_MEM && 4104 BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) { 4105 verbose(env, "BPF_STX uses reserved fields\n"); 4106 return -EINVAL; 4107 } 4108 4109 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) { 4110 struct bpf_map *map; 4111 struct fd f; 4112 4113 if (i == insn_cnt - 1 || insn[1].code != 0 || 4114 insn[1].dst_reg != 0 || insn[1].src_reg != 0 || 4115 insn[1].off != 0) { 4116 verbose(env, "invalid bpf_ld_imm64 insn\n"); 4117 return -EINVAL; 4118 } 4119 4120 if (insn->src_reg == 0) 4121 /* valid generic load 64-bit imm */ 4122 goto next_insn; 4123 4124 if (insn->src_reg != BPF_PSEUDO_MAP_FD) { 4125 verbose(env, 4126 "unrecognized bpf_ld_imm64 insn\n"); 4127 return -EINVAL; 4128 } 4129 4130 f = fdget(insn->imm); 4131 map = __bpf_map_get(f); 4132 if (IS_ERR(map)) { 4133 verbose(env, "fd %d is not pointing to valid bpf_map\n", 4134 insn->imm); 4135 return PTR_ERR(map); 4136 } 4137 4138 err = check_map_prog_compatibility(env, map, env->prog); 4139 if (err) { 4140 fdput(f); 4141 return err; 4142 } 4143 4144 /* store map pointer inside BPF_LD_IMM64 instruction */ 4145 insn[0].imm = (u32) (unsigned long) map; 4146 insn[1].imm = ((u64) (unsigned long) map) >> 32; 4147 4148 /* check whether we recorded this map already */ 4149 for (j = 0; j < env->used_map_cnt; j++) 4150 if (env->used_maps[j] == map) { 4151 fdput(f); 4152 goto next_insn; 4153 } 4154 4155 if (env->used_map_cnt >= MAX_USED_MAPS) { 4156 fdput(f); 4157 return -E2BIG; 4158 } 4159 4160 /* hold the map. If the program is rejected by verifier, 4161 * the map will be released by release_maps() or it 4162 * will be used by the valid program until it's unloaded 4163 * and all maps are released in free_bpf_prog_info() 4164 */ 4165 map = bpf_map_inc(map, false); 4166 if (IS_ERR(map)) { 4167 fdput(f); 4168 return PTR_ERR(map); 4169 } 4170 env->used_maps[env->used_map_cnt++] = map; 4171 4172 fdput(f); 4173 next_insn: 4174 insn++; 4175 i++; 4176 } 4177 } 4178 4179 /* now all pseudo BPF_LD_IMM64 instructions load valid 4180 * 'struct bpf_map *' into a register instead of user map_fd. 4181 * These pointers will be used later by verifier to validate map access. 
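 * For example (illustrative only, fd number made up), a loader-emitted
 *   BPF_LD_MAP_FD(BPF_REG_1, 5)
 * with src_reg == BPF_PSEUDO_MAP_FD and imm == 5 now carries the low
 * and high 32 bits of the in-kernel 'struct bpf_map *' in insn[0].imm
 * and insn[1].imm instead of the user-supplied fd.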
4182 */ 4183 return 0; 4184 } 4185 4186 /* drop refcnt of maps used by the rejected program */ 4187 static void release_maps(struct bpf_verifier_env *env) 4188 { 4189 int i; 4190 4191 for (i = 0; i < env->used_map_cnt; i++) 4192 bpf_map_put(env->used_maps[i]); 4193 } 4194 4195 /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */ 4196 static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env) 4197 { 4198 struct bpf_insn *insn = env->prog->insnsi; 4199 int insn_cnt = env->prog->len; 4200 int i; 4201 4202 for (i = 0; i < insn_cnt; i++, insn++) 4203 if (insn->code == (BPF_LD | BPF_IMM | BPF_DW)) 4204 insn->src_reg = 0; 4205 } 4206 4207 /* single env->prog->insnsi[off] instruction was replaced with the range 4208 * insnsi[off, off + cnt). Adjust corresponding insn_aux_data by copying 4209 * [0, off) and [off, end) to new locations, so the patched range stays zero 4210 */ 4211 static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len, 4212 u32 off, u32 cnt) 4213 { 4214 struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data; 4215 int i; 4216 4217 if (cnt == 1) 4218 return 0; 4219 new_data = vzalloc(sizeof(struct bpf_insn_aux_data) * prog_len); 4220 if (!new_data) 4221 return -ENOMEM; 4222 memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off); 4223 memcpy(new_data + off + cnt - 1, old_data + off, 4224 sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1)); 4225 for (i = off; i < off + cnt - 1; i++) 4226 new_data[i].seen = true; 4227 env->insn_aux_data = new_data; 4228 vfree(old_data); 4229 return 0; 4230 } 4231 4232 static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off, 4233 const struct bpf_insn *patch, u32 len) 4234 { 4235 struct bpf_prog *new_prog; 4236 4237 new_prog = bpf_patch_insn_single(env->prog, off, patch, len); 4238 if (!new_prog) 4239 return NULL; 4240 if (adjust_insn_aux_data(env, new_prog->len, off, len)) 4241 return NULL; 4242 return new_prog; 4243 } 4244 4245 /* The verifier does more data flow analysis than llvm and will not explore 4246 * branches that are dead at run time. Malicious programs can have dead code 4247 * too. Therefore replace all dead at-run-time code with nops.
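 * For example (illustrative only), if r1 is known to be 0, the body of
 * an 'if r1 != 0' branch is never explored and its insns are never
 * marked as seen, so they are overwritten below with the 'r0 = r0' nop.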
4248 */ 4249 static void sanitize_dead_code(struct bpf_verifier_env *env) 4250 { 4251 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; 4252 struct bpf_insn nop = BPF_MOV64_REG(BPF_REG_0, BPF_REG_0); 4253 struct bpf_insn *insn = env->prog->insnsi; 4254 const int insn_cnt = env->prog->len; 4255 int i; 4256 4257 for (i = 0; i < insn_cnt; i++) { 4258 if (aux_data[i].seen) 4259 continue; 4260 memcpy(insn + i, &nop, sizeof(nop)); 4261 } 4262 } 4263 4264 /* convert load instructions that access fields of 'struct __sk_buff' 4265 * into sequence of instructions that access fields of 'struct sk_buff' 4266 */ 4267 static int convert_ctx_accesses(struct bpf_verifier_env *env) 4268 { 4269 const struct bpf_verifier_ops *ops = env->ops; 4270 int i, cnt, size, ctx_field_size, delta = 0; 4271 const int insn_cnt = env->prog->len; 4272 struct bpf_insn insn_buf[16], *insn; 4273 struct bpf_prog *new_prog; 4274 enum bpf_access_type type; 4275 bool is_narrower_load; 4276 u32 target_size; 4277 4278 if (ops->gen_prologue) { 4279 cnt = ops->gen_prologue(insn_buf, env->seen_direct_write, 4280 env->prog); 4281 if (cnt >= ARRAY_SIZE(insn_buf)) { 4282 verbose(env, "bpf verifier is misconfigured\n"); 4283 return -EINVAL; 4284 } else if (cnt) { 4285 new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt); 4286 if (!new_prog) 4287 return -ENOMEM; 4288 4289 env->prog = new_prog; 4290 delta += cnt - 1; 4291 } 4292 } 4293 4294 if (!ops->convert_ctx_access) 4295 return 0; 4296 4297 insn = env->prog->insnsi + delta; 4298 4299 for (i = 0; i < insn_cnt; i++, insn++) { 4300 if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) || 4301 insn->code == (BPF_LDX | BPF_MEM | BPF_H) || 4302 insn->code == (BPF_LDX | BPF_MEM | BPF_W) || 4303 insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) 4304 type = BPF_READ; 4305 else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) || 4306 insn->code == (BPF_STX | BPF_MEM | BPF_H) || 4307 insn->code == (BPF_STX | BPF_MEM | BPF_W) || 4308 insn->code == (BPF_STX | BPF_MEM | BPF_DW)) 4309 type = BPF_WRITE; 4310 else 4311 continue; 4312 4313 if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX) 4314 continue; 4315 4316 ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size; 4317 size = BPF_LDST_BYTES(insn); 4318 4319 /* If the read access is a narrower load of the field, 4320 * convert to a 4/8-byte load, to minimize program type specific 4321 * convert_ctx_access changes. If conversion is successful, 4322 * we will apply the proper mask to the result.
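 * For example (illustrative only), a 1-byte read of a 4-byte context
 * field becomes a 4-byte read of the aligned field, and once
 * convert_ctx_access() has produced the rewrite, an AND with 0xff is
 * appended below so only the requested byte remains.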
4323 */ 4324 is_narrower_load = size < ctx_field_size; 4325 if (is_narrower_load) { 4326 u32 off = insn->off; 4327 u8 size_code; 4328 4329 if (type == BPF_WRITE) { 4330 verbose(env, "bpf verifier narrow ctx access misconfigured\n"); 4331 return -EINVAL; 4332 } 4333 4334 size_code = BPF_H; 4335 if (ctx_field_size == 4) 4336 size_code = BPF_W; 4337 else if (ctx_field_size == 8) 4338 size_code = BPF_DW; 4339 4340 insn->off = off & ~(ctx_field_size - 1); 4341 insn->code = BPF_LDX | BPF_MEM | size_code; 4342 } 4343 4344 target_size = 0; 4345 cnt = ops->convert_ctx_access(type, insn, insn_buf, env->prog, 4346 &target_size); 4347 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) || 4348 (ctx_field_size && !target_size)) { 4349 verbose(env, "bpf verifier is misconfigured\n"); 4350 return -EINVAL; 4351 } 4352 4353 if (is_narrower_load && size < target_size) { 4354 if (ctx_field_size <= 4) 4355 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg, 4356 (1 << size * 8) - 1); 4357 else 4358 insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg, 4359 (1 << size * 8) - 1); 4360 } 4361 4362 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 4363 if (!new_prog) 4364 return -ENOMEM; 4365 4366 delta += cnt - 1; 4367 4368 /* keep walking new program and skip insns we just inserted */ 4369 env->prog = new_prog; 4370 insn = new_prog->insnsi + i + delta; 4371 } 4372 4373 return 0; 4374 } 4375 4376 /* fixup insn->imm field of bpf_call instructions 4377 * and inline eligible helpers as explicit sequence of BPF instructions 4378 * 4379 * this function is called after the eBPF program passed verification 4380 */ 4381 static int fixup_bpf_calls(struct bpf_verifier_env *env) 4382 { 4383 struct bpf_prog *prog = env->prog; 4384 struct bpf_insn *insn = prog->insnsi; 4385 const struct bpf_func_proto *fn; 4386 const int insn_cnt = prog->len; 4387 struct bpf_insn insn_buf[16]; 4388 struct bpf_prog *new_prog; 4389 struct bpf_map *map_ptr; 4390 int i, cnt, delta = 0; 4391 4392 for (i = 0; i < insn_cnt; i++, insn++) { 4393 if (insn->code != (BPF_JMP | BPF_CALL)) 4394 continue; 4395 4396 if (insn->imm == BPF_FUNC_get_route_realm) 4397 prog->dst_needed = 1; 4398 if (insn->imm == BPF_FUNC_get_prandom_u32) 4399 bpf_user_rnd_init_once(); 4400 if (insn->imm == BPF_FUNC_tail_call) { 4401 /* If we tail call into other programs, we 4402 * cannot make any assumptions since they can 4403 * be replaced dynamically during runtime in 4404 * the program array. 4405 */ 4406 prog->cb_access = 1; 4407 env->prog->aux->stack_depth = MAX_BPF_STACK; 4408 4409 /* mark bpf_tail_call as different opcode to avoid 4410 * conditional branch in the interpreter for every normal 4411 * call and to prevent accidental JITing by JIT compiler 4412 * that doesn't support bpf_tail_call yet 4413 */ 4414 insn->imm = 0; 4415 insn->code = BPF_JMP | BPF_TAIL_CALL; 4416 continue; 4417 } 4418 4419 /* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup 4420 * handlers are currently limited to 64 bit only.
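 * For example (illustrative only), an array map's map_gen_lookup()
 * callback can expand a bpf_map_lookup_elem() call into an inline
 * bounds check plus direct pointer arithmetic on the value array,
 * avoiding the helper call on the fast path.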
4421 */ 4422 if (ebpf_jit_enabled() && BITS_PER_LONG == 64 && 4423 insn->imm == BPF_FUNC_map_lookup_elem) { 4424 map_ptr = env->insn_aux_data[i + delta].map_ptr; 4425 if (map_ptr == BPF_MAP_PTR_POISON || 4426 !map_ptr->ops->map_gen_lookup) 4427 goto patch_call_imm; 4428 4429 cnt = map_ptr->ops->map_gen_lookup(map_ptr, insn_buf); 4430 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) { 4431 verbose(env, "bpf verifier is misconfigured\n"); 4432 return -EINVAL; 4433 } 4434 4435 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 4436 cnt); 4437 if (!new_prog) 4438 return -ENOMEM; 4439 4440 delta += cnt - 1; 4441 4442 /* keep walking new program and skip insns we just inserted */ 4443 env->prog = prog = new_prog; 4444 insn = new_prog->insnsi + i + delta; 4445 continue; 4446 } 4447 4448 if (insn->imm == BPF_FUNC_redirect_map) { 4449 /* Note, we cannot use prog directly as imm as subsequent 4450 * rewrites would still change the prog pointer. The only 4451 * stable address we can use is aux, which also works with 4452 * prog clones during blinding. 4453 */ 4454 u64 addr = (unsigned long)prog->aux; 4455 struct bpf_insn r4_ld[] = { 4456 BPF_LD_IMM64(BPF_REG_4, addr), 4457 *insn, 4458 }; 4459 cnt = ARRAY_SIZE(r4_ld); 4460 4461 new_prog = bpf_patch_insn_data(env, i + delta, r4_ld, cnt); 4462 if (!new_prog) 4463 return -ENOMEM; 4464 4465 delta += cnt - 1; 4466 env->prog = prog = new_prog; 4467 insn = new_prog->insnsi + i + delta; 4468 } 4469 patch_call_imm: 4470 fn = env->ops->get_func_proto(insn->imm); 4471 /* all functions that have prototype and verifier allowed 4472 * programs to call them, must be real in-kernel functions 4473 */ 4474 if (!fn->func) { 4475 verbose(env, 4476 "kernel subsystem misconfigured func %s#%d\n", 4477 func_id_name(insn->imm), insn->imm); 4478 return -EFAULT; 4479 } 4480 insn->imm = fn->func - __bpf_call_base; 4481 } 4482 4483 return 0; 4484 } 4485 4486 static void free_states(struct bpf_verifier_env *env) 4487 { 4488 struct bpf_verifier_state_list *sl, *sln; 4489 int i; 4490 4491 if (!env->explored_states) 4492 return; 4493 4494 for (i = 0; i < env->prog->len; i++) { 4495 sl = env->explored_states[i]; 4496 4497 if (sl) 4498 while (sl != STATE_LIST_MARK) { 4499 sln = sl->next; 4500 free_verifier_state(&sl->state, false); 4501 kfree(sl); 4502 sl = sln; 4503 } 4504 } 4505 4506 kfree(env->explored_states); 4507 } 4508 4509 int bpf_check(struct bpf_prog **prog, union bpf_attr *attr) 4510 { 4511 struct bpf_verifier_env *env; 4512 struct bpf_verifer_log *log; 4513 int ret = -EINVAL; 4514 4515 /* no program is valid */ 4516 if (ARRAY_SIZE(bpf_verifier_ops) == 0) 4517 return -EINVAL; 4518 4519 /* 'struct bpf_verifier_env' can be global, but since it's not small, 4520 * allocate/free it every time bpf_check() is called 4521 */ 4522 env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL); 4523 if (!env) 4524 return -ENOMEM; 4525 log = &env->log; 4526 4527 env->insn_aux_data = vzalloc(sizeof(struct bpf_insn_aux_data) * 4528 (*prog)->len); 4529 ret = -ENOMEM; 4530 if (!env->insn_aux_data) 4531 goto err_free_env; 4532 env->prog = *prog; 4533 env->ops = bpf_verifier_ops[env->prog->type]; 4534 4535 /* grab the mutex to protect few globals used by verifier */ 4536 mutex_lock(&bpf_verifier_lock); 4537 4538 if (attr->log_level || attr->log_buf || attr->log_size) { 4539 /* user requested verbose verifier output 4540 * and supplied buffer to store the verification trace 4541 */ 4542 log->level = attr->log_level; 4543 log->ubuf = (char __user *) (unsigned long) attr->log_buf; 4544 
log->len_total = attr->log_size; 4545 4546 ret = -EINVAL; 4547 /* log attributes have to be sane */ 4548 if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 || 4549 !log->level || !log->ubuf) 4550 goto err_unlock; 4551 } 4552 4553 env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT); 4554 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) 4555 env->strict_alignment = true; 4556 4557 if (env->prog->aux->offload) { 4558 ret = bpf_prog_offload_verifier_prep(env); 4559 if (ret) 4560 goto err_unlock; 4561 } 4562 4563 ret = replace_map_fd_with_map_ptr(env); 4564 if (ret < 0) 4565 goto skip_full_check; 4566 4567 env->explored_states = kcalloc(env->prog->len, 4568 sizeof(struct bpf_verifier_state_list *), 4569 GFP_USER); 4570 ret = -ENOMEM; 4571 if (!env->explored_states) 4572 goto skip_full_check; 4573 4574 ret = check_cfg(env); 4575 if (ret < 0) 4576 goto skip_full_check; 4577 4578 env->allow_ptr_leaks = capable(CAP_SYS_ADMIN); 4579 4580 ret = do_check(env); 4581 if (env->cur_state) { 4582 free_verifier_state(env->cur_state, true); 4583 env->cur_state = NULL; 4584 } 4585 4586 skip_full_check: 4587 while (!pop_stack(env, NULL, NULL)); 4588 free_states(env); 4589 4590 if (ret == 0) 4591 sanitize_dead_code(env); 4592 4593 if (ret == 0) 4594 /* program is valid, convert *(u32*)(ctx + off) accesses */ 4595 ret = convert_ctx_accesses(env); 4596 4597 if (ret == 0) 4598 ret = fixup_bpf_calls(env); 4599 4600 if (log->level && bpf_verifier_log_full(log)) 4601 ret = -ENOSPC; 4602 if (log->level && !log->ubuf) { 4603 ret = -EFAULT; 4604 goto err_release_maps; 4605 } 4606 4607 if (ret == 0 && env->used_map_cnt) { 4608 /* if program passed verifier, update used_maps in bpf_prog_info */ 4609 env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt, 4610 sizeof(env->used_maps[0]), 4611 GFP_KERNEL); 4612 4613 if (!env->prog->aux->used_maps) { 4614 ret = -ENOMEM; 4615 goto err_release_maps; 4616 } 4617 4618 memcpy(env->prog->aux->used_maps, env->used_maps, 4619 sizeof(env->used_maps[0]) * env->used_map_cnt); 4620 env->prog->aux->used_map_cnt = env->used_map_cnt; 4621 4622 /* program is valid. Convert pseudo bpf_ld_imm64 into generic 4623 * bpf_ld_imm64 instructions 4624 */ 4625 convert_pseudo_ld_imm64(env); 4626 } 4627 4628 err_release_maps: 4629 if (!env->prog->aux->used_maps) 4630 /* if we didn't copy map pointers into bpf_prog_info, release 4631 * them now. Otherwise free_bpf_prog_info() will release them. 4632 */ 4633 release_maps(env); 4634 *prog = env->prog; 4635 err_unlock: 4636 mutex_unlock(&bpf_verifier_lock); 4637 vfree(env->insn_aux_data); 4638 err_free_env: 4639 kfree(env); 4640 return ret; 4641 } 4642
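/* Example (illustrative only, not part of the verifier): log attributes
 * that satisfy the sanity checks in bpf_check() above when a program is
 * loaded from user space with the BPF_PROG_LOAD command:
 *
 *	char log[65536];
 *	union bpf_attr attr = {
 *		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
 *		.log_level = 1,
 *		.log_size  = sizeof(log),	// >= 128 and <= UINT_MAX >> 8
 *		.log_buf   = (__u64)(unsigned long)log,	// must be non-NULL
 *	};
 *
 * together with insns/insn_cnt/license filled in, passed via
 * syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr)).
 */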