// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
 */
#include <uapi/linux/btf.h>
#include <linux/bpf-cgroup.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bpf_verifier.h>
#include <linux/filter.h>
#include <net/netlink.h>
#include <linux/file.h>
#include <linux/vmalloc.h>
#include <linux/stringify.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/perf_event.h>
#include <linux/ctype.h>
#include <linux/error-injection.h>
#include <linux/bpf_lsm.h>
#include <linux/btf_ids.h>
#include <linux/poison.h>
#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/bpf_mem_alloc.h>
#include <net/xdp.h>

#include "disasm.h"

static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	[_id] = & _name ## _verifier_ops,
#define BPF_MAP_TYPE(_id, _ops)
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};

struct bpf_mem_alloc bpf_global_percpu_ma;
static bool bpf_global_percpu_ma_set;

/* bpf_check() is a static code analyzer that walks eBPF program
 * instruction by instruction and updates register/stack state.
 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
 *
 * The first pass is depth-first-search to check that the program is a DAG.
 * It rejects the following programs:
 * - larger than BPF_MAXINSNS insns
 * - if loop is present (detected via back-edge)
 * - unreachable insns exist (shouldn't be a forest. program = one function)
 * - out of bounds or malformed jumps
 * The second pass is all possible path descent from the 1st insn.
 * Since it's analyzing all paths through the program, the length of the
 * analysis is limited to 64k insn, which may be hit even if total number of
 * insn is less than 4K, but there are too many branches that change stack/regs.
 * Number of 'branches to be analyzed' is limited to 1k
 *
 * On entry to each instruction, each register has a type, and the instruction
 * changes the types of the registers depending on instruction semantics.
 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
 * copied to R1.
 *
 * All registers are 64-bit.
 * R0 - return register
 * R1-R5 argument passing registers
 * R6-R9 callee saved registers
 * R10 - frame pointer read-only
 *
 * At the start of BPF program the register R1 contains a pointer to bpf_context
 * and has type PTR_TO_CTX.
 *
 * Verifier tracks arithmetic operations on pointers in case:
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
 * 1st insn copies R10 (which has FRAME_PTR) type into R1
 * and 2nd arithmetic instruction is pattern matched to recognize
 * that it wants to construct a pointer to some element within stack.
 * So after 2nd insn, the register R1 has type PTR_TO_STACK
 * (and -20 constant is saved for further stack bounds checking).
 * Meaning that this reg is a pointer to stack plus known immediate constant.
 *
 * Most of the time the registers have SCALAR_VALUE type, which
 * means the register has some value, but it's not a valid pointer.
 * (like pointer plus pointer becomes SCALAR_VALUE type)
 *
 * When verifier sees load or store instructions the type of base register
 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET. These are
 * four pointer types recognized by check_mem_access() function.
 *
 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
 * and the range of [ptr, ptr + map's value_size) is accessible.
 *
 * registers used to pass values to function calls are checked against
 * function argument constraints.
 *
 * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
 * It means that the register type passed to this function must be
 * PTR_TO_STACK and it will be used inside the function as
 * 'pointer to map element key'
 *
 * For example the argument constraints for bpf_map_lookup_elem():
 *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
 *   .arg1_type = ARG_CONST_MAP_PTR,
 *   .arg2_type = ARG_PTR_TO_MAP_KEY,
 *
 * ret_type says that this function returns 'pointer to map elem value or null'
 * function expects 1st argument to be a const pointer to 'struct bpf_map' and
 * 2nd argument should be a pointer to stack, which will be used inside
 * the helper function as a pointer to map element key.
 *
 * On the kernel side the helper function looks like:
 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 * {
 *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
 *    void *key = (void *) (unsigned long) r2;
 *    void *value;
 *
 *    here kernel can access 'key' and 'map' pointers safely, knowing that
 *    [key, key + map->key_size) bytes are valid and were initialized on
 *    the stack of eBPF program.
 * }
 *
 * Corresponding eBPF program may look like:
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 * here verifier looks at prototype of map_lookup_elem() and sees:
 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
 *
 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
 * and were initialized prior to this call.
 * If it's ok, then verifier allows this BPF_CALL insn and looks at
 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
 * returns either pointer to map value or NULL.
 *
 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
 * insn, the register holding that pointer in the true branch changes state to
 * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
 * branch. See check_cond_jmp_op().
 *
 * After the call R0 is set to return type of the function and registers R1-R5
 * are set to NOT_INIT to indicate that they are no longer readable.
 *
 * The following reference types represent a potential reference to a kernel
 * resource which, after first being allocated, must be checked and freed by
 * the BPF program:
 * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
 *
 * When the verifier sees a helper call return a reference type, it allocates a
 * pointer id for the reference and stores it in the current function state.
 * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into
 * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type
 * passes through a NULL-check conditional. For the branch wherein the state is
 * changed to CONST_IMM, the verifier releases the reference.
 *
 * For each helper function that allocates a reference, such as
 * bpf_sk_lookup_tcp(), there is a corresponding release function, such as
 * bpf_sk_release(). When a reference type passes into the release function,
 * the verifier also releases the reference. If any unchecked or unreleased
 * reference remains at the end of the program, the verifier rejects it.
 */
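
/* Illustrative only (not part of the verifier itself): in BPF C, the
 * acquire/release pairing described above typically looks like the minimal
 * sketch below, assuming a TC program with an already populated
 * 'struct bpf_sock_tuple tuple':
 *
 *	struct bpf_sock *sk;
 *
 *	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
 *			       BPF_F_CURRENT_NETNS, 0);
 *	if (sk) {
 *		// R0 was PTR_TO_SOCKET_OR_NULL; after the NULL check it is
 *		// PTR_TO_SOCKET with a ref_obj_id that must be released.
 *		bpf_sk_release(sk);
 *	}
 *
 * Dropping the bpf_sk_release() call leaves an unreleased reference at
 * program exit and the verifier rejects the program.
 */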

/* verifier_state + insn_idx are pushed to stack when branch is encountered */
struct bpf_verifier_stack_elem {
	/* verifier state is 'st'
	 * before processing instruction 'insn_idx'
	 * and after processing instruction 'prev_insn_idx'
	 */
	struct bpf_verifier_state st;
	int insn_idx;
	int prev_insn_idx;
	struct bpf_verifier_stack_elem *next;
	/* length of verifier log at the time this state was pushed on stack */
	u32 log_pos;
};

#define BPF_COMPLEXITY_LIMIT_JMP_SEQ	8192
#define BPF_COMPLEXITY_LIMIT_STATES	64

#define BPF_MAP_KEY_POISON	(1ULL << 63)
#define BPF_MAP_KEY_SEEN	(1ULL << 62)

#define BPF_MAP_PTR_UNPRIV	1UL
#define BPF_MAP_PTR_POISON	((void *)((0xeB9FUL << 1) +	\
					  POISON_POINTER_DELTA))
#define BPF_MAP_PTR(X)		((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))

static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx);
static int release_reference(struct bpf_verifier_env *env, int ref_obj_id);
static void invalidate_non_owning_refs(struct bpf_verifier_env *env);
static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env);
static int ref_set_non_owning(struct bpf_verifier_env *env,
			      struct bpf_reg_state *reg);
static void specialize_kfunc(struct bpf_verifier_env *env,
			     u32 func_id, u16 offset, unsigned long *addr);
static bool is_trusted_reg(const struct bpf_reg_state *reg);

static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
{
	return BPF_MAP_PTR(aux->map_ptr_state) == BPF_MAP_PTR_POISON;
}

static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
{
	return aux->map_ptr_state & BPF_MAP_PTR_UNPRIV;
}

static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
			      const struct bpf_map *map, bool unpriv)
{
	BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
	unpriv |= bpf_map_ptr_unpriv(aux);
	aux->map_ptr_state = (unsigned long)map |
			     (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
}

static bool bpf_map_key_poisoned(const struct bpf_insn_aux_data *aux)
{
	return aux->map_key_state & BPF_MAP_KEY_POISON;
}

static bool bpf_map_key_unseen(const struct bpf_insn_aux_data *aux)
{
	return !(aux->map_key_state & BPF_MAP_KEY_SEEN);
}

static u64 bpf_map_key_immediate(const struct bpf_insn_aux_data *aux)
{
	return aux->map_key_state & ~(BPF_MAP_KEY_SEEN | BPF_MAP_KEY_POISON);
}

static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state)
{
	bool poisoned = bpf_map_key_poisoned(aux);

	aux->map_key_state = state | BPF_MAP_KEY_SEEN |
			     (poisoned ? BPF_MAP_KEY_POISON : 0ULL);
}

static bool bpf_helper_call(const struct bpf_insn *insn)
{
	return insn->code == (BPF_JMP | BPF_CALL) &&
	       insn->src_reg == 0;
}

static bool bpf_pseudo_call(const struct bpf_insn *insn)
{
	return insn->code == (BPF_JMP | BPF_CALL) &&
	       insn->src_reg == BPF_PSEUDO_CALL;
}

static bool bpf_pseudo_kfunc_call(const struct bpf_insn *insn)
{
	return insn->code == (BPF_JMP | BPF_CALL) &&
	       insn->src_reg == BPF_PSEUDO_KFUNC_CALL;
}
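
/* A BPF_JMP | BPF_CALL instruction is disambiguated by src_reg, mirroring the
 * three predicates above (sketch):
 *
 *	src_reg == 0			 helper call, imm is the helper id
 *					 (e.g. BPF_FUNC_map_lookup_elem)
 *	src_reg == BPF_PSEUDO_CALL	 call to another BPF subprogram,
 *					 imm is the relative insn offset of
 *					 the callee
 *	src_reg == BPF_PSEUDO_KFUNC_CALL call to a kernel function (kfunc),
 *					 imm is the BTF id of the callee
 */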

struct bpf_call_arg_meta {
	struct bpf_map *map_ptr;
	bool raw_mode;
	bool pkt_access;
	u8 release_regno;
	int regno;
	int access_size;
	int mem_size;
	u64 msize_max_value;
	int ref_obj_id;
	int dynptr_id;
	int map_uid;
	int func_id;
	struct btf *btf;
	u32 btf_id;
	struct btf *ret_btf;
	u32 ret_btf_id;
	u32 subprogno;
	struct btf_field *kptr_field;
};

struct bpf_kfunc_call_arg_meta {
	/* In parameters */
	struct btf *btf;
	u32 func_id;
	u32 kfunc_flags;
	const struct btf_type *func_proto;
	const char *func_name;
	/* Out parameters */
	u32 ref_obj_id;
	u8 release_regno;
	bool r0_rdonly;
	u32 ret_btf_id;
	u64 r0_size;
	u32 subprogno;
	struct {
		u64 value;
		bool found;
	} arg_constant;

	/* arg_{btf,btf_id,owning_ref} are used by kfunc-specific handling,
	 * generally to pass info about user-defined local kptr types to later
	 * verification logic
	 *   bpf_obj_drop/bpf_percpu_obj_drop
	 *     Record the local kptr type to be drop'd
	 *   bpf_refcount_acquire (via KF_ARG_PTR_TO_REFCOUNTED_KPTR arg type)
	 *     Record the local kptr type to be refcount_incr'd and use
	 *     arg_owning_ref to determine whether refcount_acquire should be
	 *     fallible
	 */
	struct btf *arg_btf;
	u32 arg_btf_id;
	bool arg_owning_ref;

	struct {
		struct btf_field *field;
	} arg_list_head;
	struct {
		struct btf_field *field;
	} arg_rbtree_root;
	struct {
		enum bpf_dynptr_type type;
		u32 id;
		u32 ref_obj_id;
	} initialized_dynptr;
	struct {
		u8 spi;
		u8 frameno;
	} iter;
	u64 mem_size;
};

struct btf *btf_vmlinux;

static DEFINE_MUTEX(bpf_verifier_lock);
static DEFINE_MUTEX(bpf_percpu_ma_lock);

__printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
{
	struct bpf_verifier_env *env = private_data;
	va_list args;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(&env->log, fmt, args);
	va_end(args);
}

static void verbose_invalid_scalar(struct bpf_verifier_env *env,
				   struct bpf_reg_state *reg,
				   struct tnum *range, const char *ctx,
				   const char *reg_name)
{
	char tn_buf[48];

	verbose(env, "At %s the register %s ", ctx, reg_name);
	if (!tnum_is_unknown(reg->var_off)) {
		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
		verbose(env, "has value %s", tn_buf);
	} else {
		verbose(env, "has unknown scalar value");
	}
	tnum_strn(tn_buf, sizeof(tn_buf), *range);
	verbose(env, " should have been in %s\n", tn_buf);
}

static bool type_may_be_null(u32 type)
{
	return type & PTR_MAYBE_NULL;
}

static bool reg_not_null(const struct bpf_reg_state *reg)
{
	enum bpf_reg_type type;

	type = reg->type;
	if (type_may_be_null(type))
		return false;

	type = base_type(type);
	return type == PTR_TO_SOCKET ||
	       type == PTR_TO_TCP_SOCK ||
	       type == PTR_TO_MAP_VALUE ||
	       type == PTR_TO_MAP_KEY ||
	       type == PTR_TO_SOCK_COMMON ||
	       (type == PTR_TO_BTF_ID && is_trusted_reg(reg)) ||
	       type == PTR_TO_MEM;
}

static struct btf_record *reg_btf_record(const struct bpf_reg_state *reg)
{
	struct btf_record *rec = NULL;
	struct btf_struct_meta *meta;

	if (reg->type == PTR_TO_MAP_VALUE) {
		rec = reg->map_ptr->record;
	} else if (type_is_ptr_alloc_obj(reg->type)) {
		meta = btf_find_struct_meta(reg->btf, reg->btf_id);
		if (meta)
			rec = meta->record;
	}
	return rec;
}

static bool subprog_is_global(const struct bpf_verifier_env *env, int subprog)
{
	struct bpf_func_info_aux *aux = env->prog->aux->func_info_aux;

	return aux && aux[subprog].linkage == BTF_FUNC_GLOBAL;
}

static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
{
	return btf_record_has_field(reg_btf_record(reg), BPF_SPIN_LOCK);
}

static bool type_is_rdonly_mem(u32 type)
{
	return type & MEM_RDONLY;
}

static bool is_acquire_function(enum bpf_func_id func_id,
				const struct bpf_map *map)
{
	enum bpf_map_type map_type = map ? map->map_type : BPF_MAP_TYPE_UNSPEC;

	if (func_id == BPF_FUNC_sk_lookup_tcp ||
	    func_id == BPF_FUNC_sk_lookup_udp ||
	    func_id == BPF_FUNC_skc_lookup_tcp ||
	    func_id == BPF_FUNC_ringbuf_reserve ||
	    func_id == BPF_FUNC_kptr_xchg)
		return true;

	if (func_id == BPF_FUNC_map_lookup_elem &&
	    (map_type == BPF_MAP_TYPE_SOCKMAP ||
	     map_type == BPF_MAP_TYPE_SOCKHASH))
		return true;

	return false;
}

static bool is_ptr_cast_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_tcp_sock ||
	       func_id == BPF_FUNC_sk_fullsock ||
	       func_id == BPF_FUNC_skc_to_tcp_sock ||
	       func_id == BPF_FUNC_skc_to_tcp6_sock ||
	       func_id == BPF_FUNC_skc_to_udp6_sock ||
	       func_id == BPF_FUNC_skc_to_mptcp_sock ||
	       func_id == BPF_FUNC_skc_to_tcp_timewait_sock ||
	       func_id == BPF_FUNC_skc_to_tcp_request_sock;
}

static bool is_dynptr_ref_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_dynptr_data;
}

static bool is_sync_callback_calling_kfunc(u32 btf_id);
static bool is_bpf_throw_kfunc(struct bpf_insn *insn);

static bool is_sync_callback_calling_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_for_each_map_elem ||
	       func_id == BPF_FUNC_find_vma ||
	       func_id == BPF_FUNC_loop ||
	       func_id == BPF_FUNC_user_ringbuf_drain;
}

static bool is_async_callback_calling_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_timer_set_callback;
}

static bool is_callback_calling_function(enum bpf_func_id func_id)
{
	return is_sync_callback_calling_function(func_id) ||
	       is_async_callback_calling_function(func_id);
}

static bool is_sync_callback_calling_insn(struct bpf_insn *insn)
{
	return (bpf_helper_call(insn) && is_sync_callback_calling_function(insn->imm)) ||
	       (bpf_pseudo_kfunc_call(insn) && is_sync_callback_calling_kfunc(insn->imm));
}

static bool is_storage_get_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_sk_storage_get ||
	       func_id == BPF_FUNC_inode_storage_get ||
	       func_id == BPF_FUNC_task_storage_get ||
	       func_id == BPF_FUNC_cgrp_storage_get;
}

static bool helper_multiple_ref_obj_use(enum bpf_func_id func_id,
					const struct bpf_map *map)
{
	int ref_obj_uses = 0;

	if (is_ptr_cast_function(func_id))
		ref_obj_uses++;
	if (is_acquire_function(func_id, map))
		ref_obj_uses++;
	if (is_dynptr_ref_function(func_id))
		ref_obj_uses++;

	return ref_obj_uses > 1;
}

static bool is_cmpxchg_insn(const struct bpf_insn *insn)
{
	return BPF_CLASS(insn->code) == BPF_STX &&
	       BPF_MODE(insn->code) == BPF_ATOMIC &&
	       insn->imm == BPF_CMPXCHG;
}

static int __get_spi(s32 off)
{
	return (-off - 1) / BPF_REG_SIZE;
}
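
/* Worked example (illustrative): with BPF_REG_SIZE == 8, a stack access at
 * fp-8 maps to spi = (8 - 1) / 8 = 0, fp-16 to spi = 1 and fp-24 to spi = 2.
 * Slot indices therefore grow downwards from the frame pointer, which is why
 * multi-slot objects below occupy spi, spi - 1, ...
 */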

static struct bpf_func_state *func(struct bpf_verifier_env *env,
				   const struct bpf_reg_state *reg)
{
	struct bpf_verifier_state *cur = env->cur_state;

	return cur->frame[reg->frameno];
}

static bool is_spi_bounds_valid(struct bpf_func_state *state, int spi, int nr_slots)
{
	int allocated_slots = state->allocated_stack / BPF_REG_SIZE;

	/* We need to check that slots between [spi - nr_slots + 1, spi] are
	 * within [0, allocated_stack).
	 *
	 * Please note that the spi grows downwards. For example, a dynptr
	 * takes the size of two stack slots; the first slot will be at
	 * spi and the second slot will be at spi - 1.
	 */
	return spi - nr_slots + 1 >= 0 && spi < allocated_slots;
}

static int stack_slot_obj_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
				  const char *obj_kind, int nr_slots)
{
	int off, spi;

	if (!tnum_is_const(reg->var_off)) {
		verbose(env, "%s has to be at a constant offset\n", obj_kind);
		return -EINVAL;
	}

	off = reg->off + reg->var_off.value;
	if (off % BPF_REG_SIZE) {
		verbose(env, "cannot pass in %s at an offset=%d\n", obj_kind, off);
		return -EINVAL;
	}

	spi = __get_spi(off);
	if (spi + 1 < nr_slots) {
		verbose(env, "cannot pass in %s at an offset=%d\n", obj_kind, off);
		return -EINVAL;
	}

	if (!is_spi_bounds_valid(func(env, reg), spi, nr_slots))
		return -ERANGE;
	return spi;
}

static int dynptr_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{
	return stack_slot_obj_get_spi(env, reg, "dynptr", BPF_DYNPTR_NR_SLOTS);
}

static int iter_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg, int nr_slots)
{
	return stack_slot_obj_get_spi(env, reg, "iter", nr_slots);
}

static const char *btf_type_name(const struct btf *btf, u32 id)
{
	return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off);
}

static enum bpf_dynptr_type arg_to_dynptr_type(enum bpf_arg_type arg_type)
{
	switch (arg_type & DYNPTR_TYPE_FLAG_MASK) {
	case DYNPTR_TYPE_LOCAL:
		return BPF_DYNPTR_TYPE_LOCAL;
	case DYNPTR_TYPE_RINGBUF:
		return BPF_DYNPTR_TYPE_RINGBUF;
	case DYNPTR_TYPE_SKB:
		return BPF_DYNPTR_TYPE_SKB;
	case DYNPTR_TYPE_XDP:
		return BPF_DYNPTR_TYPE_XDP;
	default:
		return BPF_DYNPTR_TYPE_INVALID;
	}
}

static enum bpf_type_flag get_dynptr_type_flag(enum bpf_dynptr_type type)
{
	switch (type) {
	case BPF_DYNPTR_TYPE_LOCAL:
		return DYNPTR_TYPE_LOCAL;
	case BPF_DYNPTR_TYPE_RINGBUF:
		return DYNPTR_TYPE_RINGBUF;
	case BPF_DYNPTR_TYPE_SKB:
		return DYNPTR_TYPE_SKB;
	case BPF_DYNPTR_TYPE_XDP:
		return DYNPTR_TYPE_XDP;
	default:
		return 0;
	}
}

static bool dynptr_type_refcounted(enum bpf_dynptr_type type)
{
	return type == BPF_DYNPTR_TYPE_RINGBUF;
}
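
/* Illustrative only: a ringbuf dynptr is the one refcounted dynptr type, so a
 * BPF program must pair the reserve with a submit or discard. A minimal
 * sketch, assuming a BPF_MAP_TYPE_RINGBUF map named 'ringbuf':
 *
 *	struct bpf_dynptr ptr;
 *
 *	if (!bpf_ringbuf_reserve_dynptr(&ringbuf, size, 0, &ptr))
 *		bpf_ringbuf_submit_dynptr(&ptr, 0);
 *	else
 *		bpf_ringbuf_discard_dynptr(&ptr, 0);
 *
 * The ref_obj_id acquired for the dynptr's stack slots below is what lets the
 * verifier enforce that pairing.
 */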

static void __mark_dynptr_reg(struct bpf_reg_state *reg,
			      enum bpf_dynptr_type type,
			      bool first_slot, int dynptr_id);

static void __mark_reg_not_init(const struct bpf_verifier_env *env,
				struct bpf_reg_state *reg);

static void mark_dynptr_stack_regs(struct bpf_verifier_env *env,
				   struct bpf_reg_state *sreg1,
				   struct bpf_reg_state *sreg2,
				   enum bpf_dynptr_type type)
{
	int id = ++env->id_gen;

	__mark_dynptr_reg(sreg1, type, true, id);
	__mark_dynptr_reg(sreg2, type, false, id);
}

static void mark_dynptr_cb_reg(struct bpf_verifier_env *env,
			       struct bpf_reg_state *reg,
			       enum bpf_dynptr_type type)
{
	__mark_dynptr_reg(reg, type, true, ++env->id_gen);
}

static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env,
					struct bpf_func_state *state, int spi);

static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
				   enum bpf_arg_type arg_type, int insn_idx, int clone_ref_obj_id)
{
	struct bpf_func_state *state = func(env, reg);
	enum bpf_dynptr_type type;
	int spi, i, err;

	spi = dynptr_get_spi(env, reg);
	if (spi < 0)
		return spi;

	/* We cannot assume both spi and spi - 1 belong to the same dynptr,
	 * hence we need to call destroy_if_dynptr_stack_slot twice for both,
	 * to ensure that for the following example:
	 *      [d1][d1][d2][d2]
	 * spi    3   2   1   0
	 * So marking spi = 2 should lead to destruction of both d1 and d2. In
	 * case they do belong to same dynptr, second call won't see slot_type
	 * as STACK_DYNPTR and will simply skip destruction.
	 */
	err = destroy_if_dynptr_stack_slot(env, state, spi);
	if (err)
		return err;
	err = destroy_if_dynptr_stack_slot(env, state, spi - 1);
	if (err)
		return err;

	for (i = 0; i < BPF_REG_SIZE; i++) {
		state->stack[spi].slot_type[i] = STACK_DYNPTR;
		state->stack[spi - 1].slot_type[i] = STACK_DYNPTR;
	}

	type = arg_to_dynptr_type(arg_type);
	if (type == BPF_DYNPTR_TYPE_INVALID)
		return -EINVAL;

	mark_dynptr_stack_regs(env, &state->stack[spi].spilled_ptr,
			       &state->stack[spi - 1].spilled_ptr, type);

	if (dynptr_type_refcounted(type)) {
		/* The id is used to track proper releasing */
		int id;

		if (clone_ref_obj_id)
			id = clone_ref_obj_id;
		else
			id = acquire_reference_state(env, insn_idx);

		if (id < 0)
			return id;

		state->stack[spi].spilled_ptr.ref_obj_id = id;
		state->stack[spi - 1].spilled_ptr.ref_obj_id = id;
	}

	state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
	state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN;

	return 0;
}

static void invalidate_dynptr(struct bpf_verifier_env *env, struct bpf_func_state *state, int spi)
{
	int i;

	for (i = 0; i < BPF_REG_SIZE; i++) {
		state->stack[spi].slot_type[i] = STACK_INVALID;
		state->stack[spi - 1].slot_type[i] = STACK_INVALID;
	}

	__mark_reg_not_init(env, &state->stack[spi].spilled_ptr);
	__mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr);

	/* Why do we need to set REG_LIVE_WRITTEN for STACK_INVALID slot?
	 *
	 * While we don't allow reading STACK_INVALID, it is still possible to
	 * do <8 byte writes marking some but not all slots as STACK_MISC. Then,
	 * helpers or insns can do partial read of that part without failing,
	 * but check_stack_range_initialized, check_stack_read_var_off, and
	 * check_stack_read_fixed_off will do mark_reg_read for all 8-bytes of
	 * the slot conservatively. Hence we need to prevent those liveness
	 * marking walks.
	 *
	 * This was not a problem before because STACK_INVALID is only set by
	 * default (where the default reg state has its reg->parent as NULL), or
	 * in clean_live_states after REG_LIVE_DONE (at which point
	 * mark_reg_read won't walk reg->parent chain), but not randomly during
	 * verifier state exploration (like we did above). Hence, for our case
	 * parentage chain will still be live (i.e. reg->parent may be
	 * non-NULL), while earlier reg->parent was NULL, so we need
	 * REG_LIVE_WRITTEN to screen off read marker propagation when it is
	 * done later on reads or by mark_dynptr_read, so as not to
	 * unnecessarily mark registers in the verifier state.
	 */
	state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
	state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN;
}

static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{
	struct bpf_func_state *state = func(env, reg);
	int spi, ref_obj_id, i;

	spi = dynptr_get_spi(env, reg);
	if (spi < 0)
		return spi;

	if (!dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type)) {
		invalidate_dynptr(env, state, spi);
		return 0;
	}

	ref_obj_id = state->stack[spi].spilled_ptr.ref_obj_id;

	/* If the dynptr has a ref_obj_id, then we need to invalidate
	 * two things:
	 *
	 * 1) Any dynptrs with a matching ref_obj_id (clones)
	 * 2) Any slices derived from this dynptr.
	 */

	/* Invalidate any slices associated with this dynptr */
	WARN_ON_ONCE(release_reference(env, ref_obj_id));

	/* Invalidate any dynptr clones */
	for (i = 1; i < state->allocated_stack / BPF_REG_SIZE; i++) {
		if (state->stack[i].spilled_ptr.ref_obj_id != ref_obj_id)
			continue;

		/* it should always be the case that if the ref obj id
		 * matches then the stack slot also belongs to a
		 * dynptr
		 */
		if (state->stack[i].slot_type[0] != STACK_DYNPTR) {
			verbose(env, "verifier internal error: misconfigured ref_obj_id\n");
			return -EFAULT;
		}
		if (state->stack[i].spilled_ptr.dynptr.first_slot)
			invalidate_dynptr(env, state, i);
	}

	return 0;
}

static void __mark_reg_unknown(const struct bpf_verifier_env *env,
			       struct bpf_reg_state *reg);

static void mark_reg_invalid(const struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{
	if (!env->allow_ptr_leaks)
		__mark_reg_not_init(env, reg);
	else
		__mark_reg_unknown(env, reg);
}

static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env,
					struct bpf_func_state *state, int spi)
{
	struct bpf_func_state *fstate;
	struct bpf_reg_state *dreg;
	int i, dynptr_id;

	/* We always ensure that STACK_DYNPTR is never set partially,
	 * hence just checking for slot_type[0] is enough. This is
	 * different for STACK_SPILL, where it may be only set for
	 * 1 byte, so code has to use is_spilled_reg.
	 */
	if (state->stack[spi].slot_type[0] != STACK_DYNPTR)
		return 0;

	/* Reposition spi to first slot */
	if (!state->stack[spi].spilled_ptr.dynptr.first_slot)
		spi = spi + 1;

	if (dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type)) {
		verbose(env, "cannot overwrite referenced dynptr\n");
		return -EINVAL;
	}

	mark_stack_slot_scratched(env, spi);
	mark_stack_slot_scratched(env, spi - 1);

	/* Writing partially to one dynptr stack slot destroys both. */
	for (i = 0; i < BPF_REG_SIZE; i++) {
		state->stack[spi].slot_type[i] = STACK_INVALID;
		state->stack[spi - 1].slot_type[i] = STACK_INVALID;
	}

	dynptr_id = state->stack[spi].spilled_ptr.id;
	/* Invalidate any slices associated with this dynptr */
	bpf_for_each_reg_in_vstate(env->cur_state, fstate, dreg, ({
		/* Dynptr slices are only PTR_TO_MEM_OR_NULL and PTR_TO_MEM */
		if (dreg->type != (PTR_TO_MEM | PTR_MAYBE_NULL) && dreg->type != PTR_TO_MEM)
			continue;
		if (dreg->dynptr_id == dynptr_id)
			mark_reg_invalid(env, dreg);
	}));

	/* Do not release reference state, we are destroying dynptr on stack,
	 * not using some helper to release it. Just reset register.
	 */
	__mark_reg_not_init(env, &state->stack[spi].spilled_ptr);
	__mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr);

	/* Same reason as unmark_stack_slots_dynptr above */
	state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
	state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN;

	return 0;
}

static bool is_dynptr_reg_valid_uninit(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{
	int spi;

	if (reg->type == CONST_PTR_TO_DYNPTR)
		return false;

	spi = dynptr_get_spi(env, reg);

	/* -ERANGE (i.e. spi not falling into allocated stack slots) isn't an
	 * error because this just means the stack state hasn't been updated yet.
	 * We will do check_mem_access to check and update stack bounds later.
	 */
	if (spi < 0 && spi != -ERANGE)
		return false;

	/* We don't need to check if the stack slots are marked by previous
	 * dynptr initializations because we allow overwriting existing unreferenced
	 * STACK_DYNPTR slots, see mark_stack_slots_dynptr which calls
	 * destroy_if_dynptr_stack_slot to ensure dynptr objects at the slots we are
	 * touching are completely destructed before we reinitialize them for a new
	 * one. For referenced ones, destroy_if_dynptr_stack_slot returns an error early
	 * instead of delaying it until the end where the user will get "Unreleased
	 * reference" error.
	 */
	return true;
}

static bool is_dynptr_reg_valid_init(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{
	struct bpf_func_state *state = func(env, reg);
	int i, spi;

	/* This already represents first slot of initialized bpf_dynptr.
	 *
	 * CONST_PTR_TO_DYNPTR already has fixed and var_off as 0 due to
	 * check_func_arg_reg_off's logic, so we don't need to check its
	 * offset and alignment.
	 */
	if (reg->type == CONST_PTR_TO_DYNPTR)
		return true;

	spi = dynptr_get_spi(env, reg);
	if (spi < 0)
		return false;
	if (!state->stack[spi].spilled_ptr.dynptr.first_slot)
		return false;

	for (i = 0; i < BPF_REG_SIZE; i++) {
		if (state->stack[spi].slot_type[i] != STACK_DYNPTR ||
		    state->stack[spi - 1].slot_type[i] != STACK_DYNPTR)
			return false;
	}

	return true;
}

static bool is_dynptr_type_expected(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
				    enum bpf_arg_type arg_type)
{
	struct bpf_func_state *state = func(env, reg);
	enum bpf_dynptr_type dynptr_type;
	int spi;

	/* ARG_PTR_TO_DYNPTR takes any type of dynptr */
	if (arg_type == ARG_PTR_TO_DYNPTR)
		return true;

	dynptr_type = arg_to_dynptr_type(arg_type);
	if (reg->type == CONST_PTR_TO_DYNPTR) {
		return reg->dynptr.type == dynptr_type;
	} else {
		spi = dynptr_get_spi(env, reg);
		if (spi < 0)
			return false;
		return state->stack[spi].spilled_ptr.dynptr.type == dynptr_type;
	}
}

static void __mark_reg_known_zero(struct bpf_reg_state *reg);

static bool in_rcu_cs(struct bpf_verifier_env *env);

static bool is_kfunc_rcu_protected(struct bpf_kfunc_call_arg_meta *meta);

static int mark_stack_slots_iter(struct bpf_verifier_env *env,
				 struct bpf_kfunc_call_arg_meta *meta,
				 struct bpf_reg_state *reg, int insn_idx,
				 struct btf *btf, u32 btf_id, int nr_slots)
{
	struct bpf_func_state *state = func(env, reg);
	int spi, i, j, id;

	spi = iter_get_spi(env, reg, nr_slots);
	if (spi < 0)
		return spi;

	id = acquire_reference_state(env, insn_idx);
	if (id < 0)
		return id;

	for (i = 0; i < nr_slots; i++) {
		struct bpf_stack_state *slot = &state->stack[spi - i];
		struct bpf_reg_state *st = &slot->spilled_ptr;

		__mark_reg_known_zero(st);
		st->type = PTR_TO_STACK; /* we don't have dedicated reg type */
		if (is_kfunc_rcu_protected(meta)) {
			if (in_rcu_cs(env))
				st->type |= MEM_RCU;
			else
				st->type |= PTR_UNTRUSTED;
		}
		st->live |= REG_LIVE_WRITTEN;
		st->ref_obj_id = i == 0 ? id : 0;
		st->iter.btf = btf;
		st->iter.btf_id = btf_id;
		st->iter.state = BPF_ITER_STATE_ACTIVE;
		st->iter.depth = 0;

		for (j = 0; j < BPF_REG_SIZE; j++)
			slot->slot_type[j] = STACK_ITER;

		mark_stack_slot_scratched(env, spi - i);
	}

	return 0;
}
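
/* Illustrative only: for bpf_iter_num (an 8-byte iterator) nr_slots is 1, so
 * a bpf_iter_num_new() call with the iterator at fp-8 marks the single slot
 * spi = 0 as STACK_ITER and stores the acquired ref_obj_id there. Larger
 * iterators simply span more consecutive slots, with only the first slot
 * carrying the ref_obj_id.
 */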

static int unmark_stack_slots_iter(struct bpf_verifier_env *env,
				   struct bpf_reg_state *reg, int nr_slots)
{
	struct bpf_func_state *state = func(env, reg);
	int spi, i, j;

	spi = iter_get_spi(env, reg, nr_slots);
	if (spi < 0)
		return spi;

	for (i = 0; i < nr_slots; i++) {
		struct bpf_stack_state *slot = &state->stack[spi - i];
		struct bpf_reg_state *st = &slot->spilled_ptr;

		if (i == 0)
			WARN_ON_ONCE(release_reference(env, st->ref_obj_id));

		__mark_reg_not_init(env, st);

		/* see unmark_stack_slots_dynptr() for why we need to set REG_LIVE_WRITTEN */
		st->live |= REG_LIVE_WRITTEN;

		for (j = 0; j < BPF_REG_SIZE; j++)
			slot->slot_type[j] = STACK_INVALID;

		mark_stack_slot_scratched(env, spi - i);
	}

	return 0;
}

static bool is_iter_reg_valid_uninit(struct bpf_verifier_env *env,
				     struct bpf_reg_state *reg, int nr_slots)
{
	struct bpf_func_state *state = func(env, reg);
	int spi, i, j;

	/* For -ERANGE (i.e. spi not falling into allocated stack slots), we
	 * will do check_mem_access to check and update stack bounds later, so
	 * return true for that case.
	 */
	spi = iter_get_spi(env, reg, nr_slots);
	if (spi == -ERANGE)
		return true;
	if (spi < 0)
		return false;

	for (i = 0; i < nr_slots; i++) {
		struct bpf_stack_state *slot = &state->stack[spi - i];

		for (j = 0; j < BPF_REG_SIZE; j++)
			if (slot->slot_type[j] == STACK_ITER)
				return false;
	}

	return true;
}

static int is_iter_reg_valid_init(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
				  struct btf *btf, u32 btf_id, int nr_slots)
{
	struct bpf_func_state *state = func(env, reg);
	int spi, i, j;

	spi = iter_get_spi(env, reg, nr_slots);
	if (spi < 0)
		return -EINVAL;

	for (i = 0; i < nr_slots; i++) {
		struct bpf_stack_state *slot = &state->stack[spi - i];
		struct bpf_reg_state *st = &slot->spilled_ptr;

		if (st->type & PTR_UNTRUSTED)
			return -EPROTO;
		/* only main (first) slot has ref_obj_id set */
		if (i == 0 && !st->ref_obj_id)
			return -EINVAL;
		if (i != 0 && st->ref_obj_id)
			return -EINVAL;
		if (st->iter.btf != btf || st->iter.btf_id != btf_id)
			return -EINVAL;

		for (j = 0; j < BPF_REG_SIZE; j++)
			if (slot->slot_type[j] != STACK_ITER)
				return -EINVAL;
	}

	return 0;
}

/* Check if given stack slot is "special":
 * - spilled register state (STACK_SPILL);
 * - dynptr state (STACK_DYNPTR);
 * - iter state (STACK_ITER).
 */
static bool is_stack_slot_special(const struct bpf_stack_state *stack)
{
	enum bpf_stack_slot_type type = stack->slot_type[BPF_REG_SIZE - 1];

	switch (type) {
	case STACK_SPILL:
	case STACK_DYNPTR:
	case STACK_ITER:
		return true;
	case STACK_INVALID:
	case STACK_MISC:
	case STACK_ZERO:
		return false;
	default:
		WARN_ONCE(1, "unknown stack slot type %d\n", type);
		return true;
	}
}

/* The reg state of a pointer or a bounded scalar was saved when
 * it was spilled to the stack.
 */
static bool is_spilled_reg(const struct bpf_stack_state *stack)
{
	return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL;
}

static bool is_spilled_scalar_reg(const struct bpf_stack_state *stack)
{
	return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL &&
	       stack->spilled_ptr.type == SCALAR_VALUE;
}

static void scrub_spilled_slot(u8 *stype)
{
	if (*stype != STACK_INVALID)
		*stype = STACK_MISC;
}

/* copy array src of length n * size bytes to dst. dst is reallocated if it's too
 * small to hold src. This is different from krealloc since we don't want to preserve
 * the contents of dst.
 *
 * Leaves dst untouched if src is NULL or length is zero. Returns NULL if memory could
 * not be allocated.
 */
static void *copy_array(void *dst, const void *src, size_t n, size_t size, gfp_t flags)
{
	size_t alloc_bytes;
	void *orig = dst;
	size_t bytes;

	if (ZERO_OR_NULL_PTR(src))
		goto out;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;

	alloc_bytes = max(ksize(orig), kmalloc_size_roundup(bytes));
	dst = krealloc(orig, alloc_bytes, flags);
	if (!dst) {
		kfree(orig);
		return NULL;
	}

	memcpy(dst, src, bytes);
out:
	return dst ? dst : ZERO_SIZE_PTR;
}

/* resize an array from old_n items to new_n items. the array is reallocated if it's too
 * small to hold new_n items. new items are zeroed out if the array grows.
 *
 * Contrary to krealloc_array, does not free arr if new_n is zero.
 */
static void *realloc_array(void *arr, size_t old_n, size_t new_n, size_t size)
{
	size_t alloc_size;
	void *new_arr;

	if (!new_n || old_n == new_n)
		goto out;

	alloc_size = kmalloc_size_roundup(size_mul(new_n, size));
	new_arr = krealloc(arr, alloc_size, GFP_KERNEL);
	if (!new_arr) {
		kfree(arr);
		return NULL;
	}
	arr = new_arr;

	if (new_n > old_n)
		memset(arr + old_n * size, 0, (new_n - old_n) * size);

out:
	return arr ? arr : ZERO_SIZE_PTR;
}
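
/* Usage sketch: growing a reference array from 3 to 4 entries via
 * realloc_array(refs, 3, 4, sizeof(struct bpf_reference_state)) keeps the
 * first three entries and returns the fourth zeroed, while shrinking to 0
 * leaves the array untouched, unlike krealloc_array().
 */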

static int copy_reference_state(struct bpf_func_state *dst, const struct bpf_func_state *src)
{
	dst->refs = copy_array(dst->refs, src->refs, src->acquired_refs,
			       sizeof(struct bpf_reference_state), GFP_KERNEL);
	if (!dst->refs)
		return -ENOMEM;

	dst->acquired_refs = src->acquired_refs;
	return 0;
}

static int copy_stack_state(struct bpf_func_state *dst, const struct bpf_func_state *src)
{
	size_t n = src->allocated_stack / BPF_REG_SIZE;

	dst->stack = copy_array(dst->stack, src->stack, n, sizeof(struct bpf_stack_state),
				GFP_KERNEL);
	if (!dst->stack)
		return -ENOMEM;

	dst->allocated_stack = src->allocated_stack;
	return 0;
}

static int resize_reference_state(struct bpf_func_state *state, size_t n)
{
	state->refs = realloc_array(state->refs, state->acquired_refs, n,
				    sizeof(struct bpf_reference_state));
	if (!state->refs)
		return -ENOMEM;

	state->acquired_refs = n;
	return 0;
}

static int grow_stack_state(struct bpf_func_state *state, int size)
{
	size_t old_n = state->allocated_stack / BPF_REG_SIZE, n = size / BPF_REG_SIZE;

	if (old_n >= n)
		return 0;

	state->stack = realloc_array(state->stack, old_n, n, sizeof(struct bpf_stack_state));
	if (!state->stack)
		return -ENOMEM;

	state->allocated_stack = size;
	return 0;
}

/* Acquire a pointer id from the env and update the state->refs to include
 * this new pointer reference.
 * On success, returns a valid pointer id to associate with the register
 * On failure, returns a negative errno.
 */
static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
{
	struct bpf_func_state *state = cur_func(env);
	int new_ofs = state->acquired_refs;
	int id, err;

	err = resize_reference_state(state, state->acquired_refs + 1);
	if (err)
		return err;
	id = ++env->id_gen;
	state->refs[new_ofs].id = id;
	state->refs[new_ofs].insn_idx = insn_idx;
	state->refs[new_ofs].callback_ref = state->in_callback_fn ? state->frameno : 0;

	return id;
}

/* release function corresponding to acquire_reference_state(). Idempotent. */
static int release_reference_state(struct bpf_func_state *state, int ptr_id)
{
	int i, last_idx;

	last_idx = state->acquired_refs - 1;
	for (i = 0; i < state->acquired_refs; i++) {
		if (state->refs[i].id == ptr_id) {
			/* Cannot release caller references in callbacks */
			if (state->in_callback_fn && state->refs[i].callback_ref != state->frameno)
				return -EINVAL;
			if (last_idx && i != last_idx)
				memcpy(&state->refs[i], &state->refs[last_idx],
				       sizeof(*state->refs));
			memset(&state->refs[last_idx], 0, sizeof(*state->refs));
			state->acquired_refs--;
			return 0;
		}
	}
	return -EINVAL;
}
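
/* Worked example (illustrative): with acquired_refs == 3 and refs[] ids
 * {5, 9, 12}, releasing id 9 copies the last entry over index 1, clears the
 * last slot and decrements acquired_refs, leaving ids {5, 12}. Releasing an
 * id that is not present returns -EINVAL.
 */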

static void free_func_state(struct bpf_func_state *state)
{
	if (!state)
		return;
	kfree(state->refs);
	kfree(state->stack);
	kfree(state);
}

static void clear_jmp_history(struct bpf_verifier_state *state)
{
	kfree(state->jmp_history);
	state->jmp_history = NULL;
	state->jmp_history_cnt = 0;
}

static void free_verifier_state(struct bpf_verifier_state *state,
				bool free_self)
{
	int i;

	for (i = 0; i <= state->curframe; i++) {
		free_func_state(state->frame[i]);
		state->frame[i] = NULL;
	}
	clear_jmp_history(state);
	if (free_self)
		kfree(state);
}

/* copy verifier state from src to dst growing dst stack space
 * when necessary to accommodate larger src stack
 */
static int copy_func_state(struct bpf_func_state *dst,
			   const struct bpf_func_state *src)
{
	int err;

	memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs));
	err = copy_reference_state(dst, src);
	if (err)
		return err;
	return copy_stack_state(dst, src);
}

static int copy_verifier_state(struct bpf_verifier_state *dst_state,
			       const struct bpf_verifier_state *src)
{
	struct bpf_func_state *dst;
	int i, err;

	dst_state->jmp_history = copy_array(dst_state->jmp_history, src->jmp_history,
					    src->jmp_history_cnt, sizeof(struct bpf_idx_pair),
					    GFP_USER);
	if (!dst_state->jmp_history)
		return -ENOMEM;
	dst_state->jmp_history_cnt = src->jmp_history_cnt;

	/* if dst has more stack frames than src frame, free them, this is also
	 * necessary in case of exceptional exits using bpf_throw.
	 */
	for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
		free_func_state(dst_state->frame[i]);
		dst_state->frame[i] = NULL;
	}
	dst_state->speculative = src->speculative;
	dst_state->active_rcu_lock = src->active_rcu_lock;
	dst_state->curframe = src->curframe;
	dst_state->active_lock.ptr = src->active_lock.ptr;
	dst_state->active_lock.id = src->active_lock.id;
	dst_state->branches = src->branches;
	dst_state->parent = src->parent;
	dst_state->first_insn_idx = src->first_insn_idx;
	dst_state->last_insn_idx = src->last_insn_idx;
	dst_state->dfs_depth = src->dfs_depth;
	dst_state->callback_unroll_depth = src->callback_unroll_depth;
	dst_state->used_as_loop_entry = src->used_as_loop_entry;
	for (i = 0; i <= src->curframe; i++) {
		dst = dst_state->frame[i];
		if (!dst) {
			dst = kzalloc(sizeof(*dst), GFP_KERNEL);
			if (!dst)
				return -ENOMEM;
			dst_state->frame[i] = dst;
		}
		err = copy_func_state(dst, src->frame[i]);
		if (err)
			return err;
	}
	return 0;
}

static u32 state_htab_size(struct bpf_verifier_env *env)
{
	return env->prog->len;
}

static struct bpf_verifier_state_list **explored_state(struct bpf_verifier_env *env, int idx)
{
	struct bpf_verifier_state *cur = env->cur_state;
	struct bpf_func_state *state = cur->frame[cur->curframe];

	return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)];
}
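
/* Illustrative note: pending explored states are bucketed by
 * (insn_idx ^ callsite) % state_htab_size(env), so states that reach the same
 * instruction from different call sites hash into different lists, keeping
 * the per-bucket search in is_state_visited() short.
 */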

static bool same_callsites(struct bpf_verifier_state *a, struct bpf_verifier_state *b)
{
	int fr;

	if (a->curframe != b->curframe)
		return false;

	for (fr = a->curframe; fr >= 0; fr--)
		if (a->frame[fr]->callsite != b->frame[fr]->callsite)
			return false;

	return true;
}

/* Open coded iterators allow back-edges in the state graph in order to
 * check unbounded loops that use iterators.
 *
 * In is_state_visited() it is necessary to know if explored states are
 * part of some loops in order to decide whether non-exact states
 * comparison could be used:
 * - non-exact states comparison establishes sub-state relation and uses
 *   read and precision marks to do so, these marks are propagated from
 *   children states and thus are not guaranteed to be final in a loop;
 * - exact states comparison just checks if current and explored states
 *   are identical (and thus form a back-edge).
 *
 * Paper "A New Algorithm for Identifying Loops in Decompilation"
 * by Tao Wei, Jian Mao, Wei Zou and Yu Chen [1] presents a convenient
 * algorithm for loop structure detection and gives an overview of
 * relevant terminology. It also has helpful illustrations.
 *
 * [1] https://api.semanticscholar.org/CorpusID:15784067
 *
 * We use a similar algorithm but because loop nested structure is
 * irrelevant for verifier ours is significantly simpler and resembles
 * strongly connected components algorithm from Sedgewick's textbook.
 *
 * Define topmost loop entry as a first node of the loop traversed in a
 * depth first search starting from initial state. The goal of the loop
 * tracking algorithm is to associate topmost loop entries with states
 * derived from these entries.
 *
 * For each step in the DFS states traversal algorithm needs to identify
 * the following situations:
 *
 *          initial                     initial                   initial
 *            |                           |                         |
 *            V                           V                         V
 *           ...                         ...           .---------> hdr
 *            |                           |            |            |
 *            V                           V            |            V
 *           cur                     .-> succ          |    .------...
 *            |                      |    |            |    |        |
 *            V                      |    V            |    V        V
 *           succ                    '-- cur           |   ...     ...
 *                                                      |    |        |
 *                                                      |    V        V
 *                                                      |   succ <- cur
 *                                                      |    |
 *                                                      |    V
 *                                                      |   ...
 *                                                      |    |
 *                                                      '----'
 *
 *  (A) successor state of cur    (B) successor state of cur or its entry
 *      not yet traversed             are in current DFS path, thus cur and succ
 *                                    are members of the same outermost loop
 *
 *                      initial                     initial
 *                        |                           |
 *                        V                           V
 *                       ...                         ...
 *                        |                           |
 *                        V                           V
 *                .------...                  .------...
 *                |       |                   |       |
 *                V       V                   V       V
 *           .-> hdr     ...                 ...     ...
 *           |    |       |                   |       |
 *           |    V       V                   V       V
 *           |   succ <- cur                 succ <- cur
 *           |    |                           |
 *           |    V                           V
 *           |   ...                         ...
 *           |    |                           |
 *           '----'                          exit
 *
 * (C) successor state of cur is a part of some loop but this loop
 *     does not include cur or successor state is not in a loop at all.
 *
 * Algorithm could be described as the following python code:
 *
 *     traversed = set()   # Set of traversed nodes
 *     entries = {}        # Mapping from node to loop entry
 *     depths = {}         # Depth level assigned to graph node
 *     path = set()        # Current DFS path
 *
 *     # Find outermost loop entry known for n
 *     def get_loop_entry(n):
 *         h = entries.get(n, None)
 *         while h in entries and entries[h] != h:
 *             h = entries[h]
 *         return h
 *
 *     # Update n's loop entry if h's outermost entry comes
 *     # before n's outermost entry in current DFS path.
 *     def update_loop_entry(n, h):
 *         n1 = get_loop_entry(n) or n
 *         h1 = get_loop_entry(h) or h
 *         if h1 in path and depths[h1] <= depths[n1]:
 *             entries[n] = h1
 *
 *     def dfs(n, depth):
 *         traversed.add(n)
 *         path.add(n)
 *         depths[n] = depth
 *         for succ in G.successors(n):
 *             if succ not in traversed:
 *                 # Case A: explore succ and update cur's loop entry
 *                 #         only if succ's entry is in current DFS path.
 *                 dfs(succ, depth + 1)
 *                 h = get_loop_entry(succ)
 *                 update_loop_entry(n, h)
 *             else:
 *                 # Case B or C depending on `h1 in path` check in update_loop_entry().
 *                 update_loop_entry(n, succ)
 *         path.remove(n)
 *
 * To adapt this algorithm for use with verifier:
 * - use st->branch == 0 as a signal that DFS of succ had been finished
 *   and cur's loop entry has to be updated (case A), handle this in
 *   update_branch_counts();
 * - use st->branch > 0 as a signal that st is in the current DFS path;
 * - handle cases B and C in is_state_visited();
 * - update topmost loop entry for intermediate states in get_loop_entry().
 */
static struct bpf_verifier_state *get_loop_entry(struct bpf_verifier_state *st)
{
	struct bpf_verifier_state *topmost = st->loop_entry, *old;

	while (topmost && topmost->loop_entry && topmost != topmost->loop_entry)
		topmost = topmost->loop_entry;
	/* Update loop entries for intermediate states to avoid this
	 * traversal in future get_loop_entry() calls.
	 */
	while (st && st->loop_entry != topmost) {
		old = st->loop_entry;
		st->loop_entry = topmost;
		st = old;
	}
	return topmost;
}

static void update_loop_entry(struct bpf_verifier_state *cur, struct bpf_verifier_state *hdr)
{
	struct bpf_verifier_state *cur1, *hdr1;

	cur1 = get_loop_entry(cur) ?: cur;
	hdr1 = get_loop_entry(hdr) ?: hdr;
	/* The hdr1->branches check decides between cases B and C in
	 * comment for get_loop_entry(). If hdr1->branches == 0 then
	 * hdr's topmost loop entry is not in current DFS path,
	 * hence 'cur' and 'hdr' are not in the same loop and there is
	 * no need to update cur->loop_entry.
	 */
	if (hdr1->branches && hdr1->dfs_depth <= cur1->dfs_depth) {
		cur->loop_entry = hdr;
		hdr->used_as_loop_entry = true;
	}
}

static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
{
	while (st) {
		u32 br = --st->branches;

		/* br == 0 signals that DFS exploration for 'st' is finished,
		 * thus it is necessary to update parent's loop entry if it
		 * turned out that st is a part of some loop.
		 * This is a part of 'case A' in get_loop_entry() comment.
		 */
		if (br == 0 && st->parent && st->loop_entry)
			update_loop_entry(st->parent, st->loop_entry);

		/* WARN_ON(br > 1) technically makes sense here,
		 * but see comment in push_stack(), hence:
		 */
		WARN_ONCE((int)br < 0,
			  "BUG update_branch_counts:branches_to_explore=%d\n",
			  br);
		if (br)
			break;
		st = st->parent;
	}
}

static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
		     int *insn_idx, bool pop_log)
{
	struct bpf_verifier_state *cur = env->cur_state;
	struct bpf_verifier_stack_elem *elem, *head = env->head;
	int err;

	if (env->head == NULL)
		return -ENOENT;

	if (cur) {
		err = copy_verifier_state(cur, &head->st);
		if (err)
			return err;
	}
	if (pop_log)
		bpf_vlog_reset(&env->log, head->log_pos);
	if (insn_idx)
		*insn_idx = head->insn_idx;
	if (prev_insn_idx)
		*prev_insn_idx = head->prev_insn_idx;
	elem = head->next;
	free_verifier_state(&head->st, false);
	kfree(head);
	env->head = elem;
	env->stack_size--;
	return 0;
}

static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
					     int insn_idx, int prev_insn_idx,
					     bool speculative)
{
	struct bpf_verifier_state *cur = env->cur_state;
	struct bpf_verifier_stack_elem *elem;
	int err;

	elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
	if (!elem)
		goto err;

	elem->insn_idx = insn_idx;
	elem->prev_insn_idx = prev_insn_idx;
	elem->next = env->head;
	elem->log_pos = env->log.end_pos;
	env->head = elem;
	env->stack_size++;
	err = copy_verifier_state(&elem->st, cur);
	if (err)
		goto err;
	elem->st.speculative |= speculative;
	if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
		verbose(env, "The sequence of %d jumps is too complex.\n",
			env->stack_size);
		goto err;
	}
	if (elem->st.parent) {
		++elem->st.parent->branches;
		/* WARN_ON(branches > 2) technically makes sense here,
		 * but
		 * 1. speculative states will bump 'branches' for non-branch
		 *    instructions
		 * 2. is_state_visited() heuristics may decide not to create
		 *    a new state for a sequence of branches and all such current
		 *    and cloned states will be pointing to a single parent state
		 *    which might have large 'branches' count.
		 */
	}
	return &elem->st;
err:
	free_verifier_state(env->cur_state, true);
	env->cur_state = NULL;
	/* pop all elements and return */
	while (!pop_stack(env, NULL, NULL, false));
	return NULL;
}

#define CALLER_SAVED_REGS 6
static const int caller_saved[CALLER_SAVED_REGS] = {
	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
};

/* This helper doesn't clear reg->id */
static void ___mark_reg_known(struct bpf_reg_state *reg, u64 imm)
{
	reg->var_off = tnum_const(imm);
	reg->smin_value = (s64)imm;
	reg->smax_value = (s64)imm;
	reg->umin_value = imm;
	reg->umax_value = imm;

	reg->s32_min_value = (s32)imm;
	reg->s32_max_value = (s32)imm;
	reg->u32_min_value = (u32)imm;
	reg->u32_max_value = (u32)imm;
}
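
/* Worked example (illustrative): ___mark_reg_known(reg, 0xffffffff00000001)
 * sets var_off to a constant tnum and collapses all 64-bit bounds to that
 * value, while the 32-bit bounds collapse to 1, the value of the low
 * subregister.
 */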
*/ 1744 reg->id = dynptr_id; 1745 reg->dynptr.type = type; 1746 reg->dynptr.first_slot = first_slot; 1747 } 1748 1749 static void mark_ptr_not_null_reg(struct bpf_reg_state *reg) 1750 { 1751 if (base_type(reg->type) == PTR_TO_MAP_VALUE) { 1752 const struct bpf_map *map = reg->map_ptr; 1753 1754 if (map->inner_map_meta) { 1755 reg->type = CONST_PTR_TO_MAP; 1756 reg->map_ptr = map->inner_map_meta; 1757 /* transfer reg's id which is unique for every map_lookup_elem 1758 * as UID of the inner map. 1759 */ 1760 if (btf_record_has_field(map->inner_map_meta->record, BPF_TIMER)) 1761 reg->map_uid = reg->id; 1762 } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) { 1763 reg->type = PTR_TO_XDP_SOCK; 1764 } else if (map->map_type == BPF_MAP_TYPE_SOCKMAP || 1765 map->map_type == BPF_MAP_TYPE_SOCKHASH) { 1766 reg->type = PTR_TO_SOCKET; 1767 } else { 1768 reg->type = PTR_TO_MAP_VALUE; 1769 } 1770 return; 1771 } 1772 1773 reg->type &= ~PTR_MAYBE_NULL; 1774 } 1775 1776 static void mark_reg_graph_node(struct bpf_reg_state *regs, u32 regno, 1777 struct btf_field_graph_root *ds_head) 1778 { 1779 __mark_reg_known_zero(&regs[regno]); 1780 regs[regno].type = PTR_TO_BTF_ID | MEM_ALLOC; 1781 regs[regno].btf = ds_head->btf; 1782 regs[regno].btf_id = ds_head->value_btf_id; 1783 regs[regno].off = ds_head->node_offset; 1784 } 1785 1786 static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg) 1787 { 1788 return type_is_pkt_pointer(reg->type); 1789 } 1790 1791 static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg) 1792 { 1793 return reg_is_pkt_pointer(reg) || 1794 reg->type == PTR_TO_PACKET_END; 1795 } 1796 1797 static bool reg_is_dynptr_slice_pkt(const struct bpf_reg_state *reg) 1798 { 1799 return base_type(reg->type) == PTR_TO_MEM && 1800 (reg->type & DYNPTR_TYPE_SKB || reg->type & DYNPTR_TYPE_XDP); 1801 } 1802 1803 /* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */ 1804 static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg, 1805 enum bpf_reg_type which) 1806 { 1807 /* The register can already have a range from prior markings. 1808 * This is fine as long as it hasn't been advanced from its 1809 * origin.
1810 */ 1811 return reg->type == which && 1812 reg->id == 0 && 1813 reg->off == 0 && 1814 tnum_equals_const(reg->var_off, 0); 1815 } 1816 1817 /* Reset the min/max bounds of a register */ 1818 static void __mark_reg_unbounded(struct bpf_reg_state *reg) 1819 { 1820 reg->smin_value = S64_MIN; 1821 reg->smax_value = S64_MAX; 1822 reg->umin_value = 0; 1823 reg->umax_value = U64_MAX; 1824 1825 reg->s32_min_value = S32_MIN; 1826 reg->s32_max_value = S32_MAX; 1827 reg->u32_min_value = 0; 1828 reg->u32_max_value = U32_MAX; 1829 } 1830 1831 static void __mark_reg64_unbounded(struct bpf_reg_state *reg) 1832 { 1833 reg->smin_value = S64_MIN; 1834 reg->smax_value = S64_MAX; 1835 reg->umin_value = 0; 1836 reg->umax_value = U64_MAX; 1837 } 1838 1839 static void __mark_reg32_unbounded(struct bpf_reg_state *reg) 1840 { 1841 reg->s32_min_value = S32_MIN; 1842 reg->s32_max_value = S32_MAX; 1843 reg->u32_min_value = 0; 1844 reg->u32_max_value = U32_MAX; 1845 } 1846 1847 static void __update_reg32_bounds(struct bpf_reg_state *reg) 1848 { 1849 struct tnum var32_off = tnum_subreg(reg->var_off); 1850 1851 /* min signed is max(sign bit) | min(other bits) */ 1852 reg->s32_min_value = max_t(s32, reg->s32_min_value, 1853 var32_off.value | (var32_off.mask & S32_MIN)); 1854 /* max signed is min(sign bit) | max(other bits) */ 1855 reg->s32_max_value = min_t(s32, reg->s32_max_value, 1856 var32_off.value | (var32_off.mask & S32_MAX)); 1857 reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)var32_off.value); 1858 reg->u32_max_value = min(reg->u32_max_value, 1859 (u32)(var32_off.value | var32_off.mask)); 1860 } 1861 1862 static void __update_reg64_bounds(struct bpf_reg_state *reg) 1863 { 1864 /* min signed is max(sign bit) | min(other bits) */ 1865 reg->smin_value = max_t(s64, reg->smin_value, 1866 reg->var_off.value | (reg->var_off.mask & S64_MIN)); 1867 /* max signed is min(sign bit) | max(other bits) */ 1868 reg->smax_value = min_t(s64, reg->smax_value, 1869 reg->var_off.value | (reg->var_off.mask & S64_MAX)); 1870 reg->umin_value = max(reg->umin_value, reg->var_off.value); 1871 reg->umax_value = min(reg->umax_value, 1872 reg->var_off.value | reg->var_off.mask); 1873 } 1874 1875 static void __update_reg_bounds(struct bpf_reg_state *reg) 1876 { 1877 __update_reg32_bounds(reg); 1878 __update_reg64_bounds(reg); 1879 } 1880 1881 /* Uses signed min/max values to inform unsigned, and vice-versa */ 1882 static void __reg32_deduce_bounds(struct bpf_reg_state *reg) 1883 { 1884 /* If upper 32 bits of u64/s64 range don't change, we can use lower 32 1885 * bits to improve our u32/s32 boundaries. 1886 * 1887 * E.g., the case where we have upper 32 bits as zero ([10, 20] in 1888 * u64) is pretty trivial, it's obvious that in u32 we'll also have 1889 * [10, 20] range. But this property holds for any 64-bit range as 1890 * long as upper 32 bits in that entire range of values stay the same. 1891 * 1892 * E.g., u64 range [0x10000000A, 0x10000000F] ([4294967306, 4294967311] 1893 * in decimal) has the same upper 32 bits throughout all the values in 1894 * that range. As such, lower 32 bits form a valid [0xA, 0xF] ([10, 15]) 1895 * range. 1896 * 1897 * Note also, that [0xA, 0xF] is a valid range both in u32 and in s32, 1898 * following the rules outlined below about u64/s64 correspondence 1899 * (which equally applies to u32 vs s32 correspondence). In general it 1900 * depends on actual hexadecimal values of 32-bit range. They can form 1901 * only valid u32, or only valid s32 ranges in some cases. 
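* For example (illustrative values only): [0x7ffffffe, 0x80000001] is a valid u32 range but, read as s32, it runs from a positive into a negative number and therefore wraps; conversely, [0xfffffffe, 0x00000001] is a valid s32 range ([-2, 1]) but wraps as u32.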
1902 * 1903 * So we use all these insights to derive bounds for subregisters here. 1904 */ 1905 if ((reg->umin_value >> 32) == (reg->umax_value >> 32)) { 1906 /* u64 to u32 casting preserves validity of low 32 bits as 1907 * a range, if upper 32 bits are the same 1908 */ 1909 reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)reg->umin_value); 1910 reg->u32_max_value = min_t(u32, reg->u32_max_value, (u32)reg->umax_value); 1911 1912 if ((s32)reg->umin_value <= (s32)reg->umax_value) { 1913 reg->s32_min_value = max_t(s32, reg->s32_min_value, (s32)reg->umin_value); 1914 reg->s32_max_value = min_t(s32, reg->s32_max_value, (s32)reg->umax_value); 1915 } 1916 } 1917 if ((reg->smin_value >> 32) == (reg->smax_value >> 32)) { 1918 /* low 32 bits should form a proper u32 range */ 1919 if ((u32)reg->smin_value <= (u32)reg->smax_value) { 1920 reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)reg->smin_value); 1921 reg->u32_max_value = min_t(u32, reg->u32_max_value, (u32)reg->smax_value); 1922 } 1923 /* low 32 bits should form a proper s32 range */ 1924 if ((s32)reg->smin_value <= (s32)reg->smax_value) { 1925 reg->s32_min_value = max_t(s32, reg->s32_min_value, (s32)reg->smin_value); 1926 reg->s32_max_value = min_t(s32, reg->s32_max_value, (s32)reg->smax_value); 1927 } 1928 } 1929 /* Special case where upper bits form a small sequence of two 1930 * sequential numbers (in 32-bit unsigned space, so 0xffffffff to 1931 * 0x00000000 is also valid), while lower bits form a proper s32 range 1932 * going from negative numbers to positive numbers. E.g., let's say we 1933 * have s64 range [-1, 1] ([0xffffffffffffffff, 0x0000000000000001]). 1934 * Possible s64 values are {-1, 0, 1} ({0xffffffffffffffff, 1935 * 0x0000000000000000, 0x00000000000001}). Ignoring upper 32 bits, 1936 * we still get a valid s32 range [-1, 1] ([0xffffffff, 0x00000001]). 1937 * Note that it doesn't have to be 0xffffffff going to 0x00000000 in 1938 * upper 32 bits. As a random example, s64 range 1939 * [0xfffffff0fffffff0; 0xfffffff100000010], forms a valid s32 range 1940 * [-16, 16] ([0xfffffff0; 0x00000010]) in its 32 bit subregister. 1941 */ 1942 if ((u32)(reg->umin_value >> 32) + 1 == (u32)(reg->umax_value >> 32) && 1943 (s32)reg->umin_value < 0 && (s32)reg->umax_value >= 0) { 1944 reg->s32_min_value = max_t(s32, reg->s32_min_value, (s32)reg->umin_value); 1945 reg->s32_max_value = min_t(s32, reg->s32_max_value, (s32)reg->umax_value); 1946 } 1947 if ((u32)(reg->smin_value >> 32) + 1 == (u32)(reg->smax_value >> 32) && 1948 (s32)reg->smin_value < 0 && (s32)reg->smax_value >= 0) { 1949 reg->s32_min_value = max_t(s32, reg->s32_min_value, (s32)reg->smin_value); 1950 reg->s32_max_value = min_t(s32, reg->s32_max_value, (s32)reg->smax_value); 1951 } 1952 /* if u32 range forms a valid s32 range (due to matching sign bit), 1953 * try to learn from that 1954 */ 1955 if ((s32)reg->u32_min_value <= (s32)reg->u32_max_value) { 1956 reg->s32_min_value = max_t(s32, reg->s32_min_value, reg->u32_min_value); 1957 reg->s32_max_value = min_t(s32, reg->s32_max_value, reg->u32_max_value); 1958 } 1959 /* If we cannot cross the sign boundary, then signed and unsigned bounds 1960 * are the same, so combine. This works even in the negative case, e.g. 1961 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff. 
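* The same holds on the non-negative side, e.g. 1 s<= x s<= 5 implies 1 u<= x u<= 5.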
1962 */ 1963 if ((u32)reg->s32_min_value <= (u32)reg->s32_max_value) { 1964 reg->u32_min_value = max_t(u32, reg->s32_min_value, reg->u32_min_value); 1965 reg->u32_max_value = min_t(u32, reg->s32_max_value, reg->u32_max_value); 1966 } 1967 } 1968 1969 static void __reg64_deduce_bounds(struct bpf_reg_state *reg) 1970 { 1971 /* If u64 range forms a valid s64 range (due to matching sign bit), 1972 * try to learn from that. Let's do a bit of ASCII art to see when 1973 * this is happening. Let's take u64 range first: 1974 * 1975 * 0 0x7fffffffffffffff 0x8000000000000000 U64_MAX 1976 * |-------------------------------|--------------------------------| 1977 * 1978 * Valid u64 range is formed when umin and umax are anywhere in the 1979 * range [0, U64_MAX], and umin <= umax. u64 case is simple and 1980 * straightforward. Let's see how s64 range maps onto the same range 1981 * of values, annotated below the line for comparison: 1982 * 1983 * 0 0x7fffffffffffffff 0x8000000000000000 U64_MAX 1984 * |-------------------------------|--------------------------------| 1985 * 0 S64_MAX S64_MIN -1 1986 * 1987 * So s64 values basically start in the middle and they are logically 1988 * contiguous to the right of it, wrapping around from -1 to 0, and 1989 * then finishing as S64_MAX (0x7fffffffffffffff) right before 1990 * S64_MIN. We can try drawing the continuity of u64 vs s64 values 1991 * more visually as mapped to sign-agnostic range of hex values. 1992 * 1993 * u64 start u64 end 1994 * _______________________________________________________________ 1995 * / \ 1996 * 0 0x7fffffffffffffff 0x8000000000000000 U64_MAX 1997 * |-------------------------------|--------------------------------| 1998 * 0 S64_MAX S64_MIN -1 1999 * / \ 2000 * >------------------------------ -------------------------------> 2001 * s64 continues... s64 end s64 start s64 "midpoint" 2002 * 2003 * What this means is that, in general, we can't always derive 2004 * something new about u64 from any random s64 range, and vice versa. 2005 * 2006 * But we can do that in two particular cases. One is when entire 2007 * u64/s64 range is *entirely* contained within left half of the above 2008 * diagram or when it is *entirely* contained in the right half. I.e.: 2009 * 2010 * |-------------------------------|--------------------------------| 2011 * ^ ^ ^ ^ 2012 * A B C D 2013 * 2014 * [A, B] and [C, D] are contained entirely in their respective halves 2015 * and form valid contiguous ranges as both u64 and s64 values. [A, B] 2016 * will be non-negative both as u64 and s64 (and in fact it will be 2017 * identical ranges no matter the signedness). [C, D] treated as s64 2018 * will be a range of negative values, while in u64 it will be 2019 * non-negative range of values larger than 0x8000000000000000. 2020 * 2021 * Now, any other range here can't be represented in both u64 and s64 2022 * simultaneously. E.g., [A, C], [A, D], [B, C], [B, D] are valid 2023 * contiguous u64 ranges, but they are discontinuous in s64. [B, C] 2024 * in s64 would be properly presented as [S64_MIN, C] and [B, S64_MAX], 2025 * for example. Similarly, valid s64 range [D, A] (going from negative 2026 * to positive values), would be two separate [D, U64_MAX] and [0, A] 2027 * ranges as u64. Currently reg_state can't represent two segments per 2028 * numeric domain, so in such situations we can only derive maximal 2029 * possible range ([0, U64_MAX] for u64, and [S64_MIN, S64_MAX] for s64). 
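* A couple of made-up examples: u64 range [5, 10] sits entirely in the left half, so s64 bounds may be tightened to [5, 10] as well; u64 range [5, 0x8000000000000005] straddles the sign boundary, so nothing new can be concluded about s64 and those bounds are left untouched.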
2030 * 2031 * So we use these facts to derive umin/umax from smin/smax and vice 2032 * versa only if they stay within the same "half". This is equivalent 2033 * to checking sign bit: lower half will have sign bit as zero, upper 2034 * half will have sign bit 1. Below in code we simplify this by just 2035 * casting umin/umax as smin/smax and checking if they form valid 2036 * range, and vice versa. Those are equivalent checks. 2037 */ 2038 if ((s64)reg->umin_value <= (s64)reg->umax_value) { 2039 reg->smin_value = max_t(s64, reg->smin_value, reg->umin_value); 2040 reg->smax_value = min_t(s64, reg->smax_value, reg->umax_value); 2041 } 2042 /* If we cannot cross the sign boundary, then signed and unsigned bounds 2043 * are the same, so combine. This works even in the negative case, e.g. 2044 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff. 2045 */ 2046 if ((u64)reg->smin_value <= (u64)reg->smax_value) { 2047 reg->umin_value = max_t(u64, reg->smin_value, reg->umin_value); 2048 reg->umax_value = min_t(u64, reg->smax_value, reg->umax_value); 2049 } 2050 } 2051 2052 static void __reg_deduce_mixed_bounds(struct bpf_reg_state *reg) 2053 { 2054 /* Try to tighten 64-bit bounds from 32-bit knowledge, using 32-bit 2055 * values on both sides of 64-bit range in hope to have a tighter range. 2056 * E.g., if r1 is [0x1'00000000, 0x3'80000000], and we learn from 2057 * 32-bit signed > 0 operation that s32 bounds are now [1; 0x7fffffff]. 2058 * With this, we can substitute 1 as low 32-bits of _low_ 64-bit bound 2059 * (0x100000000 -> 0x100000001) and 0x7fffffff as low 32-bits of 2060 * _high_ 64-bit bound (0x380000000 -> 0x37fffffff) and arrive at a 2061 * better overall bounds for r1 as [0x1'00000001; 0x3'7fffffff]. 2062 * We just need to make sure that derived bounds we are intersecting 2063 * with are well-formed ranges in respective s64 or u64 domain, just 2064 * like we do with similar kinds of 32-to-64 or 64-to-32 adjustments.
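* E.g. (hypothetical numbers), s32 bounds [1, 10] also form a valid u32 range and can be spliced into the low 32 bits of the u64/s64 bounds below, while s32 bounds [-5, 5] wrap around when viewed as u32 and are therefore skipped by the (u32) comparison guard.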
2065 */ 2066 __u64 new_umin, new_umax; 2067 __s64 new_smin, new_smax; 2068 2069 /* u32 -> u64 tightening, it's always well-formed */ 2070 new_umin = (reg->umin_value & ~0xffffffffULL) | reg->u32_min_value; 2071 new_umax = (reg->umax_value & ~0xffffffffULL) | reg->u32_max_value; 2072 reg->umin_value = max_t(u64, reg->umin_value, new_umin); 2073 reg->umax_value = min_t(u64, reg->umax_value, new_umax); 2074 /* u32 -> s64 tightening, u32 range embedded into s64 preserves range validity */ 2075 new_smin = (reg->smin_value & ~0xffffffffULL) | reg->u32_min_value; 2076 new_smax = (reg->smax_value & ~0xffffffffULL) | reg->u32_max_value; 2077 reg->smin_value = max_t(s64, reg->smin_value, new_smin); 2078 reg->smax_value = min_t(s64, reg->smax_value, new_smax); 2079 2080 /* if s32 can be treated as valid u32 range, we can use it as well */ 2081 if ((u32)reg->s32_min_value <= (u32)reg->s32_max_value) { 2082 /* s32 -> u64 tightening */ 2083 new_umin = (reg->umin_value & ~0xffffffffULL) | (u32)reg->s32_min_value; 2084 new_umax = (reg->umax_value & ~0xffffffffULL) | (u32)reg->s32_max_value; 2085 reg->umin_value = max_t(u64, reg->umin_value, new_umin); 2086 reg->umax_value = min_t(u64, reg->umax_value, new_umax); 2087 /* s32 -> s64 tightening */ 2088 new_smin = (reg->smin_value & ~0xffffffffULL) | (u32)reg->s32_min_value; 2089 new_smax = (reg->smax_value & ~0xffffffffULL) | (u32)reg->s32_max_value; 2090 reg->smin_value = max_t(s64, reg->smin_value, new_smin); 2091 reg->smax_value = min_t(s64, reg->smax_value, new_smax); 2092 } 2093 } 2094 2095 static void __reg_deduce_bounds(struct bpf_reg_state *reg) 2096 { 2097 __reg32_deduce_bounds(reg); 2098 __reg64_deduce_bounds(reg); 2099 __reg_deduce_mixed_bounds(reg); 2100 } 2101 2102 /* Attempts to improve var_off based on unsigned min/max information */ 2103 static void __reg_bound_offset(struct bpf_reg_state *reg) 2104 { 2105 struct tnum var64_off = tnum_intersect(reg->var_off, 2106 tnum_range(reg->umin_value, 2107 reg->umax_value)); 2108 struct tnum var32_off = tnum_intersect(tnum_subreg(var64_off), 2109 tnum_range(reg->u32_min_value, 2110 reg->u32_max_value)); 2111 2112 reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off); 2113 } 2114 2115 static void reg_bounds_sync(struct bpf_reg_state *reg) 2116 { 2117 /* We might have learned new bounds from the var_off. */ 2118 __update_reg_bounds(reg); 2119 /* We might have learned something about the sign bit. */ 2120 __reg_deduce_bounds(reg); 2121 __reg_deduce_bounds(reg); 2122 /* We might have learned some bits from the bounds. */ 2123 __reg_bound_offset(reg); 2124 /* Intersecting with the old var_off might have improved our bounds 2125 * slightly, e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), 2126 * then new var_off is (0; 0x7f...fc) which improves our umax. 
2127 */ 2128 __update_reg_bounds(reg); 2129 } 2130 2131 static int reg_bounds_sanity_check(struct bpf_verifier_env *env, 2132 struct bpf_reg_state *reg, const char *ctx) 2133 { 2134 const char *msg; 2135 2136 if (reg->umin_value > reg->umax_value || 2137 reg->smin_value > reg->smax_value || 2138 reg->u32_min_value > reg->u32_max_value || 2139 reg->s32_min_value > reg->s32_max_value) { 2140 msg = "range bounds violation"; 2141 goto out; 2142 } 2143 2144 if (tnum_is_const(reg->var_off)) { 2145 u64 uval = reg->var_off.value; 2146 s64 sval = (s64)uval; 2147 2148 if (reg->umin_value != uval || reg->umax_value != uval || 2149 reg->smin_value != sval || reg->smax_value != sval) { 2150 msg = "const tnum out of sync with range bounds"; 2151 goto out; 2152 } 2153 } 2154 2155 if (tnum_subreg_is_const(reg->var_off)) { 2156 u32 uval32 = tnum_subreg(reg->var_off).value; 2157 s32 sval32 = (s32)uval32; 2158 2159 if (reg->u32_min_value != uval32 || reg->u32_max_value != uval32 || 2160 reg->s32_min_value != sval32 || reg->s32_max_value != sval32) { 2161 msg = "const subreg tnum out of sync with range bounds"; 2162 goto out; 2163 } 2164 } 2165 2166 return 0; 2167 out: 2168 verbose(env, "REG INVARIANTS VIOLATION (%s): %s u64=[%#llx, %#llx] " 2169 "s64=[%#llx, %#llx] u32=[%#x, %#x] s32=[%#x, %#x] var_off=(%#llx, %#llx)\n", 2170 ctx, msg, reg->umin_value, reg->umax_value, 2171 reg->smin_value, reg->smax_value, 2172 reg->u32_min_value, reg->u32_max_value, 2173 reg->s32_min_value, reg->s32_max_value, 2174 reg->var_off.value, reg->var_off.mask); 2175 if (env->test_reg_invariants) 2176 return -EFAULT; 2177 __mark_reg_unbounded(reg); 2178 return 0; 2179 } 2180 2181 static bool __reg32_bound_s64(s32 a) 2182 { 2183 return a >= 0 && a <= S32_MAX; 2184 } 2185 2186 static void __reg_assign_32_into_64(struct bpf_reg_state *reg) 2187 { 2188 reg->umin_value = reg->u32_min_value; 2189 reg->umax_value = reg->u32_max_value; 2190 2191 /* Attempt to pull 32-bit signed bounds into 64-bit bounds but must 2192 * be positive otherwise set to worse case bounds and refine later 2193 * from tnum. 2194 */ 2195 if (__reg32_bound_s64(reg->s32_min_value) && 2196 __reg32_bound_s64(reg->s32_max_value)) { 2197 reg->smin_value = reg->s32_min_value; 2198 reg->smax_value = reg->s32_max_value; 2199 } else { 2200 reg->smin_value = 0; 2201 reg->smax_value = U32_MAX; 2202 } 2203 } 2204 2205 /* Mark a register as having a completely unknown (scalar) value. 
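* That is, the register becomes a SCALAR_VALUE whose var_off is tnum_unknown and whose 64-bit and 32-bit bounds are fully open, as set up by __mark_reg_unbounded().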
*/ 2206 static void __mark_reg_unknown(const struct bpf_verifier_env *env, 2207 struct bpf_reg_state *reg) 2208 { 2209 /* 2210 * Clear type, off, and union(map_ptr, range) and 2211 * padding between 'type' and union 2212 */ 2213 memset(reg, 0, offsetof(struct bpf_reg_state, var_off)); 2214 reg->type = SCALAR_VALUE; 2215 reg->id = 0; 2216 reg->ref_obj_id = 0; 2217 reg->var_off = tnum_unknown; 2218 reg->frameno = 0; 2219 reg->precise = !env->bpf_capable; 2220 __mark_reg_unbounded(reg); 2221 } 2222 2223 static void mark_reg_unknown(struct bpf_verifier_env *env, 2224 struct bpf_reg_state *regs, u32 regno) 2225 { 2226 if (WARN_ON(regno >= MAX_BPF_REG)) { 2227 verbose(env, "mark_reg_unknown(regs, %u)\n", regno); 2228 /* Something bad happened, let's kill all regs except FP */ 2229 for (regno = 0; regno < BPF_REG_FP; regno++) 2230 __mark_reg_not_init(env, regs + regno); 2231 return; 2232 } 2233 __mark_reg_unknown(env, regs + regno); 2234 } 2235 2236 static void __mark_reg_not_init(const struct bpf_verifier_env *env, 2237 struct bpf_reg_state *reg) 2238 { 2239 __mark_reg_unknown(env, reg); 2240 reg->type = NOT_INIT; 2241 } 2242 2243 static void mark_reg_not_init(struct bpf_verifier_env *env, 2244 struct bpf_reg_state *regs, u32 regno) 2245 { 2246 if (WARN_ON(regno >= MAX_BPF_REG)) { 2247 verbose(env, "mark_reg_not_init(regs, %u)\n", regno); 2248 /* Something bad happened, let's kill all regs except FP */ 2249 for (regno = 0; regno < BPF_REG_FP; regno++) 2250 __mark_reg_not_init(env, regs + regno); 2251 return; 2252 } 2253 __mark_reg_not_init(env, regs + regno); 2254 } 2255 2256 static void mark_btf_ld_reg(struct bpf_verifier_env *env, 2257 struct bpf_reg_state *regs, u32 regno, 2258 enum bpf_reg_type reg_type, 2259 struct btf *btf, u32 btf_id, 2260 enum bpf_type_flag flag) 2261 { 2262 if (reg_type == SCALAR_VALUE) { 2263 mark_reg_unknown(env, regs, regno); 2264 return; 2265 } 2266 mark_reg_known_zero(env, regs, regno); 2267 regs[regno].type = PTR_TO_BTF_ID | flag; 2268 regs[regno].btf = btf; 2269 regs[regno].btf_id = btf_id; 2270 } 2271 2272 #define DEF_NOT_SUBREG (0) 2273 static void init_reg_state(struct bpf_verifier_env *env, 2274 struct bpf_func_state *state) 2275 { 2276 struct bpf_reg_state *regs = state->regs; 2277 int i; 2278 2279 for (i = 0; i < MAX_BPF_REG; i++) { 2280 mark_reg_not_init(env, regs, i); 2281 regs[i].live = REG_LIVE_NONE; 2282 regs[i].parent = NULL; 2283 regs[i].subreg_def = DEF_NOT_SUBREG; 2284 } 2285 2286 /* frame pointer */ 2287 regs[BPF_REG_FP].type = PTR_TO_STACK; 2288 mark_reg_known_zero(env, regs, BPF_REG_FP); 2289 regs[BPF_REG_FP].frameno = state->frameno; 2290 } 2291 2292 #define BPF_MAIN_FUNC (-1) 2293 static void init_func_state(struct bpf_verifier_env *env, 2294 struct bpf_func_state *state, 2295 int callsite, int frameno, int subprogno) 2296 { 2297 state->callsite = callsite; 2298 state->frameno = frameno; 2299 state->subprogno = subprogno; 2300 state->callback_ret_range = tnum_range(0, 0); 2301 init_reg_state(env, state); 2302 mark_verifier_state_scratched(env); 2303 } 2304 2305 /* Similar to push_stack(), but for async callbacks */ 2306 static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env, 2307 int insn_idx, int prev_insn_idx, 2308 int subprog) 2309 { 2310 struct bpf_verifier_stack_elem *elem; 2311 struct bpf_func_state *frame; 2312 2313 elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL); 2314 if (!elem) 2315 goto err; 2316 2317 elem->insn_idx = insn_idx; 2318 elem->prev_insn_idx = prev_insn_idx; 2319 elem->next = 
env->head; 2320 elem->log_pos = env->log.end_pos; 2321 env->head = elem; 2322 env->stack_size++; 2323 if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) { 2324 verbose(env, 2325 "The sequence of %d jumps is too complex for async cb.\n", 2326 env->stack_size); 2327 goto err; 2328 } 2329 /* Unlike push_stack() do not copy_verifier_state(). 2330 * The caller state doesn't matter. 2331 * This is async callback. It starts in a fresh stack. 2332 * Initialize it similar to do_check_common(). 2333 */ 2334 elem->st.branches = 1; 2335 frame = kzalloc(sizeof(*frame), GFP_KERNEL); 2336 if (!frame) 2337 goto err; 2338 init_func_state(env, frame, 2339 BPF_MAIN_FUNC /* callsite */, 2340 0 /* frameno within this callchain */, 2341 subprog /* subprog number within this prog */); 2342 elem->st.frame[0] = frame; 2343 return &elem->st; 2344 err: 2345 free_verifier_state(env->cur_state, true); 2346 env->cur_state = NULL; 2347 /* pop all elements and return */ 2348 while (!pop_stack(env, NULL, NULL, false)); 2349 return NULL; 2350 } 2351 2352 2353 enum reg_arg_type { 2354 SRC_OP, /* register is used as source operand */ 2355 DST_OP, /* register is used as destination operand */ 2356 DST_OP_NO_MARK /* same as above, check only, don't mark */ 2357 }; 2358 2359 static int cmp_subprogs(const void *a, const void *b) 2360 { 2361 return ((struct bpf_subprog_info *)a)->start - 2362 ((struct bpf_subprog_info *)b)->start; 2363 } 2364 2365 static int find_subprog(struct bpf_verifier_env *env, int off) 2366 { 2367 struct bpf_subprog_info *p; 2368 2369 p = bsearch(&off, env->subprog_info, env->subprog_cnt, 2370 sizeof(env->subprog_info[0]), cmp_subprogs); 2371 if (!p) 2372 return -ENOENT; 2373 return p - env->subprog_info; 2374 2375 } 2376 2377 static int add_subprog(struct bpf_verifier_env *env, int off) 2378 { 2379 int insn_cnt = env->prog->len; 2380 int ret; 2381 2382 if (off >= insn_cnt || off < 0) { 2383 verbose(env, "call to invalid destination\n"); 2384 return -EINVAL; 2385 } 2386 ret = find_subprog(env, off); 2387 if (ret >= 0) 2388 return ret; 2389 if (env->subprog_cnt >= BPF_MAX_SUBPROGS) { 2390 verbose(env, "too many subprograms\n"); 2391 return -E2BIG; 2392 } 2393 /* determine subprog starts. 
The end is one before the next starts */ 2394 env->subprog_info[env->subprog_cnt++].start = off; 2395 sort(env->subprog_info, env->subprog_cnt, 2396 sizeof(env->subprog_info[0]), cmp_subprogs, NULL); 2397 return env->subprog_cnt - 1; 2398 } 2399 2400 static int bpf_find_exception_callback_insn_off(struct bpf_verifier_env *env) 2401 { 2402 struct bpf_prog_aux *aux = env->prog->aux; 2403 struct btf *btf = aux->btf; 2404 const struct btf_type *t; 2405 u32 main_btf_id, id; 2406 const char *name; 2407 int ret, i; 2408 2409 /* Non-zero func_info_cnt implies valid btf */ 2410 if (!aux->func_info_cnt) 2411 return 0; 2412 main_btf_id = aux->func_info[0].type_id; 2413 2414 t = btf_type_by_id(btf, main_btf_id); 2415 if (!t) { 2416 verbose(env, "invalid btf id for main subprog in func_info\n"); 2417 return -EINVAL; 2418 } 2419 2420 name = btf_find_decl_tag_value(btf, t, -1, "exception_callback:"); 2421 if (IS_ERR(name)) { 2422 ret = PTR_ERR(name); 2423 /* If there is no tag present, there is no exception callback */ 2424 if (ret == -ENOENT) 2425 ret = 0; 2426 else if (ret == -EEXIST) 2427 verbose(env, "multiple exception callback tags for main subprog\n"); 2428 return ret; 2429 } 2430 2431 ret = btf_find_by_name_kind(btf, name, BTF_KIND_FUNC); 2432 if (ret < 0) { 2433 verbose(env, "exception callback '%s' could not be found in BTF\n", name); 2434 return ret; 2435 } 2436 id = ret; 2437 t = btf_type_by_id(btf, id); 2438 if (btf_func_linkage(t) != BTF_FUNC_GLOBAL) { 2439 verbose(env, "exception callback '%s' must have global linkage\n", name); 2440 return -EINVAL; 2441 } 2442 ret = 0; 2443 for (i = 0; i < aux->func_info_cnt; i++) { 2444 if (aux->func_info[i].type_id != id) 2445 continue; 2446 ret = aux->func_info[i].insn_off; 2447 /* Further func_info and subprog checks will also happen 2448 * later, so assume this is the right insn_off for now. 2449 */ 2450 if (!ret) { 2451 verbose(env, "invalid exception callback insn_off in func_info: 0\n"); 2452 ret = -EINVAL; 2453 } 2454 } 2455 if (!ret) { 2456 verbose(env, "exception callback type id not found in func_info\n"); 2457 ret = -EINVAL; 2458 } 2459 return ret; 2460 } 2461 2462 #define MAX_KFUNC_DESCS 256 2463 #define MAX_KFUNC_BTFS 256 2464 2465 struct bpf_kfunc_desc { 2466 struct btf_func_model func_model; 2467 u32 func_id; 2468 s32 imm; 2469 u16 offset; 2470 unsigned long addr; 2471 }; 2472 2473 struct bpf_kfunc_btf { 2474 struct btf *btf; 2475 struct module *module; 2476 u16 offset; 2477 }; 2478 2479 struct bpf_kfunc_desc_tab { 2480 /* Sorted by func_id (BTF ID) and offset (fd_array offset) during 2481 * verification. JITs do lookups by bpf_insn, where func_id may not be 2482 * available, therefore at the end of verification do_misc_fixups() 2483 * sorts this by imm and offset. 
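* In other words, find_kfunc_desc() below bsearch()es the table with kfunc_desc_cmp_by_id_off during verification, while bpf_jit_find_kfunc_model() bsearch()es it with kfunc_desc_cmp_by_imm_off once sort_kfunc_descs_by_imm_off() has re-sorted it.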
2484 */ 2485 struct bpf_kfunc_desc descs[MAX_KFUNC_DESCS]; 2486 u32 nr_descs; 2487 }; 2488 2489 struct bpf_kfunc_btf_tab { 2490 struct bpf_kfunc_btf descs[MAX_KFUNC_BTFS]; 2491 u32 nr_descs; 2492 }; 2493 2494 static int kfunc_desc_cmp_by_id_off(const void *a, const void *b) 2495 { 2496 const struct bpf_kfunc_desc *d0 = a; 2497 const struct bpf_kfunc_desc *d1 = b; 2498 2499 /* func_id is not greater than BTF_MAX_TYPE */ 2500 return d0->func_id - d1->func_id ?: d0->offset - d1->offset; 2501 } 2502 2503 static int kfunc_btf_cmp_by_off(const void *a, const void *b) 2504 { 2505 const struct bpf_kfunc_btf *d0 = a; 2506 const struct bpf_kfunc_btf *d1 = b; 2507 2508 return d0->offset - d1->offset; 2509 } 2510 2511 static const struct bpf_kfunc_desc * 2512 find_kfunc_desc(const struct bpf_prog *prog, u32 func_id, u16 offset) 2513 { 2514 struct bpf_kfunc_desc desc = { 2515 .func_id = func_id, 2516 .offset = offset, 2517 }; 2518 struct bpf_kfunc_desc_tab *tab; 2519 2520 tab = prog->aux->kfunc_tab; 2521 return bsearch(&desc, tab->descs, tab->nr_descs, 2522 sizeof(tab->descs[0]), kfunc_desc_cmp_by_id_off); 2523 } 2524 2525 int bpf_get_kfunc_addr(const struct bpf_prog *prog, u32 func_id, 2526 u16 btf_fd_idx, u8 **func_addr) 2527 { 2528 const struct bpf_kfunc_desc *desc; 2529 2530 desc = find_kfunc_desc(prog, func_id, btf_fd_idx); 2531 if (!desc) 2532 return -EFAULT; 2533 2534 *func_addr = (u8 *)desc->addr; 2535 return 0; 2536 } 2537 2538 static struct btf *__find_kfunc_desc_btf(struct bpf_verifier_env *env, 2539 s16 offset) 2540 { 2541 struct bpf_kfunc_btf kf_btf = { .offset = offset }; 2542 struct bpf_kfunc_btf_tab *tab; 2543 struct bpf_kfunc_btf *b; 2544 struct module *mod; 2545 struct btf *btf; 2546 int btf_fd; 2547 2548 tab = env->prog->aux->kfunc_btf_tab; 2549 b = bsearch(&kf_btf, tab->descs, tab->nr_descs, 2550 sizeof(tab->descs[0]), kfunc_btf_cmp_by_off); 2551 if (!b) { 2552 if (tab->nr_descs == MAX_KFUNC_BTFS) { 2553 verbose(env, "too many different module BTFs\n"); 2554 return ERR_PTR(-E2BIG); 2555 } 2556 2557 if (bpfptr_is_null(env->fd_array)) { 2558 verbose(env, "kfunc offset > 0 without fd_array is invalid\n"); 2559 return ERR_PTR(-EPROTO); 2560 } 2561 2562 if (copy_from_bpfptr_offset(&btf_fd, env->fd_array, 2563 offset * sizeof(btf_fd), 2564 sizeof(btf_fd))) 2565 return ERR_PTR(-EFAULT); 2566 2567 btf = btf_get_by_fd(btf_fd); 2568 if (IS_ERR(btf)) { 2569 verbose(env, "invalid module BTF fd specified\n"); 2570 return btf; 2571 } 2572 2573 if (!btf_is_module(btf)) { 2574 verbose(env, "BTF fd for kfunc is not a module BTF\n"); 2575 btf_put(btf); 2576 return ERR_PTR(-EINVAL); 2577 } 2578 2579 mod = btf_try_get_module(btf); 2580 if (!mod) { 2581 btf_put(btf); 2582 return ERR_PTR(-ENXIO); 2583 } 2584 2585 b = &tab->descs[tab->nr_descs++]; 2586 b->btf = btf; 2587 b->module = mod; 2588 b->offset = offset; 2589 2590 sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]), 2591 kfunc_btf_cmp_by_off, NULL); 2592 } 2593 return b->btf; 2594 } 2595 2596 void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab) 2597 { 2598 if (!tab) 2599 return; 2600 2601 while (tab->nr_descs--) { 2602 module_put(tab->descs[tab->nr_descs].module); 2603 btf_put(tab->descs[tab->nr_descs].btf); 2604 } 2605 kfree(tab); 2606 } 2607 2608 static struct btf *find_kfunc_desc_btf(struct bpf_verifier_env *env, s16 offset) 2609 { 2610 if (offset) { 2611 if (offset < 0) { 2612 /* In the future, this can be allowed to increase limit 2613 * of fd index into fd_array, interpreted as u16. 
2614 */ 2615 verbose(env, "negative offset disallowed for kernel module function call\n"); 2616 return ERR_PTR(-EINVAL); 2617 } 2618 2619 return __find_kfunc_desc_btf(env, offset); 2620 } 2621 return btf_vmlinux ?: ERR_PTR(-ENOENT); 2622 } 2623 2624 static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset) 2625 { 2626 const struct btf_type *func, *func_proto; 2627 struct bpf_kfunc_btf_tab *btf_tab; 2628 struct bpf_kfunc_desc_tab *tab; 2629 struct bpf_prog_aux *prog_aux; 2630 struct bpf_kfunc_desc *desc; 2631 const char *func_name; 2632 struct btf *desc_btf; 2633 unsigned long call_imm; 2634 unsigned long addr; 2635 int err; 2636 2637 prog_aux = env->prog->aux; 2638 tab = prog_aux->kfunc_tab; 2639 btf_tab = prog_aux->kfunc_btf_tab; 2640 if (!tab) { 2641 if (!btf_vmlinux) { 2642 verbose(env, "calling kernel function is not supported without CONFIG_DEBUG_INFO_BTF\n"); 2643 return -ENOTSUPP; 2644 } 2645 2646 if (!env->prog->jit_requested) { 2647 verbose(env, "JIT is required for calling kernel function\n"); 2648 return -ENOTSUPP; 2649 } 2650 2651 if (!bpf_jit_supports_kfunc_call()) { 2652 verbose(env, "JIT does not support calling kernel function\n"); 2653 return -ENOTSUPP; 2654 } 2655 2656 if (!env->prog->gpl_compatible) { 2657 verbose(env, "cannot call kernel function from non-GPL compatible program\n"); 2658 return -EINVAL; 2659 } 2660 2661 tab = kzalloc(sizeof(*tab), GFP_KERNEL); 2662 if (!tab) 2663 return -ENOMEM; 2664 prog_aux->kfunc_tab = tab; 2665 } 2666 2667 /* func_id == 0 is always invalid, but instead of returning an error, be 2668 * conservative and wait until the code elimination pass before returning 2669 * error, so that invalid calls that get pruned out can be in BPF programs 2670 * loaded from userspace. It is also required that offset be untouched 2671 * for such calls. 
2672 */ 2673 if (!func_id && !offset) 2674 return 0; 2675 2676 if (!btf_tab && offset) { 2677 btf_tab = kzalloc(sizeof(*btf_tab), GFP_KERNEL); 2678 if (!btf_tab) 2679 return -ENOMEM; 2680 prog_aux->kfunc_btf_tab = btf_tab; 2681 } 2682 2683 desc_btf = find_kfunc_desc_btf(env, offset); 2684 if (IS_ERR(desc_btf)) { 2685 verbose(env, "failed to find BTF for kernel function\n"); 2686 return PTR_ERR(desc_btf); 2687 } 2688 2689 if (find_kfunc_desc(env->prog, func_id, offset)) 2690 return 0; 2691 2692 if (tab->nr_descs == MAX_KFUNC_DESCS) { 2693 verbose(env, "too many different kernel function calls\n"); 2694 return -E2BIG; 2695 } 2696 2697 func = btf_type_by_id(desc_btf, func_id); 2698 if (!func || !btf_type_is_func(func)) { 2699 verbose(env, "kernel btf_id %u is not a function\n", 2700 func_id); 2701 return -EINVAL; 2702 } 2703 func_proto = btf_type_by_id(desc_btf, func->type); 2704 if (!func_proto || !btf_type_is_func_proto(func_proto)) { 2705 verbose(env, "kernel function btf_id %u does not have a valid func_proto\n", 2706 func_id); 2707 return -EINVAL; 2708 } 2709 2710 func_name = btf_name_by_offset(desc_btf, func->name_off); 2711 addr = kallsyms_lookup_name(func_name); 2712 if (!addr) { 2713 verbose(env, "cannot find address for kernel function %s\n", 2714 func_name); 2715 return -EINVAL; 2716 } 2717 specialize_kfunc(env, func_id, offset, &addr); 2718 2719 if (bpf_jit_supports_far_kfunc_call()) { 2720 call_imm = func_id; 2721 } else { 2722 call_imm = BPF_CALL_IMM(addr); 2723 /* Check whether the relative offset overflows desc->imm */ 2724 if ((unsigned long)(s32)call_imm != call_imm) { 2725 verbose(env, "address of kernel function %s is out of range\n", 2726 func_name); 2727 return -EINVAL; 2728 } 2729 } 2730 2731 if (bpf_dev_bound_kfunc_id(func_id)) { 2732 err = bpf_dev_bound_kfunc_check(&env->log, prog_aux); 2733 if (err) 2734 return err; 2735 } 2736 2737 desc = &tab->descs[tab->nr_descs++]; 2738 desc->func_id = func_id; 2739 desc->imm = call_imm; 2740 desc->offset = offset; 2741 desc->addr = addr; 2742 err = btf_distill_func_proto(&env->log, desc_btf, 2743 func_proto, func_name, 2744 &desc->func_model); 2745 if (!err) 2746 sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]), 2747 kfunc_desc_cmp_by_id_off, NULL); 2748 return err; 2749 } 2750 2751 static int kfunc_desc_cmp_by_imm_off(const void *a, const void *b) 2752 { 2753 const struct bpf_kfunc_desc *d0 = a; 2754 const struct bpf_kfunc_desc *d1 = b; 2755 2756 if (d0->imm != d1->imm) 2757 return d0->imm < d1->imm ? -1 : 1; 2758 if (d0->offset != d1->offset) 2759 return d0->offset < d1->offset ? -1 : 1; 2760 return 0; 2761 } 2762 2763 static void sort_kfunc_descs_by_imm_off(struct bpf_prog *prog) 2764 { 2765 struct bpf_kfunc_desc_tab *tab; 2766 2767 tab = prog->aux->kfunc_tab; 2768 if (!tab) 2769 return; 2770 2771 sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]), 2772 kfunc_desc_cmp_by_imm_off, NULL); 2773 } 2774 2775 bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog) 2776 { 2777 return !!prog->aux->kfunc_tab; 2778 } 2779 2780 const struct btf_func_model * 2781 bpf_jit_find_kfunc_model(const struct bpf_prog *prog, 2782 const struct bpf_insn *insn) 2783 { 2784 const struct bpf_kfunc_desc desc = { 2785 .imm = insn->imm, 2786 .offset = insn->off, 2787 }; 2788 const struct bpf_kfunc_desc *res; 2789 struct bpf_kfunc_desc_tab *tab; 2790 2791 tab = prog->aux->kfunc_tab; 2792 res = bsearch(&desc, tab->descs, tab->nr_descs, 2793 sizeof(tab->descs[0]), kfunc_desc_cmp_by_imm_off); 2794 2795 return res ? 
&res->func_model : NULL; 2796 } 2797 2798 static int add_subprog_and_kfunc(struct bpf_verifier_env *env) 2799 { 2800 struct bpf_subprog_info *subprog = env->subprog_info; 2801 int i, ret, insn_cnt = env->prog->len, ex_cb_insn; 2802 struct bpf_insn *insn = env->prog->insnsi; 2803 2804 /* Add entry function. */ 2805 ret = add_subprog(env, 0); 2806 if (ret) 2807 return ret; 2808 2809 for (i = 0; i < insn_cnt; i++, insn++) { 2810 if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn) && 2811 !bpf_pseudo_kfunc_call(insn)) 2812 continue; 2813 2814 if (!env->bpf_capable) { 2815 verbose(env, "loading/calling other bpf or kernel functions are allowed for CAP_BPF and CAP_SYS_ADMIN\n"); 2816 return -EPERM; 2817 } 2818 2819 if (bpf_pseudo_func(insn) || bpf_pseudo_call(insn)) 2820 ret = add_subprog(env, i + insn->imm + 1); 2821 else 2822 ret = add_kfunc_call(env, insn->imm, insn->off); 2823 2824 if (ret < 0) 2825 return ret; 2826 } 2827 2828 ret = bpf_find_exception_callback_insn_off(env); 2829 if (ret < 0) 2830 return ret; 2831 ex_cb_insn = ret; 2832 2833 /* If ex_cb_insn > 0, this means that the main program has a subprog 2834 * marked using BTF decl tag to serve as the exception callback. 2835 */ 2836 if (ex_cb_insn) { 2837 ret = add_subprog(env, ex_cb_insn); 2838 if (ret < 0) 2839 return ret; 2840 for (i = 1; i < env->subprog_cnt; i++) { 2841 if (env->subprog_info[i].start != ex_cb_insn) 2842 continue; 2843 env->exception_callback_subprog = i; 2844 break; 2845 } 2846 } 2847 2848 /* Add a fake 'exit' subprog which could simplify subprog iteration 2849 * logic. 'subprog_cnt' should not be increased. 2850 */ 2851 subprog[env->subprog_cnt].start = insn_cnt; 2852 2853 if (env->log.level & BPF_LOG_LEVEL2) 2854 for (i = 0; i < env->subprog_cnt; i++) 2855 verbose(env, "func#%d @%d\n", i, subprog[i].start); 2856 2857 return 0; 2858 } 2859 2860 static int check_subprogs(struct bpf_verifier_env *env) 2861 { 2862 int i, subprog_start, subprog_end, off, cur_subprog = 0; 2863 struct bpf_subprog_info *subprog = env->subprog_info; 2864 struct bpf_insn *insn = env->prog->insnsi; 2865 int insn_cnt = env->prog->len; 2866 2867 /* now check that all jumps are within the same subprog */ 2868 subprog_start = subprog[cur_subprog].start; 2869 subprog_end = subprog[cur_subprog + 1].start; 2870 for (i = 0; i < insn_cnt; i++) { 2871 u8 code = insn[i].code; 2872 2873 if (code == (BPF_JMP | BPF_CALL) && 2874 insn[i].src_reg == 0 && 2875 insn[i].imm == BPF_FUNC_tail_call) 2876 subprog[cur_subprog].has_tail_call = true; 2877 if (BPF_CLASS(code) == BPF_LD && 2878 (BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND)) 2879 subprog[cur_subprog].has_ld_abs = true; 2880 if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32) 2881 goto next; 2882 if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL) 2883 goto next; 2884 if (code == (BPF_JMP32 | BPF_JA)) 2885 off = i + insn[i].imm + 1; 2886 else 2887 off = i + insn[i].off + 1; 2888 if (off < subprog_start || off >= subprog_end) { 2889 verbose(env, "jump out of range from insn %d to %d\n", i, off); 2890 return -EINVAL; 2891 } 2892 next: 2893 if (i == subprog_end - 1) { 2894 /* to avoid fall-through from one subprog into another 2895 * the last insn of the subprog should be either exit 2896 * or unconditional jump back or bpf_throw call 2897 */ 2898 if (code != (BPF_JMP | BPF_EXIT) && 2899 code != (BPF_JMP32 | BPF_JA) && 2900 code != (BPF_JMP | BPF_JA)) { 2901 verbose(env, "last insn is not an exit or jmp\n"); 2902 return -EINVAL; 2903 } 2904 subprog_start = subprog_end; 2905 
cur_subprog++; 2906 if (cur_subprog < env->subprog_cnt) 2907 subprog_end = subprog[cur_subprog + 1].start; 2908 } 2909 } 2910 return 0; 2911 } 2912 2913 /* Parentage chain of this register (or stack slot) should take care of all 2914 * issues like callee-saved registers, stack slot allocation time, etc. 2915 */ 2916 static int mark_reg_read(struct bpf_verifier_env *env, 2917 const struct bpf_reg_state *state, 2918 struct bpf_reg_state *parent, u8 flag) 2919 { 2920 bool writes = parent == state->parent; /* Observe write marks */ 2921 int cnt = 0; 2922 2923 while (parent) { 2924 /* if read wasn't screened by an earlier write ... */ 2925 if (writes && state->live & REG_LIVE_WRITTEN) 2926 break; 2927 if (parent->live & REG_LIVE_DONE) { 2928 verbose(env, "verifier BUG type %s var_off %lld off %d\n", 2929 reg_type_str(env, parent->type), 2930 parent->var_off.value, parent->off); 2931 return -EFAULT; 2932 } 2933 /* The first condition is more likely to be true than the 2934 * second, checked it first. 2935 */ 2936 if ((parent->live & REG_LIVE_READ) == flag || 2937 parent->live & REG_LIVE_READ64) 2938 /* The parentage chain never changes and 2939 * this parent was already marked as LIVE_READ. 2940 * There is no need to keep walking the chain again and 2941 * keep re-marking all parents as LIVE_READ. 2942 * This case happens when the same register is read 2943 * multiple times without writes into it in-between. 2944 * Also, if parent has the stronger REG_LIVE_READ64 set, 2945 * then no need to set the weak REG_LIVE_READ32. 2946 */ 2947 break; 2948 /* ... then we depend on parent's value */ 2949 parent->live |= flag; 2950 /* REG_LIVE_READ64 overrides REG_LIVE_READ32. */ 2951 if (flag == REG_LIVE_READ64) 2952 parent->live &= ~REG_LIVE_READ32; 2953 state = parent; 2954 parent = state->parent; 2955 writes = true; 2956 cnt++; 2957 } 2958 2959 if (env->longest_mark_read_walk < cnt) 2960 env->longest_mark_read_walk = cnt; 2961 return 0; 2962 } 2963 2964 static int mark_dynptr_read(struct bpf_verifier_env *env, struct bpf_reg_state *reg) 2965 { 2966 struct bpf_func_state *state = func(env, reg); 2967 int spi, ret; 2968 2969 /* For CONST_PTR_TO_DYNPTR, it must have already been done by 2970 * check_reg_arg in check_helper_call and mark_btf_func_reg_size in 2971 * check_kfunc_call. 2972 */ 2973 if (reg->type == CONST_PTR_TO_DYNPTR) 2974 return 0; 2975 spi = dynptr_get_spi(env, reg); 2976 if (spi < 0) 2977 return spi; 2978 /* Caller ensures dynptr is valid and initialized, which means spi is in 2979 * bounds and spi is the first dynptr slot. Simply mark stack slot as 2980 * read. 2981 */ 2982 ret = mark_reg_read(env, &state->stack[spi].spilled_ptr, 2983 state->stack[spi].spilled_ptr.parent, REG_LIVE_READ64); 2984 if (ret) 2985 return ret; 2986 return mark_reg_read(env, &state->stack[spi - 1].spilled_ptr, 2987 state->stack[spi - 1].spilled_ptr.parent, REG_LIVE_READ64); 2988 } 2989 2990 static int mark_iter_read(struct bpf_verifier_env *env, struct bpf_reg_state *reg, 2991 int spi, int nr_slots) 2992 { 2993 struct bpf_func_state *state = func(env, reg); 2994 int err, i; 2995 2996 for (i = 0; i < nr_slots; i++) { 2997 struct bpf_reg_state *st = &state->stack[spi - i].spilled_ptr; 2998 2999 err = mark_reg_read(env, st, st->parent, REG_LIVE_READ64); 3000 if (err) 3001 return err; 3002 3003 mark_stack_slot_scratched(env, spi - i); 3004 } 3005 3006 return 0; 3007 } 3008 3009 /* This function is supposed to be used by the following 32-bit optimization 3010 * code only. 
It returns TRUE if the source or destination register operates 3011 * on 64-bit, otherwise return FALSE. 3012 */ 3013 static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn, 3014 u32 regno, struct bpf_reg_state *reg, enum reg_arg_type t) 3015 { 3016 u8 code, class, op; 3017 3018 code = insn->code; 3019 class = BPF_CLASS(code); 3020 op = BPF_OP(code); 3021 if (class == BPF_JMP) { 3022 /* BPF_EXIT for "main" will reach here. Return TRUE 3023 * conservatively. 3024 */ 3025 if (op == BPF_EXIT) 3026 return true; 3027 if (op == BPF_CALL) { 3028 /* BPF to BPF call will reach here because of marking 3029 * caller saved clobber with DST_OP_NO_MARK for which we 3030 * don't care the register def because they are anyway 3031 * marked as NOT_INIT already. 3032 */ 3033 if (insn->src_reg == BPF_PSEUDO_CALL) 3034 return false; 3035 /* Helper call will reach here because of arg type 3036 * check, conservatively return TRUE. 3037 */ 3038 if (t == SRC_OP) 3039 return true; 3040 3041 return false; 3042 } 3043 } 3044 3045 if (class == BPF_ALU64 && op == BPF_END && (insn->imm == 16 || insn->imm == 32)) 3046 return false; 3047 3048 if (class == BPF_ALU64 || class == BPF_JMP || 3049 (class == BPF_ALU && op == BPF_END && insn->imm == 64)) 3050 return true; 3051 3052 if (class == BPF_ALU || class == BPF_JMP32) 3053 return false; 3054 3055 if (class == BPF_LDX) { 3056 if (t != SRC_OP) 3057 return BPF_SIZE(code) == BPF_DW || BPF_MODE(code) == BPF_MEMSX; 3058 /* LDX source must be ptr. */ 3059 return true; 3060 } 3061 3062 if (class == BPF_STX) { 3063 /* BPF_STX (including atomic variants) has multiple source 3064 * operands, one of which is a ptr. Check whether the caller is 3065 * asking about it. 3066 */ 3067 if (t == SRC_OP && reg->type != SCALAR_VALUE) 3068 return true; 3069 return BPF_SIZE(code) == BPF_DW; 3070 } 3071 3072 if (class == BPF_LD) { 3073 u8 mode = BPF_MODE(code); 3074 3075 /* LD_IMM64 */ 3076 if (mode == BPF_IMM) 3077 return true; 3078 3079 /* Both LD_IND and LD_ABS return 32-bit data. */ 3080 if (t != SRC_OP) 3081 return false; 3082 3083 /* Implicit ctx ptr. */ 3084 if (regno == BPF_REG_6) 3085 return true; 3086 3087 /* Explicit source could be any width. */ 3088 return true; 3089 } 3090 3091 if (class == BPF_ST) 3092 /* The only source register for BPF_ST is a ptr. */ 3093 return true; 3094 3095 /* Conservatively return true at default. */ 3096 return true; 3097 } 3098 3099 /* Return the regno defined by the insn, or -1. */ 3100 static int insn_def_regno(const struct bpf_insn *insn) 3101 { 3102 switch (BPF_CLASS(insn->code)) { 3103 case BPF_JMP: 3104 case BPF_JMP32: 3105 case BPF_ST: 3106 return -1; 3107 case BPF_STX: 3108 if (BPF_MODE(insn->code) == BPF_ATOMIC && 3109 (insn->imm & BPF_FETCH)) { 3110 if (insn->imm == BPF_CMPXCHG) 3111 return BPF_REG_0; 3112 else 3113 return insn->src_reg; 3114 } else { 3115 return -1; 3116 } 3117 default: 3118 return insn->dst_reg; 3119 } 3120 } 3121 3122 /* Return TRUE if INSN has defined any 32-bit value explicitly. 
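* E.g. a 32-bit BPF_ALU add such as w1 += w2 defines a 32-bit value, whereas its BPF_ALU64 counterpart r1 += r2 defines all 64 bits, per is_reg64() above.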
*/ 3123 static bool insn_has_def32(struct bpf_verifier_env *env, struct bpf_insn *insn) 3124 { 3125 int dst_reg = insn_def_regno(insn); 3126 3127 if (dst_reg == -1) 3128 return false; 3129 3130 return !is_reg64(env, insn, dst_reg, NULL, DST_OP); 3131 } 3132 3133 static void mark_insn_zext(struct bpf_verifier_env *env, 3134 struct bpf_reg_state *reg) 3135 { 3136 s32 def_idx = reg->subreg_def; 3137 3138 if (def_idx == DEF_NOT_SUBREG) 3139 return; 3140 3141 env->insn_aux_data[def_idx - 1].zext_dst = true; 3142 /* The dst will be zero extended, so won't be sub-register anymore. */ 3143 reg->subreg_def = DEF_NOT_SUBREG; 3144 } 3145 3146 static int __check_reg_arg(struct bpf_verifier_env *env, struct bpf_reg_state *regs, u32 regno, 3147 enum reg_arg_type t) 3148 { 3149 struct bpf_insn *insn = env->prog->insnsi + env->insn_idx; 3150 struct bpf_reg_state *reg; 3151 bool rw64; 3152 3153 if (regno >= MAX_BPF_REG) { 3154 verbose(env, "R%d is invalid\n", regno); 3155 return -EINVAL; 3156 } 3157 3158 mark_reg_scratched(env, regno); 3159 3160 reg = &regs[regno]; 3161 rw64 = is_reg64(env, insn, regno, reg, t); 3162 if (t == SRC_OP) { 3163 /* check whether register used as source operand can be read */ 3164 if (reg->type == NOT_INIT) { 3165 verbose(env, "R%d !read_ok\n", regno); 3166 return -EACCES; 3167 } 3168 /* We don't need to worry about FP liveness because it's read-only */ 3169 if (regno == BPF_REG_FP) 3170 return 0; 3171 3172 if (rw64) 3173 mark_insn_zext(env, reg); 3174 3175 return mark_reg_read(env, reg, reg->parent, 3176 rw64 ? REG_LIVE_READ64 : REG_LIVE_READ32); 3177 } else { 3178 /* check whether register used as dest operand can be written to */ 3179 if (regno == BPF_REG_FP) { 3180 verbose(env, "frame pointer is read only\n"); 3181 return -EACCES; 3182 } 3183 reg->live |= REG_LIVE_WRITTEN; 3184 reg->subreg_def = rw64 ? DEF_NOT_SUBREG : env->insn_idx + 1; 3185 if (t == DST_OP) 3186 mark_reg_unknown(env, regs, regno); 3187 } 3188 return 0; 3189 } 3190 3191 static int check_reg_arg(struct bpf_verifier_env *env, u32 regno, 3192 enum reg_arg_type t) 3193 { 3194 struct bpf_verifier_state *vstate = env->cur_state; 3195 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 3196 3197 return __check_reg_arg(env, state->regs, regno, t); 3198 } 3199 3200 static void mark_jmp_point(struct bpf_verifier_env *env, int idx) 3201 { 3202 env->insn_aux_data[idx].jmp_point = true; 3203 } 3204 3205 static bool is_jmp_point(struct bpf_verifier_env *env, int insn_idx) 3206 { 3207 return env->insn_aux_data[insn_idx].jmp_point; 3208 } 3209 3210 /* for any branch, call, exit record the history of jmps in the given state */ 3211 static int push_jmp_history(struct bpf_verifier_env *env, 3212 struct bpf_verifier_state *cur) 3213 { 3214 u32 cnt = cur->jmp_history_cnt; 3215 struct bpf_idx_pair *p; 3216 size_t alloc_size; 3217 3218 if (!is_jmp_point(env, env->insn_idx)) 3219 return 0; 3220 3221 cnt++; 3222 alloc_size = kmalloc_size_roundup(size_mul(cnt, sizeof(*p))); 3223 p = krealloc(cur->jmp_history, alloc_size, GFP_USER); 3224 if (!p) 3225 return -ENOMEM; 3226 p[cnt - 1].idx = env->insn_idx; 3227 p[cnt - 1].prev_idx = env->prev_insn_idx; 3228 cur->jmp_history = p; 3229 cur->jmp_history_cnt = cnt; 3230 return 0; 3231 } 3232 3233 /* Backtrack one insn at a time. If idx is not at the top of recorded 3234 * history then previous instruction came from straight line execution. 3235 * Return -ENOENT if we exhausted all instructions within given state.
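* For example (made-up indices): with jmp_history = [{idx=14, prev_idx=10}] and i == 14, the jump entry is consumed and 10 is returned; otherwise the fallback for straight line code is simply i - 1.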
3236 * 3237 * It's legal to have a bit of a looping with the same starting and ending 3238 * insn index within the same state, e.g.: 3->4->5->3, so just because current 3239 * instruction index is the same as state's first_idx doesn't mean we are 3240 * done. If there is still some jump history left, we should keep going. We 3241 * need to take into account that we might have a jump history between given 3242 * state's parent and itself, due to checkpointing. In this case, we'll have 3243 * history entry recording a jump from last instruction of parent state and 3244 * first instruction of given state. 3245 */ 3246 static int get_prev_insn_idx(struct bpf_verifier_state *st, int i, 3247 u32 *history) 3248 { 3249 u32 cnt = *history; 3250 3251 if (i == st->first_insn_idx) { 3252 if (cnt == 0) 3253 return -ENOENT; 3254 if (cnt == 1 && st->jmp_history[0].idx == i) 3255 return -ENOENT; 3256 } 3257 3258 if (cnt && st->jmp_history[cnt - 1].idx == i) { 3259 i = st->jmp_history[cnt - 1].prev_idx; 3260 (*history)--; 3261 } else { 3262 i--; 3263 } 3264 return i; 3265 } 3266 3267 static const char *disasm_kfunc_name(void *data, const struct bpf_insn *insn) 3268 { 3269 const struct btf_type *func; 3270 struct btf *desc_btf; 3271 3272 if (insn->src_reg != BPF_PSEUDO_KFUNC_CALL) 3273 return NULL; 3274 3275 desc_btf = find_kfunc_desc_btf(data, insn->off); 3276 if (IS_ERR(desc_btf)) 3277 return "<error>"; 3278 3279 func = btf_type_by_id(desc_btf, insn->imm); 3280 return btf_name_by_offset(desc_btf, func->name_off); 3281 } 3282 3283 static inline void bt_init(struct backtrack_state *bt, u32 frame) 3284 { 3285 bt->frame = frame; 3286 } 3287 3288 static inline void bt_reset(struct backtrack_state *bt) 3289 { 3290 struct bpf_verifier_env *env = bt->env; 3291 3292 memset(bt, 0, sizeof(*bt)); 3293 bt->env = env; 3294 } 3295 3296 static inline u32 bt_empty(struct backtrack_state *bt) 3297 { 3298 u64 mask = 0; 3299 int i; 3300 3301 for (i = 0; i <= bt->frame; i++) 3302 mask |= bt->reg_masks[i] | bt->stack_masks[i]; 3303 3304 return mask == 0; 3305 } 3306 3307 static inline int bt_subprog_enter(struct backtrack_state *bt) 3308 { 3309 if (bt->frame == MAX_CALL_FRAMES - 1) { 3310 verbose(bt->env, "BUG subprog enter from frame %d\n", bt->frame); 3311 WARN_ONCE(1, "verifier backtracking bug"); 3312 return -EFAULT; 3313 } 3314 bt->frame++; 3315 return 0; 3316 } 3317 3318 static inline int bt_subprog_exit(struct backtrack_state *bt) 3319 { 3320 if (bt->frame == 0) { 3321 verbose(bt->env, "BUG subprog exit from frame 0\n"); 3322 WARN_ONCE(1, "verifier backtracking bug"); 3323 return -EFAULT; 3324 } 3325 bt->frame--; 3326 return 0; 3327 } 3328 3329 static inline void bt_set_frame_reg(struct backtrack_state *bt, u32 frame, u32 reg) 3330 { 3331 bt->reg_masks[frame] |= 1 << reg; 3332 } 3333 3334 static inline void bt_clear_frame_reg(struct backtrack_state *bt, u32 frame, u32 reg) 3335 { 3336 bt->reg_masks[frame] &= ~(1 << reg); 3337 } 3338 3339 static inline void bt_set_reg(struct backtrack_state *bt, u32 reg) 3340 { 3341 bt_set_frame_reg(bt, bt->frame, reg); 3342 } 3343 3344 static inline void bt_clear_reg(struct backtrack_state *bt, u32 reg) 3345 { 3346 bt_clear_frame_reg(bt, bt->frame, reg); 3347 } 3348 3349 static inline void bt_set_frame_slot(struct backtrack_state *bt, u32 frame, u32 slot) 3350 { 3351 bt->stack_masks[frame] |= 1ull << slot; 3352 } 3353 3354 static inline void bt_clear_frame_slot(struct backtrack_state *bt, u32 frame, u32 slot) 3355 { 3356 bt->stack_masks[frame] &= ~(1ull << slot); 3357 } 3358 3359 static 
inline void bt_set_slot(struct backtrack_state *bt, u32 slot) 3360 { 3361 bt_set_frame_slot(bt, bt->frame, slot); 3362 } 3363 3364 static inline void bt_clear_slot(struct backtrack_state *bt, u32 slot) 3365 { 3366 bt_clear_frame_slot(bt, bt->frame, slot); 3367 } 3368 3369 static inline u32 bt_frame_reg_mask(struct backtrack_state *bt, u32 frame) 3370 { 3371 return bt->reg_masks[frame]; 3372 } 3373 3374 static inline u32 bt_reg_mask(struct backtrack_state *bt) 3375 { 3376 return bt->reg_masks[bt->frame]; 3377 } 3378 3379 static inline u64 bt_frame_stack_mask(struct backtrack_state *bt, u32 frame) 3380 { 3381 return bt->stack_masks[frame]; 3382 } 3383 3384 static inline u64 bt_stack_mask(struct backtrack_state *bt) 3385 { 3386 return bt->stack_masks[bt->frame]; 3387 } 3388 3389 static inline bool bt_is_reg_set(struct backtrack_state *bt, u32 reg) 3390 { 3391 return bt->reg_masks[bt->frame] & (1 << reg); 3392 } 3393 3394 static inline bool bt_is_slot_set(struct backtrack_state *bt, u32 slot) 3395 { 3396 return bt->stack_masks[bt->frame] & (1ull << slot); 3397 } 3398 3399 /* format registers bitmask, e.g., "r0,r2,r4" for 0x15 mask */ 3400 static void fmt_reg_mask(char *buf, ssize_t buf_sz, u32 reg_mask) 3401 { 3402 DECLARE_BITMAP(mask, 64); 3403 bool first = true; 3404 int i, n; 3405 3406 buf[0] = '\0'; 3407 3408 bitmap_from_u64(mask, reg_mask); 3409 for_each_set_bit(i, mask, 32) { 3410 n = snprintf(buf, buf_sz, "%sr%d", first ? "" : ",", i); 3411 first = false; 3412 buf += n; 3413 buf_sz -= n; 3414 if (buf_sz < 0) 3415 break; 3416 } 3417 } 3418 /* format stack slots bitmask, e.g., "-8,-24,-40" for 0x15 mask */ 3419 static void fmt_stack_mask(char *buf, ssize_t buf_sz, u64 stack_mask) 3420 { 3421 DECLARE_BITMAP(mask, 64); 3422 bool first = true; 3423 int i, n; 3424 3425 buf[0] = '\0'; 3426 3427 bitmap_from_u64(mask, stack_mask); 3428 for_each_set_bit(i, mask, 64) { 3429 n = snprintf(buf, buf_sz, "%s%d", first ? "" : ",", -(i + 1) * 8); 3430 first = false; 3431 buf += n; 3432 buf_sz -= n; 3433 if (buf_sz < 0) 3434 break; 3435 } 3436 } 3437 3438 static bool calls_callback(struct bpf_verifier_env *env, int insn_idx); 3439 3440 /* For given verifier state backtrack_insn() is called from the last insn to 3441 * the first insn. Its purpose is to compute a bitmask of registers and 3442 * stack slots that needs precision in the parent verifier state. 3443 * 3444 * @idx is an index of the instruction we are currently processing; 3445 * @subseq_idx is an index of the subsequent instruction that: 3446 * - *would be* executed next, if jump history is viewed in forward order; 3447 * - *was* processed previously during backtracking. 
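* For instance (a sketch of the expected call pattern, not taken verbatim from the caller): if the history 10 -> 11 -> 14 is walked backwards, backtrack_insn() would be called roughly as (idx=14, subseq_idx=-1 for the very first insn), then (idx=11, subseq_idx=14), then (idx=10, subseq_idx=11).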
3448 */ 3449 static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx, 3450 struct backtrack_state *bt) 3451 { 3452 const struct bpf_insn_cbs cbs = { 3453 .cb_call = disasm_kfunc_name, 3454 .cb_print = verbose, 3455 .private_data = env, 3456 }; 3457 struct bpf_insn *insn = env->prog->insnsi + idx; 3458 u8 class = BPF_CLASS(insn->code); 3459 u8 opcode = BPF_OP(insn->code); 3460 u8 mode = BPF_MODE(insn->code); 3461 u32 dreg = insn->dst_reg; 3462 u32 sreg = insn->src_reg; 3463 u32 spi, i; 3464 3465 if (insn->code == 0) 3466 return 0; 3467 if (env->log.level & BPF_LOG_LEVEL2) { 3468 fmt_reg_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, bt_reg_mask(bt)); 3469 verbose(env, "mark_precise: frame%d: regs=%s ", 3470 bt->frame, env->tmp_str_buf); 3471 fmt_stack_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, bt_stack_mask(bt)); 3472 verbose(env, "stack=%s before ", env->tmp_str_buf); 3473 verbose(env, "%d: ", idx); 3474 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks); 3475 } 3476 3477 if (class == BPF_ALU || class == BPF_ALU64) { 3478 if (!bt_is_reg_set(bt, dreg)) 3479 return 0; 3480 if (opcode == BPF_END || opcode == BPF_NEG) { 3481 /* sreg is reserved and unused 3482 * dreg still need precision before this insn 3483 */ 3484 return 0; 3485 } else if (opcode == BPF_MOV) { 3486 if (BPF_SRC(insn->code) == BPF_X) { 3487 /* dreg = sreg or dreg = (s8, s16, s32)sreg 3488 * dreg needs precision after this insn 3489 * sreg needs precision before this insn 3490 */ 3491 bt_clear_reg(bt, dreg); 3492 bt_set_reg(bt, sreg); 3493 } else { 3494 /* dreg = K 3495 * dreg needs precision after this insn. 3496 * Corresponding register is already marked 3497 * as precise=true in this verifier state. 3498 * No further markings in parent are necessary 3499 */ 3500 bt_clear_reg(bt, dreg); 3501 } 3502 } else { 3503 if (BPF_SRC(insn->code) == BPF_X) { 3504 /* dreg += sreg 3505 * both dreg and sreg need precision 3506 * before this insn 3507 */ 3508 bt_set_reg(bt, sreg); 3509 } /* else dreg += K 3510 * dreg still needs precision before this insn 3511 */ 3512 } 3513 } else if (class == BPF_LDX) { 3514 if (!bt_is_reg_set(bt, dreg)) 3515 return 0; 3516 bt_clear_reg(bt, dreg); 3517 3518 /* scalars can only be spilled into stack w/o losing precision. 3519 * Load from any other memory can be zero extended. 3520 * The desire to keep that precision is already indicated 3521 * by 'precise' mark in corresponding register of this state. 3522 * No further tracking necessary. 3523 */ 3524 if (insn->src_reg != BPF_REG_FP) 3525 return 0; 3526 3527 /* dreg = *(u64 *)[fp - off] was a fill from the stack. 3528 * that [fp - off] slot contains scalar that needs to be 3529 * tracked with precision 3530 */ 3531 spi = (-insn->off - 1) / BPF_REG_SIZE; 3532 if (spi >= 64) { 3533 verbose(env, "BUG spi %d\n", spi); 3534 WARN_ONCE(1, "verifier backtracking bug"); 3535 return -EFAULT; 3536 } 3537 bt_set_slot(bt, spi); 3538 } else if (class == BPF_STX || class == BPF_ST) { 3539 if (bt_is_reg_set(bt, dreg)) 3540 /* stx & st shouldn't be using _scalar_ dst_reg 3541 * to access memory. It means backtracking 3542 * encountered a case of pointer subtraction. 
3543 */ 3544 return -ENOTSUPP; 3545 /* scalars can only be spilled into stack */ 3546 if (insn->dst_reg != BPF_REG_FP) 3547 return 0; 3548 spi = (-insn->off - 1) / BPF_REG_SIZE; 3549 if (spi >= 64) { 3550 verbose(env, "BUG spi %d\n", spi); 3551 WARN_ONCE(1, "verifier backtracking bug"); 3552 return -EFAULT; 3553 } 3554 if (!bt_is_slot_set(bt, spi)) 3555 return 0; 3556 bt_clear_slot(bt, spi); 3557 if (class == BPF_STX) 3558 bt_set_reg(bt, sreg); 3559 } else if (class == BPF_JMP || class == BPF_JMP32) { 3560 if (bpf_pseudo_call(insn)) { 3561 int subprog_insn_idx, subprog; 3562 3563 subprog_insn_idx = idx + insn->imm + 1; 3564 subprog = find_subprog(env, subprog_insn_idx); 3565 if (subprog < 0) 3566 return -EFAULT; 3567 3568 if (subprog_is_global(env, subprog)) { 3569 /* check that jump history doesn't have any 3570 * extra instructions from subprog; the next 3571 * instruction after call to global subprog 3572 * should be literally next instruction in 3573 * caller program 3574 */ 3575 WARN_ONCE(idx + 1 != subseq_idx, "verifier backtracking bug"); 3576 /* r1-r5 are invalidated after subprog call, 3577 * so for global func call it shouldn't be set 3578 * anymore 3579 */ 3580 if (bt_reg_mask(bt) & BPF_REGMASK_ARGS) { 3581 verbose(env, "BUG regs %x\n", bt_reg_mask(bt)); 3582 WARN_ONCE(1, "verifier backtracking bug"); 3583 return -EFAULT; 3584 } 3585 /* global subprog always sets R0 */ 3586 bt_clear_reg(bt, BPF_REG_0); 3587 return 0; 3588 } else { 3589 /* static subprog call instruction, which 3590 * means that we are exiting current subprog, 3591 * so only r1-r5 could be still requested as 3592 * precise, r0 and r6-r10 or any stack slot in 3593 * the current frame should be zero by now 3594 */ 3595 if (bt_reg_mask(bt) & ~BPF_REGMASK_ARGS) { 3596 verbose(env, "BUG regs %x\n", bt_reg_mask(bt)); 3597 WARN_ONCE(1, "verifier backtracking bug"); 3598 return -EFAULT; 3599 } 3600 /* we don't track register spills perfectly, 3601 * so fallback to force-precise instead of failing */ 3602 if (bt_stack_mask(bt) != 0) 3603 return -ENOTSUPP; 3604 /* propagate r1-r5 to the caller */ 3605 for (i = BPF_REG_1; i <= BPF_REG_5; i++) { 3606 if (bt_is_reg_set(bt, i)) { 3607 bt_clear_reg(bt, i); 3608 bt_set_frame_reg(bt, bt->frame - 1, i); 3609 } 3610 } 3611 if (bt_subprog_exit(bt)) 3612 return -EFAULT; 3613 return 0; 3614 } 3615 } else if (is_sync_callback_calling_insn(insn) && idx != subseq_idx - 1) { 3616 /* exit from callback subprog to callback-calling helper or 3617 * kfunc call. Use idx/subseq_idx check to discern it from 3618 * straight line code backtracking. 3619 * Unlike the subprog call handling above, we shouldn't 3620 * propagate precision of r1-r5 (if any requested), as they are 3621 * not actually arguments passed directly to callback subprogs 3622 */ 3623 if (bt_reg_mask(bt) & ~BPF_REGMASK_ARGS) { 3624 verbose(env, "BUG regs %x\n", bt_reg_mask(bt)); 3625 WARN_ONCE(1, "verifier backtracking bug"); 3626 return -EFAULT; 3627 } 3628 if (bt_stack_mask(bt) != 0) 3629 return -ENOTSUPP; 3630 /* clear r1-r5 in callback subprog's mask */ 3631 for (i = BPF_REG_1; i <= BPF_REG_5; i++) 3632 bt_clear_reg(bt, i); 3633 if (bt_subprog_exit(bt)) 3634 return -EFAULT; 3635 return 0; 3636 } else if (opcode == BPF_CALL) { 3637 /* kfunc with imm==0 is invalid and fixup_kfunc_call will 3638 * catch this error later. Make backtracking conservative 3639 * with ENOTSUPP. 
			 */
			if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL && insn->imm == 0)
				return -ENOTSUPP;
			/* regular helper call sets R0 */
			bt_clear_reg(bt, BPF_REG_0);
			if (bt_reg_mask(bt) & BPF_REGMASK_ARGS) {
				/* if backtracking was looking for registers R1-R5,
				 * they should have been found already.
				 */
				verbose(env, "BUG regs %x\n", bt_reg_mask(bt));
				WARN_ONCE(1, "verifier backtracking bug");
				return -EFAULT;
			}
		} else if (opcode == BPF_EXIT) {
			bool r0_precise;

			/* Backtracking to a nested function call, 'idx' is a part of
			 * the inner frame, 'subseq_idx' is a part of the outer frame.
			 * In case of a regular function call, instructions giving
			 * precision to registers R1-R5 should have been found already.
			 * In case of a callback, it is ok to have R1-R5 marked for
			 * backtracking, as these registers are set by the function
			 * invoking the callback.
			 */
			if (subseq_idx >= 0 && calls_callback(env, subseq_idx))
				for (i = BPF_REG_1; i <= BPF_REG_5; i++)
					bt_clear_reg(bt, i);
			if (bt_reg_mask(bt) & BPF_REGMASK_ARGS) {
				verbose(env, "BUG regs %x\n", bt_reg_mask(bt));
				WARN_ONCE(1, "verifier backtracking bug");
				return -EFAULT;
			}

			/* BPF_EXIT in subprog or callback always returns
			 * right after the call instruction, so by checking
			 * whether the instruction at subseq_idx-1 is a subprog
			 * call or not we can distinguish actual exit from
			 * *subprog* from exit from *callback*. In the former
			 * case, we need to propagate r0 precision, if
			 * necessary. In the latter case, we never do that.
			 */
			r0_precise = subseq_idx - 1 >= 0 &&
				     bpf_pseudo_call(&env->prog->insnsi[subseq_idx - 1]) &&
				     bt_is_reg_set(bt, BPF_REG_0);

			bt_clear_reg(bt, BPF_REG_0);
			if (bt_subprog_enter(bt))
				return -EFAULT;

			if (r0_precise)
				bt_set_reg(bt, BPF_REG_0);
			/* r6-r9 and stack slots will stay set in caller frame
			 * bitmasks until we return back from callee(s)
			 */
			return 0;
		} else if (BPF_SRC(insn->code) == BPF_X) {
			if (!bt_is_reg_set(bt, dreg) && !bt_is_reg_set(bt, sreg))
				return 0;
			/* dreg <cond> sreg
			 * Both dreg and sreg need precision before
			 * this insn. If only sreg was marked precise
			 * before, it would be equally necessary to
			 * propagate it to dreg.
			 */
			bt_set_reg(bt, dreg);
			bt_set_reg(bt, sreg);
			/* else dreg <cond> K
			 * Only dreg still needs precision before
			 * this insn, so for the K-based conditional
			 * there is nothing new to be marked.
			 */
		}
	} else if (class == BPF_LD) {
		if (!bt_is_reg_set(bt, dreg))
			return 0;
		bt_clear_reg(bt, dreg);
		/* It's ld_imm64 or ld_abs or ld_ind.
		 * For ld_imm64 no further tracking of precision
		 * into parent is necessary
		 */
		if (mode == BPF_IND || mode == BPF_ABS)
			/* to be analyzed */
			return -ENOTSUPP;
	}
	return 0;
}

/* the scalar precision tracking algorithm:
 * . at the start all registers have precise=false.
 * . scalar ranges are tracked as normal through alu and jmp insns.
 * . once precise value of the scalar register is used in:
 *   . ptr + scalar alu
 *   . if (scalar cond K|scalar)
 *   . helper_call(.., scalar, ...) where ARG_CONST is expected
 *   backtrack through the verifier states and mark all registers and
 *   stack slots with spilled constants that these scalar registers
 *   should be precise.
3737 * . during state pruning two registers (or spilled stack slots) 3738 * are equivalent if both are not precise. 3739 * 3740 * Note the verifier cannot simply walk register parentage chain, 3741 * since many different registers and stack slots could have been 3742 * used to compute single precise scalar. 3743 * 3744 * The approach of starting with precise=true for all registers and then 3745 * backtrack to mark a register as not precise when the verifier detects 3746 * that program doesn't care about specific value (e.g., when helper 3747 * takes register as ARG_ANYTHING parameter) is not safe. 3748 * 3749 * It's ok to walk single parentage chain of the verifier states. 3750 * It's possible that this backtracking will go all the way till 1st insn. 3751 * All other branches will be explored for needing precision later. 3752 * 3753 * The backtracking needs to deal with cases like: 3754 * R8=map_value(id=0,off=0,ks=4,vs=1952,imm=0) R9_w=map_value(id=0,off=40,ks=4,vs=1952,imm=0) 3755 * r9 -= r8 3756 * r5 = r9 3757 * if r5 > 0x79f goto pc+7 3758 * R5_w=inv(id=0,umax_value=1951,var_off=(0x0; 0x7ff)) 3759 * r5 += 1 3760 * ... 3761 * call bpf_perf_event_output#25 3762 * where .arg5_type = ARG_CONST_SIZE_OR_ZERO 3763 * 3764 * and this case: 3765 * r6 = 1 3766 * call foo // uses callee's r6 inside to compute r0 3767 * r0 += r6 3768 * if r0 == 0 goto 3769 * 3770 * to track above reg_mask/stack_mask needs to be independent for each frame. 3771 * 3772 * Also if parent's curframe > frame where backtracking started, 3773 * the verifier need to mark registers in both frames, otherwise callees 3774 * may incorrectly prune callers. This is similar to 3775 * commit 7640ead93924 ("bpf: verifier: make sure callees don't prune with caller differences") 3776 * 3777 * For now backtracking falls back into conservative marking. 3778 */ 3779 static void mark_all_scalars_precise(struct bpf_verifier_env *env, 3780 struct bpf_verifier_state *st) 3781 { 3782 struct bpf_func_state *func; 3783 struct bpf_reg_state *reg; 3784 int i, j; 3785 3786 if (env->log.level & BPF_LOG_LEVEL2) { 3787 verbose(env, "mark_precise: frame%d: falling back to forcing all scalars precise\n", 3788 st->curframe); 3789 } 3790 3791 /* big hammer: mark all scalars precise in this path. 3792 * pop_stack may still get !precise scalars. 3793 * We also skip current state and go straight to first parent state, 3794 * because precision markings in current non-checkpointed state are 3795 * not needed. See why in the comment in __mark_chain_precision below. 
3796 */ 3797 for (st = st->parent; st; st = st->parent) { 3798 for (i = 0; i <= st->curframe; i++) { 3799 func = st->frame[i]; 3800 for (j = 0; j < BPF_REG_FP; j++) { 3801 reg = &func->regs[j]; 3802 if (reg->type != SCALAR_VALUE || reg->precise) 3803 continue; 3804 reg->precise = true; 3805 if (env->log.level & BPF_LOG_LEVEL2) { 3806 verbose(env, "force_precise: frame%d: forcing r%d to be precise\n", 3807 i, j); 3808 } 3809 } 3810 for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { 3811 if (!is_spilled_reg(&func->stack[j])) 3812 continue; 3813 reg = &func->stack[j].spilled_ptr; 3814 if (reg->type != SCALAR_VALUE || reg->precise) 3815 continue; 3816 reg->precise = true; 3817 if (env->log.level & BPF_LOG_LEVEL2) { 3818 verbose(env, "force_precise: frame%d: forcing fp%d to be precise\n", 3819 i, -(j + 1) * 8); 3820 } 3821 } 3822 } 3823 } 3824 } 3825 3826 static void mark_all_scalars_imprecise(struct bpf_verifier_env *env, struct bpf_verifier_state *st) 3827 { 3828 struct bpf_func_state *func; 3829 struct bpf_reg_state *reg; 3830 int i, j; 3831 3832 for (i = 0; i <= st->curframe; i++) { 3833 func = st->frame[i]; 3834 for (j = 0; j < BPF_REG_FP; j++) { 3835 reg = &func->regs[j]; 3836 if (reg->type != SCALAR_VALUE) 3837 continue; 3838 reg->precise = false; 3839 } 3840 for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { 3841 if (!is_spilled_reg(&func->stack[j])) 3842 continue; 3843 reg = &func->stack[j].spilled_ptr; 3844 if (reg->type != SCALAR_VALUE) 3845 continue; 3846 reg->precise = false; 3847 } 3848 } 3849 } 3850 3851 static bool idset_contains(struct bpf_idset *s, u32 id) 3852 { 3853 u32 i; 3854 3855 for (i = 0; i < s->count; ++i) 3856 if (s->ids[i] == id) 3857 return true; 3858 3859 return false; 3860 } 3861 3862 static int idset_push(struct bpf_idset *s, u32 id) 3863 { 3864 if (WARN_ON_ONCE(s->count >= ARRAY_SIZE(s->ids))) 3865 return -EFAULT; 3866 s->ids[s->count++] = id; 3867 return 0; 3868 } 3869 3870 static void idset_reset(struct bpf_idset *s) 3871 { 3872 s->count = 0; 3873 } 3874 3875 /* Collect a set of IDs for all registers currently marked as precise in env->bt. 3876 * Mark all registers with these IDs as precise. 
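 *
 * For example (an illustrative sketch): if "r1 = r2" gave both registers
 * the same id A, and backtracking marked r1 as needing precision, then any
 * other register or spilled stack slot still carrying id A (here r2) is
 * marked for precision as well, since find_equal_scalars() may have
 * propagated range knowledge between them.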
3877 */ 3878 static int mark_precise_scalar_ids(struct bpf_verifier_env *env, struct bpf_verifier_state *st) 3879 { 3880 struct bpf_idset *precise_ids = &env->idset_scratch; 3881 struct backtrack_state *bt = &env->bt; 3882 struct bpf_func_state *func; 3883 struct bpf_reg_state *reg; 3884 DECLARE_BITMAP(mask, 64); 3885 int i, fr; 3886 3887 idset_reset(precise_ids); 3888 3889 for (fr = bt->frame; fr >= 0; fr--) { 3890 func = st->frame[fr]; 3891 3892 bitmap_from_u64(mask, bt_frame_reg_mask(bt, fr)); 3893 for_each_set_bit(i, mask, 32) { 3894 reg = &func->regs[i]; 3895 if (!reg->id || reg->type != SCALAR_VALUE) 3896 continue; 3897 if (idset_push(precise_ids, reg->id)) 3898 return -EFAULT; 3899 } 3900 3901 bitmap_from_u64(mask, bt_frame_stack_mask(bt, fr)); 3902 for_each_set_bit(i, mask, 64) { 3903 if (i >= func->allocated_stack / BPF_REG_SIZE) 3904 break; 3905 if (!is_spilled_scalar_reg(&func->stack[i])) 3906 continue; 3907 reg = &func->stack[i].spilled_ptr; 3908 if (!reg->id) 3909 continue; 3910 if (idset_push(precise_ids, reg->id)) 3911 return -EFAULT; 3912 } 3913 } 3914 3915 for (fr = 0; fr <= st->curframe; ++fr) { 3916 func = st->frame[fr]; 3917 3918 for (i = BPF_REG_0; i < BPF_REG_10; ++i) { 3919 reg = &func->regs[i]; 3920 if (!reg->id) 3921 continue; 3922 if (!idset_contains(precise_ids, reg->id)) 3923 continue; 3924 bt_set_frame_reg(bt, fr, i); 3925 } 3926 for (i = 0; i < func->allocated_stack / BPF_REG_SIZE; ++i) { 3927 if (!is_spilled_scalar_reg(&func->stack[i])) 3928 continue; 3929 reg = &func->stack[i].spilled_ptr; 3930 if (!reg->id) 3931 continue; 3932 if (!idset_contains(precise_ids, reg->id)) 3933 continue; 3934 bt_set_frame_slot(bt, fr, i); 3935 } 3936 } 3937 3938 return 0; 3939 } 3940 3941 /* 3942 * __mark_chain_precision() backtracks BPF program instruction sequence and 3943 * chain of verifier states making sure that register *regno* (if regno >= 0) 3944 * and/or stack slot *spi* (if spi >= 0) are marked as precisely tracked 3945 * SCALARS, as well as any other registers and slots that contribute to 3946 * a tracked state of given registers/stack slots, depending on specific BPF 3947 * assembly instructions (see backtrack_insns() for exact instruction handling 3948 * logic). This backtracking relies on recorded jmp_history and is able to 3949 * traverse entire chain of parent states. This process ends only when all the 3950 * necessary registers/slots and their transitive dependencies are marked as 3951 * precise. 3952 * 3953 * One important and subtle aspect is that precise marks *do not matter* in 3954 * the currently verified state (current state). It is important to understand 3955 * why this is the case. 3956 * 3957 * First, note that current state is the state that is not yet "checkpointed", 3958 * i.e., it is not yet put into env->explored_states, and it has no children 3959 * states as well. It's ephemeral, and can end up either a) being discarded if 3960 * compatible explored state is found at some point or BPF_EXIT instruction is 3961 * reached or b) checkpointed and put into env->explored_states, branching out 3962 * into one or more children states. 3963 * 3964 * In the former case, precise markings in current state are completely 3965 * ignored by state comparison code (see regsafe() for details). Only 3966 * checkpointed ("old") state precise markings are important, and if old 3967 * state's register/slot is precise, regsafe() assumes current state's 3968 * register/slot as precise and checks value ranges exactly and precisely. 
 * If states turn out to be compatible, current state's necessary precise
 * markings and any required parent states' precise markings are enforced
 * after the fact with propagate_precision() logic. But it's important to
 * realize that in this case, even after marking current state
 * registers/slots as precise, we immediately discard current state. So what
 * actually matters is any of the precise markings propagated into current
 * state's parent states, which are always checkpointed (due to b) case
 * above). As such, for scenario a) it doesn't matter if current state has
 * precise markings set or not.
 *
 * Now, for the scenario b), checkpointing and forking into child(ren)
 * state(s). Note that before current state gets to checkpointing step, any
 * processed instruction always assumes precise SCALAR register/slot
 * knowledge: if precise value or range is useful to prune jump branch, BPF
 * verifier takes this opportunity enthusiastically. Similarly, when
 * register's value is used to calculate offset or memory address, exact
 * knowledge of SCALAR range is assumed, checked, and enforced. So, similar
 * to how state comparison ignores current state's precise markings, the BPF
 * verifier ignores and also assumes precise markings *at will* during the
 * instruction verification process. But as the verifier assumes precision,
 * it also propagates any precision dependencies across parent states, which
 * are not yet finalized, so can be further restricted based on new
 * knowledge gained from restrictions enforced by their child states. This
 * is so that once those parent states are finalized, i.e., when they have
 * no more active child states, state comparison logic in is_state_visited()
 * would enforce strict and precise SCALAR ranges, if required for
 * correctness.
 *
 * To build a bit more intuition, note also that once a state is
 * checkpointed, the path we took to get to that state is not important.
 * This is a crucial property for state pruning. When a state is
 * checkpointed and finalized at some instruction index, it can be correctly
 * and safely used to "short circuit" any *compatible* state that reaches
 * exactly the same instruction index. I.e., if we jumped to that
 * instruction from a completely different code path than the original
 * finalized state was derived from, it doesn't matter, current state can be
 * discarded because from that instruction forward having a compatible state
 * will ensure we will safely reach the exit. States describe preconditions
 * for further exploration, but completely forget the history of how we got
 * here.
 *
 * This also means that even if we needed precise SCALAR range to get to the
 * finalized state, but from that point forward *that same* SCALAR register
 * is never used in a precise context (i.e., its precise value is not needed
 * for correctness), it's correct and safe to mark such a register as
 * "imprecise" (i.e., precise marking set to false). This is what we rely on
 * when we do not set precise marking in current state. If no child state
 * requires precision for any given SCALAR register, it's safe to dictate
 * that it can be imprecise.
If any child state does require this register to be precise, 4017 * we'll mark it precise later retroactively during precise markings 4018 * propagation from child state to parent states. 4019 * 4020 * Skipping precise marking setting in current state is a mild version of 4021 * relying on the above observation. But we can utilize this property even 4022 * more aggressively by proactively forgetting any precise marking in the 4023 * current state (which we inherited from the parent state), right before we 4024 * checkpoint it and branch off into new child state. This is done by 4025 * mark_all_scalars_imprecise() to hopefully get more permissive and generic 4026 * finalized states which help in short circuiting more future states. 4027 */ 4028 static int __mark_chain_precision(struct bpf_verifier_env *env, int regno) 4029 { 4030 struct backtrack_state *bt = &env->bt; 4031 struct bpf_verifier_state *st = env->cur_state; 4032 int first_idx = st->first_insn_idx; 4033 int last_idx = env->insn_idx; 4034 int subseq_idx = -1; 4035 struct bpf_func_state *func; 4036 struct bpf_reg_state *reg; 4037 bool skip_first = true; 4038 int i, fr, err; 4039 4040 if (!env->bpf_capable) 4041 return 0; 4042 4043 /* set frame number from which we are starting to backtrack */ 4044 bt_init(bt, env->cur_state->curframe); 4045 4046 /* Do sanity checks against current state of register and/or stack 4047 * slot, but don't set precise flag in current state, as precision 4048 * tracking in the current state is unnecessary. 4049 */ 4050 func = st->frame[bt->frame]; 4051 if (regno >= 0) { 4052 reg = &func->regs[regno]; 4053 if (reg->type != SCALAR_VALUE) { 4054 WARN_ONCE(1, "backtracing misuse"); 4055 return -EFAULT; 4056 } 4057 bt_set_reg(bt, regno); 4058 } 4059 4060 if (bt_empty(bt)) 4061 return 0; 4062 4063 for (;;) { 4064 DECLARE_BITMAP(mask, 64); 4065 u32 history = st->jmp_history_cnt; 4066 4067 if (env->log.level & BPF_LOG_LEVEL2) { 4068 verbose(env, "mark_precise: frame%d: last_idx %d first_idx %d subseq_idx %d \n", 4069 bt->frame, last_idx, first_idx, subseq_idx); 4070 } 4071 4072 /* If some register with scalar ID is marked as precise, 4073 * make sure that all registers sharing this ID are also precise. 4074 * This is needed to estimate effect of find_equal_scalars(). 4075 * Do this at the last instruction of each state, 4076 * bpf_reg_state::id fields are valid for these instructions. 4077 * 4078 * Allows to track precision in situation like below: 4079 * 4080 * r2 = unknown value 4081 * ... 4082 * --- state #0 --- 4083 * ... 4084 * r1 = r2 // r1 and r2 now share the same ID 4085 * ... 4086 * --- state #1 {r1.id = A, r2.id = A} --- 4087 * ... 4088 * if (r2 > 10) goto exit; // find_equal_scalars() assigns range to r1 4089 * ... 
4090 * --- state #2 {r1.id = A, r2.id = A} --- 4091 * r3 = r10 4092 * r3 += r1 // need to mark both r1 and r2 4093 */ 4094 if (mark_precise_scalar_ids(env, st)) 4095 return -EFAULT; 4096 4097 if (last_idx < 0) { 4098 /* we are at the entry into subprog, which 4099 * is expected for global funcs, but only if 4100 * requested precise registers are R1-R5 4101 * (which are global func's input arguments) 4102 */ 4103 if (st->curframe == 0 && 4104 st->frame[0]->subprogno > 0 && 4105 st->frame[0]->callsite == BPF_MAIN_FUNC && 4106 bt_stack_mask(bt) == 0 && 4107 (bt_reg_mask(bt) & ~BPF_REGMASK_ARGS) == 0) { 4108 bitmap_from_u64(mask, bt_reg_mask(bt)); 4109 for_each_set_bit(i, mask, 32) { 4110 reg = &st->frame[0]->regs[i]; 4111 bt_clear_reg(bt, i); 4112 if (reg->type == SCALAR_VALUE) 4113 reg->precise = true; 4114 } 4115 return 0; 4116 } 4117 4118 verbose(env, "BUG backtracking func entry subprog %d reg_mask %x stack_mask %llx\n", 4119 st->frame[0]->subprogno, bt_reg_mask(bt), bt_stack_mask(bt)); 4120 WARN_ONCE(1, "verifier backtracking bug"); 4121 return -EFAULT; 4122 } 4123 4124 for (i = last_idx;;) { 4125 if (skip_first) { 4126 err = 0; 4127 skip_first = false; 4128 } else { 4129 err = backtrack_insn(env, i, subseq_idx, bt); 4130 } 4131 if (err == -ENOTSUPP) { 4132 mark_all_scalars_precise(env, env->cur_state); 4133 bt_reset(bt); 4134 return 0; 4135 } else if (err) { 4136 return err; 4137 } 4138 if (bt_empty(bt)) 4139 /* Found assignment(s) into tracked register in this state. 4140 * Since this state is already marked, just return. 4141 * Nothing to be tracked further in the parent state. 4142 */ 4143 return 0; 4144 subseq_idx = i; 4145 i = get_prev_insn_idx(st, i, &history); 4146 if (i == -ENOENT) 4147 break; 4148 if (i >= env->prog->len) { 4149 /* This can happen if backtracking reached insn 0 4150 * and there are still reg_mask or stack_mask 4151 * to backtrack. 4152 * It means the backtracking missed the spot where 4153 * particular register was initialized with a constant. 4154 */ 4155 verbose(env, "BUG backtracking idx %d\n", i); 4156 WARN_ONCE(1, "verifier backtracking bug"); 4157 return -EFAULT; 4158 } 4159 } 4160 st = st->parent; 4161 if (!st) 4162 break; 4163 4164 for (fr = bt->frame; fr >= 0; fr--) { 4165 func = st->frame[fr]; 4166 bitmap_from_u64(mask, bt_frame_reg_mask(bt, fr)); 4167 for_each_set_bit(i, mask, 32) { 4168 reg = &func->regs[i]; 4169 if (reg->type != SCALAR_VALUE) { 4170 bt_clear_frame_reg(bt, fr, i); 4171 continue; 4172 } 4173 if (reg->precise) 4174 bt_clear_frame_reg(bt, fr, i); 4175 else 4176 reg->precise = true; 4177 } 4178 4179 bitmap_from_u64(mask, bt_frame_stack_mask(bt, fr)); 4180 for_each_set_bit(i, mask, 64) { 4181 if (i >= func->allocated_stack / BPF_REG_SIZE) { 4182 /* the sequence of instructions: 4183 * 2: (bf) r3 = r10 4184 * 3: (7b) *(u64 *)(r3 -8) = r0 4185 * 4: (79) r4 = *(u64 *)(r10 -8) 4186 * doesn't contain jmps. It's backtracked 4187 * as a single block. 4188 * During backtracking insn 3 is not recognized as 4189 * stack access, so at the end of backtracking 4190 * stack slot fp-8 is still marked in stack_mask. 4191 * However the parent state may not have accessed 4192 * fp-8 and it's "unallocated" stack space. 4193 * In such case fallback to conservative. 
4194 */ 4195 mark_all_scalars_precise(env, env->cur_state); 4196 bt_reset(bt); 4197 return 0; 4198 } 4199 4200 if (!is_spilled_scalar_reg(&func->stack[i])) { 4201 bt_clear_frame_slot(bt, fr, i); 4202 continue; 4203 } 4204 reg = &func->stack[i].spilled_ptr; 4205 if (reg->precise) 4206 bt_clear_frame_slot(bt, fr, i); 4207 else 4208 reg->precise = true; 4209 } 4210 if (env->log.level & BPF_LOG_LEVEL2) { 4211 fmt_reg_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, 4212 bt_frame_reg_mask(bt, fr)); 4213 verbose(env, "mark_precise: frame%d: parent state regs=%s ", 4214 fr, env->tmp_str_buf); 4215 fmt_stack_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, 4216 bt_frame_stack_mask(bt, fr)); 4217 verbose(env, "stack=%s: ", env->tmp_str_buf); 4218 print_verifier_state(env, func, true); 4219 } 4220 } 4221 4222 if (bt_empty(bt)) 4223 return 0; 4224 4225 subseq_idx = first_idx; 4226 last_idx = st->last_insn_idx; 4227 first_idx = st->first_insn_idx; 4228 } 4229 4230 /* if we still have requested precise regs or slots, we missed 4231 * something (e.g., stack access through non-r10 register), so 4232 * fallback to marking all precise 4233 */ 4234 if (!bt_empty(bt)) { 4235 mark_all_scalars_precise(env, env->cur_state); 4236 bt_reset(bt); 4237 } 4238 4239 return 0; 4240 } 4241 4242 int mark_chain_precision(struct bpf_verifier_env *env, int regno) 4243 { 4244 return __mark_chain_precision(env, regno); 4245 } 4246 4247 /* mark_chain_precision_batch() assumes that env->bt is set in the caller to 4248 * desired reg and stack masks across all relevant frames 4249 */ 4250 static int mark_chain_precision_batch(struct bpf_verifier_env *env) 4251 { 4252 return __mark_chain_precision(env, -1); 4253 } 4254 4255 static bool is_spillable_regtype(enum bpf_reg_type type) 4256 { 4257 switch (base_type(type)) { 4258 case PTR_TO_MAP_VALUE: 4259 case PTR_TO_STACK: 4260 case PTR_TO_CTX: 4261 case PTR_TO_PACKET: 4262 case PTR_TO_PACKET_META: 4263 case PTR_TO_PACKET_END: 4264 case PTR_TO_FLOW_KEYS: 4265 case CONST_PTR_TO_MAP: 4266 case PTR_TO_SOCKET: 4267 case PTR_TO_SOCK_COMMON: 4268 case PTR_TO_TCP_SOCK: 4269 case PTR_TO_XDP_SOCK: 4270 case PTR_TO_BTF_ID: 4271 case PTR_TO_BUF: 4272 case PTR_TO_MEM: 4273 case PTR_TO_FUNC: 4274 case PTR_TO_MAP_KEY: 4275 return true; 4276 default: 4277 return false; 4278 } 4279 } 4280 4281 /* Does this register contain a constant zero? */ 4282 static bool register_is_null(struct bpf_reg_state *reg) 4283 { 4284 return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0); 4285 } 4286 4287 /* check if register is a constant scalar value */ 4288 static bool is_reg_const(struct bpf_reg_state *reg, bool subreg32) 4289 { 4290 return reg->type == SCALAR_VALUE && 4291 tnum_is_const(subreg32 ? tnum_subreg(reg->var_off) : reg->var_off); 4292 } 4293 4294 /* assuming is_reg_const() is true, return constant value of a register */ 4295 static u64 reg_const_value(struct bpf_reg_state *reg, bool subreg32) 4296 { 4297 return subreg32 ? 
tnum_subreg(reg->var_off).value : reg->var_off.value; 4298 } 4299 4300 static bool __is_scalar_unbounded(struct bpf_reg_state *reg) 4301 { 4302 return tnum_is_unknown(reg->var_off) && 4303 reg->smin_value == S64_MIN && reg->smax_value == S64_MAX && 4304 reg->umin_value == 0 && reg->umax_value == U64_MAX && 4305 reg->s32_min_value == S32_MIN && reg->s32_max_value == S32_MAX && 4306 reg->u32_min_value == 0 && reg->u32_max_value == U32_MAX; 4307 } 4308 4309 static bool register_is_bounded(struct bpf_reg_state *reg) 4310 { 4311 return reg->type == SCALAR_VALUE && !__is_scalar_unbounded(reg); 4312 } 4313 4314 static bool __is_pointer_value(bool allow_ptr_leaks, 4315 const struct bpf_reg_state *reg) 4316 { 4317 if (allow_ptr_leaks) 4318 return false; 4319 4320 return reg->type != SCALAR_VALUE; 4321 } 4322 4323 /* Copy src state preserving dst->parent and dst->live fields */ 4324 static void copy_register_state(struct bpf_reg_state *dst, const struct bpf_reg_state *src) 4325 { 4326 struct bpf_reg_state *parent = dst->parent; 4327 enum bpf_reg_liveness live = dst->live; 4328 4329 *dst = *src; 4330 dst->parent = parent; 4331 dst->live = live; 4332 } 4333 4334 static void save_register_state(struct bpf_func_state *state, 4335 int spi, struct bpf_reg_state *reg, 4336 int size) 4337 { 4338 int i; 4339 4340 copy_register_state(&state->stack[spi].spilled_ptr, reg); 4341 if (size == BPF_REG_SIZE) 4342 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; 4343 4344 for (i = BPF_REG_SIZE; i > BPF_REG_SIZE - size; i--) 4345 state->stack[spi].slot_type[i - 1] = STACK_SPILL; 4346 4347 /* size < 8 bytes spill */ 4348 for (; i; i--) 4349 scrub_spilled_slot(&state->stack[spi].slot_type[i - 1]); 4350 } 4351 4352 static bool is_bpf_st_mem(struct bpf_insn *insn) 4353 { 4354 return BPF_CLASS(insn->code) == BPF_ST && BPF_MODE(insn->code) == BPF_MEM; 4355 } 4356 4357 /* check_stack_{read,write}_fixed_off functions track spill/fill of registers, 4358 * stack boundary and alignment are checked in check_mem_access() 4359 */ 4360 static int check_stack_write_fixed_off(struct bpf_verifier_env *env, 4361 /* stack frame we're writing to */ 4362 struct bpf_func_state *state, 4363 int off, int size, int value_regno, 4364 int insn_idx) 4365 { 4366 struct bpf_func_state *cur; /* state of the current function */ 4367 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err; 4368 struct bpf_insn *insn = &env->prog->insnsi[insn_idx]; 4369 struct bpf_reg_state *reg = NULL; 4370 u32 dst_reg = insn->dst_reg; 4371 4372 err = grow_stack_state(state, round_up(slot + 1, BPF_REG_SIZE)); 4373 if (err) 4374 return err; 4375 /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0, 4376 * so it's aligned access and [off, off + size) are within stack limits 4377 */ 4378 if (!env->allow_ptr_leaks && 4379 state->stack[spi].slot_type[0] == STACK_SPILL && 4380 size != BPF_REG_SIZE) { 4381 verbose(env, "attempt to corrupt spilled pointer on stack\n"); 4382 return -EACCES; 4383 } 4384 4385 cur = env->cur_state->frame[env->cur_state->curframe]; 4386 if (value_regno >= 0) 4387 reg = &cur->regs[value_regno]; 4388 if (!env->bypass_spec_v4) { 4389 bool sanitize = reg && is_spillable_regtype(reg->type); 4390 4391 for (i = 0; i < size; i++) { 4392 u8 type = state->stack[spi].slot_type[i]; 4393 4394 if (type != STACK_MISC && type != STACK_ZERO) { 4395 sanitize = true; 4396 break; 4397 } 4398 } 4399 4400 if (sanitize) 4401 env->insn_aux_data[insn_idx].sanitize_stack_spill = true; 4402 } 4403 4404 err = destroy_if_dynptr_stack_slot(env, state, 
spi); 4405 if (err) 4406 return err; 4407 4408 mark_stack_slot_scratched(env, spi); 4409 if (reg && !(off % BPF_REG_SIZE) && register_is_bounded(reg) && 4410 !register_is_null(reg) && env->bpf_capable) { 4411 if (dst_reg != BPF_REG_FP) { 4412 /* The backtracking logic can only recognize explicit 4413 * stack slot address like [fp - 8]. Other spill of 4414 * scalar via different register has to be conservative. 4415 * Backtrack from here and mark all registers as precise 4416 * that contributed into 'reg' being a constant. 4417 */ 4418 err = mark_chain_precision(env, value_regno); 4419 if (err) 4420 return err; 4421 } 4422 save_register_state(state, spi, reg, size); 4423 /* Break the relation on a narrowing spill. */ 4424 if (fls64(reg->umax_value) > BITS_PER_BYTE * size) 4425 state->stack[spi].spilled_ptr.id = 0; 4426 } else if (!reg && !(off % BPF_REG_SIZE) && is_bpf_st_mem(insn) && 4427 insn->imm != 0 && env->bpf_capable) { 4428 struct bpf_reg_state fake_reg = {}; 4429 4430 __mark_reg_known(&fake_reg, insn->imm); 4431 fake_reg.type = SCALAR_VALUE; 4432 save_register_state(state, spi, &fake_reg, size); 4433 } else if (reg && is_spillable_regtype(reg->type)) { 4434 /* register containing pointer is being spilled into stack */ 4435 if (size != BPF_REG_SIZE) { 4436 verbose_linfo(env, insn_idx, "; "); 4437 verbose(env, "invalid size of register spill\n"); 4438 return -EACCES; 4439 } 4440 if (state != cur && reg->type == PTR_TO_STACK) { 4441 verbose(env, "cannot spill pointers to stack into stack frame of the caller\n"); 4442 return -EINVAL; 4443 } 4444 save_register_state(state, spi, reg, size); 4445 } else { 4446 u8 type = STACK_MISC; 4447 4448 /* regular write of data into stack destroys any spilled ptr */ 4449 state->stack[spi].spilled_ptr.type = NOT_INIT; 4450 /* Mark slots as STACK_MISC if they belonged to spilled ptr/dynptr/iter. */ 4451 if (is_stack_slot_special(&state->stack[spi])) 4452 for (i = 0; i < BPF_REG_SIZE; i++) 4453 scrub_spilled_slot(&state->stack[spi].slot_type[i]); 4454 4455 /* only mark the slot as written if all 8 bytes were written 4456 * otherwise read propagation may incorrectly stop too soon 4457 * when stack slots are partially written. 4458 * This heuristic means that read propagation will be 4459 * conservative, since it will add reg_live_read marks 4460 * to stack slots all the way to first state when programs 4461 * writes+reads less than 8 bytes 4462 */ 4463 if (size == BPF_REG_SIZE) 4464 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; 4465 4466 /* when we zero initialize stack slots mark them as such */ 4467 if ((reg && register_is_null(reg)) || 4468 (!reg && is_bpf_st_mem(insn) && insn->imm == 0)) { 4469 /* backtracking doesn't work for STACK_ZERO yet. */ 4470 err = mark_chain_precision(env, value_regno); 4471 if (err) 4472 return err; 4473 type = STACK_ZERO; 4474 } 4475 4476 /* Mark slots affected by this stack write. */ 4477 for (i = 0; i < size; i++) 4478 state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] = 4479 type; 4480 } 4481 return 0; 4482 } 4483 4484 /* Write the stack: 'stack[ptr_regno + off] = value_regno'. 'ptr_regno' is 4485 * known to contain a variable offset. 4486 * This function checks whether the write is permitted and conservatively 4487 * tracks the effects of the write, considering that each stack slot in the 4488 * dynamic range is potentially written to. 4489 * 4490 * 'off' includes 'regno->off'. 4491 * 'value_regno' can be -1, meaning that an unknown value is being written to 4492 * the stack. 
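 *
 * For example (an illustrative sketch): if r2 points into the stack with a
 * variable offset in [-16, -8] and the program does '*(u64 *)(r2 + 0) = r3',
 * every byte in [fp-16, fp) is conservatively treated as potentially
 * written.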
4493 * 4494 * Spilled pointers in range are not marked as written because we don't know 4495 * what's going to be actually written. This means that read propagation for 4496 * future reads cannot be terminated by this write. 4497 * 4498 * For privileged programs, uninitialized stack slots are considered 4499 * initialized by this write (even though we don't know exactly what offsets 4500 * are going to be written to). The idea is that we don't want the verifier to 4501 * reject future reads that access slots written to through variable offsets. 4502 */ 4503 static int check_stack_write_var_off(struct bpf_verifier_env *env, 4504 /* func where register points to */ 4505 struct bpf_func_state *state, 4506 int ptr_regno, int off, int size, 4507 int value_regno, int insn_idx) 4508 { 4509 struct bpf_func_state *cur; /* state of the current function */ 4510 int min_off, max_off; 4511 int i, err; 4512 struct bpf_reg_state *ptr_reg = NULL, *value_reg = NULL; 4513 struct bpf_insn *insn = &env->prog->insnsi[insn_idx]; 4514 bool writing_zero = false; 4515 /* set if the fact that we're writing a zero is used to let any 4516 * stack slots remain STACK_ZERO 4517 */ 4518 bool zero_used = false; 4519 4520 cur = env->cur_state->frame[env->cur_state->curframe]; 4521 ptr_reg = &cur->regs[ptr_regno]; 4522 min_off = ptr_reg->smin_value + off; 4523 max_off = ptr_reg->smax_value + off + size; 4524 if (value_regno >= 0) 4525 value_reg = &cur->regs[value_regno]; 4526 if ((value_reg && register_is_null(value_reg)) || 4527 (!value_reg && is_bpf_st_mem(insn) && insn->imm == 0)) 4528 writing_zero = true; 4529 4530 err = grow_stack_state(state, round_up(-min_off, BPF_REG_SIZE)); 4531 if (err) 4532 return err; 4533 4534 for (i = min_off; i < max_off; i++) { 4535 int spi; 4536 4537 spi = __get_spi(i); 4538 err = destroy_if_dynptr_stack_slot(env, state, spi); 4539 if (err) 4540 return err; 4541 } 4542 4543 /* Variable offset writes destroy any spilled pointers in range. */ 4544 for (i = min_off; i < max_off; i++) { 4545 u8 new_type, *stype; 4546 int slot, spi; 4547 4548 slot = -i - 1; 4549 spi = slot / BPF_REG_SIZE; 4550 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; 4551 mark_stack_slot_scratched(env, spi); 4552 4553 if (!env->allow_ptr_leaks && *stype != STACK_MISC && *stype != STACK_ZERO) { 4554 /* Reject the write if range we may write to has not 4555 * been initialized beforehand. If we didn't reject 4556 * here, the ptr status would be erased below (even 4557 * though not all slots are actually overwritten), 4558 * possibly opening the door to leaks. 4559 * 4560 * We do however catch STACK_INVALID case below, and 4561 * only allow reading possibly uninitialized memory 4562 * later for CAP_PERFMON, as the write may not happen to 4563 * that slot. 4564 */ 4565 verbose(env, "spilled ptr in range of var-offset stack write; insn %d, ptr off: %d", 4566 insn_idx, i); 4567 return -EINVAL; 4568 } 4569 4570 /* Erase all spilled pointers. */ 4571 state->stack[spi].spilled_ptr.type = NOT_INIT; 4572 4573 /* Update the slot type. */ 4574 new_type = STACK_MISC; 4575 if (writing_zero && *stype == STACK_ZERO) { 4576 new_type = STACK_ZERO; 4577 zero_used = true; 4578 } 4579 /* If the slot is STACK_INVALID, we check whether it's OK to 4580 * pretend that it will be initialized by this write. The slot 4581 * might not actually be written to, and so if we mark it as 4582 * initialized future reads might leak uninitialized memory. 
4583 * For privileged programs, we will accept such reads to slots 4584 * that may or may not be written because, if we're reject 4585 * them, the error would be too confusing. 4586 */ 4587 if (*stype == STACK_INVALID && !env->allow_uninit_stack) { 4588 verbose(env, "uninit stack in range of var-offset write prohibited for !root; insn %d, off: %d", 4589 insn_idx, i); 4590 return -EINVAL; 4591 } 4592 *stype = new_type; 4593 } 4594 if (zero_used) { 4595 /* backtracking doesn't work for STACK_ZERO yet. */ 4596 err = mark_chain_precision(env, value_regno); 4597 if (err) 4598 return err; 4599 } 4600 return 0; 4601 } 4602 4603 /* When register 'dst_regno' is assigned some values from stack[min_off, 4604 * max_off), we set the register's type according to the types of the 4605 * respective stack slots. If all the stack values are known to be zeros, then 4606 * so is the destination reg. Otherwise, the register is considered to be 4607 * SCALAR. This function does not deal with register filling; the caller must 4608 * ensure that all spilled registers in the stack range have been marked as 4609 * read. 4610 */ 4611 static void mark_reg_stack_read(struct bpf_verifier_env *env, 4612 /* func where src register points to */ 4613 struct bpf_func_state *ptr_state, 4614 int min_off, int max_off, int dst_regno) 4615 { 4616 struct bpf_verifier_state *vstate = env->cur_state; 4617 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 4618 int i, slot, spi; 4619 u8 *stype; 4620 int zeros = 0; 4621 4622 for (i = min_off; i < max_off; i++) { 4623 slot = -i - 1; 4624 spi = slot / BPF_REG_SIZE; 4625 mark_stack_slot_scratched(env, spi); 4626 stype = ptr_state->stack[spi].slot_type; 4627 if (stype[slot % BPF_REG_SIZE] != STACK_ZERO) 4628 break; 4629 zeros++; 4630 } 4631 if (zeros == max_off - min_off) { 4632 /* any access_size read into register is zero extended, 4633 * so the whole register == const_zero 4634 */ 4635 __mark_reg_const_zero(&state->regs[dst_regno]); 4636 /* backtracking doesn't support STACK_ZERO yet, 4637 * so mark it precise here, so that later 4638 * backtracking can stop here. 4639 * Backtracking may not need this if this register 4640 * doesn't participate in pointer adjustment. 4641 * Forward propagation of precise flag is not 4642 * necessary either. This mark is only to stop 4643 * backtracking. Any register that contributed 4644 * to const 0 was marked precise before spill. 4645 */ 4646 state->regs[dst_regno].precise = true; 4647 } else { 4648 /* have read misc data from the stack */ 4649 mark_reg_unknown(env, state->regs, dst_regno); 4650 } 4651 state->regs[dst_regno].live |= REG_LIVE_WRITTEN; 4652 } 4653 4654 /* Read the stack at 'off' and put the results into the register indicated by 4655 * 'dst_regno'. It handles reg filling if the addressed stack slot is a 4656 * spilled reg. 4657 * 4658 * 'dst_regno' can be -1, meaning that the read value is not going to a 4659 * register. 4660 * 4661 * The access is assumed to be within the current stack bounds. 
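 *
 * For example (an illustrative sketch): after '*(u64 *)(r10 - 8) = r6', a
 * later 'r3 = *(u64 *)(r10 - 8)' restores r6's tracked state into r3 (a
 * fill), while a narrower 'r3 = *(u32 *)(r10 - 8)' is rejected if the slot
 * holds a spilled pointer and yields an unknown scalar if it holds a
 * spilled scalar.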
 */
static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
				      /* func where src register points to */
				      struct bpf_func_state *reg_state,
				      int off, int size, int dst_regno)
{
	struct bpf_verifier_state *vstate = env->cur_state;
	struct bpf_func_state *state = vstate->frame[vstate->curframe];
	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
	struct bpf_reg_state *reg;
	u8 *stype, type;

	stype = reg_state->stack[spi].slot_type;
	reg = &reg_state->stack[spi].spilled_ptr;

	mark_stack_slot_scratched(env, spi);

	if (is_spilled_reg(&reg_state->stack[spi])) {
		u8 spill_size = 1;

		for (i = BPF_REG_SIZE - 1; i > 0 && stype[i - 1] == STACK_SPILL; i--)
			spill_size++;

		if (size != BPF_REG_SIZE || spill_size != BPF_REG_SIZE) {
			if (reg->type != SCALAR_VALUE) {
				verbose_linfo(env, env->insn_idx, "; ");
				verbose(env, "invalid size of register fill\n");
				return -EACCES;
			}

			mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
			if (dst_regno < 0)
				return 0;

			if (!(off % BPF_REG_SIZE) && size == spill_size) {
				/* The earlier check_reg_arg() has decided the
				 * subreg_def for this insn. Save it first.
				 */
				s32 subreg_def = state->regs[dst_regno].subreg_def;

				copy_register_state(&state->regs[dst_regno], reg);
				state->regs[dst_regno].subreg_def = subreg_def;
			} else {
				for (i = 0; i < size; i++) {
					type = stype[(slot - i) % BPF_REG_SIZE];
					if (type == STACK_SPILL)
						continue;
					if (type == STACK_MISC)
						continue;
					if (type == STACK_INVALID && env->allow_uninit_stack)
						continue;
					verbose(env, "invalid read from stack off %d+%d size %d\n",
						off, i, size);
					return -EACCES;
				}
				mark_reg_unknown(env, state->regs, dst_regno);
			}
			state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
			return 0;
		}

		if (dst_regno >= 0) {
			/* restore register state from stack */
			copy_register_state(&state->regs[dst_regno], reg);
			/* mark reg as written since spilled pointer state likely
			 * has its liveness marks cleared by is_state_visited()
			 * which resets stack/reg liveness for state transitions
			 */
			state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
		} else if (__is_pointer_value(env->allow_ptr_leaks, reg)) {
			/* If dst_regno==-1, the caller is asking us whether
			 * it is acceptable to use this value as a SCALAR_VALUE
			 * (e.g. for XADD).
			 * We must not allow unprivileged callers to do that
			 * with spilled pointers.
4737 */ 4738 verbose(env, "leaking pointer from stack off %d\n", 4739 off); 4740 return -EACCES; 4741 } 4742 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); 4743 } else { 4744 for (i = 0; i < size; i++) { 4745 type = stype[(slot - i) % BPF_REG_SIZE]; 4746 if (type == STACK_MISC) 4747 continue; 4748 if (type == STACK_ZERO) 4749 continue; 4750 if (type == STACK_INVALID && env->allow_uninit_stack) 4751 continue; 4752 verbose(env, "invalid read from stack off %d+%d size %d\n", 4753 off, i, size); 4754 return -EACCES; 4755 } 4756 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); 4757 if (dst_regno >= 0) 4758 mark_reg_stack_read(env, reg_state, off, off + size, dst_regno); 4759 } 4760 return 0; 4761 } 4762 4763 enum bpf_access_src { 4764 ACCESS_DIRECT = 1, /* the access is performed by an instruction */ 4765 ACCESS_HELPER = 2, /* the access is performed by a helper */ 4766 }; 4767 4768 static int check_stack_range_initialized(struct bpf_verifier_env *env, 4769 int regno, int off, int access_size, 4770 bool zero_size_allowed, 4771 enum bpf_access_src type, 4772 struct bpf_call_arg_meta *meta); 4773 4774 static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno) 4775 { 4776 return cur_regs(env) + regno; 4777 } 4778 4779 /* Read the stack at 'ptr_regno + off' and put the result into the register 4780 * 'dst_regno'. 4781 * 'off' includes the pointer register's fixed offset(i.e. 'ptr_regno.off'), 4782 * but not its variable offset. 4783 * 'size' is assumed to be <= reg size and the access is assumed to be aligned. 4784 * 4785 * As opposed to check_stack_read_fixed_off, this function doesn't deal with 4786 * filling registers (i.e. reads of spilled register cannot be detected when 4787 * the offset is not fixed). We conservatively mark 'dst_regno' as containing 4788 * SCALAR_VALUE. That's why we assert that the 'ptr_regno' has a variable 4789 * offset; for a fixed offset check_stack_read_fixed_off should be used 4790 * instead. 4791 */ 4792 static int check_stack_read_var_off(struct bpf_verifier_env *env, 4793 int ptr_regno, int off, int size, int dst_regno) 4794 { 4795 /* The state of the source register. */ 4796 struct bpf_reg_state *reg = reg_state(env, ptr_regno); 4797 struct bpf_func_state *ptr_state = func(env, reg); 4798 int err; 4799 int min_off, max_off; 4800 4801 /* Note that we pass a NULL meta, so raw access will not be permitted. 4802 */ 4803 err = check_stack_range_initialized(env, ptr_regno, off, size, 4804 false, ACCESS_DIRECT, NULL); 4805 if (err) 4806 return err; 4807 4808 min_off = reg->smin_value + off; 4809 max_off = reg->smax_value + off; 4810 mark_reg_stack_read(env, ptr_state, min_off, max_off + size, dst_regno); 4811 return 0; 4812 } 4813 4814 /* check_stack_read dispatches to check_stack_read_fixed_off or 4815 * check_stack_read_var_off. 4816 * 4817 * The caller must ensure that the offset falls within the allocated stack 4818 * bounds. 4819 * 4820 * 'dst_regno' is a register which will receive the value from the stack. It 4821 * can be -1, meaning that the read value is not going to a register. 4822 */ 4823 static int check_stack_read(struct bpf_verifier_env *env, 4824 int ptr_regno, int off, int size, 4825 int dst_regno) 4826 { 4827 struct bpf_reg_state *reg = reg_state(env, ptr_regno); 4828 struct bpf_func_state *state = func(env, reg); 4829 int err; 4830 /* Some accesses are only permitted with a static offset. 
*/ 4831 bool var_off = !tnum_is_const(reg->var_off); 4832 4833 /* The offset is required to be static when reads don't go to a 4834 * register, in order to not leak pointers (see 4835 * check_stack_read_fixed_off). 4836 */ 4837 if (dst_regno < 0 && var_off) { 4838 char tn_buf[48]; 4839 4840 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 4841 verbose(env, "variable offset stack pointer cannot be passed into helper function; var_off=%s off=%d size=%d\n", 4842 tn_buf, off, size); 4843 return -EACCES; 4844 } 4845 /* Variable offset is prohibited for unprivileged mode for simplicity 4846 * since it requires corresponding support in Spectre masking for stack 4847 * ALU. See also retrieve_ptr_limit(). The check in 4848 * check_stack_access_for_ptr_arithmetic() called by 4849 * adjust_ptr_min_max_vals() prevents users from creating stack pointers 4850 * with variable offsets, therefore no check is required here. Further, 4851 * just checking it here would be insufficient as speculative stack 4852 * writes could still lead to unsafe speculative behaviour. 4853 */ 4854 if (!var_off) { 4855 off += reg->var_off.value; 4856 err = check_stack_read_fixed_off(env, state, off, size, 4857 dst_regno); 4858 } else { 4859 /* Variable offset stack reads need more conservative handling 4860 * than fixed offset ones. Note that dst_regno >= 0 on this 4861 * branch. 4862 */ 4863 err = check_stack_read_var_off(env, ptr_regno, off, size, 4864 dst_regno); 4865 } 4866 return err; 4867 } 4868 4869 4870 /* check_stack_write dispatches to check_stack_write_fixed_off or 4871 * check_stack_write_var_off. 4872 * 4873 * 'ptr_regno' is the register used as a pointer into the stack. 4874 * 'off' includes 'ptr_regno->off', but not its variable offset (if any). 4875 * 'value_regno' is the register whose value we're writing to the stack. It can 4876 * be -1, meaning that we're not writing from a register. 4877 * 4878 * The caller must ensure that the offset falls within the maximum stack size. 4879 */ 4880 static int check_stack_write(struct bpf_verifier_env *env, 4881 int ptr_regno, int off, int size, 4882 int value_regno, int insn_idx) 4883 { 4884 struct bpf_reg_state *reg = reg_state(env, ptr_regno); 4885 struct bpf_func_state *state = func(env, reg); 4886 int err; 4887 4888 if (tnum_is_const(reg->var_off)) { 4889 off += reg->var_off.value; 4890 err = check_stack_write_fixed_off(env, state, off, size, 4891 value_regno, insn_idx); 4892 } else { 4893 /* Variable offset stack reads need more conservative handling 4894 * than fixed offset ones. 
4895 */ 4896 err = check_stack_write_var_off(env, state, 4897 ptr_regno, off, size, 4898 value_regno, insn_idx); 4899 } 4900 return err; 4901 } 4902 4903 static int check_map_access_type(struct bpf_verifier_env *env, u32 regno, 4904 int off, int size, enum bpf_access_type type) 4905 { 4906 struct bpf_reg_state *regs = cur_regs(env); 4907 struct bpf_map *map = regs[regno].map_ptr; 4908 u32 cap = bpf_map_flags_to_cap(map); 4909 4910 if (type == BPF_WRITE && !(cap & BPF_MAP_CAN_WRITE)) { 4911 verbose(env, "write into map forbidden, value_size=%d off=%d size=%d\n", 4912 map->value_size, off, size); 4913 return -EACCES; 4914 } 4915 4916 if (type == BPF_READ && !(cap & BPF_MAP_CAN_READ)) { 4917 verbose(env, "read from map forbidden, value_size=%d off=%d size=%d\n", 4918 map->value_size, off, size); 4919 return -EACCES; 4920 } 4921 4922 return 0; 4923 } 4924 4925 /* check read/write into memory region (e.g., map value, ringbuf sample, etc) */ 4926 static int __check_mem_access(struct bpf_verifier_env *env, int regno, 4927 int off, int size, u32 mem_size, 4928 bool zero_size_allowed) 4929 { 4930 bool size_ok = size > 0 || (size == 0 && zero_size_allowed); 4931 struct bpf_reg_state *reg; 4932 4933 if (off >= 0 && size_ok && (u64)off + size <= mem_size) 4934 return 0; 4935 4936 reg = &cur_regs(env)[regno]; 4937 switch (reg->type) { 4938 case PTR_TO_MAP_KEY: 4939 verbose(env, "invalid access to map key, key_size=%d off=%d size=%d\n", 4940 mem_size, off, size); 4941 break; 4942 case PTR_TO_MAP_VALUE: 4943 verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n", 4944 mem_size, off, size); 4945 break; 4946 case PTR_TO_PACKET: 4947 case PTR_TO_PACKET_META: 4948 case PTR_TO_PACKET_END: 4949 verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n", 4950 off, size, regno, reg->id, off, mem_size); 4951 break; 4952 case PTR_TO_MEM: 4953 default: 4954 verbose(env, "invalid access to memory, mem_size=%u off=%d size=%d\n", 4955 mem_size, off, size); 4956 } 4957 4958 return -EACCES; 4959 } 4960 4961 /* check read/write into a memory region with possible variable offset */ 4962 static int check_mem_region_access(struct bpf_verifier_env *env, u32 regno, 4963 int off, int size, u32 mem_size, 4964 bool zero_size_allowed) 4965 { 4966 struct bpf_verifier_state *vstate = env->cur_state; 4967 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 4968 struct bpf_reg_state *reg = &state->regs[regno]; 4969 int err; 4970 4971 /* We may have adjusted the register pointing to memory region, so we 4972 * need to try adding each of min_value and max_value to off 4973 * to make sure our theoretical access will be safe. 4974 * 4975 * The minimum value is only important with signed 4976 * comparisons where we can't assume the floor of a 4977 * value is 0. If we are using signed variables for our 4978 * index'es we need to make sure that whatever we use 4979 * will have a set floor within our range. 
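	 *
	 * For example (an illustrative sketch): for a register with
	 * smin_value == 0 and umax_value == 16, off == 8 and a 4-byte access,
	 * both [8, 12) and [24, 28) must fit within the memory region.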
4980 */ 4981 if (reg->smin_value < 0 && 4982 (reg->smin_value == S64_MIN || 4983 (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) || 4984 reg->smin_value + off < 0)) { 4985 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", 4986 regno); 4987 return -EACCES; 4988 } 4989 err = __check_mem_access(env, regno, reg->smin_value + off, size, 4990 mem_size, zero_size_allowed); 4991 if (err) { 4992 verbose(env, "R%d min value is outside of the allowed memory range\n", 4993 regno); 4994 return err; 4995 } 4996 4997 /* If we haven't set a max value then we need to bail since we can't be 4998 * sure we won't do bad things. 4999 * If reg->umax_value + off could overflow, treat that as unbounded too. 5000 */ 5001 if (reg->umax_value >= BPF_MAX_VAR_OFF) { 5002 verbose(env, "R%d unbounded memory access, make sure to bounds check any such access\n", 5003 regno); 5004 return -EACCES; 5005 } 5006 err = __check_mem_access(env, regno, reg->umax_value + off, size, 5007 mem_size, zero_size_allowed); 5008 if (err) { 5009 verbose(env, "R%d max value is outside of the allowed memory range\n", 5010 regno); 5011 return err; 5012 } 5013 5014 return 0; 5015 } 5016 5017 static int __check_ptr_off_reg(struct bpf_verifier_env *env, 5018 const struct bpf_reg_state *reg, int regno, 5019 bool fixed_off_ok) 5020 { 5021 /* Access to this pointer-typed register or passing it to a helper 5022 * is only allowed in its original, unmodified form. 5023 */ 5024 5025 if (reg->off < 0) { 5026 verbose(env, "negative offset %s ptr R%d off=%d disallowed\n", 5027 reg_type_str(env, reg->type), regno, reg->off); 5028 return -EACCES; 5029 } 5030 5031 if (!fixed_off_ok && reg->off) { 5032 verbose(env, "dereference of modified %s ptr R%d off=%d disallowed\n", 5033 reg_type_str(env, reg->type), regno, reg->off); 5034 return -EACCES; 5035 } 5036 5037 if (!tnum_is_const(reg->var_off) || reg->var_off.value) { 5038 char tn_buf[48]; 5039 5040 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 5041 verbose(env, "variable %s access var_off=%s disallowed\n", 5042 reg_type_str(env, reg->type), tn_buf); 5043 return -EACCES; 5044 } 5045 5046 return 0; 5047 } 5048 5049 int check_ptr_off_reg(struct bpf_verifier_env *env, 5050 const struct bpf_reg_state *reg, int regno) 5051 { 5052 return __check_ptr_off_reg(env, reg, regno, false); 5053 } 5054 5055 static int map_kptr_match_type(struct bpf_verifier_env *env, 5056 struct btf_field *kptr_field, 5057 struct bpf_reg_state *reg, u32 regno) 5058 { 5059 const char *targ_name = btf_type_name(kptr_field->kptr.btf, kptr_field->kptr.btf_id); 5060 int perm_flags; 5061 const char *reg_name = ""; 5062 5063 if (btf_is_kernel(reg->btf)) { 5064 perm_flags = PTR_MAYBE_NULL | PTR_TRUSTED | MEM_RCU; 5065 5066 /* Only unreferenced case accepts untrusted pointers */ 5067 if (kptr_field->type == BPF_KPTR_UNREF) 5068 perm_flags |= PTR_UNTRUSTED; 5069 } else { 5070 perm_flags = PTR_MAYBE_NULL | MEM_ALLOC; 5071 if (kptr_field->type == BPF_KPTR_PERCPU) 5072 perm_flags |= MEM_PERCPU; 5073 } 5074 5075 if (base_type(reg->type) != PTR_TO_BTF_ID || (type_flag(reg->type) & ~perm_flags)) 5076 goto bad_type; 5077 5078 /* We need to verify reg->type and reg->btf, before accessing reg->btf */ 5079 reg_name = btf_type_name(reg->btf, reg->btf_id); 5080 5081 /* For ref_ptr case, release function check should ensure we get one 5082 * referenced PTR_TO_BTF_ID, and that its fixed offset is 0. For the 5083 * normal store of unreferenced kptr, we must ensure var_off is zero. 
5084 * Since ref_ptr cannot be accessed directly by BPF insns, checks for 5085 * reg->off and reg->ref_obj_id are not needed here. 5086 */ 5087 if (__check_ptr_off_reg(env, reg, regno, true)) 5088 return -EACCES; 5089 5090 /* A full type match is needed, as BTF can be vmlinux, module or prog BTF, and 5091 * we also need to take into account the reg->off. 5092 * 5093 * We want to support cases like: 5094 * 5095 * struct foo { 5096 * struct bar br; 5097 * struct baz bz; 5098 * }; 5099 * 5100 * struct foo *v; 5101 * v = func(); // PTR_TO_BTF_ID 5102 * val->foo = v; // reg->off is zero, btf and btf_id match type 5103 * val->bar = &v->br; // reg->off is still zero, but we need to retry with 5104 * // first member type of struct after comparison fails 5105 * val->baz = &v->bz; // reg->off is non-zero, so struct needs to be walked 5106 * // to match type 5107 * 5108 * In the kptr_ref case, check_func_arg_reg_off already ensures reg->off 5109 * is zero. We must also ensure that btf_struct_ids_match does not walk 5110 * the struct to match type against first member of struct, i.e. reject 5111 * second case from above. Hence, when type is BPF_KPTR_REF, we set 5112 * strict mode to true for type match. 5113 */ 5114 if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off, 5115 kptr_field->kptr.btf, kptr_field->kptr.btf_id, 5116 kptr_field->type != BPF_KPTR_UNREF)) 5117 goto bad_type; 5118 return 0; 5119 bad_type: 5120 verbose(env, "invalid kptr access, R%d type=%s%s ", regno, 5121 reg_type_str(env, reg->type), reg_name); 5122 verbose(env, "expected=%s%s", reg_type_str(env, PTR_TO_BTF_ID), targ_name); 5123 if (kptr_field->type == BPF_KPTR_UNREF) 5124 verbose(env, " or %s%s\n", reg_type_str(env, PTR_TO_BTF_ID | PTR_UNTRUSTED), 5125 targ_name); 5126 else 5127 verbose(env, "\n"); 5128 return -EINVAL; 5129 } 5130 5131 /* The non-sleepable programs and sleepable programs with explicit bpf_rcu_read_lock() 5132 * can dereference RCU protected pointers and result is PTR_TRUSTED. 5133 */ 5134 static bool in_rcu_cs(struct bpf_verifier_env *env) 5135 { 5136 return env->cur_state->active_rcu_lock || 5137 env->cur_state->active_lock.ptr || 5138 !env->prog->aux->sleepable; 5139 } 5140 5141 /* Once GCC supports btf_type_tag the following mechanism will be replaced with tag check */ 5142 BTF_SET_START(rcu_protected_types) 5143 BTF_ID(struct, prog_test_ref_kfunc) 5144 #ifdef CONFIG_CGROUPS 5145 BTF_ID(struct, cgroup) 5146 #endif 5147 BTF_ID(struct, bpf_cpumask) 5148 BTF_ID(struct, task_struct) 5149 BTF_SET_END(rcu_protected_types) 5150 5151 static bool rcu_protected_object(const struct btf *btf, u32 btf_id) 5152 { 5153 if (!btf_is_kernel(btf)) 5154 return true; 5155 return btf_id_set_contains(&rcu_protected_types, btf_id); 5156 } 5157 5158 static struct btf_record *kptr_pointee_btf_record(struct btf_field *kptr_field) 5159 { 5160 struct btf_struct_meta *meta; 5161 5162 if (btf_is_kernel(kptr_field->kptr.btf)) 5163 return NULL; 5164 5165 meta = btf_find_struct_meta(kptr_field->kptr.btf, 5166 kptr_field->kptr.btf_id); 5167 5168 return meta ? 
meta->record : NULL; 5169 } 5170 5171 static bool rcu_safe_kptr(const struct btf_field *field) 5172 { 5173 const struct btf_field_kptr *kptr = &field->kptr; 5174 5175 return field->type == BPF_KPTR_PERCPU || 5176 (field->type == BPF_KPTR_REF && rcu_protected_object(kptr->btf, kptr->btf_id)); 5177 } 5178 5179 static u32 btf_ld_kptr_type(struct bpf_verifier_env *env, struct btf_field *kptr_field) 5180 { 5181 struct btf_record *rec; 5182 u32 ret; 5183 5184 ret = PTR_MAYBE_NULL; 5185 if (rcu_safe_kptr(kptr_field) && in_rcu_cs(env)) { 5186 ret |= MEM_RCU; 5187 if (kptr_field->type == BPF_KPTR_PERCPU) 5188 ret |= MEM_PERCPU; 5189 else if (!btf_is_kernel(kptr_field->kptr.btf)) 5190 ret |= MEM_ALLOC; 5191 5192 rec = kptr_pointee_btf_record(kptr_field); 5193 if (rec && btf_record_has_field(rec, BPF_GRAPH_NODE)) 5194 ret |= NON_OWN_REF; 5195 } else { 5196 ret |= PTR_UNTRUSTED; 5197 } 5198 5199 return ret; 5200 } 5201 5202 static int check_map_kptr_access(struct bpf_verifier_env *env, u32 regno, 5203 int value_regno, int insn_idx, 5204 struct btf_field *kptr_field) 5205 { 5206 struct bpf_insn *insn = &env->prog->insnsi[insn_idx]; 5207 int class = BPF_CLASS(insn->code); 5208 struct bpf_reg_state *val_reg; 5209 5210 /* Things we already checked for in check_map_access and caller: 5211 * - Reject cases where variable offset may touch kptr 5212 * - size of access (must be BPF_DW) 5213 * - tnum_is_const(reg->var_off) 5214 * - kptr_field->offset == off + reg->var_off.value 5215 */ 5216 /* Only BPF_[LDX,STX,ST] | BPF_MEM | BPF_DW is supported */ 5217 if (BPF_MODE(insn->code) != BPF_MEM) { 5218 verbose(env, "kptr in map can only be accessed using BPF_MEM instruction mode\n"); 5219 return -EACCES; 5220 } 5221 5222 /* We only allow loading referenced kptr, since it will be marked as 5223 * untrusted, similar to unreferenced kptr. 5224 */ 5225 if (class != BPF_LDX && 5226 (kptr_field->type == BPF_KPTR_REF || kptr_field->type == BPF_KPTR_PERCPU)) { 5227 verbose(env, "store to referenced kptr disallowed\n"); 5228 return -EACCES; 5229 } 5230 5231 if (class == BPF_LDX) { 5232 val_reg = reg_state(env, value_regno); 5233 /* We can simply mark the value_regno receiving the pointer 5234 * value from map as PTR_TO_BTF_ID, with the correct type. 
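 *
 * Illustrative sketch of the program-side pattern this covers (the
 * field name is hypothetical):
 *
 *   p = v->task;       // BPF_LDX from the kptr slot
 *   if (p) {           // NULL check, see mark_ptr_or_null_reg()
 *           ... use p as a PTR_TO_BTF_ID ...
 *   }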
5235 */ 5236 mark_btf_ld_reg(env, cur_regs(env), value_regno, PTR_TO_BTF_ID, kptr_field->kptr.btf, 5237 kptr_field->kptr.btf_id, btf_ld_kptr_type(env, kptr_field)); 5238 /* For mark_ptr_or_null_reg */ 5239 val_reg->id = ++env->id_gen; 5240 } else if (class == BPF_STX) { 5241 val_reg = reg_state(env, value_regno); 5242 if (!register_is_null(val_reg) && 5243 map_kptr_match_type(env, kptr_field, val_reg, value_regno)) 5244 return -EACCES; 5245 } else if (class == BPF_ST) { 5246 if (insn->imm) { 5247 verbose(env, "BPF_ST imm must be 0 when storing to kptr at off=%u\n", 5248 kptr_field->offset); 5249 return -EACCES; 5250 } 5251 } else { 5252 verbose(env, "kptr in map can only be accessed using BPF_LDX/BPF_STX/BPF_ST\n"); 5253 return -EACCES; 5254 } 5255 return 0; 5256 } 5257 5258 /* check read/write into a map element with possible variable offset */ 5259 static int check_map_access(struct bpf_verifier_env *env, u32 regno, 5260 int off, int size, bool zero_size_allowed, 5261 enum bpf_access_src src) 5262 { 5263 struct bpf_verifier_state *vstate = env->cur_state; 5264 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 5265 struct bpf_reg_state *reg = &state->regs[regno]; 5266 struct bpf_map *map = reg->map_ptr; 5267 struct btf_record *rec; 5268 int err, i; 5269 5270 err = check_mem_region_access(env, regno, off, size, map->value_size, 5271 zero_size_allowed); 5272 if (err) 5273 return err; 5274 5275 if (IS_ERR_OR_NULL(map->record)) 5276 return 0; 5277 rec = map->record; 5278 for (i = 0; i < rec->cnt; i++) { 5279 struct btf_field *field = &rec->fields[i]; 5280 u32 p = field->offset; 5281 5282 /* If any part of a field can be touched by load/store, reject 5283 * this program. To check that [x1, x2) overlaps with [y1, y2), 5284 * it is sufficient to check x1 < y2 && y1 < x2. 5285 */ 5286 if (reg->smin_value + off < p + btf_field_type_size(field->type) && 5287 p < reg->umax_value + off + size) { 5288 switch (field->type) { 5289 case BPF_KPTR_UNREF: 5290 case BPF_KPTR_REF: 5291 case BPF_KPTR_PERCPU: 5292 if (src != ACCESS_DIRECT) { 5293 verbose(env, "kptr cannot be accessed indirectly by helper\n"); 5294 return -EACCES; 5295 } 5296 if (!tnum_is_const(reg->var_off)) { 5297 verbose(env, "kptr access cannot have variable offset\n"); 5298 return -EACCES; 5299 } 5300 if (p != off + reg->var_off.value) { 5301 verbose(env, "kptr access misaligned expected=%u off=%llu\n", 5302 p, off + reg->var_off.value); 5303 return -EACCES; 5304 } 5305 if (size != bpf_size_to_bytes(BPF_DW)) { 5306 verbose(env, "kptr access size must be BPF_DW\n"); 5307 return -EACCES; 5308 } 5309 break; 5310 default: 5311 verbose(env, "%s cannot be accessed directly by load/store\n", 5312 btf_field_type_name(field->type)); 5313 return -EACCES; 5314 } 5315 } 5316 } 5317 return 0; 5318 } 5319 5320 #define MAX_PACKET_OFF 0xffff 5321 5322 static bool may_access_direct_pkt_data(struct bpf_verifier_env *env, 5323 const struct bpf_call_arg_meta *meta, 5324 enum bpf_access_type t) 5325 { 5326 enum bpf_prog_type prog_type = resolve_prog_type(env->prog); 5327 5328 switch (prog_type) { 5329 /* Program types only with direct read access go here! */ 5330 case BPF_PROG_TYPE_LWT_IN: 5331 case BPF_PROG_TYPE_LWT_OUT: 5332 case BPF_PROG_TYPE_LWT_SEG6LOCAL: 5333 case BPF_PROG_TYPE_SK_REUSEPORT: 5334 case BPF_PROG_TYPE_FLOW_DISSECTOR: 5335 case BPF_PROG_TYPE_CGROUP_SKB: 5336 if (t == BPF_WRITE) 5337 return false; 5338 fallthrough; 5339 5340 /* Program types with direct read + write access go here! 
*/ 5341 case BPF_PROG_TYPE_SCHED_CLS: 5342 case BPF_PROG_TYPE_SCHED_ACT: 5343 case BPF_PROG_TYPE_XDP: 5344 case BPF_PROG_TYPE_LWT_XMIT: 5345 case BPF_PROG_TYPE_SK_SKB: 5346 case BPF_PROG_TYPE_SK_MSG: 5347 if (meta) 5348 return meta->pkt_access; 5349 5350 env->seen_direct_write = true; 5351 return true; 5352 5353 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 5354 if (t == BPF_WRITE) 5355 env->seen_direct_write = true; 5356 5357 return true; 5358 5359 default: 5360 return false; 5361 } 5362 } 5363 5364 static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, 5365 int size, bool zero_size_allowed) 5366 { 5367 struct bpf_reg_state *regs = cur_regs(env); 5368 struct bpf_reg_state *reg = &regs[regno]; 5369 int err; 5370 5371 /* We may have added a variable offset to the packet pointer; but any 5372 * reg->range we have comes after that. We are only checking the fixed 5373 * offset. 5374 */ 5375 5376 /* We don't allow negative numbers, because we aren't tracking enough 5377 * detail to prove they're safe. 5378 */ 5379 if (reg->smin_value < 0) { 5380 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", 5381 regno); 5382 return -EACCES; 5383 } 5384 5385 err = reg->range < 0 ? -EINVAL : 5386 __check_mem_access(env, regno, off, size, reg->range, 5387 zero_size_allowed); 5388 if (err) { 5389 verbose(env, "R%d offset is outside of the packet\n", regno); 5390 return err; 5391 } 5392 5393 /* __check_mem_access has made sure "off + size - 1" is within u16. 5394 * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff, 5395 * otherwise find_good_pkt_pointers would have refused to set range info 5396 * that __check_mem_access would have rejected this pkt access. 5397 * Therefore, "off + reg->umax_value + size - 1" won't overflow u32. 5398 */ 5399 env->prog->aux->max_pkt_offset = 5400 max_t(u32, env->prog->aux->max_pkt_offset, 5401 off + reg->umax_value + size - 1); 5402 5403 return err; 5404 } 5405 5406 /* check access to 'struct bpf_context' fields. Supports fixed offsets only */ 5407 static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size, 5408 enum bpf_access_type t, enum bpf_reg_type *reg_type, 5409 struct btf **btf, u32 *btf_id) 5410 { 5411 struct bpf_insn_access_aux info = { 5412 .reg_type = *reg_type, 5413 .log = &env->log, 5414 }; 5415 5416 if (env->ops->is_valid_access && 5417 env->ops->is_valid_access(off, size, t, env->prog, &info)) { 5418 /* A non zero info.ctx_field_size indicates that this field is a 5419 * candidate for later verifier transformation to load the whole 5420 * field and then apply a mask when accessed with a narrower 5421 * access than actual ctx access size. A zero info.ctx_field_size 5422 * will only allow for whole field access and rejects any other 5423 * type of narrower access.
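 *
 * For instance (an illustrative sketch, not tied to a particular
 * program type): if the underlying ctx field is 4 bytes wide and the
 * program does a 1-byte load from it, info.ctx_field_size = 4 lets
 * convert_ctx_accesses() later rewrite the access into a full 4-byte
 * load plus shift/mask rather than rejecting the narrow load outright.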
5424 */ 5425 *reg_type = info.reg_type; 5426 5427 if (base_type(*reg_type) == PTR_TO_BTF_ID) { 5428 *btf = info.btf; 5429 *btf_id = info.btf_id; 5430 } else { 5431 env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size; 5432 } 5433 /* remember the offset of last byte accessed in ctx */ 5434 if (env->prog->aux->max_ctx_offset < off + size) 5435 env->prog->aux->max_ctx_offset = off + size; 5436 return 0; 5437 } 5438 5439 verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size); 5440 return -EACCES; 5441 } 5442 5443 static int check_flow_keys_access(struct bpf_verifier_env *env, int off, 5444 int size) 5445 { 5446 if (size < 0 || off < 0 || 5447 (u64)off + size > sizeof(struct bpf_flow_keys)) { 5448 verbose(env, "invalid access to flow keys off=%d size=%d\n", 5449 off, size); 5450 return -EACCES; 5451 } 5452 return 0; 5453 } 5454 5455 static int check_sock_access(struct bpf_verifier_env *env, int insn_idx, 5456 u32 regno, int off, int size, 5457 enum bpf_access_type t) 5458 { 5459 struct bpf_reg_state *regs = cur_regs(env); 5460 struct bpf_reg_state *reg = &regs[regno]; 5461 struct bpf_insn_access_aux info = {}; 5462 bool valid; 5463 5464 if (reg->smin_value < 0) { 5465 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", 5466 regno); 5467 return -EACCES; 5468 } 5469 5470 switch (reg->type) { 5471 case PTR_TO_SOCK_COMMON: 5472 valid = bpf_sock_common_is_valid_access(off, size, t, &info); 5473 break; 5474 case PTR_TO_SOCKET: 5475 valid = bpf_sock_is_valid_access(off, size, t, &info); 5476 break; 5477 case PTR_TO_TCP_SOCK: 5478 valid = bpf_tcp_sock_is_valid_access(off, size, t, &info); 5479 break; 5480 case PTR_TO_XDP_SOCK: 5481 valid = bpf_xdp_sock_is_valid_access(off, size, t, &info); 5482 break; 5483 default: 5484 valid = false; 5485 } 5486 5487 5488 if (valid) { 5489 env->insn_aux_data[insn_idx].ctx_field_size = 5490 info.ctx_field_size; 5491 return 0; 5492 } 5493 5494 verbose(env, "R%d invalid %s access off=%d size=%d\n", 5495 regno, reg_type_str(env, reg->type), off, size); 5496 5497 return -EACCES; 5498 } 5499 5500 static bool is_pointer_value(struct bpf_verifier_env *env, int regno) 5501 { 5502 return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno)); 5503 } 5504 5505 static bool is_ctx_reg(struct bpf_verifier_env *env, int regno) 5506 { 5507 const struct bpf_reg_state *reg = reg_state(env, regno); 5508 5509 return reg->type == PTR_TO_CTX; 5510 } 5511 5512 static bool is_sk_reg(struct bpf_verifier_env *env, int regno) 5513 { 5514 const struct bpf_reg_state *reg = reg_state(env, regno); 5515 5516 return type_is_sk_pointer(reg->type); 5517 } 5518 5519 static bool is_pkt_reg(struct bpf_verifier_env *env, int regno) 5520 { 5521 const struct bpf_reg_state *reg = reg_state(env, regno); 5522 5523 return type_is_pkt_pointer(reg->type); 5524 } 5525 5526 static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno) 5527 { 5528 const struct bpf_reg_state *reg = reg_state(env, regno); 5529 5530 /* Separate to is_ctx_reg() since we still want to allow BPF_ST here.
*/ 5531 return reg->type == PTR_TO_FLOW_KEYS; 5532 } 5533 5534 static u32 *reg2btf_ids[__BPF_REG_TYPE_MAX] = { 5535 #ifdef CONFIG_NET 5536 [PTR_TO_SOCKET] = &btf_sock_ids[BTF_SOCK_TYPE_SOCK], 5537 [PTR_TO_SOCK_COMMON] = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON], 5538 [PTR_TO_TCP_SOCK] = &btf_sock_ids[BTF_SOCK_TYPE_TCP], 5539 #endif 5540 [CONST_PTR_TO_MAP] = btf_bpf_map_id, 5541 }; 5542 5543 static bool is_trusted_reg(const struct bpf_reg_state *reg) 5544 { 5545 /* A referenced register is always trusted. */ 5546 if (reg->ref_obj_id) 5547 return true; 5548 5549 /* Types listed in the reg2btf_ids are always trusted */ 5550 if (reg2btf_ids[base_type(reg->type)]) 5551 return true; 5552 5553 /* If a register is not referenced, it is trusted if it has the 5554 * MEM_ALLOC or PTR_TRUSTED type modifiers, and no others. Some of the 5555 * other type modifiers may be safe, but we elect to take an opt-in 5556 * approach here as some (e.g. PTR_UNTRUSTED and PTR_MAYBE_NULL) are 5557 * not. 5558 * 5559 * Eventually, we should make PTR_TRUSTED the single source of truth 5560 * for whether a register is trusted. 5561 */ 5562 return type_flag(reg->type) & BPF_REG_TRUSTED_MODIFIERS && 5563 !bpf_type_has_unsafe_modifiers(reg->type); 5564 } 5565 5566 static bool is_rcu_reg(const struct bpf_reg_state *reg) 5567 { 5568 return reg->type & MEM_RCU; 5569 } 5570 5571 static void clear_trusted_flags(enum bpf_type_flag *flag) 5572 { 5573 *flag &= ~(BPF_REG_TRUSTED_MODIFIERS | MEM_RCU); 5574 } 5575 5576 static int check_pkt_ptr_alignment(struct bpf_verifier_env *env, 5577 const struct bpf_reg_state *reg, 5578 int off, int size, bool strict) 5579 { 5580 struct tnum reg_off; 5581 int ip_align; 5582 5583 /* Byte size accesses are always allowed. */ 5584 if (!strict || size == 1) 5585 return 0; 5586 5587 /* For platforms that do not have a Kconfig enabling 5588 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of 5589 * NET_IP_ALIGN is universally set to '2'. And on platforms 5590 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get 5591 * to this code only in strict mode where we want to emulate 5592 * the NET_IP_ALIGN==2 checking. Therefore use an 5593 * unconditional IP align value of '2'. 5594 */ 5595 ip_align = 2; 5596 5597 reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off)); 5598 if (!tnum_is_aligned(reg_off, size)) { 5599 char tn_buf[48]; 5600 5601 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 5602 verbose(env, 5603 "misaligned packet access off %d+%s+%d+%d size %d\n", 5604 ip_align, tn_buf, reg->off, off, size); 5605 return -EACCES; 5606 } 5607 5608 return 0; 5609 } 5610 5611 static int check_generic_ptr_alignment(struct bpf_verifier_env *env, 5612 const struct bpf_reg_state *reg, 5613 const char *pointer_desc, 5614 int off, int size, bool strict) 5615 { 5616 struct tnum reg_off; 5617 5618 /* Byte size accesses are always allowed. 
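 * A single-byte access can never be misaligned, so only 2, 4 and 8 byte
 * accesses are checked below against the known alignment of
 * reg->var_off + reg->off + off.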
*/ 5619 if (!strict || size == 1) 5620 return 0; 5621 5622 reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off)); 5623 if (!tnum_is_aligned(reg_off, size)) { 5624 char tn_buf[48]; 5625 5626 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 5627 verbose(env, "misaligned %saccess off %s+%d+%d size %d\n", 5628 pointer_desc, tn_buf, reg->off, off, size); 5629 return -EACCES; 5630 } 5631 5632 return 0; 5633 } 5634 5635 static int check_ptr_alignment(struct bpf_verifier_env *env, 5636 const struct bpf_reg_state *reg, int off, 5637 int size, bool strict_alignment_once) 5638 { 5639 bool strict = env->strict_alignment || strict_alignment_once; 5640 const char *pointer_desc = ""; 5641 5642 switch (reg->type) { 5643 case PTR_TO_PACKET: 5644 case PTR_TO_PACKET_META: 5645 /* Special case, because of NET_IP_ALIGN. Given metadata sits 5646 * right in front, treat it the very same way. 5647 */ 5648 return check_pkt_ptr_alignment(env, reg, off, size, strict); 5649 case PTR_TO_FLOW_KEYS: 5650 pointer_desc = "flow keys "; 5651 break; 5652 case PTR_TO_MAP_KEY: 5653 pointer_desc = "key "; 5654 break; 5655 case PTR_TO_MAP_VALUE: 5656 pointer_desc = "value "; 5657 break; 5658 case PTR_TO_CTX: 5659 pointer_desc = "context "; 5660 break; 5661 case PTR_TO_STACK: 5662 pointer_desc = "stack "; 5663 /* The stack spill tracking logic in check_stack_write_fixed_off() 5664 * and check_stack_read_fixed_off() relies on stack accesses being 5665 * aligned. 5666 */ 5667 strict = true; 5668 break; 5669 case PTR_TO_SOCKET: 5670 pointer_desc = "sock "; 5671 break; 5672 case PTR_TO_SOCK_COMMON: 5673 pointer_desc = "sock_common "; 5674 break; 5675 case PTR_TO_TCP_SOCK: 5676 pointer_desc = "tcp_sock "; 5677 break; 5678 case PTR_TO_XDP_SOCK: 5679 pointer_desc = "xdp_sock "; 5680 break; 5681 default: 5682 break; 5683 } 5684 return check_generic_ptr_alignment(env, reg, pointer_desc, off, size, 5685 strict); 5686 } 5687 5688 static int update_stack_depth(struct bpf_verifier_env *env, 5689 const struct bpf_func_state *func, 5690 int off) 5691 { 5692 u16 stack = env->subprog_info[func->subprogno].stack_depth; 5693 5694 if (stack >= -off) 5695 return 0; 5696 5697 /* update known max for given subprogram */ 5698 env->subprog_info[func->subprogno].stack_depth = -off; 5699 return 0; 5700 } 5701 5702 /* starting from main bpf function walk all instructions of the function 5703 * and recursively walk all callees that given function can call. 5704 * Ignore jump and exit insns. 5705 * Since recursion is prevented by check_cfg() this algorithm 5706 * only needs a local stack of MAX_CALL_FRAMES to remember callsites 5707 */ 5708 static int check_max_stack_depth_subprog(struct bpf_verifier_env *env, int idx) 5709 { 5710 struct bpf_subprog_info *subprog = env->subprog_info; 5711 struct bpf_insn *insn = env->prog->insnsi; 5712 int depth = 0, frame = 0, i, subprog_end; 5713 bool tail_call_reachable = false; 5714 int ret_insn[MAX_CALL_FRAMES]; 5715 int ret_prog[MAX_CALL_FRAMES]; 5716 int j; 5717 5718 i = subprog[idx].start; 5719 process_func: 5720 /* protect against potential stack overflow that might happen when 5721 * bpf2bpf calls get combined with tailcalls. Limit the caller's stack 5722 * depth for such case down to 256 so that the worst case scenario 5723 * would result in 8k stack size (32 which is tailcall limit * 256 = 5724 * 8k). 
5725 * 5726 * To get the idea what might happen, see an example: 5727 * func1 -> sub rsp, 128 5728 * subfunc1 -> sub rsp, 256 5729 * tailcall1 -> add rsp, 256 5730 * func2 -> sub rsp, 192 (total stack size = 128 + 192 = 320) 5731 * subfunc2 -> sub rsp, 64 5732 * subfunc22 -> sub rsp, 128 5733 * tailcall2 -> add rsp, 128 5734 * func3 -> sub rsp, 32 (total stack size 128 + 192 + 64 + 32 = 416) 5735 * 5736 * tailcall will unwind the current stack frame but it will not get rid 5737 * of caller's stack as shown on the example above. 5738 */ 5739 if (idx && subprog[idx].has_tail_call && depth >= 256) { 5740 verbose(env, 5741 "tail_calls are not allowed when call stack of previous frames is %d bytes. Too large\n", 5742 depth); 5743 return -EACCES; 5744 } 5745 /* round up to 32-bytes, since this is granularity 5746 * of interpreter stack size 5747 */ 5748 depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32); 5749 if (depth > MAX_BPF_STACK) { 5750 verbose(env, "combined stack size of %d calls is %d. Too large\n", 5751 frame + 1, depth); 5752 return -EACCES; 5753 } 5754 continue_func: 5755 subprog_end = subprog[idx + 1].start; 5756 for (; i < subprog_end; i++) { 5757 int next_insn, sidx; 5758 5759 if (bpf_pseudo_kfunc_call(insn + i) && !insn[i].off) { 5760 bool err = false; 5761 5762 if (!is_bpf_throw_kfunc(insn + i)) 5763 continue; 5764 if (subprog[idx].is_cb) 5765 err = true; 5766 for (int c = 0; c < frame && !err; c++) { 5767 if (subprog[ret_prog[c]].is_cb) { 5768 err = true; 5769 break; 5770 } 5771 } 5772 if (!err) 5773 continue; 5774 verbose(env, 5775 "bpf_throw kfunc (insn %d) cannot be called from callback subprog %d\n", 5776 i, idx); 5777 return -EINVAL; 5778 } 5779 5780 if (!bpf_pseudo_call(insn + i) && !bpf_pseudo_func(insn + i)) 5781 continue; 5782 /* remember insn and function to return to */ 5783 ret_insn[frame] = i + 1; 5784 ret_prog[frame] = idx; 5785 5786 /* find the callee */ 5787 next_insn = i + insn[i].imm + 1; 5788 sidx = find_subprog(env, next_insn); 5789 if (sidx < 0) { 5790 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", 5791 next_insn); 5792 return -EFAULT; 5793 } 5794 if (subprog[sidx].is_async_cb) { 5795 if (subprog[sidx].has_tail_call) { 5796 verbose(env, "verifier bug. 
subprog has tail_call and async cb\n"); 5797 return -EFAULT; 5798 } 5799 /* async callbacks don't increase bpf prog stack size unless called directly */ 5800 if (!bpf_pseudo_call(insn + i)) 5801 continue; 5802 if (subprog[sidx].is_exception_cb) { 5803 verbose(env, "insn %d cannot call exception cb directly\n", i); 5804 return -EINVAL; 5805 } 5806 } 5807 i = next_insn; 5808 idx = sidx; 5809 5810 if (subprog[idx].has_tail_call) 5811 tail_call_reachable = true; 5812 5813 frame++; 5814 if (frame >= MAX_CALL_FRAMES) { 5815 verbose(env, "the call stack of %d frames is too deep !\n", 5816 frame); 5817 return -E2BIG; 5818 } 5819 goto process_func; 5820 } 5821 /* if tail call got detected across bpf2bpf calls then mark each of the 5822 * currently present subprog frames as tail call reachable subprogs; 5823 * this info will be utilized by JIT so that we will be preserving the 5824 * tail call counter throughout bpf2bpf calls combined with tailcalls 5825 */ 5826 if (tail_call_reachable) 5827 for (j = 0; j < frame; j++) { 5828 if (subprog[ret_prog[j]].is_exception_cb) { 5829 verbose(env, "cannot tail call within exception cb\n"); 5830 return -EINVAL; 5831 } 5832 subprog[ret_prog[j]].tail_call_reachable = true; 5833 } 5834 if (subprog[0].tail_call_reachable) 5835 env->prog->aux->tail_call_reachable = true; 5836 5837 /* end of for() loop means the last insn of the 'subprog' 5838 * was reached. Doesn't matter whether it was JA or EXIT 5839 */ 5840 if (frame == 0) 5841 return 0; 5842 depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32); 5843 frame--; 5844 i = ret_insn[frame]; 5845 idx = ret_prog[frame]; 5846 goto continue_func; 5847 } 5848 5849 static int check_max_stack_depth(struct bpf_verifier_env *env) 5850 { 5851 struct bpf_subprog_info *si = env->subprog_info; 5852 int ret; 5853 5854 for (int i = 0; i < env->subprog_cnt; i++) { 5855 if (!i || si[i].is_async_cb) { 5856 ret = check_max_stack_depth_subprog(env, i); 5857 if (ret < 0) 5858 return ret; 5859 } 5860 continue; 5861 } 5862 return 0; 5863 } 5864 5865 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 5866 static int get_callee_stack_depth(struct bpf_verifier_env *env, 5867 const struct bpf_insn *insn, int idx) 5868 { 5869 int start = idx + insn->imm + 1, subprog; 5870 5871 subprog = find_subprog(env, start); 5872 if (subprog < 0) { 5873 WARN_ONCE(1, "verifier bug. 
No program starts at insn %d\n", 5874 start); 5875 return -EFAULT; 5876 } 5877 return env->subprog_info[subprog].stack_depth; 5878 } 5879 #endif 5880 5881 static int __check_buffer_access(struct bpf_verifier_env *env, 5882 const char *buf_info, 5883 const struct bpf_reg_state *reg, 5884 int regno, int off, int size) 5885 { 5886 if (off < 0) { 5887 verbose(env, 5888 "R%d invalid %s buffer access: off=%d, size=%d\n", 5889 regno, buf_info, off, size); 5890 return -EACCES; 5891 } 5892 if (!tnum_is_const(reg->var_off) || reg->var_off.value) { 5893 char tn_buf[48]; 5894 5895 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 5896 verbose(env, 5897 "R%d invalid variable buffer offset: off=%d, var_off=%s\n", 5898 regno, off, tn_buf); 5899 return -EACCES; 5900 } 5901 5902 return 0; 5903 } 5904 5905 static int check_tp_buffer_access(struct bpf_verifier_env *env, 5906 const struct bpf_reg_state *reg, 5907 int regno, int off, int size) 5908 { 5909 int err; 5910 5911 err = __check_buffer_access(env, "tracepoint", reg, regno, off, size); 5912 if (err) 5913 return err; 5914 5915 if (off + size > env->prog->aux->max_tp_access) 5916 env->prog->aux->max_tp_access = off + size; 5917 5918 return 0; 5919 } 5920 5921 static int check_buffer_access(struct bpf_verifier_env *env, 5922 const struct bpf_reg_state *reg, 5923 int regno, int off, int size, 5924 bool zero_size_allowed, 5925 u32 *max_access) 5926 { 5927 const char *buf_info = type_is_rdonly_mem(reg->type) ? "rdonly" : "rdwr"; 5928 int err; 5929 5930 err = __check_buffer_access(env, buf_info, reg, regno, off, size); 5931 if (err) 5932 return err; 5933 5934 if (off + size > *max_access) 5935 *max_access = off + size; 5936 5937 return 0; 5938 } 5939 5940 /* BPF architecture zero extends alu32 ops into 64-bit registesr */ 5941 static void zext_32_to_64(struct bpf_reg_state *reg) 5942 { 5943 reg->var_off = tnum_subreg(reg->var_off); 5944 __reg_assign_32_into_64(reg); 5945 } 5946 5947 /* truncate register to smaller size (in bytes) 5948 * must be called with size < BPF_REG_SIZE 5949 */ 5950 static void coerce_reg_to_size(struct bpf_reg_state *reg, int size) 5951 { 5952 u64 mask; 5953 5954 /* clear high bits in bit representation */ 5955 reg->var_off = tnum_cast(reg->var_off, size); 5956 5957 /* fix arithmetic bounds */ 5958 mask = ((u64)1 << (size * 8)) - 1; 5959 if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) { 5960 reg->umin_value &= mask; 5961 reg->umax_value &= mask; 5962 } else { 5963 reg->umin_value = 0; 5964 reg->umax_value = mask; 5965 } 5966 reg->smin_value = reg->umin_value; 5967 reg->smax_value = reg->umax_value; 5968 5969 /* If size is smaller than 32bit register the 32bit register 5970 * values are also truncated so we push 64-bit bounds into 5971 * 32-bit bounds. Above were truncated < 32-bits already. 
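 *
 * Worked example with illustrative numbers: truncating a register known
 * to be in [5, 300] down to one byte keeps no usable bounds, since
 * 5 & ~0xff != 300 & ~0xff, so the range becomes [0, 255]; for [5, 200]
 * the high bits agree and the bounds stay [5, 200]. In either case,
 * when size < 4 the 32-bit bounds are reset below and re-derived via
 * reg_bounds_sync().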
5972 */ 5973 if (size < 4) { 5974 __mark_reg32_unbounded(reg); 5975 reg_bounds_sync(reg); 5976 } 5977 } 5978 5979 static void set_sext64_default_val(struct bpf_reg_state *reg, int size) 5980 { 5981 if (size == 1) { 5982 reg->smin_value = reg->s32_min_value = S8_MIN; 5983 reg->smax_value = reg->s32_max_value = S8_MAX; 5984 } else if (size == 2) { 5985 reg->smin_value = reg->s32_min_value = S16_MIN; 5986 reg->smax_value = reg->s32_max_value = S16_MAX; 5987 } else { 5988 /* size == 4 */ 5989 reg->smin_value = reg->s32_min_value = S32_MIN; 5990 reg->smax_value = reg->s32_max_value = S32_MAX; 5991 } 5992 reg->umin_value = reg->u32_min_value = 0; 5993 reg->umax_value = U64_MAX; 5994 reg->u32_max_value = U32_MAX; 5995 reg->var_off = tnum_unknown; 5996 } 5997 5998 static void coerce_reg_to_size_sx(struct bpf_reg_state *reg, int size) 5999 { 6000 s64 init_s64_max, init_s64_min, s64_max, s64_min, u64_cval; 6001 u64 top_smax_value, top_smin_value; 6002 u64 num_bits = size * 8; 6003 6004 if (tnum_is_const(reg->var_off)) { 6005 u64_cval = reg->var_off.value; 6006 if (size == 1) 6007 reg->var_off = tnum_const((s8)u64_cval); 6008 else if (size == 2) 6009 reg->var_off = tnum_const((s16)u64_cval); 6010 else 6011 /* size == 4 */ 6012 reg->var_off = tnum_const((s32)u64_cval); 6013 6014 u64_cval = reg->var_off.value; 6015 reg->smax_value = reg->smin_value = u64_cval; 6016 reg->umax_value = reg->umin_value = u64_cval; 6017 reg->s32_max_value = reg->s32_min_value = u64_cval; 6018 reg->u32_max_value = reg->u32_min_value = u64_cval; 6019 return; 6020 } 6021 6022 top_smax_value = ((u64)reg->smax_value >> num_bits) << num_bits; 6023 top_smin_value = ((u64)reg->smin_value >> num_bits) << num_bits; 6024 6025 if (top_smax_value != top_smin_value) 6026 goto out; 6027 6028 /* find the s64_min and s64_min after sign extension */ 6029 if (size == 1) { 6030 init_s64_max = (s8)reg->smax_value; 6031 init_s64_min = (s8)reg->smin_value; 6032 } else if (size == 2) { 6033 init_s64_max = (s16)reg->smax_value; 6034 init_s64_min = (s16)reg->smin_value; 6035 } else { 6036 init_s64_max = (s32)reg->smax_value; 6037 init_s64_min = (s32)reg->smin_value; 6038 } 6039 6040 s64_max = max(init_s64_max, init_s64_min); 6041 s64_min = min(init_s64_max, init_s64_min); 6042 6043 /* both of s64_max/s64_min positive or negative */ 6044 if ((s64_max >= 0) == (s64_min >= 0)) { 6045 reg->smin_value = reg->s32_min_value = s64_min; 6046 reg->smax_value = reg->s32_max_value = s64_max; 6047 reg->umin_value = reg->u32_min_value = s64_min; 6048 reg->umax_value = reg->u32_max_value = s64_max; 6049 reg->var_off = tnum_range(s64_min, s64_max); 6050 return; 6051 } 6052 6053 out: 6054 set_sext64_default_val(reg, size); 6055 } 6056 6057 static void set_sext32_default_val(struct bpf_reg_state *reg, int size) 6058 { 6059 if (size == 1) { 6060 reg->s32_min_value = S8_MIN; 6061 reg->s32_max_value = S8_MAX; 6062 } else { 6063 /* size == 2 */ 6064 reg->s32_min_value = S16_MIN; 6065 reg->s32_max_value = S16_MAX; 6066 } 6067 reg->u32_min_value = 0; 6068 reg->u32_max_value = U32_MAX; 6069 } 6070 6071 static void coerce_subreg_to_size_sx(struct bpf_reg_state *reg, int size) 6072 { 6073 s32 init_s32_max, init_s32_min, s32_max, s32_min, u32_val; 6074 u32 top_smax_value, top_smin_value; 6075 u32 num_bits = size * 8; 6076 6077 if (tnum_is_const(reg->var_off)) { 6078 u32_val = reg->var_off.value; 6079 if (size == 1) 6080 reg->var_off = tnum_const((s8)u32_val); 6081 else 6082 reg->var_off = tnum_const((s16)u32_val); 6083 6084 u32_val = reg->var_off.value; 6085 reg->s32_min_value 
= reg->s32_max_value = u32_val; 6086 reg->u32_min_value = reg->u32_max_value = u32_val; 6087 return; 6088 } 6089 6090 top_smax_value = ((u32)reg->s32_max_value >> num_bits) << num_bits; 6091 top_smin_value = ((u32)reg->s32_min_value >> num_bits) << num_bits; 6092 6093 if (top_smax_value != top_smin_value) 6094 goto out; 6095 6096 /* find the s32_min and s32_min after sign extension */ 6097 if (size == 1) { 6098 init_s32_max = (s8)reg->s32_max_value; 6099 init_s32_min = (s8)reg->s32_min_value; 6100 } else { 6101 /* size == 2 */ 6102 init_s32_max = (s16)reg->s32_max_value; 6103 init_s32_min = (s16)reg->s32_min_value; 6104 } 6105 s32_max = max(init_s32_max, init_s32_min); 6106 s32_min = min(init_s32_max, init_s32_min); 6107 6108 if ((s32_min >= 0) == (s32_max >= 0)) { 6109 reg->s32_min_value = s32_min; 6110 reg->s32_max_value = s32_max; 6111 reg->u32_min_value = (u32)s32_min; 6112 reg->u32_max_value = (u32)s32_max; 6113 return; 6114 } 6115 6116 out: 6117 set_sext32_default_val(reg, size); 6118 } 6119 6120 static bool bpf_map_is_rdonly(const struct bpf_map *map) 6121 { 6122 /* A map is considered read-only if the following condition are true: 6123 * 6124 * 1) BPF program side cannot change any of the map content. The 6125 * BPF_F_RDONLY_PROG flag is throughout the lifetime of a map 6126 * and was set at map creation time. 6127 * 2) The map value(s) have been initialized from user space by a 6128 * loader and then "frozen", such that no new map update/delete 6129 * operations from syscall side are possible for the rest of 6130 * the map's lifetime from that point onwards. 6131 * 3) Any parallel/pending map update/delete operations from syscall 6132 * side have been completed. Only after that point, it's safe to 6133 * assume that map value(s) are immutable. 6134 */ 6135 return (map->map_flags & BPF_F_RDONLY_PROG) && 6136 READ_ONCE(map->frozen) && 6137 !bpf_map_write_active(map); 6138 } 6139 6140 static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val, 6141 bool is_ldsx) 6142 { 6143 void *ptr; 6144 u64 addr; 6145 int err; 6146 6147 err = map->ops->map_direct_value_addr(map, &addr, off); 6148 if (err) 6149 return err; 6150 ptr = (void *)(long)addr + off; 6151 6152 switch (size) { 6153 case sizeof(u8): 6154 *val = is_ldsx ? (s64)*(s8 *)ptr : (u64)*(u8 *)ptr; 6155 break; 6156 case sizeof(u16): 6157 *val = is_ldsx ? (s64)*(s16 *)ptr : (u64)*(u16 *)ptr; 6158 break; 6159 case sizeof(u32): 6160 *val = is_ldsx ? (s64)*(s32 *)ptr : (u64)*(u32 *)ptr; 6161 break; 6162 case sizeof(u64): 6163 *val = *(u64 *)ptr; 6164 break; 6165 default: 6166 return -EINVAL; 6167 } 6168 return 0; 6169 } 6170 6171 #define BTF_TYPE_SAFE_RCU(__type) __PASTE(__type, __safe_rcu) 6172 #define BTF_TYPE_SAFE_RCU_OR_NULL(__type) __PASTE(__type, __safe_rcu_or_null) 6173 #define BTF_TYPE_SAFE_TRUSTED(__type) __PASTE(__type, __safe_trusted) 6174 6175 /* 6176 * Allow list few fields as RCU trusted or full trusted. 6177 * This logic doesn't allow mix tagging and will be removed once GCC supports 6178 * btf_type_tag. 
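 * For example, the fields listed under BTF_TYPE_SAFE_RCU(struct task_struct)
 * below keep an RCU/trusted marking when dereferenced inside an RCU CS,
 * while walking a field that appears in none of these lists drops the
 * trusted and RCU flags (see check_ptr_to_btf_access() below).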
6179 */ 6180 6181 /* RCU trusted: these fields are trusted in RCU CS and never NULL */ 6182 BTF_TYPE_SAFE_RCU(struct task_struct) { 6183 const cpumask_t *cpus_ptr; 6184 struct css_set __rcu *cgroups; 6185 struct task_struct __rcu *real_parent; 6186 struct task_struct *group_leader; 6187 }; 6188 6189 BTF_TYPE_SAFE_RCU(struct cgroup) { 6190 /* cgrp->kn is always accessible as documented in kernel/cgroup/cgroup.c */ 6191 struct kernfs_node *kn; 6192 }; 6193 6194 BTF_TYPE_SAFE_RCU(struct css_set) { 6195 struct cgroup *dfl_cgrp; 6196 }; 6197 6198 /* RCU trusted: these fields are trusted in RCU CS and can be NULL */ 6199 BTF_TYPE_SAFE_RCU_OR_NULL(struct mm_struct) { 6200 struct file __rcu *exe_file; 6201 }; 6202 6203 /* skb->sk, req->sk are not RCU protected, but we mark them as such 6204 * because bpf prog accessible sockets are SOCK_RCU_FREE. 6205 */ 6206 BTF_TYPE_SAFE_RCU_OR_NULL(struct sk_buff) { 6207 struct sock *sk; 6208 }; 6209 6210 BTF_TYPE_SAFE_RCU_OR_NULL(struct request_sock) { 6211 struct sock *sk; 6212 }; 6213 6214 /* full trusted: these fields are trusted even outside of RCU CS and never NULL */ 6215 BTF_TYPE_SAFE_TRUSTED(struct bpf_iter_meta) { 6216 struct seq_file *seq; 6217 }; 6218 6219 BTF_TYPE_SAFE_TRUSTED(struct bpf_iter__task) { 6220 struct bpf_iter_meta *meta; 6221 struct task_struct *task; 6222 }; 6223 6224 BTF_TYPE_SAFE_TRUSTED(struct linux_binprm) { 6225 struct file *file; 6226 }; 6227 6228 BTF_TYPE_SAFE_TRUSTED(struct file) { 6229 struct inode *f_inode; 6230 }; 6231 6232 BTF_TYPE_SAFE_TRUSTED(struct dentry) { 6233 /* no negative dentry-s in places where bpf can see it */ 6234 struct inode *d_inode; 6235 }; 6236 6237 BTF_TYPE_SAFE_TRUSTED(struct socket) { 6238 struct sock *sk; 6239 }; 6240 6241 static bool type_is_rcu(struct bpf_verifier_env *env, 6242 struct bpf_reg_state *reg, 6243 const char *field_name, u32 btf_id) 6244 { 6245 BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU(struct task_struct)); 6246 BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU(struct cgroup)); 6247 BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU(struct css_set)); 6248 6249 return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id, "__safe_rcu"); 6250 } 6251 6252 static bool type_is_rcu_or_null(struct bpf_verifier_env *env, 6253 struct bpf_reg_state *reg, 6254 const char *field_name, u32 btf_id) 6255 { 6256 BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU_OR_NULL(struct mm_struct)); 6257 BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU_OR_NULL(struct sk_buff)); 6258 BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU_OR_NULL(struct request_sock)); 6259 6260 return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id, "__safe_rcu_or_null"); 6261 } 6262 6263 static bool type_is_trusted(struct bpf_verifier_env *env, 6264 struct bpf_reg_state *reg, 6265 const char *field_name, u32 btf_id) 6266 { 6267 BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct bpf_iter_meta)); 6268 BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct bpf_iter__task)); 6269 BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct linux_binprm)); 6270 BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct file)); 6271 BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct dentry)); 6272 BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct socket)); 6273 6274 return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id, "__safe_trusted"); 6275 } 6276 6277 static int check_ptr_to_btf_access(struct bpf_verifier_env *env, 6278 struct bpf_reg_state *regs, 6279 int regno, int off, int size, 6280 enum bpf_access_type atype, 6281 int value_regno) 6282 { 6283 struct bpf_reg_state *reg = regs + regno; 6284 const struct btf_type *t = btf_type_by_id(reg->btf, reg->btf_id); 6285 
const char *tname = btf_name_by_offset(reg->btf, t->name_off); 6286 const char *field_name = NULL; 6287 enum bpf_type_flag flag = 0; 6288 u32 btf_id = 0; 6289 int ret; 6290 6291 if (!env->allow_ptr_leaks) { 6292 verbose(env, 6293 "'struct %s' access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN\n", 6294 tname); 6295 return -EPERM; 6296 } 6297 if (!env->prog->gpl_compatible && btf_is_kernel(reg->btf)) { 6298 verbose(env, 6299 "Cannot access kernel 'struct %s' from non-GPL compatible program\n", 6300 tname); 6301 return -EINVAL; 6302 } 6303 if (off < 0) { 6304 verbose(env, 6305 "R%d is ptr_%s invalid negative access: off=%d\n", 6306 regno, tname, off); 6307 return -EACCES; 6308 } 6309 if (!tnum_is_const(reg->var_off) || reg->var_off.value) { 6310 char tn_buf[48]; 6311 6312 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 6313 verbose(env, 6314 "R%d is ptr_%s invalid variable offset: off=%d, var_off=%s\n", 6315 regno, tname, off, tn_buf); 6316 return -EACCES; 6317 } 6318 6319 if (reg->type & MEM_USER) { 6320 verbose(env, 6321 "R%d is ptr_%s access user memory: off=%d\n", 6322 regno, tname, off); 6323 return -EACCES; 6324 } 6325 6326 if (reg->type & MEM_PERCPU) { 6327 verbose(env, 6328 "R%d is ptr_%s access percpu memory: off=%d\n", 6329 regno, tname, off); 6330 return -EACCES; 6331 } 6332 6333 if (env->ops->btf_struct_access && !type_is_alloc(reg->type) && atype == BPF_WRITE) { 6334 if (!btf_is_kernel(reg->btf)) { 6335 verbose(env, "verifier internal error: reg->btf must be kernel btf\n"); 6336 return -EFAULT; 6337 } 6338 ret = env->ops->btf_struct_access(&env->log, reg, off, size); 6339 } else { 6340 /* Writes are permitted with default btf_struct_access for 6341 * program allocated objects (which always have ref_obj_id > 0), 6342 * but not for untrusted PTR_TO_BTF_ID | MEM_ALLOC. 6343 */ 6344 if (atype != BPF_READ && !type_is_ptr_alloc_obj(reg->type)) { 6345 verbose(env, "only read is supported\n"); 6346 return -EACCES; 6347 } 6348 6349 if (type_is_alloc(reg->type) && !type_is_non_owning_ref(reg->type) && 6350 !(reg->type & MEM_RCU) && !reg->ref_obj_id) { 6351 verbose(env, "verifier internal error: ref_obj_id for allocated object must be non-zero\n"); 6352 return -EFAULT; 6353 } 6354 6355 ret = btf_struct_access(&env->log, reg, off, size, atype, &btf_id, &flag, &field_name); 6356 } 6357 6358 if (ret < 0) 6359 return ret; 6360 6361 if (ret != PTR_TO_BTF_ID) { 6362 /* just mark; */ 6363 6364 } else if (type_flag(reg->type) & PTR_UNTRUSTED) { 6365 /* If this is an untrusted pointer, all pointers formed by walking it 6366 * also inherit the untrusted flag. 6367 */ 6368 flag = PTR_UNTRUSTED; 6369 6370 } else if (is_trusted_reg(reg) || is_rcu_reg(reg)) { 6371 /* By default any pointer obtained from walking a trusted pointer is no 6372 * longer trusted, unless the field being accessed has explicitly been 6373 * marked as inheriting its parent's state of trust (either full or RCU). 6374 * For example: 6375 * 'cgroups' pointer is untrusted if task->cgroups dereference 6376 * happened in a sleepable program outside of bpf_rcu_read_lock() 6377 * section. In a non-sleepable program it's trusted while in RCU CS (aka MEM_RCU). 6378 * Note bpf_rcu_read_unlock() converts MEM_RCU pointers to PTR_UNTRUSTED. 6379 * 6380 * A regular RCU-protected pointer with __rcu tag can also be deemed 6381 * trusted if we are in an RCU CS. Such pointer can be NULL. 
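 *
 * Illustrative sketch of the corresponding program-side pattern in a
 * sleepable program:
 *
 *   bpf_rcu_read_lock();
 *   parent = task->real_parent;   // MEM_RCU while inside the RCU CS
 *   ...
 *   bpf_rcu_read_unlock();        // parent degrades to PTR_UNTRUSTED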
6382 */ 6383 if (type_is_trusted(env, reg, field_name, btf_id)) { 6384 flag |= PTR_TRUSTED; 6385 } else if (in_rcu_cs(env) && !type_may_be_null(reg->type)) { 6386 if (type_is_rcu(env, reg, field_name, btf_id)) { 6387 /* ignore __rcu tag and mark it MEM_RCU */ 6388 flag |= MEM_RCU; 6389 } else if (flag & MEM_RCU || 6390 type_is_rcu_or_null(env, reg, field_name, btf_id)) { 6391 /* __rcu tagged pointers can be NULL */ 6392 flag |= MEM_RCU | PTR_MAYBE_NULL; 6393 6394 /* We always trust them */ 6395 if (type_is_rcu_or_null(env, reg, field_name, btf_id) && 6396 flag & PTR_UNTRUSTED) 6397 flag &= ~PTR_UNTRUSTED; 6398 } else if (flag & (MEM_PERCPU | MEM_USER)) { 6399 /* keep as-is */ 6400 } else { 6401 /* walking unknown pointers yields old deprecated PTR_TO_BTF_ID */ 6402 clear_trusted_flags(&flag); 6403 } 6404 } else { 6405 /* 6406 * If not in RCU CS or MEM_RCU pointer can be NULL then 6407 * aggressively mark as untrusted otherwise such 6408 * pointers will be plain PTR_TO_BTF_ID without flags 6409 * and will be allowed to be passed into helpers for 6410 * compat reasons. 6411 */ 6412 flag = PTR_UNTRUSTED; 6413 } 6414 } else { 6415 /* Old compat. Deprecated */ 6416 clear_trusted_flags(&flag); 6417 } 6418 6419 if (atype == BPF_READ && value_regno >= 0) 6420 mark_btf_ld_reg(env, regs, value_regno, ret, reg->btf, btf_id, flag); 6421 6422 return 0; 6423 } 6424 6425 static int check_ptr_to_map_access(struct bpf_verifier_env *env, 6426 struct bpf_reg_state *regs, 6427 int regno, int off, int size, 6428 enum bpf_access_type atype, 6429 int value_regno) 6430 { 6431 struct bpf_reg_state *reg = regs + regno; 6432 struct bpf_map *map = reg->map_ptr; 6433 struct bpf_reg_state map_reg; 6434 enum bpf_type_flag flag = 0; 6435 const struct btf_type *t; 6436 const char *tname; 6437 u32 btf_id; 6438 int ret; 6439 6440 if (!btf_vmlinux) { 6441 verbose(env, "map_ptr access not supported without CONFIG_DEBUG_INFO_BTF\n"); 6442 return -ENOTSUPP; 6443 } 6444 6445 if (!map->ops->map_btf_id || !*map->ops->map_btf_id) { 6446 verbose(env, "map_ptr access not supported for map type %d\n", 6447 map->map_type); 6448 return -ENOTSUPP; 6449 } 6450 6451 t = btf_type_by_id(btf_vmlinux, *map->ops->map_btf_id); 6452 tname = btf_name_by_offset(btf_vmlinux, t->name_off); 6453 6454 if (!env->allow_ptr_leaks) { 6455 verbose(env, 6456 "'struct %s' access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN\n", 6457 tname); 6458 return -EPERM; 6459 } 6460 6461 if (off < 0) { 6462 verbose(env, "R%d is %s invalid negative access: off=%d\n", 6463 regno, tname, off); 6464 return -EACCES; 6465 } 6466 6467 if (atype != BPF_READ) { 6468 verbose(env, "only read from %s is supported\n", tname); 6469 return -EACCES; 6470 } 6471 6472 /* Simulate access to a PTR_TO_BTF_ID */ 6473 memset(&map_reg, 0, sizeof(map_reg)); 6474 mark_btf_ld_reg(env, &map_reg, 0, PTR_TO_BTF_ID, btf_vmlinux, *map->ops->map_btf_id, 0); 6475 ret = btf_struct_access(&env->log, &map_reg, off, size, atype, &btf_id, &flag, NULL); 6476 if (ret < 0) 6477 return ret; 6478 6479 if (value_regno >= 0) 6480 mark_btf_ld_reg(env, regs, value_regno, ret, btf_vmlinux, btf_id, flag); 6481 6482 return 0; 6483 } 6484 6485 /* Check that the stack access at the given offset is within bounds. The 6486 * maximum valid offset is -1. 6487 * 6488 * The minimum valid offset is -MAX_BPF_STACK for writes, and 6489 * -state->allocated_stack for reads. 
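 *
 * E.g. with MAX_BPF_STACK == 512, a write may target any offset in
 * [-512, -1], while a read is limited to slots the function has already
 * allocated, i.e. [-state->allocated_stack, -1].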
6490 */ 6491 static int check_stack_slot_within_bounds(int off, 6492 struct bpf_func_state *state, 6493 enum bpf_access_type t) 6494 { 6495 int min_valid_off; 6496 6497 if (t == BPF_WRITE) 6498 min_valid_off = -MAX_BPF_STACK; 6499 else 6500 min_valid_off = -state->allocated_stack; 6501 6502 if (off < min_valid_off || off > -1) 6503 return -EACCES; 6504 return 0; 6505 } 6506 6507 /* Check that the stack access at 'regno + off' falls within the maximum stack 6508 * bounds. 6509 * 6510 * 'off' includes `regno->offset`, but not its dynamic part (if any). 6511 */ 6512 static int check_stack_access_within_bounds( 6513 struct bpf_verifier_env *env, 6514 int regno, int off, int access_size, 6515 enum bpf_access_src src, enum bpf_access_type type) 6516 { 6517 struct bpf_reg_state *regs = cur_regs(env); 6518 struct bpf_reg_state *reg = regs + regno; 6519 struct bpf_func_state *state = func(env, reg); 6520 int min_off, max_off; 6521 int err; 6522 char *err_extra; 6523 6524 if (src == ACCESS_HELPER) 6525 /* We don't know if helpers are reading or writing (or both). */ 6526 err_extra = " indirect access to"; 6527 else if (type == BPF_READ) 6528 err_extra = " read from"; 6529 else 6530 err_extra = " write to"; 6531 6532 if (tnum_is_const(reg->var_off)) { 6533 min_off = reg->var_off.value + off; 6534 if (access_size > 0) 6535 max_off = min_off + access_size - 1; 6536 else 6537 max_off = min_off; 6538 } else { 6539 if (reg->smax_value >= BPF_MAX_VAR_OFF || 6540 reg->smin_value <= -BPF_MAX_VAR_OFF) { 6541 verbose(env, "invalid unbounded variable-offset%s stack R%d\n", 6542 err_extra, regno); 6543 return -EACCES; 6544 } 6545 min_off = reg->smin_value + off; 6546 if (access_size > 0) 6547 max_off = reg->smax_value + off + access_size - 1; 6548 else 6549 max_off = min_off; 6550 } 6551 6552 err = check_stack_slot_within_bounds(min_off, state, type); 6553 if (!err) 6554 err = check_stack_slot_within_bounds(max_off, state, type); 6555 6556 if (err) { 6557 if (tnum_is_const(reg->var_off)) { 6558 verbose(env, "invalid%s stack R%d off=%d size=%d\n", 6559 err_extra, regno, off, access_size); 6560 } else { 6561 char tn_buf[48]; 6562 6563 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 6564 verbose(env, "invalid variable-offset%s stack R%d var_off=%s size=%d\n", 6565 err_extra, regno, tn_buf, access_size); 6566 } 6567 } 6568 return err; 6569 } 6570 6571 /* check whether memory at (regno + off) is accessible for t = (read | write) 6572 * if t==write, value_regno is a register which value is stored into memory 6573 * if t==read, value_regno is a register which will receive the value from memory 6574 * if t==write && value_regno==-1, some unknown value is stored into memory 6575 * if t==read && value_regno==-1, don't care what we read from memory 6576 */ 6577 static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, 6578 int off, int bpf_size, enum bpf_access_type t, 6579 int value_regno, bool strict_alignment_once, bool is_ldsx) 6580 { 6581 struct bpf_reg_state *regs = cur_regs(env); 6582 struct bpf_reg_state *reg = regs + regno; 6583 struct bpf_func_state *state; 6584 int size, err = 0; 6585 6586 size = bpf_size_to_bytes(bpf_size); 6587 if (size < 0) 6588 return size; 6589 6590 /* alignment checks will add in reg->off themselves */ 6591 err = check_ptr_alignment(env, reg, off, size, strict_alignment_once); 6592 if (err) 6593 return err; 6594 6595 /* for access checks, reg->off is just part of off */ 6596 off += reg->off; 6597 6598 if (reg->type == PTR_TO_MAP_KEY) { 6599 if (t == BPF_WRITE) { 
6600 verbose(env, "write to change key R%d not allowed\n", regno); 6601 return -EACCES; 6602 } 6603 6604 err = check_mem_region_access(env, regno, off, size, 6605 reg->map_ptr->key_size, false); 6606 if (err) 6607 return err; 6608 if (value_regno >= 0) 6609 mark_reg_unknown(env, regs, value_regno); 6610 } else if (reg->type == PTR_TO_MAP_VALUE) { 6611 struct btf_field *kptr_field = NULL; 6612 6613 if (t == BPF_WRITE && value_regno >= 0 && 6614 is_pointer_value(env, value_regno)) { 6615 verbose(env, "R%d leaks addr into map\n", value_regno); 6616 return -EACCES; 6617 } 6618 err = check_map_access_type(env, regno, off, size, t); 6619 if (err) 6620 return err; 6621 err = check_map_access(env, regno, off, size, false, ACCESS_DIRECT); 6622 if (err) 6623 return err; 6624 if (tnum_is_const(reg->var_off)) 6625 kptr_field = btf_record_find(reg->map_ptr->record, 6626 off + reg->var_off.value, BPF_KPTR); 6627 if (kptr_field) { 6628 err = check_map_kptr_access(env, regno, value_regno, insn_idx, kptr_field); 6629 } else if (t == BPF_READ && value_regno >= 0) { 6630 struct bpf_map *map = reg->map_ptr; 6631 6632 /* if map is read-only, track its contents as scalars */ 6633 if (tnum_is_const(reg->var_off) && 6634 bpf_map_is_rdonly(map) && 6635 map->ops->map_direct_value_addr) { 6636 int map_off = off + reg->var_off.value; 6637 u64 val = 0; 6638 6639 err = bpf_map_direct_read(map, map_off, size, 6640 &val, is_ldsx); 6641 if (err) 6642 return err; 6643 6644 regs[value_regno].type = SCALAR_VALUE; 6645 __mark_reg_known(&regs[value_regno], val); 6646 } else { 6647 mark_reg_unknown(env, regs, value_regno); 6648 } 6649 } 6650 } else if (base_type(reg->type) == PTR_TO_MEM) { 6651 bool rdonly_mem = type_is_rdonly_mem(reg->type); 6652 6653 if (type_may_be_null(reg->type)) { 6654 verbose(env, "R%d invalid mem access '%s'\n", regno, 6655 reg_type_str(env, reg->type)); 6656 return -EACCES; 6657 } 6658 6659 if (t == BPF_WRITE && rdonly_mem) { 6660 verbose(env, "R%d cannot write into %s\n", 6661 regno, reg_type_str(env, reg->type)); 6662 return -EACCES; 6663 } 6664 6665 if (t == BPF_WRITE && value_regno >= 0 && 6666 is_pointer_value(env, value_regno)) { 6667 verbose(env, "R%d leaks addr into mem\n", value_regno); 6668 return -EACCES; 6669 } 6670 6671 err = check_mem_region_access(env, regno, off, size, 6672 reg->mem_size, false); 6673 if (!err && value_regno >= 0 && (t == BPF_READ || rdonly_mem)) 6674 mark_reg_unknown(env, regs, value_regno); 6675 } else if (reg->type == PTR_TO_CTX) { 6676 enum bpf_reg_type reg_type = SCALAR_VALUE; 6677 struct btf *btf = NULL; 6678 u32 btf_id = 0; 6679 6680 if (t == BPF_WRITE && value_regno >= 0 && 6681 is_pointer_value(env, value_regno)) { 6682 verbose(env, "R%d leaks addr into ctx\n", value_regno); 6683 return -EACCES; 6684 } 6685 6686 err = check_ptr_off_reg(env, reg, regno); 6687 if (err < 0) 6688 return err; 6689 6690 err = check_ctx_access(env, insn_idx, off, size, t, &reg_type, &btf, 6691 &btf_id); 6692 if (err) 6693 verbose_linfo(env, insn_idx, "; "); 6694 if (!err && t == BPF_READ && value_regno >= 0) { 6695 /* ctx access returns either a scalar, or a 6696 * PTR_TO_PACKET[_META,_END]. In the latter 6697 * case, we know the offset is zero.
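 *
 * E.g. (illustrative) a load of __sk_buff->data in a SCHED_CLS program
 * yields PTR_TO_PACKET, and such a pointer is known to start at packet
 * offset zero, hence mark_reg_known_zero() below.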
6698 */ 6699 if (reg_type == SCALAR_VALUE) { 6700 mark_reg_unknown(env, regs, value_regno); 6701 } else { 6702 mark_reg_known_zero(env, regs, 6703 value_regno); 6704 if (type_may_be_null(reg_type)) 6705 regs[value_regno].id = ++env->id_gen; 6706 /* A load of ctx field could have different 6707 * actual load size with the one encoded in the 6708 * insn. When the dst is PTR, it is for sure not 6709 * a sub-register. 6710 */ 6711 regs[value_regno].subreg_def = DEF_NOT_SUBREG; 6712 if (base_type(reg_type) == PTR_TO_BTF_ID) { 6713 regs[value_regno].btf = btf; 6714 regs[value_regno].btf_id = btf_id; 6715 } 6716 } 6717 regs[value_regno].type = reg_type; 6718 } 6719 6720 } else if (reg->type == PTR_TO_STACK) { 6721 /* Basic bounds checks. */ 6722 err = check_stack_access_within_bounds(env, regno, off, size, ACCESS_DIRECT, t); 6723 if (err) 6724 return err; 6725 6726 state = func(env, reg); 6727 err = update_stack_depth(env, state, off); 6728 if (err) 6729 return err; 6730 6731 if (t == BPF_READ) 6732 err = check_stack_read(env, regno, off, size, 6733 value_regno); 6734 else 6735 err = check_stack_write(env, regno, off, size, 6736 value_regno, insn_idx); 6737 } else if (reg_is_pkt_pointer(reg)) { 6738 if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) { 6739 verbose(env, "cannot write into packet\n"); 6740 return -EACCES; 6741 } 6742 if (t == BPF_WRITE && value_regno >= 0 && 6743 is_pointer_value(env, value_regno)) { 6744 verbose(env, "R%d leaks addr into packet\n", 6745 value_regno); 6746 return -EACCES; 6747 } 6748 err = check_packet_access(env, regno, off, size, false); 6749 if (!err && t == BPF_READ && value_regno >= 0) 6750 mark_reg_unknown(env, regs, value_regno); 6751 } else if (reg->type == PTR_TO_FLOW_KEYS) { 6752 if (t == BPF_WRITE && value_regno >= 0 && 6753 is_pointer_value(env, value_regno)) { 6754 verbose(env, "R%d leaks addr into flow keys\n", 6755 value_regno); 6756 return -EACCES; 6757 } 6758 6759 err = check_flow_keys_access(env, off, size); 6760 if (!err && t == BPF_READ && value_regno >= 0) 6761 mark_reg_unknown(env, regs, value_regno); 6762 } else if (type_is_sk_pointer(reg->type)) { 6763 if (t == BPF_WRITE) { 6764 verbose(env, "R%d cannot write into %s\n", 6765 regno, reg_type_str(env, reg->type)); 6766 return -EACCES; 6767 } 6768 err = check_sock_access(env, insn_idx, regno, off, size, t); 6769 if (!err && value_regno >= 0) 6770 mark_reg_unknown(env, regs, value_regno); 6771 } else if (reg->type == PTR_TO_TP_BUFFER) { 6772 err = check_tp_buffer_access(env, reg, regno, off, size); 6773 if (!err && t == BPF_READ && value_regno >= 0) 6774 mark_reg_unknown(env, regs, value_regno); 6775 } else if (base_type(reg->type) == PTR_TO_BTF_ID && 6776 !type_may_be_null(reg->type)) { 6777 err = check_ptr_to_btf_access(env, regs, regno, off, size, t, 6778 value_regno); 6779 } else if (reg->type == CONST_PTR_TO_MAP) { 6780 err = check_ptr_to_map_access(env, regs, regno, off, size, t, 6781 value_regno); 6782 } else if (base_type(reg->type) == PTR_TO_BUF) { 6783 bool rdonly_mem = type_is_rdonly_mem(reg->type); 6784 u32 *max_access; 6785 6786 if (rdonly_mem) { 6787 if (t == BPF_WRITE) { 6788 verbose(env, "R%d cannot write into %s\n", 6789 regno, reg_type_str(env, reg->type)); 6790 return -EACCES; 6791 } 6792 max_access = &env->prog->aux->max_rdonly_access; 6793 } else { 6794 max_access = &env->prog->aux->max_rdwr_access; 6795 } 6796 6797 err = check_buffer_access(env, reg, regno, off, size, false, 6798 max_access); 6799 6800 if (!err && value_regno >= 0 && (rdonly_mem || t == 
BPF_READ)) 6801 mark_reg_unknown(env, regs, value_regno); 6802 } else { 6803 verbose(env, "R%d invalid mem access '%s'\n", regno, 6804 reg_type_str(env, reg->type)); 6805 return -EACCES; 6806 } 6807 6808 if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ && 6809 regs[value_regno].type == SCALAR_VALUE) { 6810 if (!is_ldsx) 6811 /* b/h/w load zero-extends, mark upper bits as known 0 */ 6812 coerce_reg_to_size(&regs[value_regno], size); 6813 else 6814 coerce_reg_to_size_sx(&regs[value_regno], size); 6815 } 6816 return err; 6817 } 6818 6819 static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn) 6820 { 6821 int load_reg; 6822 int err; 6823 6824 switch (insn->imm) { 6825 case BPF_ADD: 6826 case BPF_ADD | BPF_FETCH: 6827 case BPF_AND: 6828 case BPF_AND | BPF_FETCH: 6829 case BPF_OR: 6830 case BPF_OR | BPF_FETCH: 6831 case BPF_XOR: 6832 case BPF_XOR | BPF_FETCH: 6833 case BPF_XCHG: 6834 case BPF_CMPXCHG: 6835 break; 6836 default: 6837 verbose(env, "BPF_ATOMIC uses invalid atomic opcode %02x\n", insn->imm); 6838 return -EINVAL; 6839 } 6840 6841 if (BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) { 6842 verbose(env, "invalid atomic operand size\n"); 6843 return -EINVAL; 6844 } 6845 6846 /* check src1 operand */ 6847 err = check_reg_arg(env, insn->src_reg, SRC_OP); 6848 if (err) 6849 return err; 6850 6851 /* check src2 operand */ 6852 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 6853 if (err) 6854 return err; 6855 6856 if (insn->imm == BPF_CMPXCHG) { 6857 /* Check comparison of R0 with memory location */ 6858 const u32 aux_reg = BPF_REG_0; 6859 6860 err = check_reg_arg(env, aux_reg, SRC_OP); 6861 if (err) 6862 return err; 6863 6864 if (is_pointer_value(env, aux_reg)) { 6865 verbose(env, "R%d leaks addr into mem\n", aux_reg); 6866 return -EACCES; 6867 } 6868 } 6869 6870 if (is_pointer_value(env, insn->src_reg)) { 6871 verbose(env, "R%d leaks addr into mem\n", insn->src_reg); 6872 return -EACCES; 6873 } 6874 6875 if (is_ctx_reg(env, insn->dst_reg) || 6876 is_pkt_reg(env, insn->dst_reg) || 6877 is_flow_key_reg(env, insn->dst_reg) || 6878 is_sk_reg(env, insn->dst_reg)) { 6879 verbose(env, "BPF_ATOMIC stores into R%d %s is not allowed\n", 6880 insn->dst_reg, 6881 reg_type_str(env, reg_state(env, insn->dst_reg)->type)); 6882 return -EACCES; 6883 } 6884 6885 if (insn->imm & BPF_FETCH) { 6886 if (insn->imm == BPF_CMPXCHG) 6887 load_reg = BPF_REG_0; 6888 else 6889 load_reg = insn->src_reg; 6890 6891 /* check and record load of old value */ 6892 err = check_reg_arg(env, load_reg, DST_OP); 6893 if (err) 6894 return err; 6895 } else { 6896 /* This instruction accesses a memory location but doesn't 6897 * actually load it into a register. 6898 */ 6899 load_reg = -1; 6900 } 6901 6902 /* Check whether we can read the memory, with second call for fetch 6903 * case to simulate the register fill. 6904 */ 6905 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, 6906 BPF_SIZE(insn->code), BPF_READ, -1, true, false); 6907 if (!err && load_reg >= 0) 6908 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, 6909 BPF_SIZE(insn->code), BPF_READ, load_reg, 6910 true, false); 6911 if (err) 6912 return err; 6913 6914 /* Check whether we can write into the same memory.
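 * E.g. an atomic 'lock *(u32 *)(dst_reg + off) += src_reg' is a
 * read-modify-write, so being readable alone is not sufficient.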
*/ 6915 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, 6916 BPF_SIZE(insn->code), BPF_WRITE, -1, true, false); 6917 if (err) 6918 return err; 6919 6920 return 0; 6921 } 6922 6923 /* When register 'regno' is used to read the stack (either directly or through 6924 * a helper function) make sure that it's within stack boundary and, depending 6925 * on the access type, that all elements of the stack are initialized. 6926 * 6927 * 'off' includes 'regno->off', but not its dynamic part (if any). 6928 * 6929 * All registers that have been spilled on the stack in the slots within the 6930 * read offsets are marked as read. 6931 */ 6932 static int check_stack_range_initialized( 6933 struct bpf_verifier_env *env, int regno, int off, 6934 int access_size, bool zero_size_allowed, 6935 enum bpf_access_src type, struct bpf_call_arg_meta *meta) 6936 { 6937 struct bpf_reg_state *reg = reg_state(env, regno); 6938 struct bpf_func_state *state = func(env, reg); 6939 int err, min_off, max_off, i, j, slot, spi; 6940 char *err_extra = type == ACCESS_HELPER ? " indirect" : ""; 6941 enum bpf_access_type bounds_check_type; 6942 /* Some accesses can write anything into the stack, others are 6943 * read-only. 6944 */ 6945 bool clobber = false; 6946 6947 if (access_size == 0 && !zero_size_allowed) { 6948 verbose(env, "invalid zero-sized read\n"); 6949 return -EACCES; 6950 } 6951 6952 if (type == ACCESS_HELPER) { 6953 /* The bounds checks for writes are more permissive than for 6954 * reads. However, if raw_mode is not set, we'll do extra 6955 * checks below. 6956 */ 6957 bounds_check_type = BPF_WRITE; 6958 clobber = true; 6959 } else { 6960 bounds_check_type = BPF_READ; 6961 } 6962 err = check_stack_access_within_bounds(env, regno, off, access_size, 6963 type, bounds_check_type); 6964 if (err) 6965 return err; 6966 6967 6968 if (tnum_is_const(reg->var_off)) { 6969 min_off = max_off = reg->var_off.value + off; 6970 } else { 6971 /* Variable offset is prohibited for unprivileged mode for 6972 * simplicity since it requires corresponding support in 6973 * Spectre masking for stack ALU. 6974 * See also retrieve_ptr_limit(). 6975 */ 6976 if (!env->bypass_spec_v1) { 6977 char tn_buf[48]; 6978 6979 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 6980 verbose(env, "R%d%s variable offset stack access prohibited for !root, var_off=%s\n", 6981 regno, err_extra, tn_buf); 6982 return -EACCES; 6983 } 6984 /* Only initialized buffer on stack is allowed to be accessed 6985 * with variable offset. With uninitialized buffer it's hard to 6986 * guarantee that whole memory is marked as initialized on 6987 * helper return since specific bounds are unknown what may 6988 * cause uninitialized stack leaking. 6989 */ 6990 if (meta && meta->raw_mode) 6991 meta = NULL; 6992 6993 min_off = reg->smin_value + off; 6994 max_off = reg->smax_value + off; 6995 } 6996 6997 if (meta && meta->raw_mode) { 6998 /* Ensure we won't be overwriting dynptrs when simulating byte 6999 * by byte access in check_helper_call using meta.access_size. 7000 * This would be a problem if we have a helper in the future 7001 * which takes: 7002 * 7003 * helper(uninit_mem, len, dynptr) 7004 * 7005 * Now, uninint_mem may overlap with dynptr pointer. Hence, it 7006 * may end up writing to dynptr itself when touching memory from 7007 * arg 1. This can be relaxed on a case by case basis for known 7008 * safe cases, but reject due to the possibilitiy of aliasing by 7009 * default. 
7010 */ 7011 for (i = min_off; i < max_off + access_size; i++) { 7012 int stack_off = -i - 1; 7013 7014 spi = __get_spi(i); 7015 /* raw_mode may write past allocated_stack */ 7016 if (state->allocated_stack <= stack_off) 7017 continue; 7018 if (state->stack[spi].slot_type[stack_off % BPF_REG_SIZE] == STACK_DYNPTR) { 7019 verbose(env, "potential write to dynptr at off=%d disallowed\n", i); 7020 return -EACCES; 7021 } 7022 } 7023 meta->access_size = access_size; 7024 meta->regno = regno; 7025 return 0; 7026 } 7027 7028 for (i = min_off; i < max_off + access_size; i++) { 7029 u8 *stype; 7030 7031 slot = -i - 1; 7032 spi = slot / BPF_REG_SIZE; 7033 if (state->allocated_stack <= slot) 7034 goto err; 7035 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; 7036 if (*stype == STACK_MISC) 7037 goto mark; 7038 if ((*stype == STACK_ZERO) || 7039 (*stype == STACK_INVALID && env->allow_uninit_stack)) { 7040 if (clobber) { 7041 /* helper can write anything into the stack */ 7042 *stype = STACK_MISC; 7043 } 7044 goto mark; 7045 } 7046 7047 if (is_spilled_reg(&state->stack[spi]) && 7048 (state->stack[spi].spilled_ptr.type == SCALAR_VALUE || 7049 env->allow_ptr_leaks)) { 7050 if (clobber) { 7051 __mark_reg_unknown(env, &state->stack[spi].spilled_ptr); 7052 for (j = 0; j < BPF_REG_SIZE; j++) 7053 scrub_spilled_slot(&state->stack[spi].slot_type[j]); 7054 } 7055 goto mark; 7056 } 7057 7058 err: 7059 if (tnum_is_const(reg->var_off)) { 7060 verbose(env, "invalid%s read from stack R%d off %d+%d size %d\n", 7061 err_extra, regno, min_off, i - min_off, access_size); 7062 } else { 7063 char tn_buf[48]; 7064 7065 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 7066 verbose(env, "invalid%s read from stack R%d var_off %s+%d size %d\n", 7067 err_extra, regno, tn_buf, i - min_off, access_size); 7068 } 7069 return -EACCES; 7070 mark: 7071 /* reading any byte out of 8-byte 'spill_slot' will cause 7072 * the whole slot to be marked as 'read' 7073 */ 7074 mark_reg_read(env, &state->stack[spi].spilled_ptr, 7075 state->stack[spi].spilled_ptr.parent, 7076 REG_LIVE_READ64); 7077 /* We do not set REG_LIVE_WRITTEN for stack slot, as we can not 7078 * be sure that whether stack slot is written to or not. Hence, 7079 * we must still conservatively propagate reads upwards even if 7080 * helper may write to the entire memory range. 7081 */ 7082 } 7083 return update_stack_depth(env, state, min_off); 7084 } 7085 7086 static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, 7087 int access_size, bool zero_size_allowed, 7088 struct bpf_call_arg_meta *meta) 7089 { 7090 struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; 7091 u32 *max_access; 7092 7093 switch (base_type(reg->type)) { 7094 case PTR_TO_PACKET: 7095 case PTR_TO_PACKET_META: 7096 return check_packet_access(env, regno, reg->off, access_size, 7097 zero_size_allowed); 7098 case PTR_TO_MAP_KEY: 7099 if (meta && meta->raw_mode) { 7100 verbose(env, "R%d cannot write into %s\n", regno, 7101 reg_type_str(env, reg->type)); 7102 return -EACCES; 7103 } 7104 return check_mem_region_access(env, regno, reg->off, access_size, 7105 reg->map_ptr->key_size, false); 7106 case PTR_TO_MAP_VALUE: 7107 if (check_map_access_type(env, regno, reg->off, access_size, 7108 meta && meta->raw_mode ? 
BPF_WRITE : 7109 BPF_READ)) 7110 return -EACCES; 7111 return check_map_access(env, regno, reg->off, access_size, 7112 zero_size_allowed, ACCESS_HELPER); 7113 case PTR_TO_MEM: 7114 if (type_is_rdonly_mem(reg->type)) { 7115 if (meta && meta->raw_mode) { 7116 verbose(env, "R%d cannot write into %s\n", regno, 7117 reg_type_str(env, reg->type)); 7118 return -EACCES; 7119 } 7120 } 7121 return check_mem_region_access(env, regno, reg->off, 7122 access_size, reg->mem_size, 7123 zero_size_allowed); 7124 case PTR_TO_BUF: 7125 if (type_is_rdonly_mem(reg->type)) { 7126 if (meta && meta->raw_mode) { 7127 verbose(env, "R%d cannot write into %s\n", regno, 7128 reg_type_str(env, reg->type)); 7129 return -EACCES; 7130 } 7131 7132 max_access = &env->prog->aux->max_rdonly_access; 7133 } else { 7134 max_access = &env->prog->aux->max_rdwr_access; 7135 } 7136 return check_buffer_access(env, reg, regno, reg->off, 7137 access_size, zero_size_allowed, 7138 max_access); 7139 case PTR_TO_STACK: 7140 return check_stack_range_initialized( 7141 env, 7142 regno, reg->off, access_size, 7143 zero_size_allowed, ACCESS_HELPER, meta); 7144 case PTR_TO_BTF_ID: 7145 return check_ptr_to_btf_access(env, regs, regno, reg->off, 7146 access_size, BPF_READ, -1); 7147 case PTR_TO_CTX: 7148 /* in case the function doesn't know how to access the context, 7149 * (because we are in a program of type SYSCALL for example), we 7150 * can not statically check its size. 7151 * Dynamically check it now. 7152 */ 7153 if (!env->ops->convert_ctx_access) { 7154 enum bpf_access_type atype = meta && meta->raw_mode ? BPF_WRITE : BPF_READ; 7155 int offset = access_size - 1; 7156 7157 /* Allow zero-byte read from PTR_TO_CTX */ 7158 if (access_size == 0) 7159 return zero_size_allowed ? 0 : -EACCES; 7160 7161 return check_mem_access(env, env->insn_idx, regno, offset, BPF_B, 7162 atype, -1, false, false); 7163 } 7164 7165 fallthrough; 7166 default: /* scalar_value or invalid ptr */ 7167 /* Allow zero-byte read from NULL, regardless of pointer type */ 7168 if (zero_size_allowed && access_size == 0 && 7169 register_is_null(reg)) 7170 return 0; 7171 7172 verbose(env, "R%d type=%s ", regno, 7173 reg_type_str(env, reg->type)); 7174 verbose(env, "expected=%s\n", reg_type_str(env, PTR_TO_STACK)); 7175 return -EACCES; 7176 } 7177 } 7178 7179 static int check_mem_size_reg(struct bpf_verifier_env *env, 7180 struct bpf_reg_state *reg, u32 regno, 7181 bool zero_size_allowed, 7182 struct bpf_call_arg_meta *meta) 7183 { 7184 int err; 7185 7186 /* This is used to refine r0 return value bounds for helpers 7187 * that enforce this value as an upper bound on return values. 7188 * See do_refine_retval_range() for helpers that can refine 7189 * the return value. C type of helper is u32 so we pull register 7190 * bound from umax_value however, if negative verifier errors 7191 * out. Only upper bounds can be learned because retval is an 7192 * int type and negative retvals are allowed. 7193 */ 7194 meta->msize_max_value = reg->umax_value; 7195 7196 /* The register is SCALAR_VALUE; the access check 7197 * happens using its boundaries. 7198 */ 7199 if (!tnum_is_const(reg->var_off)) 7200 /* For unprivileged variable accesses, disable raw 7201 * mode so that the program is required to 7202 * initialize all the memory that the helper could 7203 * just partially fill up. 
7204 */ 7205 meta = NULL; 7206 7207 if (reg->smin_value < 0) { 7208 verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n", 7209 regno); 7210 return -EACCES; 7211 } 7212 7213 if (reg->umin_value == 0) { 7214 err = check_helper_mem_access(env, regno - 1, 0, 7215 zero_size_allowed, 7216 meta); 7217 if (err) 7218 return err; 7219 } 7220 7221 if (reg->umax_value >= BPF_MAX_VAR_SIZ) { 7222 verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n", 7223 regno); 7224 return -EACCES; 7225 } 7226 err = check_helper_mem_access(env, regno - 1, 7227 reg->umax_value, 7228 zero_size_allowed, meta); 7229 if (!err) 7230 err = mark_chain_precision(env, regno); 7231 return err; 7232 } 7233 7234 int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg, 7235 u32 regno, u32 mem_size) 7236 { 7237 bool may_be_null = type_may_be_null(reg->type); 7238 struct bpf_reg_state saved_reg; 7239 struct bpf_call_arg_meta meta; 7240 int err; 7241 7242 if (register_is_null(reg)) 7243 return 0; 7244 7245 memset(&meta, 0, sizeof(meta)); 7246 /* Assuming that the register contains a value check if the memory 7247 * access is safe. Temporarily save and restore the register's state as 7248 * the conversion shouldn't be visible to a caller. 7249 */ 7250 if (may_be_null) { 7251 saved_reg = *reg; 7252 mark_ptr_not_null_reg(reg); 7253 } 7254 7255 err = check_helper_mem_access(env, regno, mem_size, true, &meta); 7256 /* Check access for BPF_WRITE */ 7257 meta.raw_mode = true; 7258 err = err ?: check_helper_mem_access(env, regno, mem_size, true, &meta); 7259 7260 if (may_be_null) 7261 *reg = saved_reg; 7262 7263 return err; 7264 } 7265 7266 static int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg, 7267 u32 regno) 7268 { 7269 struct bpf_reg_state *mem_reg = &cur_regs(env)[regno - 1]; 7270 bool may_be_null = type_may_be_null(mem_reg->type); 7271 struct bpf_reg_state saved_reg; 7272 struct bpf_call_arg_meta meta; 7273 int err; 7274 7275 WARN_ON_ONCE(regno < BPF_REG_2 || regno > BPF_REG_5); 7276 7277 memset(&meta, 0, sizeof(meta)); 7278 7279 if (may_be_null) { 7280 saved_reg = *mem_reg; 7281 mark_ptr_not_null_reg(mem_reg); 7282 } 7283 7284 err = check_mem_size_reg(env, reg, regno, true, &meta); 7285 /* Check access for BPF_WRITE */ 7286 meta.raw_mode = true; 7287 err = err ?: check_mem_size_reg(env, reg, regno, true, &meta); 7288 7289 if (may_be_null) 7290 *mem_reg = saved_reg; 7291 return err; 7292 } 7293 7294 /* Implementation details: 7295 * bpf_map_lookup returns PTR_TO_MAP_VALUE_OR_NULL. 7296 * bpf_obj_new returns PTR_TO_BTF_ID | MEM_ALLOC | PTR_MAYBE_NULL. 7297 * Two bpf_map_lookups (even with the same key) will have different reg->id. 7298 * Two separate bpf_obj_new will also have different reg->id. 7299 * For traditional PTR_TO_MAP_VALUE or PTR_TO_BTF_ID | MEM_ALLOC, the verifier 7300 * clears reg->id after value_or_null->value transition, since the verifier only 7301 * cares about the range of access to valid map value pointer and doesn't care 7302 * about actual address of the map element. 7303 * For maps with 'struct bpf_spin_lock' inside map value the verifier keeps 7304 * reg->id > 0 after value_or_null->value transition. By doing so 7305 * two bpf_map_lookups will be considered two different pointers that 7306 * point to different bpf_spin_locks. Likewise for pointers to allocated objects 7307 * returned from bpf_obj_new. 7308 * The verifier allows taking only one bpf_spin_lock at a time to avoid 7309 * dead-locks. 
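 * E.g. (illustrative) a program that calls bpf_spin_lock() on one map value and
 * then calls bpf_spin_lock() again, on the same or another element, before
 * calling bpf_spin_unlock() is rejected by process_spin_lock() below because
 * cur_state->active_lock is already set.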
7310 * Since only one bpf_spin_lock is allowed the checks are simpler than 7311 * reg_is_refcounted() logic. The verifier needs to remember only 7312 * one spin_lock instead of array of acquired_refs. 7313 * cur_state->active_lock remembers which map value element or allocated 7314 * object got locked and clears it after bpf_spin_unlock. 7315 */ 7316 static int process_spin_lock(struct bpf_verifier_env *env, int regno, 7317 bool is_lock) 7318 { 7319 struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; 7320 struct bpf_verifier_state *cur = env->cur_state; 7321 bool is_const = tnum_is_const(reg->var_off); 7322 u64 val = reg->var_off.value; 7323 struct bpf_map *map = NULL; 7324 struct btf *btf = NULL; 7325 struct btf_record *rec; 7326 7327 if (!is_const) { 7328 verbose(env, 7329 "R%d doesn't have constant offset. bpf_spin_lock has to be at the constant offset\n", 7330 regno); 7331 return -EINVAL; 7332 } 7333 if (reg->type == PTR_TO_MAP_VALUE) { 7334 map = reg->map_ptr; 7335 if (!map->btf) { 7336 verbose(env, 7337 "map '%s' has to have BTF in order to use bpf_spin_lock\n", 7338 map->name); 7339 return -EINVAL; 7340 } 7341 } else { 7342 btf = reg->btf; 7343 } 7344 7345 rec = reg_btf_record(reg); 7346 if (!btf_record_has_field(rec, BPF_SPIN_LOCK)) { 7347 verbose(env, "%s '%s' has no valid bpf_spin_lock\n", map ? "map" : "local", 7348 map ? map->name : "kptr"); 7349 return -EINVAL; 7350 } 7351 if (rec->spin_lock_off != val + reg->off) { 7352 verbose(env, "off %lld doesn't point to 'struct bpf_spin_lock' that is at %d\n", 7353 val + reg->off, rec->spin_lock_off); 7354 return -EINVAL; 7355 } 7356 if (is_lock) { 7357 if (cur->active_lock.ptr) { 7358 verbose(env, 7359 "Locking two bpf_spin_locks are not allowed\n"); 7360 return -EINVAL; 7361 } 7362 if (map) 7363 cur->active_lock.ptr = map; 7364 else 7365 cur->active_lock.ptr = btf; 7366 cur->active_lock.id = reg->id; 7367 } else { 7368 void *ptr; 7369 7370 if (map) 7371 ptr = map; 7372 else 7373 ptr = btf; 7374 7375 if (!cur->active_lock.ptr) { 7376 verbose(env, "bpf_spin_unlock without taking a lock\n"); 7377 return -EINVAL; 7378 } 7379 if (cur->active_lock.ptr != ptr || 7380 cur->active_lock.id != reg->id) { 7381 verbose(env, "bpf_spin_unlock of different lock\n"); 7382 return -EINVAL; 7383 } 7384 7385 invalidate_non_owning_refs(env); 7386 7387 cur->active_lock.ptr = NULL; 7388 cur->active_lock.id = 0; 7389 } 7390 return 0; 7391 } 7392 7393 static int process_timer_func(struct bpf_verifier_env *env, int regno, 7394 struct bpf_call_arg_meta *meta) 7395 { 7396 struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; 7397 bool is_const = tnum_is_const(reg->var_off); 7398 struct bpf_map *map = reg->map_ptr; 7399 u64 val = reg->var_off.value; 7400 7401 if (!is_const) { 7402 verbose(env, 7403 "R%d doesn't have constant offset. bpf_timer has to be at the constant offset\n", 7404 regno); 7405 return -EINVAL; 7406 } 7407 if (!map->btf) { 7408 verbose(env, "map '%s' has to have BTF in order to use bpf_timer\n", 7409 map->name); 7410 return -EINVAL; 7411 } 7412 if (!btf_record_has_field(map->record, BPF_TIMER)) { 7413 verbose(env, "map '%s' has no valid bpf_timer\n", map->name); 7414 return -EINVAL; 7415 } 7416 if (map->record->timer_off != val + reg->off) { 7417 verbose(env, "off %lld doesn't point to 'struct bpf_timer' that is at %d\n", 7418 val + reg->off, map->record->timer_off); 7419 return -EINVAL; 7420 } 7421 if (meta->map_ptr) { 7422 verbose(env, "verifier bug. 
Two map pointers in a timer helper\n"); 7423 return -EFAULT; 7424 } 7425 meta->map_uid = reg->map_uid; 7426 meta->map_ptr = map; 7427 return 0; 7428 } 7429 7430 static int process_kptr_func(struct bpf_verifier_env *env, int regno, 7431 struct bpf_call_arg_meta *meta) 7432 { 7433 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; 7434 struct bpf_map *map_ptr = reg->map_ptr; 7435 struct btf_field *kptr_field; 7436 u32 kptr_off; 7437 7438 if (!tnum_is_const(reg->var_off)) { 7439 verbose(env, 7440 "R%d doesn't have constant offset. kptr has to be at the constant offset\n", 7441 regno); 7442 return -EINVAL; 7443 } 7444 if (!map_ptr->btf) { 7445 verbose(env, "map '%s' has to have BTF in order to use bpf_kptr_xchg\n", 7446 map_ptr->name); 7447 return -EINVAL; 7448 } 7449 if (!btf_record_has_field(map_ptr->record, BPF_KPTR)) { 7450 verbose(env, "map '%s' has no valid kptr\n", map_ptr->name); 7451 return -EINVAL; 7452 } 7453 7454 meta->map_ptr = map_ptr; 7455 kptr_off = reg->off + reg->var_off.value; 7456 kptr_field = btf_record_find(map_ptr->record, kptr_off, BPF_KPTR); 7457 if (!kptr_field) { 7458 verbose(env, "off=%d doesn't point to kptr\n", kptr_off); 7459 return -EACCES; 7460 } 7461 if (kptr_field->type != BPF_KPTR_REF && kptr_field->type != BPF_KPTR_PERCPU) { 7462 verbose(env, "off=%d kptr isn't referenced kptr\n", kptr_off); 7463 return -EACCES; 7464 } 7465 meta->kptr_field = kptr_field; 7466 return 0; 7467 } 7468 7469 /* There are two register types representing a bpf_dynptr, one is PTR_TO_STACK 7470 * which points to a stack slot, and the other is CONST_PTR_TO_DYNPTR. 7471 * 7472 * In both cases we deal with the first 8 bytes, but need to mark the next 8 7473 * bytes as STACK_DYNPTR in case of PTR_TO_STACK. In case of 7474 * CONST_PTR_TO_DYNPTR, we are guaranteed to get the beginning of the object. 7475 * 7476 * Mutability of bpf_dynptr is at two levels, one is at the level of struct 7477 * bpf_dynptr itself, i.e. whether the helper is receiving a pointer to struct 7478 * bpf_dynptr or pointer to const struct bpf_dynptr. In the former case, it can 7479 * mutate the view of the dynptr and also possibly destroy it. In the latter 7480 * case, it cannot mutate the bpf_dynptr itself but it can still mutate the 7481 * memory that dynptr points to. 7482 * 7483 * The verifier will keep track of both levels of mutation (bpf_dynptr's in 7484 * reg->type and the memory's in reg->dynptr.type), but there is no support for 7485 * readonly dynptr view yet, hence only the first case is tracked and checked. 7486 * 7487 * This is consistent with how C applies the const modifier to a struct object, 7488 * where the pointer itself inside bpf_dynptr becomes const but not what it 7489 * points to. 7490 * 7491 * Helpers which do not mutate the bpf_dynptr set MEM_RDONLY in their argument 7492 * type, and declare it as 'const struct bpf_dynptr *' in their prototype.
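 * E.g. (illustrative, see the uapi helper descriptions for the authoritative
 * prototypes) bpf_dynptr_read(void *dst, u32 len, const struct bpf_dynptr *src,
 * u32 offset, u64 flags) only reads through the dynptr and so takes a const
 * pointer, while bpf_dynptr_from_mem(void *data, u32 size, u64 flags,
 * struct bpf_dynptr *ptr) initializes *ptr and therefore takes a non-const one.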
7493 */ 7494 static int process_dynptr_func(struct bpf_verifier_env *env, int regno, int insn_idx, 7495 enum bpf_arg_type arg_type, int clone_ref_obj_id) 7496 { 7497 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; 7498 int err; 7499 7500 /* MEM_UNINIT and MEM_RDONLY are exclusive, when applied to an 7501 * ARG_PTR_TO_DYNPTR (or ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_*): 7502 */ 7503 if ((arg_type & (MEM_UNINIT | MEM_RDONLY)) == (MEM_UNINIT | MEM_RDONLY)) { 7504 verbose(env, "verifier internal error: misconfigured dynptr helper type flags\n"); 7505 return -EFAULT; 7506 } 7507 7508 /* MEM_UNINIT - Points to memory that is an appropriate candidate for 7509 * constructing a mutable bpf_dynptr object. 7510 * 7511 * Currently, this is only possible with PTR_TO_STACK 7512 * pointing to a region of at least 16 bytes which doesn't 7513 * contain an existing bpf_dynptr. 7514 * 7515 * MEM_RDONLY - Points to an initialized bpf_dynptr that will not be 7516 * mutated or destroyed. However, the memory it points to 7517 * may be mutated. 7518 * 7519 * None - Points to an initialized dynptr that can be mutated and 7520 * destroyed, including mutation of the memory it points 7521 * to. 7522 */ 7523 if (arg_type & MEM_UNINIT) { 7524 int i; 7525 7526 if (!is_dynptr_reg_valid_uninit(env, reg)) { 7527 verbose(env, "Dynptr has to be an uninitialized dynptr\n"); 7528 return -EINVAL; 7529 } 7530 7531 /* we write BPF_DW bits (8 bytes) at a time */ 7532 for (i = 0; i < BPF_DYNPTR_SIZE; i += 8) { 7533 err = check_mem_access(env, insn_idx, regno, 7534 i, BPF_DW, BPF_WRITE, -1, false, false); 7535 if (err) 7536 return err; 7537 } 7538 7539 err = mark_stack_slots_dynptr(env, reg, arg_type, insn_idx, clone_ref_obj_id); 7540 } else /* MEM_RDONLY and None case from above */ { 7541 /* For the reg->type == PTR_TO_STACK case, bpf_dynptr is never const */ 7542 if (reg->type == CONST_PTR_TO_DYNPTR && !(arg_type & MEM_RDONLY)) { 7543 verbose(env, "cannot pass pointer to const bpf_dynptr, the helper mutates it\n"); 7544 return -EINVAL; 7545 } 7546 7547 if (!is_dynptr_reg_valid_init(env, reg)) { 7548 verbose(env, 7549 "Expected an initialized dynptr as arg #%d\n", 7550 regno); 7551 return -EINVAL; 7552 } 7553 7554 /* Fold modifiers (in this case, MEM_RDONLY) when checking expected type */ 7555 if (!is_dynptr_type_expected(env, reg, arg_type & ~MEM_RDONLY)) { 7556 verbose(env, 7557 "Expected a dynptr of type %s as arg #%d\n", 7558 dynptr_type_str(arg_to_dynptr_type(arg_type)), regno); 7559 return -EINVAL; 7560 } 7561 7562 err = mark_dynptr_read(env, reg); 7563 } 7564 return err; 7565 } 7566 7567 static u32 iter_ref_obj_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg, int spi) 7568 { 7569 struct bpf_func_state *state = func(env, reg); 7570 7571 return state->stack[spi].spilled_ptr.ref_obj_id; 7572 } 7573 7574 static bool is_iter_kfunc(struct bpf_kfunc_call_arg_meta *meta) 7575 { 7576 return meta->kfunc_flags & (KF_ITER_NEW | KF_ITER_NEXT | KF_ITER_DESTROY); 7577 } 7578 7579 static bool is_iter_new_kfunc(struct bpf_kfunc_call_arg_meta *meta) 7580 { 7581 return meta->kfunc_flags & KF_ITER_NEW; 7582 } 7583 7584 static bool is_iter_next_kfunc(struct bpf_kfunc_call_arg_meta *meta) 7585 { 7586 return meta->kfunc_flags & KF_ITER_NEXT; 7587 } 7588 7589 static bool is_iter_destroy_kfunc(struct bpf_kfunc_call_arg_meta *meta) 7590 { 7591 return meta->kfunc_flags & KF_ITER_DESTROY; 7592 } 7593 7594 static bool is_kfunc_arg_iter(struct bpf_kfunc_call_arg_meta *meta, int arg) 7595 { 7596 /* btf_check_iter_kfuncs() guarantees that
first argument of any iter 7597 * kfunc is iter state pointer 7598 */ 7599 return arg == 0 && is_iter_kfunc(meta); 7600 } 7601 7602 static int process_iter_arg(struct bpf_verifier_env *env, int regno, int insn_idx, 7603 struct bpf_kfunc_call_arg_meta *meta) 7604 { 7605 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; 7606 const struct btf_type *t; 7607 const struct btf_param *arg; 7608 int spi, err, i, nr_slots; 7609 u32 btf_id; 7610 7611 /* btf_check_iter_kfuncs() ensures we don't need to validate anything here */ 7612 arg = &btf_params(meta->func_proto)[0]; 7613 t = btf_type_skip_modifiers(meta->btf, arg->type, NULL); /* PTR */ 7614 t = btf_type_skip_modifiers(meta->btf, t->type, &btf_id); /* STRUCT */ 7615 nr_slots = t->size / BPF_REG_SIZE; 7616 7617 if (is_iter_new_kfunc(meta)) { 7618 /* bpf_iter_<type>_new() expects pointer to uninit iter state */ 7619 if (!is_iter_reg_valid_uninit(env, reg, nr_slots)) { 7620 verbose(env, "expected uninitialized iter_%s as arg #%d\n", 7621 iter_type_str(meta->btf, btf_id), regno); 7622 return -EINVAL; 7623 } 7624 7625 for (i = 0; i < nr_slots * 8; i += BPF_REG_SIZE) { 7626 err = check_mem_access(env, insn_idx, regno, 7627 i, BPF_DW, BPF_WRITE, -1, false, false); 7628 if (err) 7629 return err; 7630 } 7631 7632 err = mark_stack_slots_iter(env, meta, reg, insn_idx, meta->btf, btf_id, nr_slots); 7633 if (err) 7634 return err; 7635 } else { 7636 /* iter_next() or iter_destroy() expect initialized iter state */ 7637 err = is_iter_reg_valid_init(env, reg, meta->btf, btf_id, nr_slots); 7638 switch (err) { 7639 case 0: 7640 break; 7641 case -EINVAL: 7642 verbose(env, "expected an initialized iter_%s as arg #%d\n", 7643 iter_type_str(meta->btf, btf_id), regno); 7644 return err; 7645 case -EPROTO: 7646 verbose(env, "expected an RCU CS when using %s\n", meta->func_name); 7647 return err; 7648 default: 7649 return err; 7650 } 7651 7652 spi = iter_get_spi(env, reg, nr_slots); 7653 if (spi < 0) 7654 return spi; 7655 7656 err = mark_iter_read(env, reg, spi, nr_slots); 7657 if (err) 7658 return err; 7659 7660 /* remember meta->iter info for process_iter_next_call() */ 7661 meta->iter.spi = spi; 7662 meta->iter.frameno = reg->frameno; 7663 meta->ref_obj_id = iter_ref_obj_id(env, reg, spi); 7664 7665 if (is_iter_destroy_kfunc(meta)) { 7666 err = unmark_stack_slots_iter(env, reg, nr_slots); 7667 if (err) 7668 return err; 7669 } 7670 } 7671 7672 return 0; 7673 } 7674 7675 /* Look for a previous loop entry at insn_idx: nearest parent state 7676 * stopped at insn_idx with callsites matching those in cur->frame. 7677 */ 7678 static struct bpf_verifier_state *find_prev_entry(struct bpf_verifier_env *env, 7679 struct bpf_verifier_state *cur, 7680 int insn_idx) 7681 { 7682 struct bpf_verifier_state_list *sl; 7683 struct bpf_verifier_state *st; 7684 7685 /* Explored states are pushed in stack order, most recent states come first */ 7686 sl = *explored_state(env, insn_idx); 7687 for (; sl; sl = sl->next) { 7688 /* If st->branches != 0 state is a part of current DFS verification path, 7689 * hence cur & st for a loop.
7690 */ 7691 st = &sl->state; 7692 if (st->insn_idx == insn_idx && st->branches && same_callsites(st, cur) && 7693 st->dfs_depth < cur->dfs_depth) 7694 return st; 7695 } 7696 7697 return NULL; 7698 } 7699 7700 static void reset_idmap_scratch(struct bpf_verifier_env *env); 7701 static bool regs_exact(const struct bpf_reg_state *rold, 7702 const struct bpf_reg_state *rcur, 7703 struct bpf_idmap *idmap); 7704 7705 static void maybe_widen_reg(struct bpf_verifier_env *env, 7706 struct bpf_reg_state *rold, struct bpf_reg_state *rcur, 7707 struct bpf_idmap *idmap) 7708 { 7709 if (rold->type != SCALAR_VALUE) 7710 return; 7711 if (rold->type != rcur->type) 7712 return; 7713 if (rold->precise || rcur->precise || regs_exact(rold, rcur, idmap)) 7714 return; 7715 __mark_reg_unknown(env, rcur); 7716 } 7717 7718 static int widen_imprecise_scalars(struct bpf_verifier_env *env, 7719 struct bpf_verifier_state *old, 7720 struct bpf_verifier_state *cur) 7721 { 7722 struct bpf_func_state *fold, *fcur; 7723 int i, fr; 7724 7725 reset_idmap_scratch(env); 7726 for (fr = old->curframe; fr >= 0; fr--) { 7727 fold = old->frame[fr]; 7728 fcur = cur->frame[fr]; 7729 7730 for (i = 0; i < MAX_BPF_REG; i++) 7731 maybe_widen_reg(env, 7732 &fold->regs[i], 7733 &fcur->regs[i], 7734 &env->idmap_scratch); 7735 7736 for (i = 0; i < fold->allocated_stack / BPF_REG_SIZE; i++) { 7737 if (!is_spilled_reg(&fold->stack[i]) || 7738 !is_spilled_reg(&fcur->stack[i])) 7739 continue; 7740 7741 maybe_widen_reg(env, 7742 &fold->stack[i].spilled_ptr, 7743 &fcur->stack[i].spilled_ptr, 7744 &env->idmap_scratch); 7745 } 7746 } 7747 return 0; 7748 } 7749 7750 /* process_iter_next_call() is called when verifier gets to iterator's next 7751 * "method" (e.g., bpf_iter_num_next() for numbers iterator) call. We'll refer 7752 * to it as just "iter_next()" in comments below. 7753 * 7754 * BPF verifier relies on a crucial contract for any iter_next() 7755 * implementation: it should *eventually* return NULL, and once that happens 7756 * it should keep returning NULL. That is, once iterator exhausts elements to 7757 * iterate, it should never reset or spuriously return new elements. 7758 * 7759 * With the assumption of such contract, process_iter_next_call() simulates 7760 * a fork in the verifier state to validate loop logic correctness and safety 7761 * without having to simulate infinite amount of iterations. 7762 * 7763 * In current state, we first assume that iter_next() returned NULL and 7764 * iterator state is set to DRAINED (BPF_ITER_STATE_DRAINED). In such 7765 * conditions we should not form an infinite loop and should eventually reach 7766 * exit. 7767 * 7768 * Besides that, we also fork current state and enqueue it for later 7769 * verification. In a forked state we keep iterator state as ACTIVE 7770 * (BPF_ITER_STATE_ACTIVE) and assume non-NULL return from iter_next(). We 7771 * also bump iteration depth to prevent erroneous infinite loop detection 7772 * later on (see iter_active_depths_differ() comment for details). In this 7773 * state we assume that we'll eventually loop back to another iter_next() 7774 * calls (it could be in exactly same location or in some other instruction, 7775 * it doesn't matter, we don't make any unnecessary assumptions about this, 7776 * everything revolves around iterator state in a stack slot, not which 7777 * instruction is calling iter_next()). 
When that happens, we either will come 7778 * to iter_next() with equivalent state and can conclude that next iteration 7779 * will proceed in exactly the same way as we just verified, so it's safe to 7780 * assume that loop converges. If not, we'll go on another iteration 7781 * simulation with a different input state, until all possible starting states 7782 * are validated or we reach maximum number of instructions limit. 7783 * 7784 * This way, we will either exhaustively discover all possible input states 7785 * that iterator loop can start with and eventually will converge, or we'll 7786 * effectively regress into bounded loop simulation logic and either reach 7787 * maximum number of instructions if loop is not provably convergent, or there 7788 * is some statically known limit on number of iterations (e.g., if there is 7789 * an explicit `if n > 100 then break;` statement somewhere in the loop). 7790 * 7791 * Iteration convergence logic in is_state_visited() relies on exact 7792 * states comparison, which ignores read and precision marks. 7793 * This is necessary because read and precision marks are not finalized 7794 * while in the loop. Exact comparison might preclude convergence for 7795 * simple programs like below: 7796 * 7797 * i = 0; 7798 * while(iter_next(&it)) 7799 * i++; 7800 * 7801 * At each iteration step i++ would produce a new distinct state and 7802 * eventually instruction processing limit would be reached. 7803 * 7804 * To avoid such behavior speculatively forget (widen) range for 7805 * imprecise scalar registers, if those registers were not precise at the 7806 * end of the previous iteration and do not match exactly. 7807 * 7808 * This is a conservative heuristic that allows to verify wide range of programs, 7809 * however it precludes verification of programs that conjure an 7810 * imprecise value on the first loop iteration and use it as precise on a second. 7811 * For example, the following safe program would fail to verify: 7812 * 7813 * struct bpf_num_iter it; 7814 * int arr[10]; 7815 * int i = 0, a = 0; 7816 * bpf_iter_num_new(&it, 0, 10); 7817 * while (bpf_iter_num_next(&it)) { 7818 * if (a == 0) { 7819 * a = 1; 7820 * i = 7; // Because i changed verifier would forget 7821 * // it's range on second loop entry. 7822 * } else { 7823 * arr[i] = 42; // This would fail to verify. 7824 * } 7825 * } 7826 * bpf_iter_num_destroy(&it); 7827 */ 7828 static int process_iter_next_call(struct bpf_verifier_env *env, int insn_idx, 7829 struct bpf_kfunc_call_arg_meta *meta) 7830 { 7831 struct bpf_verifier_state *cur_st = env->cur_state, *queued_st, *prev_st; 7832 struct bpf_func_state *cur_fr = cur_st->frame[cur_st->curframe], *queued_fr; 7833 struct bpf_reg_state *cur_iter, *queued_iter; 7834 int iter_frameno = meta->iter.frameno; 7835 int iter_spi = meta->iter.spi; 7836 7837 BTF_TYPE_EMIT(struct bpf_iter); 7838 7839 cur_iter = &env->cur_state->frame[iter_frameno]->stack[iter_spi].spilled_ptr; 7840 7841 if (cur_iter->iter.state != BPF_ITER_STATE_ACTIVE && 7842 cur_iter->iter.state != BPF_ITER_STATE_DRAINED) { 7843 verbose(env, "verifier internal error: unexpected iterator state %d (%s)\n", 7844 cur_iter->iter.state, iter_state_str(cur_iter->iter.state)); 7845 return -EFAULT; 7846 } 7847 7848 if (cur_iter->iter.state == BPF_ITER_STATE_ACTIVE) { 7849 /* Because iter_next() call is a checkpoint is_state_visitied() 7850 * should guarantee parent state with same call sites and insn_idx. 
7851 */ 7852 if (!cur_st->parent || cur_st->parent->insn_idx != insn_idx || 7853 !same_callsites(cur_st->parent, cur_st)) { 7854 verbose(env, "bug: bad parent state for iter next call"); 7855 return -EFAULT; 7856 } 7857 /* Note cur_st->parent in the call below, it is necessary to skip 7858 * checkpoint created for cur_st by is_state_visited() 7859 * right at this instruction. 7860 */ 7861 prev_st = find_prev_entry(env, cur_st->parent, insn_idx); 7862 /* branch out active iter state */ 7863 queued_st = push_stack(env, insn_idx + 1, insn_idx, false); 7864 if (!queued_st) 7865 return -ENOMEM; 7866 7867 queued_iter = &queued_st->frame[iter_frameno]->stack[iter_spi].spilled_ptr; 7868 queued_iter->iter.state = BPF_ITER_STATE_ACTIVE; 7869 queued_iter->iter.depth++; 7870 if (prev_st) 7871 widen_imprecise_scalars(env, prev_st, queued_st); 7872 7873 queued_fr = queued_st->frame[queued_st->curframe]; 7874 mark_ptr_not_null_reg(&queued_fr->regs[BPF_REG_0]); 7875 } 7876 7877 /* switch to DRAINED state, but keep the depth unchanged */ 7878 /* mark current iter state as drained and assume returned NULL */ 7879 cur_iter->iter.state = BPF_ITER_STATE_DRAINED; 7880 __mark_reg_const_zero(&cur_fr->regs[BPF_REG_0]); 7881 7882 return 0; 7883 } 7884 7885 static bool arg_type_is_mem_size(enum bpf_arg_type type) 7886 { 7887 return type == ARG_CONST_SIZE || 7888 type == ARG_CONST_SIZE_OR_ZERO; 7889 } 7890 7891 static bool arg_type_is_release(enum bpf_arg_type type) 7892 { 7893 return type & OBJ_RELEASE; 7894 } 7895 7896 static bool arg_type_is_dynptr(enum bpf_arg_type type) 7897 { 7898 return base_type(type) == ARG_PTR_TO_DYNPTR; 7899 } 7900 7901 static int int_ptr_type_to_size(enum bpf_arg_type type) 7902 { 7903 if (type == ARG_PTR_TO_INT) 7904 return sizeof(u32); 7905 else if (type == ARG_PTR_TO_LONG) 7906 return sizeof(u64); 7907 7908 return -EINVAL; 7909 } 7910 7911 static int resolve_map_arg_type(struct bpf_verifier_env *env, 7912 const struct bpf_call_arg_meta *meta, 7913 enum bpf_arg_type *arg_type) 7914 { 7915 if (!meta->map_ptr) { 7916 /* kernel subsystem misconfigured verifier */ 7917 verbose(env, "invalid map_ptr to access map->type\n"); 7918 return -EACCES; 7919 } 7920 7921 switch (meta->map_ptr->map_type) { 7922 case BPF_MAP_TYPE_SOCKMAP: 7923 case BPF_MAP_TYPE_SOCKHASH: 7924 if (*arg_type == ARG_PTR_TO_MAP_VALUE) { 7925 *arg_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON; 7926 } else { 7927 verbose(env, "invalid arg_type for sockmap/sockhash\n"); 7928 return -EINVAL; 7929 } 7930 break; 7931 case BPF_MAP_TYPE_BLOOM_FILTER: 7932 if (meta->func_id == BPF_FUNC_map_peek_elem) 7933 *arg_type = ARG_PTR_TO_MAP_VALUE; 7934 break; 7935 default: 7936 break; 7937 } 7938 return 0; 7939 } 7940 7941 struct bpf_reg_types { 7942 const enum bpf_reg_type types[10]; 7943 u32 *btf_id; 7944 }; 7945 7946 static const struct bpf_reg_types sock_types = { 7947 .types = { 7948 PTR_TO_SOCK_COMMON, 7949 PTR_TO_SOCKET, 7950 PTR_TO_TCP_SOCK, 7951 PTR_TO_XDP_SOCK, 7952 }, 7953 }; 7954 7955 #ifdef CONFIG_NET 7956 static const struct bpf_reg_types btf_id_sock_common_types = { 7957 .types = { 7958 PTR_TO_SOCK_COMMON, 7959 PTR_TO_SOCKET, 7960 PTR_TO_TCP_SOCK, 7961 PTR_TO_XDP_SOCK, 7962 PTR_TO_BTF_ID, 7963 PTR_TO_BTF_ID | PTR_TRUSTED, 7964 }, 7965 .btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON], 7966 }; 7967 #endif 7968 7969 static const struct bpf_reg_types mem_types = { 7970 .types = { 7971 PTR_TO_STACK, 7972 PTR_TO_PACKET, 7973 PTR_TO_PACKET_META, 7974 PTR_TO_MAP_KEY, 7975 PTR_TO_MAP_VALUE, 7976 PTR_TO_MEM, 7977 PTR_TO_MEM | MEM_RINGBUF, 7978 
PTR_TO_BUF, 7979 PTR_TO_BTF_ID | PTR_TRUSTED, 7980 }, 7981 }; 7982 7983 static const struct bpf_reg_types int_ptr_types = { 7984 .types = { 7985 PTR_TO_STACK, 7986 PTR_TO_PACKET, 7987 PTR_TO_PACKET_META, 7988 PTR_TO_MAP_KEY, 7989 PTR_TO_MAP_VALUE, 7990 }, 7991 }; 7992 7993 static const struct bpf_reg_types spin_lock_types = { 7994 .types = { 7995 PTR_TO_MAP_VALUE, 7996 PTR_TO_BTF_ID | MEM_ALLOC, 7997 } 7998 }; 7999 8000 static const struct bpf_reg_types fullsock_types = { .types = { PTR_TO_SOCKET } }; 8001 static const struct bpf_reg_types scalar_types = { .types = { SCALAR_VALUE } }; 8002 static const struct bpf_reg_types context_types = { .types = { PTR_TO_CTX } }; 8003 static const struct bpf_reg_types ringbuf_mem_types = { .types = { PTR_TO_MEM | MEM_RINGBUF } }; 8004 static const struct bpf_reg_types const_map_ptr_types = { .types = { CONST_PTR_TO_MAP } }; 8005 static const struct bpf_reg_types btf_ptr_types = { 8006 .types = { 8007 PTR_TO_BTF_ID, 8008 PTR_TO_BTF_ID | PTR_TRUSTED, 8009 PTR_TO_BTF_ID | MEM_RCU, 8010 }, 8011 }; 8012 static const struct bpf_reg_types percpu_btf_ptr_types = { 8013 .types = { 8014 PTR_TO_BTF_ID | MEM_PERCPU, 8015 PTR_TO_BTF_ID | MEM_PERCPU | MEM_RCU, 8016 PTR_TO_BTF_ID | MEM_PERCPU | PTR_TRUSTED, 8017 } 8018 }; 8019 static const struct bpf_reg_types func_ptr_types = { .types = { PTR_TO_FUNC } }; 8020 static const struct bpf_reg_types stack_ptr_types = { .types = { PTR_TO_STACK } }; 8021 static const struct bpf_reg_types const_str_ptr_types = { .types = { PTR_TO_MAP_VALUE } }; 8022 static const struct bpf_reg_types timer_types = { .types = { PTR_TO_MAP_VALUE } }; 8023 static const struct bpf_reg_types kptr_types = { .types = { PTR_TO_MAP_VALUE } }; 8024 static const struct bpf_reg_types dynptr_types = { 8025 .types = { 8026 PTR_TO_STACK, 8027 CONST_PTR_TO_DYNPTR, 8028 } 8029 }; 8030 8031 static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = { 8032 [ARG_PTR_TO_MAP_KEY] = &mem_types, 8033 [ARG_PTR_TO_MAP_VALUE] = &mem_types, 8034 [ARG_CONST_SIZE] = &scalar_types, 8035 [ARG_CONST_SIZE_OR_ZERO] = &scalar_types, 8036 [ARG_CONST_ALLOC_SIZE_OR_ZERO] = &scalar_types, 8037 [ARG_CONST_MAP_PTR] = &const_map_ptr_types, 8038 [ARG_PTR_TO_CTX] = &context_types, 8039 [ARG_PTR_TO_SOCK_COMMON] = &sock_types, 8040 #ifdef CONFIG_NET 8041 [ARG_PTR_TO_BTF_ID_SOCK_COMMON] = &btf_id_sock_common_types, 8042 #endif 8043 [ARG_PTR_TO_SOCKET] = &fullsock_types, 8044 [ARG_PTR_TO_BTF_ID] = &btf_ptr_types, 8045 [ARG_PTR_TO_SPIN_LOCK] = &spin_lock_types, 8046 [ARG_PTR_TO_MEM] = &mem_types, 8047 [ARG_PTR_TO_RINGBUF_MEM] = &ringbuf_mem_types, 8048 [ARG_PTR_TO_INT] = &int_ptr_types, 8049 [ARG_PTR_TO_LONG] = &int_ptr_types, 8050 [ARG_PTR_TO_PERCPU_BTF_ID] = &percpu_btf_ptr_types, 8051 [ARG_PTR_TO_FUNC] = &func_ptr_types, 8052 [ARG_PTR_TO_STACK] = &stack_ptr_types, 8053 [ARG_PTR_TO_CONST_STR] = &const_str_ptr_types, 8054 [ARG_PTR_TO_TIMER] = &timer_types, 8055 [ARG_PTR_TO_KPTR] = &kptr_types, 8056 [ARG_PTR_TO_DYNPTR] = &dynptr_types, 8057 }; 8058 8059 static int check_reg_type(struct bpf_verifier_env *env, u32 regno, 8060 enum bpf_arg_type arg_type, 8061 const u32 *arg_btf_id, 8062 struct bpf_call_arg_meta *meta) 8063 { 8064 struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; 8065 enum bpf_reg_type expected, type = reg->type; 8066 const struct bpf_reg_types *compatible; 8067 int i, j; 8068 8069 compatible = compatible_reg_types[base_type(arg_type)]; 8070 if (!compatible) { 8071 verbose(env, "verifier internal error: unsupported arg type %d\n", arg_type); 8072 
return -EFAULT; 8073 } 8074 8075 /* ARG_PTR_TO_MEM + RDONLY is compatible with PTR_TO_MEM and PTR_TO_MEM + RDONLY, 8076 * but ARG_PTR_TO_MEM is compatible only with PTR_TO_MEM and NOT with PTR_TO_MEM + RDONLY 8077 * 8078 * Same for MAYBE_NULL: 8079 * 8080 * ARG_PTR_TO_MEM + MAYBE_NULL is compatible with PTR_TO_MEM and PTR_TO_MEM + MAYBE_NULL, 8081 * but ARG_PTR_TO_MEM is compatible only with PTR_TO_MEM but NOT with PTR_TO_MEM + MAYBE_NULL 8082 * 8083 * ARG_PTR_TO_MEM is compatible with PTR_TO_MEM that is tagged with a dynptr type. 8084 * 8085 * Therefore we fold these flags depending on the arg_type before comparison. 8086 */ 8087 if (arg_type & MEM_RDONLY) 8088 type &= ~MEM_RDONLY; 8089 if (arg_type & PTR_MAYBE_NULL) 8090 type &= ~PTR_MAYBE_NULL; 8091 if (base_type(arg_type) == ARG_PTR_TO_MEM) 8092 type &= ~DYNPTR_TYPE_FLAG_MASK; 8093 8094 if (meta->func_id == BPF_FUNC_kptr_xchg && type_is_alloc(type)) { 8095 type &= ~MEM_ALLOC; 8096 type &= ~MEM_PERCPU; 8097 } 8098 8099 for (i = 0; i < ARRAY_SIZE(compatible->types); i++) { 8100 expected = compatible->types[i]; 8101 if (expected == NOT_INIT) 8102 break; 8103 8104 if (type == expected) 8105 goto found; 8106 } 8107 8108 verbose(env, "R%d type=%s expected=", regno, reg_type_str(env, reg->type)); 8109 for (j = 0; j + 1 < i; j++) 8110 verbose(env, "%s, ", reg_type_str(env, compatible->types[j])); 8111 verbose(env, "%s\n", reg_type_str(env, compatible->types[j])); 8112 return -EACCES; 8113 8114 found: 8115 if (base_type(reg->type) != PTR_TO_BTF_ID) 8116 return 0; 8117 8118 if (compatible == &mem_types) { 8119 if (!(arg_type & MEM_RDONLY)) { 8120 verbose(env, 8121 "%s() may write into memory pointed by R%d type=%s\n", 8122 func_id_name(meta->func_id), 8123 regno, reg_type_str(env, reg->type)); 8124 return -EACCES; 8125 } 8126 return 0; 8127 } 8128 8129 switch ((int)reg->type) { 8130 case PTR_TO_BTF_ID: 8131 case PTR_TO_BTF_ID | PTR_TRUSTED: 8132 case PTR_TO_BTF_ID | MEM_RCU: 8133 case PTR_TO_BTF_ID | PTR_MAYBE_NULL: 8134 case PTR_TO_BTF_ID | PTR_MAYBE_NULL | MEM_RCU: 8135 { 8136 /* For bpf_sk_release, it needs to match against first member 8137 * 'struct sock_common', hence make an exception for it. This 8138 * allows bpf_sk_release to work for multiple socket types. 
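 * E.g. (illustrative) a referenced pointer to 'struct tcp_sock' may be passed
 * to bpf_sk_release() even though the expected BTF type is 'struct sock_common',
 * because the non-strict btf_struct_ids_match() walk reaches sock_common through
 * the first members of tcp_sock.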
8139 */ 8140 bool strict_type_match = arg_type_is_release(arg_type) && 8141 meta->func_id != BPF_FUNC_sk_release; 8142 8143 if (type_may_be_null(reg->type) && 8144 (!type_may_be_null(arg_type) || arg_type_is_release(arg_type))) { 8145 verbose(env, "Possibly NULL pointer passed to helper arg%d\n", regno); 8146 return -EACCES; 8147 } 8148 8149 if (!arg_btf_id) { 8150 if (!compatible->btf_id) { 8151 verbose(env, "verifier internal error: missing arg compatible BTF ID\n"); 8152 return -EFAULT; 8153 } 8154 arg_btf_id = compatible->btf_id; 8155 } 8156 8157 if (meta->func_id == BPF_FUNC_kptr_xchg) { 8158 if (map_kptr_match_type(env, meta->kptr_field, reg, regno)) 8159 return -EACCES; 8160 } else { 8161 if (arg_btf_id == BPF_PTR_POISON) { 8162 verbose(env, "verifier internal error:"); 8163 verbose(env, "R%d has non-overwritten BPF_PTR_POISON type\n", 8164 regno); 8165 return -EACCES; 8166 } 8167 8168 if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off, 8169 btf_vmlinux, *arg_btf_id, 8170 strict_type_match)) { 8171 verbose(env, "R%d is of type %s but %s is expected\n", 8172 regno, btf_type_name(reg->btf, reg->btf_id), 8173 btf_type_name(btf_vmlinux, *arg_btf_id)); 8174 return -EACCES; 8175 } 8176 } 8177 break; 8178 } 8179 case PTR_TO_BTF_ID | MEM_ALLOC: 8180 case PTR_TO_BTF_ID | MEM_PERCPU | MEM_ALLOC: 8181 if (meta->func_id != BPF_FUNC_spin_lock && meta->func_id != BPF_FUNC_spin_unlock && 8182 meta->func_id != BPF_FUNC_kptr_xchg) { 8183 verbose(env, "verifier internal error: unimplemented handling of MEM_ALLOC\n"); 8184 return -EFAULT; 8185 } 8186 if (meta->func_id == BPF_FUNC_kptr_xchg) { 8187 if (map_kptr_match_type(env, meta->kptr_field, reg, regno)) 8188 return -EACCES; 8189 } 8190 break; 8191 case PTR_TO_BTF_ID | MEM_PERCPU: 8192 case PTR_TO_BTF_ID | MEM_PERCPU | MEM_RCU: 8193 case PTR_TO_BTF_ID | MEM_PERCPU | PTR_TRUSTED: 8194 /* Handled by helper specific checks */ 8195 break; 8196 default: 8197 verbose(env, "verifier internal error: invalid PTR_TO_BTF_ID register for type match\n"); 8198 return -EFAULT; 8199 } 8200 return 0; 8201 } 8202 8203 static struct btf_field * 8204 reg_find_field_offset(const struct bpf_reg_state *reg, s32 off, u32 fields) 8205 { 8206 struct btf_field *field; 8207 struct btf_record *rec; 8208 8209 rec = reg_btf_record(reg); 8210 if (!rec) 8211 return NULL; 8212 8213 field = btf_record_find(rec, off, fields); 8214 if (!field) 8215 return NULL; 8216 8217 return field; 8218 } 8219 8220 int check_func_arg_reg_off(struct bpf_verifier_env *env, 8221 const struct bpf_reg_state *reg, int regno, 8222 enum bpf_arg_type arg_type) 8223 { 8224 u32 type = reg->type; 8225 8226 /* When referenced register is passed to release function, its fixed 8227 * offset must be 0. 8228 * 8229 * We will check arg_type_is_release reg has ref_obj_id when storing 8230 * meta->release_regno. 8231 */ 8232 if (arg_type_is_release(arg_type)) { 8233 /* ARG_PTR_TO_DYNPTR with OBJ_RELEASE is a bit special, as it 8234 * may not directly point to the object being released, but to 8235 * dynptr pointing to such object, which might be at some offset 8236 * on the stack. In that case, we simply to fallback to the 8237 * default handling. 8238 */ 8239 if (arg_type_is_dynptr(arg_type) && type == PTR_TO_STACK) 8240 return 0; 8241 8242 /* Doing check_ptr_off_reg check for the offset will catch this 8243 * because fixed_off_ok is false, but checking here allows us 8244 * to give the user a better error message. 
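 * E.g. (illustrative) passing 'sk + 8' (a referenced socket pointer with a
 * non-zero fixed offset) to bpf_sk_release() is reported here as needing a
 * zero offset instead of falling through to the generic pointer-offset error.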
8245 */ 8246 if (reg->off) { 8247 verbose(env, "R%d must have zero offset when passed to release func or trusted arg to kfunc\n", 8248 regno); 8249 return -EINVAL; 8250 } 8251 return __check_ptr_off_reg(env, reg, regno, false); 8252 } 8253 8254 switch (type) { 8255 /* Pointer types where both fixed and variable offset is explicitly allowed: */ 8256 case PTR_TO_STACK: 8257 case PTR_TO_PACKET: 8258 case PTR_TO_PACKET_META: 8259 case PTR_TO_MAP_KEY: 8260 case PTR_TO_MAP_VALUE: 8261 case PTR_TO_MEM: 8262 case PTR_TO_MEM | MEM_RDONLY: 8263 case PTR_TO_MEM | MEM_RINGBUF: 8264 case PTR_TO_BUF: 8265 case PTR_TO_BUF | MEM_RDONLY: 8266 case SCALAR_VALUE: 8267 return 0; 8268 /* All the rest must be rejected, except PTR_TO_BTF_ID which allows 8269 * fixed offset. 8270 */ 8271 case PTR_TO_BTF_ID: 8272 case PTR_TO_BTF_ID | MEM_ALLOC: 8273 case PTR_TO_BTF_ID | PTR_TRUSTED: 8274 case PTR_TO_BTF_ID | MEM_RCU: 8275 case PTR_TO_BTF_ID | MEM_ALLOC | NON_OWN_REF: 8276 case PTR_TO_BTF_ID | MEM_ALLOC | NON_OWN_REF | MEM_RCU: 8277 /* When referenced PTR_TO_BTF_ID is passed to release function, 8278 * its fixed offset must be 0. In the other cases, fixed offset 8279 * can be non-zero. This was already checked above. So pass 8280 * fixed_off_ok as true to allow fixed offset for all other 8281 * cases. var_off always must be 0 for PTR_TO_BTF_ID, hence we 8282 * still need to do checks instead of returning. 8283 */ 8284 return __check_ptr_off_reg(env, reg, regno, true); 8285 default: 8286 return __check_ptr_off_reg(env, reg, regno, false); 8287 } 8288 } 8289 8290 static struct bpf_reg_state *get_dynptr_arg_reg(struct bpf_verifier_env *env, 8291 const struct bpf_func_proto *fn, 8292 struct bpf_reg_state *regs) 8293 { 8294 struct bpf_reg_state *state = NULL; 8295 int i; 8296 8297 for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) 8298 if (arg_type_is_dynptr(fn->arg_type[i])) { 8299 if (state) { 8300 verbose(env, "verifier internal error: multiple dynptr args\n"); 8301 return NULL; 8302 } 8303 state = ®s[BPF_REG_1 + i]; 8304 } 8305 8306 if (!state) 8307 verbose(env, "verifier internal error: no dynptr arg found\n"); 8308 8309 return state; 8310 } 8311 8312 static int dynptr_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg) 8313 { 8314 struct bpf_func_state *state = func(env, reg); 8315 int spi; 8316 8317 if (reg->type == CONST_PTR_TO_DYNPTR) 8318 return reg->id; 8319 spi = dynptr_get_spi(env, reg); 8320 if (spi < 0) 8321 return spi; 8322 return state->stack[spi].spilled_ptr.id; 8323 } 8324 8325 static int dynptr_ref_obj_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg) 8326 { 8327 struct bpf_func_state *state = func(env, reg); 8328 int spi; 8329 8330 if (reg->type == CONST_PTR_TO_DYNPTR) 8331 return reg->ref_obj_id; 8332 spi = dynptr_get_spi(env, reg); 8333 if (spi < 0) 8334 return spi; 8335 return state->stack[spi].spilled_ptr.ref_obj_id; 8336 } 8337 8338 static enum bpf_dynptr_type dynptr_get_type(struct bpf_verifier_env *env, 8339 struct bpf_reg_state *reg) 8340 { 8341 struct bpf_func_state *state = func(env, reg); 8342 int spi; 8343 8344 if (reg->type == CONST_PTR_TO_DYNPTR) 8345 return reg->dynptr.type; 8346 8347 spi = __get_spi(reg->off); 8348 if (spi < 0) { 8349 verbose(env, "verifier internal error: invalid spi when querying dynptr type\n"); 8350 return BPF_DYNPTR_TYPE_INVALID; 8351 } 8352 8353 return state->stack[spi].spilled_ptr.dynptr.type; 8354 } 8355 8356 static int check_reg_const_str(struct bpf_verifier_env *env, 8357 struct bpf_reg_state *reg, u32 regno) 8358 { 8359 struct bpf_map *map = 
reg->map_ptr; 8360 int err; 8361 int map_off; 8362 u64 map_addr; 8363 char *str_ptr; 8364 8365 if (reg->type != PTR_TO_MAP_VALUE) 8366 return -EINVAL; 8367 8368 if (!bpf_map_is_rdonly(map)) { 8369 verbose(env, "R%d does not point to a readonly map'\n", regno); 8370 return -EACCES; 8371 } 8372 8373 if (!tnum_is_const(reg->var_off)) { 8374 verbose(env, "R%d is not a constant address'\n", regno); 8375 return -EACCES; 8376 } 8377 8378 if (!map->ops->map_direct_value_addr) { 8379 verbose(env, "no direct value access support for this map type\n"); 8380 return -EACCES; 8381 } 8382 8383 err = check_map_access(env, regno, reg->off, 8384 map->value_size - reg->off, false, 8385 ACCESS_HELPER); 8386 if (err) 8387 return err; 8388 8389 map_off = reg->off + reg->var_off.value; 8390 err = map->ops->map_direct_value_addr(map, &map_addr, map_off); 8391 if (err) { 8392 verbose(env, "direct value access on string failed\n"); 8393 return err; 8394 } 8395 8396 str_ptr = (char *)(long)(map_addr); 8397 if (!strnchr(str_ptr + map_off, map->value_size - map_off, 0)) { 8398 verbose(env, "string is not zero-terminated\n"); 8399 return -EINVAL; 8400 } 8401 return 0; 8402 } 8403 8404 static int check_func_arg(struct bpf_verifier_env *env, u32 arg, 8405 struct bpf_call_arg_meta *meta, 8406 const struct bpf_func_proto *fn, 8407 int insn_idx) 8408 { 8409 u32 regno = BPF_REG_1 + arg; 8410 struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; 8411 enum bpf_arg_type arg_type = fn->arg_type[arg]; 8412 enum bpf_reg_type type = reg->type; 8413 u32 *arg_btf_id = NULL; 8414 int err = 0; 8415 8416 if (arg_type == ARG_DONTCARE) 8417 return 0; 8418 8419 err = check_reg_arg(env, regno, SRC_OP); 8420 if (err) 8421 return err; 8422 8423 if (arg_type == ARG_ANYTHING) { 8424 if (is_pointer_value(env, regno)) { 8425 verbose(env, "R%d leaks addr into helper function\n", 8426 regno); 8427 return -EACCES; 8428 } 8429 return 0; 8430 } 8431 8432 if (type_is_pkt_pointer(type) && 8433 !may_access_direct_pkt_data(env, meta, BPF_READ)) { 8434 verbose(env, "helper access to the packet is not allowed\n"); 8435 return -EACCES; 8436 } 8437 8438 if (base_type(arg_type) == ARG_PTR_TO_MAP_VALUE) { 8439 err = resolve_map_arg_type(env, meta, &arg_type); 8440 if (err) 8441 return err; 8442 } 8443 8444 if (register_is_null(reg) && type_may_be_null(arg_type)) 8445 /* A NULL register has a SCALAR_VALUE type, so skip 8446 * type checking. 8447 */ 8448 goto skip_type_check; 8449 8450 /* arg_btf_id and arg_size are in a union. */ 8451 if (base_type(arg_type) == ARG_PTR_TO_BTF_ID || 8452 base_type(arg_type) == ARG_PTR_TO_SPIN_LOCK) 8453 arg_btf_id = fn->arg_btf_id[arg]; 8454 8455 err = check_reg_type(env, regno, arg_type, arg_btf_id, meta); 8456 if (err) 8457 return err; 8458 8459 err = check_func_arg_reg_off(env, reg, regno, arg_type); 8460 if (err) 8461 return err; 8462 8463 skip_type_check: 8464 if (arg_type_is_release(arg_type)) { 8465 if (arg_type_is_dynptr(arg_type)) { 8466 struct bpf_func_state *state = func(env, reg); 8467 int spi; 8468 8469 /* Only dynptr created on stack can be released, thus 8470 * the get_spi and stack state checks for spilled_ptr 8471 * should only be done before process_dynptr_func for 8472 * PTR_TO_STACK. 
8473 */ 8474 if (reg->type == PTR_TO_STACK) { 8475 spi = dynptr_get_spi(env, reg); 8476 if (spi < 0 || !state->stack[spi].spilled_ptr.ref_obj_id) { 8477 verbose(env, "arg %d is an unacquired reference\n", regno); 8478 return -EINVAL; 8479 } 8480 } else { 8481 verbose(env, "cannot release unowned const bpf_dynptr\n"); 8482 return -EINVAL; 8483 } 8484 } else if (!reg->ref_obj_id && !register_is_null(reg)) { 8485 verbose(env, "R%d must be referenced when passed to release function\n", 8486 regno); 8487 return -EINVAL; 8488 } 8489 if (meta->release_regno) { 8490 verbose(env, "verifier internal error: more than one release argument\n"); 8491 return -EFAULT; 8492 } 8493 meta->release_regno = regno; 8494 } 8495 8496 if (reg->ref_obj_id) { 8497 if (meta->ref_obj_id) { 8498 verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n", 8499 regno, reg->ref_obj_id, 8500 meta->ref_obj_id); 8501 return -EFAULT; 8502 } 8503 meta->ref_obj_id = reg->ref_obj_id; 8504 } 8505 8506 switch (base_type(arg_type)) { 8507 case ARG_CONST_MAP_PTR: 8508 /* bpf_map_xxx(map_ptr) call: remember that map_ptr */ 8509 if (meta->map_ptr) { 8510 /* Use map_uid (which is unique id of inner map) to reject: 8511 * inner_map1 = bpf_map_lookup_elem(outer_map, key1) 8512 * inner_map2 = bpf_map_lookup_elem(outer_map, key2) 8513 * if (inner_map1 && inner_map2) { 8514 * timer = bpf_map_lookup_elem(inner_map1); 8515 * if (timer) 8516 * // mismatch would have been allowed 8517 * bpf_timer_init(timer, inner_map2); 8518 * } 8519 * 8520 * Comparing map_ptr is enough to distinguish normal and outer maps. 8521 */ 8522 if (meta->map_ptr != reg->map_ptr || 8523 meta->map_uid != reg->map_uid) { 8524 verbose(env, 8525 "timer pointer in R1 map_uid=%d doesn't match map pointer in R2 map_uid=%d\n", 8526 meta->map_uid, reg->map_uid); 8527 return -EINVAL; 8528 } 8529 } 8530 meta->map_ptr = reg->map_ptr; 8531 meta->map_uid = reg->map_uid; 8532 break; 8533 case ARG_PTR_TO_MAP_KEY: 8534 /* bpf_map_xxx(..., map_ptr, ..., key) call: 8535 * check that [key, key + map->key_size) are within 8536 * stack limits and initialized 8537 */ 8538 if (!meta->map_ptr) { 8539 /* in function declaration map_ptr must come before 8540 * map_key, so that it's verified and known before 8541 * we have to check map_key here. 
Otherwise it means 8542 * that kernel subsystem misconfigured verifier 8543 */ 8544 verbose(env, "invalid map_ptr to access map->key\n"); 8545 return -EACCES; 8546 } 8547 err = check_helper_mem_access(env, regno, 8548 meta->map_ptr->key_size, false, 8549 NULL); 8550 break; 8551 case ARG_PTR_TO_MAP_VALUE: 8552 if (type_may_be_null(arg_type) && register_is_null(reg)) 8553 return 0; 8554 8555 /* bpf_map_xxx(..., map_ptr, ..., value) call: 8556 * check [value, value + map->value_size) validity 8557 */ 8558 if (!meta->map_ptr) { 8559 /* kernel subsystem misconfigured verifier */ 8560 verbose(env, "invalid map_ptr to access map->value\n"); 8561 return -EACCES; 8562 } 8563 meta->raw_mode = arg_type & MEM_UNINIT; 8564 err = check_helper_mem_access(env, regno, 8565 meta->map_ptr->value_size, false, 8566 meta); 8567 break; 8568 case ARG_PTR_TO_PERCPU_BTF_ID: 8569 if (!reg->btf_id) { 8570 verbose(env, "Helper has invalid btf_id in R%d\n", regno); 8571 return -EACCES; 8572 } 8573 meta->ret_btf = reg->btf; 8574 meta->ret_btf_id = reg->btf_id; 8575 break; 8576 case ARG_PTR_TO_SPIN_LOCK: 8577 if (in_rbtree_lock_required_cb(env)) { 8578 verbose(env, "can't spin_{lock,unlock} in rbtree cb\n"); 8579 return -EACCES; 8580 } 8581 if (meta->func_id == BPF_FUNC_spin_lock) { 8582 err = process_spin_lock(env, regno, true); 8583 if (err) 8584 return err; 8585 } else if (meta->func_id == BPF_FUNC_spin_unlock) { 8586 err = process_spin_lock(env, regno, false); 8587 if (err) 8588 return err; 8589 } else { 8590 verbose(env, "verifier internal error\n"); 8591 return -EFAULT; 8592 } 8593 break; 8594 case ARG_PTR_TO_TIMER: 8595 err = process_timer_func(env, regno, meta); 8596 if (err) 8597 return err; 8598 break; 8599 case ARG_PTR_TO_FUNC: 8600 meta->subprogno = reg->subprogno; 8601 break; 8602 case ARG_PTR_TO_MEM: 8603 /* The access to this pointer is only checked when we hit the 8604 * next is_mem_size argument below. 
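 * E.g. (illustrative) bpf_probe_read_kernel(dst, size, unsafe_ptr) declares
 * arg1 as ARG_PTR_TO_UNINIT_MEM and arg2 as ARG_CONST_SIZE_OR_ZERO; the range
 * behind 'dst' is only validated once check_mem_size_reg() runs for 'size'.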
8605 */ 8606 meta->raw_mode = arg_type & MEM_UNINIT; 8607 if (arg_type & MEM_FIXED_SIZE) { 8608 err = check_helper_mem_access(env, regno, 8609 fn->arg_size[arg], false, 8610 meta); 8611 } 8612 break; 8613 case ARG_CONST_SIZE: 8614 err = check_mem_size_reg(env, reg, regno, false, meta); 8615 break; 8616 case ARG_CONST_SIZE_OR_ZERO: 8617 err = check_mem_size_reg(env, reg, regno, true, meta); 8618 break; 8619 case ARG_PTR_TO_DYNPTR: 8620 err = process_dynptr_func(env, regno, insn_idx, arg_type, 0); 8621 if (err) 8622 return err; 8623 break; 8624 case ARG_CONST_ALLOC_SIZE_OR_ZERO: 8625 if (!tnum_is_const(reg->var_off)) { 8626 verbose(env, "R%d is not a known constant'\n", 8627 regno); 8628 return -EACCES; 8629 } 8630 meta->mem_size = reg->var_off.value; 8631 err = mark_chain_precision(env, regno); 8632 if (err) 8633 return err; 8634 break; 8635 case ARG_PTR_TO_INT: 8636 case ARG_PTR_TO_LONG: 8637 { 8638 int size = int_ptr_type_to_size(arg_type); 8639 8640 err = check_helper_mem_access(env, regno, size, false, meta); 8641 if (err) 8642 return err; 8643 err = check_ptr_alignment(env, reg, 0, size, true); 8644 break; 8645 } 8646 case ARG_PTR_TO_CONST_STR: 8647 { 8648 err = check_reg_const_str(env, reg, regno); 8649 if (err) 8650 return err; 8651 break; 8652 } 8653 case ARG_PTR_TO_KPTR: 8654 err = process_kptr_func(env, regno, meta); 8655 if (err) 8656 return err; 8657 break; 8658 } 8659 8660 return err; 8661 } 8662 8663 static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id) 8664 { 8665 enum bpf_attach_type eatype = env->prog->expected_attach_type; 8666 enum bpf_prog_type type = resolve_prog_type(env->prog); 8667 8668 if (func_id != BPF_FUNC_map_update_elem) 8669 return false; 8670 8671 /* It's not possible to get access to a locked struct sock in these 8672 * contexts, so updating is safe. 8673 */ 8674 switch (type) { 8675 case BPF_PROG_TYPE_TRACING: 8676 if (eatype == BPF_TRACE_ITER) 8677 return true; 8678 break; 8679 case BPF_PROG_TYPE_SOCKET_FILTER: 8680 case BPF_PROG_TYPE_SCHED_CLS: 8681 case BPF_PROG_TYPE_SCHED_ACT: 8682 case BPF_PROG_TYPE_XDP: 8683 case BPF_PROG_TYPE_SK_REUSEPORT: 8684 case BPF_PROG_TYPE_FLOW_DISSECTOR: 8685 case BPF_PROG_TYPE_SK_LOOKUP: 8686 return true; 8687 default: 8688 break; 8689 } 8690 8691 verbose(env, "cannot update sockmap in this context\n"); 8692 return false; 8693 } 8694 8695 static bool allow_tail_call_in_subprogs(struct bpf_verifier_env *env) 8696 { 8697 return env->prog->jit_requested && 8698 bpf_jit_supports_subprog_tailcalls(); 8699 } 8700 8701 static int check_map_func_compatibility(struct bpf_verifier_env *env, 8702 struct bpf_map *map, int func_id) 8703 { 8704 if (!map) 8705 return 0; 8706 8707 /* We need a two way check, first is from map perspective ... 
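 * E.g. (illustrative) a BPF_MAP_TYPE_PROG_ARRAY is only accepted by
 * bpf_tail_call() in the first switch, and conversely bpf_tail_call() only
 * accepts a BPF_MAP_TYPE_PROG_ARRAY in the second switch further down.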
*/ 8708 switch (map->map_type) { 8709 case BPF_MAP_TYPE_PROG_ARRAY: 8710 if (func_id != BPF_FUNC_tail_call) 8711 goto error; 8712 break; 8713 case BPF_MAP_TYPE_PERF_EVENT_ARRAY: 8714 if (func_id != BPF_FUNC_perf_event_read && 8715 func_id != BPF_FUNC_perf_event_output && 8716 func_id != BPF_FUNC_skb_output && 8717 func_id != BPF_FUNC_perf_event_read_value && 8718 func_id != BPF_FUNC_xdp_output) 8719 goto error; 8720 break; 8721 case BPF_MAP_TYPE_RINGBUF: 8722 if (func_id != BPF_FUNC_ringbuf_output && 8723 func_id != BPF_FUNC_ringbuf_reserve && 8724 func_id != BPF_FUNC_ringbuf_query && 8725 func_id != BPF_FUNC_ringbuf_reserve_dynptr && 8726 func_id != BPF_FUNC_ringbuf_submit_dynptr && 8727 func_id != BPF_FUNC_ringbuf_discard_dynptr) 8728 goto error; 8729 break; 8730 case BPF_MAP_TYPE_USER_RINGBUF: 8731 if (func_id != BPF_FUNC_user_ringbuf_drain) 8732 goto error; 8733 break; 8734 case BPF_MAP_TYPE_STACK_TRACE: 8735 if (func_id != BPF_FUNC_get_stackid) 8736 goto error; 8737 break; 8738 case BPF_MAP_TYPE_CGROUP_ARRAY: 8739 if (func_id != BPF_FUNC_skb_under_cgroup && 8740 func_id != BPF_FUNC_current_task_under_cgroup) 8741 goto error; 8742 break; 8743 case BPF_MAP_TYPE_CGROUP_STORAGE: 8744 case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: 8745 if (func_id != BPF_FUNC_get_local_storage) 8746 goto error; 8747 break; 8748 case BPF_MAP_TYPE_DEVMAP: 8749 case BPF_MAP_TYPE_DEVMAP_HASH: 8750 if (func_id != BPF_FUNC_redirect_map && 8751 func_id != BPF_FUNC_map_lookup_elem) 8752 goto error; 8753 break; 8754 /* Restrict bpf side of cpumap and xskmap, open when use-cases 8755 * appear. 8756 */ 8757 case BPF_MAP_TYPE_CPUMAP: 8758 if (func_id != BPF_FUNC_redirect_map) 8759 goto error; 8760 break; 8761 case BPF_MAP_TYPE_XSKMAP: 8762 if (func_id != BPF_FUNC_redirect_map && 8763 func_id != BPF_FUNC_map_lookup_elem) 8764 goto error; 8765 break; 8766 case BPF_MAP_TYPE_ARRAY_OF_MAPS: 8767 case BPF_MAP_TYPE_HASH_OF_MAPS: 8768 if (func_id != BPF_FUNC_map_lookup_elem) 8769 goto error; 8770 break; 8771 case BPF_MAP_TYPE_SOCKMAP: 8772 if (func_id != BPF_FUNC_sk_redirect_map && 8773 func_id != BPF_FUNC_sock_map_update && 8774 func_id != BPF_FUNC_map_delete_elem && 8775 func_id != BPF_FUNC_msg_redirect_map && 8776 func_id != BPF_FUNC_sk_select_reuseport && 8777 func_id != BPF_FUNC_map_lookup_elem && 8778 !may_update_sockmap(env, func_id)) 8779 goto error; 8780 break; 8781 case BPF_MAP_TYPE_SOCKHASH: 8782 if (func_id != BPF_FUNC_sk_redirect_hash && 8783 func_id != BPF_FUNC_sock_hash_update && 8784 func_id != BPF_FUNC_map_delete_elem && 8785 func_id != BPF_FUNC_msg_redirect_hash && 8786 func_id != BPF_FUNC_sk_select_reuseport && 8787 func_id != BPF_FUNC_map_lookup_elem && 8788 !may_update_sockmap(env, func_id)) 8789 goto error; 8790 break; 8791 case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY: 8792 if (func_id != BPF_FUNC_sk_select_reuseport) 8793 goto error; 8794 break; 8795 case BPF_MAP_TYPE_QUEUE: 8796 case BPF_MAP_TYPE_STACK: 8797 if (func_id != BPF_FUNC_map_peek_elem && 8798 func_id != BPF_FUNC_map_pop_elem && 8799 func_id != BPF_FUNC_map_push_elem) 8800 goto error; 8801 break; 8802 case BPF_MAP_TYPE_SK_STORAGE: 8803 if (func_id != BPF_FUNC_sk_storage_get && 8804 func_id != BPF_FUNC_sk_storage_delete && 8805 func_id != BPF_FUNC_kptr_xchg) 8806 goto error; 8807 break; 8808 case BPF_MAP_TYPE_INODE_STORAGE: 8809 if (func_id != BPF_FUNC_inode_storage_get && 8810 func_id != BPF_FUNC_inode_storage_delete && 8811 func_id != BPF_FUNC_kptr_xchg) 8812 goto error; 8813 break; 8814 case BPF_MAP_TYPE_TASK_STORAGE: 8815 if (func_id != 
BPF_FUNC_task_storage_get && 8816 func_id != BPF_FUNC_task_storage_delete && 8817 func_id != BPF_FUNC_kptr_xchg) 8818 goto error; 8819 break; 8820 case BPF_MAP_TYPE_CGRP_STORAGE: 8821 if (func_id != BPF_FUNC_cgrp_storage_get && 8822 func_id != BPF_FUNC_cgrp_storage_delete && 8823 func_id != BPF_FUNC_kptr_xchg) 8824 goto error; 8825 break; 8826 case BPF_MAP_TYPE_BLOOM_FILTER: 8827 if (func_id != BPF_FUNC_map_peek_elem && 8828 func_id != BPF_FUNC_map_push_elem) 8829 goto error; 8830 break; 8831 default: 8832 break; 8833 } 8834 8835 /* ... and second from the function itself. */ 8836 switch (func_id) { 8837 case BPF_FUNC_tail_call: 8838 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY) 8839 goto error; 8840 if (env->subprog_cnt > 1 && !allow_tail_call_in_subprogs(env)) { 8841 verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n"); 8842 return -EINVAL; 8843 } 8844 break; 8845 case BPF_FUNC_perf_event_read: 8846 case BPF_FUNC_perf_event_output: 8847 case BPF_FUNC_perf_event_read_value: 8848 case BPF_FUNC_skb_output: 8849 case BPF_FUNC_xdp_output: 8850 if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) 8851 goto error; 8852 break; 8853 case BPF_FUNC_ringbuf_output: 8854 case BPF_FUNC_ringbuf_reserve: 8855 case BPF_FUNC_ringbuf_query: 8856 case BPF_FUNC_ringbuf_reserve_dynptr: 8857 case BPF_FUNC_ringbuf_submit_dynptr: 8858 case BPF_FUNC_ringbuf_discard_dynptr: 8859 if (map->map_type != BPF_MAP_TYPE_RINGBUF) 8860 goto error; 8861 break; 8862 case BPF_FUNC_user_ringbuf_drain: 8863 if (map->map_type != BPF_MAP_TYPE_USER_RINGBUF) 8864 goto error; 8865 break; 8866 case BPF_FUNC_get_stackid: 8867 if (map->map_type != BPF_MAP_TYPE_STACK_TRACE) 8868 goto error; 8869 break; 8870 case BPF_FUNC_current_task_under_cgroup: 8871 case BPF_FUNC_skb_under_cgroup: 8872 if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY) 8873 goto error; 8874 break; 8875 case BPF_FUNC_redirect_map: 8876 if (map->map_type != BPF_MAP_TYPE_DEVMAP && 8877 map->map_type != BPF_MAP_TYPE_DEVMAP_HASH && 8878 map->map_type != BPF_MAP_TYPE_CPUMAP && 8879 map->map_type != BPF_MAP_TYPE_XSKMAP) 8880 goto error; 8881 break; 8882 case BPF_FUNC_sk_redirect_map: 8883 case BPF_FUNC_msg_redirect_map: 8884 case BPF_FUNC_sock_map_update: 8885 if (map->map_type != BPF_MAP_TYPE_SOCKMAP) 8886 goto error; 8887 break; 8888 case BPF_FUNC_sk_redirect_hash: 8889 case BPF_FUNC_msg_redirect_hash: 8890 case BPF_FUNC_sock_hash_update: 8891 if (map->map_type != BPF_MAP_TYPE_SOCKHASH) 8892 goto error; 8893 break; 8894 case BPF_FUNC_get_local_storage: 8895 if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE && 8896 map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) 8897 goto error; 8898 break; 8899 case BPF_FUNC_sk_select_reuseport: 8900 if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY && 8901 map->map_type != BPF_MAP_TYPE_SOCKMAP && 8902 map->map_type != BPF_MAP_TYPE_SOCKHASH) 8903 goto error; 8904 break; 8905 case BPF_FUNC_map_pop_elem: 8906 if (map->map_type != BPF_MAP_TYPE_QUEUE && 8907 map->map_type != BPF_MAP_TYPE_STACK) 8908 goto error; 8909 break; 8910 case BPF_FUNC_map_peek_elem: 8911 case BPF_FUNC_map_push_elem: 8912 if (map->map_type != BPF_MAP_TYPE_QUEUE && 8913 map->map_type != BPF_MAP_TYPE_STACK && 8914 map->map_type != BPF_MAP_TYPE_BLOOM_FILTER) 8915 goto error; 8916 break; 8917 case BPF_FUNC_map_lookup_percpu_elem: 8918 if (map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY && 8919 map->map_type != BPF_MAP_TYPE_PERCPU_HASH && 8920 map->map_type != BPF_MAP_TYPE_LRU_PERCPU_HASH) 8921 goto error; 8922 break; 8923 case 
BPF_FUNC_sk_storage_get: 8924 case BPF_FUNC_sk_storage_delete: 8925 if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) 8926 goto error; 8927 break; 8928 case BPF_FUNC_inode_storage_get: 8929 case BPF_FUNC_inode_storage_delete: 8930 if (map->map_type != BPF_MAP_TYPE_INODE_STORAGE) 8931 goto error; 8932 break; 8933 case BPF_FUNC_task_storage_get: 8934 case BPF_FUNC_task_storage_delete: 8935 if (map->map_type != BPF_MAP_TYPE_TASK_STORAGE) 8936 goto error; 8937 break; 8938 case BPF_FUNC_cgrp_storage_get: 8939 case BPF_FUNC_cgrp_storage_delete: 8940 if (map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) 8941 goto error; 8942 break; 8943 default: 8944 break; 8945 } 8946 8947 return 0; 8948 error: 8949 verbose(env, "cannot pass map_type %d into func %s#%d\n", 8950 map->map_type, func_id_name(func_id), func_id); 8951 return -EINVAL; 8952 } 8953 8954 static bool check_raw_mode_ok(const struct bpf_func_proto *fn) 8955 { 8956 int count = 0; 8957 8958 if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM) 8959 count++; 8960 if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM) 8961 count++; 8962 if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM) 8963 count++; 8964 if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM) 8965 count++; 8966 if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM) 8967 count++; 8968 8969 /* We only support one arg being in raw mode at the moment, 8970 * which is sufficient for the helper functions we have 8971 * right now. 8972 */ 8973 return count <= 1; 8974 } 8975 8976 static bool check_args_pair_invalid(const struct bpf_func_proto *fn, int arg) 8977 { 8978 bool is_fixed = fn->arg_type[arg] & MEM_FIXED_SIZE; 8979 bool has_size = fn->arg_size[arg] != 0; 8980 bool is_next_size = false; 8981 8982 if (arg + 1 < ARRAY_SIZE(fn->arg_type)) 8983 is_next_size = arg_type_is_mem_size(fn->arg_type[arg + 1]); 8984 8985 if (base_type(fn->arg_type[arg]) != ARG_PTR_TO_MEM) 8986 return is_next_size; 8987 8988 return has_size == is_next_size || is_next_size == is_fixed; 8989 } 8990 8991 static bool check_arg_pair_ok(const struct bpf_func_proto *fn) 8992 { 8993 /* bpf_xxx(..., buf, len) call will access 'len' 8994 * bytes from memory 'buf'. Both arg types need 8995 * to be paired, so make sure there's no buggy 8996 * helper function specification. 8997 */ 8998 if (arg_type_is_mem_size(fn->arg1_type) || 8999 check_args_pair_invalid(fn, 0) || 9000 check_args_pair_invalid(fn, 1) || 9001 check_args_pair_invalid(fn, 2) || 9002 check_args_pair_invalid(fn, 3) || 9003 check_args_pair_invalid(fn, 4)) 9004 return false; 9005 9006 return true; 9007 } 9008 9009 static bool check_btf_id_ok(const struct bpf_func_proto *fn) 9010 { 9011 int i; 9012 9013 for (i = 0; i < ARRAY_SIZE(fn->arg_type); i++) { 9014 if (base_type(fn->arg_type[i]) == ARG_PTR_TO_BTF_ID) 9015 return !!fn->arg_btf_id[i]; 9016 if (base_type(fn->arg_type[i]) == ARG_PTR_TO_SPIN_LOCK) 9017 return fn->arg_btf_id[i] == BPF_PTR_POISON; 9018 if (base_type(fn->arg_type[i]) != ARG_PTR_TO_BTF_ID && fn->arg_btf_id[i] && 9019 /* arg_btf_id and arg_size are in a union. */ 9020 (base_type(fn->arg_type[i]) != ARG_PTR_TO_MEM || 9021 !(fn->arg_type[i] & MEM_FIXED_SIZE))) 9022 return false; 9023 } 9024 9025 return true; 9026 } 9027 9028 static int check_func_proto(const struct bpf_func_proto *fn, int func_id) 9029 { 9030 return check_raw_mode_ok(fn) && 9031 check_arg_pair_ok(fn) && 9032 check_btf_id_ok(fn) ? 0 : -EINVAL; 9033 } 9034 9035 /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END] 9036 * are now invalid, so turn them into unknown SCALAR_VALUE. 
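 * A minimal program-side sketch of why this is needed (names illustrative):
 *
 *   void *data = (void *)(long)skb->data;   // verifier type: PTR_TO_PACKET
 *   bpf_skb_pull_data(skb, 64);             // helper may reallocate packet data
 *   // 'data' is now an unknown SCALAR_VALUE; skb->data must be re-read
 *   // before it is dereferenced again.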
9037 * 9038 * This also applies to dynptr slices belonging to skb and xdp dynptrs, 9039 * since these slices point to packet data. 9040 */ 9041 static void clear_all_pkt_pointers(struct bpf_verifier_env *env) 9042 { 9043 struct bpf_func_state *state; 9044 struct bpf_reg_state *reg; 9045 9046 bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({ 9047 if (reg_is_pkt_pointer_any(reg) || reg_is_dynptr_slice_pkt(reg)) 9048 mark_reg_invalid(env, reg); 9049 })); 9050 } 9051 9052 enum { 9053 AT_PKT_END = -1, 9054 BEYOND_PKT_END = -2, 9055 }; 9056 9057 static void mark_pkt_end(struct bpf_verifier_state *vstate, int regn, bool range_open) 9058 { 9059 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 9060 struct bpf_reg_state *reg = &state->regs[regn]; 9061 9062 if (reg->type != PTR_TO_PACKET) 9063 /* PTR_TO_PACKET_META is not supported yet */ 9064 return; 9065 9066 /* The 'reg' is pkt > pkt_end or pkt >= pkt_end. 9067 * How far beyond pkt_end it goes is unknown. 9068 * if (!range_open) it's the case of pkt >= pkt_end 9069 * if (range_open) it's the case of pkt > pkt_end 9070 * hence this pointer is at least 1 byte bigger than pkt_end 9071 */ 9072 if (range_open) 9073 reg->range = BEYOND_PKT_END; 9074 else 9075 reg->range = AT_PKT_END; 9076 } 9077 9078 /* The pointer with the specified id has released its reference to kernel 9079 * resources. Identify all copies of the same pointer and clear the reference. 9080 */ 9081 static int release_reference(struct bpf_verifier_env *env, 9082 int ref_obj_id) 9083 { 9084 struct bpf_func_state *state; 9085 struct bpf_reg_state *reg; 9086 int err; 9087 9088 err = release_reference_state(cur_func(env), ref_obj_id); 9089 if (err) 9090 return err; 9091 9092 bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({ 9093 if (reg->ref_obj_id == ref_obj_id) 9094 mark_reg_invalid(env, reg); 9095 })); 9096 9097 return 0; 9098 } 9099 9100 static void invalidate_non_owning_refs(struct bpf_verifier_env *env) 9101 { 9102 struct bpf_func_state *unused; 9103 struct bpf_reg_state *reg; 9104 9105 bpf_for_each_reg_in_vstate(env->cur_state, unused, reg, ({ 9106 if (type_is_non_owning_ref(reg->type)) 9107 mark_reg_invalid(env, reg); 9108 })); 9109 } 9110 9111 static void clear_caller_saved_regs(struct bpf_verifier_env *env, 9112 struct bpf_reg_state *regs) 9113 { 9114 int i; 9115 9116 /* after the call registers r0 - r5 were scratched */ 9117 for (i = 0; i < CALLER_SAVED_REGS; i++) { 9118 mark_reg_not_init(env, regs, caller_saved[i]); 9119 __check_reg_arg(env, regs, caller_saved[i], DST_OP_NO_MARK); 9120 } 9121 } 9122 9123 typedef int (*set_callee_state_fn)(struct bpf_verifier_env *env, 9124 struct bpf_func_state *caller, 9125 struct bpf_func_state *callee, 9126 int insn_idx); 9127 9128 static int set_callee_state(struct bpf_verifier_env *env, 9129 struct bpf_func_state *caller, 9130 struct bpf_func_state *callee, int insn_idx); 9131 9132 static int setup_func_entry(struct bpf_verifier_env *env, int subprog, int callsite, 9133 set_callee_state_fn set_callee_state_cb, 9134 struct bpf_verifier_state *state) 9135 { 9136 struct bpf_func_state *caller, *callee; 9137 int err; 9138 9139 if (state->curframe + 1 >= MAX_CALL_FRAMES) { 9140 verbose(env, "the call stack of %d frames is too deep\n", 9141 state->curframe + 2); 9142 return -E2BIG; 9143 } 9144 9145 if (state->frame[state->curframe + 1]) { 9146 verbose(env, "verifier bug. 
Frame %d already allocated\n", 9147 state->curframe + 1); 9148 return -EFAULT; 9149 } 9150 9151 caller = state->frame[state->curframe]; 9152 callee = kzalloc(sizeof(*callee), GFP_KERNEL); 9153 if (!callee) 9154 return -ENOMEM; 9155 state->frame[state->curframe + 1] = callee; 9156 9157 /* callee cannot access r0, r6 - r9 for reading and has to write 9158 * into its own stack before reading from it. 9159 * callee can read/write into caller's stack 9160 */ 9161 init_func_state(env, callee, 9162 /* remember the callsite, it will be used by bpf_exit */ 9163 callsite, 9164 state->curframe + 1 /* frameno within this callchain */, 9165 subprog /* subprog number within this prog */); 9166 /* Transfer references to the callee */ 9167 err = copy_reference_state(callee, caller); 9168 err = err ?: set_callee_state_cb(env, caller, callee, callsite); 9169 if (err) 9170 goto err_out; 9171 9172 /* only increment it after check_reg_arg() finished */ 9173 state->curframe++; 9174 9175 return 0; 9176 9177 err_out: 9178 free_func_state(callee); 9179 state->frame[state->curframe + 1] = NULL; 9180 return err; 9181 } 9182 9183 static int push_callback_call(struct bpf_verifier_env *env, struct bpf_insn *insn, 9184 int insn_idx, int subprog, 9185 set_callee_state_fn set_callee_state_cb) 9186 { 9187 struct bpf_verifier_state *state = env->cur_state, *callback_state; 9188 struct bpf_func_state *caller, *callee; 9189 int err; 9190 9191 caller = state->frame[state->curframe]; 9192 err = btf_check_subprog_call(env, subprog, caller->regs); 9193 if (err == -EFAULT) 9194 return err; 9195 9196 /* set_callee_state is used for direct subprog calls, but we are 9197 * interested in validating only BPF helpers that can call subprogs as 9198 * callbacks 9199 */ 9200 env->subprog_info[subprog].is_cb = true; 9201 if (bpf_pseudo_kfunc_call(insn) && 9202 !is_sync_callback_calling_kfunc(insn->imm)) { 9203 verbose(env, "verifier bug: kfunc %s#%d not marked as callback-calling\n", 9204 func_id_name(insn->imm), insn->imm); 9205 return -EFAULT; 9206 } else if (!bpf_pseudo_kfunc_call(insn) && 9207 !is_callback_calling_function(insn->imm)) { /* helper */ 9208 verbose(env, "verifier bug: helper %s#%d not marked as callback-calling\n", 9209 func_id_name(insn->imm), insn->imm); 9210 return -EFAULT; 9211 } 9212 9213 if (insn->code == (BPF_JMP | BPF_CALL) && 9214 insn->src_reg == 0 && 9215 insn->imm == BPF_FUNC_timer_set_callback) { 9216 struct bpf_verifier_state *async_cb; 9217 9218 /* there is no real recursion here. timer callbacks are async */ 9219 env->subprog_info[subprog].is_async_cb = true; 9220 async_cb = push_async_cb(env, env->subprog_info[subprog].start, 9221 insn_idx, subprog); 9222 if (!async_cb) 9223 return -EFAULT; 9224 callee = async_cb->frame[0]; 9225 callee->async_entry_cnt = caller->async_entry_cnt + 1; 9226 9227 /* Convert bpf_timer_set_callback() args into timer callback args */ 9228 err = set_callee_state_cb(env, caller, callee, insn_idx); 9229 if (err) 9230 return err; 9231 9232 return 0; 9233 } 9234 9235 /* for callback functions enqueue entry to callback and 9236 * proceed with next instruction within current frame. 
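 * E.g. for a program doing (names purely illustrative):
 *
 *   bpf_for_each_map_elem(&my_map, my_cb, &cb_ctx, 0);
 *
 * verification continues at the instruction after the call in the current
 * frame, while a separate state starting at my_cb's first instruction is
 * pushed below and explored later, much like an additional branch.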
9237 */ 9238 callback_state = push_stack(env, env->subprog_info[subprog].start, insn_idx, false); 9239 if (!callback_state) 9240 return -ENOMEM; 9241 9242 err = setup_func_entry(env, subprog, insn_idx, set_callee_state_cb, 9243 callback_state); 9244 if (err) 9245 return err; 9246 9247 callback_state->callback_unroll_depth++; 9248 callback_state->frame[callback_state->curframe - 1]->callback_depth++; 9249 caller->callback_depth = 0; 9250 return 0; 9251 } 9252 9253 static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, 9254 int *insn_idx) 9255 { 9256 struct bpf_verifier_state *state = env->cur_state; 9257 struct bpf_func_state *caller; 9258 int err, subprog, target_insn; 9259 9260 target_insn = *insn_idx + insn->imm + 1; 9261 subprog = find_subprog(env, target_insn); 9262 if (subprog < 0) { 9263 verbose(env, "verifier bug. No program starts at insn %d\n", target_insn); 9264 return -EFAULT; 9265 } 9266 9267 caller = state->frame[state->curframe]; 9268 err = btf_check_subprog_call(env, subprog, caller->regs); 9269 if (err == -EFAULT) 9270 return err; 9271 if (subprog_is_global(env, subprog)) { 9272 if (err) { 9273 verbose(env, "Caller passes invalid args into func#%d\n", subprog); 9274 return err; 9275 } 9276 9277 if (env->log.level & BPF_LOG_LEVEL) 9278 verbose(env, "Func#%d is global and valid. Skipping.\n", subprog); 9279 clear_caller_saved_regs(env, caller->regs); 9280 9281 /* All global functions return a 64-bit SCALAR_VALUE */ 9282 mark_reg_unknown(env, caller->regs, BPF_REG_0); 9283 caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG; 9284 9285 /* continue with next insn after call */ 9286 return 0; 9287 } 9288 9289 /* for regular function entry setup new frame and continue 9290 * from that frame. 9291 */ 9292 err = setup_func_entry(env, subprog, *insn_idx, set_callee_state, state); 9293 if (err) 9294 return err; 9295 9296 clear_caller_saved_regs(env, caller->regs); 9297 9298 /* and go analyze first insn of the callee */ 9299 *insn_idx = env->subprog_info[subprog].start - 1; 9300 9301 if (env->log.level & BPF_LOG_LEVEL) { 9302 verbose(env, "caller:\n"); 9303 print_verifier_state(env, caller, true); 9304 verbose(env, "callee:\n"); 9305 print_verifier_state(env, state->frame[state->curframe], true); 9306 } 9307 9308 return 0; 9309 } 9310 9311 int map_set_for_each_callback_args(struct bpf_verifier_env *env, 9312 struct bpf_func_state *caller, 9313 struct bpf_func_state *callee) 9314 { 9315 /* bpf_for_each_map_elem(struct bpf_map *map, void *callback_fn, 9316 * void *callback_ctx, u64 flags); 9317 * callback_fn(struct bpf_map *map, void *key, void *value, 9318 * void *callback_ctx); 9319 */ 9320 callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1]; 9321 9322 callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY; 9323 __mark_reg_known_zero(&callee->regs[BPF_REG_2]); 9324 callee->regs[BPF_REG_2].map_ptr = caller->regs[BPF_REG_1].map_ptr; 9325 9326 callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE; 9327 __mark_reg_known_zero(&callee->regs[BPF_REG_3]); 9328 callee->regs[BPF_REG_3].map_ptr = caller->regs[BPF_REG_1].map_ptr; 9329 9330 /* pointer to stack or null */ 9331 callee->regs[BPF_REG_4] = caller->regs[BPF_REG_3]; 9332 9333 /* unused */ 9334 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); 9335 return 0; 9336 } 9337 9338 static int set_callee_state(struct bpf_verifier_env *env, 9339 struct bpf_func_state *caller, 9340 struct bpf_func_state *callee, int insn_idx) 9341 { 9342 int i; 9343 9344 /* copy r1 - r5 args that callee can access. 
The copy includes parent 9345 * pointers, which connects us up to the liveness chain 9346 */ 9347 for (i = BPF_REG_1; i <= BPF_REG_5; i++) 9348 callee->regs[i] = caller->regs[i]; 9349 return 0; 9350 } 9351 9352 static int set_map_elem_callback_state(struct bpf_verifier_env *env, 9353 struct bpf_func_state *caller, 9354 struct bpf_func_state *callee, 9355 int insn_idx) 9356 { 9357 struct bpf_insn_aux_data *insn_aux = &env->insn_aux_data[insn_idx]; 9358 struct bpf_map *map; 9359 int err; 9360 9361 if (bpf_map_ptr_poisoned(insn_aux)) { 9362 verbose(env, "tail_call abusing map_ptr\n"); 9363 return -EINVAL; 9364 } 9365 9366 map = BPF_MAP_PTR(insn_aux->map_ptr_state); 9367 if (!map->ops->map_set_for_each_callback_args || 9368 !map->ops->map_for_each_callback) { 9369 verbose(env, "callback function not allowed for map\n"); 9370 return -ENOTSUPP; 9371 } 9372 9373 err = map->ops->map_set_for_each_callback_args(env, caller, callee); 9374 if (err) 9375 return err; 9376 9377 callee->in_callback_fn = true; 9378 callee->callback_ret_range = tnum_range(0, 1); 9379 return 0; 9380 } 9381 9382 static int set_loop_callback_state(struct bpf_verifier_env *env, 9383 struct bpf_func_state *caller, 9384 struct bpf_func_state *callee, 9385 int insn_idx) 9386 { 9387 /* bpf_loop(u32 nr_loops, void *callback_fn, void *callback_ctx, 9388 * u64 flags); 9389 * callback_fn(u32 index, void *callback_ctx); 9390 */ 9391 callee->regs[BPF_REG_1].type = SCALAR_VALUE; 9392 callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3]; 9393 9394 /* unused */ 9395 __mark_reg_not_init(env, &callee->regs[BPF_REG_3]); 9396 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); 9397 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); 9398 9399 callee->in_callback_fn = true; 9400 callee->callback_ret_range = tnum_range(0, 1); 9401 return 0; 9402 } 9403 9404 static int set_timer_callback_state(struct bpf_verifier_env *env, 9405 struct bpf_func_state *caller, 9406 struct bpf_func_state *callee, 9407 int insn_idx) 9408 { 9409 struct bpf_map *map_ptr = caller->regs[BPF_REG_1].map_ptr; 9410 9411 /* bpf_timer_set_callback(struct bpf_timer *timer, void *callback_fn); 9412 * callback_fn(struct bpf_map *map, void *key, void *value); 9413 */ 9414 callee->regs[BPF_REG_1].type = CONST_PTR_TO_MAP; 9415 __mark_reg_known_zero(&callee->regs[BPF_REG_1]); 9416 callee->regs[BPF_REG_1].map_ptr = map_ptr; 9417 9418 callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY; 9419 __mark_reg_known_zero(&callee->regs[BPF_REG_2]); 9420 callee->regs[BPF_REG_2].map_ptr = map_ptr; 9421 9422 callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE; 9423 __mark_reg_known_zero(&callee->regs[BPF_REG_3]); 9424 callee->regs[BPF_REG_3].map_ptr = map_ptr; 9425 9426 /* unused */ 9427 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); 9428 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); 9429 callee->in_async_callback_fn = true; 9430 callee->callback_ret_range = tnum_range(0, 1); 9431 return 0; 9432 } 9433 9434 static int set_find_vma_callback_state(struct bpf_verifier_env *env, 9435 struct bpf_func_state *caller, 9436 struct bpf_func_state *callee, 9437 int insn_idx) 9438 { 9439 /* bpf_find_vma(struct task_struct *task, u64 addr, 9440 * void *callback_fn, void *callback_ctx, u64 flags) 9441 * (callback_fn)(struct task_struct *task, 9442 * struct vm_area_struct *vma, void *callback_ctx); 9443 */ 9444 callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1]; 9445 9446 callee->regs[BPF_REG_2].type = PTR_TO_BTF_ID; 9447 __mark_reg_known_zero(&callee->regs[BPF_REG_2]); 9448 callee->regs[BPF_REG_2].btf = 
btf_vmlinux; 9449 callee->regs[BPF_REG_2].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_VMA], 9450 9451 /* pointer to stack or null */ 9452 callee->regs[BPF_REG_3] = caller->regs[BPF_REG_4]; 9453 9454 /* unused */ 9455 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); 9456 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); 9457 callee->in_callback_fn = true; 9458 callee->callback_ret_range = tnum_range(0, 1); 9459 return 0; 9460 } 9461 9462 static int set_user_ringbuf_callback_state(struct bpf_verifier_env *env, 9463 struct bpf_func_state *caller, 9464 struct bpf_func_state *callee, 9465 int insn_idx) 9466 { 9467 /* bpf_user_ringbuf_drain(struct bpf_map *map, void *callback_fn, void 9468 * callback_ctx, u64 flags); 9469 * callback_fn(const struct bpf_dynptr_t* dynptr, void *callback_ctx); 9470 */ 9471 __mark_reg_not_init(env, &callee->regs[BPF_REG_0]); 9472 mark_dynptr_cb_reg(env, &callee->regs[BPF_REG_1], BPF_DYNPTR_TYPE_LOCAL); 9473 callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3]; 9474 9475 /* unused */ 9476 __mark_reg_not_init(env, &callee->regs[BPF_REG_3]); 9477 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); 9478 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); 9479 9480 callee->in_callback_fn = true; 9481 callee->callback_ret_range = tnum_range(0, 1); 9482 return 0; 9483 } 9484 9485 static int set_rbtree_add_callback_state(struct bpf_verifier_env *env, 9486 struct bpf_func_state *caller, 9487 struct bpf_func_state *callee, 9488 int insn_idx) 9489 { 9490 /* void bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node, 9491 * bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b)); 9492 * 9493 * 'struct bpf_rb_node *node' arg to bpf_rbtree_add_impl is the same PTR_TO_BTF_ID w/ offset 9494 * that 'less' callback args will be receiving. However, 'node' arg was release_reference'd 9495 * by this point, so look at 'root' 9496 */ 9497 struct btf_field *field; 9498 9499 field = reg_find_field_offset(&caller->regs[BPF_REG_1], caller->regs[BPF_REG_1].off, 9500 BPF_RB_ROOT); 9501 if (!field || !field->graph_root.value_btf_id) 9502 return -EFAULT; 9503 9504 mark_reg_graph_node(callee->regs, BPF_REG_1, &field->graph_root); 9505 ref_set_non_owning(env, &callee->regs[BPF_REG_1]); 9506 mark_reg_graph_node(callee->regs, BPF_REG_2, &field->graph_root); 9507 ref_set_non_owning(env, &callee->regs[BPF_REG_2]); 9508 9509 __mark_reg_not_init(env, &callee->regs[BPF_REG_3]); 9510 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); 9511 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); 9512 callee->in_callback_fn = true; 9513 callee->callback_ret_range = tnum_range(0, 1); 9514 return 0; 9515 } 9516 9517 static bool is_rbtree_lock_required_kfunc(u32 btf_id); 9518 9519 /* Are we currently verifying the callback for a rbtree helper that must 9520 * be called with lock held? 
If so, no need to complain about unreleased 9521 * lock 9522 */ 9523 static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env) 9524 { 9525 struct bpf_verifier_state *state = env->cur_state; 9526 struct bpf_insn *insn = env->prog->insnsi; 9527 struct bpf_func_state *callee; 9528 int kfunc_btf_id; 9529 9530 if (!state->curframe) 9531 return false; 9532 9533 callee = state->frame[state->curframe]; 9534 9535 if (!callee->in_callback_fn) 9536 return false; 9537 9538 kfunc_btf_id = insn[callee->callsite].imm; 9539 return is_rbtree_lock_required_kfunc(kfunc_btf_id); 9540 } 9541 9542 static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) 9543 { 9544 struct bpf_verifier_state *state = env->cur_state, *prev_st; 9545 struct bpf_func_state *caller, *callee; 9546 struct bpf_reg_state *r0; 9547 bool in_callback_fn; 9548 int err; 9549 9550 callee = state->frame[state->curframe]; 9551 r0 = &callee->regs[BPF_REG_0]; 9552 if (r0->type == PTR_TO_STACK) { 9553 /* technically it's ok to return caller's stack pointer 9554 * (or caller's caller's pointer) back to the caller, 9555 * since these pointers are valid. Only current stack 9556 * pointer will be invalid as soon as function exits, 9557 * but let's be conservative 9558 */ 9559 verbose(env, "cannot return stack pointer to the caller\n"); 9560 return -EINVAL; 9561 } 9562 9563 caller = state->frame[state->curframe - 1]; 9564 if (callee->in_callback_fn) { 9565 /* enforce R0 return value range [0, 1]. */ 9566 struct tnum range = callee->callback_ret_range; 9567 9568 if (r0->type != SCALAR_VALUE) { 9569 verbose(env, "R0 not a scalar value\n"); 9570 return -EACCES; 9571 } 9572 if (!tnum_in(range, r0->var_off)) { 9573 verbose_invalid_scalar(env, r0, &range, "callback return", "R0"); 9574 return -EINVAL; 9575 } 9576 if (!calls_callback(env, callee->callsite)) { 9577 verbose(env, "BUG: in callback at %d, callsite %d !calls_callback\n", 9578 *insn_idx, callee->callsite); 9579 return -EFAULT; 9580 } 9581 } else { 9582 /* return to the caller whatever r0 had in the callee */ 9583 caller->regs[BPF_REG_0] = *r0; 9584 } 9585 9586 /* callback_fn frame should have released its own additions to parent's 9587 * reference state at this point, or check_reference_leak would 9588 * complain, hence it must be the same as the caller. There is no need 9589 * to copy it back. 9590 */ 9591 if (!callee->in_callback_fn) { 9592 /* Transfer references to the caller */ 9593 err = copy_reference_state(caller, callee); 9594 if (err) 9595 return err; 9596 } 9597 9598 /* for callbacks like bpf_loop or bpf_for_each_map_elem go back to callsite, 9599 * there function call logic would reschedule callback visit. If iteration 9600 * converges is_state_visited() would prune that visit eventually. 9601 */ 9602 in_callback_fn = callee->in_callback_fn; 9603 if (in_callback_fn) 9604 *insn_idx = callee->callsite; 9605 else 9606 *insn_idx = callee->callsite + 1; 9607 9608 if (env->log.level & BPF_LOG_LEVEL) { 9609 verbose(env, "returning from callee:\n"); 9610 print_verifier_state(env, callee, true); 9611 verbose(env, "to caller at %d:\n", *insn_idx); 9612 print_verifier_state(env, caller, true); 9613 } 9614 /* clear everything in the callee. In case of exceptional exits using 9615 * bpf_throw, this will be done by copy_verifier_state for extra frames. 
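 * Here "clear everything" concretely means the callee's bpf_func_state is
 * freed and its frame slot reset to NULL just below, after which
 * verification resumes in the caller's frame.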
*/ 9616 free_func_state(callee); 9617 state->frame[state->curframe--] = NULL; 9618 9619 /* for callbacks widen imprecise scalars to make programs like below verify: 9620 * 9621 * struct ctx { int i; } 9622 * void cb(int idx, struct ctx *ctx) { ctx->i++; ... } 9623 * ... 9624 * struct ctx = { .i = 0; } 9625 * bpf_loop(100, cb, &ctx, 0); 9626 * 9627 * This is similar to what is done in process_iter_next_call() for open 9628 * coded iterators. 9629 */ 9630 prev_st = in_callback_fn ? find_prev_entry(env, state, *insn_idx) : NULL; 9631 if (prev_st) { 9632 err = widen_imprecise_scalars(env, prev_st, state); 9633 if (err) 9634 return err; 9635 } 9636 return 0; 9637 } 9638 9639 static int do_refine_retval_range(struct bpf_verifier_env *env, 9640 struct bpf_reg_state *regs, int ret_type, 9641 int func_id, 9642 struct bpf_call_arg_meta *meta) 9643 { 9644 struct bpf_reg_state *ret_reg = &regs[BPF_REG_0]; 9645 9646 if (ret_type != RET_INTEGER) 9647 return 0; 9648 9649 switch (func_id) { 9650 case BPF_FUNC_get_stack: 9651 case BPF_FUNC_get_task_stack: 9652 case BPF_FUNC_probe_read_str: 9653 case BPF_FUNC_probe_read_kernel_str: 9654 case BPF_FUNC_probe_read_user_str: 9655 ret_reg->smax_value = meta->msize_max_value; 9656 ret_reg->s32_max_value = meta->msize_max_value; 9657 ret_reg->smin_value = -MAX_ERRNO; 9658 ret_reg->s32_min_value = -MAX_ERRNO; 9659 reg_bounds_sync(ret_reg); 9660 break; 9661 case BPF_FUNC_get_smp_processor_id: 9662 ret_reg->umax_value = nr_cpu_ids - 1; 9663 ret_reg->u32_max_value = nr_cpu_ids - 1; 9664 ret_reg->smax_value = nr_cpu_ids - 1; 9665 ret_reg->s32_max_value = nr_cpu_ids - 1; 9666 ret_reg->umin_value = 0; 9667 ret_reg->u32_min_value = 0; 9668 ret_reg->smin_value = 0; 9669 ret_reg->s32_min_value = 0; 9670 reg_bounds_sync(ret_reg); 9671 break; 9672 } 9673 9674 return reg_bounds_sanity_check(env, ret_reg, "retval"); 9675 } 9676 9677 static int 9678 record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta, 9679 int func_id, int insn_idx) 9680 { 9681 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx]; 9682 struct bpf_map *map = meta->map_ptr; 9683 9684 if (func_id != BPF_FUNC_tail_call && 9685 func_id != BPF_FUNC_map_lookup_elem && 9686 func_id != BPF_FUNC_map_update_elem && 9687 func_id != BPF_FUNC_map_delete_elem && 9688 func_id != BPF_FUNC_map_push_elem && 9689 func_id != BPF_FUNC_map_pop_elem && 9690 func_id != BPF_FUNC_map_peek_elem && 9691 func_id != BPF_FUNC_for_each_map_elem && 9692 func_id != BPF_FUNC_redirect_map && 9693 func_id != BPF_FUNC_map_lookup_percpu_elem) 9694 return 0; 9695 9696 if (map == NULL) { 9697 verbose(env, "kernel subsystem misconfigured verifier\n"); 9698 return -EINVAL; 9699 } 9700 9701 /* In case of read-only, some additional restrictions 9702 * need to be applied in order to prevent altering the 9703 * state of the map from program side.
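 * For instance, with a map created with BPF_F_RDONLY_PROG (an illustrative
 * snippet, names are made up):
 *
 *   bpf_map_lookup_elem(&rdonly_map, &key);           // still allowed
 *   bpf_map_update_elem(&rdonly_map, &key, &val, 0);  // "write into map forbidden"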
9704 */ 9705 if ((map->map_flags & BPF_F_RDONLY_PROG) && 9706 (func_id == BPF_FUNC_map_delete_elem || 9707 func_id == BPF_FUNC_map_update_elem || 9708 func_id == BPF_FUNC_map_push_elem || 9709 func_id == BPF_FUNC_map_pop_elem)) { 9710 verbose(env, "write into map forbidden\n"); 9711 return -EACCES; 9712 } 9713 9714 if (!BPF_MAP_PTR(aux->map_ptr_state)) 9715 bpf_map_ptr_store(aux, meta->map_ptr, 9716 !meta->map_ptr->bypass_spec_v1); 9717 else if (BPF_MAP_PTR(aux->map_ptr_state) != meta->map_ptr) 9718 bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON, 9719 !meta->map_ptr->bypass_spec_v1); 9720 return 0; 9721 } 9722 9723 static int 9724 record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta, 9725 int func_id, int insn_idx) 9726 { 9727 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx]; 9728 struct bpf_reg_state *regs = cur_regs(env), *reg; 9729 struct bpf_map *map = meta->map_ptr; 9730 u64 val, max; 9731 int err; 9732 9733 if (func_id != BPF_FUNC_tail_call) 9734 return 0; 9735 if (!map || map->map_type != BPF_MAP_TYPE_PROG_ARRAY) { 9736 verbose(env, "kernel subsystem misconfigured verifier\n"); 9737 return -EINVAL; 9738 } 9739 9740 reg = &regs[BPF_REG_3]; 9741 val = reg->var_off.value; 9742 max = map->max_entries; 9743 9744 if (!(is_reg_const(reg, false) && val < max)) { 9745 bpf_map_key_store(aux, BPF_MAP_KEY_POISON); 9746 return 0; 9747 } 9748 9749 err = mark_chain_precision(env, BPF_REG_3); 9750 if (err) 9751 return err; 9752 if (bpf_map_key_unseen(aux)) 9753 bpf_map_key_store(aux, val); 9754 else if (!bpf_map_key_poisoned(aux) && 9755 bpf_map_key_immediate(aux) != val) 9756 bpf_map_key_store(aux, BPF_MAP_KEY_POISON); 9757 return 0; 9758 } 9759 9760 static int check_reference_leak(struct bpf_verifier_env *env, bool exception_exit) 9761 { 9762 struct bpf_func_state *state = cur_func(env); 9763 bool refs_lingering = false; 9764 int i; 9765 9766 if (!exception_exit && state->frameno && !state->in_callback_fn) 9767 return 0; 9768 9769 for (i = 0; i < state->acquired_refs; i++) { 9770 if (!exception_exit && state->in_callback_fn && state->refs[i].callback_ref != state->frameno) 9771 continue; 9772 verbose(env, "Unreleased reference id=%d alloc_insn=%d\n", 9773 state->refs[i].id, state->refs[i].insn_idx); 9774 refs_lingering = true; 9775 } 9776 return refs_lingering ? -EINVAL : 0; 9777 } 9778 9779 static int check_bpf_snprintf_call(struct bpf_verifier_env *env, 9780 struct bpf_reg_state *regs) 9781 { 9782 struct bpf_reg_state *fmt_reg = &regs[BPF_REG_3]; 9783 struct bpf_reg_state *data_len_reg = &regs[BPF_REG_5]; 9784 struct bpf_map *fmt_map = fmt_reg->map_ptr; 9785 struct bpf_bprintf_data data = {}; 9786 int err, fmt_map_off, num_args; 9787 u64 fmt_addr; 9788 char *fmt; 9789 9790 /* data must be an array of u64 */ 9791 if (data_len_reg->var_off.value % 8) 9792 return -EINVAL; 9793 num_args = data_len_reg->var_off.value / 8; 9794 9795 /* fmt being ARG_PTR_TO_CONST_STR guarantees that var_off is const 9796 * and map_direct_value_addr is set. 9797 */ 9798 fmt_map_off = fmt_reg->off + fmt_reg->var_off.value; 9799 err = fmt_map->ops->map_direct_value_addr(fmt_map, &fmt_addr, 9800 fmt_map_off); 9801 if (err) { 9802 verbose(env, "verifier bug\n"); 9803 return -EFAULT; 9804 } 9805 fmt = (char *)(long)fmt_addr + fmt_map_off; 9806 9807 /* We are also guaranteed that fmt+fmt_map_off is NULL terminated, we 9808 * can focus on validating the format specifiers.
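 * The call shape being validated here looks roughly like this on the
 * program side (an illustrative sketch, names are made up):
 *
 *   static const char fmt[] = "pid %d comm %s";
 *   u64 args[] = { pid, (unsigned long)comm };
 *   bpf_snprintf(out, sizeof(out), fmt, args, sizeof(args));
 *
 * where sizeof(args) must be a multiple of 8, as checked above.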
9809 */ 9810 err = bpf_bprintf_prepare(fmt, UINT_MAX, NULL, num_args, &data); 9811 if (err < 0) 9812 verbose(env, "Invalid format string\n"); 9813 9814 return err; 9815 } 9816 9817 static int check_get_func_ip(struct bpf_verifier_env *env) 9818 { 9819 enum bpf_prog_type type = resolve_prog_type(env->prog); 9820 int func_id = BPF_FUNC_get_func_ip; 9821 9822 if (type == BPF_PROG_TYPE_TRACING) { 9823 if (!bpf_prog_has_trampoline(env->prog)) { 9824 verbose(env, "func %s#%d supported only for fentry/fexit/fmod_ret programs\n", 9825 func_id_name(func_id), func_id); 9826 return -ENOTSUPP; 9827 } 9828 return 0; 9829 } else if (type == BPF_PROG_TYPE_KPROBE) { 9830 return 0; 9831 } 9832 9833 verbose(env, "func %s#%d not supported for program type %d\n", 9834 func_id_name(func_id), func_id, type); 9835 return -ENOTSUPP; 9836 } 9837 9838 static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env) 9839 { 9840 return &env->insn_aux_data[env->insn_idx]; 9841 } 9842 9843 static bool loop_flag_is_zero(struct bpf_verifier_env *env) 9844 { 9845 struct bpf_reg_state *regs = cur_regs(env); 9846 struct bpf_reg_state *reg = &regs[BPF_REG_4]; 9847 bool reg_is_null = register_is_null(reg); 9848 9849 if (reg_is_null) 9850 mark_chain_precision(env, BPF_REG_4); 9851 9852 return reg_is_null; 9853 } 9854 9855 static void update_loop_inline_state(struct bpf_verifier_env *env, u32 subprogno) 9856 { 9857 struct bpf_loop_inline_state *state = &cur_aux(env)->loop_inline_state; 9858 9859 if (!state->initialized) { 9860 state->initialized = 1; 9861 state->fit_for_inline = loop_flag_is_zero(env); 9862 state->callback_subprogno = subprogno; 9863 return; 9864 } 9865 9866 if (!state->fit_for_inline) 9867 return; 9868 9869 state->fit_for_inline = (loop_flag_is_zero(env) && 9870 state->callback_subprogno == subprogno); 9871 } 9872 9873 static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn, 9874 int *insn_idx_p) 9875 { 9876 enum bpf_prog_type prog_type = resolve_prog_type(env->prog); 9877 bool returns_cpu_specific_alloc_ptr = false; 9878 const struct bpf_func_proto *fn = NULL; 9879 enum bpf_return_type ret_type; 9880 enum bpf_type_flag ret_flag; 9881 struct bpf_reg_state *regs; 9882 struct bpf_call_arg_meta meta; 9883 int insn_idx = *insn_idx_p; 9884 bool changes_data; 9885 int i, err, func_id; 9886 9887 /* find function prototype */ 9888 func_id = insn->imm; 9889 if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) { 9890 verbose(env, "invalid func %s#%d\n", func_id_name(func_id), 9891 func_id); 9892 return -EINVAL; 9893 } 9894 9895 if (env->ops->get_func_proto) 9896 fn = env->ops->get_func_proto(func_id, env->prog); 9897 if (!fn) { 9898 verbose(env, "unknown func %s#%d\n", func_id_name(func_id), 9899 func_id); 9900 return -EINVAL; 9901 } 9902 9903 /* eBPF programs must be GPL compatible to use GPL-ed functions */ 9904 if (!env->prog->gpl_compatible && fn->gpl_only) { 9905 verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n"); 9906 return -EINVAL; 9907 } 9908 9909 if (fn->allowed && !fn->allowed(env->prog)) { 9910 verbose(env, "helper call is not allowed in probe\n"); 9911 return -EINVAL; 9912 } 9913 9914 if (!env->prog->aux->sleepable && fn->might_sleep) { 9915 verbose(env, "helper call might sleep in a non-sleepable prog\n"); 9916 return -EINVAL; 9917 } 9918 9919 /* With LD_ABS/IND some JITs save/restore skb from r1.
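 * Consequently a helper that may rewrite packet data (e.g.
 * bpf_skb_pull_data()) is expected to take the ctx/skb as its first
 * argument; the check below treats any other prototype as a kernel-side
 * misconfiguration of the helper.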
*/ 9920 changes_data = bpf_helper_changes_pkt_data(fn->func); 9921 if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) { 9922 verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n", 9923 func_id_name(func_id), func_id); 9924 return -EINVAL; 9925 } 9926 9927 memset(&meta, 0, sizeof(meta)); 9928 meta.pkt_access = fn->pkt_access; 9929 9930 err = check_func_proto(fn, func_id); 9931 if (err) { 9932 verbose(env, "kernel subsystem misconfigured func %s#%d\n", 9933 func_id_name(func_id), func_id); 9934 return err; 9935 } 9936 9937 if (env->cur_state->active_rcu_lock) { 9938 if (fn->might_sleep) { 9939 verbose(env, "sleepable helper %s#%d in rcu_read_lock region\n", 9940 func_id_name(func_id), func_id); 9941 return -EINVAL; 9942 } 9943 9944 if (env->prog->aux->sleepable && is_storage_get_function(func_id)) 9945 env->insn_aux_data[insn_idx].storage_get_func_atomic = true; 9946 } 9947 9948 meta.func_id = func_id; 9949 /* check args */ 9950 for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) { 9951 err = check_func_arg(env, i, &meta, fn, insn_idx); 9952 if (err) 9953 return err; 9954 } 9955 9956 err = record_func_map(env, &meta, func_id, insn_idx); 9957 if (err) 9958 return err; 9959 9960 err = record_func_key(env, &meta, func_id, insn_idx); 9961 if (err) 9962 return err; 9963 9964 /* Mark slots with STACK_MISC in case of raw mode, stack offset 9965 * is inferred from register state. 9966 */ 9967 for (i = 0; i < meta.access_size; i++) { 9968 err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, 9969 BPF_WRITE, -1, false, false); 9970 if (err) 9971 return err; 9972 } 9973 9974 regs = cur_regs(env); 9975 9976 if (meta.release_regno) { 9977 err = -EINVAL; 9978 /* This can only be set for PTR_TO_STACK, as CONST_PTR_TO_DYNPTR cannot 9979 * be released by any dynptr helper. Hence, unmark_stack_slots_dynptr 9980 * is safe to do directly. 9981 */ 9982 if (arg_type_is_dynptr(fn->arg_type[meta.release_regno - BPF_REG_1])) { 9983 if (regs[meta.release_regno].type == CONST_PTR_TO_DYNPTR) { 9984 verbose(env, "verifier internal error: CONST_PTR_TO_DYNPTR cannot be released\n"); 9985 return -EFAULT; 9986 } 9987 err = unmark_stack_slots_dynptr(env, &regs[meta.release_regno]); 9988 } else if (func_id == BPF_FUNC_kptr_xchg && meta.ref_obj_id) { 9989 u32 ref_obj_id = meta.ref_obj_id; 9990 bool in_rcu = in_rcu_cs(env); 9991 struct bpf_func_state *state; 9992 struct bpf_reg_state *reg; 9993 9994 err = release_reference_state(cur_func(env), ref_obj_id); 9995 if (!err) { 9996 bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({ 9997 if (reg->ref_obj_id == ref_obj_id) { 9998 if (in_rcu && (reg->type & MEM_ALLOC) && (reg->type & MEM_PERCPU)) { 9999 reg->ref_obj_id = 0; 10000 reg->type &= ~MEM_ALLOC; 10001 reg->type |= MEM_RCU; 10002 } else { 10003 mark_reg_invalid(env, reg); 10004 } 10005 } 10006 })); 10007 } 10008 } else if (meta.ref_obj_id) { 10009 err = release_reference(env, meta.ref_obj_id); 10010 } else if (register_is_null(&regs[meta.release_regno])) { 10011 /* meta.ref_obj_id can only be 0 if register that is meant to be 10012 * released is NULL, which must be > R0.
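 * In that case there is simply nothing to release, so the call is accepted
 * and err is cleared to 0 just below.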
10013 */ 10014 err = 0; 10015 } 10016 if (err) { 10017 verbose(env, "func %s#%d reference has not been acquired before\n", 10018 func_id_name(func_id), func_id); 10019 return err; 10020 } 10021 } 10022 10023 switch (func_id) { 10024 case BPF_FUNC_tail_call: 10025 err = check_reference_leak(env, false); 10026 if (err) { 10027 verbose(env, "tail_call would lead to reference leak\n"); 10028 return err; 10029 } 10030 break; 10031 case BPF_FUNC_get_local_storage: 10032 /* check that flags argument in get_local_storage(map, flags) is 0, 10033 * this is required because get_local_storage() can't return an error. 10034 */ 10035 if (!register_is_null(&regs[BPF_REG_2])) { 10036 verbose(env, "get_local_storage() doesn't support non-zero flags\n"); 10037 return -EINVAL; 10038 } 10039 break; 10040 case BPF_FUNC_for_each_map_elem: 10041 err = push_callback_call(env, insn, insn_idx, meta.subprogno, 10042 set_map_elem_callback_state); 10043 break; 10044 case BPF_FUNC_timer_set_callback: 10045 err = push_callback_call(env, insn, insn_idx, meta.subprogno, 10046 set_timer_callback_state); 10047 break; 10048 case BPF_FUNC_find_vma: 10049 err = push_callback_call(env, insn, insn_idx, meta.subprogno, 10050 set_find_vma_callback_state); 10051 break; 10052 case BPF_FUNC_snprintf: 10053 err = check_bpf_snprintf_call(env, regs); 10054 break; 10055 case BPF_FUNC_loop: 10056 update_loop_inline_state(env, meta.subprogno); 10057 /* Verifier relies on R1 value to determine if bpf_loop() iteration 10058 * is finished, thus mark it precise. 10059 */ 10060 err = mark_chain_precision(env, BPF_REG_1); 10061 if (err) 10062 return err; 10063 if (cur_func(env)->callback_depth < regs[BPF_REG_1].umax_value) { 10064 err = push_callback_call(env, insn, insn_idx, meta.subprogno, 10065 set_loop_callback_state); 10066 } else { 10067 cur_func(env)->callback_depth = 0; 10068 if (env->log.level & BPF_LOG_LEVEL2) 10069 verbose(env, "frame%d bpf_loop iteration limit reached\n", 10070 env->cur_state->curframe); 10071 } 10072 break; 10073 case BPF_FUNC_dynptr_from_mem: 10074 if (regs[BPF_REG_1].type != PTR_TO_MAP_VALUE) { 10075 verbose(env, "Unsupported reg type %s for bpf_dynptr_from_mem data\n", 10076 reg_type_str(env, regs[BPF_REG_1].type)); 10077 return -EACCES; 10078 } 10079 break; 10080 case BPF_FUNC_set_retval: 10081 if (prog_type == BPF_PROG_TYPE_LSM && 10082 env->prog->expected_attach_type == BPF_LSM_CGROUP) { 10083 if (!env->prog->aux->attach_func_proto->type) { 10084 /* Make sure programs that attach to void 10085 * hooks don't try to modify return value.
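 * (attach_func_proto->type == 0 means the attached hook returns void, so
 * there is no return value for bpf_set_retval() to set and the call is
 * rejected below.)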
10086 */ 10087 verbose(env, "BPF_LSM_CGROUP that attach to void LSM hooks can't modify return value!\n"); 10088 return -EINVAL; 10089 } 10090 } 10091 break; 10092 case BPF_FUNC_dynptr_data: 10093 { 10094 struct bpf_reg_state *reg; 10095 int id, ref_obj_id; 10096 10097 reg = get_dynptr_arg_reg(env, fn, regs); 10098 if (!reg) 10099 return -EFAULT; 10100 10101 10102 if (meta.dynptr_id) { 10103 verbose(env, "verifier internal error: meta.dynptr_id already set\n"); 10104 return -EFAULT; 10105 } 10106 if (meta.ref_obj_id) { 10107 verbose(env, "verifier internal error: meta.ref_obj_id already set\n"); 10108 return -EFAULT; 10109 } 10110 10111 id = dynptr_id(env, reg); 10112 if (id < 0) { 10113 verbose(env, "verifier internal error: failed to obtain dynptr id\n"); 10114 return id; 10115 } 10116 10117 ref_obj_id = dynptr_ref_obj_id(env, reg); 10118 if (ref_obj_id < 0) { 10119 verbose(env, "verifier internal error: failed to obtain dynptr ref_obj_id\n"); 10120 return ref_obj_id; 10121 } 10122 10123 meta.dynptr_id = id; 10124 meta.ref_obj_id = ref_obj_id; 10125 10126 break; 10127 } 10128 case BPF_FUNC_dynptr_write: 10129 { 10130 enum bpf_dynptr_type dynptr_type; 10131 struct bpf_reg_state *reg; 10132 10133 reg = get_dynptr_arg_reg(env, fn, regs); 10134 if (!reg) 10135 return -EFAULT; 10136 10137 dynptr_type = dynptr_get_type(env, reg); 10138 if (dynptr_type == BPF_DYNPTR_TYPE_INVALID) 10139 return -EFAULT; 10140 10141 if (dynptr_type == BPF_DYNPTR_TYPE_SKB) 10142 /* this will trigger clear_all_pkt_pointers(), which will 10143 * invalidate all dynptr slices associated with the skb 10144 */ 10145 changes_data = true; 10146 10147 break; 10148 } 10149 case BPF_FUNC_per_cpu_ptr: 10150 case BPF_FUNC_this_cpu_ptr: 10151 { 10152 struct bpf_reg_state *reg = ®s[BPF_REG_1]; 10153 const struct btf_type *type; 10154 10155 if (reg->type & MEM_RCU) { 10156 type = btf_type_by_id(reg->btf, reg->btf_id); 10157 if (!type || !btf_type_is_struct(type)) { 10158 verbose(env, "Helper has invalid btf/btf_id in R1\n"); 10159 return -EFAULT; 10160 } 10161 returns_cpu_specific_alloc_ptr = true; 10162 env->insn_aux_data[insn_idx].call_with_percpu_alloc_ptr = true; 10163 } 10164 break; 10165 } 10166 case BPF_FUNC_user_ringbuf_drain: 10167 err = push_callback_call(env, insn, insn_idx, meta.subprogno, 10168 set_user_ringbuf_callback_state); 10169 break; 10170 } 10171 10172 if (err) 10173 return err; 10174 10175 /* reset caller saved regs */ 10176 for (i = 0; i < CALLER_SAVED_REGS; i++) { 10177 mark_reg_not_init(env, regs, caller_saved[i]); 10178 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); 10179 } 10180 10181 /* helper call returns 64-bit value. 
*/ 10182 regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG; 10183 10184 /* update return register (already marked as written above) */ 10185 ret_type = fn->ret_type; 10186 ret_flag = type_flag(ret_type); 10187 10188 switch (base_type(ret_type)) { 10189 case RET_INTEGER: 10190 /* sets type to SCALAR_VALUE */ 10191 mark_reg_unknown(env, regs, BPF_REG_0); 10192 break; 10193 case RET_VOID: 10194 regs[BPF_REG_0].type = NOT_INIT; 10195 break; 10196 case RET_PTR_TO_MAP_VALUE: 10197 /* There is no offset yet applied, variable or fixed */ 10198 mark_reg_known_zero(env, regs, BPF_REG_0); 10199 /* remember map_ptr, so that check_map_access() 10200 * can check 'value_size' boundary of memory access 10201 * to map element returned from bpf_map_lookup_elem() 10202 */ 10203 if (meta.map_ptr == NULL) { 10204 verbose(env, 10205 "kernel subsystem misconfigured verifier\n"); 10206 return -EINVAL; 10207 } 10208 regs[BPF_REG_0].map_ptr = meta.map_ptr; 10209 regs[BPF_REG_0].map_uid = meta.map_uid; 10210 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE | ret_flag; 10211 if (!type_may_be_null(ret_type) && 10212 btf_record_has_field(meta.map_ptr->record, BPF_SPIN_LOCK)) { 10213 regs[BPF_REG_0].id = ++env->id_gen; 10214 } 10215 break; 10216 case RET_PTR_TO_SOCKET: 10217 mark_reg_known_zero(env, regs, BPF_REG_0); 10218 regs[BPF_REG_0].type = PTR_TO_SOCKET | ret_flag; 10219 break; 10220 case RET_PTR_TO_SOCK_COMMON: 10221 mark_reg_known_zero(env, regs, BPF_REG_0); 10222 regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON | ret_flag; 10223 break; 10224 case RET_PTR_TO_TCP_SOCK: 10225 mark_reg_known_zero(env, regs, BPF_REG_0); 10226 regs[BPF_REG_0].type = PTR_TO_TCP_SOCK | ret_flag; 10227 break; 10228 case RET_PTR_TO_MEM: 10229 mark_reg_known_zero(env, regs, BPF_REG_0); 10230 regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag; 10231 regs[BPF_REG_0].mem_size = meta.mem_size; 10232 break; 10233 case RET_PTR_TO_MEM_OR_BTF_ID: 10234 { 10235 const struct btf_type *t; 10236 10237 mark_reg_known_zero(env, regs, BPF_REG_0); 10238 t = btf_type_skip_modifiers(meta.ret_btf, meta.ret_btf_id, NULL); 10239 if (!btf_type_is_struct(t)) { 10240 u32 tsize; 10241 const struct btf_type *ret; 10242 const char *tname; 10243 10244 /* resolve the type size of ksym. */ 10245 ret = btf_resolve_size(meta.ret_btf, t, &tsize); 10246 if (IS_ERR(ret)) { 10247 tname = btf_name_by_offset(meta.ret_btf, t->name_off); 10248 verbose(env, "unable to resolve the size of type '%s': %ld\n", 10249 tname, PTR_ERR(ret)); 10250 return -EINVAL; 10251 } 10252 regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag; 10253 regs[BPF_REG_0].mem_size = tsize; 10254 } else { 10255 if (returns_cpu_specific_alloc_ptr) { 10256 regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC | MEM_RCU; 10257 } else { 10258 /* MEM_RDONLY may be carried from ret_flag, but it 10259 * doesn't apply on PTR_TO_BTF_ID. Fold it, otherwise 10260 * it will confuse the check of PTR_TO_BTF_ID in 10261 * check_mem_access(). 
10262 */ 10263 ret_flag &= ~MEM_RDONLY; 10264 regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag; 10265 } 10266 10267 regs[BPF_REG_0].btf = meta.ret_btf; 10268 regs[BPF_REG_0].btf_id = meta.ret_btf_id; 10269 } 10270 break; 10271 } 10272 case RET_PTR_TO_BTF_ID: 10273 { 10274 struct btf *ret_btf; 10275 int ret_btf_id; 10276 10277 mark_reg_known_zero(env, regs, BPF_REG_0); 10278 regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag; 10279 if (func_id == BPF_FUNC_kptr_xchg) { 10280 ret_btf = meta.kptr_field->kptr.btf; 10281 ret_btf_id = meta.kptr_field->kptr.btf_id; 10282 if (!btf_is_kernel(ret_btf)) { 10283 regs[BPF_REG_0].type |= MEM_ALLOC; 10284 if (meta.kptr_field->type == BPF_KPTR_PERCPU) 10285 regs[BPF_REG_0].type |= MEM_PERCPU; 10286 } 10287 } else { 10288 if (fn->ret_btf_id == BPF_PTR_POISON) { 10289 verbose(env, "verifier internal error:"); 10290 verbose(env, "func %s has non-overwritten BPF_PTR_POISON return type\n", 10291 func_id_name(func_id)); 10292 return -EINVAL; 10293 } 10294 ret_btf = btf_vmlinux; 10295 ret_btf_id = *fn->ret_btf_id; 10296 } 10297 if (ret_btf_id == 0) { 10298 verbose(env, "invalid return type %u of func %s#%d\n", 10299 base_type(ret_type), func_id_name(func_id), 10300 func_id); 10301 return -EINVAL; 10302 } 10303 regs[BPF_REG_0].btf = ret_btf; 10304 regs[BPF_REG_0].btf_id = ret_btf_id; 10305 break; 10306 } 10307 default: 10308 verbose(env, "unknown return type %u of func %s#%d\n", 10309 base_type(ret_type), func_id_name(func_id), func_id); 10310 return -EINVAL; 10311 } 10312 10313 if (type_may_be_null(regs[BPF_REG_0].type)) 10314 regs[BPF_REG_0].id = ++env->id_gen; 10315 10316 if (helper_multiple_ref_obj_use(func_id, meta.map_ptr)) { 10317 verbose(env, "verifier internal error: func %s#%d sets ref_obj_id more than once\n", 10318 func_id_name(func_id), func_id); 10319 return -EFAULT; 10320 } 10321 10322 if (is_dynptr_ref_function(func_id)) 10323 regs[BPF_REG_0].dynptr_id = meta.dynptr_id; 10324 10325 if (is_ptr_cast_function(func_id) || is_dynptr_ref_function(func_id)) { 10326 /* For release_reference() */ 10327 regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id; 10328 } else if (is_acquire_function(func_id, meta.map_ptr)) { 10329 int id = acquire_reference_state(env, insn_idx); 10330 10331 if (id < 0) 10332 return id; 10333 /* For mark_ptr_or_null_reg() */ 10334 regs[BPF_REG_0].id = id; 10335 /* For release_reference() */ 10336 regs[BPF_REG_0].ref_obj_id = id; 10337 } 10338 10339 err = do_refine_retval_range(env, regs, fn->ret_type, func_id, &meta); 10340 if (err) 10341 return err; 10342 10343 err = check_map_func_compatibility(env, meta.map_ptr, func_id); 10344 if (err) 10345 return err; 10346 10347 if ((func_id == BPF_FUNC_get_stack || 10348 func_id == BPF_FUNC_get_task_stack) && 10349 !env->prog->has_callchain_buf) { 10350 const char *err_str; 10351 10352 #ifdef CONFIG_PERF_EVENTS 10353 err = get_callchain_buffers(sysctl_perf_event_max_stack); 10354 err_str = "cannot get callchain buffer for func %s#%d\n"; 10355 #else 10356 err = -ENOTSUPP; 10357 err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n"; 10358 #endif 10359 if (err) { 10360 verbose(env, err_str, func_id_name(func_id), func_id); 10361 return err; 10362 } 10363 10364 env->prog->has_callchain_buf = true; 10365 } 10366 10367 if (func_id == BPF_FUNC_get_stackid || func_id == BPF_FUNC_get_stack) 10368 env->prog->call_get_stack = true; 10369 10370 if (func_id == BPF_FUNC_get_func_ip) { 10371 if (check_get_func_ip(env)) 10372 return -ENOTSUPP; 10373 env->prog->call_get_func_ip = true; 10374 } 10375 10376 if 
(changes_data) 10377 clear_all_pkt_pointers(env); 10378 return 0; 10379 } 10380 10381 /* mark_btf_func_reg_size() is used when the reg size is determined by 10382 * the BTF func_proto's return value size and argument. 10383 */ 10384 static void mark_btf_func_reg_size(struct bpf_verifier_env *env, u32 regno, 10385 size_t reg_size) 10386 { 10387 struct bpf_reg_state *reg = &cur_regs(env)[regno]; 10388 10389 if (regno == BPF_REG_0) { 10390 /* Function return value */ 10391 reg->live |= REG_LIVE_WRITTEN; 10392 reg->subreg_def = reg_size == sizeof(u64) ? 10393 DEF_NOT_SUBREG : env->insn_idx + 1; 10394 } else { 10395 /* Function argument */ 10396 if (reg_size == sizeof(u64)) { 10397 mark_insn_zext(env, reg); 10398 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); 10399 } else { 10400 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ32); 10401 } 10402 } 10403 } 10404 10405 static bool is_kfunc_acquire(struct bpf_kfunc_call_arg_meta *meta) 10406 { 10407 return meta->kfunc_flags & KF_ACQUIRE; 10408 } 10409 10410 static bool is_kfunc_release(struct bpf_kfunc_call_arg_meta *meta) 10411 { 10412 return meta->kfunc_flags & KF_RELEASE; 10413 } 10414 10415 static bool is_kfunc_trusted_args(struct bpf_kfunc_call_arg_meta *meta) 10416 { 10417 return (meta->kfunc_flags & KF_TRUSTED_ARGS) || is_kfunc_release(meta); 10418 } 10419 10420 static bool is_kfunc_sleepable(struct bpf_kfunc_call_arg_meta *meta) 10421 { 10422 return meta->kfunc_flags & KF_SLEEPABLE; 10423 } 10424 10425 static bool is_kfunc_destructive(struct bpf_kfunc_call_arg_meta *meta) 10426 { 10427 return meta->kfunc_flags & KF_DESTRUCTIVE; 10428 } 10429 10430 static bool is_kfunc_rcu(struct bpf_kfunc_call_arg_meta *meta) 10431 { 10432 return meta->kfunc_flags & KF_RCU; 10433 } 10434 10435 static bool is_kfunc_rcu_protected(struct bpf_kfunc_call_arg_meta *meta) 10436 { 10437 return meta->kfunc_flags & KF_RCU_PROTECTED; 10438 } 10439 10440 static bool __kfunc_param_match_suffix(const struct btf *btf, 10441 const struct btf_param *arg, 10442 const char *suffix) 10443 { 10444 int suffix_len = strlen(suffix), len; 10445 const char *param_name; 10446 10447 /* In the future, this can be ported to use BTF tagging */ 10448 param_name = btf_name_by_offset(btf, arg->name_off); 10449 if (str_is_empty(param_name)) 10450 return false; 10451 len = strlen(param_name); 10452 if (len < suffix_len) 10453 return false; 10454 param_name += len - suffix_len; 10455 return !strncmp(param_name, suffix, suffix_len); 10456 } 10457 10458 static bool is_kfunc_arg_mem_size(const struct btf *btf, 10459 const struct btf_param *arg, 10460 const struct bpf_reg_state *reg) 10461 { 10462 const struct btf_type *t; 10463 10464 t = btf_type_skip_modifiers(btf, arg->type, NULL); 10465 if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE) 10466 return false; 10467 10468 return __kfunc_param_match_suffix(btf, arg, "__sz"); 10469 } 10470 10471 static bool is_kfunc_arg_const_mem_size(const struct btf *btf, 10472 const struct btf_param *arg, 10473 const struct bpf_reg_state *reg) 10474 { 10475 const struct btf_type *t; 10476 10477 t = btf_type_skip_modifiers(btf, arg->type, NULL); 10478 if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE) 10479 return false; 10480 10481 return __kfunc_param_match_suffix(btf, arg, "__szk"); 10482 } 10483 10484 static bool is_kfunc_arg_optional(const struct btf *btf, const struct btf_param *arg) 10485 { 10486 return __kfunc_param_match_suffix(btf, arg, "__opt"); 10487 } 10488 10489 static bool is_kfunc_arg_constant(const struct btf *btf, const 
struct btf_param *arg) 10490 { 10491 return __kfunc_param_match_suffix(btf, arg, "__k"); 10492 } 10493 10494 static bool is_kfunc_arg_ignore(const struct btf *btf, const struct btf_param *arg) 10495 { 10496 return __kfunc_param_match_suffix(btf, arg, "__ign"); 10497 } 10498 10499 static bool is_kfunc_arg_alloc_obj(const struct btf *btf, const struct btf_param *arg) 10500 { 10501 return __kfunc_param_match_suffix(btf, arg, "__alloc"); 10502 } 10503 10504 static bool is_kfunc_arg_uninit(const struct btf *btf, const struct btf_param *arg) 10505 { 10506 return __kfunc_param_match_suffix(btf, arg, "__uninit"); 10507 } 10508 10509 static bool is_kfunc_arg_refcounted_kptr(const struct btf *btf, const struct btf_param *arg) 10510 { 10511 return __kfunc_param_match_suffix(btf, arg, "__refcounted_kptr"); 10512 } 10513 10514 static bool is_kfunc_arg_nullable(const struct btf *btf, const struct btf_param *arg) 10515 { 10516 return __kfunc_param_match_suffix(btf, arg, "__nullable"); 10517 } 10518 10519 static bool is_kfunc_arg_const_str(const struct btf *btf, const struct btf_param *arg) 10520 { 10521 return __kfunc_param_match_suffix(btf, arg, "__str"); 10522 } 10523 10524 static bool is_kfunc_arg_scalar_with_name(const struct btf *btf, 10525 const struct btf_param *arg, 10526 const char *name) 10527 { 10528 int len, target_len = strlen(name); 10529 const char *param_name; 10530 10531 param_name = btf_name_by_offset(btf, arg->name_off); 10532 if (str_is_empty(param_name)) 10533 return false; 10534 len = strlen(param_name); 10535 if (len != target_len) 10536 return false; 10537 if (strcmp(param_name, name)) 10538 return false; 10539 10540 return true; 10541 } 10542 10543 enum { 10544 KF_ARG_DYNPTR_ID, 10545 KF_ARG_LIST_HEAD_ID, 10546 KF_ARG_LIST_NODE_ID, 10547 KF_ARG_RB_ROOT_ID, 10548 KF_ARG_RB_NODE_ID, 10549 }; 10550 10551 BTF_ID_LIST(kf_arg_btf_ids) 10552 BTF_ID(struct, bpf_dynptr_kern) 10553 BTF_ID(struct, bpf_list_head) 10554 BTF_ID(struct, bpf_list_node) 10555 BTF_ID(struct, bpf_rb_root) 10556 BTF_ID(struct, bpf_rb_node) 10557 10558 static bool __is_kfunc_ptr_arg_type(const struct btf *btf, 10559 const struct btf_param *arg, int type) 10560 { 10561 const struct btf_type *t; 10562 u32 res_id; 10563 10564 t = btf_type_skip_modifiers(btf, arg->type, NULL); 10565 if (!t) 10566 return false; 10567 if (!btf_type_is_ptr(t)) 10568 return false; 10569 t = btf_type_skip_modifiers(btf, t->type, &res_id); 10570 if (!t) 10571 return false; 10572 return btf_types_are_same(btf, res_id, btf_vmlinux, kf_arg_btf_ids[type]); 10573 } 10574 10575 static bool is_kfunc_arg_dynptr(const struct btf *btf, const struct btf_param *arg) 10576 { 10577 return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_DYNPTR_ID); 10578 } 10579 10580 static bool is_kfunc_arg_list_head(const struct btf *btf, const struct btf_param *arg) 10581 { 10582 return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_LIST_HEAD_ID); 10583 } 10584 10585 static bool is_kfunc_arg_list_node(const struct btf *btf, const struct btf_param *arg) 10586 { 10587 return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_LIST_NODE_ID); 10588 } 10589 10590 static bool is_kfunc_arg_rbtree_root(const struct btf *btf, const struct btf_param *arg) 10591 { 10592 return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_RB_ROOT_ID); 10593 } 10594 10595 static bool is_kfunc_arg_rbtree_node(const struct btf *btf, const struct btf_param *arg) 10596 { 10597 return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_RB_NODE_ID); 10598 } 10599 10600 static bool is_kfunc_arg_callback(struct bpf_verifier_env *env, const struct 
btf *btf, 10601 const struct btf_param *arg) 10602 { 10603 const struct btf_type *t; 10604 10605 t = btf_type_resolve_func_ptr(btf, arg->type, NULL); 10606 if (!t) 10607 return false; 10608 10609 return true; 10610 } 10611 10612 /* Returns true if struct is composed of scalars, 4 levels of nesting allowed */ 10613 static bool __btf_type_is_scalar_struct(struct bpf_verifier_env *env, 10614 const struct btf *btf, 10615 const struct btf_type *t, int rec) 10616 { 10617 const struct btf_type *member_type; 10618 const struct btf_member *member; 10619 u32 i; 10620 10621 if (!btf_type_is_struct(t)) 10622 return false; 10623 10624 for_each_member(i, t, member) { 10625 const struct btf_array *array; 10626 10627 member_type = btf_type_skip_modifiers(btf, member->type, NULL); 10628 if (btf_type_is_struct(member_type)) { 10629 if (rec >= 3) { 10630 verbose(env, "max struct nesting depth exceeded\n"); 10631 return false; 10632 } 10633 if (!__btf_type_is_scalar_struct(env, btf, member_type, rec + 1)) 10634 return false; 10635 continue; 10636 } 10637 if (btf_type_is_array(member_type)) { 10638 array = btf_array(member_type); 10639 if (!array->nelems) 10640 return false; 10641 member_type = btf_type_skip_modifiers(btf, array->type, NULL); 10642 if (!btf_type_is_scalar(member_type)) 10643 return false; 10644 continue; 10645 } 10646 if (!btf_type_is_scalar(member_type)) 10647 return false; 10648 } 10649 return true; 10650 } 10651 10652 enum kfunc_ptr_arg_type { 10653 KF_ARG_PTR_TO_CTX, 10654 KF_ARG_PTR_TO_ALLOC_BTF_ID, /* Allocated object */ 10655 KF_ARG_PTR_TO_REFCOUNTED_KPTR, /* Refcounted local kptr */ 10656 KF_ARG_PTR_TO_DYNPTR, 10657 KF_ARG_PTR_TO_ITER, 10658 KF_ARG_PTR_TO_LIST_HEAD, 10659 KF_ARG_PTR_TO_LIST_NODE, 10660 KF_ARG_PTR_TO_BTF_ID, /* Also covers reg2btf_ids conversions */ 10661 KF_ARG_PTR_TO_MEM, 10662 KF_ARG_PTR_TO_MEM_SIZE, /* Size derived from next argument, skip it */ 10663 KF_ARG_PTR_TO_CALLBACK, 10664 KF_ARG_PTR_TO_RB_ROOT, 10665 KF_ARG_PTR_TO_RB_NODE, 10666 KF_ARG_PTR_TO_NULL, 10667 KF_ARG_PTR_TO_CONST_STR, 10668 }; 10669 10670 enum special_kfunc_type { 10671 KF_bpf_obj_new_impl, 10672 KF_bpf_obj_drop_impl, 10673 KF_bpf_refcount_acquire_impl, 10674 KF_bpf_list_push_front_impl, 10675 KF_bpf_list_push_back_impl, 10676 KF_bpf_list_pop_front, 10677 KF_bpf_list_pop_back, 10678 KF_bpf_cast_to_kern_ctx, 10679 KF_bpf_rdonly_cast, 10680 KF_bpf_rcu_read_lock, 10681 KF_bpf_rcu_read_unlock, 10682 KF_bpf_rbtree_remove, 10683 KF_bpf_rbtree_add_impl, 10684 KF_bpf_rbtree_first, 10685 KF_bpf_dynptr_from_skb, 10686 KF_bpf_dynptr_from_xdp, 10687 KF_bpf_dynptr_slice, 10688 KF_bpf_dynptr_slice_rdwr, 10689 KF_bpf_dynptr_clone, 10690 KF_bpf_percpu_obj_new_impl, 10691 KF_bpf_percpu_obj_drop_impl, 10692 KF_bpf_throw, 10693 KF_bpf_iter_css_task_new, 10694 }; 10695 10696 BTF_SET_START(special_kfunc_set) 10697 BTF_ID(func, bpf_obj_new_impl) 10698 BTF_ID(func, bpf_obj_drop_impl) 10699 BTF_ID(func, bpf_refcount_acquire_impl) 10700 BTF_ID(func, bpf_list_push_front_impl) 10701 BTF_ID(func, bpf_list_push_back_impl) 10702 BTF_ID(func, bpf_list_pop_front) 10703 BTF_ID(func, bpf_list_pop_back) 10704 BTF_ID(func, bpf_cast_to_kern_ctx) 10705 BTF_ID(func, bpf_rdonly_cast) 10706 BTF_ID(func, bpf_rbtree_remove) 10707 BTF_ID(func, bpf_rbtree_add_impl) 10708 BTF_ID(func, bpf_rbtree_first) 10709 BTF_ID(func, bpf_dynptr_from_skb) 10710 BTF_ID(func, bpf_dynptr_from_xdp) 10711 BTF_ID(func, bpf_dynptr_slice) 10712 BTF_ID(func, bpf_dynptr_slice_rdwr) 10713 BTF_ID(func, bpf_dynptr_clone) 10714 BTF_ID(func, bpf_percpu_obj_new_impl) 
10715 BTF_ID(func, bpf_percpu_obj_drop_impl)
10716 BTF_ID(func, bpf_throw)
10717 #ifdef CONFIG_CGROUPS
10718 BTF_ID(func, bpf_iter_css_task_new)
10719 #endif
10720 BTF_SET_END(special_kfunc_set)
10721
10722 BTF_ID_LIST(special_kfunc_list)
10723 BTF_ID(func, bpf_obj_new_impl)
10724 BTF_ID(func, bpf_obj_drop_impl)
10725 BTF_ID(func, bpf_refcount_acquire_impl)
10726 BTF_ID(func, bpf_list_push_front_impl)
10727 BTF_ID(func, bpf_list_push_back_impl)
10728 BTF_ID(func, bpf_list_pop_front)
10729 BTF_ID(func, bpf_list_pop_back)
10730 BTF_ID(func, bpf_cast_to_kern_ctx)
10731 BTF_ID(func, bpf_rdonly_cast)
10732 BTF_ID(func, bpf_rcu_read_lock)
10733 BTF_ID(func, bpf_rcu_read_unlock)
10734 BTF_ID(func, bpf_rbtree_remove)
10735 BTF_ID(func, bpf_rbtree_add_impl)
10736 BTF_ID(func, bpf_rbtree_first)
10737 BTF_ID(func, bpf_dynptr_from_skb)
10738 BTF_ID(func, bpf_dynptr_from_xdp)
10739 BTF_ID(func, bpf_dynptr_slice)
10740 BTF_ID(func, bpf_dynptr_slice_rdwr)
10741 BTF_ID(func, bpf_dynptr_clone)
10742 BTF_ID(func, bpf_percpu_obj_new_impl)
10743 BTF_ID(func, bpf_percpu_obj_drop_impl)
10744 BTF_ID(func, bpf_throw)
10745 #ifdef CONFIG_CGROUPS
10746 BTF_ID(func, bpf_iter_css_task_new)
10747 #else
10748 BTF_ID_UNUSED
10749 #endif
10750
10751 static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta)
10752 {
10753 if (meta->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl] &&
10754 meta->arg_owning_ref) {
10755 return false;
10756 }
10757
10758 return meta->kfunc_flags & KF_RET_NULL;
10759 }
10760
10761 static bool is_kfunc_bpf_rcu_read_lock(struct bpf_kfunc_call_arg_meta *meta)
10762 {
10763 return meta->func_id == special_kfunc_list[KF_bpf_rcu_read_lock];
10764 }
10765
10766 static bool is_kfunc_bpf_rcu_read_unlock(struct bpf_kfunc_call_arg_meta *meta)
10767 {
10768 return meta->func_id == special_kfunc_list[KF_bpf_rcu_read_unlock];
10769 }
10770
10771 static enum kfunc_ptr_arg_type
10772 get_kfunc_ptr_arg_type(struct bpf_verifier_env *env,
10773 struct bpf_kfunc_call_arg_meta *meta,
10774 const struct btf_type *t, const struct btf_type *ref_t,
10775 const char *ref_tname, const struct btf_param *args,
10776 int argno, int nargs)
10777 {
10778 u32 regno = argno + 1;
10779 struct bpf_reg_state *regs = cur_regs(env);
10780 struct bpf_reg_state *reg = &regs[regno];
10781 bool arg_mem_size = false;
10782
10783 if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx])
10784 return KF_ARG_PTR_TO_CTX;
10785
10786 /* In this function, we verify the kfunc's BTF as per the argument type,
10787 * leaving the rest of the verification with respect to the register
10788 * type to our caller. When a set of conditions hold in the BTF type of
10789 * arguments, we resolve it to a known kfunc_ptr_arg_type.
10790 */
10791 if (btf_get_prog_ctx_type(&env->log, meta->btf, t, resolve_prog_type(env->prog), argno))
10792 return KF_ARG_PTR_TO_CTX;
10793
10794 if (is_kfunc_arg_alloc_obj(meta->btf, &args[argno]))
10795 return KF_ARG_PTR_TO_ALLOC_BTF_ID;
10796
10797 if (is_kfunc_arg_refcounted_kptr(meta->btf, &args[argno]))
10798 return KF_ARG_PTR_TO_REFCOUNTED_KPTR;
10799
10800 if (is_kfunc_arg_dynptr(meta->btf, &args[argno]))
10801 return KF_ARG_PTR_TO_DYNPTR;
10802
10803 if (is_kfunc_arg_iter(meta, argno))
10804 return KF_ARG_PTR_TO_ITER;
10805
10806 if (is_kfunc_arg_list_head(meta->btf, &args[argno]))
10807 return KF_ARG_PTR_TO_LIST_HEAD;
10808
10809 if (is_kfunc_arg_list_node(meta->btf, &args[argno]))
10810 return KF_ARG_PTR_TO_LIST_NODE;
10811
10812 if (is_kfunc_arg_rbtree_root(meta->btf, &args[argno]))
10813 return KF_ARG_PTR_TO_RB_ROOT;
10814
10815 if (is_kfunc_arg_rbtree_node(meta->btf, &args[argno]))
10816 return KF_ARG_PTR_TO_RB_NODE;
10817
10818 if (is_kfunc_arg_const_str(meta->btf, &args[argno]))
10819 return KF_ARG_PTR_TO_CONST_STR;
10820
10821 if ((base_type(reg->type) == PTR_TO_BTF_ID || reg2btf_ids[base_type(reg->type)])) {
10822 if (!btf_type_is_struct(ref_t)) {
10823 verbose(env, "kernel function %s args#%d pointer type %s %s is not supported\n",
10824 meta->func_name, argno, btf_type_str(ref_t), ref_tname);
10825 return -EINVAL;
10826 }
10827 return KF_ARG_PTR_TO_BTF_ID;
10828 }
10829
10830 if (is_kfunc_arg_callback(env, meta->btf, &args[argno]))
10831 return KF_ARG_PTR_TO_CALLBACK;
10832
10833 if (is_kfunc_arg_nullable(meta->btf, &args[argno]) && register_is_null(reg))
10834 return KF_ARG_PTR_TO_NULL;
10835
10836 if (argno + 1 < nargs &&
10837 (is_kfunc_arg_mem_size(meta->btf, &args[argno + 1], &regs[regno + 1]) ||
10838 is_kfunc_arg_const_mem_size(meta->btf, &args[argno + 1], &regs[regno + 1])))
10839 arg_mem_size = true;
10840
10841 /* This is the catch all argument type of register types supported by
10842 * check_helper_mem_access. However, we only allow it when the argument type
10843 * is a pointer to a scalar, or a struct composed (recursively) of scalars. When
10844 * arg_mem_size is true, the pointer can be void *.
10845 */
10846 if (!btf_type_is_scalar(ref_t) && !__btf_type_is_scalar_struct(env, meta->btf, ref_t, 0) &&
10847 (arg_mem_size ? !btf_type_is_void(ref_t) : 1)) {
10848 verbose(env, "arg#%d pointer type %s %s must point to %sscalar, or struct with scalar\n",
10849 argno, btf_type_str(ref_t), ref_tname, arg_mem_size ? "void, " : "");
10850 return -EINVAL;
10851 }
10852 return arg_mem_size ? KF_ARG_PTR_TO_MEM_SIZE : KF_ARG_PTR_TO_MEM;
10853 }
10854
10855 static int process_kf_arg_ptr_to_btf_id(struct bpf_verifier_env *env,
10856 struct bpf_reg_state *reg,
10857 const struct btf_type *ref_t,
10858 const char *ref_tname, u32 ref_id,
10859 struct bpf_kfunc_call_arg_meta *meta,
10860 int argno)
10861 {
10862 const struct btf_type *reg_ref_t;
10863 bool strict_type_match = false;
10864 const struct btf *reg_btf;
10865 const char *reg_ref_tname;
10866 u32 reg_ref_id;
10867
10868 if (base_type(reg->type) == PTR_TO_BTF_ID) {
10869 reg_btf = reg->btf;
10870 reg_ref_id = reg->btf_id;
10871 } else {
10872 reg_btf = btf_vmlinux;
10873 reg_ref_id = *reg2btf_ids[base_type(reg->type)];
10874 }
10875
10876 /* Enforce strict type matching for calls to kfuncs that are acquiring
10877 * or releasing a reference, or are no-cast aliases. We do _not_
10878 * enforce strict matching for plain KF_TRUSTED_ARGS kfuncs by default,
10879 * as we want to enable BPF programs to pass types that are bitwise
10880 * equivalent without forcing them to explicitly cast with something
10881 * like bpf_cast_to_kern_ctx().
10882 *
10883 * For example, say we had a type like the following:
10884 *
10885 * struct bpf_cpumask {
10886 * cpumask_t cpumask;
10887 * refcount_t usage;
10888 * };
10889 *
10890 * Note that as specified in <linux/cpumask.h>, cpumask_t is typedef'ed
10891 * to a struct cpumask, so it would be safe to pass a struct
10892 * bpf_cpumask * to a kfunc expecting a struct cpumask *.
10893 *
10894 * The philosophy here is similar to how we allow scalars of different
10895 * types to be passed to kfuncs as long as the size is the same. The
10896 * only difference here is that we're simply allowing
10897 * btf_struct_ids_match() to walk the struct at the 0th offset, and
10898 * resolve types.
10899 */
10900 if (is_kfunc_acquire(meta) ||
10901 (is_kfunc_release(meta) && reg->ref_obj_id) ||
10902 btf_type_ids_nocast_alias(&env->log, reg_btf, reg_ref_id, meta->btf, ref_id))
10903 strict_type_match = true;
10904
10905 WARN_ON_ONCE(is_kfunc_trusted_args(meta) && reg->off);
10906
10907 reg_ref_t = btf_type_skip_modifiers(reg_btf, reg_ref_id, &reg_ref_id);
10908 reg_ref_tname = btf_name_by_offset(reg_btf, reg_ref_t->name_off);
10909 if (!btf_struct_ids_match(&env->log, reg_btf, reg_ref_id, reg->off, meta->btf, ref_id, strict_type_match)) {
10910 verbose(env, "kernel function %s args#%d expected pointer to %s %s but R%d has a pointer to %s %s\n",
10911 meta->func_name, argno, btf_type_str(ref_t), ref_tname, argno + 1,
10912 btf_type_str(reg_ref_t), reg_ref_tname);
10913 return -EINVAL;
10914 }
10915 return 0;
10916 }
10917
10918 static int ref_set_non_owning(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
10919 {
10920 struct bpf_verifier_state *state = env->cur_state;
10921 struct btf_record *rec = reg_btf_record(reg);
10922
10923 if (!state->active_lock.ptr) {
10924 verbose(env, "verifier internal error: ref_set_non_owning w/o active lock\n");
10925 return -EFAULT;
10926 }
10927
10928 if (type_flag(reg->type) & NON_OWN_REF) {
10929 verbose(env, "verifier internal error: NON_OWN_REF already set\n");
10930 return -EFAULT;
10931 }
10932
10933 reg->type |= NON_OWN_REF;
10934 if (rec->refcount_off >= 0)
10935 reg->type |= MEM_RCU;
10936
10937 return 0;
10938 }
10939
10940 static int ref_convert_owning_non_owning(struct bpf_verifier_env *env, u32 ref_obj_id)
10941 {
10942 struct bpf_func_state *state, *unused;
10943 struct bpf_reg_state *reg;
10944 int i;
10945
10946 state = cur_func(env);
10947
10948 if (!ref_obj_id) {
10949 verbose(env, "verifier internal error: ref_obj_id is zero for "
10950 "owning -> non-owning conversion\n");
10951 return -EFAULT;
10952 }
10953
10954 for (i = 0; i < state->acquired_refs; i++) {
10955 if (state->refs[i].id != ref_obj_id)
10956 continue;
10957
10958 /* Clear ref_obj_id here so release_reference doesn't clobber
10959 * the whole reg
10960 */
10961 bpf_for_each_reg_in_vstate(env->cur_state, unused, reg, ({
10962 if (reg->ref_obj_id == ref_obj_id) {
10963 reg->ref_obj_id = 0;
10964 ref_set_non_owning(env, reg);
10965 }
10966 }));
10967 return 0;
10968 }
10969
10970 verbose(env, "verifier internal error: ref state missing for ref_obj_id\n");
10971 return -EFAULT;
10972 }
10973
10974 /* Implementation details:
10975 *
10976 * Each register points to some region of memory, which we define as an
10977 * allocation. Each allocation may embed a bpf_spin_lock which protects any
10978 * special BPF objects (bpf_list_head, bpf_rb_root, etc.) part of the same
10979 * allocation. The lock and the data it protects are colocated in the same
10980 * memory region.
10981 *
10982 * Hence, every time a register holds a pointer value pointing to such an
10983 * allocation, the verifier preserves a unique reg->id for it.
10984 *
10985 * The verifier remembers the lock 'ptr' and the lock 'id' whenever
10986 * bpf_spin_lock is called.
10987 *
10988 * To enable this, lock state in the verifier captures two values:
10989 * active_lock.ptr = Register's type specific pointer
10990 * active_lock.id = A unique ID for each register pointer value
10991 *
10992 * Currently, PTR_TO_MAP_VALUE and PTR_TO_BTF_ID | MEM_ALLOC are the two
10993 * supported register types.
10994 *
10995 * The active_lock.ptr in case of map values is the reg->map_ptr, and in case of
10996 * allocated objects is the reg->btf pointer.
10997 *
10998 * The active_lock.id is non-unique for maps supporting direct_value_addr, as we
10999 * can establish the provenance of the map value statically for each distinct
11000 * lookup into such maps. They always contain a single map value hence unique
11001 * IDs for each pseudo load pessimizes the algorithm and rejects valid programs.
11002 *
11003 * So, in case of global variables, which use array maps with max_entries = 1,
11004 * their active_lock.ptr becomes the map_ptr and id = 0 (since they all point
11005 * into the same map value as max_entries is 1, as described above).
11006 *
11007 * In case of inner map lookups, the inner map pointer has the same map_ptr as the
11008 * outer map pointer (in verifier context), but each lookup into an inner map
11009 * assigns a fresh reg->id to the lookup, so while lookups into distinct inner
11010 * maps from the same outer map share the same map_ptr as active_lock.ptr, they
11011 * will get a different reg->id assigned to each lookup, hence a different
11012 * active_lock.id.
11013 *
11014 * In case of allocated objects, active_lock.ptr is the reg->btf, and the
11015 * reg->id is a unique ID preserved after the NULL pointer check on the pointer
11016 * returned from bpf_obj_new. Each allocation receives a new reg->id.
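 *
 * As a rough illustration (a hypothetical BPF-side sketch, not code from this
 * file), assume a map value that embeds both the lock and a list head:
 *
 *   struct foo { int data; struct bpf_list_node node; };
 *   struct elem {
 *           struct bpf_spin_lock lock;
 *           struct bpf_list_head head __contains(foo, node);
 *   };
 *
 *   val = bpf_map_lookup_elem(&array, &key);
 *   if (!val)
 *           return 0;
 *   bpf_spin_lock(&val->lock);           // active_lock.ptr = reg->map_ptr,
 *                                        // active_lock.id  = reg->id
 *   n = bpf_list_pop_front(&val->head);  // must be under the same (ptr, id) lock
 *   bpf_spin_unlock(&val->lock);
 *   if (n)
 *           bpf_obj_drop(container_of(n, struct foo, node));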
11017 */ 11018 static int check_reg_allocation_locked(struct bpf_verifier_env *env, struct bpf_reg_state *reg) 11019 { 11020 void *ptr; 11021 u32 id; 11022 11023 switch ((int)reg->type) { 11024 case PTR_TO_MAP_VALUE: 11025 ptr = reg->map_ptr; 11026 break; 11027 case PTR_TO_BTF_ID | MEM_ALLOC: 11028 ptr = reg->btf; 11029 break; 11030 default: 11031 verbose(env, "verifier internal error: unknown reg type for lock check\n"); 11032 return -EFAULT; 11033 } 11034 id = reg->id; 11035 11036 if (!env->cur_state->active_lock.ptr) 11037 return -EINVAL; 11038 if (env->cur_state->active_lock.ptr != ptr || 11039 env->cur_state->active_lock.id != id) { 11040 verbose(env, "held lock and object are not in the same allocation\n"); 11041 return -EINVAL; 11042 } 11043 return 0; 11044 } 11045 11046 static bool is_bpf_list_api_kfunc(u32 btf_id) 11047 { 11048 return btf_id == special_kfunc_list[KF_bpf_list_push_front_impl] || 11049 btf_id == special_kfunc_list[KF_bpf_list_push_back_impl] || 11050 btf_id == special_kfunc_list[KF_bpf_list_pop_front] || 11051 btf_id == special_kfunc_list[KF_bpf_list_pop_back]; 11052 } 11053 11054 static bool is_bpf_rbtree_api_kfunc(u32 btf_id) 11055 { 11056 return btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl] || 11057 btf_id == special_kfunc_list[KF_bpf_rbtree_remove] || 11058 btf_id == special_kfunc_list[KF_bpf_rbtree_first]; 11059 } 11060 11061 static bool is_bpf_graph_api_kfunc(u32 btf_id) 11062 { 11063 return is_bpf_list_api_kfunc(btf_id) || is_bpf_rbtree_api_kfunc(btf_id) || 11064 btf_id == special_kfunc_list[KF_bpf_refcount_acquire_impl]; 11065 } 11066 11067 static bool is_sync_callback_calling_kfunc(u32 btf_id) 11068 { 11069 return btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl]; 11070 } 11071 11072 static bool is_bpf_throw_kfunc(struct bpf_insn *insn) 11073 { 11074 return bpf_pseudo_kfunc_call(insn) && insn->off == 0 && 11075 insn->imm == special_kfunc_list[KF_bpf_throw]; 11076 } 11077 11078 static bool is_rbtree_lock_required_kfunc(u32 btf_id) 11079 { 11080 return is_bpf_rbtree_api_kfunc(btf_id); 11081 } 11082 11083 static bool check_kfunc_is_graph_root_api(struct bpf_verifier_env *env, 11084 enum btf_field_type head_field_type, 11085 u32 kfunc_btf_id) 11086 { 11087 bool ret; 11088 11089 switch (head_field_type) { 11090 case BPF_LIST_HEAD: 11091 ret = is_bpf_list_api_kfunc(kfunc_btf_id); 11092 break; 11093 case BPF_RB_ROOT: 11094 ret = is_bpf_rbtree_api_kfunc(kfunc_btf_id); 11095 break; 11096 default: 11097 verbose(env, "verifier internal error: unexpected graph root argument type %s\n", 11098 btf_field_type_name(head_field_type)); 11099 return false; 11100 } 11101 11102 if (!ret) 11103 verbose(env, "verifier internal error: %s head arg for unknown kfunc\n", 11104 btf_field_type_name(head_field_type)); 11105 return ret; 11106 } 11107 11108 static bool check_kfunc_is_graph_node_api(struct bpf_verifier_env *env, 11109 enum btf_field_type node_field_type, 11110 u32 kfunc_btf_id) 11111 { 11112 bool ret; 11113 11114 switch (node_field_type) { 11115 case BPF_LIST_NODE: 11116 ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_front_impl] || 11117 kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_back_impl]); 11118 break; 11119 case BPF_RB_NODE: 11120 ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_remove] || 11121 kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl]); 11122 break; 11123 default: 11124 verbose(env, "verifier internal error: unexpected graph node argument type %s\n", 11125 btf_field_type_name(node_field_type)); 11126 return 
false; 11127 } 11128 11129 if (!ret) 11130 verbose(env, "verifier internal error: %s node arg for unknown kfunc\n", 11131 btf_field_type_name(node_field_type)); 11132 return ret; 11133 } 11134 11135 static int 11136 __process_kf_arg_ptr_to_graph_root(struct bpf_verifier_env *env, 11137 struct bpf_reg_state *reg, u32 regno, 11138 struct bpf_kfunc_call_arg_meta *meta, 11139 enum btf_field_type head_field_type, 11140 struct btf_field **head_field) 11141 { 11142 const char *head_type_name; 11143 struct btf_field *field; 11144 struct btf_record *rec; 11145 u32 head_off; 11146 11147 if (meta->btf != btf_vmlinux) { 11148 verbose(env, "verifier internal error: unexpected btf mismatch in kfunc call\n"); 11149 return -EFAULT; 11150 } 11151 11152 if (!check_kfunc_is_graph_root_api(env, head_field_type, meta->func_id)) 11153 return -EFAULT; 11154 11155 head_type_name = btf_field_type_name(head_field_type); 11156 if (!tnum_is_const(reg->var_off)) { 11157 verbose(env, 11158 "R%d doesn't have constant offset. %s has to be at the constant offset\n", 11159 regno, head_type_name); 11160 return -EINVAL; 11161 } 11162 11163 rec = reg_btf_record(reg); 11164 head_off = reg->off + reg->var_off.value; 11165 field = btf_record_find(rec, head_off, head_field_type); 11166 if (!field) { 11167 verbose(env, "%s not found at offset=%u\n", head_type_name, head_off); 11168 return -EINVAL; 11169 } 11170 11171 /* All functions require bpf_list_head to be protected using a bpf_spin_lock */ 11172 if (check_reg_allocation_locked(env, reg)) { 11173 verbose(env, "bpf_spin_lock at off=%d must be held for %s\n", 11174 rec->spin_lock_off, head_type_name); 11175 return -EINVAL; 11176 } 11177 11178 if (*head_field) { 11179 verbose(env, "verifier internal error: repeating %s arg\n", head_type_name); 11180 return -EFAULT; 11181 } 11182 *head_field = field; 11183 return 0; 11184 } 11185 11186 static int process_kf_arg_ptr_to_list_head(struct bpf_verifier_env *env, 11187 struct bpf_reg_state *reg, u32 regno, 11188 struct bpf_kfunc_call_arg_meta *meta) 11189 { 11190 return __process_kf_arg_ptr_to_graph_root(env, reg, regno, meta, BPF_LIST_HEAD, 11191 &meta->arg_list_head.field); 11192 } 11193 11194 static int process_kf_arg_ptr_to_rbtree_root(struct bpf_verifier_env *env, 11195 struct bpf_reg_state *reg, u32 regno, 11196 struct bpf_kfunc_call_arg_meta *meta) 11197 { 11198 return __process_kf_arg_ptr_to_graph_root(env, reg, regno, meta, BPF_RB_ROOT, 11199 &meta->arg_rbtree_root.field); 11200 } 11201 11202 static int 11203 __process_kf_arg_ptr_to_graph_node(struct bpf_verifier_env *env, 11204 struct bpf_reg_state *reg, u32 regno, 11205 struct bpf_kfunc_call_arg_meta *meta, 11206 enum btf_field_type head_field_type, 11207 enum btf_field_type node_field_type, 11208 struct btf_field **node_field) 11209 { 11210 const char *node_type_name; 11211 const struct btf_type *et, *t; 11212 struct btf_field *field; 11213 u32 node_off; 11214 11215 if (meta->btf != btf_vmlinux) { 11216 verbose(env, "verifier internal error: unexpected btf mismatch in kfunc call\n"); 11217 return -EFAULT; 11218 } 11219 11220 if (!check_kfunc_is_graph_node_api(env, node_field_type, meta->func_id)) 11221 return -EFAULT; 11222 11223 node_type_name = btf_field_type_name(node_field_type); 11224 if (!tnum_is_const(reg->var_off)) { 11225 verbose(env, 11226 "R%d doesn't have constant offset. 
%s has to be at the constant offset\n",
11227 regno, node_type_name);
11228 return -EINVAL;
11229 }
11230
11231 node_off = reg->off + reg->var_off.value;
11232 field = reg_find_field_offset(reg, node_off, node_field_type);
11233 if (!field || field->offset != node_off) {
11234 verbose(env, "%s not found at offset=%u\n", node_type_name, node_off);
11235 return -EINVAL;
11236 }
11237
11238 field = *node_field;
11239
11240 et = btf_type_by_id(field->graph_root.btf, field->graph_root.value_btf_id);
11241 t = btf_type_by_id(reg->btf, reg->btf_id);
11242 if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, 0, field->graph_root.btf,
11243 field->graph_root.value_btf_id, true)) {
11244 verbose(env, "operation on %s expects arg#1 %s at offset=%d "
11245 "in struct %s, but arg is at offset=%d in struct %s\n",
11246 btf_field_type_name(head_field_type),
11247 btf_field_type_name(node_field_type),
11248 field->graph_root.node_offset,
11249 btf_name_by_offset(field->graph_root.btf, et->name_off),
11250 node_off, btf_name_by_offset(reg->btf, t->name_off));
11251 return -EINVAL;
11252 }
11253 meta->arg_btf = reg->btf;
11254 meta->arg_btf_id = reg->btf_id;
11255
11256 if (node_off != field->graph_root.node_offset) {
11257 verbose(env, "arg#1 offset=%d, but expected %s at offset=%d in struct %s\n",
11258 node_off, btf_field_type_name(node_field_type),
11259 field->graph_root.node_offset,
11260 btf_name_by_offset(field->graph_root.btf, et->name_off));
11261 return -EINVAL;
11262 }
11263
11264 return 0;
11265 }
11266
11267 static int process_kf_arg_ptr_to_list_node(struct bpf_verifier_env *env,
11268 struct bpf_reg_state *reg, u32 regno,
11269 struct bpf_kfunc_call_arg_meta *meta)
11270 {
11271 return __process_kf_arg_ptr_to_graph_node(env, reg, regno, meta,
11272 BPF_LIST_HEAD, BPF_LIST_NODE,
11273 &meta->arg_list_head.field);
11274 }
11275
11276 static int process_kf_arg_ptr_to_rbtree_node(struct bpf_verifier_env *env,
11277 struct bpf_reg_state *reg, u32 regno,
11278 struct bpf_kfunc_call_arg_meta *meta)
11279 {
11280 return __process_kf_arg_ptr_to_graph_node(env, reg, regno, meta,
11281 BPF_RB_ROOT, BPF_RB_NODE,
11282 &meta->arg_rbtree_root.field);
11283 }
11284
11285 /*
11286 * css_task iter allowlist is needed to avoid deadlocking on css_set_lock.
11287 * LSM hooks and iters (both sleepable and non-sleepable) are safe.
11288 * Any sleepable progs are also safe since bpf_check_attach_target() enforces
11289 * that they can only be attached to specific hook points.
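 *
 * Illustrative sketch (hypothetical BPF fragment, not code from this file),
 * assuming 'css' was obtained from a trusted pointer:
 *
 *   SEC("lsm.s/file_open")              // sleepable LSM hook: allowed
 *   ...
 *   struct bpf_iter_css_task it;
 *   struct task_struct *task;
 *
 *   bpf_iter_css_task_new(&it, css, CSS_TASK_ITER_PROCS);
 *   while ((task = bpf_iter_css_task_next(&it)))
 *           ...;
 *   bpf_iter_css_task_destroy(&it);
 *
 * The same sequence in a non-sleepable tracing program that is not a
 * BPF_TRACE_ITER program is rejected by check_css_task_iter_allowlist() below.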
11290 */
11291 static bool check_css_task_iter_allowlist(struct bpf_verifier_env *env)
11292 {
11293 enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
11294
11295 switch (prog_type) {
11296 case BPF_PROG_TYPE_LSM:
11297 return true;
11298 case BPF_PROG_TYPE_TRACING:
11299 if (env->prog->expected_attach_type == BPF_TRACE_ITER)
11300 return true;
11301 fallthrough;
11302 default:
11303 return env->prog->aux->sleepable;
11304 }
11305 }
11306
11307 static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_arg_meta *meta,
11308 int insn_idx)
11309 {
11310 const char *func_name = meta->func_name, *ref_tname;
11311 const struct btf *btf = meta->btf;
11312 const struct btf_param *args;
11313 struct btf_record *rec;
11314 u32 i, nargs;
11315 int ret;
11316
11317 args = (const struct btf_param *)(meta->func_proto + 1);
11318 nargs = btf_type_vlen(meta->func_proto);
11319 if (nargs > MAX_BPF_FUNC_REG_ARGS) {
11320 verbose(env, "Function %s has %d > %d args\n", func_name, nargs,
11321 MAX_BPF_FUNC_REG_ARGS);
11322 return -EINVAL;
11323 }
11324
11325 /* Check that BTF function arguments match actual types that the
11326 * verifier sees.
11327 */
11328 for (i = 0; i < nargs; i++) {
11329 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[i + 1];
11330 const struct btf_type *t, *ref_t, *resolve_ret;
11331 enum bpf_arg_type arg_type = ARG_DONTCARE;
11332 u32 regno = i + 1, ref_id, type_size;
11333 bool is_ret_buf_sz = false;
11334 int kf_arg_type;
11335
11336 t = btf_type_skip_modifiers(btf, args[i].type, NULL);
11337
11338 if (is_kfunc_arg_ignore(btf, &args[i]))
11339 continue;
11340
11341 if (btf_type_is_scalar(t)) {
11342 if (reg->type != SCALAR_VALUE) {
11343 verbose(env, "R%d is not a scalar\n", regno);
11344 return -EINVAL;
11345 }
11346
11347 if (is_kfunc_arg_constant(meta->btf, &args[i])) {
11348 if (meta->arg_constant.found) {
11349 verbose(env, "verifier internal error: only one constant argument permitted\n");
11350 return -EFAULT;
11351 }
11352 if (!tnum_is_const(reg->var_off)) {
11353 verbose(env, "R%d must be a known constant\n", regno);
11354 return -EINVAL;
11355 }
11356 ret = mark_chain_precision(env, regno);
11357 if (ret < 0)
11358 return ret;
11359 meta->arg_constant.found = true;
11360 meta->arg_constant.value = reg->var_off.value;
11361 } else if (is_kfunc_arg_scalar_with_name(btf, &args[i], "rdonly_buf_size")) {
11362 meta->r0_rdonly = true;
11363 is_ret_buf_sz = true;
11364 } else if (is_kfunc_arg_scalar_with_name(btf, &args[i], "rdwr_buf_size")) {
11365 is_ret_buf_sz = true;
11366 }
11367
11368 if (is_ret_buf_sz) {
11369 if (meta->r0_size) {
11370 verbose(env, "2 or more rdonly/rdwr_buf_size parameters for kfunc");
11371 return -EINVAL;
11372 }
11373
11374 if (!tnum_is_const(reg->var_off)) {
11375 verbose(env, "R%d is not a const\n", regno);
11376 return -EINVAL;
11377 }
11378
11379 meta->r0_size = reg->var_off.value;
11380 ret = mark_chain_precision(env, regno);
11381 if (ret)
11382 return ret;
11383 }
11384 continue;
11385 }
11386
11387 if (!btf_type_is_ptr(t)) {
11388 verbose(env, "Unrecognized arg#%d type %s\n", i, btf_type_str(t));
11389 return -EINVAL;
11390 }
11391
11392 if ((is_kfunc_trusted_args(meta) || is_kfunc_rcu(meta)) &&
11393 (register_is_null(reg) || type_may_be_null(reg->type)) &&
11394 !is_kfunc_arg_nullable(meta->btf, &args[i])) {
11395 verbose(env, "Possibly NULL pointer passed to trusted arg%d\n", i);
11396 return -EACCES;
11397 }
11398
11399 if (reg->ref_obj_id) {
11400 if (is_kfunc_release(meta) && meta->ref_obj_id) {
11401 verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n", 11402 regno, reg->ref_obj_id, 11403 meta->ref_obj_id); 11404 return -EFAULT; 11405 } 11406 meta->ref_obj_id = reg->ref_obj_id; 11407 if (is_kfunc_release(meta)) 11408 meta->release_regno = regno; 11409 } 11410 11411 ref_t = btf_type_skip_modifiers(btf, t->type, &ref_id); 11412 ref_tname = btf_name_by_offset(btf, ref_t->name_off); 11413 11414 kf_arg_type = get_kfunc_ptr_arg_type(env, meta, t, ref_t, ref_tname, args, i, nargs); 11415 if (kf_arg_type < 0) 11416 return kf_arg_type; 11417 11418 switch (kf_arg_type) { 11419 case KF_ARG_PTR_TO_NULL: 11420 continue; 11421 case KF_ARG_PTR_TO_ALLOC_BTF_ID: 11422 case KF_ARG_PTR_TO_BTF_ID: 11423 if (!is_kfunc_trusted_args(meta) && !is_kfunc_rcu(meta)) 11424 break; 11425 11426 if (!is_trusted_reg(reg)) { 11427 if (!is_kfunc_rcu(meta)) { 11428 verbose(env, "R%d must be referenced or trusted\n", regno); 11429 return -EINVAL; 11430 } 11431 if (!is_rcu_reg(reg)) { 11432 verbose(env, "R%d must be a rcu pointer\n", regno); 11433 return -EINVAL; 11434 } 11435 } 11436 11437 fallthrough; 11438 case KF_ARG_PTR_TO_CTX: 11439 /* Trusted arguments have the same offset checks as release arguments */ 11440 arg_type |= OBJ_RELEASE; 11441 break; 11442 case KF_ARG_PTR_TO_DYNPTR: 11443 case KF_ARG_PTR_TO_ITER: 11444 case KF_ARG_PTR_TO_LIST_HEAD: 11445 case KF_ARG_PTR_TO_LIST_NODE: 11446 case KF_ARG_PTR_TO_RB_ROOT: 11447 case KF_ARG_PTR_TO_RB_NODE: 11448 case KF_ARG_PTR_TO_MEM: 11449 case KF_ARG_PTR_TO_MEM_SIZE: 11450 case KF_ARG_PTR_TO_CALLBACK: 11451 case KF_ARG_PTR_TO_REFCOUNTED_KPTR: 11452 case KF_ARG_PTR_TO_CONST_STR: 11453 /* Trusted by default */ 11454 break; 11455 default: 11456 WARN_ON_ONCE(1); 11457 return -EFAULT; 11458 } 11459 11460 if (is_kfunc_release(meta) && reg->ref_obj_id) 11461 arg_type |= OBJ_RELEASE; 11462 ret = check_func_arg_reg_off(env, reg, regno, arg_type); 11463 if (ret < 0) 11464 return ret; 11465 11466 switch (kf_arg_type) { 11467 case KF_ARG_PTR_TO_CTX: 11468 if (reg->type != PTR_TO_CTX) { 11469 verbose(env, "arg#%d expected pointer to ctx, but got %s\n", i, btf_type_str(t)); 11470 return -EINVAL; 11471 } 11472 11473 if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx]) { 11474 ret = get_kern_ctx_btf_id(&env->log, resolve_prog_type(env->prog)); 11475 if (ret < 0) 11476 return -EINVAL; 11477 meta->ret_btf_id = ret; 11478 } 11479 break; 11480 case KF_ARG_PTR_TO_ALLOC_BTF_ID: 11481 if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC)) { 11482 if (meta->func_id != special_kfunc_list[KF_bpf_obj_drop_impl]) { 11483 verbose(env, "arg#%d expected for bpf_obj_drop_impl()\n", i); 11484 return -EINVAL; 11485 } 11486 } else if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC | MEM_PERCPU)) { 11487 if (meta->func_id != special_kfunc_list[KF_bpf_percpu_obj_drop_impl]) { 11488 verbose(env, "arg#%d expected for bpf_percpu_obj_drop_impl()\n", i); 11489 return -EINVAL; 11490 } 11491 } else { 11492 verbose(env, "arg#%d expected pointer to allocated object\n", i); 11493 return -EINVAL; 11494 } 11495 if (!reg->ref_obj_id) { 11496 verbose(env, "allocated object must be referenced\n"); 11497 return -EINVAL; 11498 } 11499 if (meta->btf == btf_vmlinux) { 11500 meta->arg_btf = reg->btf; 11501 meta->arg_btf_id = reg->btf_id; 11502 } 11503 break; 11504 case KF_ARG_PTR_TO_DYNPTR: 11505 { 11506 enum bpf_arg_type dynptr_arg_type = ARG_PTR_TO_DYNPTR; 11507 int clone_ref_obj_id = 0; 11508 11509 if (reg->type != PTR_TO_STACK && 11510 reg->type != CONST_PTR_TO_DYNPTR) { 11511 verbose(env, 
"arg#%d expected pointer to stack or dynptr_ptr\n", i); 11512 return -EINVAL; 11513 } 11514 11515 if (reg->type == CONST_PTR_TO_DYNPTR) 11516 dynptr_arg_type |= MEM_RDONLY; 11517 11518 if (is_kfunc_arg_uninit(btf, &args[i])) 11519 dynptr_arg_type |= MEM_UNINIT; 11520 11521 if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_from_skb]) { 11522 dynptr_arg_type |= DYNPTR_TYPE_SKB; 11523 } else if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_from_xdp]) { 11524 dynptr_arg_type |= DYNPTR_TYPE_XDP; 11525 } else if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_clone] && 11526 (dynptr_arg_type & MEM_UNINIT)) { 11527 enum bpf_dynptr_type parent_type = meta->initialized_dynptr.type; 11528 11529 if (parent_type == BPF_DYNPTR_TYPE_INVALID) { 11530 verbose(env, "verifier internal error: no dynptr type for parent of clone\n"); 11531 return -EFAULT; 11532 } 11533 11534 dynptr_arg_type |= (unsigned int)get_dynptr_type_flag(parent_type); 11535 clone_ref_obj_id = meta->initialized_dynptr.ref_obj_id; 11536 if (dynptr_type_refcounted(parent_type) && !clone_ref_obj_id) { 11537 verbose(env, "verifier internal error: missing ref obj id for parent of clone\n"); 11538 return -EFAULT; 11539 } 11540 } 11541 11542 ret = process_dynptr_func(env, regno, insn_idx, dynptr_arg_type, clone_ref_obj_id); 11543 if (ret < 0) 11544 return ret; 11545 11546 if (!(dynptr_arg_type & MEM_UNINIT)) { 11547 int id = dynptr_id(env, reg); 11548 11549 if (id < 0) { 11550 verbose(env, "verifier internal error: failed to obtain dynptr id\n"); 11551 return id; 11552 } 11553 meta->initialized_dynptr.id = id; 11554 meta->initialized_dynptr.type = dynptr_get_type(env, reg); 11555 meta->initialized_dynptr.ref_obj_id = dynptr_ref_obj_id(env, reg); 11556 } 11557 11558 break; 11559 } 11560 case KF_ARG_PTR_TO_ITER: 11561 if (meta->func_id == special_kfunc_list[KF_bpf_iter_css_task_new]) { 11562 if (!check_css_task_iter_allowlist(env)) { 11563 verbose(env, "css_task_iter is only allowed in bpf_lsm, bpf_iter and sleepable progs\n"); 11564 return -EINVAL; 11565 } 11566 } 11567 ret = process_iter_arg(env, regno, insn_idx, meta); 11568 if (ret < 0) 11569 return ret; 11570 break; 11571 case KF_ARG_PTR_TO_LIST_HEAD: 11572 if (reg->type != PTR_TO_MAP_VALUE && 11573 reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) { 11574 verbose(env, "arg#%d expected pointer to map value or allocated object\n", i); 11575 return -EINVAL; 11576 } 11577 if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC) && !reg->ref_obj_id) { 11578 verbose(env, "allocated object must be referenced\n"); 11579 return -EINVAL; 11580 } 11581 ret = process_kf_arg_ptr_to_list_head(env, reg, regno, meta); 11582 if (ret < 0) 11583 return ret; 11584 break; 11585 case KF_ARG_PTR_TO_RB_ROOT: 11586 if (reg->type != PTR_TO_MAP_VALUE && 11587 reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) { 11588 verbose(env, "arg#%d expected pointer to map value or allocated object\n", i); 11589 return -EINVAL; 11590 } 11591 if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC) && !reg->ref_obj_id) { 11592 verbose(env, "allocated object must be referenced\n"); 11593 return -EINVAL; 11594 } 11595 ret = process_kf_arg_ptr_to_rbtree_root(env, reg, regno, meta); 11596 if (ret < 0) 11597 return ret; 11598 break; 11599 case KF_ARG_PTR_TO_LIST_NODE: 11600 if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) { 11601 verbose(env, "arg#%d expected pointer to allocated object\n", i); 11602 return -EINVAL; 11603 } 11604 if (!reg->ref_obj_id) { 11605 verbose(env, "allocated object must be referenced\n"); 11606 return -EINVAL; 11607 } 11608 ret = 
process_kf_arg_ptr_to_list_node(env, reg, regno, meta);
11609 if (ret < 0)
11610 return ret;
11611 break;
11612 case KF_ARG_PTR_TO_RB_NODE:
11613 if (meta->func_id == special_kfunc_list[KF_bpf_rbtree_remove]) {
11614 if (!type_is_non_owning_ref(reg->type) || reg->ref_obj_id) {
11615 verbose(env, "rbtree_remove node input must be non-owning ref\n");
11616 return -EINVAL;
11617 }
11618 if (in_rbtree_lock_required_cb(env)) {
11619 verbose(env, "rbtree_remove not allowed in rbtree cb\n");
11620 return -EINVAL;
11621 }
11622 } else {
11623 if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
11624 verbose(env, "arg#%d expected pointer to allocated object\n", i);
11625 return -EINVAL;
11626 }
11627 if (!reg->ref_obj_id) {
11628 verbose(env, "allocated object must be referenced\n");
11629 return -EINVAL;
11630 }
11631 }
11632
11633 ret = process_kf_arg_ptr_to_rbtree_node(env, reg, regno, meta);
11634 if (ret < 0)
11635 return ret;
11636 break;
11637 case KF_ARG_PTR_TO_BTF_ID:
11638 /* Only base_type is checked, further checks are done here */
11639 if ((base_type(reg->type) != PTR_TO_BTF_ID ||
11640 (bpf_type_has_unsafe_modifiers(reg->type) && !is_rcu_reg(reg))) &&
11641 !reg2btf_ids[base_type(reg->type)]) {
11642 verbose(env, "arg#%d is %s ", i, reg_type_str(env, reg->type));
11643 verbose(env, "expected %s or socket\n",
11644 reg_type_str(env, base_type(reg->type) |
11645 (type_flag(reg->type) & BPF_REG_TRUSTED_MODIFIERS)));
11646 return -EINVAL;
11647 }
11648 ret = process_kf_arg_ptr_to_btf_id(env, reg, ref_t, ref_tname, ref_id, meta, i);
11649 if (ret < 0)
11650 return ret;
11651 break;
11652 case KF_ARG_PTR_TO_MEM:
11653 resolve_ret = btf_resolve_size(btf, ref_t, &type_size);
11654 if (IS_ERR(resolve_ret)) {
11655 verbose(env, "arg#%d reference type('%s %s') size cannot be determined: %ld\n",
11656 i, btf_type_str(ref_t), ref_tname, PTR_ERR(resolve_ret));
11657 return -EINVAL;
11658 }
11659 ret = check_mem_reg(env, reg, regno, type_size);
11660 if (ret < 0)
11661 return ret;
11662 break;
11663 case KF_ARG_PTR_TO_MEM_SIZE:
11664 {
11665 struct bpf_reg_state *buff_reg = &regs[regno];
11666 const struct btf_param *buff_arg = &args[i];
11667 struct bpf_reg_state *size_reg = &regs[regno + 1];
11668 const struct btf_param *size_arg = &args[i + 1];
11669
11670 if (!register_is_null(buff_reg) || !is_kfunc_arg_optional(meta->btf, buff_arg)) {
11671 ret = check_kfunc_mem_size_reg(env, size_reg, regno + 1);
11672 if (ret < 0) {
11673 verbose(env, "arg#%d arg#%d memory, len pair leads to invalid memory access\n", i, i + 1);
11674 return ret;
11675 }
11676 }
11677
11678 if (is_kfunc_arg_const_mem_size(meta->btf, size_arg, size_reg)) {
11679 if (meta->arg_constant.found) {
11680 verbose(env, "verifier internal error: only one constant argument permitted\n");
11681 return -EFAULT;
11682 }
11683 if (!tnum_is_const(size_reg->var_off)) {
11684 verbose(env, "R%d must be a known constant\n", regno + 1);
11685 return -EINVAL;
11686 }
11687 meta->arg_constant.found = true;
11688 meta->arg_constant.value = size_reg->var_off.value;
11689 }
11690
11691 /* Skip next '__sz' or '__szk' argument */
11692 i++;
11693 break;
11694 }
11695 case KF_ARG_PTR_TO_CALLBACK:
11696 if (reg->type != PTR_TO_FUNC) {
11697 verbose(env, "arg%d expected pointer to func\n", i);
11698 return -EINVAL;
11699 }
11700 meta->subprogno = reg->subprogno;
11701 break;
11702 case KF_ARG_PTR_TO_REFCOUNTED_KPTR:
11703 if (!type_is_ptr_alloc_obj(reg->type)) {
11704 verbose(env, "arg#%d is neither owning nor non-owning ref\n", i);
11705 return -EINVAL;
11706 }
11707 if (!type_is_non_owning_ref(reg->type)) 11708 meta->arg_owning_ref = true; 11709 11710 rec = reg_btf_record(reg); 11711 if (!rec) { 11712 verbose(env, "verifier internal error: Couldn't find btf_record\n"); 11713 return -EFAULT; 11714 } 11715 11716 if (rec->refcount_off < 0) { 11717 verbose(env, "arg#%d doesn't point to a type with bpf_refcount field\n", i); 11718 return -EINVAL; 11719 } 11720 11721 meta->arg_btf = reg->btf; 11722 meta->arg_btf_id = reg->btf_id; 11723 break; 11724 case KF_ARG_PTR_TO_CONST_STR: 11725 if (reg->type != PTR_TO_MAP_VALUE) { 11726 verbose(env, "arg#%d doesn't point to a const string\n", i); 11727 return -EINVAL; 11728 } 11729 ret = check_reg_const_str(env, reg, regno); 11730 if (ret) 11731 return ret; 11732 break; 11733 } 11734 } 11735 11736 if (is_kfunc_release(meta) && !meta->release_regno) { 11737 verbose(env, "release kernel function %s expects refcounted PTR_TO_BTF_ID\n", 11738 func_name); 11739 return -EINVAL; 11740 } 11741 11742 return 0; 11743 } 11744 11745 static int fetch_kfunc_meta(struct bpf_verifier_env *env, 11746 struct bpf_insn *insn, 11747 struct bpf_kfunc_call_arg_meta *meta, 11748 const char **kfunc_name) 11749 { 11750 const struct btf_type *func, *func_proto; 11751 u32 func_id, *kfunc_flags; 11752 const char *func_name; 11753 struct btf *desc_btf; 11754 11755 if (kfunc_name) 11756 *kfunc_name = NULL; 11757 11758 if (!insn->imm) 11759 return -EINVAL; 11760 11761 desc_btf = find_kfunc_desc_btf(env, insn->off); 11762 if (IS_ERR(desc_btf)) 11763 return PTR_ERR(desc_btf); 11764 11765 func_id = insn->imm; 11766 func = btf_type_by_id(desc_btf, func_id); 11767 func_name = btf_name_by_offset(desc_btf, func->name_off); 11768 if (kfunc_name) 11769 *kfunc_name = func_name; 11770 func_proto = btf_type_by_id(desc_btf, func->type); 11771 11772 kfunc_flags = btf_kfunc_id_set_contains(desc_btf, func_id, env->prog); 11773 if (!kfunc_flags) { 11774 return -EACCES; 11775 } 11776 11777 memset(meta, 0, sizeof(*meta)); 11778 meta->btf = desc_btf; 11779 meta->func_id = func_id; 11780 meta->kfunc_flags = *kfunc_flags; 11781 meta->func_proto = func_proto; 11782 meta->func_name = func_name; 11783 11784 return 0; 11785 } 11786 11787 static int check_return_code(struct bpf_verifier_env *env, int regno); 11788 11789 static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, 11790 int *insn_idx_p) 11791 { 11792 const struct btf_type *t, *ptr_type; 11793 u32 i, nargs, ptr_type_id, release_ref_obj_id; 11794 struct bpf_reg_state *regs = cur_regs(env); 11795 const char *func_name, *ptr_type_name; 11796 bool sleepable, rcu_lock, rcu_unlock; 11797 struct bpf_kfunc_call_arg_meta meta; 11798 struct bpf_insn_aux_data *insn_aux; 11799 int err, insn_idx = *insn_idx_p; 11800 const struct btf_param *args; 11801 const struct btf_type *ret_t; 11802 struct btf *desc_btf; 11803 11804 /* skip for now, but return error when we find this in fixup_kfunc_call */ 11805 if (!insn->imm) 11806 return 0; 11807 11808 err = fetch_kfunc_meta(env, insn, &meta, &func_name); 11809 if (err == -EACCES && func_name) 11810 verbose(env, "calling kernel function %s is not allowed\n", func_name); 11811 if (err) 11812 return err; 11813 desc_btf = meta.btf; 11814 insn_aux = &env->insn_aux_data[insn_idx]; 11815 11816 insn_aux->is_iter_next = is_iter_next_kfunc(&meta); 11817 11818 if (is_kfunc_destructive(&meta) && !capable(CAP_SYS_BOOT)) { 11819 verbose(env, "destructive kfunc calls require CAP_SYS_BOOT capability\n"); 11820 return -EACCES; 11821 } 11822 11823 sleepable = 
is_kfunc_sleepable(&meta); 11824 if (sleepable && !env->prog->aux->sleepable) { 11825 verbose(env, "program must be sleepable to call sleepable kfunc %s\n", func_name); 11826 return -EACCES; 11827 } 11828 11829 /* Check the arguments */ 11830 err = check_kfunc_args(env, &meta, insn_idx); 11831 if (err < 0) 11832 return err; 11833 11834 if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) { 11835 err = push_callback_call(env, insn, insn_idx, meta.subprogno, 11836 set_rbtree_add_callback_state); 11837 if (err) { 11838 verbose(env, "kfunc %s#%d failed callback verification\n", 11839 func_name, meta.func_id); 11840 return err; 11841 } 11842 } 11843 11844 rcu_lock = is_kfunc_bpf_rcu_read_lock(&meta); 11845 rcu_unlock = is_kfunc_bpf_rcu_read_unlock(&meta); 11846 11847 if (env->cur_state->active_rcu_lock) { 11848 struct bpf_func_state *state; 11849 struct bpf_reg_state *reg; 11850 u32 clear_mask = (1 << STACK_SPILL) | (1 << STACK_ITER); 11851 11852 if (in_rbtree_lock_required_cb(env) && (rcu_lock || rcu_unlock)) { 11853 verbose(env, "Calling bpf_rcu_read_{lock,unlock} in unnecessary rbtree callback\n"); 11854 return -EACCES; 11855 } 11856 11857 if (rcu_lock) { 11858 verbose(env, "nested rcu read lock (kernel function %s)\n", func_name); 11859 return -EINVAL; 11860 } else if (rcu_unlock) { 11861 bpf_for_each_reg_in_vstate_mask(env->cur_state, state, reg, clear_mask, ({ 11862 if (reg->type & MEM_RCU) { 11863 reg->type &= ~(MEM_RCU | PTR_MAYBE_NULL); 11864 reg->type |= PTR_UNTRUSTED; 11865 } 11866 })); 11867 env->cur_state->active_rcu_lock = false; 11868 } else if (sleepable) { 11869 verbose(env, "kernel func %s is sleepable within rcu_read_lock region\n", func_name); 11870 return -EACCES; 11871 } 11872 } else if (rcu_lock) { 11873 env->cur_state->active_rcu_lock = true; 11874 } else if (rcu_unlock) { 11875 verbose(env, "unmatched rcu read unlock (kernel function %s)\n", func_name); 11876 return -EINVAL; 11877 } 11878 11879 /* In case of release function, we get register number of refcounted 11880 * PTR_TO_BTF_ID in bpf_kfunc_arg_meta, do the release now. 
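 *
 * For instance (illustrative only), for a call to the release kfunc
 * bpf_obj_drop(p), meta.release_regno identifies the register holding 'p';
 * release_reference() below then invalidates every copy of that referenced
 * pointer in the current verifier state.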
11881 */ 11882 if (meta.release_regno) { 11883 err = release_reference(env, regs[meta.release_regno].ref_obj_id); 11884 if (err) { 11885 verbose(env, "kfunc %s#%d reference has not been acquired before\n", 11886 func_name, meta.func_id); 11887 return err; 11888 } 11889 } 11890 11891 if (meta.func_id == special_kfunc_list[KF_bpf_list_push_front_impl] || 11892 meta.func_id == special_kfunc_list[KF_bpf_list_push_back_impl] || 11893 meta.func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) { 11894 release_ref_obj_id = regs[BPF_REG_2].ref_obj_id; 11895 insn_aux->insert_off = regs[BPF_REG_2].off; 11896 insn_aux->kptr_struct_meta = btf_find_struct_meta(meta.arg_btf, meta.arg_btf_id); 11897 err = ref_convert_owning_non_owning(env, release_ref_obj_id); 11898 if (err) { 11899 verbose(env, "kfunc %s#%d conversion of owning ref to non-owning failed\n", 11900 func_name, meta.func_id); 11901 return err; 11902 } 11903 11904 err = release_reference(env, release_ref_obj_id); 11905 if (err) { 11906 verbose(env, "kfunc %s#%d reference has not been acquired before\n", 11907 func_name, meta.func_id); 11908 return err; 11909 } 11910 } 11911 11912 if (meta.func_id == special_kfunc_list[KF_bpf_throw]) { 11913 if (!bpf_jit_supports_exceptions()) { 11914 verbose(env, "JIT does not support calling kfunc %s#%d\n", 11915 func_name, meta.func_id); 11916 return -ENOTSUPP; 11917 } 11918 env->seen_exception = true; 11919 11920 /* In the case of the default callback, the cookie value passed 11921 * to bpf_throw becomes the return value of the program. 11922 */ 11923 if (!env->exception_callback_subprog) { 11924 err = check_return_code(env, BPF_REG_1); 11925 if (err < 0) 11926 return err; 11927 } 11928 } 11929 11930 for (i = 0; i < CALLER_SAVED_REGS; i++) 11931 mark_reg_not_init(env, regs, caller_saved[i]); 11932 11933 /* Check return type */ 11934 t = btf_type_skip_modifiers(desc_btf, meta.func_proto->type, NULL); 11935 11936 if (is_kfunc_acquire(&meta) && !btf_type_is_struct_ptr(meta.btf, t)) { 11937 /* Only exception is bpf_obj_new_impl */ 11938 if (meta.btf != btf_vmlinux || 11939 (meta.func_id != special_kfunc_list[KF_bpf_obj_new_impl] && 11940 meta.func_id != special_kfunc_list[KF_bpf_percpu_obj_new_impl] && 11941 meta.func_id != special_kfunc_list[KF_bpf_refcount_acquire_impl])) { 11942 verbose(env, "acquire kernel function does not return PTR_TO_BTF_ID\n"); 11943 return -EINVAL; 11944 } 11945 } 11946 11947 if (btf_type_is_scalar(t)) { 11948 mark_reg_unknown(env, regs, BPF_REG_0); 11949 mark_btf_func_reg_size(env, BPF_REG_0, t->size); 11950 } else if (btf_type_is_ptr(t)) { 11951 ptr_type = btf_type_skip_modifiers(desc_btf, t->type, &ptr_type_id); 11952 11953 if (meta.btf == btf_vmlinux && btf_id_set_contains(&special_kfunc_set, meta.func_id)) { 11954 if (meta.func_id == special_kfunc_list[KF_bpf_obj_new_impl] || 11955 meta.func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) { 11956 struct btf_struct_meta *struct_meta; 11957 struct btf *ret_btf; 11958 u32 ret_btf_id; 11959 11960 if (meta.func_id == special_kfunc_list[KF_bpf_obj_new_impl] && !bpf_global_ma_set) 11961 return -ENOMEM; 11962 11963 if (meta.func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) { 11964 if (!bpf_global_percpu_ma_set) { 11965 mutex_lock(&bpf_percpu_ma_lock); 11966 if (!bpf_global_percpu_ma_set) { 11967 err = bpf_mem_alloc_init(&bpf_global_percpu_ma, 0, true); 11968 if (!err) 11969 bpf_global_percpu_ma_set = true; 11970 } 11971 mutex_unlock(&bpf_percpu_ma_lock); 11972 if (err) 11973 return err; 11974 } 11975 } 11976 11977 if 
(((u64)(u32)meta.arg_constant.value) != meta.arg_constant.value) { 11978 verbose(env, "local type ID argument must be in range [0, U32_MAX]\n"); 11979 return -EINVAL; 11980 } 11981 11982 ret_btf = env->prog->aux->btf; 11983 ret_btf_id = meta.arg_constant.value; 11984 11985 /* This may be NULL due to user not supplying a BTF */ 11986 if (!ret_btf) { 11987 verbose(env, "bpf_obj_new/bpf_percpu_obj_new requires prog BTF\n"); 11988 return -EINVAL; 11989 } 11990 11991 ret_t = btf_type_by_id(ret_btf, ret_btf_id); 11992 if (!ret_t || !__btf_type_is_struct(ret_t)) { 11993 verbose(env, "bpf_obj_new/bpf_percpu_obj_new type ID argument must be of a struct\n"); 11994 return -EINVAL; 11995 } 11996 11997 struct_meta = btf_find_struct_meta(ret_btf, ret_btf_id); 11998 if (meta.func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) { 11999 if (!__btf_type_is_scalar_struct(env, ret_btf, ret_t, 0)) { 12000 verbose(env, "bpf_percpu_obj_new type ID argument must be of a struct of scalars\n"); 12001 return -EINVAL; 12002 } 12003 12004 if (struct_meta) { 12005 verbose(env, "bpf_percpu_obj_new type ID argument must not contain special fields\n"); 12006 return -EINVAL; 12007 } 12008 } 12009 12010 mark_reg_known_zero(env, regs, BPF_REG_0); 12011 regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC; 12012 regs[BPF_REG_0].btf = ret_btf; 12013 regs[BPF_REG_0].btf_id = ret_btf_id; 12014 if (meta.func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) 12015 regs[BPF_REG_0].type |= MEM_PERCPU; 12016 12017 insn_aux->obj_new_size = ret_t->size; 12018 insn_aux->kptr_struct_meta = struct_meta; 12019 } else if (meta.func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl]) { 12020 mark_reg_known_zero(env, regs, BPF_REG_0); 12021 regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC; 12022 regs[BPF_REG_0].btf = meta.arg_btf; 12023 regs[BPF_REG_0].btf_id = meta.arg_btf_id; 12024 12025 insn_aux->kptr_struct_meta = 12026 btf_find_struct_meta(meta.arg_btf, 12027 meta.arg_btf_id); 12028 } else if (meta.func_id == special_kfunc_list[KF_bpf_list_pop_front] || 12029 meta.func_id == special_kfunc_list[KF_bpf_list_pop_back]) { 12030 struct btf_field *field = meta.arg_list_head.field; 12031 12032 mark_reg_graph_node(regs, BPF_REG_0, &field->graph_root); 12033 } else if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_remove] || 12034 meta.func_id == special_kfunc_list[KF_bpf_rbtree_first]) { 12035 struct btf_field *field = meta.arg_rbtree_root.field; 12036 12037 mark_reg_graph_node(regs, BPF_REG_0, &field->graph_root); 12038 } else if (meta.func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx]) { 12039 mark_reg_known_zero(env, regs, BPF_REG_0); 12040 regs[BPF_REG_0].type = PTR_TO_BTF_ID | PTR_TRUSTED; 12041 regs[BPF_REG_0].btf = desc_btf; 12042 regs[BPF_REG_0].btf_id = meta.ret_btf_id; 12043 } else if (meta.func_id == special_kfunc_list[KF_bpf_rdonly_cast]) { 12044 ret_t = btf_type_by_id(desc_btf, meta.arg_constant.value); 12045 if (!ret_t || !btf_type_is_struct(ret_t)) { 12046 verbose(env, 12047 "kfunc bpf_rdonly_cast type ID argument must be of a struct\n"); 12048 return -EINVAL; 12049 } 12050 12051 mark_reg_known_zero(env, regs, BPF_REG_0); 12052 regs[BPF_REG_0].type = PTR_TO_BTF_ID | PTR_UNTRUSTED; 12053 regs[BPF_REG_0].btf = desc_btf; 12054 regs[BPF_REG_0].btf_id = meta.arg_constant.value; 12055 } else if (meta.func_id == special_kfunc_list[KF_bpf_dynptr_slice] || 12056 meta.func_id == special_kfunc_list[KF_bpf_dynptr_slice_rdwr]) { 12057 enum bpf_type_flag type_flag = get_dynptr_type_flag(meta.initialized_dynptr.type); 12058 
12059 mark_reg_known_zero(env, regs, BPF_REG_0);
12060
12061 if (!meta.arg_constant.found) {
12062 verbose(env, "verifier internal error: bpf_dynptr_slice(_rdwr) no constant size\n");
12063 return -EFAULT;
12064 }
12065
12066 regs[BPF_REG_0].mem_size = meta.arg_constant.value;
12067
12068 /* PTR_MAYBE_NULL will be added when is_kfunc_ret_null is checked */
12069 regs[BPF_REG_0].type = PTR_TO_MEM | type_flag;
12070
12071 if (meta.func_id == special_kfunc_list[KF_bpf_dynptr_slice]) {
12072 regs[BPF_REG_0].type |= MEM_RDONLY;
12073 } else {
12074 /* this will set env->seen_direct_write to true */
12075 if (!may_access_direct_pkt_data(env, NULL, BPF_WRITE)) {
12076 verbose(env, "the prog does not allow writes to packet data\n");
12077 return -EINVAL;
12078 }
12079 }
12080
12081 if (!meta.initialized_dynptr.id) {
12082 verbose(env, "verifier internal error: no dynptr id\n");
12083 return -EFAULT;
12084 }
12085 regs[BPF_REG_0].dynptr_id = meta.initialized_dynptr.id;
12086
12087 /* we don't need to set BPF_REG_0's ref obj id
12088 * because packet slices are not refcounted (see
12089 * dynptr_type_refcounted)
12090 */
12091 } else {
12092 verbose(env, "kernel function %s unhandled dynamic return type\n",
12093 meta.func_name);
12094 return -EFAULT;
12095 }
12096 } else if (!__btf_type_is_struct(ptr_type)) {
12097 if (!meta.r0_size) {
12098 __u32 sz;
12099
12100 if (!IS_ERR(btf_resolve_size(desc_btf, ptr_type, &sz))) {
12101 meta.r0_size = sz;
12102 meta.r0_rdonly = true;
12103 }
12104 }
12105 if (!meta.r0_size) {
12106 ptr_type_name = btf_name_by_offset(desc_btf,
12107 ptr_type->name_off);
12108 verbose(env,
12109 "kernel function %s returns pointer type %s %s is not supported\n",
12110 func_name,
12111 btf_type_str(ptr_type),
12112 ptr_type_name);
12113 return -EINVAL;
12114 }
12115
12116 mark_reg_known_zero(env, regs, BPF_REG_0);
12117 regs[BPF_REG_0].type = PTR_TO_MEM;
12118 regs[BPF_REG_0].mem_size = meta.r0_size;
12119
12120 if (meta.r0_rdonly)
12121 regs[BPF_REG_0].type |= MEM_RDONLY;
12122
12123 /* Ensures we don't access the memory after a release_reference() */
12124 if (meta.ref_obj_id)
12125 regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
12126 } else {
12127 mark_reg_known_zero(env, regs, BPF_REG_0);
12128 regs[BPF_REG_0].btf = desc_btf;
12129 regs[BPF_REG_0].type = PTR_TO_BTF_ID;
12130 regs[BPF_REG_0].btf_id = ptr_type_id;
12131 }
12132
12133 if (is_kfunc_ret_null(&meta)) {
12134 regs[BPF_REG_0].type |= PTR_MAYBE_NULL;
12135 /* For mark_ptr_or_null_reg, see 93c230e3f5bd6 */
12136 regs[BPF_REG_0].id = ++env->id_gen;
12137 }
12138 mark_btf_func_reg_size(env, BPF_REG_0, sizeof(void *));
12139 if (is_kfunc_acquire(&meta)) {
12140 int id = acquire_reference_state(env, insn_idx);
12141
12142 if (id < 0)
12143 return id;
12144 if (is_kfunc_ret_null(&meta))
12145 regs[BPF_REG_0].id = id;
12146 regs[BPF_REG_0].ref_obj_id = id;
12147 } else if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_first]) {
12148 ref_set_non_owning(env, &regs[BPF_REG_0]);
12149 }
12150
12151 if (reg_may_point_to_spin_lock(&regs[BPF_REG_0]) && !regs[BPF_REG_0].id)
12152 regs[BPF_REG_0].id = ++env->id_gen;
12153 } else if (btf_type_is_void(t)) {
12154 if (meta.btf == btf_vmlinux && btf_id_set_contains(&special_kfunc_set, meta.func_id)) {
12155 if (meta.func_id == special_kfunc_list[KF_bpf_obj_drop_impl] ||
12156 meta.func_id == special_kfunc_list[KF_bpf_percpu_obj_drop_impl]) {
12157 insn_aux->kptr_struct_meta =
12158 btf_find_struct_meta(meta.arg_btf,
12159 meta.arg_btf_id);
12160 }
12161 }
12162 }
12163
12164 nargs =
btf_type_vlen(meta.func_proto); 12165 args = (const struct btf_param *)(meta.func_proto + 1); 12166 for (i = 0; i < nargs; i++) { 12167 u32 regno = i + 1; 12168 12169 t = btf_type_skip_modifiers(desc_btf, args[i].type, NULL); 12170 if (btf_type_is_ptr(t)) 12171 mark_btf_func_reg_size(env, regno, sizeof(void *)); 12172 else 12173 /* scalar. ensured by btf_check_kfunc_arg_match() */ 12174 mark_btf_func_reg_size(env, regno, t->size); 12175 } 12176 12177 if (is_iter_next_kfunc(&meta)) { 12178 err = process_iter_next_call(env, insn_idx, &meta); 12179 if (err) 12180 return err; 12181 } 12182 12183 return 0; 12184 } 12185 12186 static bool signed_add_overflows(s64 a, s64 b) 12187 { 12188 /* Do the add in u64, where overflow is well-defined */ 12189 s64 res = (s64)((u64)a + (u64)b); 12190 12191 if (b < 0) 12192 return res > a; 12193 return res < a; 12194 } 12195 12196 static bool signed_add32_overflows(s32 a, s32 b) 12197 { 12198 /* Do the add in u32, where overflow is well-defined */ 12199 s32 res = (s32)((u32)a + (u32)b); 12200 12201 if (b < 0) 12202 return res > a; 12203 return res < a; 12204 } 12205 12206 static bool signed_sub_overflows(s64 a, s64 b) 12207 { 12208 /* Do the sub in u64, where overflow is well-defined */ 12209 s64 res = (s64)((u64)a - (u64)b); 12210 12211 if (b < 0) 12212 return res < a; 12213 return res > a; 12214 } 12215 12216 static bool signed_sub32_overflows(s32 a, s32 b) 12217 { 12218 /* Do the sub in u32, where overflow is well-defined */ 12219 s32 res = (s32)((u32)a - (u32)b); 12220 12221 if (b < 0) 12222 return res < a; 12223 return res > a; 12224 } 12225 12226 static bool check_reg_sane_offset(struct bpf_verifier_env *env, 12227 const struct bpf_reg_state *reg, 12228 enum bpf_reg_type type) 12229 { 12230 bool known = tnum_is_const(reg->var_off); 12231 s64 val = reg->var_off.value; 12232 s64 smin = reg->smin_value; 12233 12234 if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) { 12235 verbose(env, "math between %s pointer and %lld is not allowed\n", 12236 reg_type_str(env, type), val); 12237 return false; 12238 } 12239 12240 if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) { 12241 verbose(env, "%s pointer offset %d is not allowed\n", 12242 reg_type_str(env, type), reg->off); 12243 return false; 12244 } 12245 12246 if (smin == S64_MIN) { 12247 verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n", 12248 reg_type_str(env, type)); 12249 return false; 12250 } 12251 12252 if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) { 12253 verbose(env, "value %lld makes %s pointer be out of bounds\n", 12254 smin, reg_type_str(env, type)); 12255 return false; 12256 } 12257 12258 return true; 12259 } 12260 12261 enum { 12262 REASON_BOUNDS = -1, 12263 REASON_TYPE = -2, 12264 REASON_PATHS = -3, 12265 REASON_LIMIT = -4, 12266 REASON_STACK = -5, 12267 }; 12268 12269 static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg, 12270 u32 *alu_limit, bool mask_to_left) 12271 { 12272 u32 max = 0, ptr_limit = 0; 12273 12274 switch (ptr_reg->type) { 12275 case PTR_TO_STACK: 12276 /* Offset 0 is out-of-bounds, but acceptable start for the 12277 * left direction, see BPF_REG_FP. Also, unknown scalar 12278 * offset where we would need to deal with min/max bounds is 12279 * currently prohibited for unprivileged. 
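 *
 * Worked example (illustrative): for a PTR_TO_STACK register at fp-16 with
 * a constant var_off of 0, ptr_limit = -(0 + (-16)) = 16, which is below
 * MAX_BPF_STACK and therefore becomes the alu_limit recorded for the
 * masking rewrite in do_misc_fixups().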
12280 */ 12281 max = MAX_BPF_STACK + mask_to_left; 12282 ptr_limit = -(ptr_reg->var_off.value + ptr_reg->off); 12283 break; 12284 case PTR_TO_MAP_VALUE: 12285 max = ptr_reg->map_ptr->value_size; 12286 ptr_limit = (mask_to_left ? 12287 ptr_reg->smin_value : 12288 ptr_reg->umax_value) + ptr_reg->off; 12289 break; 12290 default: 12291 return REASON_TYPE; 12292 } 12293 12294 if (ptr_limit >= max) 12295 return REASON_LIMIT; 12296 *alu_limit = ptr_limit; 12297 return 0; 12298 } 12299 12300 static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env, 12301 const struct bpf_insn *insn) 12302 { 12303 return env->bypass_spec_v1 || BPF_SRC(insn->code) == BPF_K; 12304 } 12305 12306 static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux, 12307 u32 alu_state, u32 alu_limit) 12308 { 12309 /* If we arrived here from different branches with different 12310 * state or limits to sanitize, then this won't work. 12311 */ 12312 if (aux->alu_state && 12313 (aux->alu_state != alu_state || 12314 aux->alu_limit != alu_limit)) 12315 return REASON_PATHS; 12316 12317 /* Corresponding fixup done in do_misc_fixups(). */ 12318 aux->alu_state = alu_state; 12319 aux->alu_limit = alu_limit; 12320 return 0; 12321 } 12322 12323 static int sanitize_val_alu(struct bpf_verifier_env *env, 12324 struct bpf_insn *insn) 12325 { 12326 struct bpf_insn_aux_data *aux = cur_aux(env); 12327 12328 if (can_skip_alu_sanitation(env, insn)) 12329 return 0; 12330 12331 return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0); 12332 } 12333 12334 static bool sanitize_needed(u8 opcode) 12335 { 12336 return opcode == BPF_ADD || opcode == BPF_SUB; 12337 } 12338 12339 struct bpf_sanitize_info { 12340 struct bpf_insn_aux_data aux; 12341 bool mask_to_left; 12342 }; 12343 12344 static struct bpf_verifier_state * 12345 sanitize_speculative_path(struct bpf_verifier_env *env, 12346 const struct bpf_insn *insn, 12347 u32 next_idx, u32 curr_idx) 12348 { 12349 struct bpf_verifier_state *branch; 12350 struct bpf_reg_state *regs; 12351 12352 branch = push_stack(env, next_idx, curr_idx, true); 12353 if (branch && insn) { 12354 regs = branch->frame[branch->curframe]->regs; 12355 if (BPF_SRC(insn->code) == BPF_K) { 12356 mark_reg_unknown(env, regs, insn->dst_reg); 12357 } else if (BPF_SRC(insn->code) == BPF_X) { 12358 mark_reg_unknown(env, regs, insn->dst_reg); 12359 mark_reg_unknown(env, regs, insn->src_reg); 12360 } 12361 } 12362 return branch; 12363 } 12364 12365 static int sanitize_ptr_alu(struct bpf_verifier_env *env, 12366 struct bpf_insn *insn, 12367 const struct bpf_reg_state *ptr_reg, 12368 const struct bpf_reg_state *off_reg, 12369 struct bpf_reg_state *dst_reg, 12370 struct bpf_sanitize_info *info, 12371 const bool commit_window) 12372 { 12373 struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : &info->aux; 12374 struct bpf_verifier_state *vstate = env->cur_state; 12375 bool off_is_imm = tnum_is_const(off_reg->var_off); 12376 bool off_is_neg = off_reg->smin_value < 0; 12377 bool ptr_is_dst_reg = ptr_reg == dst_reg; 12378 u8 opcode = BPF_OP(insn->code); 12379 u32 alu_state, alu_limit; 12380 struct bpf_reg_state tmp; 12381 bool ret; 12382 int err; 12383 12384 if (can_skip_alu_sanitation(env, insn)) 12385 return 0; 12386 12387 /* We already marked aux for masking from non-speculative 12388 * paths, thus we got here in the first place. We only care 12389 * to explore bad access from here. 
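 * In other words, on a path that is already speculative we skip the
 * limit bookkeeping below and jump straight to the simulation at
 * do_sim.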
12390 */ 12391 if (vstate->speculative) 12392 goto do_sim; 12393 12394 if (!commit_window) { 12395 if (!tnum_is_const(off_reg->var_off) && 12396 (off_reg->smin_value < 0) != (off_reg->smax_value < 0)) 12397 return REASON_BOUNDS; 12398 12399 info->mask_to_left = (opcode == BPF_ADD && off_is_neg) || 12400 (opcode == BPF_SUB && !off_is_neg); 12401 } 12402 12403 err = retrieve_ptr_limit(ptr_reg, &alu_limit, info->mask_to_left); 12404 if (err < 0) 12405 return err; 12406 12407 if (commit_window) { 12408 /* In commit phase we narrow the masking window based on 12409 * the observed pointer move after the simulated operation. 12410 */ 12411 alu_state = info->aux.alu_state; 12412 alu_limit = abs(info->aux.alu_limit - alu_limit); 12413 } else { 12414 alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0; 12415 alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0; 12416 alu_state |= ptr_is_dst_reg ? 12417 BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST; 12418 12419 /* Limit pruning on unknown scalars to enable deep search for 12420 * potential masking differences from other program paths. 12421 */ 12422 if (!off_is_imm) 12423 env->explore_alu_limits = true; 12424 } 12425 12426 err = update_alu_sanitation_state(aux, alu_state, alu_limit); 12427 if (err < 0) 12428 return err; 12429 do_sim: 12430 /* If we're in commit phase, we're done here given we already 12431 * pushed the truncated dst_reg into the speculative verification 12432 * stack. 12433 * 12434 * Also, when register is a known constant, we rewrite register-based 12435 * operation to immediate-based, and thus do not need masking (and as 12436 * a consequence, do not need to simulate the zero-truncation either). 12437 */ 12438 if (commit_window || off_is_imm) 12439 return 0; 12440 12441 /* Simulate and find potential out-of-bounds access under 12442 * speculative execution from truncation as a result of 12443 * masking when off was not within expected range. If off 12444 * sits in dst, then we temporarily need to move ptr there 12445 * to simulate dst (== 0) +/-= ptr. Needed, for example, 12446 * for cases where we use K-based arithmetic in one direction 12447 * and truncated reg-based in the other in order to explore 12448 * bad access. 12449 */ 12450 if (!ptr_is_dst_reg) { 12451 tmp = *dst_reg; 12452 copy_register_state(dst_reg, ptr_reg); 12453 } 12454 ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1, 12455 env->insn_idx); 12456 if (!ptr_is_dst_reg && ret) 12457 *dst_reg = tmp; 12458 return !ret ? REASON_STACK : 0; 12459 } 12460 12461 static void sanitize_mark_insn_seen(struct bpf_verifier_env *env) 12462 { 12463 struct bpf_verifier_state *vstate = env->cur_state; 12464 12465 /* If we simulate paths under speculation, we don't update the 12466 * insn as 'seen' such that when we verify unreachable paths in 12467 * the non-speculative domain, sanitize_dead_code() can still 12468 * rewrite/sanitize them. 12469 */ 12470 if (!vstate->speculative) 12471 env->insn_aux_data[env->insn_idx].seen = env->pass_cnt; 12472 } 12473 12474 static int sanitize_err(struct bpf_verifier_env *env, 12475 const struct bpf_insn *insn, int reason, 12476 const struct bpf_reg_state *off_reg, 12477 const struct bpf_reg_state *dst_reg) 12478 { 12479 static const char *err = "pointer arithmetic with it prohibited for !root"; 12480 const char *op = BPF_OP(insn->code) == BPF_ADD ? 
"add" : "sub"; 12481 u32 dst = insn->dst_reg, src = insn->src_reg; 12482 12483 switch (reason) { 12484 case REASON_BOUNDS: 12485 verbose(env, "R%d has unknown scalar with mixed signed bounds, %s\n", 12486 off_reg == dst_reg ? dst : src, err); 12487 break; 12488 case REASON_TYPE: 12489 verbose(env, "R%d has pointer with unsupported alu operation, %s\n", 12490 off_reg == dst_reg ? src : dst, err); 12491 break; 12492 case REASON_PATHS: 12493 verbose(env, "R%d tried to %s from different maps, paths or scalars, %s\n", 12494 dst, op, err); 12495 break; 12496 case REASON_LIMIT: 12497 verbose(env, "R%d tried to %s beyond pointer bounds, %s\n", 12498 dst, op, err); 12499 break; 12500 case REASON_STACK: 12501 verbose(env, "R%d could not be pushed for speculative verification, %s\n", 12502 dst, err); 12503 break; 12504 default: 12505 verbose(env, "verifier internal error: unknown reason (%d)\n", 12506 reason); 12507 break; 12508 } 12509 12510 return -EACCES; 12511 } 12512 12513 /* check that stack access falls within stack limits and that 'reg' doesn't 12514 * have a variable offset. 12515 * 12516 * Variable offset is prohibited for unprivileged mode for simplicity since it 12517 * requires corresponding support in Spectre masking for stack ALU. See also 12518 * retrieve_ptr_limit(). 12519 * 12520 * 12521 * 'off' includes 'reg->off'. 12522 */ 12523 static int check_stack_access_for_ptr_arithmetic( 12524 struct bpf_verifier_env *env, 12525 int regno, 12526 const struct bpf_reg_state *reg, 12527 int off) 12528 { 12529 if (!tnum_is_const(reg->var_off)) { 12530 char tn_buf[48]; 12531 12532 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 12533 verbose(env, "R%d variable stack access prohibited for !root, var_off=%s off=%d\n", 12534 regno, tn_buf, off); 12535 return -EACCES; 12536 } 12537 12538 if (off >= 0 || off < -MAX_BPF_STACK) { 12539 verbose(env, "R%d stack pointer arithmetic goes out of range, " 12540 "prohibited for !root; off=%d\n", regno, off); 12541 return -EACCES; 12542 } 12543 12544 return 0; 12545 } 12546 12547 static int sanitize_check_bounds(struct bpf_verifier_env *env, 12548 const struct bpf_insn *insn, 12549 const struct bpf_reg_state *dst_reg) 12550 { 12551 u32 dst = insn->dst_reg; 12552 12553 /* For unprivileged we require that resulting offset must be in bounds 12554 * in order to be able to sanitize access later on. 12555 */ 12556 if (env->bypass_spec_v1) 12557 return 0; 12558 12559 switch (dst_reg->type) { 12560 case PTR_TO_STACK: 12561 if (check_stack_access_for_ptr_arithmetic(env, dst, dst_reg, 12562 dst_reg->off + dst_reg->var_off.value)) 12563 return -EACCES; 12564 break; 12565 case PTR_TO_MAP_VALUE: 12566 if (check_map_access(env, dst, dst_reg->off, 1, false, ACCESS_HELPER)) { 12567 verbose(env, "R%d pointer arithmetic of map value goes out of range, " 12568 "prohibited for !root\n", dst); 12569 return -EACCES; 12570 } 12571 break; 12572 default: 12573 break; 12574 } 12575 12576 return 0; 12577 } 12578 12579 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off. 12580 * Caller should also handle BPF_MOV case separately. 12581 * If we return -EACCES, caller may want to try again treating pointer as a 12582 * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks. 
12583 */ 12584 static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, 12585 struct bpf_insn *insn, 12586 const struct bpf_reg_state *ptr_reg, 12587 const struct bpf_reg_state *off_reg) 12588 { 12589 struct bpf_verifier_state *vstate = env->cur_state; 12590 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 12591 struct bpf_reg_state *regs = state->regs, *dst_reg; 12592 bool known = tnum_is_const(off_reg->var_off); 12593 s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value, 12594 smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value; 12595 u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value, 12596 umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value; 12597 struct bpf_sanitize_info info = {}; 12598 u8 opcode = BPF_OP(insn->code); 12599 u32 dst = insn->dst_reg; 12600 int ret; 12601 12602 dst_reg = ®s[dst]; 12603 12604 if ((known && (smin_val != smax_val || umin_val != umax_val)) || 12605 smin_val > smax_val || umin_val > umax_val) { 12606 /* Taint dst register if offset had invalid bounds derived from 12607 * e.g. dead branches. 12608 */ 12609 __mark_reg_unknown(env, dst_reg); 12610 return 0; 12611 } 12612 12613 if (BPF_CLASS(insn->code) != BPF_ALU64) { 12614 /* 32-bit ALU ops on pointers produce (meaningless) scalars */ 12615 if (opcode == BPF_SUB && env->allow_ptr_leaks) { 12616 __mark_reg_unknown(env, dst_reg); 12617 return 0; 12618 } 12619 12620 verbose(env, 12621 "R%d 32-bit pointer arithmetic prohibited\n", 12622 dst); 12623 return -EACCES; 12624 } 12625 12626 if (ptr_reg->type & PTR_MAYBE_NULL) { 12627 verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n", 12628 dst, reg_type_str(env, ptr_reg->type)); 12629 return -EACCES; 12630 } 12631 12632 switch (base_type(ptr_reg->type)) { 12633 case CONST_PTR_TO_MAP: 12634 /* smin_val represents the known value */ 12635 if (known && smin_val == 0 && opcode == BPF_ADD) 12636 break; 12637 fallthrough; 12638 case PTR_TO_PACKET_END: 12639 case PTR_TO_SOCKET: 12640 case PTR_TO_SOCK_COMMON: 12641 case PTR_TO_TCP_SOCK: 12642 case PTR_TO_XDP_SOCK: 12643 verbose(env, "R%d pointer arithmetic on %s prohibited\n", 12644 dst, reg_type_str(env, ptr_reg->type)); 12645 return -EACCES; 12646 default: 12647 break; 12648 } 12649 12650 /* In case of 'scalar += pointer', dst_reg inherits pointer type and id. 12651 * The id may be overwritten later if we create a new variable offset. 12652 */ 12653 dst_reg->type = ptr_reg->type; 12654 dst_reg->id = ptr_reg->id; 12655 12656 if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) || 12657 !check_reg_sane_offset(env, ptr_reg, ptr_reg->type)) 12658 return -EINVAL; 12659 12660 /* pointer types do not carry 32-bit bounds at the moment. */ 12661 __mark_reg32_unbounded(dst_reg); 12662 12663 if (sanitize_needed(opcode)) { 12664 ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg, 12665 &info, false); 12666 if (ret < 0) 12667 return sanitize_err(env, insn, ret, off_reg, dst_reg); 12668 } 12669 12670 switch (opcode) { 12671 case BPF_ADD: 12672 /* We can take a fixed offset as long as it doesn't overflow 12673 * the s32 'off' field 12674 */ 12675 if (known && (ptr_reg->off + smin_val == 12676 (s64)(s32)(ptr_reg->off + smin_val))) { 12677 /* pointer += K. 
Accumulate it into fixed offset */ 12678 dst_reg->smin_value = smin_ptr; 12679 dst_reg->smax_value = smax_ptr; 12680 dst_reg->umin_value = umin_ptr; 12681 dst_reg->umax_value = umax_ptr; 12682 dst_reg->var_off = ptr_reg->var_off; 12683 dst_reg->off = ptr_reg->off + smin_val; 12684 dst_reg->raw = ptr_reg->raw; 12685 break; 12686 } 12687 /* A new variable offset is created. Note that off_reg->off 12688 * == 0, since it's a scalar. 12689 * dst_reg gets the pointer type and since some positive 12690 * integer value was added to the pointer, give it a new 'id' 12691 * if it's a PTR_TO_PACKET. 12692 * this creates a new 'base' pointer, off_reg (variable) gets 12693 * added into the variable offset, and we copy the fixed offset 12694 * from ptr_reg. 12695 */ 12696 if (signed_add_overflows(smin_ptr, smin_val) || 12697 signed_add_overflows(smax_ptr, smax_val)) { 12698 dst_reg->smin_value = S64_MIN; 12699 dst_reg->smax_value = S64_MAX; 12700 } else { 12701 dst_reg->smin_value = smin_ptr + smin_val; 12702 dst_reg->smax_value = smax_ptr + smax_val; 12703 } 12704 if (umin_ptr + umin_val < umin_ptr || 12705 umax_ptr + umax_val < umax_ptr) { 12706 dst_reg->umin_value = 0; 12707 dst_reg->umax_value = U64_MAX; 12708 } else { 12709 dst_reg->umin_value = umin_ptr + umin_val; 12710 dst_reg->umax_value = umax_ptr + umax_val; 12711 } 12712 dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off); 12713 dst_reg->off = ptr_reg->off; 12714 dst_reg->raw = ptr_reg->raw; 12715 if (reg_is_pkt_pointer(ptr_reg)) { 12716 dst_reg->id = ++env->id_gen; 12717 /* something was added to pkt_ptr, set range to zero */ 12718 memset(&dst_reg->raw, 0, sizeof(dst_reg->raw)); 12719 } 12720 break; 12721 case BPF_SUB: 12722 if (dst_reg == off_reg) { 12723 /* scalar -= pointer. Creates an unknown scalar */ 12724 verbose(env, "R%d tried to subtract pointer from scalar\n", 12725 dst); 12726 return -EACCES; 12727 } 12728 /* We don't allow subtraction from FP, because (according to 12729 * test_verifier.c test "invalid fp arithmetic", JITs might not 12730 * be able to deal with it. 12731 */ 12732 if (ptr_reg->type == PTR_TO_STACK) { 12733 verbose(env, "R%d subtraction from stack pointer prohibited\n", 12734 dst); 12735 return -EACCES; 12736 } 12737 if (known && (ptr_reg->off - smin_val == 12738 (s64)(s32)(ptr_reg->off - smin_val))) { 12739 /* pointer -= K. Subtract it from fixed offset */ 12740 dst_reg->smin_value = smin_ptr; 12741 dst_reg->smax_value = smax_ptr; 12742 dst_reg->umin_value = umin_ptr; 12743 dst_reg->umax_value = umax_ptr; 12744 dst_reg->var_off = ptr_reg->var_off; 12745 dst_reg->id = ptr_reg->id; 12746 dst_reg->off = ptr_reg->off - smin_val; 12747 dst_reg->raw = ptr_reg->raw; 12748 break; 12749 } 12750 /* A new variable offset is created. If the subtrahend is known 12751 * nonnegative, then any reg->range we had before is still good. 
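 * For example, a pointer bounded to [100, 100] minus a scalar bounded
 * to [0, 16] gives smin == 100 - 16 == 84 and smax == 100 - 0 == 100
 * below: smallest pointer value minus largest offset, and vice versa.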
12752 */ 12753 if (signed_sub_overflows(smin_ptr, smax_val) || 12754 signed_sub_overflows(smax_ptr, smin_val)) { 12755 /* Overflow possible, we know nothing */ 12756 dst_reg->smin_value = S64_MIN; 12757 dst_reg->smax_value = S64_MAX; 12758 } else { 12759 dst_reg->smin_value = smin_ptr - smax_val; 12760 dst_reg->smax_value = smax_ptr - smin_val; 12761 } 12762 if (umin_ptr < umax_val) { 12763 /* Overflow possible, we know nothing */ 12764 dst_reg->umin_value = 0; 12765 dst_reg->umax_value = U64_MAX; 12766 } else { 12767 /* Cannot overflow (as long as bounds are consistent) */ 12768 dst_reg->umin_value = umin_ptr - umax_val; 12769 dst_reg->umax_value = umax_ptr - umin_val; 12770 } 12771 dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off); 12772 dst_reg->off = ptr_reg->off; 12773 dst_reg->raw = ptr_reg->raw; 12774 if (reg_is_pkt_pointer(ptr_reg)) { 12775 dst_reg->id = ++env->id_gen; 12776 /* something was added to pkt_ptr, set range to zero */ 12777 if (smin_val < 0) 12778 memset(&dst_reg->raw, 0, sizeof(dst_reg->raw)); 12779 } 12780 break; 12781 case BPF_AND: 12782 case BPF_OR: 12783 case BPF_XOR: 12784 /* bitwise ops on pointers are troublesome, prohibit. */ 12785 verbose(env, "R%d bitwise operator %s on pointer prohibited\n", 12786 dst, bpf_alu_string[opcode >> 4]); 12787 return -EACCES; 12788 default: 12789 /* other operators (e.g. MUL,LSH) produce non-pointer results */ 12790 verbose(env, "R%d pointer arithmetic with %s operator prohibited\n", 12791 dst, bpf_alu_string[opcode >> 4]); 12792 return -EACCES; 12793 } 12794 12795 if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type)) 12796 return -EINVAL; 12797 reg_bounds_sync(dst_reg); 12798 if (sanitize_check_bounds(env, insn, dst_reg) < 0) 12799 return -EACCES; 12800 if (sanitize_needed(opcode)) { 12801 ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg, 12802 &info, true); 12803 if (ret < 0) 12804 return sanitize_err(env, insn, ret, off_reg, dst_reg); 12805 } 12806 12807 return 0; 12808 } 12809 12810 static void scalar32_min_max_add(struct bpf_reg_state *dst_reg, 12811 struct bpf_reg_state *src_reg) 12812 { 12813 s32 smin_val = src_reg->s32_min_value; 12814 s32 smax_val = src_reg->s32_max_value; 12815 u32 umin_val = src_reg->u32_min_value; 12816 u32 umax_val = src_reg->u32_max_value; 12817 12818 if (signed_add32_overflows(dst_reg->s32_min_value, smin_val) || 12819 signed_add32_overflows(dst_reg->s32_max_value, smax_val)) { 12820 dst_reg->s32_min_value = S32_MIN; 12821 dst_reg->s32_max_value = S32_MAX; 12822 } else { 12823 dst_reg->s32_min_value += smin_val; 12824 dst_reg->s32_max_value += smax_val; 12825 } 12826 if (dst_reg->u32_min_value + umin_val < umin_val || 12827 dst_reg->u32_max_value + umax_val < umax_val) { 12828 dst_reg->u32_min_value = 0; 12829 dst_reg->u32_max_value = U32_MAX; 12830 } else { 12831 dst_reg->u32_min_value += umin_val; 12832 dst_reg->u32_max_value += umax_val; 12833 } 12834 } 12835 12836 static void scalar_min_max_add(struct bpf_reg_state *dst_reg, 12837 struct bpf_reg_state *src_reg) 12838 { 12839 s64 smin_val = src_reg->smin_value; 12840 s64 smax_val = src_reg->smax_value; 12841 u64 umin_val = src_reg->umin_value; 12842 u64 umax_val = src_reg->umax_value; 12843 12844 if (signed_add_overflows(dst_reg->smin_value, smin_val) || 12845 signed_add_overflows(dst_reg->smax_value, smax_val)) { 12846 dst_reg->smin_value = S64_MIN; 12847 dst_reg->smax_value = S64_MAX; 12848 } else { 12849 dst_reg->smin_value += smin_val; 12850 dst_reg->smax_value += smax_val; 12851 } 12852 if (dst_reg->umin_value + 
umin_val < umin_val || 12853 dst_reg->umax_value + umax_val < umax_val) { 12854 dst_reg->umin_value = 0; 12855 dst_reg->umax_value = U64_MAX; 12856 } else { 12857 dst_reg->umin_value += umin_val; 12858 dst_reg->umax_value += umax_val; 12859 } 12860 } 12861 12862 static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg, 12863 struct bpf_reg_state *src_reg) 12864 { 12865 s32 smin_val = src_reg->s32_min_value; 12866 s32 smax_val = src_reg->s32_max_value; 12867 u32 umin_val = src_reg->u32_min_value; 12868 u32 umax_val = src_reg->u32_max_value; 12869 12870 if (signed_sub32_overflows(dst_reg->s32_min_value, smax_val) || 12871 signed_sub32_overflows(dst_reg->s32_max_value, smin_val)) { 12872 /* Overflow possible, we know nothing */ 12873 dst_reg->s32_min_value = S32_MIN; 12874 dst_reg->s32_max_value = S32_MAX; 12875 } else { 12876 dst_reg->s32_min_value -= smax_val; 12877 dst_reg->s32_max_value -= smin_val; 12878 } 12879 if (dst_reg->u32_min_value < umax_val) { 12880 /* Overflow possible, we know nothing */ 12881 dst_reg->u32_min_value = 0; 12882 dst_reg->u32_max_value = U32_MAX; 12883 } else { 12884 /* Cannot overflow (as long as bounds are consistent) */ 12885 dst_reg->u32_min_value -= umax_val; 12886 dst_reg->u32_max_value -= umin_val; 12887 } 12888 } 12889 12890 static void scalar_min_max_sub(struct bpf_reg_state *dst_reg, 12891 struct bpf_reg_state *src_reg) 12892 { 12893 s64 smin_val = src_reg->smin_value; 12894 s64 smax_val = src_reg->smax_value; 12895 u64 umin_val = src_reg->umin_value; 12896 u64 umax_val = src_reg->umax_value; 12897 12898 if (signed_sub_overflows(dst_reg->smin_value, smax_val) || 12899 signed_sub_overflows(dst_reg->smax_value, smin_val)) { 12900 /* Overflow possible, we know nothing */ 12901 dst_reg->smin_value = S64_MIN; 12902 dst_reg->smax_value = S64_MAX; 12903 } else { 12904 dst_reg->smin_value -= smax_val; 12905 dst_reg->smax_value -= smin_val; 12906 } 12907 if (dst_reg->umin_value < umax_val) { 12908 /* Overflow possible, we know nothing */ 12909 dst_reg->umin_value = 0; 12910 dst_reg->umax_value = U64_MAX; 12911 } else { 12912 /* Cannot overflow (as long as bounds are consistent) */ 12913 dst_reg->umin_value -= umax_val; 12914 dst_reg->umax_value -= umin_val; 12915 } 12916 } 12917 12918 static void scalar32_min_max_mul(struct bpf_reg_state *dst_reg, 12919 struct bpf_reg_state *src_reg) 12920 { 12921 s32 smin_val = src_reg->s32_min_value; 12922 u32 umin_val = src_reg->u32_min_value; 12923 u32 umax_val = src_reg->u32_max_value; 12924 12925 if (smin_val < 0 || dst_reg->s32_min_value < 0) { 12926 /* Ain't nobody got time to multiply that sign */ 12927 __mark_reg32_unbounded(dst_reg); 12928 return; 12929 } 12930 /* Both values are positive, so we can work with unsigned and 12931 * copy the result to signed (unless it exceeds S32_MAX). 
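 * Capping both operands at U16_MAX below guarantees the u32 product
 * cannot wrap: 0xffff * 0xffff == 0xfffe0001 still fits in 32 bits.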
12932 */ 12933 if (umax_val > U16_MAX || dst_reg->u32_max_value > U16_MAX) { 12934 /* Potential overflow, we know nothing */ 12935 __mark_reg32_unbounded(dst_reg); 12936 return; 12937 } 12938 dst_reg->u32_min_value *= umin_val; 12939 dst_reg->u32_max_value *= umax_val; 12940 if (dst_reg->u32_max_value > S32_MAX) { 12941 /* Overflow possible, we know nothing */ 12942 dst_reg->s32_min_value = S32_MIN; 12943 dst_reg->s32_max_value = S32_MAX; 12944 } else { 12945 dst_reg->s32_min_value = dst_reg->u32_min_value; 12946 dst_reg->s32_max_value = dst_reg->u32_max_value; 12947 } 12948 } 12949 12950 static void scalar_min_max_mul(struct bpf_reg_state *dst_reg, 12951 struct bpf_reg_state *src_reg) 12952 { 12953 s64 smin_val = src_reg->smin_value; 12954 u64 umin_val = src_reg->umin_value; 12955 u64 umax_val = src_reg->umax_value; 12956 12957 if (smin_val < 0 || dst_reg->smin_value < 0) { 12958 /* Ain't nobody got time to multiply that sign */ 12959 __mark_reg64_unbounded(dst_reg); 12960 return; 12961 } 12962 /* Both values are positive, so we can work with unsigned and 12963 * copy the result to signed (unless it exceeds S64_MAX). 12964 */ 12965 if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) { 12966 /* Potential overflow, we know nothing */ 12967 __mark_reg64_unbounded(dst_reg); 12968 return; 12969 } 12970 dst_reg->umin_value *= umin_val; 12971 dst_reg->umax_value *= umax_val; 12972 if (dst_reg->umax_value > S64_MAX) { 12973 /* Overflow possible, we know nothing */ 12974 dst_reg->smin_value = S64_MIN; 12975 dst_reg->smax_value = S64_MAX; 12976 } else { 12977 dst_reg->smin_value = dst_reg->umin_value; 12978 dst_reg->smax_value = dst_reg->umax_value; 12979 } 12980 } 12981 12982 static void scalar32_min_max_and(struct bpf_reg_state *dst_reg, 12983 struct bpf_reg_state *src_reg) 12984 { 12985 bool src_known = tnum_subreg_is_const(src_reg->var_off); 12986 bool dst_known = tnum_subreg_is_const(dst_reg->var_off); 12987 struct tnum var32_off = tnum_subreg(dst_reg->var_off); 12988 s32 smin_val = src_reg->s32_min_value; 12989 u32 umax_val = src_reg->u32_max_value; 12990 12991 if (src_known && dst_known) { 12992 __mark_reg32_known(dst_reg, var32_off.value); 12993 return; 12994 } 12995 12996 /* We get our minimum from the var_off, since that's inherently 12997 * bitwise. Our maximum is the minimum of the operands' maxima. 12998 */ 12999 dst_reg->u32_min_value = var32_off.value; 13000 dst_reg->u32_max_value = min(dst_reg->u32_max_value, umax_val); 13001 if (dst_reg->s32_min_value < 0 || smin_val < 0) { 13002 /* Lose signed bounds when ANDing negative numbers, 13003 * ain't nobody got time for that. 13004 */ 13005 dst_reg->s32_min_value = S32_MIN; 13006 dst_reg->s32_max_value = S32_MAX; 13007 } else { 13008 /* ANDing two positives gives a positive, so safe to 13009 * cast result into s64. 13010 */ 13011 dst_reg->s32_min_value = dst_reg->u32_min_value; 13012 dst_reg->s32_max_value = dst_reg->u32_max_value; 13013 } 13014 } 13015 13016 static void scalar_min_max_and(struct bpf_reg_state *dst_reg, 13017 struct bpf_reg_state *src_reg) 13018 { 13019 bool src_known = tnum_is_const(src_reg->var_off); 13020 bool dst_known = tnum_is_const(dst_reg->var_off); 13021 s64 smin_val = src_reg->smin_value; 13022 u64 umax_val = src_reg->umax_value; 13023 13024 if (src_known && dst_known) { 13025 __mark_reg_known(dst_reg, dst_reg->var_off.value); 13026 return; 13027 } 13028 13029 /* We get our minimum from the var_off, since that's inherently 13030 * bitwise. Our maximum is the minimum of the operands' maxima. 
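 * For example, ANDing a dst_reg with umax_value == 255 against a src
 * with umax_value == 0x0f yields umax == min(255, 0x0f) == 0x0f below,
 * while umin comes from the bits var_off already proves to be one.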
13031 */ 13032 dst_reg->umin_value = dst_reg->var_off.value; 13033 dst_reg->umax_value = min(dst_reg->umax_value, umax_val); 13034 if (dst_reg->smin_value < 0 || smin_val < 0) { 13035 /* Lose signed bounds when ANDing negative numbers, 13036 * ain't nobody got time for that. 13037 */ 13038 dst_reg->smin_value = S64_MIN; 13039 dst_reg->smax_value = S64_MAX; 13040 } else { 13041 /* ANDing two positives gives a positive, so safe to 13042 * cast result into s64. 13043 */ 13044 dst_reg->smin_value = dst_reg->umin_value; 13045 dst_reg->smax_value = dst_reg->umax_value; 13046 } 13047 /* We may learn something more from the var_off */ 13048 __update_reg_bounds(dst_reg); 13049 } 13050 13051 static void scalar32_min_max_or(struct bpf_reg_state *dst_reg, 13052 struct bpf_reg_state *src_reg) 13053 { 13054 bool src_known = tnum_subreg_is_const(src_reg->var_off); 13055 bool dst_known = tnum_subreg_is_const(dst_reg->var_off); 13056 struct tnum var32_off = tnum_subreg(dst_reg->var_off); 13057 s32 smin_val = src_reg->s32_min_value; 13058 u32 umin_val = src_reg->u32_min_value; 13059 13060 if (src_known && dst_known) { 13061 __mark_reg32_known(dst_reg, var32_off.value); 13062 return; 13063 } 13064 13065 /* We get our maximum from the var_off, and our minimum is the 13066 * maximum of the operands' minima 13067 */ 13068 dst_reg->u32_min_value = max(dst_reg->u32_min_value, umin_val); 13069 dst_reg->u32_max_value = var32_off.value | var32_off.mask; 13070 if (dst_reg->s32_min_value < 0 || smin_val < 0) { 13071 /* Lose signed bounds when ORing negative numbers, 13072 * ain't nobody got time for that. 13073 */ 13074 dst_reg->s32_min_value = S32_MIN; 13075 dst_reg->s32_max_value = S32_MAX; 13076 } else { 13077 /* ORing two positives gives a positive, so safe to 13078 * cast result into s64. 13079 */ 13080 dst_reg->s32_min_value = dst_reg->u32_min_value; 13081 dst_reg->s32_max_value = dst_reg->u32_max_value; 13082 } 13083 } 13084 13085 static void scalar_min_max_or(struct bpf_reg_state *dst_reg, 13086 struct bpf_reg_state *src_reg) 13087 { 13088 bool src_known = tnum_is_const(src_reg->var_off); 13089 bool dst_known = tnum_is_const(dst_reg->var_off); 13090 s64 smin_val = src_reg->smin_value; 13091 u64 umin_val = src_reg->umin_value; 13092 13093 if (src_known && dst_known) { 13094 __mark_reg_known(dst_reg, dst_reg->var_off.value); 13095 return; 13096 } 13097 13098 /* We get our maximum from the var_off, and our minimum is the 13099 * maximum of the operands' minima 13100 */ 13101 dst_reg->umin_value = max(dst_reg->umin_value, umin_val); 13102 dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask; 13103 if (dst_reg->smin_value < 0 || smin_val < 0) { 13104 /* Lose signed bounds when ORing negative numbers, 13105 * ain't nobody got time for that. 13106 */ 13107 dst_reg->smin_value = S64_MIN; 13108 dst_reg->smax_value = S64_MAX; 13109 } else { 13110 /* ORing two positives gives a positive, so safe to 13111 * cast result into s64. 
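 * (With both sign bits known to be clear, the result's sign bit is
 * clear as well, so the unsigned bounds can be reused verbatim as the
 * signed bounds here.)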
13112 */ 13113 dst_reg->smin_value = dst_reg->umin_value; 13114 dst_reg->smax_value = dst_reg->umax_value; 13115 } 13116 /* We may learn something more from the var_off */ 13117 __update_reg_bounds(dst_reg); 13118 } 13119 13120 static void scalar32_min_max_xor(struct bpf_reg_state *dst_reg, 13121 struct bpf_reg_state *src_reg) 13122 { 13123 bool src_known = tnum_subreg_is_const(src_reg->var_off); 13124 bool dst_known = tnum_subreg_is_const(dst_reg->var_off); 13125 struct tnum var32_off = tnum_subreg(dst_reg->var_off); 13126 s32 smin_val = src_reg->s32_min_value; 13127 13128 if (src_known && dst_known) { 13129 __mark_reg32_known(dst_reg, var32_off.value); 13130 return; 13131 } 13132 13133 /* We get both minimum and maximum from the var32_off. */ 13134 dst_reg->u32_min_value = var32_off.value; 13135 dst_reg->u32_max_value = var32_off.value | var32_off.mask; 13136 13137 if (dst_reg->s32_min_value >= 0 && smin_val >= 0) { 13138 /* XORing two positive sign numbers gives a positive, 13139 * so safe to cast u32 result into s32. 13140 */ 13141 dst_reg->s32_min_value = dst_reg->u32_min_value; 13142 dst_reg->s32_max_value = dst_reg->u32_max_value; 13143 } else { 13144 dst_reg->s32_min_value = S32_MIN; 13145 dst_reg->s32_max_value = S32_MAX; 13146 } 13147 } 13148 13149 static void scalar_min_max_xor(struct bpf_reg_state *dst_reg, 13150 struct bpf_reg_state *src_reg) 13151 { 13152 bool src_known = tnum_is_const(src_reg->var_off); 13153 bool dst_known = tnum_is_const(dst_reg->var_off); 13154 s64 smin_val = src_reg->smin_value; 13155 13156 if (src_known && dst_known) { 13157 /* dst_reg->var_off.value has been updated earlier */ 13158 __mark_reg_known(dst_reg, dst_reg->var_off.value); 13159 return; 13160 } 13161 13162 /* We get both minimum and maximum from the var_off. */ 13163 dst_reg->umin_value = dst_reg->var_off.value; 13164 dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask; 13165 13166 if (dst_reg->smin_value >= 0 && smin_val >= 0) { 13167 /* XORing two positive sign numbers gives a positive, 13168 * so safe to cast u64 result into s64. 
13169 */ 13170 dst_reg->smin_value = dst_reg->umin_value; 13171 dst_reg->smax_value = dst_reg->umax_value; 13172 } else { 13173 dst_reg->smin_value = S64_MIN; 13174 dst_reg->smax_value = S64_MAX; 13175 } 13176 13177 __update_reg_bounds(dst_reg); 13178 } 13179 13180 static void __scalar32_min_max_lsh(struct bpf_reg_state *dst_reg, 13181 u64 umin_val, u64 umax_val) 13182 { 13183 /* We lose all sign bit information (except what we can pick 13184 * up from var_off) 13185 */ 13186 dst_reg->s32_min_value = S32_MIN; 13187 dst_reg->s32_max_value = S32_MAX; 13188 /* If we might shift our top bit out, then we know nothing */ 13189 if (umax_val > 31 || dst_reg->u32_max_value > 1ULL << (31 - umax_val)) { 13190 dst_reg->u32_min_value = 0; 13191 dst_reg->u32_max_value = U32_MAX; 13192 } else { 13193 dst_reg->u32_min_value <<= umin_val; 13194 dst_reg->u32_max_value <<= umax_val; 13195 } 13196 } 13197 13198 static void scalar32_min_max_lsh(struct bpf_reg_state *dst_reg, 13199 struct bpf_reg_state *src_reg) 13200 { 13201 u32 umax_val = src_reg->u32_max_value; 13202 u32 umin_val = src_reg->u32_min_value; 13203 /* u32 alu operation will zext upper bits */ 13204 struct tnum subreg = tnum_subreg(dst_reg->var_off); 13205 13206 __scalar32_min_max_lsh(dst_reg, umin_val, umax_val); 13207 dst_reg->var_off = tnum_subreg(tnum_lshift(subreg, umin_val)); 13208 /* Not required but being careful mark reg64 bounds as unknown so 13209 * that we are forced to pick them up from tnum and zext later and 13210 * if some path skips this step we are still safe. 13211 */ 13212 __mark_reg64_unbounded(dst_reg); 13213 __update_reg32_bounds(dst_reg); 13214 } 13215 13216 static void __scalar64_min_max_lsh(struct bpf_reg_state *dst_reg, 13217 u64 umin_val, u64 umax_val) 13218 { 13219 /* Special case <<32 because it is a common compiler pattern to sign 13220 * extend subreg by doing <<32 s>>32. In this case if 32bit bounds are 13221 * positive we know this shift will also be positive so we can track 13222 * bounds correctly. Otherwise we lose all sign bit information except 13223 * what we can pick up from var_off. Perhaps we can generalize this 13224 * later to shifts of any length. 
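 * For example, a register whose 32-bit signed bounds are [0, 100]
 * keeps smax_value == 100LL << 32 after a constant <<32 below instead
 * of collapsing to S64_MAX.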
13225 */ 13226 if (umin_val == 32 && umax_val == 32 && dst_reg->s32_max_value >= 0) 13227 dst_reg->smax_value = (s64)dst_reg->s32_max_value << 32; 13228 else 13229 dst_reg->smax_value = S64_MAX; 13230 13231 if (umin_val == 32 && umax_val == 32 && dst_reg->s32_min_value >= 0) 13232 dst_reg->smin_value = (s64)dst_reg->s32_min_value << 32; 13233 else 13234 dst_reg->smin_value = S64_MIN; 13235 13236 /* If we might shift our top bit out, then we know nothing */ 13237 if (dst_reg->umax_value > 1ULL << (63 - umax_val)) { 13238 dst_reg->umin_value = 0; 13239 dst_reg->umax_value = U64_MAX; 13240 } else { 13241 dst_reg->umin_value <<= umin_val; 13242 dst_reg->umax_value <<= umax_val; 13243 } 13244 } 13245 13246 static void scalar_min_max_lsh(struct bpf_reg_state *dst_reg, 13247 struct bpf_reg_state *src_reg) 13248 { 13249 u64 umax_val = src_reg->umax_value; 13250 u64 umin_val = src_reg->umin_value; 13251 13252 /* scalar64 calc uses 32bit unshifted bounds so must be called first */ 13253 __scalar64_min_max_lsh(dst_reg, umin_val, umax_val); 13254 __scalar32_min_max_lsh(dst_reg, umin_val, umax_val); 13255 13256 dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val); 13257 /* We may learn something more from the var_off */ 13258 __update_reg_bounds(dst_reg); 13259 } 13260 13261 static void scalar32_min_max_rsh(struct bpf_reg_state *dst_reg, 13262 struct bpf_reg_state *src_reg) 13263 { 13264 struct tnum subreg = tnum_subreg(dst_reg->var_off); 13265 u32 umax_val = src_reg->u32_max_value; 13266 u32 umin_val = src_reg->u32_min_value; 13267 13268 /* BPF_RSH is an unsigned shift. If the value in dst_reg might 13269 * be negative, then either: 13270 * 1) src_reg might be zero, so the sign bit of the result is 13271 * unknown, so we lose our signed bounds 13272 * 2) it's known negative, thus the unsigned bounds capture the 13273 * signed bounds 13274 * 3) the signed bounds cross zero, so they tell us nothing 13275 * about the result 13276 * If the value in dst_reg is known nonnegative, then again the 13277 * unsigned bounds capture the signed bounds. 13278 * Thus, in all cases it suffices to blow away our signed bounds 13279 * and rely on inferring new ones from the unsigned bounds and 13280 * var_off of the result. 13281 */ 13282 dst_reg->s32_min_value = S32_MIN; 13283 dst_reg->s32_max_value = S32_MAX; 13284 13285 dst_reg->var_off = tnum_rshift(subreg, umin_val); 13286 dst_reg->u32_min_value >>= umax_val; 13287 dst_reg->u32_max_value >>= umin_val; 13288 13289 __mark_reg64_unbounded(dst_reg); 13290 __update_reg32_bounds(dst_reg); 13291 } 13292 13293 static void scalar_min_max_rsh(struct bpf_reg_state *dst_reg, 13294 struct bpf_reg_state *src_reg) 13295 { 13296 u64 umax_val = src_reg->umax_value; 13297 u64 umin_val = src_reg->umin_value; 13298 13299 /* BPF_RSH is an unsigned shift. If the value in dst_reg might 13300 * be negative, then either: 13301 * 1) src_reg might be zero, so the sign bit of the result is 13302 * unknown, so we lose our signed bounds 13303 * 2) it's known negative, thus the unsigned bounds capture the 13304 * signed bounds 13305 * 3) the signed bounds cross zero, so they tell us nothing 13306 * about the result 13307 * If the value in dst_reg is known nonnegative, then again the 13308 * unsigned bounds capture the signed bounds. 13309 * Thus, in all cases it suffices to blow away our signed bounds 13310 * and rely on inferring new ones from the unsigned bounds and 13311 * var_off of the result. 
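 * For example, an unsigned range of [64, 256] shifted right by a src
 * range of [2, 3] becomes [64 >> 3, 256 >> 2] == [8, 64] below: the
 * minimum is shifted by the largest possible amount, the maximum by
 * the smallest.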
13312 */ 13313 dst_reg->smin_value = S64_MIN; 13314 dst_reg->smax_value = S64_MAX; 13315 dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val); 13316 dst_reg->umin_value >>= umax_val; 13317 dst_reg->umax_value >>= umin_val; 13318 13319 /* Its not easy to operate on alu32 bounds here because it depends 13320 * on bits being shifted in. Take easy way out and mark unbounded 13321 * so we can recalculate later from tnum. 13322 */ 13323 __mark_reg32_unbounded(dst_reg); 13324 __update_reg_bounds(dst_reg); 13325 } 13326 13327 static void scalar32_min_max_arsh(struct bpf_reg_state *dst_reg, 13328 struct bpf_reg_state *src_reg) 13329 { 13330 u64 umin_val = src_reg->u32_min_value; 13331 13332 /* Upon reaching here, src_known is true and 13333 * umax_val is equal to umin_val. 13334 */ 13335 dst_reg->s32_min_value = (u32)(((s32)dst_reg->s32_min_value) >> umin_val); 13336 dst_reg->s32_max_value = (u32)(((s32)dst_reg->s32_max_value) >> umin_val); 13337 13338 dst_reg->var_off = tnum_arshift(tnum_subreg(dst_reg->var_off), umin_val, 32); 13339 13340 /* blow away the dst_reg umin_value/umax_value and rely on 13341 * dst_reg var_off to refine the result. 13342 */ 13343 dst_reg->u32_min_value = 0; 13344 dst_reg->u32_max_value = U32_MAX; 13345 13346 __mark_reg64_unbounded(dst_reg); 13347 __update_reg32_bounds(dst_reg); 13348 } 13349 13350 static void scalar_min_max_arsh(struct bpf_reg_state *dst_reg, 13351 struct bpf_reg_state *src_reg) 13352 { 13353 u64 umin_val = src_reg->umin_value; 13354 13355 /* Upon reaching here, src_known is true and umax_val is equal 13356 * to umin_val. 13357 */ 13358 dst_reg->smin_value >>= umin_val; 13359 dst_reg->smax_value >>= umin_val; 13360 13361 dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val, 64); 13362 13363 /* blow away the dst_reg umin_value/umax_value and rely on 13364 * dst_reg var_off to refine the result. 13365 */ 13366 dst_reg->umin_value = 0; 13367 dst_reg->umax_value = U64_MAX; 13368 13369 /* Its not easy to operate on alu32 bounds here because it depends 13370 * on bits being shifted in from upper 32-bits. Take easy way out 13371 * and mark unbounded so we can recalculate later from tnum. 13372 */ 13373 __mark_reg32_unbounded(dst_reg); 13374 __update_reg_bounds(dst_reg); 13375 } 13376 13377 /* WARNING: This function does calculations on 64-bit values, but the actual 13378 * execution may occur on 32-bit values. Therefore, things like bitshifts 13379 * need extra checks in the 32-bit case. 13380 */ 13381 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, 13382 struct bpf_insn *insn, 13383 struct bpf_reg_state *dst_reg, 13384 struct bpf_reg_state src_reg) 13385 { 13386 struct bpf_reg_state *regs = cur_regs(env); 13387 u8 opcode = BPF_OP(insn->code); 13388 bool src_known; 13389 s64 smin_val, smax_val; 13390 u64 umin_val, umax_val; 13391 s32 s32_min_val, s32_max_val; 13392 u32 u32_min_val, u32_max_val; 13393 u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 
64 : 32; 13394 bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64); 13395 int ret; 13396 13397 smin_val = src_reg.smin_value; 13398 smax_val = src_reg.smax_value; 13399 umin_val = src_reg.umin_value; 13400 umax_val = src_reg.umax_value; 13401 13402 s32_min_val = src_reg.s32_min_value; 13403 s32_max_val = src_reg.s32_max_value; 13404 u32_min_val = src_reg.u32_min_value; 13405 u32_max_val = src_reg.u32_max_value; 13406 13407 if (alu32) { 13408 src_known = tnum_subreg_is_const(src_reg.var_off); 13409 if ((src_known && 13410 (s32_min_val != s32_max_val || u32_min_val != u32_max_val)) || 13411 s32_min_val > s32_max_val || u32_min_val > u32_max_val) { 13412 /* Taint dst register if offset had invalid bounds 13413 * derived from e.g. dead branches. 13414 */ 13415 __mark_reg_unknown(env, dst_reg); 13416 return 0; 13417 } 13418 } else { 13419 src_known = tnum_is_const(src_reg.var_off); 13420 if ((src_known && 13421 (smin_val != smax_val || umin_val != umax_val)) || 13422 smin_val > smax_val || umin_val > umax_val) { 13423 /* Taint dst register if offset had invalid bounds 13424 * derived from e.g. dead branches. 13425 */ 13426 __mark_reg_unknown(env, dst_reg); 13427 return 0; 13428 } 13429 } 13430 13431 if (!src_known && 13432 opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) { 13433 __mark_reg_unknown(env, dst_reg); 13434 return 0; 13435 } 13436 13437 if (sanitize_needed(opcode)) { 13438 ret = sanitize_val_alu(env, insn); 13439 if (ret < 0) 13440 return sanitize_err(env, insn, ret, NULL, NULL); 13441 } 13442 13443 /* Calculate sign/unsigned bounds and tnum for alu32 and alu64 bit ops. 13444 * There are two classes of instructions: The first class we track both 13445 * alu32 and alu64 sign/unsigned bounds independently this provides the 13446 * greatest amount of precision when alu operations are mixed with jmp32 13447 * operations. These operations are BPF_ADD, BPF_SUB, BPF_MUL, BPF_ADD, 13448 * and BPF_OR. This is possible because these ops have fairly easy to 13449 * understand and calculate behavior in both 32-bit and 64-bit alu ops. 13450 * See alu32 verifier tests for examples. The second class of 13451 * operations, BPF_LSH, BPF_RSH, and BPF_ARSH, however are not so easy 13452 * with regards to tracking sign/unsigned bounds because the bits may 13453 * cross subreg boundaries in the alu64 case. When this happens we mark 13454 * the reg unbounded in the subreg bound space and use the resulting 13455 * tnum to calculate an approximation of the sign/unsigned bounds. 
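 * Concretely, helpers such as scalar_min_max_rsh() above call
 * __mark_reg32_unbounded()/__mark_reg64_unbounded() for the other
 * width and rely on the final reg_bounds_sync() below to recover what
 * it can from the resulting var_off.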
13456 */ 13457 switch (opcode) { 13458 case BPF_ADD: 13459 scalar32_min_max_add(dst_reg, &src_reg); 13460 scalar_min_max_add(dst_reg, &src_reg); 13461 dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off); 13462 break; 13463 case BPF_SUB: 13464 scalar32_min_max_sub(dst_reg, &src_reg); 13465 scalar_min_max_sub(dst_reg, &src_reg); 13466 dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off); 13467 break; 13468 case BPF_MUL: 13469 dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off); 13470 scalar32_min_max_mul(dst_reg, &src_reg); 13471 scalar_min_max_mul(dst_reg, &src_reg); 13472 break; 13473 case BPF_AND: 13474 dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off); 13475 scalar32_min_max_and(dst_reg, &src_reg); 13476 scalar_min_max_and(dst_reg, &src_reg); 13477 break; 13478 case BPF_OR: 13479 dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off); 13480 scalar32_min_max_or(dst_reg, &src_reg); 13481 scalar_min_max_or(dst_reg, &src_reg); 13482 break; 13483 case BPF_XOR: 13484 dst_reg->var_off = tnum_xor(dst_reg->var_off, src_reg.var_off); 13485 scalar32_min_max_xor(dst_reg, &src_reg); 13486 scalar_min_max_xor(dst_reg, &src_reg); 13487 break; 13488 case BPF_LSH: 13489 if (umax_val >= insn_bitness) { 13490 /* Shifts greater than 31 or 63 are undefined. 13491 * This includes shifts by a negative number. 13492 */ 13493 mark_reg_unknown(env, regs, insn->dst_reg); 13494 break; 13495 } 13496 if (alu32) 13497 scalar32_min_max_lsh(dst_reg, &src_reg); 13498 else 13499 scalar_min_max_lsh(dst_reg, &src_reg); 13500 break; 13501 case BPF_RSH: 13502 if (umax_val >= insn_bitness) { 13503 /* Shifts greater than 31 or 63 are undefined. 13504 * This includes shifts by a negative number. 13505 */ 13506 mark_reg_unknown(env, regs, insn->dst_reg); 13507 break; 13508 } 13509 if (alu32) 13510 scalar32_min_max_rsh(dst_reg, &src_reg); 13511 else 13512 scalar_min_max_rsh(dst_reg, &src_reg); 13513 break; 13514 case BPF_ARSH: 13515 if (umax_val >= insn_bitness) { 13516 /* Shifts greater than 31 or 63 are undefined. 13517 * This includes shifts by a negative number. 13518 */ 13519 mark_reg_unknown(env, regs, insn->dst_reg); 13520 break; 13521 } 13522 if (alu32) 13523 scalar32_min_max_arsh(dst_reg, &src_reg); 13524 else 13525 scalar_min_max_arsh(dst_reg, &src_reg); 13526 break; 13527 default: 13528 mark_reg_unknown(env, regs, insn->dst_reg); 13529 break; 13530 } 13531 13532 /* ALU32 ops are zero extended into 64bit register */ 13533 if (alu32) 13534 zext_32_to_64(dst_reg); 13535 reg_bounds_sync(dst_reg); 13536 return 0; 13537 } 13538 13539 /* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max 13540 * and var_off. 
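 * This is the dispatch point: pointer-vs-scalar operand combinations
 * are routed to adjust_ptr_min_max_vals(), while pure scalar/scalar
 * operations fall through to adjust_scalar_min_max_vals().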
13541 */ 13542 static int adjust_reg_min_max_vals(struct bpf_verifier_env *env, 13543 struct bpf_insn *insn) 13544 { 13545 struct bpf_verifier_state *vstate = env->cur_state; 13546 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 13547 struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg; 13548 struct bpf_reg_state *ptr_reg = NULL, off_reg = {0}; 13549 u8 opcode = BPF_OP(insn->code); 13550 int err; 13551 13552 dst_reg = &regs[insn->dst_reg]; 13553 src_reg = NULL; 13554 if (dst_reg->type != SCALAR_VALUE) 13555 ptr_reg = dst_reg; 13556 else 13557 /* Make sure ID is cleared otherwise dst_reg min/max could be 13558 * incorrectly propagated into other registers by find_equal_scalars() 13559 */ 13560 dst_reg->id = 0; 13561 if (BPF_SRC(insn->code) == BPF_X) { 13562 src_reg = &regs[insn->src_reg]; 13563 if (src_reg->type != SCALAR_VALUE) { 13564 if (dst_reg->type != SCALAR_VALUE) { 13565 /* Combining two pointers by any ALU op yields 13566 * an arbitrary scalar. Disallow all math except 13567 * pointer subtraction 13568 */ 13569 if (opcode == BPF_SUB && env->allow_ptr_leaks) { 13570 mark_reg_unknown(env, regs, insn->dst_reg); 13571 return 0; 13572 } 13573 verbose(env, "R%d pointer %s pointer prohibited\n", 13574 insn->dst_reg, 13575 bpf_alu_string[opcode >> 4]); 13576 return -EACCES; 13577 } else { 13578 /* scalar += pointer 13579 * This is legal, but we have to reverse our 13580 * src/dest handling in computing the range 13581 */ 13582 err = mark_chain_precision(env, insn->dst_reg); 13583 if (err) 13584 return err; 13585 return adjust_ptr_min_max_vals(env, insn, 13586 src_reg, dst_reg); 13587 } 13588 } else if (ptr_reg) { 13589 /* pointer += scalar */ 13590 err = mark_chain_precision(env, insn->src_reg); 13591 if (err) 13592 return err; 13593 return adjust_ptr_min_max_vals(env, insn, 13594 dst_reg, src_reg); 13595 } else if (dst_reg->precise) { 13596 /* if dst_reg is precise, src_reg should be precise as well */ 13597 err = mark_chain_precision(env, insn->src_reg); 13598 if (err) 13599 return err; 13600 } 13601 } else { 13602 /* Pretend the src is a reg with a known value, since we only 13603 * need to be able to read from this state.
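 * That is, for a BPF_K instruction the immediate is materialized as a
 * throw-away off_reg via __mark_reg_known() so the pointer/scalar
 * paths above and below can be reused unchanged.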
13604 */ 13605 off_reg.type = SCALAR_VALUE; 13606 __mark_reg_known(&off_reg, insn->imm); 13607 src_reg = &off_reg; 13608 if (ptr_reg) /* pointer += K */ 13609 return adjust_ptr_min_max_vals(env, insn, 13610 ptr_reg, src_reg); 13611 } 13612 13613 /* Got here implies adding two SCALAR_VALUEs */ 13614 if (WARN_ON_ONCE(ptr_reg)) { 13615 print_verifier_state(env, state, true); 13616 verbose(env, "verifier internal error: unexpected ptr_reg\n"); 13617 return -EINVAL; 13618 } 13619 if (WARN_ON(!src_reg)) { 13620 print_verifier_state(env, state, true); 13621 verbose(env, "verifier internal error: no src_reg\n"); 13622 return -EINVAL; 13623 } 13624 return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg); 13625 } 13626 13627 /* check validity of 32-bit and 64-bit arithmetic operations */ 13628 static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) 13629 { 13630 struct bpf_reg_state *regs = cur_regs(env); 13631 u8 opcode = BPF_OP(insn->code); 13632 int err; 13633 13634 if (opcode == BPF_END || opcode == BPF_NEG) { 13635 if (opcode == BPF_NEG) { 13636 if (BPF_SRC(insn->code) != BPF_K || 13637 insn->src_reg != BPF_REG_0 || 13638 insn->off != 0 || insn->imm != 0) { 13639 verbose(env, "BPF_NEG uses reserved fields\n"); 13640 return -EINVAL; 13641 } 13642 } else { 13643 if (insn->src_reg != BPF_REG_0 || insn->off != 0 || 13644 (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) || 13645 (BPF_CLASS(insn->code) == BPF_ALU64 && 13646 BPF_SRC(insn->code) != BPF_TO_LE)) { 13647 verbose(env, "BPF_END uses reserved fields\n"); 13648 return -EINVAL; 13649 } 13650 } 13651 13652 /* check src operand */ 13653 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 13654 if (err) 13655 return err; 13656 13657 if (is_pointer_value(env, insn->dst_reg)) { 13658 verbose(env, "R%d pointer arithmetic prohibited\n", 13659 insn->dst_reg); 13660 return -EACCES; 13661 } 13662 13663 /* check dest operand */ 13664 err = check_reg_arg(env, insn->dst_reg, DST_OP); 13665 if (err) 13666 return err; 13667 13668 } else if (opcode == BPF_MOV) { 13669 13670 if (BPF_SRC(insn->code) == BPF_X) { 13671 if (insn->imm != 0) { 13672 verbose(env, "BPF_MOV uses reserved fields\n"); 13673 return -EINVAL; 13674 } 13675 13676 if (BPF_CLASS(insn->code) == BPF_ALU) { 13677 if (insn->off != 0 && insn->off != 8 && insn->off != 16) { 13678 verbose(env, "BPF_MOV uses reserved fields\n"); 13679 return -EINVAL; 13680 } 13681 } else { 13682 if (insn->off != 0 && insn->off != 8 && insn->off != 16 && 13683 insn->off != 32) { 13684 verbose(env, "BPF_MOV uses reserved fields\n"); 13685 return -EINVAL; 13686 } 13687 } 13688 13689 /* check src operand */ 13690 err = check_reg_arg(env, insn->src_reg, SRC_OP); 13691 if (err) 13692 return err; 13693 } else { 13694 if (insn->src_reg != BPF_REG_0 || insn->off != 0) { 13695 verbose(env, "BPF_MOV uses reserved fields\n"); 13696 return -EINVAL; 13697 } 13698 } 13699 13700 /* check dest operand, mark as required later */ 13701 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); 13702 if (err) 13703 return err; 13704 13705 if (BPF_SRC(insn->code) == BPF_X) { 13706 struct bpf_reg_state *src_reg = regs + insn->src_reg; 13707 struct bpf_reg_state *dst_reg = regs + insn->dst_reg; 13708 bool need_id = src_reg->type == SCALAR_VALUE && !src_reg->id && 13709 !tnum_is_const(src_reg->var_off); 13710 13711 if (BPF_CLASS(insn->code) == BPF_ALU64) { 13712 if (insn->off == 0) { 13713 /* case: R1 = R2 13714 * copy register state to dest reg 13715 */ 13716 if (need_id) 13717 /* Assign src and dst registers 
the same ID 13718 * that will be used by find_equal_scalars() 13719 * to propagate min/max range. 13720 */ 13721 src_reg->id = ++env->id_gen; 13722 copy_register_state(dst_reg, src_reg); 13723 dst_reg->live |= REG_LIVE_WRITTEN; 13724 dst_reg->subreg_def = DEF_NOT_SUBREG; 13725 } else { 13726 /* case: R1 = (s8, s16 s32)R2 */ 13727 if (is_pointer_value(env, insn->src_reg)) { 13728 verbose(env, 13729 "R%d sign-extension part of pointer\n", 13730 insn->src_reg); 13731 return -EACCES; 13732 } else if (src_reg->type == SCALAR_VALUE) { 13733 bool no_sext; 13734 13735 no_sext = src_reg->umax_value < (1ULL << (insn->off - 1)); 13736 if (no_sext && need_id) 13737 src_reg->id = ++env->id_gen; 13738 copy_register_state(dst_reg, src_reg); 13739 if (!no_sext) 13740 dst_reg->id = 0; 13741 coerce_reg_to_size_sx(dst_reg, insn->off >> 3); 13742 dst_reg->live |= REG_LIVE_WRITTEN; 13743 dst_reg->subreg_def = DEF_NOT_SUBREG; 13744 } else { 13745 mark_reg_unknown(env, regs, insn->dst_reg); 13746 } 13747 } 13748 } else { 13749 /* R1 = (u32) R2 */ 13750 if (is_pointer_value(env, insn->src_reg)) { 13751 verbose(env, 13752 "R%d partial copy of pointer\n", 13753 insn->src_reg); 13754 return -EACCES; 13755 } else if (src_reg->type == SCALAR_VALUE) { 13756 if (insn->off == 0) { 13757 bool is_src_reg_u32 = src_reg->umax_value <= U32_MAX; 13758 13759 if (is_src_reg_u32 && need_id) 13760 src_reg->id = ++env->id_gen; 13761 copy_register_state(dst_reg, src_reg); 13762 /* Make sure ID is cleared if src_reg is not in u32 13763 * range otherwise dst_reg min/max could be incorrectly 13764 * propagated into src_reg by find_equal_scalars() 13765 */ 13766 if (!is_src_reg_u32) 13767 dst_reg->id = 0; 13768 dst_reg->live |= REG_LIVE_WRITTEN; 13769 dst_reg->subreg_def = env->insn_idx + 1; 13770 } else { 13771 /* case: W1 = (s8, s16)W2 */ 13772 bool no_sext = src_reg->umax_value < (1ULL << (insn->off - 1)); 13773 13774 if (no_sext && need_id) 13775 src_reg->id = ++env->id_gen; 13776 copy_register_state(dst_reg, src_reg); 13777 if (!no_sext) 13778 dst_reg->id = 0; 13779 dst_reg->live |= REG_LIVE_WRITTEN; 13780 dst_reg->subreg_def = env->insn_idx + 1; 13781 coerce_subreg_to_size_sx(dst_reg, insn->off >> 3); 13782 } 13783 } else { 13784 mark_reg_unknown(env, regs, 13785 insn->dst_reg); 13786 } 13787 zext_32_to_64(dst_reg); 13788 reg_bounds_sync(dst_reg); 13789 } 13790 } else { 13791 /* case: R = imm 13792 * remember the value we stored into this reg 13793 */ 13794 /* clear any state __mark_reg_known doesn't set */ 13795 mark_reg_unknown(env, regs, insn->dst_reg); 13796 regs[insn->dst_reg].type = SCALAR_VALUE; 13797 if (BPF_CLASS(insn->code) == BPF_ALU64) { 13798 __mark_reg_known(regs + insn->dst_reg, 13799 insn->imm); 13800 } else { 13801 __mark_reg_known(regs + insn->dst_reg, 13802 (u32)insn->imm); 13803 } 13804 } 13805 13806 } else if (opcode > BPF_END) { 13807 verbose(env, "invalid BPF_ALU opcode %x\n", opcode); 13808 return -EINVAL; 13809 13810 } else { /* all other ALU ops: and, sub, xor, add, ... 
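 * (this branch validates reserved fields, div-by-zero and shift-range
 * immediates below and then delegates the actual bounds tracking to
 * adjust_reg_min_max_vals())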
*/ 13811 13812 if (BPF_SRC(insn->code) == BPF_X) { 13813 if (insn->imm != 0 || insn->off > 1 || 13814 (insn->off == 1 && opcode != BPF_MOD && opcode != BPF_DIV)) { 13815 verbose(env, "BPF_ALU uses reserved fields\n"); 13816 return -EINVAL; 13817 } 13818 /* check src1 operand */ 13819 err = check_reg_arg(env, insn->src_reg, SRC_OP); 13820 if (err) 13821 return err; 13822 } else { 13823 if (insn->src_reg != BPF_REG_0 || insn->off > 1 || 13824 (insn->off == 1 && opcode != BPF_MOD && opcode != BPF_DIV)) { 13825 verbose(env, "BPF_ALU uses reserved fields\n"); 13826 return -EINVAL; 13827 } 13828 } 13829 13830 /* check src2 operand */ 13831 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 13832 if (err) 13833 return err; 13834 13835 if ((opcode == BPF_MOD || opcode == BPF_DIV) && 13836 BPF_SRC(insn->code) == BPF_K && insn->imm == 0) { 13837 verbose(env, "div by zero\n"); 13838 return -EINVAL; 13839 } 13840 13841 if ((opcode == BPF_LSH || opcode == BPF_RSH || 13842 opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { 13843 int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32; 13844 13845 if (insn->imm < 0 || insn->imm >= size) { 13846 verbose(env, "invalid shift %d\n", insn->imm); 13847 return -EINVAL; 13848 } 13849 } 13850 13851 /* check dest operand */ 13852 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); 13853 err = err ?: adjust_reg_min_max_vals(env, insn); 13854 if (err) 13855 return err; 13856 } 13857 13858 return reg_bounds_sanity_check(env, ®s[insn->dst_reg], "alu"); 13859 } 13860 13861 static void find_good_pkt_pointers(struct bpf_verifier_state *vstate, 13862 struct bpf_reg_state *dst_reg, 13863 enum bpf_reg_type type, 13864 bool range_right_open) 13865 { 13866 struct bpf_func_state *state; 13867 struct bpf_reg_state *reg; 13868 int new_range; 13869 13870 if (dst_reg->off < 0 || 13871 (dst_reg->off == 0 && range_right_open)) 13872 /* This doesn't give us any range */ 13873 return; 13874 13875 if (dst_reg->umax_value > MAX_PACKET_OFF || 13876 dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF) 13877 /* Risk of overflow. For instance, ptr + (1<<63) may be less 13878 * than pkt_end, but that's because it's also less than pkt. 13879 */ 13880 return; 13881 13882 new_range = dst_reg->off; 13883 if (range_right_open) 13884 new_range++; 13885 13886 /* Examples for register markings: 13887 * 13888 * pkt_data in dst register: 13889 * 13890 * r2 = r3; 13891 * r2 += 8; 13892 * if (r2 > pkt_end) goto <handle exception> 13893 * <access okay> 13894 * 13895 * r2 = r3; 13896 * r2 += 8; 13897 * if (r2 < pkt_end) goto <access okay> 13898 * <handle exception> 13899 * 13900 * Where: 13901 * r2 == dst_reg, pkt_end == src_reg 13902 * r2=pkt(id=n,off=8,r=0) 13903 * r3=pkt(id=n,off=0,r=0) 13904 * 13905 * pkt_data in src register: 13906 * 13907 * r2 = r3; 13908 * r2 += 8; 13909 * if (pkt_end >= r2) goto <access okay> 13910 * <handle exception> 13911 * 13912 * r2 = r3; 13913 * r2 += 8; 13914 * if (pkt_end <= r2) goto <handle exception> 13915 * <access okay> 13916 * 13917 * Where: 13918 * pkt_end == dst_reg, r2 == src_reg 13919 * r2=pkt(id=n,off=8,r=0) 13920 * r3=pkt(id=n,off=0,r=0) 13921 * 13922 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8) 13923 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8) 13924 * and [r3, r3 + 8-1) respectively is safe to access depending on 13925 * the check. 13926 */ 13927 13928 /* If our ids match, then we must have the same max_value. 
And we 13929 * don't care about the other reg's fixed offset, since if it's too big 13930 * the range won't allow anything. 13931 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16. 13932 */ 13933 bpf_for_each_reg_in_vstate(vstate, state, reg, ({ 13934 if (reg->type == type && reg->id == dst_reg->id) 13935 /* keep the maximum range already checked */ 13936 reg->range = max(reg->range, new_range); 13937 })); 13938 } 13939 13940 /* 13941 * <reg1> <op> <reg2>, currently assuming reg2 is a constant 13942 */ 13943 static int is_scalar_branch_taken(struct bpf_reg_state *reg1, struct bpf_reg_state *reg2, 13944 u8 opcode, bool is_jmp32) 13945 { 13946 struct tnum t1 = is_jmp32 ? tnum_subreg(reg1->var_off) : reg1->var_off; 13947 struct tnum t2 = is_jmp32 ? tnum_subreg(reg2->var_off) : reg2->var_off; 13948 u64 umin1 = is_jmp32 ? (u64)reg1->u32_min_value : reg1->umin_value; 13949 u64 umax1 = is_jmp32 ? (u64)reg1->u32_max_value : reg1->umax_value; 13950 s64 smin1 = is_jmp32 ? (s64)reg1->s32_min_value : reg1->smin_value; 13951 s64 smax1 = is_jmp32 ? (s64)reg1->s32_max_value : reg1->smax_value; 13952 u64 umin2 = is_jmp32 ? (u64)reg2->u32_min_value : reg2->umin_value; 13953 u64 umax2 = is_jmp32 ? (u64)reg2->u32_max_value : reg2->umax_value; 13954 s64 smin2 = is_jmp32 ? (s64)reg2->s32_min_value : reg2->smin_value; 13955 s64 smax2 = is_jmp32 ? (s64)reg2->s32_max_value : reg2->smax_value; 13956 13957 switch (opcode) { 13958 case BPF_JEQ: 13959 /* constants, umin/umax and smin/smax checks would be 13960 * redundant in this case because they all should match 13961 */ 13962 if (tnum_is_const(t1) && tnum_is_const(t2)) 13963 return t1.value == t2.value; 13964 /* non-overlapping ranges */ 13965 if (umin1 > umax2 || umax1 < umin2) 13966 return 0; 13967 if (smin1 > smax2 || smax1 < smin2) 13968 return 0; 13969 if (!is_jmp32) { 13970 /* if 64-bit ranges are inconclusive, see if we can 13971 * utilize 32-bit subrange knowledge to eliminate 13972 * branches that can't be taken a priori 13973 */ 13974 if (reg1->u32_min_value > reg2->u32_max_value || 13975 reg1->u32_max_value < reg2->u32_min_value) 13976 return 0; 13977 if (reg1->s32_min_value > reg2->s32_max_value || 13978 reg1->s32_max_value < reg2->s32_min_value) 13979 return 0; 13980 } 13981 break; 13982 case BPF_JNE: 13983 /* constants, umin/umax and smin/smax checks would be 13984 * redundant in this case because they all should match 13985 */ 13986 if (tnum_is_const(t1) && tnum_is_const(t2)) 13987 return t1.value != t2.value; 13988 /* non-overlapping ranges */ 13989 if (umin1 > umax2 || umax1 < umin2) 13990 return 1; 13991 if (smin1 > smax2 || smax1 < smin2) 13992 return 1; 13993 if (!is_jmp32) { 13994 /* if 64-bit ranges are inconclusive, see if we can 13995 * utilize 32-bit subrange knowledge to eliminate 13996 * branches that can't be taken a priori 13997 */ 13998 if (reg1->u32_min_value > reg2->u32_max_value || 13999 reg1->u32_max_value < reg2->u32_min_value) 14000 return 1; 14001 if (reg1->s32_min_value > reg2->s32_max_value || 14002 reg1->s32_max_value < reg2->s32_min_value) 14003 return 1; 14004 } 14005 break; 14006 case BPF_JSET: 14007 if (!is_reg_const(reg2, is_jmp32)) { 14008 swap(reg1, reg2); 14009 swap(t1, t2); 14010 } 14011 if (!is_reg_const(reg2, is_jmp32)) 14012 return -1; 14013 if ((~t1.mask & t1.value) & t2.value) 14014 return 1; 14015 if (!((t1.mask | t1.value) & t2.value)) 14016 return 0; 14017 break; 14018 case BPF_JGT: 14019 if (umin1 > umax2) 14020 return 1; 14021 else if (umax1 <= umin2) 14022 return 0; 14023 break; 14024 
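	/* Worked example (illustrative values): for BPF_JGT, if reg1 has range
	 * [5, 10] and reg2 is the known constant 3, then umin1 (5) > umax2 (3),
	 * so the branch is always taken and 1 is returned; with reg2 = 20
	 * instead, umax1 (10) <= umin2 (20) and 0 is returned. Overlapping
	 * ranges fall through to the final return -1 ("unknown").
	 */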
case BPF_JSGT: 14025 if (smin1 > smax2) 14026 return 1; 14027 else if (smax1 <= smin2) 14028 return 0; 14029 break; 14030 case BPF_JLT: 14031 if (umax1 < umin2) 14032 return 1; 14033 else if (umin1 >= umax2) 14034 return 0; 14035 break; 14036 case BPF_JSLT: 14037 if (smax1 < smin2) 14038 return 1; 14039 else if (smin1 >= smax2) 14040 return 0; 14041 break; 14042 case BPF_JGE: 14043 if (umin1 >= umax2) 14044 return 1; 14045 else if (umax1 < umin2) 14046 return 0; 14047 break; 14048 case BPF_JSGE: 14049 if (smin1 >= smax2) 14050 return 1; 14051 else if (smax1 < smin2) 14052 return 0; 14053 break; 14054 case BPF_JLE: 14055 if (umax1 <= umin2) 14056 return 1; 14057 else if (umin1 > umax2) 14058 return 0; 14059 break; 14060 case BPF_JSLE: 14061 if (smax1 <= smin2) 14062 return 1; 14063 else if (smin1 > smax2) 14064 return 0; 14065 break; 14066 } 14067 14068 return -1; 14069 } 14070 14071 static int flip_opcode(u32 opcode) 14072 { 14073 /* How can we transform "a <op> b" into "b <op> a"? */ 14074 static const u8 opcode_flip[16] = { 14075 /* these stay the same */ 14076 [BPF_JEQ >> 4] = BPF_JEQ, 14077 [BPF_JNE >> 4] = BPF_JNE, 14078 [BPF_JSET >> 4] = BPF_JSET, 14079 /* these swap "lesser" and "greater" (L and G in the opcodes) */ 14080 [BPF_JGE >> 4] = BPF_JLE, 14081 [BPF_JGT >> 4] = BPF_JLT, 14082 [BPF_JLE >> 4] = BPF_JGE, 14083 [BPF_JLT >> 4] = BPF_JGT, 14084 [BPF_JSGE >> 4] = BPF_JSLE, 14085 [BPF_JSGT >> 4] = BPF_JSLT, 14086 [BPF_JSLE >> 4] = BPF_JSGE, 14087 [BPF_JSLT >> 4] = BPF_JSGT 14088 }; 14089 return opcode_flip[opcode >> 4]; 14090 } 14091 14092 static int is_pkt_ptr_branch_taken(struct bpf_reg_state *dst_reg, 14093 struct bpf_reg_state *src_reg, 14094 u8 opcode) 14095 { 14096 struct bpf_reg_state *pkt; 14097 14098 if (src_reg->type == PTR_TO_PACKET_END) { 14099 pkt = dst_reg; 14100 } else if (dst_reg->type == PTR_TO_PACKET_END) { 14101 pkt = src_reg; 14102 opcode = flip_opcode(opcode); 14103 } else { 14104 return -1; 14105 } 14106 14107 if (pkt->range >= 0) 14108 return -1; 14109 14110 switch (opcode) { 14111 case BPF_JLE: 14112 /* pkt <= pkt_end */ 14113 fallthrough; 14114 case BPF_JGT: 14115 /* pkt > pkt_end */ 14116 if (pkt->range == BEYOND_PKT_END) 14117 /* pkt has at last one extra byte beyond pkt_end */ 14118 return opcode == BPF_JGT; 14119 break; 14120 case BPF_JLT: 14121 /* pkt < pkt_end */ 14122 fallthrough; 14123 case BPF_JGE: 14124 /* pkt >= pkt_end */ 14125 if (pkt->range == BEYOND_PKT_END || pkt->range == AT_PKT_END) 14126 return opcode == BPF_JGE; 14127 break; 14128 } 14129 return -1; 14130 } 14131 14132 /* compute branch direction of the expression "if (<reg1> opcode <reg2>) goto target;" 14133 * and return: 14134 * 1 - branch will be taken and "goto target" will be executed 14135 * 0 - branch will not be taken and fall-through to next insn 14136 * -1 - unknown. 
Example: "if (reg1 < 5)" is unknown when register value 14137 * range [0,10] 14138 */ 14139 static int is_branch_taken(struct bpf_reg_state *reg1, struct bpf_reg_state *reg2, 14140 u8 opcode, bool is_jmp32) 14141 { 14142 if (reg_is_pkt_pointer_any(reg1) && reg_is_pkt_pointer_any(reg2) && !is_jmp32) 14143 return is_pkt_ptr_branch_taken(reg1, reg2, opcode); 14144 14145 if (__is_pointer_value(false, reg1) || __is_pointer_value(false, reg2)) { 14146 u64 val; 14147 14148 /* arrange that reg2 is a scalar, and reg1 is a pointer */ 14149 if (!is_reg_const(reg2, is_jmp32)) { 14150 opcode = flip_opcode(opcode); 14151 swap(reg1, reg2); 14152 } 14153 /* and ensure that reg2 is a constant */ 14154 if (!is_reg_const(reg2, is_jmp32)) 14155 return -1; 14156 14157 if (!reg_not_null(reg1)) 14158 return -1; 14159 14160 /* If pointer is valid tests against zero will fail so we can 14161 * use this to direct branch taken. 14162 */ 14163 val = reg_const_value(reg2, is_jmp32); 14164 if (val != 0) 14165 return -1; 14166 14167 switch (opcode) { 14168 case BPF_JEQ: 14169 return 0; 14170 case BPF_JNE: 14171 return 1; 14172 default: 14173 return -1; 14174 } 14175 } 14176 14177 /* now deal with two scalars, but not necessarily constants */ 14178 return is_scalar_branch_taken(reg1, reg2, opcode, is_jmp32); 14179 } 14180 14181 /* Opcode that corresponds to a *false* branch condition. 14182 * E.g., if r1 < r2, then reverse (false) condition is r1 >= r2 14183 */ 14184 static u8 rev_opcode(u8 opcode) 14185 { 14186 switch (opcode) { 14187 case BPF_JEQ: return BPF_JNE; 14188 case BPF_JNE: return BPF_JEQ; 14189 /* JSET doesn't have it's reverse opcode in BPF, so add 14190 * BPF_X flag to denote the reverse of that operation 14191 */ 14192 case BPF_JSET: return BPF_JSET | BPF_X; 14193 case BPF_JSET | BPF_X: return BPF_JSET; 14194 case BPF_JGE: return BPF_JLT; 14195 case BPF_JGT: return BPF_JLE; 14196 case BPF_JLE: return BPF_JGT; 14197 case BPF_JLT: return BPF_JGE; 14198 case BPF_JSGE: return BPF_JSLT; 14199 case BPF_JSGT: return BPF_JSLE; 14200 case BPF_JSLE: return BPF_JSGT; 14201 case BPF_JSLT: return BPF_JSGE; 14202 default: return 0; 14203 } 14204 } 14205 14206 /* Refine range knowledge for <reg1> <op> <reg>2 conditional operation. 
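 *
 * For illustration (made-up ranges): if "r1 <= r2" is known to be true with
 * r1 in [5, 100] and r2 in [1, 20], the 64-bit BPF_JLE case below lowers
 * r1->umax_value to min(100, 20) = 20 and raises r2->umin_value to
 * max(5, 1) = 5, since r1 is bounded above by r2's max and r2 is bounded
 * below by r1's min.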
*/ 14207 static void regs_refine_cond_op(struct bpf_reg_state *reg1, struct bpf_reg_state *reg2, 14208 u8 opcode, bool is_jmp32) 14209 { 14210 struct tnum t; 14211 u64 val; 14212 14213 again: 14214 switch (opcode) { 14215 case BPF_JEQ: 14216 if (is_jmp32) { 14217 reg1->u32_min_value = max(reg1->u32_min_value, reg2->u32_min_value); 14218 reg1->u32_max_value = min(reg1->u32_max_value, reg2->u32_max_value); 14219 reg1->s32_min_value = max(reg1->s32_min_value, reg2->s32_min_value); 14220 reg1->s32_max_value = min(reg1->s32_max_value, reg2->s32_max_value); 14221 reg2->u32_min_value = reg1->u32_min_value; 14222 reg2->u32_max_value = reg1->u32_max_value; 14223 reg2->s32_min_value = reg1->s32_min_value; 14224 reg2->s32_max_value = reg1->s32_max_value; 14225 14226 t = tnum_intersect(tnum_subreg(reg1->var_off), tnum_subreg(reg2->var_off)); 14227 reg1->var_off = tnum_with_subreg(reg1->var_off, t); 14228 reg2->var_off = tnum_with_subreg(reg2->var_off, t); 14229 } else { 14230 reg1->umin_value = max(reg1->umin_value, reg2->umin_value); 14231 reg1->umax_value = min(reg1->umax_value, reg2->umax_value); 14232 reg1->smin_value = max(reg1->smin_value, reg2->smin_value); 14233 reg1->smax_value = min(reg1->smax_value, reg2->smax_value); 14234 reg2->umin_value = reg1->umin_value; 14235 reg2->umax_value = reg1->umax_value; 14236 reg2->smin_value = reg1->smin_value; 14237 reg2->smax_value = reg1->smax_value; 14238 14239 reg1->var_off = tnum_intersect(reg1->var_off, reg2->var_off); 14240 reg2->var_off = reg1->var_off; 14241 } 14242 break; 14243 case BPF_JNE: 14244 /* we don't derive any new information for inequality yet */ 14245 break; 14246 case BPF_JSET: 14247 if (!is_reg_const(reg2, is_jmp32)) 14248 swap(reg1, reg2); 14249 if (!is_reg_const(reg2, is_jmp32)) 14250 break; 14251 val = reg_const_value(reg2, is_jmp32); 14252 /* BPF_JSET (i.e., TRUE branch, *not* BPF_JSET | BPF_X) 14253 * requires single bit to learn something useful. E.g., if we 14254 * know that `r1 & 0x3` is true, then which bits (0, 1, or both) 14255 * are actually set? We can learn something definite only if 14256 * it's a single-bit value to begin with. 14257 * 14258 * BPF_JSET | BPF_X (i.e., negation of BPF_JSET) doesn't have 14259 * this restriction. I.e., !(r1 & 0x3) means neither bit 0 nor 14260 * bit 1 is set, which we can readily use in adjustments. 
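 *
 * E.g. (illustrative): taking "if r1 & 0x4 goto ..." lets the code below OR
 * 0x4 into r1's var_off, so that bit becomes known 1; the negated form
 * (BPF_JSET | BPF_X) instead ANDs ~mask into var_off, marking every bit of
 * the mask as known 0, and is not limited to single-bit masks.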
14261 */ 14262 if (!is_power_of_2(val)) 14263 break; 14264 if (is_jmp32) { 14265 t = tnum_or(tnum_subreg(reg1->var_off), tnum_const(val)); 14266 reg1->var_off = tnum_with_subreg(reg1->var_off, t); 14267 } else { 14268 reg1->var_off = tnum_or(reg1->var_off, tnum_const(val)); 14269 } 14270 break; 14271 case BPF_JSET | BPF_X: /* reverse of BPF_JSET, see rev_opcode() */ 14272 if (!is_reg_const(reg2, is_jmp32)) 14273 swap(reg1, reg2); 14274 if (!is_reg_const(reg2, is_jmp32)) 14275 break; 14276 val = reg_const_value(reg2, is_jmp32); 14277 if (is_jmp32) { 14278 t = tnum_and(tnum_subreg(reg1->var_off), tnum_const(~val)); 14279 reg1->var_off = tnum_with_subreg(reg1->var_off, t); 14280 } else { 14281 reg1->var_off = tnum_and(reg1->var_off, tnum_const(~val)); 14282 } 14283 break; 14284 case BPF_JLE: 14285 if (is_jmp32) { 14286 reg1->u32_max_value = min(reg1->u32_max_value, reg2->u32_max_value); 14287 reg2->u32_min_value = max(reg1->u32_min_value, reg2->u32_min_value); 14288 } else { 14289 reg1->umax_value = min(reg1->umax_value, reg2->umax_value); 14290 reg2->umin_value = max(reg1->umin_value, reg2->umin_value); 14291 } 14292 break; 14293 case BPF_JLT: 14294 if (is_jmp32) { 14295 reg1->u32_max_value = min(reg1->u32_max_value, reg2->u32_max_value - 1); 14296 reg2->u32_min_value = max(reg1->u32_min_value + 1, reg2->u32_min_value); 14297 } else { 14298 reg1->umax_value = min(reg1->umax_value, reg2->umax_value - 1); 14299 reg2->umin_value = max(reg1->umin_value + 1, reg2->umin_value); 14300 } 14301 break; 14302 case BPF_JSLE: 14303 if (is_jmp32) { 14304 reg1->s32_max_value = min(reg1->s32_max_value, reg2->s32_max_value); 14305 reg2->s32_min_value = max(reg1->s32_min_value, reg2->s32_min_value); 14306 } else { 14307 reg1->smax_value = min(reg1->smax_value, reg2->smax_value); 14308 reg2->smin_value = max(reg1->smin_value, reg2->smin_value); 14309 } 14310 break; 14311 case BPF_JSLT: 14312 if (is_jmp32) { 14313 reg1->s32_max_value = min(reg1->s32_max_value, reg2->s32_max_value - 1); 14314 reg2->s32_min_value = max(reg1->s32_min_value + 1, reg2->s32_min_value); 14315 } else { 14316 reg1->smax_value = min(reg1->smax_value, reg2->smax_value - 1); 14317 reg2->smin_value = max(reg1->smin_value + 1, reg2->smin_value); 14318 } 14319 break; 14320 case BPF_JGE: 14321 case BPF_JGT: 14322 case BPF_JSGE: 14323 case BPF_JSGT: 14324 /* just reuse LE/LT logic above */ 14325 opcode = flip_opcode(opcode); 14326 swap(reg1, reg2); 14327 goto again; 14328 default: 14329 return; 14330 } 14331 } 14332 14333 /* Adjusts the register min/max values in the case that the dst_reg and 14334 * src_reg are both SCALAR_VALUE registers (or we are simply doing a BPF_K 14335 * check, in which case we havea fake SCALAR_VALUE representing insn->imm). 14336 * Technically we can do similar adjustments for pointers to the same object, 14337 * but we don't support that right now. 14338 */ 14339 static int reg_set_min_max(struct bpf_verifier_env *env, 14340 struct bpf_reg_state *true_reg1, 14341 struct bpf_reg_state *true_reg2, 14342 struct bpf_reg_state *false_reg1, 14343 struct bpf_reg_state *false_reg2, 14344 u8 opcode, bool is_jmp32) 14345 { 14346 int err; 14347 14348 /* If either register is a pointer, we can't learn anything about its 14349 * variable offset from the compare (unless they were a pointer into 14350 * the same object, but we don't bother with that). 
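 *
 * For two scalars the flow below is: refine the false-branch register copies
 * with the reversed opcode (rev_opcode()), refine the true-branch copies with
 * the original opcode, then reg_bounds_sync() each register and run
 * reg_bounds_sanity_check() on all four resulting states.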
14351 */ 14352 if (false_reg1->type != SCALAR_VALUE || false_reg2->type != SCALAR_VALUE) 14353 return 0; 14354 14355 /* fallthrough (FALSE) branch */ 14356 regs_refine_cond_op(false_reg1, false_reg2, rev_opcode(opcode), is_jmp32); 14357 reg_bounds_sync(false_reg1); 14358 reg_bounds_sync(false_reg2); 14359 14360 /* jump (TRUE) branch */ 14361 regs_refine_cond_op(true_reg1, true_reg2, opcode, is_jmp32); 14362 reg_bounds_sync(true_reg1); 14363 reg_bounds_sync(true_reg2); 14364 14365 err = reg_bounds_sanity_check(env, true_reg1, "true_reg1"); 14366 err = err ?: reg_bounds_sanity_check(env, true_reg2, "true_reg2"); 14367 err = err ?: reg_bounds_sanity_check(env, false_reg1, "false_reg1"); 14368 err = err ?: reg_bounds_sanity_check(env, false_reg2, "false_reg2"); 14369 return err; 14370 } 14371 14372 static void mark_ptr_or_null_reg(struct bpf_func_state *state, 14373 struct bpf_reg_state *reg, u32 id, 14374 bool is_null) 14375 { 14376 if (type_may_be_null(reg->type) && reg->id == id && 14377 (is_rcu_reg(reg) || !WARN_ON_ONCE(!reg->id))) { 14378 /* Old offset (both fixed and variable parts) should have been 14379 * known-zero, because we don't allow pointer arithmetic on 14380 * pointers that might be NULL. If we see this happening, don't 14381 * convert the register. 14382 * 14383 * But in some cases, some helpers that return local kptrs 14384 * advance offset for the returned pointer. In those cases, it 14385 * is fine to expect to see reg->off. 14386 */ 14387 if (WARN_ON_ONCE(reg->smin_value || reg->smax_value || !tnum_equals_const(reg->var_off, 0))) 14388 return; 14389 if (!(type_is_ptr_alloc_obj(reg->type) || type_is_non_owning_ref(reg->type)) && 14390 WARN_ON_ONCE(reg->off)) 14391 return; 14392 14393 if (is_null) { 14394 reg->type = SCALAR_VALUE; 14395 /* We don't need id and ref_obj_id from this point 14396 * onwards anymore, thus we should better reset it, 14397 * so that state pruning has chances to take effect. 14398 */ 14399 reg->id = 0; 14400 reg->ref_obj_id = 0; 14401 14402 return; 14403 } 14404 14405 mark_ptr_not_null_reg(reg); 14406 14407 if (!reg_may_point_to_spin_lock(reg)) { 14408 /* For not-NULL ptr, reg->ref_obj_id will be reset 14409 * in release_reference(). 14410 * 14411 * reg->id is still used by spin_lock ptr. Other 14412 * than spin_lock ptr type, reg->id can be reset. 14413 */ 14414 reg->id = 0; 14415 } 14416 } 14417 } 14418 14419 /* The logic is similar to find_good_pkt_pointers(), both could eventually 14420 * be folded together at some point. 14421 */ 14422 static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno, 14423 bool is_null) 14424 { 14425 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 14426 struct bpf_reg_state *regs = state->regs, *reg; 14427 u32 ref_obj_id = regs[regno].ref_obj_id; 14428 u32 id = regs[regno].id; 14429 14430 if (ref_obj_id && ref_obj_id == id && is_null) 14431 /* regs[regno] is in the " == NULL" branch. 14432 * No one could have freed the reference state before 14433 * doing the NULL check. 
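 *
 * E.g. a socket reference acquired via bpf_sk_lookup_tcp() and then tested
 * with "if (sk == NULL)" is released here on the NULL branch, since a NULL
 * result carries nothing that still needs bpf_sk_release().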
14434 */ 14435 WARN_ON_ONCE(release_reference_state(state, id)); 14436 14437 bpf_for_each_reg_in_vstate(vstate, state, reg, ({ 14438 mark_ptr_or_null_reg(state, reg, id, is_null); 14439 })); 14440 } 14441 14442 static bool try_match_pkt_pointers(const struct bpf_insn *insn, 14443 struct bpf_reg_state *dst_reg, 14444 struct bpf_reg_state *src_reg, 14445 struct bpf_verifier_state *this_branch, 14446 struct bpf_verifier_state *other_branch) 14447 { 14448 if (BPF_SRC(insn->code) != BPF_X) 14449 return false; 14450 14451 /* Pointers are always 64-bit. */ 14452 if (BPF_CLASS(insn->code) == BPF_JMP32) 14453 return false; 14454 14455 switch (BPF_OP(insn->code)) { 14456 case BPF_JGT: 14457 if ((dst_reg->type == PTR_TO_PACKET && 14458 src_reg->type == PTR_TO_PACKET_END) || 14459 (dst_reg->type == PTR_TO_PACKET_META && 14460 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 14461 /* pkt_data' > pkt_end, pkt_meta' > pkt_data */ 14462 find_good_pkt_pointers(this_branch, dst_reg, 14463 dst_reg->type, false); 14464 mark_pkt_end(other_branch, insn->dst_reg, true); 14465 } else if ((dst_reg->type == PTR_TO_PACKET_END && 14466 src_reg->type == PTR_TO_PACKET) || 14467 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && 14468 src_reg->type == PTR_TO_PACKET_META)) { 14469 /* pkt_end > pkt_data', pkt_data > pkt_meta' */ 14470 find_good_pkt_pointers(other_branch, src_reg, 14471 src_reg->type, true); 14472 mark_pkt_end(this_branch, insn->src_reg, false); 14473 } else { 14474 return false; 14475 } 14476 break; 14477 case BPF_JLT: 14478 if ((dst_reg->type == PTR_TO_PACKET && 14479 src_reg->type == PTR_TO_PACKET_END) || 14480 (dst_reg->type == PTR_TO_PACKET_META && 14481 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 14482 /* pkt_data' < pkt_end, pkt_meta' < pkt_data */ 14483 find_good_pkt_pointers(other_branch, dst_reg, 14484 dst_reg->type, true); 14485 mark_pkt_end(this_branch, insn->dst_reg, false); 14486 } else if ((dst_reg->type == PTR_TO_PACKET_END && 14487 src_reg->type == PTR_TO_PACKET) || 14488 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && 14489 src_reg->type == PTR_TO_PACKET_META)) { 14490 /* pkt_end < pkt_data', pkt_data > pkt_meta' */ 14491 find_good_pkt_pointers(this_branch, src_reg, 14492 src_reg->type, false); 14493 mark_pkt_end(other_branch, insn->src_reg, true); 14494 } else { 14495 return false; 14496 } 14497 break; 14498 case BPF_JGE: 14499 if ((dst_reg->type == PTR_TO_PACKET && 14500 src_reg->type == PTR_TO_PACKET_END) || 14501 (dst_reg->type == PTR_TO_PACKET_META && 14502 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 14503 /* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */ 14504 find_good_pkt_pointers(this_branch, dst_reg, 14505 dst_reg->type, true); 14506 mark_pkt_end(other_branch, insn->dst_reg, false); 14507 } else if ((dst_reg->type == PTR_TO_PACKET_END && 14508 src_reg->type == PTR_TO_PACKET) || 14509 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && 14510 src_reg->type == PTR_TO_PACKET_META)) { 14511 /* pkt_end >= pkt_data', pkt_data >= pkt_meta' */ 14512 find_good_pkt_pointers(other_branch, src_reg, 14513 src_reg->type, false); 14514 mark_pkt_end(this_branch, insn->src_reg, true); 14515 } else { 14516 return false; 14517 } 14518 break; 14519 case BPF_JLE: 14520 if ((dst_reg->type == PTR_TO_PACKET && 14521 src_reg->type == PTR_TO_PACKET_END) || 14522 (dst_reg->type == PTR_TO_PACKET_META && 14523 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 14524 /* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */ 14525 find_good_pkt_pointers(other_branch, dst_reg, 14526 
dst_reg->type, false); 14527 mark_pkt_end(this_branch, insn->dst_reg, true); 14528 } else if ((dst_reg->type == PTR_TO_PACKET_END && 14529 src_reg->type == PTR_TO_PACKET) || 14530 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && 14531 src_reg->type == PTR_TO_PACKET_META)) { 14532 /* pkt_end <= pkt_data', pkt_data <= pkt_meta' */ 14533 find_good_pkt_pointers(this_branch, src_reg, 14534 src_reg->type, true); 14535 mark_pkt_end(other_branch, insn->src_reg, false); 14536 } else { 14537 return false; 14538 } 14539 break; 14540 default: 14541 return false; 14542 } 14543 14544 return true; 14545 } 14546 14547 static void find_equal_scalars(struct bpf_verifier_state *vstate, 14548 struct bpf_reg_state *known_reg) 14549 { 14550 struct bpf_func_state *state; 14551 struct bpf_reg_state *reg; 14552 14553 bpf_for_each_reg_in_vstate(vstate, state, reg, ({ 14554 if (reg->type == SCALAR_VALUE && reg->id == known_reg->id) 14555 copy_register_state(reg, known_reg); 14556 })); 14557 } 14558 14559 static int check_cond_jmp_op(struct bpf_verifier_env *env, 14560 struct bpf_insn *insn, int *insn_idx) 14561 { 14562 struct bpf_verifier_state *this_branch = env->cur_state; 14563 struct bpf_verifier_state *other_branch; 14564 struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs; 14565 struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL; 14566 struct bpf_reg_state *eq_branch_regs; 14567 struct bpf_reg_state fake_reg = {}; 14568 u8 opcode = BPF_OP(insn->code); 14569 bool is_jmp32; 14570 int pred = -1; 14571 int err; 14572 14573 /* Only conditional jumps are expected to reach here. */ 14574 if (opcode == BPF_JA || opcode > BPF_JSLE) { 14575 verbose(env, "invalid BPF_JMP/JMP32 opcode %x\n", opcode); 14576 return -EINVAL; 14577 } 14578 14579 /* check src2 operand */ 14580 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 14581 if (err) 14582 return err; 14583 14584 dst_reg = &regs[insn->dst_reg]; 14585 if (BPF_SRC(insn->code) == BPF_X) { 14586 if (insn->imm != 0) { 14587 verbose(env, "BPF_JMP/JMP32 uses reserved fields\n"); 14588 return -EINVAL; 14589 } 14590 14591 /* check src1 operand */ 14592 err = check_reg_arg(env, insn->src_reg, SRC_OP); 14593 if (err) 14594 return err; 14595 14596 src_reg = &regs[insn->src_reg]; 14597 if (!(reg_is_pkt_pointer_any(dst_reg) && reg_is_pkt_pointer_any(src_reg)) && 14598 is_pointer_value(env, insn->src_reg)) { 14599 verbose(env, "R%d pointer comparison prohibited\n", 14600 insn->src_reg); 14601 return -EACCES; 14602 } 14603 } else { 14604 if (insn->src_reg != BPF_REG_0) { 14605 verbose(env, "BPF_JMP/JMP32 uses reserved fields\n"); 14606 return -EINVAL; 14607 } 14608 src_reg = &fake_reg; 14609 src_reg->type = SCALAR_VALUE; 14610 __mark_reg_known(src_reg, insn->imm); 14611 } 14612 14613 is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; 14614 pred = is_branch_taken(dst_reg, src_reg, opcode, is_jmp32); 14615 if (pred >= 0) { 14616 /* If we get here with a dst_reg pointer type it is because 14617 * above is_branch_taken() special cased the 0 comparison. 14618 */ 14619 if (!__is_pointer_value(false, dst_reg)) 14620 err = mark_chain_precision(env, insn->dst_reg); 14621 if (BPF_SRC(insn->code) == BPF_X && !err && 14622 !__is_pointer_value(false, src_reg)) 14623 err = mark_chain_precision(env, insn->src_reg); 14624 if (err) 14625 return err; 14626 } 14627 14628 if (pred == 1) { 14629 /* Only follow the goto, ignore fall-through. If needed, push 14630 * the fall-through branch for simulation under speculative 14631 * execution.
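 * E.g. if r1 is known to equal 0 and the insn is "if r1 == 0 goto +4", pred
 * is 1: only the jump target keeps being verified, while
 * sanitize_speculative_path() still walks the dead fall-through for
 * unprivileged programs so it stays safe under speculation.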
14632 */ 14633 if (!env->bypass_spec_v1 && 14634 !sanitize_speculative_path(env, insn, *insn_idx + 1, 14635 *insn_idx)) 14636 return -EFAULT; 14637 if (env->log.level & BPF_LOG_LEVEL) 14638 print_insn_state(env, this_branch->frame[this_branch->curframe]); 14639 *insn_idx += insn->off; 14640 return 0; 14641 } else if (pred == 0) { 14642 /* Only follow the fall-through branch, since that's where the 14643 * program will go. If needed, push the goto branch for 14644 * simulation under speculative execution. 14645 */ 14646 if (!env->bypass_spec_v1 && 14647 !sanitize_speculative_path(env, insn, 14648 *insn_idx + insn->off + 1, 14649 *insn_idx)) 14650 return -EFAULT; 14651 if (env->log.level & BPF_LOG_LEVEL) 14652 print_insn_state(env, this_branch->frame[this_branch->curframe]); 14653 return 0; 14654 } 14655 14656 other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx, 14657 false); 14658 if (!other_branch) 14659 return -EFAULT; 14660 other_branch_regs = other_branch->frame[other_branch->curframe]->regs; 14661 14662 if (BPF_SRC(insn->code) == BPF_X) { 14663 err = reg_set_min_max(env, 14664 &other_branch_regs[insn->dst_reg], 14665 &other_branch_regs[insn->src_reg], 14666 dst_reg, src_reg, opcode, is_jmp32); 14667 } else /* BPF_SRC(insn->code) == BPF_K */ { 14668 err = reg_set_min_max(env, 14669 &other_branch_regs[insn->dst_reg], 14670 src_reg /* fake one */, 14671 dst_reg, src_reg /* same fake one */, 14672 opcode, is_jmp32); 14673 } 14674 if (err) 14675 return err; 14676 14677 if (BPF_SRC(insn->code) == BPF_X && 14678 src_reg->type == SCALAR_VALUE && src_reg->id && 14679 !WARN_ON_ONCE(src_reg->id != other_branch_regs[insn->src_reg].id)) { 14680 find_equal_scalars(this_branch, src_reg); 14681 find_equal_scalars(other_branch, &other_branch_regs[insn->src_reg]); 14682 } 14683 if (dst_reg->type == SCALAR_VALUE && dst_reg->id && 14684 !WARN_ON_ONCE(dst_reg->id != other_branch_regs[insn->dst_reg].id)) { 14685 find_equal_scalars(this_branch, dst_reg); 14686 find_equal_scalars(other_branch, &other_branch_regs[insn->dst_reg]); 14687 } 14688 14689 /* if one pointer register is compared to another pointer 14690 * register check if PTR_MAYBE_NULL could be lifted. 14691 * E.g. register A - maybe null 14692 * register B - not null 14693 * for JNE A, B, ... - A is not null in the false branch; 14694 * for JEQ A, B, ... - A is not null in the true branch. 14695 * 14696 * Since PTR_TO_BTF_ID points to a kernel struct that does 14697 * not need to be null checked by the BPF program, i.e., 14698 * could be null even without PTR_MAYBE_NULL marking, so 14699 * only propagate nullness when neither reg is that type. 14700 */ 14701 if (!is_jmp32 && BPF_SRC(insn->code) == BPF_X && 14702 __is_pointer_value(false, src_reg) && __is_pointer_value(false, dst_reg) && 14703 type_may_be_null(src_reg->type) != type_may_be_null(dst_reg->type) && 14704 base_type(src_reg->type) != PTR_TO_BTF_ID && 14705 base_type(dst_reg->type) != PTR_TO_BTF_ID) { 14706 eq_branch_regs = NULL; 14707 switch (opcode) { 14708 case BPF_JEQ: 14709 eq_branch_regs = other_branch_regs; 14710 break; 14711 case BPF_JNE: 14712 eq_branch_regs = regs; 14713 break; 14714 default: 14715 /* do nothing */ 14716 break; 14717 } 14718 if (eq_branch_regs) { 14719 if (type_may_be_null(src_reg->type)) 14720 mark_ptr_not_null_reg(&eq_branch_regs[insn->src_reg]); 14721 else 14722 mark_ptr_not_null_reg(&eq_branch_regs[insn->dst_reg]); 14723 } 14724 } 14725 14726 /* detect if R == 0 where R is returned from bpf_map_lookup_elem(). 
14727 * NOTE: these optimizations below are related with pointer comparison 14728 * which will never be JMP32. 14729 */ 14730 if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K && 14731 insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) && 14732 type_may_be_null(dst_reg->type)) { 14733 /* Mark all identical registers in each branch as either 14734 * safe or unknown depending R == 0 or R != 0 conditional. 14735 */ 14736 mark_ptr_or_null_regs(this_branch, insn->dst_reg, 14737 opcode == BPF_JNE); 14738 mark_ptr_or_null_regs(other_branch, insn->dst_reg, 14739 opcode == BPF_JEQ); 14740 } else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg], 14741 this_branch, other_branch) && 14742 is_pointer_value(env, insn->dst_reg)) { 14743 verbose(env, "R%d pointer comparison prohibited\n", 14744 insn->dst_reg); 14745 return -EACCES; 14746 } 14747 if (env->log.level & BPF_LOG_LEVEL) 14748 print_insn_state(env, this_branch->frame[this_branch->curframe]); 14749 return 0; 14750 } 14751 14752 /* verify BPF_LD_IMM64 instruction */ 14753 static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn) 14754 { 14755 struct bpf_insn_aux_data *aux = cur_aux(env); 14756 struct bpf_reg_state *regs = cur_regs(env); 14757 struct bpf_reg_state *dst_reg; 14758 struct bpf_map *map; 14759 int err; 14760 14761 if (BPF_SIZE(insn->code) != BPF_DW) { 14762 verbose(env, "invalid BPF_LD_IMM insn\n"); 14763 return -EINVAL; 14764 } 14765 if (insn->off != 0) { 14766 verbose(env, "BPF_LD_IMM64 uses reserved fields\n"); 14767 return -EINVAL; 14768 } 14769 14770 err = check_reg_arg(env, insn->dst_reg, DST_OP); 14771 if (err) 14772 return err; 14773 14774 dst_reg = &regs[insn->dst_reg]; 14775 if (insn->src_reg == 0) { 14776 u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm; 14777 14778 dst_reg->type = SCALAR_VALUE; 14779 __mark_reg_known(&regs[insn->dst_reg], imm); 14780 return 0; 14781 } 14782 14783 /* All special src_reg cases are listed below. From this point onwards 14784 * we either succeed and assign a corresponding dst_reg->type after 14785 * zeroing the offset, or fail and reject the program.
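 * E.g. a ld_imm64 emitted by libbpf for a map reference arrives here with
 * src_reg set to BPF_PSEUDO_MAP_FD or BPF_PSEUDO_MAP_IDX and becomes
 * CONST_PTR_TO_MAP, while BPF_PSEUDO_MAP_VALUE / BPF_PSEUDO_MAP_IDX_VALUE
 * becomes PTR_TO_MAP_VALUE with off taken from aux->map_off.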
14786 */ 14787 mark_reg_known_zero(env, regs, insn->dst_reg); 14788 14789 if (insn->src_reg == BPF_PSEUDO_BTF_ID) { 14790 dst_reg->type = aux->btf_var.reg_type; 14791 switch (base_type(dst_reg->type)) { 14792 case PTR_TO_MEM: 14793 dst_reg->mem_size = aux->btf_var.mem_size; 14794 break; 14795 case PTR_TO_BTF_ID: 14796 dst_reg->btf = aux->btf_var.btf; 14797 dst_reg->btf_id = aux->btf_var.btf_id; 14798 break; 14799 default: 14800 verbose(env, "bpf verifier is misconfigured\n"); 14801 return -EFAULT; 14802 } 14803 return 0; 14804 } 14805 14806 if (insn->src_reg == BPF_PSEUDO_FUNC) { 14807 struct bpf_prog_aux *aux = env->prog->aux; 14808 u32 subprogno = find_subprog(env, 14809 env->insn_idx + insn->imm + 1); 14810 14811 if (!aux->func_info) { 14812 verbose(env, "missing btf func_info\n"); 14813 return -EINVAL; 14814 } 14815 if (aux->func_info_aux[subprogno].linkage != BTF_FUNC_STATIC) { 14816 verbose(env, "callback function not static\n"); 14817 return -EINVAL; 14818 } 14819 14820 dst_reg->type = PTR_TO_FUNC; 14821 dst_reg->subprogno = subprogno; 14822 return 0; 14823 } 14824 14825 map = env->used_maps[aux->map_index]; 14826 dst_reg->map_ptr = map; 14827 14828 if (insn->src_reg == BPF_PSEUDO_MAP_VALUE || 14829 insn->src_reg == BPF_PSEUDO_MAP_IDX_VALUE) { 14830 dst_reg->type = PTR_TO_MAP_VALUE; 14831 dst_reg->off = aux->map_off; 14832 WARN_ON_ONCE(map->max_entries != 1); 14833 /* We want reg->id to be same (0) as map_value is not distinct */ 14834 } else if (insn->src_reg == BPF_PSEUDO_MAP_FD || 14835 insn->src_reg == BPF_PSEUDO_MAP_IDX) { 14836 dst_reg->type = CONST_PTR_TO_MAP; 14837 } else { 14838 verbose(env, "bpf verifier is misconfigured\n"); 14839 return -EINVAL; 14840 } 14841 14842 return 0; 14843 } 14844 14845 static bool may_access_skb(enum bpf_prog_type type) 14846 { 14847 switch (type) { 14848 case BPF_PROG_TYPE_SOCKET_FILTER: 14849 case BPF_PROG_TYPE_SCHED_CLS: 14850 case BPF_PROG_TYPE_SCHED_ACT: 14851 return true; 14852 default: 14853 return false; 14854 } 14855 } 14856 14857 /* verify safety of LD_ABS|LD_IND instructions: 14858 * - they can only appear in the programs where ctx == skb 14859 * - since they are wrappers of function calls, they scratch R1-R5 registers, 14860 * preserve R6-R9, and store return value into R0 14861 * 14862 * Implicit input: 14863 * ctx == skb == R6 == CTX 14864 * 14865 * Explicit input: 14866 * SRC == any register 14867 * IMM == 32-bit immediate 14868 * 14869 * Output: 14870 * R0 - 8/16/32-bit skb data converted to cpu endianness 14871 */ 14872 static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) 14873 { 14874 struct bpf_reg_state *regs = cur_regs(env); 14875 static const int ctx_reg = BPF_REG_6; 14876 u8 mode = BPF_MODE(insn->code); 14877 int i, err; 14878 14879 if (!may_access_skb(resolve_prog_type(env->prog))) { 14880 verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n"); 14881 return -EINVAL; 14882 } 14883 14884 if (!env->ops->gen_ld_abs) { 14885 verbose(env, "bpf verifier is misconfigured\n"); 14886 return -EINVAL; 14887 } 14888 14889 if (insn->dst_reg != BPF_REG_0 || insn->off != 0 || 14890 BPF_SIZE(insn->code) == BPF_DW || 14891 (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) { 14892 verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n"); 14893 return -EINVAL; 14894 } 14895 14896 /* check whether implicit source operand (register R6) is readable */ 14897 err = check_reg_arg(env, ctx_reg, SRC_OP); 14898 if (err) 14899 return err; 14900 14901 /* Disallow usage of BPF_LD_[ABS|IND] with reference 
tracking, as 14902 * gen_ld_abs() may terminate the program at runtime, leading to 14903 * reference leak. 14904 */ 14905 err = check_reference_leak(env, false); 14906 if (err) { 14907 verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n"); 14908 return err; 14909 } 14910 14911 if (env->cur_state->active_lock.ptr) { 14912 verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_spin_lock-ed region\n"); 14913 return -EINVAL; 14914 } 14915 14916 if (env->cur_state->active_rcu_lock) { 14917 verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_rcu_read_lock-ed region\n"); 14918 return -EINVAL; 14919 } 14920 14921 if (regs[ctx_reg].type != PTR_TO_CTX) { 14922 verbose(env, 14923 "at the time of BPF_LD_ABS|IND R6 != pointer to skb\n"); 14924 return -EINVAL; 14925 } 14926 14927 if (mode == BPF_IND) { 14928 /* check explicit source operand */ 14929 err = check_reg_arg(env, insn->src_reg, SRC_OP); 14930 if (err) 14931 return err; 14932 } 14933 14934 err = check_ptr_off_reg(env, &regs[ctx_reg], ctx_reg); 14935 if (err < 0) 14936 return err; 14937 14938 /* reset caller saved regs to unreadable */ 14939 for (i = 0; i < CALLER_SAVED_REGS; i++) { 14940 mark_reg_not_init(env, regs, caller_saved[i]); 14941 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); 14942 } 14943 14944 /* mark destination R0 register as readable, since it contains 14945 * the value fetched from the packet. 14946 * Already marked as written above. 14947 */ 14948 mark_reg_unknown(env, regs, BPF_REG_0); 14949 /* ld_abs load up to 32-bit skb data. */ 14950 regs[BPF_REG_0].subreg_def = env->insn_idx + 1; 14951 return 0; 14952 } 14953 14954 static int check_return_code(struct bpf_verifier_env *env, int regno) 14955 { 14956 struct tnum enforce_attach_type_range = tnum_unknown; 14957 const struct bpf_prog *prog = env->prog; 14958 struct bpf_reg_state *reg; 14959 struct tnum range = tnum_range(0, 1), const_0 = tnum_const(0); 14960 enum bpf_prog_type prog_type = resolve_prog_type(env->prog); 14961 int err; 14962 struct bpf_func_state *frame = env->cur_state->frame[0]; 14963 const bool is_subprog = frame->subprogno; 14964 14965 /* LSM and struct_ops func-ptr's return type could be "void" */ 14966 if (!is_subprog || frame->in_exception_callback_fn) { 14967 switch (prog_type) { 14968 case BPF_PROG_TYPE_LSM: 14969 if (prog->expected_attach_type == BPF_LSM_CGROUP) 14970 /* See below, can be 0 or 0-1 depending on hook. */ 14971 break; 14972 fallthrough; 14973 case BPF_PROG_TYPE_STRUCT_OPS: 14974 if (!prog->aux->attach_func_proto->type) 14975 return 0; 14976 break; 14977 default: 14978 break; 14979 } 14980 } 14981 14982 /* eBPF calling convention is such that R0 is used 14983 * to return the value from eBPF program.
14984 * Make sure that it's readable at this time 14985 * of bpf_exit, which means that program wrote 14986 * something into it earlier 14987 */ 14988 err = check_reg_arg(env, regno, SRC_OP); 14989 if (err) 14990 return err; 14991 14992 if (is_pointer_value(env, regno)) { 14993 verbose(env, "R%d leaks addr as return value\n", regno); 14994 return -EACCES; 14995 } 14996 14997 reg = cur_regs(env) + regno; 14998 14999 if (frame->in_async_callback_fn) { 15000 /* enforce return zero from async callbacks like timer */ 15001 if (reg->type != SCALAR_VALUE) { 15002 verbose(env, "In async callback the register R%d is not a known value (%s)\n", 15003 regno, reg_type_str(env, reg->type)); 15004 return -EINVAL; 15005 } 15006 15007 if (!tnum_in(const_0, reg->var_off)) { 15008 verbose_invalid_scalar(env, reg, &const_0, "async callback", "R0"); 15009 return -EINVAL; 15010 } 15011 return 0; 15012 } 15013 15014 if (is_subprog && !frame->in_exception_callback_fn) { 15015 if (reg->type != SCALAR_VALUE) { 15016 verbose(env, "At subprogram exit the register R%d is not a scalar value (%s)\n", 15017 regno, reg_type_str(env, reg->type)); 15018 return -EINVAL; 15019 } 15020 return 0; 15021 } 15022 15023 switch (prog_type) { 15024 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 15025 if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG || 15026 env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG || 15027 env->prog->expected_attach_type == BPF_CGROUP_UNIX_RECVMSG || 15028 env->prog->expected_attach_type == BPF_CGROUP_INET4_GETPEERNAME || 15029 env->prog->expected_attach_type == BPF_CGROUP_INET6_GETPEERNAME || 15030 env->prog->expected_attach_type == BPF_CGROUP_UNIX_GETPEERNAME || 15031 env->prog->expected_attach_type == BPF_CGROUP_INET4_GETSOCKNAME || 15032 env->prog->expected_attach_type == BPF_CGROUP_INET6_GETSOCKNAME || 15033 env->prog->expected_attach_type == BPF_CGROUP_UNIX_GETSOCKNAME) 15034 range = tnum_range(1, 1); 15035 if (env->prog->expected_attach_type == BPF_CGROUP_INET4_BIND || 15036 env->prog->expected_attach_type == BPF_CGROUP_INET6_BIND) 15037 range = tnum_range(0, 3); 15038 break; 15039 case BPF_PROG_TYPE_CGROUP_SKB: 15040 if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) { 15041 range = tnum_range(0, 3); 15042 enforce_attach_type_range = tnum_range(2, 3); 15043 } 15044 break; 15045 case BPF_PROG_TYPE_CGROUP_SOCK: 15046 case BPF_PROG_TYPE_SOCK_OPS: 15047 case BPF_PROG_TYPE_CGROUP_DEVICE: 15048 case BPF_PROG_TYPE_CGROUP_SYSCTL: 15049 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 15050 break; 15051 case BPF_PROG_TYPE_RAW_TRACEPOINT: 15052 if (!env->prog->aux->attach_btf_id) 15053 return 0; 15054 range = tnum_const(0); 15055 break; 15056 case BPF_PROG_TYPE_TRACING: 15057 switch (env->prog->expected_attach_type) { 15058 case BPF_TRACE_FENTRY: 15059 case BPF_TRACE_FEXIT: 15060 range = tnum_const(0); 15061 break; 15062 case BPF_TRACE_RAW_TP: 15063 case BPF_MODIFY_RETURN: 15064 return 0; 15065 case BPF_TRACE_ITER: 15066 break; 15067 default: 15068 return -ENOTSUPP; 15069 } 15070 break; 15071 case BPF_PROG_TYPE_SK_LOOKUP: 15072 range = tnum_range(SK_DROP, SK_PASS); 15073 break; 15074 15075 case BPF_PROG_TYPE_LSM: 15076 if (env->prog->expected_attach_type != BPF_LSM_CGROUP) { 15077 /* Regular BPF_PROG_TYPE_LSM programs can return 15078 * any value. 15079 */ 15080 return 0; 15081 } 15082 if (!env->prog->aux->attach_func_proto->type) { 15083 /* Make sure programs that attach to void 15084 * hooks don't try to modify return value. 
15085 */ 15086 range = tnum_range(1, 1); 15087 } 15088 break; 15089 15090 case BPF_PROG_TYPE_NETFILTER: 15091 range = tnum_range(NF_DROP, NF_ACCEPT); 15092 break; 15093 case BPF_PROG_TYPE_EXT: 15094 /* freplace program can return anything as its return value 15095 * depends on the to-be-replaced kernel func or bpf program. 15096 */ 15097 default: 15098 return 0; 15099 } 15100 15101 if (reg->type != SCALAR_VALUE) { 15102 verbose(env, "At program exit the register R%d is not a known value (%s)\n", 15103 regno, reg_type_str(env, reg->type)); 15104 return -EINVAL; 15105 } 15106 15107 if (!tnum_in(range, reg->var_off)) { 15108 verbose_invalid_scalar(env, reg, &range, "program exit", "R0"); 15109 if (prog->expected_attach_type == BPF_LSM_CGROUP && 15110 prog_type == BPF_PROG_TYPE_LSM && 15111 !prog->aux->attach_func_proto->type) 15112 verbose(env, "Note, BPF_LSM_CGROUP that attach to void LSM hooks can't modify return value!\n"); 15113 return -EINVAL; 15114 } 15115 15116 if (!tnum_is_unknown(enforce_attach_type_range) && 15117 tnum_in(enforce_attach_type_range, reg->var_off)) 15118 env->prog->enforce_expected_attach_type = 1; 15119 return 0; 15120 } 15121 15122 /* non-recursive DFS pseudo code 15123 * 1 procedure DFS-iterative(G,v): 15124 * 2 label v as discovered 15125 * 3 let S be a stack 15126 * 4 S.push(v) 15127 * 5 while S is not empty 15128 * 6 t <- S.peek() 15129 * 7 if t is what we're looking for: 15130 * 8 return t 15131 * 9 for all edges e in G.adjacentEdges(t) do 15132 * 10 if edge e is already labelled 15133 * 11 continue with the next edge 15134 * 12 w <- G.adjacentVertex(t,e) 15135 * 13 if vertex w is not discovered and not explored 15136 * 14 label e as tree-edge 15137 * 15 label w as discovered 15138 * 16 S.push(w) 15139 * 17 continue at 5 15140 * 18 else if vertex w is discovered 15141 * 19 label e as back-edge 15142 * 20 else 15143 * 21 // vertex w is explored 15144 * 22 label e as forward- or cross-edge 15145 * 23 label t as explored 15146 * 24 S.pop() 15147 * 15148 * convention: 15149 * 0x10 - discovered 15150 * 0x11 - discovered and fall-through edge labelled 15151 * 0x12 - discovered and fall-through and branch edges labelled 15152 * 0x20 - explored 15153 */ 15154 15155 enum { 15156 DISCOVERED = 0x10, 15157 EXPLORED = 0x20, 15158 FALLTHROUGH = 1, 15159 BRANCH = 2, 15160 }; 15161 15162 static void mark_prune_point(struct bpf_verifier_env *env, int idx) 15163 { 15164 env->insn_aux_data[idx].prune_point = true; 15165 } 15166 15167 static bool is_prune_point(struct bpf_verifier_env *env, int insn_idx) 15168 { 15169 return env->insn_aux_data[insn_idx].prune_point; 15170 } 15171 15172 static void mark_force_checkpoint(struct bpf_verifier_env *env, int idx) 15173 { 15174 env->insn_aux_data[idx].force_checkpoint = true; 15175 } 15176 15177 static bool is_force_checkpoint(struct bpf_verifier_env *env, int insn_idx) 15178 { 15179 return env->insn_aux_data[insn_idx].force_checkpoint; 15180 } 15181 15182 static void mark_calls_callback(struct bpf_verifier_env *env, int idx) 15183 { 15184 env->insn_aux_data[idx].calls_callback = true; 15185 } 15186 15187 static bool calls_callback(struct bpf_verifier_env *env, int insn_idx) 15188 { 15189 return env->insn_aux_data[insn_idx].calls_callback; 15190 } 15191 15192 enum { 15193 DONE_EXPLORING = 0, 15194 KEEP_EXPLORING = 1, 15195 }; 15196 15197 /* t, w, e - match pseudo-code above: 15198 * t - index of current instruction 15199 * w - next instruction 15200 * e - edge 15201 */ 15202 static int push_insn(int t, int w, int e, struct 
bpf_verifier_env *env) 15203 { 15204 int *insn_stack = env->cfg.insn_stack; 15205 int *insn_state = env->cfg.insn_state; 15206 15207 if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH)) 15208 return DONE_EXPLORING; 15209 15210 if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH)) 15211 return DONE_EXPLORING; 15212 15213 if (w < 0 || w >= env->prog->len) { 15214 verbose_linfo(env, t, "%d: ", t); 15215 verbose(env, "jump out of range from insn %d to %d\n", t, w); 15216 return -EINVAL; 15217 } 15218 15219 if (e == BRANCH) { 15220 /* mark branch target for state pruning */ 15221 mark_prune_point(env, w); 15222 mark_jmp_point(env, w); 15223 } 15224 15225 if (insn_state[w] == 0) { 15226 /* tree-edge */ 15227 insn_state[t] = DISCOVERED | e; 15228 insn_state[w] = DISCOVERED; 15229 if (env->cfg.cur_stack >= env->prog->len) 15230 return -E2BIG; 15231 insn_stack[env->cfg.cur_stack++] = w; 15232 return KEEP_EXPLORING; 15233 } else if ((insn_state[w] & 0xF0) == DISCOVERED) { 15234 if (env->bpf_capable) 15235 return DONE_EXPLORING; 15236 verbose_linfo(env, t, "%d: ", t); 15237 verbose_linfo(env, w, "%d: ", w); 15238 verbose(env, "back-edge from insn %d to %d\n", t, w); 15239 return -EINVAL; 15240 } else if (insn_state[w] == EXPLORED) { 15241 /* forward- or cross-edge */ 15242 insn_state[t] = DISCOVERED | e; 15243 } else { 15244 verbose(env, "insn state internal bug\n"); 15245 return -EFAULT; 15246 } 15247 return DONE_EXPLORING; 15248 } 15249 15250 static int visit_func_call_insn(int t, struct bpf_insn *insns, 15251 struct bpf_verifier_env *env, 15252 bool visit_callee) 15253 { 15254 int ret, insn_sz; 15255 15256 insn_sz = bpf_is_ldimm64(&insns[t]) ? 2 : 1; 15257 ret = push_insn(t, t + insn_sz, FALLTHROUGH, env); 15258 if (ret) 15259 return ret; 15260 15261 mark_prune_point(env, t + insn_sz); 15262 /* when we exit from subprog, we need to record non-linear history */ 15263 mark_jmp_point(env, t + insn_sz); 15264 15265 if (visit_callee) { 15266 mark_prune_point(env, t); 15267 ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env); 15268 } 15269 return ret; 15270 } 15271 15272 /* Visits the instruction at index t and returns one of the following: 15273 * < 0 - an error occurred 15274 * DONE_EXPLORING - the instruction was fully explored 15275 * KEEP_EXPLORING - there is still work to be done before it is fully explored 15276 */ 15277 static int visit_insn(int t, struct bpf_verifier_env *env) 15278 { 15279 struct bpf_insn *insns = env->prog->insnsi, *insn = &insns[t]; 15280 int ret, off, insn_sz; 15281 15282 if (bpf_pseudo_func(insn)) 15283 return visit_func_call_insn(t, insns, env, true); 15284 15285 /* All non-branch instructions have a single fall-through edge. */ 15286 if (BPF_CLASS(insn->code) != BPF_JMP && 15287 BPF_CLASS(insn->code) != BPF_JMP32) { 15288 insn_sz = bpf_is_ldimm64(insn) ? 2 : 1; 15289 return push_insn(t, t + insn_sz, FALLTHROUGH, env); 15290 } 15291 15292 switch (BPF_OP(insn->code)) { 15293 case BPF_EXIT: 15294 return DONE_EXPLORING; 15295 15296 case BPF_CALL: 15297 if (insn->src_reg == 0 && insn->imm == BPF_FUNC_timer_set_callback) 15298 /* Mark this call insn as a prune point to trigger 15299 * is_state_visited() check before call itself is 15300 * processed by __check_func_call(). Otherwise new 15301 * async state will be pushed for further exploration. 15302 */ 15303 mark_prune_point(env, t); 15304 /* For functions that invoke callbacks it is not known how many times 15305 * callback would be called. 
Verifier models callback calling functions 15306 * by repeatedly visiting callback bodies and returning to origin call 15307 * instruction. 15308 * In order to stop such iteration verifier needs to identify when a 15309 * state identical some state from a previous iteration is reached. 15310 * Check below forces creation of checkpoint before callback calling 15311 * instruction to allow search for such identical states. 15312 */ 15313 if (is_sync_callback_calling_insn(insn)) { 15314 mark_calls_callback(env, t); 15315 mark_force_checkpoint(env, t); 15316 mark_prune_point(env, t); 15317 mark_jmp_point(env, t); 15318 } 15319 if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) { 15320 struct bpf_kfunc_call_arg_meta meta; 15321 15322 ret = fetch_kfunc_meta(env, insn, &meta, NULL); 15323 if (ret == 0 && is_iter_next_kfunc(&meta)) { 15324 mark_prune_point(env, t); 15325 /* Checking and saving state checkpoints at iter_next() call 15326 * is crucial for fast convergence of open-coded iterator loop 15327 * logic, so we need to force it. If we don't do that, 15328 * is_state_visited() might skip saving a checkpoint, causing 15329 * unnecessarily long sequence of not checkpointed 15330 * instructions and jumps, leading to exhaustion of jump 15331 * history buffer, and potentially other undesired outcomes. 15332 * It is expected that with correct open-coded iterators 15333 * convergence will happen quickly, so we don't run a risk of 15334 * exhausting memory. 15335 */ 15336 mark_force_checkpoint(env, t); 15337 } 15338 } 15339 return visit_func_call_insn(t, insns, env, insn->src_reg == BPF_PSEUDO_CALL); 15340 15341 case BPF_JA: 15342 if (BPF_SRC(insn->code) != BPF_K) 15343 return -EINVAL; 15344 15345 if (BPF_CLASS(insn->code) == BPF_JMP) 15346 off = insn->off; 15347 else 15348 off = insn->imm; 15349 15350 /* unconditional jump with single edge */ 15351 ret = push_insn(t, t + off + 1, FALLTHROUGH, env); 15352 if (ret) 15353 return ret; 15354 15355 mark_prune_point(env, t + off + 1); 15356 mark_jmp_point(env, t + off + 1); 15357 15358 return ret; 15359 15360 default: 15361 /* conditional jump with two edges */ 15362 mark_prune_point(env, t); 15363 15364 ret = push_insn(t, t + 1, FALLTHROUGH, env); 15365 if (ret) 15366 return ret; 15367 15368 return push_insn(t, t + insn->off + 1, BRANCH, env); 15369 } 15370 } 15371 15372 /* non-recursive depth-first-search to detect loops in BPF program 15373 * loop == back-edge in directed graph 15374 */ 15375 static int check_cfg(struct bpf_verifier_env *env) 15376 { 15377 int insn_cnt = env->prog->len; 15378 int *insn_stack, *insn_state; 15379 int ex_insn_beg, i, ret = 0; 15380 bool ex_done = false; 15381 15382 insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL); 15383 if (!insn_state) 15384 return -ENOMEM; 15385 15386 insn_stack = env->cfg.insn_stack = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL); 15387 if (!insn_stack) { 15388 kvfree(insn_state); 15389 return -ENOMEM; 15390 } 15391 15392 insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */ 15393 insn_stack[0] = 0; /* 0 is the first instruction */ 15394 env->cfg.cur_stack = 1; 15395 15396 walk_cfg: 15397 while (env->cfg.cur_stack > 0) { 15398 int t = insn_stack[env->cfg.cur_stack - 1]; 15399 15400 ret = visit_insn(t, env); 15401 switch (ret) { 15402 case DONE_EXPLORING: 15403 insn_state[t] = EXPLORED; 15404 env->cfg.cur_stack--; 15405 break; 15406 case KEEP_EXPLORING: 15407 break; 15408 default: 15409 if (ret > 0) { 15410 verbose(env, "visit_insn internal bug\n"); 15411 ret = -EFAULT; 15412 
} 15413 goto err_free; 15414 } 15415 } 15416 15417 if (env->cfg.cur_stack < 0) { 15418 verbose(env, "pop stack internal bug\n"); 15419 ret = -EFAULT; 15420 goto err_free; 15421 } 15422 15423 if (env->exception_callback_subprog && !ex_done) { 15424 ex_insn_beg = env->subprog_info[env->exception_callback_subprog].start; 15425 15426 insn_state[ex_insn_beg] = DISCOVERED; 15427 insn_stack[0] = ex_insn_beg; 15428 env->cfg.cur_stack = 1; 15429 ex_done = true; 15430 goto walk_cfg; 15431 } 15432 15433 for (i = 0; i < insn_cnt; i++) { 15434 struct bpf_insn *insn = &env->prog->insnsi[i]; 15435 15436 if (insn_state[i] != EXPLORED) { 15437 verbose(env, "unreachable insn %d\n", i); 15438 ret = -EINVAL; 15439 goto err_free; 15440 } 15441 if (bpf_is_ldimm64(insn)) { 15442 if (insn_state[i + 1] != 0) { 15443 verbose(env, "jump into the middle of ldimm64 insn %d\n", i); 15444 ret = -EINVAL; 15445 goto err_free; 15446 } 15447 i++; /* skip second half of ldimm64 */ 15448 } 15449 } 15450 ret = 0; /* cfg looks good */ 15451 15452 err_free: 15453 kvfree(insn_state); 15454 kvfree(insn_stack); 15455 env->cfg.insn_state = env->cfg.insn_stack = NULL; 15456 return ret; 15457 } 15458 15459 static int check_abnormal_return(struct bpf_verifier_env *env) 15460 { 15461 int i; 15462 15463 for (i = 1; i < env->subprog_cnt; i++) { 15464 if (env->subprog_info[i].has_ld_abs) { 15465 verbose(env, "LD_ABS is not allowed in subprogs without BTF\n"); 15466 return -EINVAL; 15467 } 15468 if (env->subprog_info[i].has_tail_call) { 15469 verbose(env, "tail_call is not allowed in subprogs without BTF\n"); 15470 return -EINVAL; 15471 } 15472 } 15473 return 0; 15474 } 15475 15476 /* The minimum supported BTF func info size */ 15477 #define MIN_BPF_FUNCINFO_SIZE 8 15478 #define MAX_FUNCINFO_REC_SIZE 252 15479 15480 static int check_btf_func_early(struct bpf_verifier_env *env, 15481 const union bpf_attr *attr, 15482 bpfptr_t uattr) 15483 { 15484 u32 krec_size = sizeof(struct bpf_func_info); 15485 const struct btf_type *type, *func_proto; 15486 u32 i, nfuncs, urec_size, min_size; 15487 struct bpf_func_info *krecord; 15488 struct bpf_prog *prog; 15489 const struct btf *btf; 15490 u32 prev_offset = 0; 15491 bpfptr_t urecord; 15492 int ret = -ENOMEM; 15493 15494 nfuncs = attr->func_info_cnt; 15495 if (!nfuncs) { 15496 if (check_abnormal_return(env)) 15497 return -EINVAL; 15498 return 0; 15499 } 15500 15501 urec_size = attr->func_info_rec_size; 15502 if (urec_size < MIN_BPF_FUNCINFO_SIZE || 15503 urec_size > MAX_FUNCINFO_REC_SIZE || 15504 urec_size % sizeof(u32)) { 15505 verbose(env, "invalid func info rec size %u\n", urec_size); 15506 return -EINVAL; 15507 } 15508 15509 prog = env->prog; 15510 btf = prog->aux->btf; 15511 15512 urecord = make_bpfptr(attr->func_info, uattr.is_kernel); 15513 min_size = min_t(u32, krec_size, urec_size); 15514 15515 krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN); 15516 if (!krecord) 15517 return -ENOMEM; 15518 15519 for (i = 0; i < nfuncs; i++) { 15520 ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size); 15521 if (ret) { 15522 if (ret == -E2BIG) { 15523 verbose(env, "nonzero tailing record in func info"); 15524 /* set the size kernel expects so loader can zero 15525 * out the rest of the record. 
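 * E.g. if userspace passed func_info_rec_size = 16 while the kernel
 * record is 8 bytes, min_size (8) is reported back so the loader knows
 * which trailing bytes must be zeroed before retrying.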
15526 */ 15527 if (copy_to_bpfptr_offset(uattr, 15528 offsetof(union bpf_attr, func_info_rec_size), 15529 &min_size, sizeof(min_size))) 15530 ret = -EFAULT; 15531 } 15532 goto err_free; 15533 } 15534 15535 if (copy_from_bpfptr(&krecord[i], urecord, min_size)) { 15536 ret = -EFAULT; 15537 goto err_free; 15538 } 15539 15540 /* check insn_off */ 15541 ret = -EINVAL; 15542 if (i == 0) { 15543 if (krecord[i].insn_off) { 15544 verbose(env, 15545 "nonzero insn_off %u for the first func info record", 15546 krecord[i].insn_off); 15547 goto err_free; 15548 } 15549 } else if (krecord[i].insn_off <= prev_offset) { 15550 verbose(env, 15551 "same or smaller insn offset (%u) than previous func info record (%u)", 15552 krecord[i].insn_off, prev_offset); 15553 goto err_free; 15554 } 15555 15556 /* check type_id */ 15557 type = btf_type_by_id(btf, krecord[i].type_id); 15558 if (!type || !btf_type_is_func(type)) { 15559 verbose(env, "invalid type id %d in func info", 15560 krecord[i].type_id); 15561 goto err_free; 15562 } 15563 15564 func_proto = btf_type_by_id(btf, type->type); 15565 if (unlikely(!func_proto || !btf_type_is_func_proto(func_proto))) 15566 /* btf_func_check() already verified it during BTF load */ 15567 goto err_free; 15568 15569 prev_offset = krecord[i].insn_off; 15570 bpfptr_add(&urecord, urec_size); 15571 } 15572 15573 prog->aux->func_info = krecord; 15574 prog->aux->func_info_cnt = nfuncs; 15575 return 0; 15576 15577 err_free: 15578 kvfree(krecord); 15579 return ret; 15580 } 15581 15582 static int check_btf_func(struct bpf_verifier_env *env, 15583 const union bpf_attr *attr, 15584 bpfptr_t uattr) 15585 { 15586 const struct btf_type *type, *func_proto, *ret_type; 15587 u32 i, nfuncs, urec_size; 15588 struct bpf_func_info *krecord; 15589 struct bpf_func_info_aux *info_aux = NULL; 15590 struct bpf_prog *prog; 15591 const struct btf *btf; 15592 bpfptr_t urecord; 15593 bool scalar_return; 15594 int ret = -ENOMEM; 15595 15596 nfuncs = attr->func_info_cnt; 15597 if (!nfuncs) { 15598 if (check_abnormal_return(env)) 15599 return -EINVAL; 15600 return 0; 15601 } 15602 if (nfuncs != env->subprog_cnt) { 15603 verbose(env, "number of funcs in func_info doesn't match number of subprogs\n"); 15604 return -EINVAL; 15605 } 15606 15607 urec_size = attr->func_info_rec_size; 15608 15609 prog = env->prog; 15610 btf = prog->aux->btf; 15611 15612 urecord = make_bpfptr(attr->func_info, uattr.is_kernel); 15613 15614 krecord = prog->aux->func_info; 15615 info_aux = kcalloc(nfuncs, sizeof(*info_aux), GFP_KERNEL | __GFP_NOWARN); 15616 if (!info_aux) 15617 return -ENOMEM; 15618 15619 for (i = 0; i < nfuncs; i++) { 15620 /* check insn_off */ 15621 ret = -EINVAL; 15622 15623 if (env->subprog_info[i].start != krecord[i].insn_off) { 15624 verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n"); 15625 goto err_free; 15626 } 15627 15628 /* Already checked type_id */ 15629 type = btf_type_by_id(btf, krecord[i].type_id); 15630 info_aux[i].linkage = BTF_INFO_VLEN(type->info); 15631 /* Already checked func_proto */ 15632 func_proto = btf_type_by_id(btf, type->type); 15633 15634 ret_type = btf_type_skip_modifiers(btf, func_proto->type, NULL); 15635 scalar_return = 15636 btf_type_is_small_int(ret_type) || btf_is_any_enum(ret_type); 15637 if (i && !scalar_return && env->subprog_info[i].has_ld_abs) { 15638 verbose(env, "LD_ABS is only allowed in functions that return 'int'.\n"); 15639 goto err_free; 15640 } 15641 if (i && !scalar_return && env->subprog_info[i].has_tail_call) { 15642 verbose(env, 
"tail_call is only allowed in functions that return 'int'.\n"); 15643 goto err_free; 15644 } 15645 15646 bpfptr_add(&urecord, urec_size); 15647 } 15648 15649 prog->aux->func_info_aux = info_aux; 15650 return 0; 15651 15652 err_free: 15653 kfree(info_aux); 15654 return ret; 15655 } 15656 15657 static void adjust_btf_func(struct bpf_verifier_env *env) 15658 { 15659 struct bpf_prog_aux *aux = env->prog->aux; 15660 int i; 15661 15662 if (!aux->func_info) 15663 return; 15664 15665 /* func_info is not available for hidden subprogs */ 15666 for (i = 0; i < env->subprog_cnt - env->hidden_subprog_cnt; i++) 15667 aux->func_info[i].insn_off = env->subprog_info[i].start; 15668 } 15669 15670 #define MIN_BPF_LINEINFO_SIZE offsetofend(struct bpf_line_info, line_col) 15671 #define MAX_LINEINFO_REC_SIZE MAX_FUNCINFO_REC_SIZE 15672 15673 static int check_btf_line(struct bpf_verifier_env *env, 15674 const union bpf_attr *attr, 15675 bpfptr_t uattr) 15676 { 15677 u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0; 15678 struct bpf_subprog_info *sub; 15679 struct bpf_line_info *linfo; 15680 struct bpf_prog *prog; 15681 const struct btf *btf; 15682 bpfptr_t ulinfo; 15683 int err; 15684 15685 nr_linfo = attr->line_info_cnt; 15686 if (!nr_linfo) 15687 return 0; 15688 if (nr_linfo > INT_MAX / sizeof(struct bpf_line_info)) 15689 return -EINVAL; 15690 15691 rec_size = attr->line_info_rec_size; 15692 if (rec_size < MIN_BPF_LINEINFO_SIZE || 15693 rec_size > MAX_LINEINFO_REC_SIZE || 15694 rec_size & (sizeof(u32) - 1)) 15695 return -EINVAL; 15696 15697 /* Need to zero it in case the userspace may 15698 * pass in a smaller bpf_line_info object. 15699 */ 15700 linfo = kvcalloc(nr_linfo, sizeof(struct bpf_line_info), 15701 GFP_KERNEL | __GFP_NOWARN); 15702 if (!linfo) 15703 return -ENOMEM; 15704 15705 prog = env->prog; 15706 btf = prog->aux->btf; 15707 15708 s = 0; 15709 sub = env->subprog_info; 15710 ulinfo = make_bpfptr(attr->line_info, uattr.is_kernel); 15711 expected_size = sizeof(struct bpf_line_info); 15712 ncopy = min_t(u32, expected_size, rec_size); 15713 for (i = 0; i < nr_linfo; i++) { 15714 err = bpf_check_uarg_tail_zero(ulinfo, expected_size, rec_size); 15715 if (err) { 15716 if (err == -E2BIG) { 15717 verbose(env, "nonzero tailing record in line_info"); 15718 if (copy_to_bpfptr_offset(uattr, 15719 offsetof(union bpf_attr, line_info_rec_size), 15720 &expected_size, sizeof(expected_size))) 15721 err = -EFAULT; 15722 } 15723 goto err_free; 15724 } 15725 15726 if (copy_from_bpfptr(&linfo[i], ulinfo, ncopy)) { 15727 err = -EFAULT; 15728 goto err_free; 15729 } 15730 15731 /* 15732 * Check insn_off to ensure 15733 * 1) strictly increasing AND 15734 * 2) bounded by prog->len 15735 * 15736 * The linfo[0].insn_off == 0 check logically falls into 15737 * the later "missing bpf_line_info for func..." case 15738 * because the first linfo[0].insn_off must be the 15739 * first sub also and the first sub must have 15740 * subprog_info[0].start == 0. 
15741 */ 15742 if ((i && linfo[i].insn_off <= prev_offset) || 15743 linfo[i].insn_off >= prog->len) { 15744 verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n", 15745 i, linfo[i].insn_off, prev_offset, 15746 prog->len); 15747 err = -EINVAL; 15748 goto err_free; 15749 } 15750 15751 if (!prog->insnsi[linfo[i].insn_off].code) { 15752 verbose(env, 15753 "Invalid insn code at line_info[%u].insn_off\n", 15754 i); 15755 err = -EINVAL; 15756 goto err_free; 15757 } 15758 15759 if (!btf_name_by_offset(btf, linfo[i].line_off) || 15760 !btf_name_by_offset(btf, linfo[i].file_name_off)) { 15761 verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i); 15762 err = -EINVAL; 15763 goto err_free; 15764 } 15765 15766 if (s != env->subprog_cnt) { 15767 if (linfo[i].insn_off == sub[s].start) { 15768 sub[s].linfo_idx = i; 15769 s++; 15770 } else if (sub[s].start < linfo[i].insn_off) { 15771 verbose(env, "missing bpf_line_info for func#%u\n", s); 15772 err = -EINVAL; 15773 goto err_free; 15774 } 15775 } 15776 15777 prev_offset = linfo[i].insn_off; 15778 bpfptr_add(&ulinfo, rec_size); 15779 } 15780 15781 if (s != env->subprog_cnt) { 15782 verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n", 15783 env->subprog_cnt - s, s); 15784 err = -EINVAL; 15785 goto err_free; 15786 } 15787 15788 prog->aux->linfo = linfo; 15789 prog->aux->nr_linfo = nr_linfo; 15790 15791 return 0; 15792 15793 err_free: 15794 kvfree(linfo); 15795 return err; 15796 } 15797 15798 #define MIN_CORE_RELO_SIZE sizeof(struct bpf_core_relo) 15799 #define MAX_CORE_RELO_SIZE MAX_FUNCINFO_REC_SIZE 15800 15801 static int check_core_relo(struct bpf_verifier_env *env, 15802 const union bpf_attr *attr, 15803 bpfptr_t uattr) 15804 { 15805 u32 i, nr_core_relo, ncopy, expected_size, rec_size; 15806 struct bpf_core_relo core_relo = {}; 15807 struct bpf_prog *prog = env->prog; 15808 const struct btf *btf = prog->aux->btf; 15809 struct bpf_core_ctx ctx = { 15810 .log = &env->log, 15811 .btf = btf, 15812 }; 15813 bpfptr_t u_core_relo; 15814 int err; 15815 15816 nr_core_relo = attr->core_relo_cnt; 15817 if (!nr_core_relo) 15818 return 0; 15819 if (nr_core_relo > INT_MAX / sizeof(struct bpf_core_relo)) 15820 return -EINVAL; 15821 15822 rec_size = attr->core_relo_rec_size; 15823 if (rec_size < MIN_CORE_RELO_SIZE || 15824 rec_size > MAX_CORE_RELO_SIZE || 15825 rec_size % sizeof(u32)) 15826 return -EINVAL; 15827 15828 u_core_relo = make_bpfptr(attr->core_relos, uattr.is_kernel); 15829 expected_size = sizeof(struct bpf_core_relo); 15830 ncopy = min_t(u32, expected_size, rec_size); 15831 15832 /* Unlike func_info and line_info, copy and apply each CO-RE 15833 * relocation record one at a time. 
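 * Only a single on-stack struct bpf_core_relo is used as scratch: each
 * record is copied into it, applied to prog->insnsi[insn_off / 8] via
 * bpf_core_apply(), and then overwritten by the next record; nothing is
 * retained in prog->aux after this pass.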
15834 */ 15835 for (i = 0; i < nr_core_relo; i++) { 15836 /* future proofing when sizeof(bpf_core_relo) changes */ 15837 err = bpf_check_uarg_tail_zero(u_core_relo, expected_size, rec_size); 15838 if (err) { 15839 if (err == -E2BIG) { 15840 verbose(env, "nonzero tailing record in core_relo"); 15841 if (copy_to_bpfptr_offset(uattr, 15842 offsetof(union bpf_attr, core_relo_rec_size), 15843 &expected_size, sizeof(expected_size))) 15844 err = -EFAULT; 15845 } 15846 break; 15847 } 15848 15849 if (copy_from_bpfptr(&core_relo, u_core_relo, ncopy)) { 15850 err = -EFAULT; 15851 break; 15852 } 15853 15854 if (core_relo.insn_off % 8 || core_relo.insn_off / 8 >= prog->len) { 15855 verbose(env, "Invalid core_relo[%u].insn_off:%u prog->len:%u\n", 15856 i, core_relo.insn_off, prog->len); 15857 err = -EINVAL; 15858 break; 15859 } 15860 15861 err = bpf_core_apply(&ctx, &core_relo, i, 15862 &prog->insnsi[core_relo.insn_off / 8]); 15863 if (err) 15864 break; 15865 bpfptr_add(&u_core_relo, rec_size); 15866 } 15867 return err; 15868 } 15869 15870 static int check_btf_info_early(struct bpf_verifier_env *env, 15871 const union bpf_attr *attr, 15872 bpfptr_t uattr) 15873 { 15874 struct btf *btf; 15875 int err; 15876 15877 if (!attr->func_info_cnt && !attr->line_info_cnt) { 15878 if (check_abnormal_return(env)) 15879 return -EINVAL; 15880 return 0; 15881 } 15882 15883 btf = btf_get_by_fd(attr->prog_btf_fd); 15884 if (IS_ERR(btf)) 15885 return PTR_ERR(btf); 15886 if (btf_is_kernel(btf)) { 15887 btf_put(btf); 15888 return -EACCES; 15889 } 15890 env->prog->aux->btf = btf; 15891 15892 err = check_btf_func_early(env, attr, uattr); 15893 if (err) 15894 return err; 15895 return 0; 15896 } 15897 15898 static int check_btf_info(struct bpf_verifier_env *env, 15899 const union bpf_attr *attr, 15900 bpfptr_t uattr) 15901 { 15902 int err; 15903 15904 if (!attr->func_info_cnt && !attr->line_info_cnt) { 15905 if (check_abnormal_return(env)) 15906 return -EINVAL; 15907 return 0; 15908 } 15909 15910 err = check_btf_func(env, attr, uattr); 15911 if (err) 15912 return err; 15913 15914 err = check_btf_line(env, attr, uattr); 15915 if (err) 15916 return err; 15917 15918 err = check_core_relo(env, attr, uattr); 15919 if (err) 15920 return err; 15921 15922 return 0; 15923 } 15924 15925 /* check %cur's range satisfies %old's */ 15926 static bool range_within(struct bpf_reg_state *old, 15927 struct bpf_reg_state *cur) 15928 { 15929 return old->umin_value <= cur->umin_value && 15930 old->umax_value >= cur->umax_value && 15931 old->smin_value <= cur->smin_value && 15932 old->smax_value >= cur->smax_value && 15933 old->u32_min_value <= cur->u32_min_value && 15934 old->u32_max_value >= cur->u32_max_value && 15935 old->s32_min_value <= cur->s32_min_value && 15936 old->s32_max_value >= cur->s32_max_value; 15937 } 15938 15939 /* If in the old state two registers had the same id, then they need to have 15940 * the same id in the new state as well. But that id could be different from 15941 * the old state, so we need to track the mapping from old to new ids. 15942 * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent 15943 * regs with old id 5 must also have new id 9 for the new state to be safe. But 15944 * regs with a different old id could still have new id 9, we don't care about 15945 * that. 15946 * So we look through our idmap to see if this old id has been seen before. If 15947 * so, we require the new id to match; otherwise, we add the id pair to the map. 
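 * For example, if in the old state r1.id == r2.id == 5 and in the
 * current state r1.id == 9 but r2.id == 7, the first check_ids(5, 9)
 * records the 5 -> 9 pair and succeeds, while the subsequent
 * check_ids(5, 7) fails because old id 5 is already bound to 9.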
15948 */ 15949 static bool check_ids(u32 old_id, u32 cur_id, struct bpf_idmap *idmap) 15950 { 15951 struct bpf_id_pair *map = idmap->map; 15952 unsigned int i; 15953 15954 /* either both IDs should be set or both should be zero */ 15955 if (!!old_id != !!cur_id) 15956 return false; 15957 15958 if (old_id == 0) /* cur_id == 0 as well */ 15959 return true; 15960 15961 for (i = 0; i < BPF_ID_MAP_SIZE; i++) { 15962 if (!map[i].old) { 15963 /* Reached an empty slot; haven't seen this id before */ 15964 map[i].old = old_id; 15965 map[i].cur = cur_id; 15966 return true; 15967 } 15968 if (map[i].old == old_id) 15969 return map[i].cur == cur_id; 15970 if (map[i].cur == cur_id) 15971 return false; 15972 } 15973 /* We ran out of idmap slots, which should be impossible */ 15974 WARN_ON_ONCE(1); 15975 return false; 15976 } 15977 15978 /* Similar to check_ids(), but allocate a unique temporary ID 15979 * for 'old_id' or 'cur_id' of zero. 15980 * This makes pairs like '0 vs unique ID', 'unique ID vs 0' valid. 15981 */ 15982 static bool check_scalar_ids(u32 old_id, u32 cur_id, struct bpf_idmap *idmap) 15983 { 15984 old_id = old_id ? old_id : ++idmap->tmp_id_gen; 15985 cur_id = cur_id ? cur_id : ++idmap->tmp_id_gen; 15986 15987 return check_ids(old_id, cur_id, idmap); 15988 } 15989 15990 static void clean_func_state(struct bpf_verifier_env *env, 15991 struct bpf_func_state *st) 15992 { 15993 enum bpf_reg_liveness live; 15994 int i, j; 15995 15996 for (i = 0; i < BPF_REG_FP; i++) { 15997 live = st->regs[i].live; 15998 /* liveness must not touch this register anymore */ 15999 st->regs[i].live |= REG_LIVE_DONE; 16000 if (!(live & REG_LIVE_READ)) 16001 /* since the register is unused, clear its state 16002 * to make further comparison simpler 16003 */ 16004 __mark_reg_not_init(env, &st->regs[i]); 16005 } 16006 16007 for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) { 16008 live = st->stack[i].spilled_ptr.live; 16009 /* liveness must not touch this stack slot anymore */ 16010 st->stack[i].spilled_ptr.live |= REG_LIVE_DONE; 16011 if (!(live & REG_LIVE_READ)) { 16012 __mark_reg_not_init(env, &st->stack[i].spilled_ptr); 16013 for (j = 0; j < BPF_REG_SIZE; j++) 16014 st->stack[i].slot_type[j] = STACK_INVALID; 16015 } 16016 } 16017 } 16018 16019 static void clean_verifier_state(struct bpf_verifier_env *env, 16020 struct bpf_verifier_state *st) 16021 { 16022 int i; 16023 16024 if (st->frame[0]->regs[0].live & REG_LIVE_DONE) 16025 /* all regs in this state in all frames were already marked */ 16026 return; 16027 16028 for (i = 0; i <= st->curframe; i++) 16029 clean_func_state(env, st->frame[i]); 16030 } 16031 16032 /* the parentage chains form a tree. 16033 * the verifier states are added to state lists at given insn and 16034 * pushed into state stack for future exploration. 16035 * when the verifier reaches bpf_exit insn some of the verifer states 16036 * stored in the state lists have their final liveness state already, 16037 * but a lot of states will get revised from liveness point of view when 16038 * the verifier explores other branches. 16039 * Example: 16040 * 1: r0 = 1 16041 * 2: if r1 == 100 goto pc+1 16042 * 3: r0 = 2 16043 * 4: exit 16044 * when the verifier reaches exit insn the register r0 in the state list of 16045 * insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch 16046 * of insn 2 and goes exploring further. At the insn 4 it will walk the 16047 * parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ. 
16048 *
16049 * Since the verifier pushes the branch states as it sees them while exploring
16050 * the program, the condition of walking the branch instruction for the second
16051 * time means that all states below this branch were already explored and
16052 * their final liveness marks are already propagated.
16053 * Hence when the verifier completes the search of state list in is_state_visited()
16054 * we can call this clean_live_states() function to mark all liveness states
16055 * as REG_LIVE_DONE to indicate that 'parent' pointers of 'struct bpf_reg_state'
16056 * will not be used.
16057 * This function also clears the registers and stack for states that !READ
16058 * to simplify state merging.
16059 *
16060 * Important note here that walking the same branch instruction in the callee
16061 * doesn't mean that the states are DONE. The verifier has to compare
16062 * the callsites.
16063 */
16064 static void clean_live_states(struct bpf_verifier_env *env, int insn,
16065 struct bpf_verifier_state *cur)
16066 {
16067 struct bpf_verifier_state_list *sl;
16068
16069 sl = *explored_state(env, insn);
16070 while (sl) {
16071 if (sl->state.branches)
16072 goto next;
16073 if (sl->state.insn_idx != insn ||
16074 !same_callsites(&sl->state, cur))
16075 goto next;
16076 clean_verifier_state(env, &sl->state);
16077 next:
16078 sl = sl->next;
16079 }
16080 }
16081
16082 static bool regs_exact(const struct bpf_reg_state *rold,
16083 const struct bpf_reg_state *rcur,
16084 struct bpf_idmap *idmap)
16085 {
16086 return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
16087 check_ids(rold->id, rcur->id, idmap) &&
16088 check_ids(rold->ref_obj_id, rcur->ref_obj_id, idmap);
16089 }
16090
16091 /* Returns true if (rold safe implies rcur safe) */
16092 static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
16093 struct bpf_reg_state *rcur, struct bpf_idmap *idmap, bool exact)
16094 {
16095 if (exact)
16096 return regs_exact(rold, rcur, idmap);
16097
16098 if (!(rold->live & REG_LIVE_READ))
16099 /* explored state didn't use this */
16100 return true;
16101 if (rold->type == NOT_INIT)
16102 /* explored state can't have used this */
16103 return true;
16104 if (rcur->type == NOT_INIT)
16105 return false;
16106
16107 /* Enforce that register types have to match exactly, including their
16108 * modifiers (like PTR_MAYBE_NULL, MEM_RDONLY, etc), as a general
16109 * rule.
16110 *
16111 * One can make a point that using a pointer register as unbounded
16112 * SCALAR would be technically acceptable, but this could lead to
16113 * pointer leaks because scalars are allowed to leak while pointers
16114 * are not. We could make this safe in special cases if root is
16115 * calling us, but it's probably not worth the hassle.
16116 *
16117 * Also, register types that are *not* MAYBE_NULL could technically be
16118 * safe to use as their MAYBE_NULL variants (e.g., PTR_TO_MAP_VALUE
16119 * is safe to be used as PTR_TO_MAP_VALUE_OR_NULL, provided both point
16120 * to the same map).
16121 * However, if the old MAYBE_NULL register then got NULL checked,
16122 * doing so could have affected others with the same id, and we can't
16123 * check for that because we lost the id when we converted to
16124 * a non-MAYBE_NULL variant.
16125 * So, as a general rule we don't allow mixing MAYBE_NULL and
16126 * non-MAYBE_NULL registers as well.
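 * E.g., an old PTR_TO_MAP_VALUE_OR_NULL never matches a current
 * PTR_TO_MAP_VALUE (or vice versa); the rold->type != rcur->type check
 * below rejects such pairs even when one direction might look harmless
 * at first glance.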
16127 */ 16128 if (rold->type != rcur->type) 16129 return false; 16130 16131 switch (base_type(rold->type)) { 16132 case SCALAR_VALUE: 16133 if (env->explore_alu_limits) { 16134 /* explore_alu_limits disables tnum_in() and range_within() 16135 * logic and requires everything to be strict 16136 */ 16137 return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 && 16138 check_scalar_ids(rold->id, rcur->id, idmap); 16139 } 16140 if (!rold->precise) 16141 return true; 16142 /* Why check_ids() for scalar registers? 16143 * 16144 * Consider the following BPF code: 16145 * 1: r6 = ... unbound scalar, ID=a ... 16146 * 2: r7 = ... unbound scalar, ID=b ... 16147 * 3: if (r6 > r7) goto +1 16148 * 4: r6 = r7 16149 * 5: if (r6 > X) goto ... 16150 * 6: ... memory operation using r7 ... 16151 * 16152 * First verification path is [1-6]: 16153 * - at (4) same bpf_reg_state::id (b) would be assigned to r6 and r7; 16154 * - at (5) r6 would be marked <= X, find_equal_scalars() would also mark 16155 * r7 <= X, because r6 and r7 share same id. 16156 * Next verification path is [1-4, 6]. 16157 * 16158 * Instruction (6) would be reached in two states: 16159 * I. r6{.id=b}, r7{.id=b} via path 1-6; 16160 * II. r6{.id=a}, r7{.id=b} via path 1-4, 6. 16161 * 16162 * Use check_ids() to distinguish these states. 16163 * --- 16164 * Also verify that new value satisfies old value range knowledge. 16165 */ 16166 return range_within(rold, rcur) && 16167 tnum_in(rold->var_off, rcur->var_off) && 16168 check_scalar_ids(rold->id, rcur->id, idmap); 16169 case PTR_TO_MAP_KEY: 16170 case PTR_TO_MAP_VALUE: 16171 case PTR_TO_MEM: 16172 case PTR_TO_BUF: 16173 case PTR_TO_TP_BUFFER: 16174 /* If the new min/max/var_off satisfy the old ones and 16175 * everything else matches, we are OK. 16176 */ 16177 return memcmp(rold, rcur, offsetof(struct bpf_reg_state, var_off)) == 0 && 16178 range_within(rold, rcur) && 16179 tnum_in(rold->var_off, rcur->var_off) && 16180 check_ids(rold->id, rcur->id, idmap) && 16181 check_ids(rold->ref_obj_id, rcur->ref_obj_id, idmap); 16182 case PTR_TO_PACKET_META: 16183 case PTR_TO_PACKET: 16184 /* We must have at least as much range as the old ptr 16185 * did, so that any accesses which were safe before are 16186 * still safe. This is true even if old range < old off, 16187 * since someone could have accessed through (ptr - k), or 16188 * even done ptr -= k in a register, to get a safe access. 16189 */ 16190 if (rold->range > rcur->range) 16191 return false; 16192 /* If the offsets don't match, we can't trust our alignment; 16193 * nor can we be sure that we won't fall out of range. 
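 * E.g., if the old pointer had off == 0 and the current one has
 * off == 2, a 4-byte access that was aligned and in range in the old
 * state may be misaligned or out of range now, so such pairs are
 * rejected below.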
16194 */ 16195 if (rold->off != rcur->off) 16196 return false; 16197 /* id relations must be preserved */ 16198 if (!check_ids(rold->id, rcur->id, idmap)) 16199 return false; 16200 /* new val must satisfy old val knowledge */ 16201 return range_within(rold, rcur) && 16202 tnum_in(rold->var_off, rcur->var_off); 16203 case PTR_TO_STACK: 16204 /* two stack pointers are equal only if they're pointing to 16205 * the same stack frame, since fp-8 in foo != fp-8 in bar 16206 */ 16207 return regs_exact(rold, rcur, idmap) && rold->frameno == rcur->frameno; 16208 default: 16209 return regs_exact(rold, rcur, idmap); 16210 } 16211 } 16212 16213 static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old, 16214 struct bpf_func_state *cur, struct bpf_idmap *idmap, bool exact) 16215 { 16216 int i, spi; 16217 16218 /* walk slots of the explored stack and ignore any additional 16219 * slots in the current stack, since explored(safe) state 16220 * didn't use them 16221 */ 16222 for (i = 0; i < old->allocated_stack; i++) { 16223 struct bpf_reg_state *old_reg, *cur_reg; 16224 16225 spi = i / BPF_REG_SIZE; 16226 16227 if (exact && 16228 old->stack[spi].slot_type[i % BPF_REG_SIZE] != 16229 cur->stack[spi].slot_type[i % BPF_REG_SIZE]) 16230 return false; 16231 16232 if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ) && !exact) { 16233 i += BPF_REG_SIZE - 1; 16234 /* explored state didn't use this */ 16235 continue; 16236 } 16237 16238 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID) 16239 continue; 16240 16241 if (env->allow_uninit_stack && 16242 old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC) 16243 continue; 16244 16245 /* explored stack has more populated slots than current stack 16246 * and these slots were used 16247 */ 16248 if (i >= cur->allocated_stack) 16249 return false; 16250 16251 /* if old state was safe with misc data in the stack 16252 * it will be safe with zero-initialized stack. 16253 * The opposite is not true 16254 */ 16255 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC && 16256 cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO) 16257 continue; 16258 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] != 16259 cur->stack[spi].slot_type[i % BPF_REG_SIZE]) 16260 /* Ex: old explored (safe) state has STACK_SPILL in 16261 * this stack slot, but current has STACK_MISC -> 16262 * this verifier states are not equivalent, 16263 * return false to continue verification of this path 16264 */ 16265 return false; 16266 if (i % BPF_REG_SIZE != BPF_REG_SIZE - 1) 16267 continue; 16268 /* Both old and cur are having same slot_type */ 16269 switch (old->stack[spi].slot_type[BPF_REG_SIZE - 1]) { 16270 case STACK_SPILL: 16271 /* when explored and current stack slot are both storing 16272 * spilled registers, check that stored pointers types 16273 * are the same as well. 16274 * Ex: explored safe path could have stored 16275 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8} 16276 * but current path has stored: 16277 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16} 16278 * such verifier states are not equivalent. 
16279 * return false to continue verification of this path 16280 */ 16281 if (!regsafe(env, &old->stack[spi].spilled_ptr, 16282 &cur->stack[spi].spilled_ptr, idmap, exact)) 16283 return false; 16284 break; 16285 case STACK_DYNPTR: 16286 old_reg = &old->stack[spi].spilled_ptr; 16287 cur_reg = &cur->stack[spi].spilled_ptr; 16288 if (old_reg->dynptr.type != cur_reg->dynptr.type || 16289 old_reg->dynptr.first_slot != cur_reg->dynptr.first_slot || 16290 !check_ids(old_reg->ref_obj_id, cur_reg->ref_obj_id, idmap)) 16291 return false; 16292 break; 16293 case STACK_ITER: 16294 old_reg = &old->stack[spi].spilled_ptr; 16295 cur_reg = &cur->stack[spi].spilled_ptr; 16296 /* iter.depth is not compared between states as it 16297 * doesn't matter for correctness and would otherwise 16298 * prevent convergence; we maintain it only to prevent 16299 * infinite loop check triggering, see 16300 * iter_active_depths_differ() 16301 */ 16302 if (old_reg->iter.btf != cur_reg->iter.btf || 16303 old_reg->iter.btf_id != cur_reg->iter.btf_id || 16304 old_reg->iter.state != cur_reg->iter.state || 16305 /* ignore {old_reg,cur_reg}->iter.depth, see above */ 16306 !check_ids(old_reg->ref_obj_id, cur_reg->ref_obj_id, idmap)) 16307 return false; 16308 break; 16309 case STACK_MISC: 16310 case STACK_ZERO: 16311 case STACK_INVALID: 16312 continue; 16313 /* Ensure that new unhandled slot types return false by default */ 16314 default: 16315 return false; 16316 } 16317 } 16318 return true; 16319 } 16320 16321 static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur, 16322 struct bpf_idmap *idmap) 16323 { 16324 int i; 16325 16326 if (old->acquired_refs != cur->acquired_refs) 16327 return false; 16328 16329 for (i = 0; i < old->acquired_refs; i++) { 16330 if (!check_ids(old->refs[i].id, cur->refs[i].id, idmap)) 16331 return false; 16332 } 16333 16334 return true; 16335 } 16336 16337 /* compare two verifier states 16338 * 16339 * all states stored in state_list are known to be valid, since 16340 * verifier reached 'bpf_exit' instruction through them 16341 * 16342 * this function is called when verifier exploring different branches of 16343 * execution popped from the state stack. If it sees an old state that has 16344 * more strict register state and more strict stack state then this execution 16345 * branch doesn't need to be explored further, since verifier already 16346 * concluded that more strict state leads to valid finish. 16347 * 16348 * Therefore two states are equivalent if register state is more conservative 16349 * and explored stack state is more conservative than the current one. 16350 * Example: 16351 * explored current 16352 * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC) 16353 * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC) 16354 * 16355 * In other words if current stack state (one being explored) has more 16356 * valid slots than old one that already passed validation, it means 16357 * the verifier can stop exploring and conclude that current state is valid too 16358 * 16359 * Similarly with registers. 
If explored state has register type as invalid 16360 * whereas register type in current state is meaningful, it means that 16361 * the current state will reach 'bpf_exit' instruction safely 16362 */ 16363 static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_state *old, 16364 struct bpf_func_state *cur, bool exact) 16365 { 16366 int i; 16367 16368 for (i = 0; i < MAX_BPF_REG; i++) 16369 if (!regsafe(env, &old->regs[i], &cur->regs[i], 16370 &env->idmap_scratch, exact)) 16371 return false; 16372 16373 if (!stacksafe(env, old, cur, &env->idmap_scratch, exact)) 16374 return false; 16375 16376 if (!refsafe(old, cur, &env->idmap_scratch)) 16377 return false; 16378 16379 return true; 16380 } 16381 16382 static void reset_idmap_scratch(struct bpf_verifier_env *env) 16383 { 16384 env->idmap_scratch.tmp_id_gen = env->id_gen; 16385 memset(&env->idmap_scratch.map, 0, sizeof(env->idmap_scratch.map)); 16386 } 16387 16388 static bool states_equal(struct bpf_verifier_env *env, 16389 struct bpf_verifier_state *old, 16390 struct bpf_verifier_state *cur, 16391 bool exact) 16392 { 16393 int i; 16394 16395 if (old->curframe != cur->curframe) 16396 return false; 16397 16398 reset_idmap_scratch(env); 16399 16400 /* Verification state from speculative execution simulation 16401 * must never prune a non-speculative execution one. 16402 */ 16403 if (old->speculative && !cur->speculative) 16404 return false; 16405 16406 if (old->active_lock.ptr != cur->active_lock.ptr) 16407 return false; 16408 16409 /* Old and cur active_lock's have to be either both present 16410 * or both absent. 16411 */ 16412 if (!!old->active_lock.id != !!cur->active_lock.id) 16413 return false; 16414 16415 if (old->active_lock.id && 16416 !check_ids(old->active_lock.id, cur->active_lock.id, &env->idmap_scratch)) 16417 return false; 16418 16419 if (old->active_rcu_lock != cur->active_rcu_lock) 16420 return false; 16421 16422 /* for states to be equal callsites have to be the same 16423 * and all frame states need to be equivalent 16424 */ 16425 for (i = 0; i <= old->curframe; i++) { 16426 if (old->frame[i]->callsite != cur->frame[i]->callsite) 16427 return false; 16428 if (!func_states_equal(env, old->frame[i], cur->frame[i], exact)) 16429 return false; 16430 } 16431 return true; 16432 } 16433 16434 /* Return 0 if no propagation happened. Return negative error code if error 16435 * happened. Otherwise, return the propagated bit. 16436 */ 16437 static int propagate_liveness_reg(struct bpf_verifier_env *env, 16438 struct bpf_reg_state *reg, 16439 struct bpf_reg_state *parent_reg) 16440 { 16441 u8 parent_flag = parent_reg->live & REG_LIVE_READ; 16442 u8 flag = reg->live & REG_LIVE_READ; 16443 int err; 16444 16445 /* When comes here, read flags of PARENT_REG or REG could be any of 16446 * REG_LIVE_READ64, REG_LIVE_READ32, REG_LIVE_NONE. There is no need 16447 * of propagation if PARENT_REG has strongest REG_LIVE_READ64. 16448 */ 16449 if (parent_flag == REG_LIVE_READ64 || 16450 /* Or if there is no read flag from REG. */ 16451 !flag || 16452 /* Or if the read flag from REG is the same as PARENT_REG. */ 16453 parent_flag == flag) 16454 return 0; 16455 16456 err = mark_reg_read(env, reg, parent_reg, flag); 16457 if (err) 16458 return err; 16459 16460 return flag; 16461 } 16462 16463 /* A write screens off any subsequent reads; but write marks come from the 16464 * straight-line code between a state and its parent. 
When we arrive at an 16465 * equivalent state (jump target or such) we didn't arrive by the straight-line 16466 * code, so read marks in the state must propagate to the parent regardless 16467 * of the state's write marks. That's what 'parent == state->parent' comparison 16468 * in mark_reg_read() is for. 16469 */ 16470 static int propagate_liveness(struct bpf_verifier_env *env, 16471 const struct bpf_verifier_state *vstate, 16472 struct bpf_verifier_state *vparent) 16473 { 16474 struct bpf_reg_state *state_reg, *parent_reg; 16475 struct bpf_func_state *state, *parent; 16476 int i, frame, err = 0; 16477 16478 if (vparent->curframe != vstate->curframe) { 16479 WARN(1, "propagate_live: parent frame %d current frame %d\n", 16480 vparent->curframe, vstate->curframe); 16481 return -EFAULT; 16482 } 16483 /* Propagate read liveness of registers... */ 16484 BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG); 16485 for (frame = 0; frame <= vstate->curframe; frame++) { 16486 parent = vparent->frame[frame]; 16487 state = vstate->frame[frame]; 16488 parent_reg = parent->regs; 16489 state_reg = state->regs; 16490 /* We don't need to worry about FP liveness, it's read-only */ 16491 for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) { 16492 err = propagate_liveness_reg(env, &state_reg[i], 16493 &parent_reg[i]); 16494 if (err < 0) 16495 return err; 16496 if (err == REG_LIVE_READ64) 16497 mark_insn_zext(env, &parent_reg[i]); 16498 } 16499 16500 /* Propagate stack slots. */ 16501 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE && 16502 i < parent->allocated_stack / BPF_REG_SIZE; i++) { 16503 parent_reg = &parent->stack[i].spilled_ptr; 16504 state_reg = &state->stack[i].spilled_ptr; 16505 err = propagate_liveness_reg(env, state_reg, 16506 parent_reg); 16507 if (err < 0) 16508 return err; 16509 } 16510 } 16511 return 0; 16512 } 16513 16514 /* find precise scalars in the previous equivalent state and 16515 * propagate them into the current state 16516 */ 16517 static int propagate_precision(struct bpf_verifier_env *env, 16518 const struct bpf_verifier_state *old) 16519 { 16520 struct bpf_reg_state *state_reg; 16521 struct bpf_func_state *state; 16522 int i, err = 0, fr; 16523 bool first; 16524 16525 for (fr = old->curframe; fr >= 0; fr--) { 16526 state = old->frame[fr]; 16527 state_reg = state->regs; 16528 first = true; 16529 for (i = 0; i < BPF_REG_FP; i++, state_reg++) { 16530 if (state_reg->type != SCALAR_VALUE || 16531 !state_reg->precise || 16532 !(state_reg->live & REG_LIVE_READ)) 16533 continue; 16534 if (env->log.level & BPF_LOG_LEVEL2) { 16535 if (first) 16536 verbose(env, "frame %d: propagating r%d", fr, i); 16537 else 16538 verbose(env, ",r%d", i); 16539 } 16540 bt_set_frame_reg(&env->bt, fr, i); 16541 first = false; 16542 } 16543 16544 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { 16545 if (!is_spilled_reg(&state->stack[i])) 16546 continue; 16547 state_reg = &state->stack[i].spilled_ptr; 16548 if (state_reg->type != SCALAR_VALUE || 16549 !state_reg->precise || 16550 !(state_reg->live & REG_LIVE_READ)) 16551 continue; 16552 if (env->log.level & BPF_LOG_LEVEL2) { 16553 if (first) 16554 verbose(env, "frame %d: propagating fp%d", 16555 fr, (-i - 1) * BPF_REG_SIZE); 16556 else 16557 verbose(env, ",fp%d", (-i - 1) * BPF_REG_SIZE); 16558 } 16559 bt_set_frame_slot(&env->bt, fr, i); 16560 first = false; 16561 } 16562 if (!first) 16563 verbose(env, "\n"); 16564 } 16565 16566 err = mark_chain_precision_batch(env); 16567 if (err < 0) 16568 return err; 16569 16570 return 0; 
16571 }
16572
16573 static bool states_maybe_looping(struct bpf_verifier_state *old,
16574 struct bpf_verifier_state *cur)
16575 {
16576 struct bpf_func_state *fold, *fcur;
16577 int i, fr = cur->curframe;
16578
16579 if (old->curframe != fr)
16580 return false;
16581
16582 fold = old->frame[fr];
16583 fcur = cur->frame[fr];
16584 for (i = 0; i < MAX_BPF_REG; i++)
16585 if (memcmp(&fold->regs[i], &fcur->regs[i],
16586 offsetof(struct bpf_reg_state, parent)))
16587 return false;
16588 return true;
16589 }
16590
16591 static bool is_iter_next_insn(struct bpf_verifier_env *env, int insn_idx)
16592 {
16593 return env->insn_aux_data[insn_idx].is_iter_next;
16594 }
16595
16596 /* is_state_visited() handles iter_next() (see process_iter_next_call() for
16597 * terminology) calls specially: as opposed to bounded BPF loops, it *expects*
16598 * states to match, which otherwise would look like an infinite loop. So while
16599 * iter_next() calls are taken care of, we still need to be careful and
16600 * prevent erroneous and too eager declaration of "infinite loop", when
16601 * iterators are involved.
16602 *
16603 * Here's a situation in pseudo-BPF assembly form:
16604 *
16605 * 0: again: ; set up iter_next() call args
16606 * 1: r1 = &it ; <CHECKPOINT HERE>
16607 * 2: call bpf_iter_num_next ; this is iter_next() call
16608 * 3: if r0 == 0 goto done
16609 * 4: ... something useful here ...
16610 * 5: goto again ; another iteration
16611 * 6: done:
16612 * 7: r1 = &it
16613 * 8: call bpf_iter_num_destroy ; clean up iter state
16614 * 9: exit
16615 *
16616 * This is a typical loop. Let's assume that we have a prune point at 1:,
16617 * before we get to `call bpf_iter_num_next` (e.g., because of that `goto
16618 * again`, assuming other heuristics don't get in the way).
16619 *
16620 * When we first come to 1:, let's say we have some state X. We proceed
16621 * to 2:, fork states, enqueue ACTIVE, validate NULL case successfully, exit.
16622 * Now we come back to validate that forked ACTIVE state. We proceed through
16623 * 3-5, come to goto, jump to 1:. Let's assume our state didn't change, so we
16624 * are converging. But the problem is that we don't know that yet, as this
16625 * convergence has to happen at iter_next() call site only. So if nothing is
16626 * done, at 1: verifier will use bounded loop logic and declare infinite
16627 * looping (and would be *technically* correct, if not for iterator's
16628 * "eventual sticky NULL" contract, see process_iter_next_call()). But we
16629 * don't want that. So what we do in process_iter_next_call() when we go on
16630 * another ACTIVE iteration, we bump slot->iter.depth, to mark that it's
16631 * a different iteration. So when we suspect an infinite loop, we additionally
16632 * check if any of the *ACTIVE* iterator states' depths differ. If yes, we
16633 * pretend we are not looping and wait for next iter_next() call.
16634 *
16635 * This only applies to ACTIVE state. In DRAINED state we don't expect to
16636 * loop, because that would actually mean infinite loop, as DRAINED state is
16637 * "sticky", and so we'll keep returning into the same instruction with the
16638 * same state (at least in one of possible code paths).
16639 *
16640 * This approach allows us to keep the infinite loop heuristic even in the face
16641 * of an active iterator. E.g., the C snippet below is and will be detected as
16642 * infinitely looping:
16643 *
16644 * struct bpf_iter_num it;
16645 * int *p, x;
16646 *
16647 * bpf_iter_num_new(&it, 0, 10);
16648 * while ((p = bpf_iter_num_next(&it))) {
16649 * x = *p;
16650 * while (x--) {} // <<-- infinite loop here
16651 * }
16652 *
16653 */
16654 static bool iter_active_depths_differ(struct bpf_verifier_state *old, struct bpf_verifier_state *cur)
16655 {
16656 struct bpf_reg_state *slot, *cur_slot;
16657 struct bpf_func_state *state;
16658 int i, fr;
16659
16660 for (fr = old->curframe; fr >= 0; fr--) {
16661 state = old->frame[fr];
16662 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
16663 if (state->stack[i].slot_type[0] != STACK_ITER)
16664 continue;
16665
16666 slot = &state->stack[i].spilled_ptr;
16667 if (slot->iter.state != BPF_ITER_STATE_ACTIVE)
16668 continue;
16669
16670 cur_slot = &cur->frame[fr]->stack[i].spilled_ptr;
16671 if (cur_slot->iter.depth != slot->iter.depth)
16672 return true;
16673 }
16674 }
16675 return false;
16676 }
16677
16678 static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
16679 {
16680 struct bpf_verifier_state_list *new_sl;
16681 struct bpf_verifier_state_list *sl, **pprev;
16682 struct bpf_verifier_state *cur = env->cur_state, *new, *loop_entry;
16683 int i, j, n, err, states_cnt = 0;
16684 bool force_new_state = env->test_state_freq || is_force_checkpoint(env, insn_idx);
16685 bool add_new_state = force_new_state;
16686 bool force_exact;
16687
16688 /* bpf progs typically have pruning point every 4 instructions
16689 * http://vger.kernel.org/bpfconf2019.html#session-1
16690 * Do not add new state for future pruning if the verifier hasn't seen
16691 * at least 2 jumps and at least 8 instructions.
16692 * This heuristic helps decrease 'total_states' and 'peak_states' metrics.
16693 * In tests, that amounts to up to 50% reduction in total verifier
16694 * memory consumption and 20% verifier time speedup.
16695 */
16696 if (env->jmps_processed - env->prev_jmps_processed >= 2 &&
16697 env->insn_processed - env->prev_insn_processed >= 8)
16698 add_new_state = true;
16699
16700 pprev = explored_state(env, insn_idx);
16701 sl = *pprev;
16702
16703 clean_live_states(env, insn_idx, cur);
16704
16705 while (sl) {
16706 states_cnt++;
16707 if (sl->state.insn_idx != insn_idx)
16708 goto next;
16709
16710 if (sl->state.branches) {
16711 struct bpf_func_state *frame = sl->state.frame[sl->state.curframe];
16712
16713 if (frame->in_async_callback_fn &&
16714 frame->async_entry_cnt != cur->frame[cur->curframe]->async_entry_cnt) {
16715 /* Different async_entry_cnt means that the verifier is
16716 * processing another entry into async callback.
16717 * Seeing the same state is not an indication of infinite
16718 * loop or infinite recursion.
16719 * But finding the same state doesn't mean that it's safe
16720 * to stop processing the current state. The previous state
16721 * hasn't yet reached bpf_exit, since state.branches > 0.
16722 * Checking in_async_callback_fn alone is not enough either,
16723 * since the verifier still needs to catch infinite loops
16724 * inside async callbacks.
16725 */
16726 goto skip_inf_loop_check;
16727 }
16728 /* BPF open-coded iterators loop detection is special.
16729 * states_maybe_looping() logic is too simplistic in detecting
16730 * states that *might* be equivalent, because it doesn't know
16731 * about ID remapping, so don't even perform it.
16732 * See process_iter_next_call() and iter_active_depths_differ()
16733 * for an overview of the logic. When the current and one of the parent
16734 * states are detected as equivalent, it's a good thing: we prove
16735 * convergence and can stop simulating further iterations.
16736 * It's safe to assume that the iterator loop will finish, taking into
16737 * account iter_next() contract of eventually returning
16738 * sticky NULL result.
16739 *
16740 * Note that states have to be compared exactly in this case because
16741 * read and precision marks might not be finalized inside the loop.
16742 * E.g. as in the program below:
16743 *
16744 * 1. r7 = -16
16745 * 2. r6 = bpf_get_prandom_u32()
16746 * 3. while (bpf_iter_num_next(&fp[-8])) {
16747 * 4. if (r6 != 42) {
16748 * 5. r7 = -32
16749 * 6. r6 = bpf_get_prandom_u32()
16750 * 7. continue
16751 * 8. }
16752 * 9. r0 = r10
16753 * 10. r0 += r7
16754 * 11. r8 = *(u64 *)(r0 + 0)
16755 * 12. r6 = bpf_get_prandom_u32()
16756 * 13. }
16757 *
16758 * Here verifier would first visit path 1-3, create a checkpoint at 3
16759 * with r7=-16, continue to 4-7,3. Existing checkpoint at 3 does
16760 * not have read or precision mark for r7 yet, thus inexact states
16761 * comparison would discard current state with r7=-32
16762 * => unsafe memory access at 11 would not be caught.
16763 */
16764 if (is_iter_next_insn(env, insn_idx)) {
16765 if (states_equal(env, &sl->state, cur, true)) {
16766 struct bpf_func_state *cur_frame;
16767 struct bpf_reg_state *iter_state, *iter_reg;
16768 int spi;
16769
16770 cur_frame = cur->frame[cur->curframe];
16771 /* btf_check_iter_kfuncs() enforces that
16772 * iter state pointer is always the first arg
16773 */
16774 iter_reg = &cur_frame->regs[BPF_REG_1];
16775 /* current state is valid due to states_equal(),
16776 * so we can assume valid iter and reg state,
16777 * no need for extra (re-)validations
16778 */
16779 spi = __get_spi(iter_reg->off + iter_reg->var_off.value);
16780 iter_state = &func(env, iter_reg)->stack[spi].spilled_ptr;
16781 if (iter_state->iter.state == BPF_ITER_STATE_ACTIVE) {
16782 update_loop_entry(cur, &sl->state);
16783 goto hit;
16784 }
16785 }
16786 goto skip_inf_loop_check;
16787 }
16788 if (calls_callback(env, insn_idx)) {
16789 if (states_equal(env, &sl->state, cur, true))
16790 goto hit;
16791 goto skip_inf_loop_check;
16792 }
16793 /* attempt to detect infinite loop to avoid unnecessary doomed work */
16794 if (states_maybe_looping(&sl->state, cur) &&
16795 states_equal(env, &sl->state, cur, false) &&
16796 !iter_active_depths_differ(&sl->state, cur) &&
16797 sl->state.callback_unroll_depth == cur->callback_unroll_depth) {
16798 verbose_linfo(env, insn_idx, "; ");
16799 verbose(env, "infinite loop detected at insn %d\n", insn_idx);
16800 verbose(env, "cur state:");
16801 print_verifier_state(env, cur->frame[cur->curframe], true);
16802 verbose(env, "old state:");
16803 print_verifier_state(env, sl->state.frame[cur->curframe], true);
16804 return -EINVAL;
16805 }
16806 /* if the verifier is processing a loop, avoid adding new state
16807 * too often, since different loop iterations have distinct
16808 * states and may not help future pruning.
16809 * This threshold shouldn't be too low to make sure that
16810 * a loop with large bound will be rejected quickly.
16811 * The most abusive loop will be:
16812 * r1 += 1
16813 * if r1 < 1000000 goto pc-2
16814 * 1M insn_processed limit / 100 == 10k peak states.
16815 * This threshold shouldn't be too high either, since states
16816 * at the end of the loop are likely to be useful in pruning.
16817 */
16818 skip_inf_loop_check:
16819 if (!force_new_state &&
16820 env->jmps_processed - env->prev_jmps_processed < 20 &&
16821 env->insn_processed - env->prev_insn_processed < 100)
16822 add_new_state = false;
16823 goto miss;
16824 }
16825 /* If sl->state is a part of a loop and this loop's entry is a part of
16826 * current verification path then states have to be compared exactly.
16827 * 'force_exact' is needed to catch the following case:
16828 *
16829 * initial Here state 'succ' was processed first,
16830 * | it was eventually tracked to produce a
16831 * V state identical to 'hdr'.
16832 * .---------> hdr All branches from 'succ' had been explored
16833 * | | and thus 'succ' has its .branches == 0.
16834 * | V
16835 * | .------... Suppose states 'cur' and 'succ' correspond
16836 * | | | to the same instruction + callsites.
16837 * | V V In such case it is necessary to check
16838 * | ... ... if 'succ' and 'cur' are states_equal().
16839 * | | | If 'succ' and 'cur' are a part of the
16840 * | V V same loop exact flag has to be set.
16841 * | succ <- cur To check if that is the case, verify
16842 * | | if loop entry of 'succ' is in current
16843 * | V DFS path.
16844 * | ...
16845 * | |
16846 * '----'
16847 *
16848 * Additional details are in the comment before get_loop_entry().
16849 */
16850 loop_entry = get_loop_entry(&sl->state);
16851 force_exact = loop_entry && loop_entry->branches > 0;
16852 if (states_equal(env, &sl->state, cur, force_exact)) {
16853 if (force_exact)
16854 update_loop_entry(cur, loop_entry);
16855 hit:
16856 sl->hit_cnt++;
16857 /* reached equivalent register/stack state,
16858 * prune the search.
16859 * Registers read by the continuation are read by us.
16860 * If we have any write marks in env->cur_state, they
16861 * will prevent corresponding reads in the continuation
16862 * from reaching our parent (an explored_state). Our
16863 * own state will get the read marks recorded, but
16864 * they'll be immediately forgotten as we're pruning
16865 * this state and will pop a new one.
16866 */
16867 err = propagate_liveness(env, &sl->state, cur);
16868
16869 /* if previous state reached the exit with precision and
16870 * current state is equivalent to it (except precision marks),
16871 * the precision needs to be propagated back in
16872 * the current state.
16873 */
16874 err = err ? : push_jmp_history(env, cur);
16875 err = err ? : propagate_precision(env, &sl->state);
16876 if (err)
16877 return err;
16878 return 1;
16879 }
16880 miss:
16881 /* when new state is not going to be added do not increase miss count.
16882 * Otherwise several loop iterations will remove the state
16883 * recorded earlier. The goal of these heuristics is to have
16884 * states from some iterations of the loop (some in the beginning
16885 * and some at the end) to help pruning.
16886 */
16887 if (add_new_state)
16888 sl->miss_cnt++;
16889 /* heuristic to determine whether this state is beneficial
16890 * to keep checking from state equivalence point of view.
16891 * Higher numbers increase max_states_per_insn and verification time,
16892 * but do not meaningfully decrease insn_processed.
16893 * 'n' controls how many times state could miss before eviction.
16894 * Use bigger 'n' for checkpoints because evicting checkpoint states
16895 * too early would hinder iterator convergence.
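 * E.g., with n == 3 a state that was never hit is dropped after its
 * 4th miss (miss_cnt > hit_cnt * 3 + 3), while a checkpoint with
 * unfinished branches uses n == 64 and is only dropped once it has
 * missed 65 times.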
16896 */ 16897 n = is_force_checkpoint(env, insn_idx) && sl->state.branches > 0 ? 64 : 3; 16898 if (sl->miss_cnt > sl->hit_cnt * n + n) { 16899 /* the state is unlikely to be useful. Remove it to 16900 * speed up verification 16901 */ 16902 *pprev = sl->next; 16903 if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE && 16904 !sl->state.used_as_loop_entry) { 16905 u32 br = sl->state.branches; 16906 16907 WARN_ONCE(br, 16908 "BUG live_done but branches_to_explore %d\n", 16909 br); 16910 free_verifier_state(&sl->state, false); 16911 kfree(sl); 16912 env->peak_states--; 16913 } else { 16914 /* cannot free this state, since parentage chain may 16915 * walk it later. Add it for free_list instead to 16916 * be freed at the end of verification 16917 */ 16918 sl->next = env->free_list; 16919 env->free_list = sl; 16920 } 16921 sl = *pprev; 16922 continue; 16923 } 16924 next: 16925 pprev = &sl->next; 16926 sl = *pprev; 16927 } 16928 16929 if (env->max_states_per_insn < states_cnt) 16930 env->max_states_per_insn = states_cnt; 16931 16932 if (!env->bpf_capable && states_cnt > BPF_COMPLEXITY_LIMIT_STATES) 16933 return 0; 16934 16935 if (!add_new_state) 16936 return 0; 16937 16938 /* There were no equivalent states, remember the current one. 16939 * Technically the current state is not proven to be safe yet, 16940 * but it will either reach outer most bpf_exit (which means it's safe) 16941 * or it will be rejected. When there are no loops the verifier won't be 16942 * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx) 16943 * again on the way to bpf_exit. 16944 * When looping the sl->state.branches will be > 0 and this state 16945 * will not be considered for equivalence until branches == 0. 16946 */ 16947 new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL); 16948 if (!new_sl) 16949 return -ENOMEM; 16950 env->total_states++; 16951 env->peak_states++; 16952 env->prev_jmps_processed = env->jmps_processed; 16953 env->prev_insn_processed = env->insn_processed; 16954 16955 /* forget precise markings we inherited, see __mark_chain_precision */ 16956 if (env->bpf_capable) 16957 mark_all_scalars_imprecise(env, cur); 16958 16959 /* add new state to the head of linked list */ 16960 new = &new_sl->state; 16961 err = copy_verifier_state(new, cur); 16962 if (err) { 16963 free_verifier_state(new, false); 16964 kfree(new_sl); 16965 return err; 16966 } 16967 new->insn_idx = insn_idx; 16968 WARN_ONCE(new->branches != 1, 16969 "BUG is_state_visited:branches_to_explore=%d insn %d\n", new->branches, insn_idx); 16970 16971 cur->parent = new; 16972 cur->first_insn_idx = insn_idx; 16973 cur->dfs_depth = new->dfs_depth + 1; 16974 clear_jmp_history(cur); 16975 new_sl->next = *explored_state(env, insn_idx); 16976 *explored_state(env, insn_idx) = new_sl; 16977 /* connect new state to parentage chain. Current frame needs all 16978 * registers connected. Only r6 - r9 of the callers are alive (pushed 16979 * to the stack implicitly by JITs) so in callers' frames connect just 16980 * r6 - r9 as an optimization. Callers will have r1 - r5 connected to 16981 * the state of the call instruction (with WRITTEN set), and r0 comes 16982 * from callee with its full parentage chain, anyway. 16983 */ 16984 /* clear write marks in current state: the writes we did are not writes 16985 * our child did, so they don't screen off its reads from us. 16986 * (There are no read marks in current state, because reads always mark 16987 * their parent and current state never has children yet. 
Only 16988 * explored_states can get read marks.) 16989 */ 16990 for (j = 0; j <= cur->curframe; j++) { 16991 for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) 16992 cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i]; 16993 for (i = 0; i < BPF_REG_FP; i++) 16994 cur->frame[j]->regs[i].live = REG_LIVE_NONE; 16995 } 16996 16997 /* all stack frames are accessible from callee, clear them all */ 16998 for (j = 0; j <= cur->curframe; j++) { 16999 struct bpf_func_state *frame = cur->frame[j]; 17000 struct bpf_func_state *newframe = new->frame[j]; 17001 17002 for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) { 17003 frame->stack[i].spilled_ptr.live = REG_LIVE_NONE; 17004 frame->stack[i].spilled_ptr.parent = 17005 &newframe->stack[i].spilled_ptr; 17006 } 17007 } 17008 return 0; 17009 } 17010 17011 /* Return true if it's OK to have the same insn return a different type. */ 17012 static bool reg_type_mismatch_ok(enum bpf_reg_type type) 17013 { 17014 switch (base_type(type)) { 17015 case PTR_TO_CTX: 17016 case PTR_TO_SOCKET: 17017 case PTR_TO_SOCK_COMMON: 17018 case PTR_TO_TCP_SOCK: 17019 case PTR_TO_XDP_SOCK: 17020 case PTR_TO_BTF_ID: 17021 return false; 17022 default: 17023 return true; 17024 } 17025 } 17026 17027 /* If an instruction was previously used with particular pointer types, then we 17028 * need to be careful to avoid cases such as the below, where it may be ok 17029 * for one branch accessing the pointer, but not ok for the other branch: 17030 * 17031 * R1 = sock_ptr 17032 * goto X; 17033 * ... 17034 * R1 = some_other_valid_ptr; 17035 * goto X; 17036 * ... 17037 * R2 = *(u32 *)(R1 + 0); 17038 */ 17039 static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev) 17040 { 17041 return src != prev && (!reg_type_mismatch_ok(src) || 17042 !reg_type_mismatch_ok(prev)); 17043 } 17044 17045 static int save_aux_ptr_type(struct bpf_verifier_env *env, enum bpf_reg_type type, 17046 bool allow_trust_missmatch) 17047 { 17048 enum bpf_reg_type *prev_type = &env->insn_aux_data[env->insn_idx].ptr_type; 17049 17050 if (*prev_type == NOT_INIT) { 17051 /* Saw a valid insn 17052 * dst_reg = *(u32 *)(src_reg + off) 17053 * save type to validate intersecting paths 17054 */ 17055 *prev_type = type; 17056 } else if (reg_type_mismatch(type, *prev_type)) { 17057 /* Abuser program is trying to use the same insn 17058 * dst_reg = *(u32*) (src_reg + off) 17059 * with different pointer types: 17060 * src_reg == ctx in one branch and 17061 * src_reg == stack|map in some other branch. 17062 * Reject it. 17063 */ 17064 if (allow_trust_missmatch && 17065 base_type(type) == PTR_TO_BTF_ID && 17066 base_type(*prev_type) == PTR_TO_BTF_ID) { 17067 /* 17068 * Have to support a use case when one path through 17069 * the program yields TRUSTED pointer while another 17070 * is UNTRUSTED. Fallback to UNTRUSTED to generate 17071 * BPF_PROBE_MEM/BPF_PROBE_MEMSX. 
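 * E.g., one path may reach this load with PTR_TO_BTF_ID | PTR_TRUSTED
 * and another with PTR_TO_BTF_ID | PTR_UNTRUSTED; downgrading the saved
 * type to untrusted keeps the shared instruction valid on both paths,
 * at the cost of emitting the exception-handled BPF_PROBE_MEM /
 * BPF_PROBE_MEMSX form of the load.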
17072 */ 17073 *prev_type = PTR_TO_BTF_ID | PTR_UNTRUSTED; 17074 } else { 17075 verbose(env, "same insn cannot be used with different pointers\n"); 17076 return -EINVAL; 17077 } 17078 } 17079 17080 return 0; 17081 } 17082 17083 static int do_check(struct bpf_verifier_env *env) 17084 { 17085 bool pop_log = !(env->log.level & BPF_LOG_LEVEL2); 17086 struct bpf_verifier_state *state = env->cur_state; 17087 struct bpf_insn *insns = env->prog->insnsi; 17088 struct bpf_reg_state *regs; 17089 int insn_cnt = env->prog->len; 17090 bool do_print_state = false; 17091 int prev_insn_idx = -1; 17092 17093 for (;;) { 17094 bool exception_exit = false; 17095 struct bpf_insn *insn; 17096 u8 class; 17097 int err; 17098 17099 env->prev_insn_idx = prev_insn_idx; 17100 if (env->insn_idx >= insn_cnt) { 17101 verbose(env, "invalid insn idx %d insn_cnt %d\n", 17102 env->insn_idx, insn_cnt); 17103 return -EFAULT; 17104 } 17105 17106 insn = &insns[env->insn_idx]; 17107 class = BPF_CLASS(insn->code); 17108 17109 if (++env->insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) { 17110 verbose(env, 17111 "BPF program is too large. Processed %d insn\n", 17112 env->insn_processed); 17113 return -E2BIG; 17114 } 17115 17116 state->last_insn_idx = env->prev_insn_idx; 17117 17118 if (is_prune_point(env, env->insn_idx)) { 17119 err = is_state_visited(env, env->insn_idx); 17120 if (err < 0) 17121 return err; 17122 if (err == 1) { 17123 /* found equivalent state, can prune the search */ 17124 if (env->log.level & BPF_LOG_LEVEL) { 17125 if (do_print_state) 17126 verbose(env, "\nfrom %d to %d%s: safe\n", 17127 env->prev_insn_idx, env->insn_idx, 17128 env->cur_state->speculative ? 17129 " (speculative execution)" : ""); 17130 else 17131 verbose(env, "%d: safe\n", env->insn_idx); 17132 } 17133 goto process_bpf_exit; 17134 } 17135 } 17136 17137 if (is_jmp_point(env, env->insn_idx)) { 17138 err = push_jmp_history(env, state); 17139 if (err) 17140 return err; 17141 } 17142 17143 if (signal_pending(current)) 17144 return -EAGAIN; 17145 17146 if (need_resched()) 17147 cond_resched(); 17148 17149 if (env->log.level & BPF_LOG_LEVEL2 && do_print_state) { 17150 verbose(env, "\nfrom %d to %d%s:", 17151 env->prev_insn_idx, env->insn_idx, 17152 env->cur_state->speculative ? 
17153 " (speculative execution)" : ""); 17154 print_verifier_state(env, state->frame[state->curframe], true); 17155 do_print_state = false; 17156 } 17157 17158 if (env->log.level & BPF_LOG_LEVEL) { 17159 const struct bpf_insn_cbs cbs = { 17160 .cb_call = disasm_kfunc_name, 17161 .cb_print = verbose, 17162 .private_data = env, 17163 }; 17164 17165 if (verifier_state_scratched(env)) 17166 print_insn_state(env, state->frame[state->curframe]); 17167 17168 verbose_linfo(env, env->insn_idx, "; "); 17169 env->prev_log_pos = env->log.end_pos; 17170 verbose(env, "%d: ", env->insn_idx); 17171 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks); 17172 env->prev_insn_print_pos = env->log.end_pos - env->prev_log_pos; 17173 env->prev_log_pos = env->log.end_pos; 17174 } 17175 17176 if (bpf_prog_is_offloaded(env->prog->aux)) { 17177 err = bpf_prog_offload_verify_insn(env, env->insn_idx, 17178 env->prev_insn_idx); 17179 if (err) 17180 return err; 17181 } 17182 17183 regs = cur_regs(env); 17184 sanitize_mark_insn_seen(env); 17185 prev_insn_idx = env->insn_idx; 17186 17187 if (class == BPF_ALU || class == BPF_ALU64) { 17188 err = check_alu_op(env, insn); 17189 if (err) 17190 return err; 17191 17192 } else if (class == BPF_LDX) { 17193 enum bpf_reg_type src_reg_type; 17194 17195 /* check for reserved fields is already done */ 17196 17197 /* check src operand */ 17198 err = check_reg_arg(env, insn->src_reg, SRC_OP); 17199 if (err) 17200 return err; 17201 17202 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); 17203 if (err) 17204 return err; 17205 17206 src_reg_type = regs[insn->src_reg].type; 17207 17208 /* check that memory (src_reg + off) is readable, 17209 * the state of dst_reg will be updated by this func 17210 */ 17211 err = check_mem_access(env, env->insn_idx, insn->src_reg, 17212 insn->off, BPF_SIZE(insn->code), 17213 BPF_READ, insn->dst_reg, false, 17214 BPF_MODE(insn->code) == BPF_MEMSX); 17215 err = err ?: save_aux_ptr_type(env, src_reg_type, true); 17216 err = err ?: reg_bounds_sanity_check(env, ®s[insn->dst_reg], "ldx"); 17217 if (err) 17218 return err; 17219 } else if (class == BPF_STX) { 17220 enum bpf_reg_type dst_reg_type; 17221 17222 if (BPF_MODE(insn->code) == BPF_ATOMIC) { 17223 err = check_atomic(env, env->insn_idx, insn); 17224 if (err) 17225 return err; 17226 env->insn_idx++; 17227 continue; 17228 } 17229 17230 if (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0) { 17231 verbose(env, "BPF_STX uses reserved fields\n"); 17232 return -EINVAL; 17233 } 17234 17235 /* check src1 operand */ 17236 err = check_reg_arg(env, insn->src_reg, SRC_OP); 17237 if (err) 17238 return err; 17239 /* check src2 operand */ 17240 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 17241 if (err) 17242 return err; 17243 17244 dst_reg_type = regs[insn->dst_reg].type; 17245 17246 /* check that memory (dst_reg + off) is writeable */ 17247 err = check_mem_access(env, env->insn_idx, insn->dst_reg, 17248 insn->off, BPF_SIZE(insn->code), 17249 BPF_WRITE, insn->src_reg, false, false); 17250 if (err) 17251 return err; 17252 17253 err = save_aux_ptr_type(env, dst_reg_type, false); 17254 if (err) 17255 return err; 17256 } else if (class == BPF_ST) { 17257 enum bpf_reg_type dst_reg_type; 17258 17259 if (BPF_MODE(insn->code) != BPF_MEM || 17260 insn->src_reg != BPF_REG_0) { 17261 verbose(env, "BPF_ST uses reserved fields\n"); 17262 return -EINVAL; 17263 } 17264 /* check src operand */ 17265 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 17266 if (err) 17267 return err; 17268 17269 dst_reg_type = 
regs[insn->dst_reg].type; 17270 17271 /* check that memory (dst_reg + off) is writeable */ 17272 err = check_mem_access(env, env->insn_idx, insn->dst_reg, 17273 insn->off, BPF_SIZE(insn->code), 17274 BPF_WRITE, -1, false, false); 17275 if (err) 17276 return err; 17277 17278 err = save_aux_ptr_type(env, dst_reg_type, false); 17279 if (err) 17280 return err; 17281 } else if (class == BPF_JMP || class == BPF_JMP32) { 17282 u8 opcode = BPF_OP(insn->code); 17283 17284 env->jmps_processed++; 17285 if (opcode == BPF_CALL) { 17286 if (BPF_SRC(insn->code) != BPF_K || 17287 (insn->src_reg != BPF_PSEUDO_KFUNC_CALL 17288 && insn->off != 0) || 17289 (insn->src_reg != BPF_REG_0 && 17290 insn->src_reg != BPF_PSEUDO_CALL && 17291 insn->src_reg != BPF_PSEUDO_KFUNC_CALL) || 17292 insn->dst_reg != BPF_REG_0 || 17293 class == BPF_JMP32) { 17294 verbose(env, "BPF_CALL uses reserved fields\n"); 17295 return -EINVAL; 17296 } 17297 17298 if (env->cur_state->active_lock.ptr) { 17299 if ((insn->src_reg == BPF_REG_0 && insn->imm != BPF_FUNC_spin_unlock) || 17300 (insn->src_reg == BPF_PSEUDO_CALL) || 17301 (insn->src_reg == BPF_PSEUDO_KFUNC_CALL && 17302 (insn->off != 0 || !is_bpf_graph_api_kfunc(insn->imm)))) { 17303 verbose(env, "function calls are not allowed while holding a lock\n"); 17304 return -EINVAL; 17305 } 17306 } 17307 if (insn->src_reg == BPF_PSEUDO_CALL) { 17308 err = check_func_call(env, insn, &env->insn_idx); 17309 } else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) { 17310 err = check_kfunc_call(env, insn, &env->insn_idx); 17311 if (!err && is_bpf_throw_kfunc(insn)) { 17312 exception_exit = true; 17313 goto process_bpf_exit_full; 17314 } 17315 } else { 17316 err = check_helper_call(env, insn, &env->insn_idx); 17317 } 17318 if (err) 17319 return err; 17320 17321 mark_reg_scratched(env, BPF_REG_0); 17322 } else if (opcode == BPF_JA) { 17323 if (BPF_SRC(insn->code) != BPF_K || 17324 insn->src_reg != BPF_REG_0 || 17325 insn->dst_reg != BPF_REG_0 || 17326 (class == BPF_JMP && insn->imm != 0) || 17327 (class == BPF_JMP32 && insn->off != 0)) { 17328 verbose(env, "BPF_JA uses reserved fields\n"); 17329 return -EINVAL; 17330 } 17331 17332 if (class == BPF_JMP) 17333 env->insn_idx += insn->off + 1; 17334 else 17335 env->insn_idx += insn->imm + 1; 17336 continue; 17337 17338 } else if (opcode == BPF_EXIT) { 17339 if (BPF_SRC(insn->code) != BPF_K || 17340 insn->imm != 0 || 17341 insn->src_reg != BPF_REG_0 || 17342 insn->dst_reg != BPF_REG_0 || 17343 class == BPF_JMP32) { 17344 verbose(env, "BPF_EXIT uses reserved fields\n"); 17345 return -EINVAL; 17346 } 17347 process_bpf_exit_full: 17348 if (env->cur_state->active_lock.ptr && 17349 !in_rbtree_lock_required_cb(env)) { 17350 verbose(env, "bpf_spin_unlock is missing\n"); 17351 return -EINVAL; 17352 } 17353 17354 if (env->cur_state->active_rcu_lock && 17355 !in_rbtree_lock_required_cb(env)) { 17356 verbose(env, "bpf_rcu_read_unlock is missing\n"); 17357 return -EINVAL; 17358 } 17359 17360 /* We must do check_reference_leak here before 17361 * prepare_func_exit to handle the case when 17362 * state->curframe > 0, it may be a callback 17363 * function, for which reference_state must 17364 * match caller reference state when it exits. 17365 */ 17366 err = check_reference_leak(env, exception_exit); 17367 if (err) 17368 return err; 17369 17370 /* The side effect of the prepare_func_exit 17371 * which is being skipped is that it frees 17372 * bpf_func_state. Typically, process_bpf_exit 17373 * will only be hit with outermost exit. 
17374 * copy_verifier_state in pop_stack will handle 17375 * freeing of any extra bpf_func_state left over 17376 * from not processing all nested function 17377 * exits. We also skip return code checks as 17378 * they are not needed for exceptional exits. 17379 */ 17380 if (exception_exit) 17381 goto process_bpf_exit; 17382 17383 if (state->curframe) { 17384 /* exit from nested function */ 17385 err = prepare_func_exit(env, &env->insn_idx); 17386 if (err) 17387 return err; 17388 do_print_state = true; 17389 continue; 17390 } 17391 17392 err = check_return_code(env, BPF_REG_0); 17393 if (err) 17394 return err; 17395 process_bpf_exit: 17396 mark_verifier_state_scratched(env); 17397 update_branch_counts(env, env->cur_state); 17398 err = pop_stack(env, &prev_insn_idx, 17399 &env->insn_idx, pop_log); 17400 if (err < 0) { 17401 if (err != -ENOENT) 17402 return err; 17403 break; 17404 } else { 17405 do_print_state = true; 17406 continue; 17407 } 17408 } else { 17409 err = check_cond_jmp_op(env, insn, &env->insn_idx); 17410 if (err) 17411 return err; 17412 } 17413 } else if (class == BPF_LD) { 17414 u8 mode = BPF_MODE(insn->code); 17415 17416 if (mode == BPF_ABS || mode == BPF_IND) { 17417 err = check_ld_abs(env, insn); 17418 if (err) 17419 return err; 17420 17421 } else if (mode == BPF_IMM) { 17422 err = check_ld_imm(env, insn); 17423 if (err) 17424 return err; 17425 17426 env->insn_idx++; 17427 sanitize_mark_insn_seen(env); 17428 } else { 17429 verbose(env, "invalid BPF_LD mode\n"); 17430 return -EINVAL; 17431 } 17432 } else { 17433 verbose(env, "unknown insn class %d\n", class); 17434 return -EINVAL; 17435 } 17436 17437 env->insn_idx++; 17438 } 17439 17440 return 0; 17441 } 17442 17443 static int find_btf_percpu_datasec(struct btf *btf) 17444 { 17445 const struct btf_type *t; 17446 const char *tname; 17447 int i, n; 17448 17449 /* 17450 * Both vmlinux and module each have their own ".data..percpu" 17451 * DATASECs in BTF. So for module's case, we need to skip vmlinux BTF 17452 * types to look at only module's own BTF types. 
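 *
 * A rough sketch of the type-id ranges this relies on (split BTF):
 *   vmlinux BTF:  the scan below covers ids [1, btf_nr_types(btf))
 *   module BTF:   ids continue from the vmlinux base, so the module's
 *                 own types are ids [btf_nr_types(btf_vmlinux), btf_nr_types(btf))
 * which is why the start index below differs between the two cases.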
17453 */ 17454 n = btf_nr_types(btf); 17455 if (btf_is_module(btf)) 17456 i = btf_nr_types(btf_vmlinux); 17457 else 17458 i = 1; 17459 17460 for(; i < n; i++) { 17461 t = btf_type_by_id(btf, i); 17462 if (BTF_INFO_KIND(t->info) != BTF_KIND_DATASEC) 17463 continue; 17464 17465 tname = btf_name_by_offset(btf, t->name_off); 17466 if (!strcmp(tname, ".data..percpu")) 17467 return i; 17468 } 17469 17470 return -ENOENT; 17471 } 17472 17473 /* replace pseudo btf_id with kernel symbol address */ 17474 static int check_pseudo_btf_id(struct bpf_verifier_env *env, 17475 struct bpf_insn *insn, 17476 struct bpf_insn_aux_data *aux) 17477 { 17478 const struct btf_var_secinfo *vsi; 17479 const struct btf_type *datasec; 17480 struct btf_mod_pair *btf_mod; 17481 const struct btf_type *t; 17482 const char *sym_name; 17483 bool percpu = false; 17484 u32 type, id = insn->imm; 17485 struct btf *btf; 17486 s32 datasec_id; 17487 u64 addr; 17488 int i, btf_fd, err; 17489 17490 btf_fd = insn[1].imm; 17491 if (btf_fd) { 17492 btf = btf_get_by_fd(btf_fd); 17493 if (IS_ERR(btf)) { 17494 verbose(env, "invalid module BTF object FD specified.\n"); 17495 return -EINVAL; 17496 } 17497 } else { 17498 if (!btf_vmlinux) { 17499 verbose(env, "kernel is missing BTF, make sure CONFIG_DEBUG_INFO_BTF=y is specified in Kconfig.\n"); 17500 return -EINVAL; 17501 } 17502 btf = btf_vmlinux; 17503 btf_get(btf); 17504 } 17505 17506 t = btf_type_by_id(btf, id); 17507 if (!t) { 17508 verbose(env, "ldimm64 insn specifies invalid btf_id %d.\n", id); 17509 err = -ENOENT; 17510 goto err_put; 17511 } 17512 17513 if (!btf_type_is_var(t) && !btf_type_is_func(t)) { 17514 verbose(env, "pseudo btf_id %d in ldimm64 isn't KIND_VAR or KIND_FUNC\n", id); 17515 err = -EINVAL; 17516 goto err_put; 17517 } 17518 17519 sym_name = btf_name_by_offset(btf, t->name_off); 17520 addr = kallsyms_lookup_name(sym_name); 17521 if (!addr) { 17522 verbose(env, "ldimm64 failed to find the address for kernel symbol '%s'.\n", 17523 sym_name); 17524 err = -ENOENT; 17525 goto err_put; 17526 } 17527 insn[0].imm = (u32)addr; 17528 insn[1].imm = addr >> 32; 17529 17530 if (btf_type_is_func(t)) { 17531 aux->btf_var.reg_type = PTR_TO_MEM | MEM_RDONLY; 17532 aux->btf_var.mem_size = 0; 17533 goto check_btf; 17534 } 17535 17536 datasec_id = find_btf_percpu_datasec(btf); 17537 if (datasec_id > 0) { 17538 datasec = btf_type_by_id(btf, datasec_id); 17539 for_each_vsi(i, datasec, vsi) { 17540 if (vsi->type == id) { 17541 percpu = true; 17542 break; 17543 } 17544 } 17545 } 17546 17547 type = t->type; 17548 t = btf_type_skip_modifiers(btf, type, NULL); 17549 if (percpu) { 17550 aux->btf_var.reg_type = PTR_TO_BTF_ID | MEM_PERCPU; 17551 aux->btf_var.btf = btf; 17552 aux->btf_var.btf_id = type; 17553 } else if (!btf_type_is_struct(t)) { 17554 const struct btf_type *ret; 17555 const char *tname; 17556 u32 tsize; 17557 17558 /* resolve the type size of ksym. 
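 * For example (illustrative only), a plain 'int' ksym resolves below to a
 * 4-byte read-only region: PTR_TO_MEM | MEM_RDONLY with mem_size 4.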
*/ 17559 ret = btf_resolve_size(btf, t, &tsize); 17560 if (IS_ERR(ret)) { 17561 tname = btf_name_by_offset(btf, t->name_off); 17562 verbose(env, "ldimm64 unable to resolve the size of type '%s': %ld\n", 17563 tname, PTR_ERR(ret)); 17564 err = -EINVAL; 17565 goto err_put; 17566 } 17567 aux->btf_var.reg_type = PTR_TO_MEM | MEM_RDONLY; 17568 aux->btf_var.mem_size = tsize; 17569 } else { 17570 aux->btf_var.reg_type = PTR_TO_BTF_ID; 17571 aux->btf_var.btf = btf; 17572 aux->btf_var.btf_id = type; 17573 } 17574 check_btf: 17575 /* check whether we recorded this BTF (and maybe module) already */ 17576 for (i = 0; i < env->used_btf_cnt; i++) { 17577 if (env->used_btfs[i].btf == btf) { 17578 btf_put(btf); 17579 return 0; 17580 } 17581 } 17582 17583 if (env->used_btf_cnt >= MAX_USED_BTFS) { 17584 err = -E2BIG; 17585 goto err_put; 17586 } 17587 17588 btf_mod = &env->used_btfs[env->used_btf_cnt]; 17589 btf_mod->btf = btf; 17590 btf_mod->module = NULL; 17591 17592 /* if we reference variables from kernel module, bump its refcount */ 17593 if (btf_is_module(btf)) { 17594 btf_mod->module = btf_try_get_module(btf); 17595 if (!btf_mod->module) { 17596 err = -ENXIO; 17597 goto err_put; 17598 } 17599 } 17600 17601 env->used_btf_cnt++; 17602 17603 return 0; 17604 err_put: 17605 btf_put(btf); 17606 return err; 17607 } 17608 17609 static bool is_tracing_prog_type(enum bpf_prog_type type) 17610 { 17611 switch (type) { 17612 case BPF_PROG_TYPE_KPROBE: 17613 case BPF_PROG_TYPE_TRACEPOINT: 17614 case BPF_PROG_TYPE_PERF_EVENT: 17615 case BPF_PROG_TYPE_RAW_TRACEPOINT: 17616 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE: 17617 return true; 17618 default: 17619 return false; 17620 } 17621 } 17622 17623 static int check_map_prog_compatibility(struct bpf_verifier_env *env, 17624 struct bpf_map *map, 17625 struct bpf_prog *prog) 17626 17627 { 17628 enum bpf_prog_type prog_type = resolve_prog_type(prog); 17629 17630 if (btf_record_has_field(map->record, BPF_LIST_HEAD) || 17631 btf_record_has_field(map->record, BPF_RB_ROOT)) { 17632 if (is_tracing_prog_type(prog_type)) { 17633 verbose(env, "tracing progs cannot use bpf_{list_head,rb_root} yet\n"); 17634 return -EINVAL; 17635 } 17636 } 17637 17638 if (btf_record_has_field(map->record, BPF_SPIN_LOCK)) { 17639 if (prog_type == BPF_PROG_TYPE_SOCKET_FILTER) { 17640 verbose(env, "socket filter progs cannot use bpf_spin_lock yet\n"); 17641 return -EINVAL; 17642 } 17643 17644 if (is_tracing_prog_type(prog_type)) { 17645 verbose(env, "tracing progs cannot use bpf_spin_lock yet\n"); 17646 return -EINVAL; 17647 } 17648 } 17649 17650 if (btf_record_has_field(map->record, BPF_TIMER)) { 17651 if (is_tracing_prog_type(prog_type)) { 17652 verbose(env, "tracing progs cannot use bpf_timer yet\n"); 17653 return -EINVAL; 17654 } 17655 } 17656 17657 if ((bpf_prog_is_offloaded(prog->aux) || bpf_map_is_offloaded(map)) && 17658 !bpf_offload_prog_map_match(prog, map)) { 17659 verbose(env, "offload device mismatch between prog and map\n"); 17660 return -EINVAL; 17661 } 17662 17663 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { 17664 verbose(env, "bpf_struct_ops map cannot be used in prog\n"); 17665 return -EINVAL; 17666 } 17667 17668 if (prog->aux->sleepable) 17669 switch (map->map_type) { 17670 case BPF_MAP_TYPE_HASH: 17671 case BPF_MAP_TYPE_LRU_HASH: 17672 case BPF_MAP_TYPE_ARRAY: 17673 case BPF_MAP_TYPE_PERCPU_HASH: 17674 case BPF_MAP_TYPE_PERCPU_ARRAY: 17675 case BPF_MAP_TYPE_LRU_PERCPU_HASH: 17676 case BPF_MAP_TYPE_ARRAY_OF_MAPS: 17677 case BPF_MAP_TYPE_HASH_OF_MAPS: 17678 case 
BPF_MAP_TYPE_RINGBUF: 17679 case BPF_MAP_TYPE_USER_RINGBUF: 17680 case BPF_MAP_TYPE_INODE_STORAGE: 17681 case BPF_MAP_TYPE_SK_STORAGE: 17682 case BPF_MAP_TYPE_TASK_STORAGE: 17683 case BPF_MAP_TYPE_CGRP_STORAGE: 17684 break; 17685 default: 17686 verbose(env, 17687 "Sleepable programs can only use array, hash, ringbuf and local storage maps\n"); 17688 return -EINVAL; 17689 } 17690 17691 return 0; 17692 } 17693 17694 static bool bpf_map_is_cgroup_storage(struct bpf_map *map) 17695 { 17696 return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE || 17697 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE); 17698 } 17699 17700 /* find and rewrite pseudo imm in ld_imm64 instructions: 17701 * 17702 * 1. if it accesses map FD, replace it with actual map pointer. 17703 * 2. if it accesses btf_id of a VAR, replace it with pointer to the var. 17704 * 17705 * NOTE: btf_vmlinux is required for converting pseudo btf_id. 17706 */ 17707 static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env) 17708 { 17709 struct bpf_insn *insn = env->prog->insnsi; 17710 int insn_cnt = env->prog->len; 17711 int i, j, err; 17712 17713 err = bpf_prog_calc_tag(env->prog); 17714 if (err) 17715 return err; 17716 17717 for (i = 0; i < insn_cnt; i++, insn++) { 17718 if (BPF_CLASS(insn->code) == BPF_LDX && 17719 ((BPF_MODE(insn->code) != BPF_MEM && BPF_MODE(insn->code) != BPF_MEMSX) || 17720 insn->imm != 0)) { 17721 verbose(env, "BPF_LDX uses reserved fields\n"); 17722 return -EINVAL; 17723 } 17724 17725 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) { 17726 struct bpf_insn_aux_data *aux; 17727 struct bpf_map *map; 17728 struct fd f; 17729 u64 addr; 17730 u32 fd; 17731 17732 if (i == insn_cnt - 1 || insn[1].code != 0 || 17733 insn[1].dst_reg != 0 || insn[1].src_reg != 0 || 17734 insn[1].off != 0) { 17735 verbose(env, "invalid bpf_ld_imm64 insn\n"); 17736 return -EINVAL; 17737 } 17738 17739 if (insn[0].src_reg == 0) 17740 /* valid generic load 64-bit imm */ 17741 goto next_insn; 17742 17743 if (insn[0].src_reg == BPF_PSEUDO_BTF_ID) { 17744 aux = &env->insn_aux_data[i]; 17745 err = check_pseudo_btf_id(env, insn, aux); 17746 if (err) 17747 return err; 17748 goto next_insn; 17749 } 17750 17751 if (insn[0].src_reg == BPF_PSEUDO_FUNC) { 17752 aux = &env->insn_aux_data[i]; 17753 aux->ptr_type = PTR_TO_FUNC; 17754 goto next_insn; 17755 } 17756 17757 /* In final convert_pseudo_ld_imm64() step, this is 17758 * converted into regular 64-bit imm load insn. 
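 *
 * A sketch of the rewrite for the common map-fd case (values illustrative):
 * the program arrives with
 *   BPF_LD_IMM64_RAW(BPF_REG_1, BPF_PSEUDO_MAP_FD, map_fd)
 * and the code below overwrites insn[0].imm/insn[1].imm with the low/high
 * 32 bits of the resolved 'struct bpf_map *', so later passes see a kernel
 * pointer instead of the user-supplied fd.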
17759 */ 17760 switch (insn[0].src_reg) { 17761 case BPF_PSEUDO_MAP_VALUE: 17762 case BPF_PSEUDO_MAP_IDX_VALUE: 17763 break; 17764 case BPF_PSEUDO_MAP_FD: 17765 case BPF_PSEUDO_MAP_IDX: 17766 if (insn[1].imm == 0) 17767 break; 17768 fallthrough; 17769 default: 17770 verbose(env, "unrecognized bpf_ld_imm64 insn\n"); 17771 return -EINVAL; 17772 } 17773 17774 switch (insn[0].src_reg) { 17775 case BPF_PSEUDO_MAP_IDX_VALUE: 17776 case BPF_PSEUDO_MAP_IDX: 17777 if (bpfptr_is_null(env->fd_array)) { 17778 verbose(env, "fd_idx without fd_array is invalid\n"); 17779 return -EPROTO; 17780 } 17781 if (copy_from_bpfptr_offset(&fd, env->fd_array, 17782 insn[0].imm * sizeof(fd), 17783 sizeof(fd))) 17784 return -EFAULT; 17785 break; 17786 default: 17787 fd = insn[0].imm; 17788 break; 17789 } 17790 17791 f = fdget(fd); 17792 map = __bpf_map_get(f); 17793 if (IS_ERR(map)) { 17794 verbose(env, "fd %d is not pointing to valid bpf_map\n", 17795 insn[0].imm); 17796 return PTR_ERR(map); 17797 } 17798 17799 err = check_map_prog_compatibility(env, map, env->prog); 17800 if (err) { 17801 fdput(f); 17802 return err; 17803 } 17804 17805 aux = &env->insn_aux_data[i]; 17806 if (insn[0].src_reg == BPF_PSEUDO_MAP_FD || 17807 insn[0].src_reg == BPF_PSEUDO_MAP_IDX) { 17808 addr = (unsigned long)map; 17809 } else { 17810 u32 off = insn[1].imm; 17811 17812 if (off >= BPF_MAX_VAR_OFF) { 17813 verbose(env, "direct value offset of %u is not allowed\n", off); 17814 fdput(f); 17815 return -EINVAL; 17816 } 17817 17818 if (!map->ops->map_direct_value_addr) { 17819 verbose(env, "no direct value access support for this map type\n"); 17820 fdput(f); 17821 return -EINVAL; 17822 } 17823 17824 err = map->ops->map_direct_value_addr(map, &addr, off); 17825 if (err) { 17826 verbose(env, "invalid access to map value pointer, value_size=%u off=%u\n", 17827 map->value_size, off); 17828 fdput(f); 17829 return err; 17830 } 17831 17832 aux->map_off = off; 17833 addr += off; 17834 } 17835 17836 insn[0].imm = (u32)addr; 17837 insn[1].imm = addr >> 32; 17838 17839 /* check whether we recorded this map already */ 17840 for (j = 0; j < env->used_map_cnt; j++) { 17841 if (env->used_maps[j] == map) { 17842 aux->map_index = j; 17843 fdput(f); 17844 goto next_insn; 17845 } 17846 } 17847 17848 if (env->used_map_cnt >= MAX_USED_MAPS) { 17849 fdput(f); 17850 return -E2BIG; 17851 } 17852 17853 /* hold the map. If the program is rejected by verifier, 17854 * the map will be released by release_maps() or it 17855 * will be used by the valid program until it's unloaded 17856 * and all maps are released in free_used_maps() 17857 */ 17858 bpf_map_inc(map); 17859 17860 aux->map_index = env->used_map_cnt; 17861 env->used_maps[env->used_map_cnt++] = map; 17862 17863 if (bpf_map_is_cgroup_storage(map) && 17864 bpf_cgroup_storage_assign(env->prog->aux, map)) { 17865 verbose(env, "only one cgroup storage of each type is allowed\n"); 17866 fdput(f); 17867 return -EBUSY; 17868 } 17869 17870 fdput(f); 17871 next_insn: 17872 insn++; 17873 i++; 17874 continue; 17875 } 17876 17877 /* Basic sanity check before we invest more work here. */ 17878 if (!bpf_opcode_in_insntable(insn->code)) { 17879 verbose(env, "unknown opcode %02x\n", insn->code); 17880 return -EINVAL; 17881 } 17882 } 17883 17884 /* now all pseudo BPF_LD_IMM64 instructions load valid 17885 * 'struct bpf_map *' into a register instead of user map_fd. 17886 * These pointers will be used later by verifier to validate map access. 
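 * (For the BPF_PSEUDO_MAP_IDX/BPF_PSEUDO_MAP_IDX_VALUE variants the fd was
 * not read from insn[0].imm directly but looked up as an index into the
 * fd_array supplied at program load time, see copy_from_bpfptr_offset()
 * above.)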
*/ 17888 return 0; 17889 } 17890 17891 /* drop refcnt of maps used by the rejected program */ 17892 static void release_maps(struct bpf_verifier_env *env) 17893 { 17894 __bpf_free_used_maps(env->prog->aux, env->used_maps, 17895 env->used_map_cnt); 17896 } 17897 17898 /* drop refcnt of btfs used by the rejected program */ 17899 static void release_btfs(struct bpf_verifier_env *env) 17900 { 17901 __bpf_free_used_btfs(env->prog->aux, env->used_btfs, 17902 env->used_btf_cnt); 17903 } 17904 17905 /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */ 17906 static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env) 17907 { 17908 struct bpf_insn *insn = env->prog->insnsi; 17909 int insn_cnt = env->prog->len; 17910 int i; 17911 17912 for (i = 0; i < insn_cnt; i++, insn++) { 17913 if (insn->code != (BPF_LD | BPF_IMM | BPF_DW)) 17914 continue; 17915 if (insn->src_reg == BPF_PSEUDO_FUNC) 17916 continue; 17917 insn->src_reg = 0; 17918 } 17919 } 17920 17921 /* single env->prog->insnsi[off] instruction was replaced with the range 17922 * insnsi[off, off + cnt). Adjust corresponding insn_aux_data by copying 17923 * [0, off) and [off, end) to new locations, so the patched range stays zero 17924 */ 17925 static void adjust_insn_aux_data(struct bpf_verifier_env *env, 17926 struct bpf_insn_aux_data *new_data, 17927 struct bpf_prog *new_prog, u32 off, u32 cnt) 17928 { 17929 struct bpf_insn_aux_data *old_data = env->insn_aux_data; 17930 struct bpf_insn *insn = new_prog->insnsi; 17931 u32 old_seen = old_data[off].seen; 17932 u32 prog_len; 17933 int i; 17934 17935 /* aux info at OFF always needs adjustment, no matter whether the fast path 17936 * (cnt == 1) is taken or not. There is no guarantee INSN at OFF is the 17937 * original insn at old prog. 17938 */ 17939 old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1); 17940 17941 if (cnt == 1) 17942 return; 17943 prog_len = new_prog->len; 17944 17945 memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off); 17946 memcpy(new_data + off + cnt - 1, old_data + off, 17947 sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1)); 17948 for (i = off; i < off + cnt - 1; i++) { 17949 /* Expand insnsi[off]'s seen count to the patched range. */ 17950 new_data[i].seen = old_seen; 17951 new_data[i].zext_dst = insn_has_def32(env, insn + i); 17952 } 17953 env->insn_aux_data = new_data; 17954 vfree(old_data); 17955 } 17956 17957 static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len) 17958 { 17959 int i; 17960 17961 if (len == 1) 17962 return; 17963 /* NOTE: fake 'exit' subprog should be updated as well.
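 * Worked example (numbers made up): if the single insn at off == 10 was
 * patched into a 3-insn sequence (len == 3), every recorded start > 10
 * below shifts by len - 1 == 2, while starts at or before 10 are untouched.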
*/ 17964 for (i = 0; i <= env->subprog_cnt; i++) { 17965 if (env->subprog_info[i].start <= off) 17966 continue; 17967 env->subprog_info[i].start += len - 1; 17968 } 17969 } 17970 17971 static void adjust_poke_descs(struct bpf_prog *prog, u32 off, u32 len) 17972 { 17973 struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab; 17974 int i, sz = prog->aux->size_poke_tab; 17975 struct bpf_jit_poke_descriptor *desc; 17976 17977 for (i = 0; i < sz; i++) { 17978 desc = &tab[i]; 17979 if (desc->insn_idx <= off) 17980 continue; 17981 desc->insn_idx += len - 1; 17982 } 17983 } 17984 17985 static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off, 17986 const struct bpf_insn *patch, u32 len) 17987 { 17988 struct bpf_prog *new_prog; 17989 struct bpf_insn_aux_data *new_data = NULL; 17990 17991 if (len > 1) { 17992 new_data = vzalloc(array_size(env->prog->len + len - 1, 17993 sizeof(struct bpf_insn_aux_data))); 17994 if (!new_data) 17995 return NULL; 17996 } 17997 17998 new_prog = bpf_patch_insn_single(env->prog, off, patch, len); 17999 if (IS_ERR(new_prog)) { 18000 if (PTR_ERR(new_prog) == -ERANGE) 18001 verbose(env, 18002 "insn %d cannot be patched due to 16-bit range\n", 18003 env->insn_aux_data[off].orig_idx); 18004 vfree(new_data); 18005 return NULL; 18006 } 18007 adjust_insn_aux_data(env, new_data, new_prog, off, len); 18008 adjust_subprog_starts(env, off, len); 18009 adjust_poke_descs(new_prog, off, len); 18010 return new_prog; 18011 } 18012 18013 static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env, 18014 u32 off, u32 cnt) 18015 { 18016 int i, j; 18017 18018 /* find first prog starting at or after off (first to remove) */ 18019 for (i = 0; i < env->subprog_cnt; i++) 18020 if (env->subprog_info[i].start >= off) 18021 break; 18022 /* find first prog starting at or after off + cnt (first to stay) */ 18023 for (j = i; j < env->subprog_cnt; j++) 18024 if (env->subprog_info[j].start >= off + cnt) 18025 break; 18026 /* if j doesn't start exactly at off + cnt, we are just removing 18027 * the front of previous prog 18028 */ 18029 if (env->subprog_info[j].start != off + cnt) 18030 j--; 18031 18032 if (j > i) { 18033 struct bpf_prog_aux *aux = env->prog->aux; 18034 int move; 18035 18036 /* move fake 'exit' subprog as well */ 18037 move = env->subprog_cnt + 1 - j; 18038 18039 memmove(env->subprog_info + i, 18040 env->subprog_info + j, 18041 sizeof(*env->subprog_info) * move); 18042 env->subprog_cnt -= j - i; 18043 18044 /* remove func_info */ 18045 if (aux->func_info) { 18046 move = aux->func_info_cnt - j; 18047 18048 memmove(aux->func_info + i, 18049 aux->func_info + j, 18050 sizeof(*aux->func_info) * move); 18051 aux->func_info_cnt -= j - i; 18052 /* func_info->insn_off is set after all code rewrites, 18053 * in adjust_btf_func() - no need to adjust 18054 */ 18055 } 18056 } else { 18057 /* convert i from "first prog to remove" to "first to adjust" */ 18058 if (env->subprog_info[i].start == off) 18059 i++; 18060 } 18061 18062 /* update fake 'exit' subprog as well */ 18063 for (; i <= env->subprog_cnt; i++) 18064 env->subprog_info[i].start -= cnt; 18065 18066 return 0; 18067 } 18068 18069 static int bpf_adj_linfo_after_remove(struct bpf_verifier_env *env, u32 off, 18070 u32 cnt) 18071 { 18072 struct bpf_prog *prog = env->prog; 18073 u32 i, l_off, l_cnt, nr_linfo; 18074 struct bpf_line_info *linfo; 18075 18076 nr_linfo = prog->aux->nr_linfo; 18077 if (!nr_linfo) 18078 return 0; 18079 18080 linfo = prog->aux->linfo; 18081 18082 /* find first line info to 
remove, count lines to be removed */ 18083 for (i = 0; i < nr_linfo; i++) 18084 if (linfo[i].insn_off >= off) 18085 break; 18086 18087 l_off = i; 18088 l_cnt = 0; 18089 for (; i < nr_linfo; i++) 18090 if (linfo[i].insn_off < off + cnt) 18091 l_cnt++; 18092 else 18093 break; 18094 18095 /* First live insn doesn't match first live linfo, it needs to "inherit" 18096 * last removed linfo. prog is already modified, so prog->len == off 18097 * means no live instructions after (tail of the program was removed). 18098 */ 18099 if (prog->len != off && l_cnt && 18100 (i == nr_linfo || linfo[i].insn_off != off + cnt)) { 18101 l_cnt--; 18102 linfo[--i].insn_off = off + cnt; 18103 } 18104 18105 /* remove the line info which refer to the removed instructions */ 18106 if (l_cnt) { 18107 memmove(linfo + l_off, linfo + i, 18108 sizeof(*linfo) * (nr_linfo - i)); 18109 18110 prog->aux->nr_linfo -= l_cnt; 18111 nr_linfo = prog->aux->nr_linfo; 18112 } 18113 18114 /* pull all linfo[i].insn_off >= off + cnt in by cnt */ 18115 for (i = l_off; i < nr_linfo; i++) 18116 linfo[i].insn_off -= cnt; 18117 18118 /* fix up all subprogs (incl. 'exit') which start >= off */ 18119 for (i = 0; i <= env->subprog_cnt; i++) 18120 if (env->subprog_info[i].linfo_idx > l_off) { 18121 /* program may have started in the removed region but 18122 * may not be fully removed 18123 */ 18124 if (env->subprog_info[i].linfo_idx >= l_off + l_cnt) 18125 env->subprog_info[i].linfo_idx -= l_cnt; 18126 else 18127 env->subprog_info[i].linfo_idx = l_off; 18128 } 18129 18130 return 0; 18131 } 18132 18133 static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt) 18134 { 18135 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; 18136 unsigned int orig_prog_len = env->prog->len; 18137 int err; 18138 18139 if (bpf_prog_is_offloaded(env->prog->aux)) 18140 bpf_prog_offload_remove_insns(env, off, cnt); 18141 18142 err = bpf_remove_insns(env->prog, off, cnt); 18143 if (err) 18144 return err; 18145 18146 err = adjust_subprog_starts_after_remove(env, off, cnt); 18147 if (err) 18148 return err; 18149 18150 err = bpf_adj_linfo_after_remove(env, off, cnt); 18151 if (err) 18152 return err; 18153 18154 memmove(aux_data + off, aux_data + off + cnt, 18155 sizeof(*aux_data) * (orig_prog_len - off - cnt)); 18156 18157 return 0; 18158 } 18159 18160 /* The verifier does more data flow analysis than llvm and will not 18161 * explore branches that are dead at run time. Malicious programs can 18162 * have dead code too. Therefore replace all dead at-run-time code 18163 * with 'ja -1'. 18164 * 18165 * Just nops are not optimal, e.g. if they would sit at the end of the 18166 * program and through another bug we would manage to jump there, then 18167 * we'd execute beyond program memory otherwise. Returning exception 18168 * code also wouldn't work since we can have subprogs where the dead 18169 * code could be located. 
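 *
 * With 'ja -1' the next pc evaluates to pc + off + 1 == pc, i.e. every
 * dead insn becomes a self-loop; even a stray jump into the dead region
 * can then never run past it (a sketch of the reasoning behind the
 * BPF_JMP_IMM(BPF_JA, 0, 0, -1) trap used below).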
18170 */ 18171 static void sanitize_dead_code(struct bpf_verifier_env *env) 18172 { 18173 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; 18174 struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1); 18175 struct bpf_insn *insn = env->prog->insnsi; 18176 const int insn_cnt = env->prog->len; 18177 int i; 18178 18179 for (i = 0; i < insn_cnt; i++) { 18180 if (aux_data[i].seen) 18181 continue; 18182 memcpy(insn + i, &trap, sizeof(trap)); 18183 aux_data[i].zext_dst = false; 18184 } 18185 } 18186 18187 static bool insn_is_cond_jump(u8 code) 18188 { 18189 u8 op; 18190 18191 op = BPF_OP(code); 18192 if (BPF_CLASS(code) == BPF_JMP32) 18193 return op != BPF_JA; 18194 18195 if (BPF_CLASS(code) != BPF_JMP) 18196 return false; 18197 18198 return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL; 18199 } 18200 18201 static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env) 18202 { 18203 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; 18204 struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0); 18205 struct bpf_insn *insn = env->prog->insnsi; 18206 const int insn_cnt = env->prog->len; 18207 int i; 18208 18209 for (i = 0; i < insn_cnt; i++, insn++) { 18210 if (!insn_is_cond_jump(insn->code)) 18211 continue; 18212 18213 if (!aux_data[i + 1].seen) 18214 ja.off = insn->off; 18215 else if (!aux_data[i + 1 + insn->off].seen) 18216 ja.off = 0; 18217 else 18218 continue; 18219 18220 if (bpf_prog_is_offloaded(env->prog->aux)) 18221 bpf_prog_offload_replace_insn(env, i, &ja); 18222 18223 memcpy(insn, &ja, sizeof(ja)); 18224 } 18225 } 18226 18227 static int opt_remove_dead_code(struct bpf_verifier_env *env) 18228 { 18229 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; 18230 int insn_cnt = env->prog->len; 18231 int i, err; 18232 18233 for (i = 0; i < insn_cnt; i++) { 18234 int j; 18235 18236 j = 0; 18237 while (i + j < insn_cnt && !aux_data[i + j].seen) 18238 j++; 18239 if (!j) 18240 continue; 18241 18242 err = verifier_remove_insns(env, i, j); 18243 if (err) 18244 return err; 18245 insn_cnt = env->prog->len; 18246 } 18247 18248 return 0; 18249 } 18250 18251 static int opt_remove_nops(struct bpf_verifier_env *env) 18252 { 18253 const struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0); 18254 struct bpf_insn *insn = env->prog->insnsi; 18255 int insn_cnt = env->prog->len; 18256 int i, err; 18257 18258 for (i = 0; i < insn_cnt; i++) { 18259 if (memcmp(&insn[i], &ja, sizeof(ja))) 18260 continue; 18261 18262 err = verifier_remove_insns(env, i, 1); 18263 if (err) 18264 return err; 18265 insn_cnt--; 18266 i--; 18267 } 18268 18269 return 0; 18270 } 18271 18272 static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env, 18273 const union bpf_attr *attr) 18274 { 18275 struct bpf_insn *patch, zext_patch[2], rnd_hi32_patch[4]; 18276 struct bpf_insn_aux_data *aux = env->insn_aux_data; 18277 int i, patch_len, delta = 0, len = env->prog->len; 18278 struct bpf_insn *insns = env->prog->insnsi; 18279 struct bpf_prog *new_prog; 18280 bool rnd_hi32; 18281 18282 rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32; 18283 zext_patch[1] = BPF_ZEXT_REG(0); 18284 rnd_hi32_patch[1] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0); 18285 rnd_hi32_patch[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32); 18286 rnd_hi32_patch[3] = BPF_ALU64_REG(BPF_OR, 0, BPF_REG_AX); 18287 for (i = 0; i < len; i++) { 18288 int adj_idx = i + delta; 18289 struct bpf_insn insn; 18290 int load_reg; 18291 18292 insn = insns[adj_idx]; 18293 load_reg = insn_def_regno(&insn); 18294 if (!aux[adj_idx].zext_dst) { 18295 u8 code, class; 18296 
u32 imm_rnd; 18297 18298 if (!rnd_hi32) 18299 continue; 18300 18301 code = insn.code; 18302 class = BPF_CLASS(code); 18303 if (load_reg == -1) 18304 continue; 18305 18306 /* NOTE: arg "reg" (the fourth one) is only used for 18307 * BPF_STX + SRC_OP, so it is safe to pass NULL 18308 * here. 18309 */ 18310 if (is_reg64(env, &insn, load_reg, NULL, DST_OP)) { 18311 if (class == BPF_LD && 18312 BPF_MODE(code) == BPF_IMM) 18313 i++; 18314 continue; 18315 } 18316 18317 /* ctx load could be transformed into wider load. */ 18318 if (class == BPF_LDX && 18319 aux[adj_idx].ptr_type == PTR_TO_CTX) 18320 continue; 18321 18322 imm_rnd = get_random_u32(); 18323 rnd_hi32_patch[0] = insn; 18324 rnd_hi32_patch[1].imm = imm_rnd; 18325 rnd_hi32_patch[3].dst_reg = load_reg; 18326 patch = rnd_hi32_patch; 18327 patch_len = 4; 18328 goto apply_patch_buffer; 18329 } 18330 18331 /* Add in an zero-extend instruction if a) the JIT has requested 18332 * it or b) it's a CMPXCHG. 18333 * 18334 * The latter is because: BPF_CMPXCHG always loads a value into 18335 * R0, therefore always zero-extends. However some archs' 18336 * equivalent instruction only does this load when the 18337 * comparison is successful. This detail of CMPXCHG is 18338 * orthogonal to the general zero-extension behaviour of the 18339 * CPU, so it's treated independently of bpf_jit_needs_zext. 18340 */ 18341 if (!bpf_jit_needs_zext() && !is_cmpxchg_insn(&insn)) 18342 continue; 18343 18344 /* Zero-extension is done by the caller. */ 18345 if (bpf_pseudo_kfunc_call(&insn)) 18346 continue; 18347 18348 if (WARN_ON(load_reg == -1)) { 18349 verbose(env, "verifier bug. zext_dst is set, but no reg is defined\n"); 18350 return -EFAULT; 18351 } 18352 18353 zext_patch[0] = insn; 18354 zext_patch[1].dst_reg = load_reg; 18355 zext_patch[1].src_reg = load_reg; 18356 patch = zext_patch; 18357 patch_len = 2; 18358 apply_patch_buffer: 18359 new_prog = bpf_patch_insn_data(env, adj_idx, patch, patch_len); 18360 if (!new_prog) 18361 return -ENOMEM; 18362 env->prog = new_prog; 18363 insns = new_prog->insnsi; 18364 aux = env->insn_aux_data; 18365 delta += patch_len - 1; 18366 } 18367 18368 return 0; 18369 } 18370 18371 /* convert load instructions that access fields of a context type into a 18372 * sequence of instructions that access fields of the underlying structure: 18373 * struct __sk_buff -> struct sk_buff 18374 * struct bpf_sock_ops -> struct sock 18375 */ 18376 static int convert_ctx_accesses(struct bpf_verifier_env *env) 18377 { 18378 const struct bpf_verifier_ops *ops = env->ops; 18379 int i, cnt, size, ctx_field_size, delta = 0; 18380 const int insn_cnt = env->prog->len; 18381 struct bpf_insn insn_buf[16], *insn; 18382 u32 target_size, size_default, off; 18383 struct bpf_prog *new_prog; 18384 enum bpf_access_type type; 18385 bool is_narrower_load; 18386 18387 if (ops->gen_prologue || env->seen_direct_write) { 18388 if (!ops->gen_prologue) { 18389 verbose(env, "bpf verifier is misconfigured\n"); 18390 return -EINVAL; 18391 } 18392 cnt = ops->gen_prologue(insn_buf, env->seen_direct_write, 18393 env->prog); 18394 if (cnt >= ARRAY_SIZE(insn_buf)) { 18395 verbose(env, "bpf verifier is misconfigured\n"); 18396 return -EINVAL; 18397 } else if (cnt) { 18398 new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt); 18399 if (!new_prog) 18400 return -ENOMEM; 18401 18402 env->prog = new_prog; 18403 delta += cnt - 1; 18404 } 18405 } 18406 18407 if (bpf_prog_is_offloaded(env->prog->aux)) 18408 return 0; 18409 18410 insn = env->prog->insnsi + delta; 18411 18412 for (i = 0; i < 
insn_cnt; i++, insn++) { 18413 bpf_convert_ctx_access_t convert_ctx_access; 18414 u8 mode; 18415 18416 if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) || 18417 insn->code == (BPF_LDX | BPF_MEM | BPF_H) || 18418 insn->code == (BPF_LDX | BPF_MEM | BPF_W) || 18419 insn->code == (BPF_LDX | BPF_MEM | BPF_DW) || 18420 insn->code == (BPF_LDX | BPF_MEMSX | BPF_B) || 18421 insn->code == (BPF_LDX | BPF_MEMSX | BPF_H) || 18422 insn->code == (BPF_LDX | BPF_MEMSX | BPF_W)) { 18423 type = BPF_READ; 18424 } else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) || 18425 insn->code == (BPF_STX | BPF_MEM | BPF_H) || 18426 insn->code == (BPF_STX | BPF_MEM | BPF_W) || 18427 insn->code == (BPF_STX | BPF_MEM | BPF_DW) || 18428 insn->code == (BPF_ST | BPF_MEM | BPF_B) || 18429 insn->code == (BPF_ST | BPF_MEM | BPF_H) || 18430 insn->code == (BPF_ST | BPF_MEM | BPF_W) || 18431 insn->code == (BPF_ST | BPF_MEM | BPF_DW)) { 18432 type = BPF_WRITE; 18433 } else { 18434 continue; 18435 } 18436 18437 if (type == BPF_WRITE && 18438 env->insn_aux_data[i + delta].sanitize_stack_spill) { 18439 struct bpf_insn patch[] = { 18440 *insn, 18441 BPF_ST_NOSPEC(), 18442 }; 18443 18444 cnt = ARRAY_SIZE(patch); 18445 new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt); 18446 if (!new_prog) 18447 return -ENOMEM; 18448 18449 delta += cnt - 1; 18450 env->prog = new_prog; 18451 insn = new_prog->insnsi + i + delta; 18452 continue; 18453 } 18454 18455 switch ((int)env->insn_aux_data[i + delta].ptr_type) { 18456 case PTR_TO_CTX: 18457 if (!ops->convert_ctx_access) 18458 continue; 18459 convert_ctx_access = ops->convert_ctx_access; 18460 break; 18461 case PTR_TO_SOCKET: 18462 case PTR_TO_SOCK_COMMON: 18463 convert_ctx_access = bpf_sock_convert_ctx_access; 18464 break; 18465 case PTR_TO_TCP_SOCK: 18466 convert_ctx_access = bpf_tcp_sock_convert_ctx_access; 18467 break; 18468 case PTR_TO_XDP_SOCK: 18469 convert_ctx_access = bpf_xdp_sock_convert_ctx_access; 18470 break; 18471 case PTR_TO_BTF_ID: 18472 case PTR_TO_BTF_ID | PTR_UNTRUSTED: 18473 /* PTR_TO_BTF_ID | MEM_ALLOC always has a valid lifetime, unlike 18474 * PTR_TO_BTF_ID, and an active ref_obj_id, but the same cannot 18475 * be said once it is marked PTR_UNTRUSTED, hence we must handle 18476 * any faults for loads into such types. BPF_WRITE is disallowed 18477 * for this case. 18478 */ 18479 case PTR_TO_BTF_ID | MEM_ALLOC | PTR_UNTRUSTED: 18480 if (type == BPF_READ) { 18481 if (BPF_MODE(insn->code) == BPF_MEM) 18482 insn->code = BPF_LDX | BPF_PROBE_MEM | 18483 BPF_SIZE((insn)->code); 18484 else 18485 insn->code = BPF_LDX | BPF_PROBE_MEMSX | 18486 BPF_SIZE((insn)->code); 18487 env->prog->aux->num_exentries++; 18488 } 18489 continue; 18490 default: 18491 continue; 18492 } 18493 18494 ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size; 18495 size = BPF_LDST_BYTES(insn); 18496 mode = BPF_MODE(insn->code); 18497 18498 /* If the read access is a narrower load of the field, 18499 * convert to a 4/8-byte load, to minimum program type specific 18500 * convert_ctx_access changes. If conversion is successful, 18501 * we will apply proper mask to the result. 
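 *
 * Illustrative sketch (offsets made up, little-endian host): a 1-byte
 * read at offset 2 of a 4-byte field becomes a 4-byte read at offset 0,
 * followed by roughly
 *   BPF_ALU32_IMM(BPF_RSH, dst_reg, 16),
 *   BPF_ALU32_IMM(BPF_AND, dst_reg, 0xff),
 * emitted below to extract the byte that was originally requested.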
18502 */ 18503 is_narrower_load = size < ctx_field_size; 18504 size_default = bpf_ctx_off_adjust_machine(ctx_field_size); 18505 off = insn->off; 18506 if (is_narrower_load) { 18507 u8 size_code; 18508 18509 if (type == BPF_WRITE) { 18510 verbose(env, "bpf verifier narrow ctx access misconfigured\n"); 18511 return -EINVAL; 18512 } 18513 18514 size_code = BPF_H; 18515 if (ctx_field_size == 4) 18516 size_code = BPF_W; 18517 else if (ctx_field_size == 8) 18518 size_code = BPF_DW; 18519 18520 insn->off = off & ~(size_default - 1); 18521 insn->code = BPF_LDX | BPF_MEM | size_code; 18522 } 18523 18524 target_size = 0; 18525 cnt = convert_ctx_access(type, insn, insn_buf, env->prog, 18526 &target_size); 18527 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) || 18528 (ctx_field_size && !target_size)) { 18529 verbose(env, "bpf verifier is misconfigured\n"); 18530 return -EINVAL; 18531 } 18532 18533 if (is_narrower_load && size < target_size) { 18534 u8 shift = bpf_ctx_narrow_access_offset( 18535 off, size, size_default) * 8; 18536 if (shift && cnt + 1 >= ARRAY_SIZE(insn_buf)) { 18537 verbose(env, "bpf verifier narrow ctx load misconfigured\n"); 18538 return -EINVAL; 18539 } 18540 if (ctx_field_size <= 4) { 18541 if (shift) 18542 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH, 18543 insn->dst_reg, 18544 shift); 18545 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg, 18546 (1 << size * 8) - 1); 18547 } else { 18548 if (shift) 18549 insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH, 18550 insn->dst_reg, 18551 shift); 18552 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg, 18553 (1ULL << size * 8) - 1); 18554 } 18555 } 18556 if (mode == BPF_MEMSX) 18557 insn_buf[cnt++] = BPF_RAW_INSN(BPF_ALU64 | BPF_MOV | BPF_X, 18558 insn->dst_reg, insn->dst_reg, 18559 size * 8, 0); 18560 18561 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 18562 if (!new_prog) 18563 return -ENOMEM; 18564 18565 delta += cnt - 1; 18566 18567 /* keep walking new program and skip insns we just inserted */ 18568 env->prog = new_prog; 18569 insn = new_prog->insnsi + i + delta; 18570 } 18571 18572 return 0; 18573 } 18574 18575 static int jit_subprogs(struct bpf_verifier_env *env) 18576 { 18577 struct bpf_prog *prog = env->prog, **func, *tmp; 18578 int i, j, subprog_start, subprog_end = 0, len, subprog; 18579 struct bpf_map *map_ptr; 18580 struct bpf_insn *insn; 18581 void *old_bpf_func; 18582 int err, num_exentries; 18583 18584 if (env->subprog_cnt <= 1) 18585 return 0; 18586 18587 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { 18588 if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn)) 18589 continue; 18590 18591 /* Upon error here we cannot fall back to interpreter but 18592 * need a hard reject of the program. Thus -EFAULT is 18593 * propagated in any case. 18594 */ 18595 subprog = find_subprog(env, i + insn->imm + 1); 18596 if (subprog < 0) { 18597 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", 18598 i + insn->imm + 1); 18599 return -EFAULT; 18600 } 18601 /* temporarily remember subprog id inside insn instead of 18602 * aux_data, since next loop will split up all insns into funcs 18603 */ 18604 insn->off = subprog; 18605 /* remember original imm in case JIT fails and fallback 18606 * to interpreter will be needed 18607 */ 18608 env->insn_aux_data[i].call_imm = insn->imm; 18609 /* point imm to __bpf_call_base+1 from JITs point of view */ 18610 insn->imm = 1; 18611 if (bpf_pseudo_func(insn)) 18612 /* jit (e.g. x86_64) may emit fewer instructions 18613 * if it learns a u32 imm is the same as a u64 imm. 
18614 * Force a non zero here. 18615 */ 18616 insn[1].imm = 1; 18617 } 18618 18619 err = bpf_prog_alloc_jited_linfo(prog); 18620 if (err) 18621 goto out_undo_insn; 18622 18623 err = -ENOMEM; 18624 func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL); 18625 if (!func) 18626 goto out_undo_insn; 18627 18628 for (i = 0; i < env->subprog_cnt; i++) { 18629 subprog_start = subprog_end; 18630 subprog_end = env->subprog_info[i + 1].start; 18631 18632 len = subprog_end - subprog_start; 18633 /* bpf_prog_run() doesn't call subprogs directly, 18634 * hence main prog stats include the runtime of subprogs. 18635 * subprogs don't have IDs and not reachable via prog_get_next_id 18636 * func[i]->stats will never be accessed and stays NULL 18637 */ 18638 func[i] = bpf_prog_alloc_no_stats(bpf_prog_size(len), GFP_USER); 18639 if (!func[i]) 18640 goto out_free; 18641 memcpy(func[i]->insnsi, &prog->insnsi[subprog_start], 18642 len * sizeof(struct bpf_insn)); 18643 func[i]->type = prog->type; 18644 func[i]->len = len; 18645 if (bpf_prog_calc_tag(func[i])) 18646 goto out_free; 18647 func[i]->is_func = 1; 18648 func[i]->aux->func_idx = i; 18649 /* Below members will be freed only at prog->aux */ 18650 func[i]->aux->btf = prog->aux->btf; 18651 func[i]->aux->func_info = prog->aux->func_info; 18652 func[i]->aux->func_info_cnt = prog->aux->func_info_cnt; 18653 func[i]->aux->poke_tab = prog->aux->poke_tab; 18654 func[i]->aux->size_poke_tab = prog->aux->size_poke_tab; 18655 18656 for (j = 0; j < prog->aux->size_poke_tab; j++) { 18657 struct bpf_jit_poke_descriptor *poke; 18658 18659 poke = &prog->aux->poke_tab[j]; 18660 if (poke->insn_idx < subprog_end && 18661 poke->insn_idx >= subprog_start) 18662 poke->aux = func[i]->aux; 18663 } 18664 18665 func[i]->aux->name[0] = 'F'; 18666 func[i]->aux->stack_depth = env->subprog_info[i].stack_depth; 18667 func[i]->jit_requested = 1; 18668 func[i]->blinding_requested = prog->blinding_requested; 18669 func[i]->aux->kfunc_tab = prog->aux->kfunc_tab; 18670 func[i]->aux->kfunc_btf_tab = prog->aux->kfunc_btf_tab; 18671 func[i]->aux->linfo = prog->aux->linfo; 18672 func[i]->aux->nr_linfo = prog->aux->nr_linfo; 18673 func[i]->aux->jited_linfo = prog->aux->jited_linfo; 18674 func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx; 18675 num_exentries = 0; 18676 insn = func[i]->insnsi; 18677 for (j = 0; j < func[i]->len; j++, insn++) { 18678 if (BPF_CLASS(insn->code) == BPF_LDX && 18679 (BPF_MODE(insn->code) == BPF_PROBE_MEM || 18680 BPF_MODE(insn->code) == BPF_PROBE_MEMSX)) 18681 num_exentries++; 18682 } 18683 func[i]->aux->num_exentries = num_exentries; 18684 func[i]->aux->tail_call_reachable = env->subprog_info[i].tail_call_reachable; 18685 func[i]->aux->exception_cb = env->subprog_info[i].is_exception_cb; 18686 if (!i) 18687 func[i]->aux->exception_boundary = env->seen_exception; 18688 func[i] = bpf_int_jit_compile(func[i]); 18689 if (!func[i]->jited) { 18690 err = -ENOTSUPP; 18691 goto out_free; 18692 } 18693 cond_resched(); 18694 } 18695 18696 /* at this point all bpf functions were successfully JITed 18697 * now populate all bpf_calls with correct addresses and 18698 * run last pass of JIT 18699 */ 18700 for (i = 0; i < env->subprog_cnt; i++) { 18701 insn = func[i]->insnsi; 18702 for (j = 0; j < func[i]->len; j++, insn++) { 18703 if (bpf_pseudo_func(insn)) { 18704 subprog = insn->off; 18705 insn[0].imm = (u32)(long)func[subprog]->bpf_func; 18706 insn[1].imm = ((u64)(long)func[subprog]->bpf_func) >> 32; 18707 continue; 18708 } 18709 if (!bpf_pseudo_call(insn)) 18710 continue; 
18711 subprog = insn->off; 18712 insn->imm = BPF_CALL_IMM(func[subprog]->bpf_func); 18713 } 18714 18715 /* we use the aux data to keep a list of the start addresses 18716 * of the JITed images for each function in the program 18717 * 18718 * for some architectures, such as powerpc64, the imm field 18719 * might not be large enough to hold the offset of the start 18720 * address of the callee's JITed image from __bpf_call_base 18721 * 18722 * in such cases, we can look up the start address of a callee 18723 * by using its subprog id, available from the off field of 18724 * the call instruction, as an index for this list 18725 */ 18726 func[i]->aux->func = func; 18727 func[i]->aux->func_cnt = env->subprog_cnt - env->hidden_subprog_cnt; 18728 func[i]->aux->real_func_cnt = env->subprog_cnt; 18729 } 18730 for (i = 0; i < env->subprog_cnt; i++) { 18731 old_bpf_func = func[i]->bpf_func; 18732 tmp = bpf_int_jit_compile(func[i]); 18733 if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) { 18734 verbose(env, "JIT doesn't support bpf-to-bpf calls\n"); 18735 err = -ENOTSUPP; 18736 goto out_free; 18737 } 18738 cond_resched(); 18739 } 18740 18741 /* finally lock prog and jit images for all functions and 18742 * populate kallsyms. Begin at the first subprogram, since 18743 * bpf_prog_load will add the kallsyms for the main program. 18744 */ 18745 for (i = 1; i < env->subprog_cnt; i++) { 18746 bpf_prog_lock_ro(func[i]); 18747 bpf_prog_kallsyms_add(func[i]); 18748 } 18749 18750 /* Last step: make now unused interpreter insns from main 18751 * prog consistent for later dump requests, so they can 18752 * later look the same as if they were interpreted only. 18753 */ 18754 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { 18755 if (bpf_pseudo_func(insn)) { 18756 insn[0].imm = env->insn_aux_data[i].call_imm; 18757 insn[1].imm = insn->off; 18758 insn->off = 0; 18759 continue; 18760 } 18761 if (!bpf_pseudo_call(insn)) 18762 continue; 18763 insn->off = env->insn_aux_data[i].call_imm; 18764 subprog = find_subprog(env, i + insn->off + 1); 18765 insn->imm = subprog; 18766 } 18767 18768 prog->jited = 1; 18769 prog->bpf_func = func[0]->bpf_func; 18770 prog->jited_len = func[0]->jited_len; 18771 prog->aux->extable = func[0]->aux->extable; 18772 prog->aux->num_exentries = func[0]->aux->num_exentries; 18773 prog->aux->func = func; 18774 prog->aux->func_cnt = env->subprog_cnt - env->hidden_subprog_cnt; 18775 prog->aux->real_func_cnt = env->subprog_cnt; 18776 prog->aux->bpf_exception_cb = (void *)func[env->exception_callback_subprog]->bpf_func; 18777 prog->aux->exception_boundary = func[0]->aux->exception_boundary; 18778 bpf_prog_jit_attempt_done(prog); 18779 return 0; 18780 out_free: 18781 /* We failed JIT'ing, so at this point we need to unregister poke 18782 * descriptors from subprogs, so that the kernel does not attempt to 18783 * patch them anymore as we're freeing the subprog JIT memory. 18784 */ 18785 for (i = 0; i < prog->aux->size_poke_tab; i++) { 18786 map_ptr = prog->aux->poke_tab[i].tail_call.map; 18787 map_ptr->ops->map_poke_untrack(map_ptr, prog->aux); 18788 } 18789 /* At this point we're guaranteed that poke descriptors are not 18790 * live anymore. We can just unlink the subprogs' descriptor table pointers, 18791 * as the table itself is released with the main prog.
18792 */ 18793 for (i = 0; i < env->subprog_cnt; i++) { 18794 if (!func[i]) 18795 continue; 18796 func[i]->aux->poke_tab = NULL; 18797 bpf_jit_free(func[i]); 18798 } 18799 kfree(func); 18800 out_undo_insn: 18801 /* cleanup main prog to be interpreted */ 18802 prog->jit_requested = 0; 18803 prog->blinding_requested = 0; 18804 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { 18805 if (!bpf_pseudo_call(insn)) 18806 continue; 18807 insn->off = 0; 18808 insn->imm = env->insn_aux_data[i].call_imm; 18809 } 18810 bpf_prog_jit_attempt_done(prog); 18811 return err; 18812 } 18813 18814 static int fixup_call_args(struct bpf_verifier_env *env) 18815 { 18816 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 18817 struct bpf_prog *prog = env->prog; 18818 struct bpf_insn *insn = prog->insnsi; 18819 bool has_kfunc_call = bpf_prog_has_kfunc_call(prog); 18820 int i, depth; 18821 #endif 18822 int err = 0; 18823 18824 if (env->prog->jit_requested && 18825 !bpf_prog_is_offloaded(env->prog->aux)) { 18826 err = jit_subprogs(env); 18827 if (err == 0) 18828 return 0; 18829 if (err == -EFAULT) 18830 return err; 18831 } 18832 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 18833 if (has_kfunc_call) { 18834 verbose(env, "calling kernel functions are not allowed in non-JITed programs\n"); 18835 return -EINVAL; 18836 } 18837 if (env->subprog_cnt > 1 && env->prog->aux->tail_call_reachable) { 18838 /* When JIT fails the progs with bpf2bpf calls and tail_calls 18839 * have to be rejected, since interpreter doesn't support them yet. 18840 */ 18841 verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n"); 18842 return -EINVAL; 18843 } 18844 for (i = 0; i < prog->len; i++, insn++) { 18845 if (bpf_pseudo_func(insn)) { 18846 /* When JIT fails the progs with callback calls 18847 * have to be rejected, since interpreter doesn't support them yet. 
18848 */ 18849 verbose(env, "callbacks are not allowed in non-JITed programs\n"); 18850 return -EINVAL; 18851 } 18852 18853 if (!bpf_pseudo_call(insn)) 18854 continue; 18855 depth = get_callee_stack_depth(env, insn, i); 18856 if (depth < 0) 18857 return depth; 18858 bpf_patch_call_args(insn, depth); 18859 } 18860 err = 0; 18861 #endif 18862 return err; 18863 } 18864 18865 /* replace a generic kfunc with a specialized version if necessary */ 18866 static void specialize_kfunc(struct bpf_verifier_env *env, 18867 u32 func_id, u16 offset, unsigned long *addr) 18868 { 18869 struct bpf_prog *prog = env->prog; 18870 bool seen_direct_write; 18871 void *xdp_kfunc; 18872 bool is_rdonly; 18873 18874 if (bpf_dev_bound_kfunc_id(func_id)) { 18875 xdp_kfunc = bpf_dev_bound_resolve_kfunc(prog, func_id); 18876 if (xdp_kfunc) { 18877 *addr = (unsigned long)xdp_kfunc; 18878 return; 18879 } 18880 /* fallback to default kfunc when not supported by netdev */ 18881 } 18882 18883 if (offset) 18884 return; 18885 18886 if (func_id == special_kfunc_list[KF_bpf_dynptr_from_skb]) { 18887 seen_direct_write = env->seen_direct_write; 18888 is_rdonly = !may_access_direct_pkt_data(env, NULL, BPF_WRITE); 18889 18890 if (is_rdonly) 18891 *addr = (unsigned long)bpf_dynptr_from_skb_rdonly; 18892 18893 /* restore env->seen_direct_write to its original value, since 18894 * may_access_direct_pkt_data mutates it 18895 */ 18896 env->seen_direct_write = seen_direct_write; 18897 } 18898 } 18899 18900 static void __fixup_collection_insert_kfunc(struct bpf_insn_aux_data *insn_aux, 18901 u16 struct_meta_reg, 18902 u16 node_offset_reg, 18903 struct bpf_insn *insn, 18904 struct bpf_insn *insn_buf, 18905 int *cnt) 18906 { 18907 struct btf_struct_meta *kptr_struct_meta = insn_aux->kptr_struct_meta; 18908 struct bpf_insn addr[2] = { BPF_LD_IMM64(struct_meta_reg, (long)kptr_struct_meta) }; 18909 18910 insn_buf[0] = addr[0]; 18911 insn_buf[1] = addr[1]; 18912 insn_buf[2] = BPF_MOV64_IMM(node_offset_reg, insn_aux->insert_off); 18913 insn_buf[3] = *insn; 18914 *cnt = 4; 18915 } 18916 18917 static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, 18918 struct bpf_insn *insn_buf, int insn_idx, int *cnt) 18919 { 18920 const struct bpf_kfunc_desc *desc; 18921 18922 if (!insn->imm) { 18923 verbose(env, "invalid kernel function call not eliminated in verifier pass\n"); 18924 return -EINVAL; 18925 } 18926 18927 *cnt = 0; 18928 18929 /* insn->imm has the btf func_id. Replace it with an offset relative to 18930 * __bpf_call_base, unless the JIT needs to call functions that are 18931 * further than 32 bits away (bpf_jit_supports_far_kfunc_call()). 
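 *
 * (BPF_CALL_IMM(addr) is simply addr - __bpf_call_base, so the real call
 * target can later be recovered as imm + __bpf_call_base; a sketch of the
 * encoding used by the assignment below.)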
18932 */ 18933 desc = find_kfunc_desc(env->prog, insn->imm, insn->off); 18934 if (!desc) { 18935 verbose(env, "verifier internal error: kernel function descriptor not found for func_id %u\n", 18936 insn->imm); 18937 return -EFAULT; 18938 } 18939 18940 if (!bpf_jit_supports_far_kfunc_call()) 18941 insn->imm = BPF_CALL_IMM(desc->addr); 18942 if (insn->off) 18943 return 0; 18944 if (desc->func_id == special_kfunc_list[KF_bpf_obj_new_impl] || 18945 desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) { 18946 struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta; 18947 struct bpf_insn addr[2] = { BPF_LD_IMM64(BPF_REG_2, (long)kptr_struct_meta) }; 18948 u64 obj_new_size = env->insn_aux_data[insn_idx].obj_new_size; 18949 18950 if (desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl] && kptr_struct_meta) { 18951 verbose(env, "verifier internal error: NULL kptr_struct_meta expected at insn_idx %d\n", 18952 insn_idx); 18953 return -EFAULT; 18954 } 18955 18956 insn_buf[0] = BPF_MOV64_IMM(BPF_REG_1, obj_new_size); 18957 insn_buf[1] = addr[0]; 18958 insn_buf[2] = addr[1]; 18959 insn_buf[3] = *insn; 18960 *cnt = 4; 18961 } else if (desc->func_id == special_kfunc_list[KF_bpf_obj_drop_impl] || 18962 desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_drop_impl] || 18963 desc->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl]) { 18964 struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta; 18965 struct bpf_insn addr[2] = { BPF_LD_IMM64(BPF_REG_2, (long)kptr_struct_meta) }; 18966 18967 if (desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_drop_impl] && kptr_struct_meta) { 18968 verbose(env, "verifier internal error: NULL kptr_struct_meta expected at insn_idx %d\n", 18969 insn_idx); 18970 return -EFAULT; 18971 } 18972 18973 if (desc->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl] && 18974 !kptr_struct_meta) { 18975 verbose(env, "verifier internal error: kptr_struct_meta expected at insn_idx %d\n", 18976 insn_idx); 18977 return -EFAULT; 18978 } 18979 18980 insn_buf[0] = addr[0]; 18981 insn_buf[1] = addr[1]; 18982 insn_buf[2] = *insn; 18983 *cnt = 3; 18984 } else if (desc->func_id == special_kfunc_list[KF_bpf_list_push_back_impl] || 18985 desc->func_id == special_kfunc_list[KF_bpf_list_push_front_impl] || 18986 desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) { 18987 struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta; 18988 int struct_meta_reg = BPF_REG_3; 18989 int node_offset_reg = BPF_REG_4; 18990 18991 /* rbtree_add has extra 'less' arg, so args-to-fixup are in diff regs */ 18992 if (desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) { 18993 struct_meta_reg = BPF_REG_4; 18994 node_offset_reg = BPF_REG_5; 18995 } 18996 18997 if (!kptr_struct_meta) { 18998 verbose(env, "verifier internal error: kptr_struct_meta expected at insn_idx %d\n", 18999 insn_idx); 19000 return -EFAULT; 19001 } 19002 19003 __fixup_collection_insert_kfunc(&env->insn_aux_data[insn_idx], struct_meta_reg, 19004 node_offset_reg, insn, insn_buf, cnt); 19005 } else if (desc->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx] || 19006 desc->func_id == special_kfunc_list[KF_bpf_rdonly_cast]) { 19007 insn_buf[0] = BPF_MOV64_REG(BPF_REG_0, BPF_REG_1); 19008 *cnt = 1; 19009 } 19010 return 0; 19011 } 19012 19013 /* The function requires that first instruction in 'patch' is insnsi[prog->len - 1] */ 19014 static int add_hidden_subprog(struct bpf_verifier_env *env, 
struct bpf_insn *patch, int len) 19015 { 19016 struct bpf_subprog_info *info = env->subprog_info; 19017 int cnt = env->subprog_cnt; 19018 struct bpf_prog *prog; 19019 19020 /* We only reserve one slot for hidden subprogs in subprog_info. */ 19021 if (env->hidden_subprog_cnt) { 19022 verbose(env, "verifier internal error: only one hidden subprog supported\n"); 19023 return -EFAULT; 19024 } 19025 /* We're not patching any existing instruction, just appending the new 19026 * ones for the hidden subprog. Hence all of the adjustment operations 19027 * in bpf_patch_insn_data are no-ops. 19028 */ 19029 prog = bpf_patch_insn_data(env, env->prog->len - 1, patch, len); 19030 if (!prog) 19031 return -ENOMEM; 19032 env->prog = prog; 19033 info[cnt + 1].start = info[cnt].start; 19034 info[cnt].start = prog->len - len + 1; 19035 env->subprog_cnt++; 19036 env->hidden_subprog_cnt++; 19037 return 0; 19038 } 19039 19040 /* Do various post-verification rewrites in a single program pass. 19041 * These rewrites simplify JIT and interpreter implementations. 19042 */ 19043 static int do_misc_fixups(struct bpf_verifier_env *env) 19044 { 19045 struct bpf_prog *prog = env->prog; 19046 enum bpf_attach_type eatype = prog->expected_attach_type; 19047 enum bpf_prog_type prog_type = resolve_prog_type(prog); 19048 struct bpf_insn *insn = prog->insnsi; 19049 const struct bpf_func_proto *fn; 19050 const int insn_cnt = prog->len; 19051 const struct bpf_map_ops *ops; 19052 struct bpf_insn_aux_data *aux; 19053 struct bpf_insn insn_buf[16]; 19054 struct bpf_prog *new_prog; 19055 struct bpf_map *map_ptr; 19056 int i, ret, cnt, delta = 0; 19057 19058 if (env->seen_exception && !env->exception_callback_subprog) { 19059 struct bpf_insn patch[] = { 19060 env->prog->insnsi[insn_cnt - 1], 19061 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), 19062 BPF_EXIT_INSN(), 19063 }; 19064 19065 ret = add_hidden_subprog(env, patch, ARRAY_SIZE(patch)); 19066 if (ret < 0) 19067 return ret; 19068 prog = env->prog; 19069 insn = prog->insnsi; 19070 19071 env->exception_callback_subprog = env->subprog_cnt - 1; 19072 /* Don't update insn_cnt, as add_hidden_subprog always appends insns */ 19073 env->subprog_info[env->exception_callback_subprog].is_cb = true; 19074 env->subprog_info[env->exception_callback_subprog].is_async_cb = true; 19075 env->subprog_info[env->exception_callback_subprog].is_exception_cb = true; 19076 } 19077 19078 for (i = 0; i < insn_cnt; i++, insn++) { 19079 /* Make divide-by-zero exceptions impossible. */ 19080 if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) || 19081 insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) || 19082 insn->code == (BPF_ALU | BPF_MOD | BPF_X) || 19083 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) { 19084 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64; 19085 bool isdiv = BPF_OP(insn->code) == BPF_DIV; 19086 struct bpf_insn *patchlet; 19087 struct bpf_insn chk_and_div[] = { 19088 /* [R,W]x div 0 -> 0 */ 19089 BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) | 19090 BPF_JNE | BPF_K, insn->src_reg, 19091 0, 2, 0), 19092 BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg), 19093 BPF_JMP_IMM(BPF_JA, 0, 0, 1), 19094 *insn, 19095 }; 19096 struct bpf_insn chk_and_mod[] = { 19097 /* [R,W]x mod 0 -> [R,W]x */ 19098 BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) | 19099 BPF_JEQ | BPF_K, insn->src_reg, 19100 0, 1 + (is64 ? 0 : 1), 0), 19101 *insn, 19102 BPF_JMP_IMM(BPF_JA, 0, 0, 1), 19103 BPF_MOV32_REG(insn->dst_reg, insn->dst_reg), 19104 }; 19105 19106 patchlet = isdiv ? chk_and_div : chk_and_mod; 19107 cnt = isdiv ? 
ARRAY_SIZE(chk_and_div) : 19108 ARRAY_SIZE(chk_and_mod) - (is64 ? 2 : 0); 19109 19110 new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt); 19111 if (!new_prog) 19112 return -ENOMEM; 19113 19114 delta += cnt - 1; 19115 env->prog = prog = new_prog; 19116 insn = new_prog->insnsi + i + delta; 19117 continue; 19118 } 19119 19120 /* Implement LD_ABS and LD_IND with a rewrite, if supported by the program type. */ 19121 if (BPF_CLASS(insn->code) == BPF_LD && 19122 (BPF_MODE(insn->code) == BPF_ABS || 19123 BPF_MODE(insn->code) == BPF_IND)) { 19124 cnt = env->ops->gen_ld_abs(insn, insn_buf); 19125 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) { 19126 verbose(env, "bpf verifier is misconfigured\n"); 19127 return -EINVAL; 19128 } 19129 19130 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 19131 if (!new_prog) 19132 return -ENOMEM; 19133 19134 delta += cnt - 1; 19135 env->prog = prog = new_prog; 19136 insn = new_prog->insnsi + i + delta; 19137 continue; 19138 } 19139 19140 /* Rewrite pointer arithmetic to mitigate speculation attacks. */ 19141 if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) || 19142 insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) { 19143 const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X; 19144 const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X; 19145 struct bpf_insn *patch = &insn_buf[0]; 19146 bool issrc, isneg, isimm; 19147 u32 off_reg; 19148 19149 aux = &env->insn_aux_data[i + delta]; 19150 if (!aux->alu_state || 19151 aux->alu_state == BPF_ALU_NON_POINTER) 19152 continue; 19153 19154 isneg = aux->alu_state & BPF_ALU_NEG_VALUE; 19155 issrc = (aux->alu_state & BPF_ALU_SANITIZE) == 19156 BPF_ALU_SANITIZE_SRC; 19157 isimm = aux->alu_state & BPF_ALU_IMMEDIATE; 19158 19159 off_reg = issrc ? insn->src_reg : insn->dst_reg; 19160 if (isimm) { 19161 *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit); 19162 } else { 19163 if (isneg) 19164 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); 19165 *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit); 19166 *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg); 19167 *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg); 19168 *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0); 19169 *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63); 19170 *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, off_reg); 19171 } 19172 if (!issrc) 19173 *patch++ = BPF_MOV64_REG(insn->dst_reg, insn->src_reg); 19174 insn->src_reg = BPF_REG_AX; 19175 if (isneg) 19176 insn->code = insn->code == code_add ? 
19177 code_sub : code_add; 19178 *patch++ = *insn; 19179 if (issrc && isneg && !isimm) 19180 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); 19181 cnt = patch - insn_buf; 19182 19183 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 19184 if (!new_prog) 19185 return -ENOMEM; 19186 19187 delta += cnt - 1; 19188 env->prog = prog = new_prog; 19189 insn = new_prog->insnsi + i + delta; 19190 continue; 19191 } 19192 19193 if (insn->code != (BPF_JMP | BPF_CALL)) 19194 continue; 19195 if (insn->src_reg == BPF_PSEUDO_CALL) 19196 continue; 19197 if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) { 19198 ret = fixup_kfunc_call(env, insn, insn_buf, i + delta, &cnt); 19199 if (ret) 19200 return ret; 19201 if (cnt == 0) 19202 continue; 19203 19204 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 19205 if (!new_prog) 19206 return -ENOMEM; 19207 19208 delta += cnt - 1; 19209 env->prog = prog = new_prog; 19210 insn = new_prog->insnsi + i + delta; 19211 continue; 19212 } 19213 19214 if (insn->imm == BPF_FUNC_get_route_realm) 19215 prog->dst_needed = 1; 19216 if (insn->imm == BPF_FUNC_get_prandom_u32) 19217 bpf_user_rnd_init_once(); 19218 if (insn->imm == BPF_FUNC_override_return) 19219 prog->kprobe_override = 1; 19220 if (insn->imm == BPF_FUNC_tail_call) { 19221 /* If we tail call into other programs, we 19222 * cannot make any assumptions since they can 19223 * be replaced dynamically during runtime in 19224 * the program array. 19225 */ 19226 prog->cb_access = 1; 19227 if (!allow_tail_call_in_subprogs(env)) 19228 prog->aux->stack_depth = MAX_BPF_STACK; 19229 prog->aux->max_pkt_offset = MAX_PACKET_OFF; 19230 19231 /* mark bpf_tail_call as different opcode to avoid 19232 * conditional branch in the interpreter for every normal 19233 * call and to prevent accidental JITing by JIT compiler 19234 * that doesn't support bpf_tail_call yet 19235 */ 19236 insn->imm = 0; 19237 insn->code = BPF_JMP | BPF_TAIL_CALL; 19238 19239 aux = &env->insn_aux_data[i + delta]; 19240 if (env->bpf_capable && !prog->blinding_requested && 19241 prog->jit_requested && 19242 !bpf_map_key_poisoned(aux) && 19243 !bpf_map_ptr_poisoned(aux) && 19244 !bpf_map_ptr_unpriv(aux)) { 19245 struct bpf_jit_poke_descriptor desc = { 19246 .reason = BPF_POKE_REASON_TAIL_CALL, 19247 .tail_call.map = BPF_MAP_PTR(aux->map_ptr_state), 19248 .tail_call.key = bpf_map_key_immediate(aux), 19249 .insn_idx = i + delta, 19250 }; 19251 19252 ret = bpf_jit_add_poke_descriptor(prog, &desc); 19253 if (ret < 0) { 19254 verbose(env, "adding tail call poke descriptor failed\n"); 19255 return ret; 19256 } 19257 19258 insn->imm = ret + 1; 19259 continue; 19260 } 19261 19262 if (!bpf_map_ptr_unpriv(aux)) 19263 continue; 19264 19265 /* instead of changing every JIT dealing with tail_call 19266 * emit two extra insns: 19267 * if (index >= max_entries) goto out; 19268 * index &= array->index_mask; 19269 * to avoid out-of-bounds cpu speculation 19270 */ 19271 if (bpf_map_ptr_poisoned(aux)) { 19272 verbose(env, "tail_call abusing map_ptr\n"); 19273 return -EINVAL; 19274 } 19275 19276 map_ptr = BPF_MAP_PTR(aux->map_ptr_state); 19277 insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3, 19278 map_ptr->max_entries, 2); 19279 insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3, 19280 container_of(map_ptr, 19281 struct bpf_array, 19282 map)->index_mask); 19283 insn_buf[2] = *insn; 19284 cnt = 3; 19285 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 19286 if (!new_prog) 19287 return -ENOMEM; 19288 19289 delta += cnt - 1; 19290 env->prog = prog = new_prog; 
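/* Note (descriptive comment, not in the original source): bpf_patch_insn_data()
 * may have reallocated the program image, so the insn cursor is re-pointed
 * into new_prog below before the scan continues.
 */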
19291 insn = new_prog->insnsi + i + delta; 19292 continue; 19293 } 19294 19295 if (insn->imm == BPF_FUNC_timer_set_callback) { 19296 /* The verifier will process callback_fn as many times as necessary 19297 * with different maps and the register states prepared by 19298 * set_timer_callback_state will be accurate. 19299 * 19300 * The following use case is valid: 19301 * map1 is shared by prog1, prog2, prog3. 19302 * prog1 calls bpf_timer_init for some map1 elements 19303 * prog2 calls bpf_timer_set_callback for some map1 elements. 19304 * Those that were not bpf_timer_init-ed will return -EINVAL. 19305 * prog3 calls bpf_timer_start for some map1 elements. 19306 * Those that were not both bpf_timer_init-ed and 19307 * bpf_timer_set_callback-ed will return -EINVAL. 19308 */ 19309 struct bpf_insn ld_addrs[2] = { 19310 BPF_LD_IMM64(BPF_REG_3, (long)prog->aux), 19311 }; 19312 19313 insn_buf[0] = ld_addrs[0]; 19314 insn_buf[1] = ld_addrs[1]; 19315 insn_buf[2] = *insn; 19316 cnt = 3; 19317 19318 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 19319 if (!new_prog) 19320 return -ENOMEM; 19321 19322 delta += cnt - 1; 19323 env->prog = prog = new_prog; 19324 insn = new_prog->insnsi + i + delta; 19325 goto patch_call_imm; 19326 } 19327 19328 if (is_storage_get_function(insn->imm)) { 19329 if (!env->prog->aux->sleepable || 19330 env->insn_aux_data[i + delta].storage_get_func_atomic) 19331 insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_ATOMIC); 19332 else 19333 insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_KERNEL); 19334 insn_buf[1] = *insn; 19335 cnt = 2; 19336 19337 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 19338 if (!new_prog) 19339 return -ENOMEM; 19340 19341 delta += cnt - 1; 19342 env->prog = prog = new_prog; 19343 insn = new_prog->insnsi + i + delta; 19344 goto patch_call_imm; 19345 } 19346 19347 /* bpf_per_cpu_ptr() and bpf_this_cpu_ptr() */ 19348 if (env->insn_aux_data[i + delta].call_with_percpu_alloc_ptr) { 19349 /* patch with 'r1 = *(u64 *)(r1 + 0)' since for percpu data, 19350 * bpf_mem_alloc() returns a ptr to the percpu data ptr. 19351 */ 19352 insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0); 19353 insn_buf[1] = *insn; 19354 cnt = 2; 19355 19356 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 19357 if (!new_prog) 19358 return -ENOMEM; 19359 19360 delta += cnt - 1; 19361 env->prog = prog = new_prog; 19362 insn = new_prog->insnsi + i + delta; 19363 goto patch_call_imm; 19364 } 19365 19366 /* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup 19367 * and other inlining handlers are currently limited to 64 bit 19368 * only. 
19369 */ 19370 if (prog->jit_requested && BITS_PER_LONG == 64 && 19371 (insn->imm == BPF_FUNC_map_lookup_elem || 19372 insn->imm == BPF_FUNC_map_update_elem || 19373 insn->imm == BPF_FUNC_map_delete_elem || 19374 insn->imm == BPF_FUNC_map_push_elem || 19375 insn->imm == BPF_FUNC_map_pop_elem || 19376 insn->imm == BPF_FUNC_map_peek_elem || 19377 insn->imm == BPF_FUNC_redirect_map || 19378 insn->imm == BPF_FUNC_for_each_map_elem || 19379 insn->imm == BPF_FUNC_map_lookup_percpu_elem)) { 19380 aux = &env->insn_aux_data[i + delta]; 19381 if (bpf_map_ptr_poisoned(aux)) 19382 goto patch_call_imm; 19383 19384 map_ptr = BPF_MAP_PTR(aux->map_ptr_state); 19385 ops = map_ptr->ops; 19386 if (insn->imm == BPF_FUNC_map_lookup_elem && 19387 ops->map_gen_lookup) { 19388 cnt = ops->map_gen_lookup(map_ptr, insn_buf); 19389 if (cnt == -EOPNOTSUPP) 19390 goto patch_map_ops_generic; 19391 if (cnt <= 0 || cnt >= ARRAY_SIZE(insn_buf)) { 19392 verbose(env, "bpf verifier is misconfigured\n"); 19393 return -EINVAL; 19394 } 19395 19396 new_prog = bpf_patch_insn_data(env, i + delta, 19397 insn_buf, cnt); 19398 if (!new_prog) 19399 return -ENOMEM; 19400 19401 delta += cnt - 1; 19402 env->prog = prog = new_prog; 19403 insn = new_prog->insnsi + i + delta; 19404 continue; 19405 } 19406 19407 BUILD_BUG_ON(!__same_type(ops->map_lookup_elem, 19408 (void *(*)(struct bpf_map *map, void *key))NULL)); 19409 BUILD_BUG_ON(!__same_type(ops->map_delete_elem, 19410 (long (*)(struct bpf_map *map, void *key))NULL)); 19411 BUILD_BUG_ON(!__same_type(ops->map_update_elem, 19412 (long (*)(struct bpf_map *map, void *key, void *value, 19413 u64 flags))NULL)); 19414 BUILD_BUG_ON(!__same_type(ops->map_push_elem, 19415 (long (*)(struct bpf_map *map, void *value, 19416 u64 flags))NULL)); 19417 BUILD_BUG_ON(!__same_type(ops->map_pop_elem, 19418 (long (*)(struct bpf_map *map, void *value))NULL)); 19419 BUILD_BUG_ON(!__same_type(ops->map_peek_elem, 19420 (long (*)(struct bpf_map *map, void *value))NULL)); 19421 BUILD_BUG_ON(!__same_type(ops->map_redirect, 19422 (long (*)(struct bpf_map *map, u64 index, u64 flags))NULL)); 19423 BUILD_BUG_ON(!__same_type(ops->map_for_each_callback, 19424 (long (*)(struct bpf_map *map, 19425 bpf_callback_t callback_fn, 19426 void *callback_ctx, 19427 u64 flags))NULL)); 19428 BUILD_BUG_ON(!__same_type(ops->map_lookup_percpu_elem, 19429 (void *(*)(struct bpf_map *map, void *key, u32 cpu))NULL)); 19430 19431 patch_map_ops_generic: 19432 switch (insn->imm) { 19433 case BPF_FUNC_map_lookup_elem: 19434 insn->imm = BPF_CALL_IMM(ops->map_lookup_elem); 19435 continue; 19436 case BPF_FUNC_map_update_elem: 19437 insn->imm = BPF_CALL_IMM(ops->map_update_elem); 19438 continue; 19439 case BPF_FUNC_map_delete_elem: 19440 insn->imm = BPF_CALL_IMM(ops->map_delete_elem); 19441 continue; 19442 case BPF_FUNC_map_push_elem: 19443 insn->imm = BPF_CALL_IMM(ops->map_push_elem); 19444 continue; 19445 case BPF_FUNC_map_pop_elem: 19446 insn->imm = BPF_CALL_IMM(ops->map_pop_elem); 19447 continue; 19448 case BPF_FUNC_map_peek_elem: 19449 insn->imm = BPF_CALL_IMM(ops->map_peek_elem); 19450 continue; 19451 case BPF_FUNC_redirect_map: 19452 insn->imm = BPF_CALL_IMM(ops->map_redirect); 19453 continue; 19454 case BPF_FUNC_for_each_map_elem: 19455 insn->imm = BPF_CALL_IMM(ops->map_for_each_callback); 19456 continue; 19457 case BPF_FUNC_map_lookup_percpu_elem: 19458 insn->imm = BPF_CALL_IMM(ops->map_lookup_percpu_elem); 19459 continue; 19460 } 19461 19462 goto patch_call_imm; 19463 } 19464 19465 /* Implement bpf_jiffies64 inline. 
*/ 19466 if (prog->jit_requested && BITS_PER_LONG == 64 && 19467 insn->imm == BPF_FUNC_jiffies64) { 19468 struct bpf_insn ld_jiffies_addr[2] = { 19469 BPF_LD_IMM64(BPF_REG_0, 19470 (unsigned long)&jiffies), 19471 }; 19472 19473 insn_buf[0] = ld_jiffies_addr[0]; 19474 insn_buf[1] = ld_jiffies_addr[1]; 19475 insn_buf[2] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, 19476 BPF_REG_0, 0); 19477 cnt = 3; 19478 19479 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 19480 cnt); 19481 if (!new_prog) 19482 return -ENOMEM; 19483 19484 delta += cnt - 1; 19485 env->prog = prog = new_prog; 19486 insn = new_prog->insnsi + i + delta; 19487 continue; 19488 } 19489 19490 /* Implement bpf_get_func_arg inline. */ 19491 if (prog_type == BPF_PROG_TYPE_TRACING && 19492 insn->imm == BPF_FUNC_get_func_arg) { 19493 /* Load nr_args from ctx - 8 */ 19494 insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8); 19495 insn_buf[1] = BPF_JMP32_REG(BPF_JGE, BPF_REG_2, BPF_REG_0, 6); 19496 insn_buf[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 3); 19497 insn_buf[3] = BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1); 19498 insn_buf[4] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0); 19499 insn_buf[5] = BPF_STX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0); 19500 insn_buf[6] = BPF_MOV64_IMM(BPF_REG_0, 0); 19501 insn_buf[7] = BPF_JMP_A(1); 19502 insn_buf[8] = BPF_MOV64_IMM(BPF_REG_0, -EINVAL); 19503 cnt = 9; 19504 19505 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 19506 if (!new_prog) 19507 return -ENOMEM; 19508 19509 delta += cnt - 1; 19510 env->prog = prog = new_prog; 19511 insn = new_prog->insnsi + i + delta; 19512 continue; 19513 } 19514 19515 /* Implement bpf_get_func_ret inline. */ 19516 if (prog_type == BPF_PROG_TYPE_TRACING && 19517 insn->imm == BPF_FUNC_get_func_ret) { 19518 if (eatype == BPF_TRACE_FEXIT || 19519 eatype == BPF_MODIFY_RETURN) { 19520 /* Load nr_args from ctx - 8 */ 19521 insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8); 19522 insn_buf[1] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 3); 19523 insn_buf[2] = BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1); 19524 insn_buf[3] = BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0); 19525 insn_buf[4] = BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, 0); 19526 insn_buf[5] = BPF_MOV64_IMM(BPF_REG_0, 0); 19527 cnt = 6; 19528 } else { 19529 insn_buf[0] = BPF_MOV64_IMM(BPF_REG_0, -EOPNOTSUPP); 19530 cnt = 1; 19531 } 19532 19533 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 19534 if (!new_prog) 19535 return -ENOMEM; 19536 19537 delta += cnt - 1; 19538 env->prog = prog = new_prog; 19539 insn = new_prog->insnsi + i + delta; 19540 continue; 19541 } 19542 19543 /* Implement get_func_arg_cnt inline. */ 19544 if (prog_type == BPF_PROG_TYPE_TRACING && 19545 insn->imm == BPF_FUNC_get_func_arg_cnt) { 19546 /* Load nr_args from ctx - 8 */ 19547 insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8); 19548 19549 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1); 19550 if (!new_prog) 19551 return -ENOMEM; 19552 19553 env->prog = prog = new_prog; 19554 insn = new_prog->insnsi + i + delta; 19555 continue; 19556 } 19557 19558 /* Implement bpf_get_func_ip inline. 
*/ 19559 if (prog_type == BPF_PROG_TYPE_TRACING && 19560 insn->imm == BPF_FUNC_get_func_ip) { 19561 /* Load IP address from ctx - 16 */ 19562 insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -16); 19563 19564 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1); 19565 if (!new_prog) 19566 return -ENOMEM; 19567 19568 env->prog = prog = new_prog; 19569 insn = new_prog->insnsi + i + delta; 19570 continue; 19571 } 19572 19573 patch_call_imm: 19574 fn = env->ops->get_func_proto(insn->imm, env->prog); 19575 /* all functions that have prototype and verifier allowed 19576 * programs to call them, must be real in-kernel functions 19577 */ 19578 if (!fn->func) { 19579 verbose(env, 19580 "kernel subsystem misconfigured func %s#%d\n", 19581 func_id_name(insn->imm), insn->imm); 19582 return -EFAULT; 19583 } 19584 insn->imm = fn->func - __bpf_call_base; 19585 } 19586 19587 /* Since poke tab is now finalized, publish aux to tracker. */ 19588 for (i = 0; i < prog->aux->size_poke_tab; i++) { 19589 map_ptr = prog->aux->poke_tab[i].tail_call.map; 19590 if (!map_ptr->ops->map_poke_track || 19591 !map_ptr->ops->map_poke_untrack || 19592 !map_ptr->ops->map_poke_run) { 19593 verbose(env, "bpf verifier is misconfigured\n"); 19594 return -EINVAL; 19595 } 19596 19597 ret = map_ptr->ops->map_poke_track(map_ptr, prog->aux); 19598 if (ret < 0) { 19599 verbose(env, "tracking tail call prog failed\n"); 19600 return ret; 19601 } 19602 } 19603 19604 sort_kfunc_descs_by_imm_off(env->prog); 19605 19606 return 0; 19607 } 19608 19609 static struct bpf_prog *inline_bpf_loop(struct bpf_verifier_env *env, 19610 int position, 19611 s32 stack_base, 19612 u32 callback_subprogno, 19613 u32 *cnt) 19614 { 19615 s32 r6_offset = stack_base + 0 * BPF_REG_SIZE; 19616 s32 r7_offset = stack_base + 1 * BPF_REG_SIZE; 19617 s32 r8_offset = stack_base + 2 * BPF_REG_SIZE; 19618 int reg_loop_max = BPF_REG_6; 19619 int reg_loop_cnt = BPF_REG_7; 19620 int reg_loop_ctx = BPF_REG_8; 19621 19622 struct bpf_prog *new_prog; 19623 u32 callback_start; 19624 u32 call_insn_offset; 19625 s32 callback_offset; 19626 19627 /* This represents an inlined version of bpf_iter.c:bpf_loop, 19628 * be careful to modify this code in sync. 19629 */ 19630 struct bpf_insn insn_buf[] = { 19631 /* Return error and jump to the end of the patch if 19632 * expected number of iterations is too big. 
19633 */ 19634 BPF_JMP_IMM(BPF_JLE, BPF_REG_1, BPF_MAX_LOOPS, 2), 19635 BPF_MOV32_IMM(BPF_REG_0, -E2BIG), 19636 BPF_JMP_IMM(BPF_JA, 0, 0, 16), 19637 /* spill R6, R7, R8 to use these as loop vars */ 19638 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, r6_offset), 19639 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_7, r7_offset), 19640 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_8, r8_offset), 19641 /* initialize loop vars */ 19642 BPF_MOV64_REG(reg_loop_max, BPF_REG_1), 19643 BPF_MOV32_IMM(reg_loop_cnt, 0), 19644 BPF_MOV64_REG(reg_loop_ctx, BPF_REG_3), 19645 /* loop header, 19646 * if reg_loop_cnt >= reg_loop_max skip the loop body 19647 */ 19648 BPF_JMP_REG(BPF_JGE, reg_loop_cnt, reg_loop_max, 5), 19649 /* callback call, 19650 * correct callback offset would be set after patching 19651 */ 19652 BPF_MOV64_REG(BPF_REG_1, reg_loop_cnt), 19653 BPF_MOV64_REG(BPF_REG_2, reg_loop_ctx), 19654 BPF_CALL_REL(0), 19655 /* increment loop counter */ 19656 BPF_ALU64_IMM(BPF_ADD, reg_loop_cnt, 1), 19657 /* jump to loop header if callback returned 0 */ 19658 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -6), 19659 /* return value of bpf_loop, 19660 * set R0 to the number of iterations 19661 */ 19662 BPF_MOV64_REG(BPF_REG_0, reg_loop_cnt), 19663 /* restore original values of R6, R7, R8 */ 19664 BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, r6_offset), 19665 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_10, r7_offset), 19666 BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_10, r8_offset), 19667 }; 19668 19669 *cnt = ARRAY_SIZE(insn_buf); 19670 new_prog = bpf_patch_insn_data(env, position, insn_buf, *cnt); 19671 if (!new_prog) 19672 return new_prog; 19673 19674 /* callback start is known only after patching */ 19675 callback_start = env->subprog_info[callback_subprogno].start; 19676 /* Note: insn_buf[12] is an offset of BPF_CALL_REL instruction */ 19677 call_insn_offset = position + 12; 19678 callback_offset = callback_start - call_insn_offset - 1; 19679 new_prog->insnsi[call_insn_offset].imm = callback_offset; 19680 19681 return new_prog; 19682 } 19683 19684 static bool is_bpf_loop_call(struct bpf_insn *insn) 19685 { 19686 return insn->code == (BPF_JMP | BPF_CALL) && 19687 insn->src_reg == 0 && 19688 insn->imm == BPF_FUNC_loop; 19689 } 19690 19691 /* For all sub-programs in the program (including main) check 19692 * insn_aux_data to see if there are bpf_loop calls that require 19693 * inlining. If such calls are found the calls are replaced with a 19694 * sequence of instructions produced by `inline_bpf_loop` function and 19695 * subprog stack_depth is increased by the size of 3 registers. 19696 * This stack space is used to spill values of the R6, R7, R8. These 19697 * registers are used to store the loop bound, counter and context 19698 * variables. 
19699 */ 19700 static int optimize_bpf_loop(struct bpf_verifier_env *env) 19701 { 19702 struct bpf_subprog_info *subprogs = env->subprog_info; 19703 int i, cur_subprog = 0, cnt, delta = 0; 19704 struct bpf_insn *insn = env->prog->insnsi; 19705 int insn_cnt = env->prog->len; 19706 u16 stack_depth = subprogs[cur_subprog].stack_depth; 19707 u16 stack_depth_roundup = round_up(stack_depth, 8) - stack_depth; 19708 u16 stack_depth_extra = 0; 19709 19710 for (i = 0; i < insn_cnt; i++, insn++) { 19711 struct bpf_loop_inline_state *inline_state = 19712 &env->insn_aux_data[i + delta].loop_inline_state; 19713 19714 if (is_bpf_loop_call(insn) && inline_state->fit_for_inline) { 19715 struct bpf_prog *new_prog; 19716 19717 stack_depth_extra = BPF_REG_SIZE * 3 + stack_depth_roundup; 19718 new_prog = inline_bpf_loop(env, 19719 i + delta, 19720 -(stack_depth + stack_depth_extra), 19721 inline_state->callback_subprogno, 19722 &cnt); 19723 if (!new_prog) 19724 return -ENOMEM; 19725 19726 delta += cnt - 1; 19727 env->prog = new_prog; 19728 insn = new_prog->insnsi + i + delta; 19729 } 19730 19731 if (subprogs[cur_subprog + 1].start == i + delta + 1) { 19732 subprogs[cur_subprog].stack_depth += stack_depth_extra; 19733 cur_subprog++; 19734 stack_depth = subprogs[cur_subprog].stack_depth; 19735 stack_depth_roundup = round_up(stack_depth, 8) - stack_depth; 19736 stack_depth_extra = 0; 19737 } 19738 } 19739 19740 env->prog->aux->stack_depth = env->subprog_info[0].stack_depth; 19741 19742 return 0; 19743 } 19744 19745 static void free_states(struct bpf_verifier_env *env) 19746 { 19747 struct bpf_verifier_state_list *sl, *sln; 19748 int i; 19749 19750 sl = env->free_list; 19751 while (sl) { 19752 sln = sl->next; 19753 free_verifier_state(&sl->state, false); 19754 kfree(sl); 19755 sl = sln; 19756 } 19757 env->free_list = NULL; 19758 19759 if (!env->explored_states) 19760 return; 19761 19762 for (i = 0; i < state_htab_size(env); i++) { 19763 sl = env->explored_states[i]; 19764 19765 while (sl) { 19766 sln = sl->next; 19767 free_verifier_state(&sl->state, false); 19768 kfree(sl); 19769 sl = sln; 19770 } 19771 env->explored_states[i] = NULL; 19772 } 19773 } 19774 19775 static int do_check_common(struct bpf_verifier_env *env, int subprog, bool is_ex_cb) 19776 { 19777 bool pop_log = !(env->log.level & BPF_LOG_LEVEL2); 19778 struct bpf_verifier_state *state; 19779 struct bpf_reg_state *regs; 19780 int ret, i; 19781 19782 env->prev_linfo = NULL; 19783 env->pass_cnt++; 19784 19785 state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL); 19786 if (!state) 19787 return -ENOMEM; 19788 state->curframe = 0; 19789 state->speculative = false; 19790 state->branches = 1; 19791 state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL); 19792 if (!state->frame[0]) { 19793 kfree(state); 19794 return -ENOMEM; 19795 } 19796 env->cur_state = state; 19797 init_func_state(env, state->frame[0], 19798 BPF_MAIN_FUNC /* callsite */, 19799 0 /* frameno */, 19800 subprog); 19801 state->first_insn_idx = env->subprog_info[subprog].start; 19802 state->last_insn_idx = -1; 19803 19804 regs = state->frame[state->curframe]->regs; 19805 if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) { 19806 ret = btf_prepare_func_args(env, subprog, regs, is_ex_cb); 19807 if (ret) 19808 goto out; 19809 for (i = BPF_REG_1; i <= BPF_REG_5; i++) { 19810 if (regs[i].type == PTR_TO_CTX) 19811 mark_reg_known_zero(env, regs, i); 19812 else if (regs[i].type == SCALAR_VALUE) 19813 mark_reg_unknown(env, regs, i); 19814 else if (base_type(regs[i].type) == 
PTR_TO_MEM) { 19815 const u32 mem_size = regs[i].mem_size; 19816 19817 mark_reg_known_zero(env, regs, i); 19818 regs[i].mem_size = mem_size; 19819 regs[i].id = ++env->id_gen; 19820 } 19821 } 19822 if (is_ex_cb) { 19823 state->frame[0]->in_exception_callback_fn = true; 19824 env->subprog_info[subprog].is_cb = true; 19825 env->subprog_info[subprog].is_async_cb = true; 19826 env->subprog_info[subprog].is_exception_cb = true; 19827 } 19828 } else { 19829 /* 1st arg to a function */ 19830 regs[BPF_REG_1].type = PTR_TO_CTX; 19831 mark_reg_known_zero(env, regs, BPF_REG_1); 19832 ret = btf_check_subprog_arg_match(env, subprog, regs); 19833 if (ret == -EFAULT) 19834 /* unlikely verifier bug. abort. 19835 * ret == 0 and ret < 0 are sadly acceptable for 19836 * main() function due to backward compatibility. 19837 * Like socket filter program may be written as: 19838 * int bpf_prog(struct pt_regs *ctx) 19839 * and never dereference that ctx in the program. 19840 * 'struct pt_regs' is a type mismatch for socket 19841 * filter that should be using 'struct __sk_buff'. 19842 */ 19843 goto out; 19844 } 19845 19846 ret = do_check(env); 19847 out: 19848 /* check for NULL is necessary, since cur_state can be freed inside 19849 * do_check() under memory pressure. 19850 */ 19851 if (env->cur_state) { 19852 free_verifier_state(env->cur_state, true); 19853 env->cur_state = NULL; 19854 } 19855 while (!pop_stack(env, NULL, NULL, false)); 19856 if (!ret && pop_log) 19857 bpf_vlog_reset(&env->log, 0); 19858 free_states(env); 19859 return ret; 19860 } 19861 19862 /* Verify all global functions in a BPF program one by one based on their BTF. 19863 * All global functions must pass verification. Otherwise the whole program is rejected. 19864 * Consider: 19865 * int bar(int); 19866 * int foo(int f) 19867 * { 19868 * return bar(f); 19869 * } 19870 * int bar(int b) 19871 * { 19872 * ... 19873 * } 19874 * foo() will be verified first for R1=any_scalar_value. During verification it 19875 * will be assumed that bar() already verified successfully and call to bar() 19876 * from foo() will be checked for type match only. Later bar() will be verified 19877 * independently to check that it's safe for R1=any_scalar_value. 
19878 */ 19879 static int do_check_subprogs(struct bpf_verifier_env *env) 19880 { 19881 struct bpf_prog_aux *aux = env->prog->aux; 19882 int i, ret; 19883 19884 if (!aux->func_info) 19885 return 0; 19886 19887 for (i = 1; i < env->subprog_cnt; i++) { 19888 if (aux->func_info_aux[i].linkage != BTF_FUNC_GLOBAL) 19889 continue; 19890 env->insn_idx = env->subprog_info[i].start; 19891 WARN_ON_ONCE(env->insn_idx == 0); 19892 ret = do_check_common(env, i, env->exception_callback_subprog == i); 19893 if (ret) { 19894 return ret; 19895 } else if (env->log.level & BPF_LOG_LEVEL) { 19896 verbose(env, 19897 "Func#%d is safe for any args that match its prototype\n", 19898 i); 19899 } 19900 } 19901 return 0; 19902 } 19903 19904 static int do_check_main(struct bpf_verifier_env *env) 19905 { 19906 int ret; 19907 19908 env->insn_idx = 0; 19909 ret = do_check_common(env, 0, false); 19910 if (!ret) 19911 env->prog->aux->stack_depth = env->subprog_info[0].stack_depth; 19912 return ret; 19913 } 19914 19915 19916 static void print_verification_stats(struct bpf_verifier_env *env) 19917 { 19918 int i; 19919 19920 if (env->log.level & BPF_LOG_STATS) { 19921 verbose(env, "verification time %lld usec\n", 19922 div_u64(env->verification_time, 1000)); 19923 verbose(env, "stack depth "); 19924 for (i = 0; i < env->subprog_cnt; i++) { 19925 u32 depth = env->subprog_info[i].stack_depth; 19926 19927 verbose(env, "%d", depth); 19928 if (i + 1 < env->subprog_cnt) 19929 verbose(env, "+"); 19930 } 19931 verbose(env, "\n"); 19932 } 19933 verbose(env, "processed %d insns (limit %d) max_states_per_insn %d " 19934 "total_states %d peak_states %d mark_read %d\n", 19935 env->insn_processed, BPF_COMPLEXITY_LIMIT_INSNS, 19936 env->max_states_per_insn, env->total_states, 19937 env->peak_states, env->longest_mark_read_walk); 19938 } 19939 19940 static int check_struct_ops_btf_id(struct bpf_verifier_env *env) 19941 { 19942 const struct btf_type *t, *func_proto; 19943 const struct bpf_struct_ops *st_ops; 19944 const struct btf_member *member; 19945 struct bpf_prog *prog = env->prog; 19946 u32 btf_id, member_idx; 19947 const char *mname; 19948 19949 if (!prog->gpl_compatible) { 19950 verbose(env, "struct ops programs must have a GPL compatible license\n"); 19951 return -EINVAL; 19952 } 19953 19954 btf_id = prog->aux->attach_btf_id; 19955 st_ops = bpf_struct_ops_find(btf_id); 19956 if (!st_ops) { 19957 verbose(env, "attach_btf_id %u is not a supported struct\n", 19958 btf_id); 19959 return -ENOTSUPP; 19960 } 19961 19962 t = st_ops->type; 19963 member_idx = prog->expected_attach_type; 19964 if (member_idx >= btf_type_vlen(t)) { 19965 verbose(env, "attach to invalid member idx %u of struct %s\n", 19966 member_idx, st_ops->name); 19967 return -EINVAL; 19968 } 19969 19970 member = &btf_type_member(t)[member_idx]; 19971 mname = btf_name_by_offset(btf_vmlinux, member->name_off); 19972 func_proto = btf_type_resolve_func_ptr(btf_vmlinux, member->type, 19973 NULL); 19974 if (!func_proto) { 19975 verbose(env, "attach to invalid member %s(@idx %u) of struct %s\n", 19976 mname, member_idx, st_ops->name); 19977 return -EINVAL; 19978 } 19979 19980 if (st_ops->check_member) { 19981 int err = st_ops->check_member(t, member, prog); 19982 19983 if (err) { 19984 verbose(env, "attach to unsupported member %s of struct %s\n", 19985 mname, st_ops->name); 19986 return err; 19987 } 19988 } 19989 19990 prog->aux->attach_func_proto = func_proto; 19991 prog->aux->attach_func_name = mname; 19992 env->ops = st_ops->verifier_ops; 19993 19994 return 0; 19995 } 19996 
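/* Descriptive note (added commentary): BPF_MODIFY_RETURN (fmod_ret) attachment
 * is only permitted when the target function is on the error-injection allow
 * list or is an LSM hook whose name carries the "security_" prefix;
 * check_attach_modify_return() below rejects everything else with -EINVAL.
 */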
#define SECURITY_PREFIX "security_" 19997 19998 static int check_attach_modify_return(unsigned long addr, const char *func_name) 19999 { 20000 if (within_error_injection_list(addr) || 20001 !strncmp(SECURITY_PREFIX, func_name, sizeof(SECURITY_PREFIX) - 1)) 20002 return 0; 20003 20004 return -EINVAL; 20005 } 20006 20007 /* list of non-sleepable functions that are otherwise on 20008 * ALLOW_ERROR_INJECTION list 20009 */ 20010 BTF_SET_START(btf_non_sleepable_error_inject) 20011 /* Three functions below can be called from sleepable and non-sleepable context. 20012 * Assume non-sleepable from bpf safety point of view. 20013 */ 20014 BTF_ID(func, __filemap_add_folio) 20015 BTF_ID(func, should_fail_alloc_page) 20016 BTF_ID(func, should_failslab) 20017 BTF_SET_END(btf_non_sleepable_error_inject) 20018 20019 static int check_non_sleepable_error_inject(u32 btf_id) 20020 { 20021 return btf_id_set_contains(&btf_non_sleepable_error_inject, btf_id); 20022 } 20023 20024 int bpf_check_attach_target(struct bpf_verifier_log *log, 20025 const struct bpf_prog *prog, 20026 const struct bpf_prog *tgt_prog, 20027 u32 btf_id, 20028 struct bpf_attach_target_info *tgt_info) 20029 { 20030 bool prog_extension = prog->type == BPF_PROG_TYPE_EXT; 20031 const char prefix[] = "btf_trace_"; 20032 int ret = 0, subprog = -1, i; 20033 const struct btf_type *t; 20034 bool conservative = true; 20035 const char *tname; 20036 struct btf *btf; 20037 long addr = 0; 20038 struct module *mod = NULL; 20039 20040 if (!btf_id) { 20041 bpf_log(log, "Tracing programs must provide btf_id\n"); 20042 return -EINVAL; 20043 } 20044 btf = tgt_prog ? tgt_prog->aux->btf : prog->aux->attach_btf; 20045 if (!btf) { 20046 bpf_log(log, 20047 "FENTRY/FEXIT program can only be attached to another program annotated with BTF\n"); 20048 return -EINVAL; 20049 } 20050 t = btf_type_by_id(btf, btf_id); 20051 if (!t) { 20052 bpf_log(log, "attach_btf_id %u is invalid\n", btf_id); 20053 return -EINVAL; 20054 } 20055 tname = btf_name_by_offset(btf, t->name_off); 20056 if (!tname) { 20057 bpf_log(log, "attach_btf_id %u doesn't have a name\n", btf_id); 20058 return -EINVAL; 20059 } 20060 if (tgt_prog) { 20061 struct bpf_prog_aux *aux = tgt_prog->aux; 20062 20063 if (bpf_prog_is_dev_bound(prog->aux) && 20064 !bpf_prog_dev_bound_match(prog, tgt_prog)) { 20065 bpf_log(log, "Target program bound device mismatch"); 20066 return -EINVAL; 20067 } 20068 20069 for (i = 0; i < aux->func_info_cnt; i++) 20070 if (aux->func_info[i].type_id == btf_id) { 20071 subprog = i; 20072 break; 20073 } 20074 if (subprog == -1) { 20075 bpf_log(log, "Subprog %s doesn't exist\n", tname); 20076 return -EINVAL; 20077 } 20078 if (aux->func && aux->func[subprog]->aux->exception_cb) { 20079 bpf_log(log, 20080 "%s programs cannot attach to exception callback\n", 20081 prog_extension ? "Extension" : "FENTRY/FEXIT"); 20082 return -EINVAL; 20083 } 20084 conservative = aux->func_info_aux[subprog].unreliable; 20085 if (prog_extension) { 20086 if (conservative) { 20087 bpf_log(log, 20088 "Cannot replace static functions\n"); 20089 return -EINVAL; 20090 } 20091 if (!prog->jit_requested) { 20092 bpf_log(log, 20093 "Extension programs should be JITed\n"); 20094 return -EINVAL; 20095 } 20096 } 20097 if (!tgt_prog->jited) { 20098 bpf_log(log, "Can attach to only JITed progs\n"); 20099 return -EINVAL; 20100 } 20101 if (tgt_prog->type == prog->type) { 20102 /* Cannot fentry/fexit another fentry/fexit program. 20103 * Cannot attach program extension to another extension. 
20104 * It's ok to attach fentry/fexit to extension program. 20105 */ 20106 bpf_log(log, "Cannot recursively attach\n"); 20107 return -EINVAL; 20108 } 20109 if (tgt_prog->type == BPF_PROG_TYPE_TRACING && 20110 prog_extension && 20111 (tgt_prog->expected_attach_type == BPF_TRACE_FENTRY || 20112 tgt_prog->expected_attach_type == BPF_TRACE_FEXIT)) { 20113 /* Program extensions can extend all program types 20114 * except fentry/fexit. The reason is the following. 20115 * The fentry/fexit programs are used for performance 20116 * analysis, stats and can be attached to any program 20117 * type except themselves. When extension program is 20118 * replacing XDP function it is necessary to allow 20119 * performance analysis of all functions. Both original 20120 * XDP program and its program extension. Hence 20121 * attaching fentry/fexit to BPF_PROG_TYPE_EXT is 20122 * allowed. If extending of fentry/fexit was allowed it 20123 * would be possible to create long call chain 20124 * fentry->extension->fentry->extension beyond 20125 * reasonable stack size. Hence extending fentry is not 20126 * allowed. 20127 */ 20128 bpf_log(log, "Cannot extend fentry/fexit\n"); 20129 return -EINVAL; 20130 } 20131 } else { 20132 if (prog_extension) { 20133 bpf_log(log, "Cannot replace kernel functions\n"); 20134 return -EINVAL; 20135 } 20136 } 20137 20138 switch (prog->expected_attach_type) { 20139 case BPF_TRACE_RAW_TP: 20140 if (tgt_prog) { 20141 bpf_log(log, 20142 "Only FENTRY/FEXIT progs are attachable to another BPF prog\n"); 20143 return -EINVAL; 20144 } 20145 if (!btf_type_is_typedef(t)) { 20146 bpf_log(log, "attach_btf_id %u is not a typedef\n", 20147 btf_id); 20148 return -EINVAL; 20149 } 20150 if (strncmp(prefix, tname, sizeof(prefix) - 1)) { 20151 bpf_log(log, "attach_btf_id %u points to wrong type name %s\n", 20152 btf_id, tname); 20153 return -EINVAL; 20154 } 20155 tname += sizeof(prefix) - 1; 20156 t = btf_type_by_id(btf, t->type); 20157 if (!btf_type_is_ptr(t)) 20158 /* should never happen in valid vmlinux build */ 20159 return -EINVAL; 20160 t = btf_type_by_id(btf, t->type); 20161 if (!btf_type_is_func_proto(t)) 20162 /* should never happen in valid vmlinux build */ 20163 return -EINVAL; 20164 20165 break; 20166 case BPF_TRACE_ITER: 20167 if (!btf_type_is_func(t)) { 20168 bpf_log(log, "attach_btf_id %u is not a function\n", 20169 btf_id); 20170 return -EINVAL; 20171 } 20172 t = btf_type_by_id(btf, t->type); 20173 if (!btf_type_is_func_proto(t)) 20174 return -EINVAL; 20175 ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel); 20176 if (ret) 20177 return ret; 20178 break; 20179 default: 20180 if (!prog_extension) 20181 return -EINVAL; 20182 fallthrough; 20183 case BPF_MODIFY_RETURN: 20184 case BPF_LSM_MAC: 20185 case BPF_LSM_CGROUP: 20186 case BPF_TRACE_FENTRY: 20187 case BPF_TRACE_FEXIT: 20188 if (!btf_type_is_func(t)) { 20189 bpf_log(log, "attach_btf_id %u is not a function\n", 20190 btf_id); 20191 return -EINVAL; 20192 } 20193 if (prog_extension && 20194 btf_check_type_match(log, prog, btf, t)) 20195 return -EINVAL; 20196 t = btf_type_by_id(btf, t->type); 20197 if (!btf_type_is_func_proto(t)) 20198 return -EINVAL; 20199 20200 if ((prog->aux->saved_dst_prog_type || prog->aux->saved_dst_attach_type) && 20201 (!tgt_prog || prog->aux->saved_dst_prog_type != tgt_prog->type || 20202 prog->aux->saved_dst_attach_type != tgt_prog->expected_attach_type)) 20203 return -EINVAL; 20204 20205 if (tgt_prog && conservative) 20206 t = NULL; 20207 20208 ret = btf_distill_func_proto(log, btf, t, tname, 
&tgt_info->fmodel); 20209 if (ret < 0) 20210 return ret; 20211 20212 if (tgt_prog) { 20213 if (subprog == 0) 20214 addr = (long) tgt_prog->bpf_func; 20215 else 20216 addr = (long) tgt_prog->aux->func[subprog]->bpf_func; 20217 } else { 20218 if (btf_is_module(btf)) { 20219 mod = btf_try_get_module(btf); 20220 if (mod) 20221 addr = find_kallsyms_symbol_value(mod, tname); 20222 else 20223 addr = 0; 20224 } else { 20225 addr = kallsyms_lookup_name(tname); 20226 } 20227 if (!addr) { 20228 module_put(mod); 20229 bpf_log(log, 20230 "The address of function %s cannot be found\n", 20231 tname); 20232 return -ENOENT; 20233 } 20234 } 20235 20236 if (prog->aux->sleepable) { 20237 ret = -EINVAL; 20238 switch (prog->type) { 20239 case BPF_PROG_TYPE_TRACING: 20240 20241 /* fentry/fexit/fmod_ret progs can be sleepable if they are 20242 * attached to ALLOW_ERROR_INJECTION and are not in denylist. 20243 */ 20244 if (!check_non_sleepable_error_inject(btf_id) && 20245 within_error_injection_list(addr)) 20246 ret = 0; 20247 /* fentry/fexit/fmod_ret progs can also be sleepable if they are 20248 * in the fmodret id set with the KF_SLEEPABLE flag. 20249 */ 20250 else { 20251 u32 *flags = btf_kfunc_is_modify_return(btf, btf_id, 20252 prog); 20253 20254 if (flags && (*flags & KF_SLEEPABLE)) 20255 ret = 0; 20256 } 20257 break; 20258 case BPF_PROG_TYPE_LSM: 20259 /* LSM progs check that they are attached to bpf_lsm_*() funcs. 20260 * Only some of them are sleepable. 20261 */ 20262 if (bpf_lsm_is_sleepable_hook(btf_id)) 20263 ret = 0; 20264 break; 20265 default: 20266 break; 20267 } 20268 if (ret) { 20269 module_put(mod); 20270 bpf_log(log, "%s is not sleepable\n", tname); 20271 return ret; 20272 } 20273 } else if (prog->expected_attach_type == BPF_MODIFY_RETURN) { 20274 if (tgt_prog) { 20275 module_put(mod); 20276 bpf_log(log, "can't modify return codes of BPF programs\n"); 20277 return -EINVAL; 20278 } 20279 ret = -EINVAL; 20280 if (btf_kfunc_is_modify_return(btf, btf_id, prog) || 20281 !check_attach_modify_return(addr, tname)) 20282 ret = 0; 20283 if (ret) { 20284 module_put(mod); 20285 bpf_log(log, "%s() is not modifiable\n", tname); 20286 return ret; 20287 } 20288 } 20289 20290 break; 20291 } 20292 tgt_info->tgt_addr = addr; 20293 tgt_info->tgt_name = tname; 20294 tgt_info->tgt_type = t; 20295 tgt_info->tgt_mod = mod; 20296 return 0; 20297 } 20298 20299 BTF_SET_START(btf_id_deny) 20300 BTF_ID_UNUSED 20301 #ifdef CONFIG_SMP 20302 BTF_ID(func, migrate_disable) 20303 BTF_ID(func, migrate_enable) 20304 #endif 20305 #if !defined CONFIG_PREEMPT_RCU && !defined CONFIG_TINY_RCU 20306 BTF_ID(func, rcu_read_unlock_strict) 20307 #endif 20308 #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_TRACE_PREEMPT_TOGGLE) 20309 BTF_ID(func, preempt_count_add) 20310 BTF_ID(func, preempt_count_sub) 20311 #endif 20312 #ifdef CONFIG_PREEMPT_RCU 20313 BTF_ID(func, __rcu_read_lock) 20314 BTF_ID(func, __rcu_read_unlock) 20315 #endif 20316 BTF_SET_END(btf_id_deny) 20317 20318 static bool can_be_sleepable(struct bpf_prog *prog) 20319 { 20320 if (prog->type == BPF_PROG_TYPE_TRACING) { 20321 switch (prog->expected_attach_type) { 20322 case BPF_TRACE_FENTRY: 20323 case BPF_TRACE_FEXIT: 20324 case BPF_MODIFY_RETURN: 20325 case BPF_TRACE_ITER: 20326 return true; 20327 default: 20328 return false; 20329 } 20330 } 20331 return prog->type == BPF_PROG_TYPE_LSM || 20332 prog->type == BPF_PROG_TYPE_KPROBE /* only for uprobes */ || 20333 prog->type == BPF_PROG_TYPE_STRUCT_OPS; 20334 } 20335 20336 static int check_attach_btf_id(struct bpf_verifier_env 
*env) 20337 { 20338 struct bpf_prog *prog = env->prog; 20339 struct bpf_prog *tgt_prog = prog->aux->dst_prog; 20340 struct bpf_attach_target_info tgt_info = {}; 20341 u32 btf_id = prog->aux->attach_btf_id; 20342 struct bpf_trampoline *tr; 20343 int ret; 20344 u64 key; 20345 20346 if (prog->type == BPF_PROG_TYPE_SYSCALL) { 20347 if (prog->aux->sleepable) 20348 /* attach_btf_id checked to be zero already */ 20349 return 0; 20350 verbose(env, "Syscall programs can only be sleepable\n"); 20351 return -EINVAL; 20352 } 20353 20354 if (prog->aux->sleepable && !can_be_sleepable(prog)) { 20355 verbose(env, "Only fentry/fexit/fmod_ret, lsm, iter, uprobe, and struct_ops programs can be sleepable\n"); 20356 return -EINVAL; 20357 } 20358 20359 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS) 20360 return check_struct_ops_btf_id(env); 20361 20362 if (prog->type != BPF_PROG_TYPE_TRACING && 20363 prog->type != BPF_PROG_TYPE_LSM && 20364 prog->type != BPF_PROG_TYPE_EXT) 20365 return 0; 20366 20367 ret = bpf_check_attach_target(&env->log, prog, tgt_prog, btf_id, &tgt_info); 20368 if (ret) 20369 return ret; 20370 20371 if (tgt_prog && prog->type == BPF_PROG_TYPE_EXT) { 20372 /* to make freplace equivalent to their targets, they need to 20373 * inherit env->ops and expected_attach_type for the rest of the 20374 * verification 20375 */ 20376 env->ops = bpf_verifier_ops[tgt_prog->type]; 20377 prog->expected_attach_type = tgt_prog->expected_attach_type; 20378 } 20379 20380 /* store info about the attachment target that will be used later */ 20381 prog->aux->attach_func_proto = tgt_info.tgt_type; 20382 prog->aux->attach_func_name = tgt_info.tgt_name; 20383 prog->aux->mod = tgt_info.tgt_mod; 20384 20385 if (tgt_prog) { 20386 prog->aux->saved_dst_prog_type = tgt_prog->type; 20387 prog->aux->saved_dst_attach_type = tgt_prog->expected_attach_type; 20388 } 20389 20390 if (prog->expected_attach_type == BPF_TRACE_RAW_TP) { 20391 prog->aux->attach_btf_trace = true; 20392 return 0; 20393 } else if (prog->expected_attach_type == BPF_TRACE_ITER) { 20394 if (!bpf_iter_prog_supported(prog)) 20395 return -EINVAL; 20396 return 0; 20397 } 20398 20399 if (prog->type == BPF_PROG_TYPE_LSM) { 20400 ret = bpf_lsm_verify_prog(&env->log, prog); 20401 if (ret < 0) 20402 return ret; 20403 } else if (prog->type == BPF_PROG_TYPE_TRACING && 20404 btf_id_set_contains(&btf_id_deny, btf_id)) { 20405 return -EINVAL; 20406 } 20407 20408 key = bpf_trampoline_compute_key(tgt_prog, prog->aux->attach_btf, btf_id); 20409 tr = bpf_trampoline_get(key, &tgt_info); 20410 if (!tr) 20411 return -ENOMEM; 20412 20413 if (tgt_prog && tgt_prog->aux->tail_call_reachable) 20414 tr->flags = BPF_TRAMP_F_TAIL_CALL_CTX; 20415 20416 prog->aux->dst_trampoline = tr; 20417 return 0; 20418 } 20419 20420 struct btf *bpf_get_btf_vmlinux(void) 20421 { 20422 if (!btf_vmlinux && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) { 20423 mutex_lock(&bpf_verifier_lock); 20424 if (!btf_vmlinux) 20425 btf_vmlinux = btf_parse_vmlinux(); 20426 mutex_unlock(&bpf_verifier_lock); 20427 } 20428 return btf_vmlinux; 20429 } 20430 20431 int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size) 20432 { 20433 u64 start_time = ktime_get_ns(); 20434 struct bpf_verifier_env *env; 20435 int i, len, ret = -EINVAL, err; 20436 u32 log_true_size; 20437 bool is_priv; 20438 20439 /* no program is valid */ 20440 if (ARRAY_SIZE(bpf_verifier_ops) == 0) 20441 return -EINVAL; 20442 20443 /* 'struct bpf_verifier_env' can be global, but since it's not small, 20444 * allocate/free it every 
time bpf_check() is called 20445 */ 20446 env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL); 20447 if (!env) 20448 return -ENOMEM; 20449 20450 env->bt.env = env; 20451 20452 len = (*prog)->len; 20453 env->insn_aux_data = 20454 vzalloc(array_size(sizeof(struct bpf_insn_aux_data), len)); 20455 ret = -ENOMEM; 20456 if (!env->insn_aux_data) 20457 goto err_free_env; 20458 for (i = 0; i < len; i++) 20459 env->insn_aux_data[i].orig_idx = i; 20460 env->prog = *prog; 20461 env->ops = bpf_verifier_ops[env->prog->type]; 20462 env->fd_array = make_bpfptr(attr->fd_array, uattr.is_kernel); 20463 is_priv = bpf_capable(); 20464 20465 bpf_get_btf_vmlinux(); 20466 20467 /* grab the mutex to protect few globals used by verifier */ 20468 if (!is_priv) 20469 mutex_lock(&bpf_verifier_lock); 20470 20471 /* user could have requested verbose verifier output 20472 * and supplied buffer to store the verification trace 20473 */ 20474 ret = bpf_vlog_init(&env->log, attr->log_level, 20475 (char __user *) (unsigned long) attr->log_buf, 20476 attr->log_size); 20477 if (ret) 20478 goto err_unlock; 20479 20480 mark_verifier_state_clean(env); 20481 20482 if (IS_ERR(btf_vmlinux)) { 20483 /* Either gcc or pahole or kernel are broken. */ 20484 verbose(env, "in-kernel BTF is malformed\n"); 20485 ret = PTR_ERR(btf_vmlinux); 20486 goto skip_full_check; 20487 } 20488 20489 env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT); 20490 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) 20491 env->strict_alignment = true; 20492 if (attr->prog_flags & BPF_F_ANY_ALIGNMENT) 20493 env->strict_alignment = false; 20494 20495 env->allow_ptr_leaks = bpf_allow_ptr_leaks(); 20496 env->allow_uninit_stack = bpf_allow_uninit_stack(); 20497 env->bypass_spec_v1 = bpf_bypass_spec_v1(); 20498 env->bypass_spec_v4 = bpf_bypass_spec_v4(); 20499 env->bpf_capable = bpf_capable(); 20500 20501 if (is_priv) 20502 env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ; 20503 env->test_reg_invariants = attr->prog_flags & BPF_F_TEST_REG_INVARIANTS; 20504 20505 env->explored_states = kvcalloc(state_htab_size(env), 20506 sizeof(struct bpf_verifier_state_list *), 20507 GFP_USER); 20508 ret = -ENOMEM; 20509 if (!env->explored_states) 20510 goto skip_full_check; 20511 20512 ret = check_btf_info_early(env, attr, uattr); 20513 if (ret < 0) 20514 goto skip_full_check; 20515 20516 ret = add_subprog_and_kfunc(env); 20517 if (ret < 0) 20518 goto skip_full_check; 20519 20520 ret = check_subprogs(env); 20521 if (ret < 0) 20522 goto skip_full_check; 20523 20524 ret = check_btf_info(env, attr, uattr); 20525 if (ret < 0) 20526 goto skip_full_check; 20527 20528 ret = check_attach_btf_id(env); 20529 if (ret) 20530 goto skip_full_check; 20531 20532 ret = resolve_pseudo_ldimm64(env); 20533 if (ret < 0) 20534 goto skip_full_check; 20535 20536 if (bpf_prog_is_offloaded(env->prog->aux)) { 20537 ret = bpf_prog_offload_verifier_prep(env->prog); 20538 if (ret) 20539 goto skip_full_check; 20540 } 20541 20542 ret = check_cfg(env); 20543 if (ret < 0) 20544 goto skip_full_check; 20545 20546 ret = do_check_subprogs(env); 20547 ret = ret ?: do_check_main(env); 20548 20549 if (ret == 0 && bpf_prog_is_offloaded(env->prog->aux)) 20550 ret = bpf_prog_offload_finalize(env); 20551 20552 skip_full_check: 20553 kvfree(env->explored_states); 20554 20555 if (ret == 0) 20556 ret = check_max_stack_depth(env); 20557 20558 /* instruction rewrites happen after this point */ 20559 if (ret == 0) 20560 ret = optimize_bpf_loop(env); 20561 20562 if (is_priv) { 20563 if (ret 
== 0) 20564 opt_hard_wire_dead_code_branches(env); 20565 if (ret == 0) 20566 ret = opt_remove_dead_code(env); 20567 if (ret == 0) 20568 ret = opt_remove_nops(env); 20569 } else { 20570 if (ret == 0) 20571 sanitize_dead_code(env); 20572 } 20573 20574 if (ret == 0) 20575 /* program is valid, convert *(u32*)(ctx + off) accesses */ 20576 ret = convert_ctx_accesses(env); 20577 20578 if (ret == 0) 20579 ret = do_misc_fixups(env); 20580 20581 /* do 32-bit optimization after insn patching has done so those patched 20582 * insns could be handled correctly. 20583 */ 20584 if (ret == 0 && !bpf_prog_is_offloaded(env->prog->aux)) { 20585 ret = opt_subreg_zext_lo32_rnd_hi32(env, attr); 20586 env->prog->aux->verifier_zext = bpf_jit_needs_zext() ? !ret 20587 : false; 20588 } 20589 20590 if (ret == 0) 20591 ret = fixup_call_args(env); 20592 20593 env->verification_time = ktime_get_ns() - start_time; 20594 print_verification_stats(env); 20595 env->prog->aux->verified_insns = env->insn_processed; 20596 20597 /* preserve original error even if log finalization is successful */ 20598 err = bpf_vlog_finalize(&env->log, &log_true_size); 20599 if (err) 20600 ret = err; 20601 20602 if (uattr_size >= offsetofend(union bpf_attr, log_true_size) && 20603 copy_to_bpfptr_offset(uattr, offsetof(union bpf_attr, log_true_size), 20604 &log_true_size, sizeof(log_true_size))) { 20605 ret = -EFAULT; 20606 goto err_release_maps; 20607 } 20608 20609 if (ret) 20610 goto err_release_maps; 20611 20612 if (env->used_map_cnt) { 20613 /* if program passed verifier, update used_maps in bpf_prog_info */ 20614 env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt, 20615 sizeof(env->used_maps[0]), 20616 GFP_KERNEL); 20617 20618 if (!env->prog->aux->used_maps) { 20619 ret = -ENOMEM; 20620 goto err_release_maps; 20621 } 20622 20623 memcpy(env->prog->aux->used_maps, env->used_maps, 20624 sizeof(env->used_maps[0]) * env->used_map_cnt); 20625 env->prog->aux->used_map_cnt = env->used_map_cnt; 20626 } 20627 if (env->used_btf_cnt) { 20628 /* if program passed verifier, update used_btfs in bpf_prog_aux */ 20629 env->prog->aux->used_btfs = kmalloc_array(env->used_btf_cnt, 20630 sizeof(env->used_btfs[0]), 20631 GFP_KERNEL); 20632 if (!env->prog->aux->used_btfs) { 20633 ret = -ENOMEM; 20634 goto err_release_maps; 20635 } 20636 20637 memcpy(env->prog->aux->used_btfs, env->used_btfs, 20638 sizeof(env->used_btfs[0]) * env->used_btf_cnt); 20639 env->prog->aux->used_btf_cnt = env->used_btf_cnt; 20640 } 20641 if (env->used_map_cnt || env->used_btf_cnt) { 20642 /* program is valid. Convert pseudo bpf_ld_imm64 into generic 20643 * bpf_ld_imm64 instructions 20644 */ 20645 convert_pseudo_ld_imm64(env); 20646 } 20647 20648 adjust_btf_func(env); 20649 20650 err_release_maps: 20651 if (!env->prog->aux->used_maps) 20652 /* if we didn't copy map pointers into bpf_prog_info, release 20653 * them now. Otherwise free_used_maps() will release them. 20654 */ 20655 release_maps(env); 20656 if (!env->prog->aux->used_btfs) 20657 release_btfs(env); 20658 20659 /* extension progs temporarily inherit the attach_type of their targets 20660 for verification purposes, so set it back to zero before returning 20661 */ 20662 if (env->prog->type == BPF_PROG_TYPE_EXT) 20663 env->prog->expected_attach_type = 0; 20664 20665 *prog = env->prog; 20666 err_unlock: 20667 if (!is_priv) 20668 mutex_unlock(&bpf_verifier_lock); 20669 vfree(env->insn_aux_data); 20670 err_free_env: 20671 kfree(env); 20672 return ret; 20673 } 20674
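/* Summary of the main phases of bpf_check() above (added commentary):
 *  1. allocate env and insn_aux_data, set up the verifier log
 *  2. early BTF checks, add_subprog_and_kfunc(), check_subprogs()
 *  3. check_attach_btf_id(), resolve_pseudo_ldimm64(), optional offload prep
 *  4. check_cfg(), then do_check_subprogs() and do_check_main()
 *  5. post-verification rewrites: dead code handling, convert_ctx_accesses(),
 *     do_misc_fixups(), subreg zext optimization, fixup_call_args()
 *  6. finalize the log, publish used maps/BTFs, release the env
 */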