// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
 */
#include <uapi/linux/btf.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bpf_verifier.h>
#include <linux/filter.h>
#include <net/netlink.h>
#include <linux/file.h>
#include <linux/vmalloc.h>
#include <linux/stringify.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/perf_event.h>
#include <linux/ctype.h>
#include <linux/error-injection.h>
#include <linux/bpf_lsm.h>
#include <linux/btf_ids.h>

#include "disasm.h"

static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	[_id] = & _name ## _verifier_ops,
#define BPF_MAP_TYPE(_id, _ops)
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};

/* bpf_check() is a static code analyzer that walks eBPF program
 * instruction by instruction and updates register/stack state.
 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
 *
 * The first pass is depth-first-search to check that the program is a DAG.
 * It rejects the following programs:
 * - larger than BPF_MAXINSNS insns
 * - if loop is present (detected via back-edge)
 * - unreachable insns exist (the program must be a single function, not a forest)
 * - out of bounds or malformed jumps
 * The second pass is all possible path descent from the 1st insn.
 * Since it's analyzing all paths through the program, the length of the
 * analysis is limited to 64k insn, which may be hit even if the total number
 * of insns is less than 4K, but there are too many branches that change
 * stack/regs. The number of 'branches to be analyzed' is limited to 1k.
 *
 * On entry to each instruction, each register has a type, and the instruction
 * changes the types of the registers depending on instruction semantics.
 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
 * copied to R1.
 *
 * All registers are 64-bit.
 * R0 - return register
 * R1-R5 argument passing registers
 * R6-R9 callee saved registers
 * R10 - frame pointer read-only
 *
 * At the start of BPF program the register R1 contains a pointer to bpf_context
 * and has type PTR_TO_CTX.
 *
 * Verifier tracks arithmetic operations on pointers in case:
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
 * 1st insn copies R10 (which has FRAME_PTR) type into R1
 * and 2nd arithmetic instruction is pattern matched to recognize
 * that it wants to construct a pointer to some element within stack.
 * So after 2nd insn, the register R1 has type PTR_TO_STACK
 * (and -20 constant is saved for further stack bounds checking).
 * Meaning that this reg is a pointer to stack plus known immediate constant.
 *
 * Most of the time the registers have SCALAR_VALUE type, which
 * means the register has some value, but it's not a valid pointer.
 * (like pointer plus pointer becomes SCALAR_VALUE type)
 *
 * When verifier sees load or store instructions the type of base register
 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET. These are
 * four pointer types recognized by check_mem_access() function.
 *
 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
 * and the range of [ptr, ptr + map's value_size) is accessible.
 *
 * Registers used to pass values to function calls are checked against
 * function argument constraints.
 *
 * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
 * It means that the register type passed to this function must be
 * PTR_TO_STACK and it will be used inside the function as
 * 'pointer to map element key'.
 *
 * For example the argument constraints for bpf_map_lookup_elem():
 *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
 *   .arg1_type = ARG_CONST_MAP_PTR,
 *   .arg2_type = ARG_PTR_TO_MAP_KEY,
 *
 * ret_type says that this function returns 'pointer to map elem value or null'.
 * The function expects 1st argument to be a const pointer to 'struct bpf_map'
 * and 2nd argument should be a pointer to stack, which will be used inside
 * the helper function as a pointer to map element key.
 *
 * On the kernel side the helper function looks like:
 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 * {
 *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
 *    void *key = (void *) (unsigned long) r2;
 *    void *value;
 *
 *    here kernel can access 'key' and 'map' pointers safely, knowing that
 *    [key, key + map->key_size) bytes are valid and were initialized on
 *    the stack of eBPF program.
 * }
 *
 * Corresponding eBPF program may look like:
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 * here verifier looks at prototype of map_lookup_elem() and sees:
 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes.
 *
 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
 * and were initialized prior to this call.
 * If it's ok, then verifier allows this BPF_CALL insn and looks at
 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
 * returns either pointer to map value or NULL.
 *
 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
 * insn, the register holding that pointer in the true branch changes state to
 * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
 * branch. See check_cond_jmp_op().
 *
 * After the call R0 is set to return type of the function and registers R1-R5
 * are set to NOT_INIT to indicate that they are no longer readable.
 *
 * The following reference types represent a potential reference to a kernel
 * resource which, after first being allocated, must be checked and freed by
 * the BPF program:
 * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
 *
 * When the verifier sees a helper call return a reference type, it allocates a
 * pointer id for the reference and stores it in the current function state.
 * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into
 * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type
 * passes through a NULL-check conditional. For the branch wherein the state is
 * changed to CONST_IMM, the verifier releases the reference.
 *
 * For each helper function that allocates a reference, such as
 * bpf_sk_lookup_tcp(), there is a corresponding release function, such as
 * bpf_sk_release(). When a reference type passes into the release function,
 * the verifier also releases the reference. If any unchecked or unreleased
 * reference remains at the end of the program, the verifier rejects it.
 */

/* verifier_state + insn_idx are pushed to stack when branch is encountered */
struct bpf_verifier_stack_elem {
	/* verifier state is 'st'
	 * before processing instruction 'insn_idx'
	 * and after processing instruction 'prev_insn_idx'
	 */
	struct bpf_verifier_state st;
	int insn_idx;
	int prev_insn_idx;
	struct bpf_verifier_stack_elem *next;
	/* length of verifier log at the time this state was pushed on stack */
	u32 log_pos;
};

#define BPF_COMPLEXITY_LIMIT_JMP_SEQ	8192
#define BPF_COMPLEXITY_LIMIT_STATES	64

#define BPF_MAP_KEY_POISON	(1ULL << 63)
#define BPF_MAP_KEY_SEEN	(1ULL << 62)

#define BPF_MAP_PTR_UNPRIV	1UL
#define BPF_MAP_PTR_POISON	((void *)((0xeB9FUL << 1) +	\
					  POISON_POINTER_DELTA))
#define BPF_MAP_PTR(X)		((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))

static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
{
	return BPF_MAP_PTR(aux->map_ptr_state) == BPF_MAP_PTR_POISON;
}

static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
{
	return aux->map_ptr_state & BPF_MAP_PTR_UNPRIV;
}

static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
			      const struct bpf_map *map, bool unpriv)
{
	BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
	unpriv |= bpf_map_ptr_unpriv(aux);
	aux->map_ptr_state = (unsigned long)map |
			     (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
}

static bool bpf_map_key_poisoned(const struct bpf_insn_aux_data *aux)
{
	return aux->map_key_state & BPF_MAP_KEY_POISON;
}

static bool bpf_map_key_unseen(const struct bpf_insn_aux_data *aux)
{
	return !(aux->map_key_state & BPF_MAP_KEY_SEEN);
}

static u64 bpf_map_key_immediate(const struct bpf_insn_aux_data *aux)
{
	return aux->map_key_state & ~(BPF_MAP_KEY_SEEN | BPF_MAP_KEY_POISON);
}

static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state)
{
	bool poisoned = bpf_map_key_poisoned(aux);

	aux->map_key_state = state | BPF_MAP_KEY_SEEN |
			     (poisoned ? BPF_MAP_KEY_POISON : 0ULL);
}
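
/* The two helpers below classify BPF_CALL instructions by their src_reg
 * field. For reference (a rough sketch, not an exhaustive description of
 * the call encoding):
 *  - src_reg == 0                     -> call to a BPF helper; imm is the
 *                                        helper id (enum bpf_func_id)
 *  - src_reg == BPF_PSEUDO_CALL       -> BPF-to-BPF call; imm is the
 *                                        pc-relative offset of the callee,
 *                                        e.g. as emitted by BPF_CALL_REL()
 *  - src_reg == BPF_PSEUDO_KFUNC_CALL -> call to a kernel function; imm is
 *                                        the BTF id of the target function
 */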
static bool bpf_pseudo_call(const struct bpf_insn *insn)
{
	return insn->code == (BPF_JMP | BPF_CALL) &&
	       insn->src_reg == BPF_PSEUDO_CALL;
}

static bool bpf_pseudo_kfunc_call(const struct bpf_insn *insn)
{
	return insn->code == (BPF_JMP | BPF_CALL) &&
	       insn->src_reg == BPF_PSEUDO_KFUNC_CALL;
}

struct bpf_call_arg_meta {
	struct bpf_map *map_ptr;
	bool raw_mode;
	bool pkt_access;
	int regno;
	int access_size;
	int mem_size;
	u64 msize_max_value;
	int ref_obj_id;
	int map_uid;
	int func_id;
	struct btf *btf;
	u32 btf_id;
	struct btf *ret_btf;
	u32 ret_btf_id;
	u32 subprogno;
};

struct btf *btf_vmlinux;

static DEFINE_MUTEX(bpf_verifier_lock);

static const struct bpf_line_info *
find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
{
	const struct bpf_line_info *linfo;
	const struct bpf_prog *prog;
	u32 i, nr_linfo;

	prog = env->prog;
	nr_linfo = prog->aux->nr_linfo;

	if (!nr_linfo || insn_off >= prog->len)
		return NULL;

	linfo = prog->aux->linfo;
	for (i = 1; i < nr_linfo; i++)
		if (insn_off < linfo[i].insn_off)
			break;

	return &linfo[i - 1];
}

void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
		       va_list args)
{
	unsigned int n;

	n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);

	WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
		  "verifier log line truncated - local buffer too short\n");

	n = min(log->len_total - log->len_used - 1, n);
	log->kbuf[n] = '\0';

	if (log->level == BPF_LOG_KERNEL) {
		pr_err("BPF:%s\n", log->kbuf);
		return;
	}
	if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
		log->len_used += n;
	else
		log->ubuf = NULL;
}

static void bpf_vlog_reset(struct bpf_verifier_log *log, u32 new_pos)
{
	char zero = 0;

	if (!bpf_verifier_log_needed(log))
		return;

	log->len_used = new_pos;
	if (put_user(zero, log->ubuf + new_pos))
		log->ubuf = NULL;
}

/* log_level controls verbosity level of eBPF verifier.
 * bpf_verifier_log_write() is used to dump the verification trace to the log,
 * so the user can figure out what's wrong with the program
 */
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
					   const char *fmt, ...)
{
	va_list args;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(&env->log, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(bpf_verifier_log_write);

__printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
{
	struct bpf_verifier_env *env = private_data;
	va_list args;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(&env->log, fmt, args);
	va_end(args);
}

__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
			    const char *fmt, ...)
{
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(log, fmt, args);
	va_end(args);
}

static const char *ltrim(const char *s)
{
	while (isspace(*s))
		s++;

	return s;
}

__printf(3, 4) static void verbose_linfo(struct bpf_verifier_env *env,
					 u32 insn_off,
					 const char *prefix_fmt, ...)
376 { 377 const struct bpf_line_info *linfo; 378 379 if (!bpf_verifier_log_needed(&env->log)) 380 return; 381 382 linfo = find_linfo(env, insn_off); 383 if (!linfo || linfo == env->prev_linfo) 384 return; 385 386 if (prefix_fmt) { 387 va_list args; 388 389 va_start(args, prefix_fmt); 390 bpf_verifier_vlog(&env->log, prefix_fmt, args); 391 va_end(args); 392 } 393 394 verbose(env, "%s\n", 395 ltrim(btf_name_by_offset(env->prog->aux->btf, 396 linfo->line_off))); 397 398 env->prev_linfo = linfo; 399 } 400 401 static void verbose_invalid_scalar(struct bpf_verifier_env *env, 402 struct bpf_reg_state *reg, 403 struct tnum *range, const char *ctx, 404 const char *reg_name) 405 { 406 char tn_buf[48]; 407 408 verbose(env, "At %s the register %s ", ctx, reg_name); 409 if (!tnum_is_unknown(reg->var_off)) { 410 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 411 verbose(env, "has value %s", tn_buf); 412 } else { 413 verbose(env, "has unknown scalar value"); 414 } 415 tnum_strn(tn_buf, sizeof(tn_buf), *range); 416 verbose(env, " should have been in %s\n", tn_buf); 417 } 418 419 static bool type_is_pkt_pointer(enum bpf_reg_type type) 420 { 421 return type == PTR_TO_PACKET || 422 type == PTR_TO_PACKET_META; 423 } 424 425 static bool type_is_sk_pointer(enum bpf_reg_type type) 426 { 427 return type == PTR_TO_SOCKET || 428 type == PTR_TO_SOCK_COMMON || 429 type == PTR_TO_TCP_SOCK || 430 type == PTR_TO_XDP_SOCK; 431 } 432 433 static bool reg_type_not_null(enum bpf_reg_type type) 434 { 435 return type == PTR_TO_SOCKET || 436 type == PTR_TO_TCP_SOCK || 437 type == PTR_TO_MAP_VALUE || 438 type == PTR_TO_MAP_KEY || 439 type == PTR_TO_SOCK_COMMON; 440 } 441 442 static bool reg_type_may_be_null(enum bpf_reg_type type) 443 { 444 return type == PTR_TO_MAP_VALUE_OR_NULL || 445 type == PTR_TO_SOCKET_OR_NULL || 446 type == PTR_TO_SOCK_COMMON_OR_NULL || 447 type == PTR_TO_TCP_SOCK_OR_NULL || 448 type == PTR_TO_BTF_ID_OR_NULL || 449 type == PTR_TO_MEM_OR_NULL || 450 type == PTR_TO_RDONLY_BUF_OR_NULL || 451 type == PTR_TO_RDWR_BUF_OR_NULL; 452 } 453 454 static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg) 455 { 456 return reg->type == PTR_TO_MAP_VALUE && 457 map_value_has_spin_lock(reg->map_ptr); 458 } 459 460 static bool reg_type_may_be_refcounted_or_null(enum bpf_reg_type type) 461 { 462 return type == PTR_TO_SOCKET || 463 type == PTR_TO_SOCKET_OR_NULL || 464 type == PTR_TO_TCP_SOCK || 465 type == PTR_TO_TCP_SOCK_OR_NULL || 466 type == PTR_TO_MEM || 467 type == PTR_TO_MEM_OR_NULL; 468 } 469 470 static bool arg_type_may_be_refcounted(enum bpf_arg_type type) 471 { 472 return type == ARG_PTR_TO_SOCK_COMMON; 473 } 474 475 static bool arg_type_may_be_null(enum bpf_arg_type type) 476 { 477 return type == ARG_PTR_TO_MAP_VALUE_OR_NULL || 478 type == ARG_PTR_TO_MEM_OR_NULL || 479 type == ARG_PTR_TO_CTX_OR_NULL || 480 type == ARG_PTR_TO_SOCKET_OR_NULL || 481 type == ARG_PTR_TO_ALLOC_MEM_OR_NULL || 482 type == ARG_PTR_TO_STACK_OR_NULL; 483 } 484 485 /* Determine whether the function releases some resources allocated by another 486 * function call. The first reference type argument will be assumed to be 487 * released by release_reference(). 
488 */ 489 static bool is_release_function(enum bpf_func_id func_id) 490 { 491 return func_id == BPF_FUNC_sk_release || 492 func_id == BPF_FUNC_ringbuf_submit || 493 func_id == BPF_FUNC_ringbuf_discard; 494 } 495 496 static bool may_be_acquire_function(enum bpf_func_id func_id) 497 { 498 return func_id == BPF_FUNC_sk_lookup_tcp || 499 func_id == BPF_FUNC_sk_lookup_udp || 500 func_id == BPF_FUNC_skc_lookup_tcp || 501 func_id == BPF_FUNC_map_lookup_elem || 502 func_id == BPF_FUNC_ringbuf_reserve; 503 } 504 505 static bool is_acquire_function(enum bpf_func_id func_id, 506 const struct bpf_map *map) 507 { 508 enum bpf_map_type map_type = map ? map->map_type : BPF_MAP_TYPE_UNSPEC; 509 510 if (func_id == BPF_FUNC_sk_lookup_tcp || 511 func_id == BPF_FUNC_sk_lookup_udp || 512 func_id == BPF_FUNC_skc_lookup_tcp || 513 func_id == BPF_FUNC_ringbuf_reserve) 514 return true; 515 516 if (func_id == BPF_FUNC_map_lookup_elem && 517 (map_type == BPF_MAP_TYPE_SOCKMAP || 518 map_type == BPF_MAP_TYPE_SOCKHASH)) 519 return true; 520 521 return false; 522 } 523 524 static bool is_ptr_cast_function(enum bpf_func_id func_id) 525 { 526 return func_id == BPF_FUNC_tcp_sock || 527 func_id == BPF_FUNC_sk_fullsock || 528 func_id == BPF_FUNC_skc_to_tcp_sock || 529 func_id == BPF_FUNC_skc_to_tcp6_sock || 530 func_id == BPF_FUNC_skc_to_udp6_sock || 531 func_id == BPF_FUNC_skc_to_tcp_timewait_sock || 532 func_id == BPF_FUNC_skc_to_tcp_request_sock; 533 } 534 535 static bool is_cmpxchg_insn(const struct bpf_insn *insn) 536 { 537 return BPF_CLASS(insn->code) == BPF_STX && 538 BPF_MODE(insn->code) == BPF_ATOMIC && 539 insn->imm == BPF_CMPXCHG; 540 } 541 542 /* string representation of 'enum bpf_reg_type' */ 543 static const char * const reg_type_str[] = { 544 [NOT_INIT] = "?", 545 [SCALAR_VALUE] = "inv", 546 [PTR_TO_CTX] = "ctx", 547 [CONST_PTR_TO_MAP] = "map_ptr", 548 [PTR_TO_MAP_VALUE] = "map_value", 549 [PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null", 550 [PTR_TO_STACK] = "fp", 551 [PTR_TO_PACKET] = "pkt", 552 [PTR_TO_PACKET_META] = "pkt_meta", 553 [PTR_TO_PACKET_END] = "pkt_end", 554 [PTR_TO_FLOW_KEYS] = "flow_keys", 555 [PTR_TO_SOCKET] = "sock", 556 [PTR_TO_SOCKET_OR_NULL] = "sock_or_null", 557 [PTR_TO_SOCK_COMMON] = "sock_common", 558 [PTR_TO_SOCK_COMMON_OR_NULL] = "sock_common_or_null", 559 [PTR_TO_TCP_SOCK] = "tcp_sock", 560 [PTR_TO_TCP_SOCK_OR_NULL] = "tcp_sock_or_null", 561 [PTR_TO_TP_BUFFER] = "tp_buffer", 562 [PTR_TO_XDP_SOCK] = "xdp_sock", 563 [PTR_TO_BTF_ID] = "ptr_", 564 [PTR_TO_BTF_ID_OR_NULL] = "ptr_or_null_", 565 [PTR_TO_PERCPU_BTF_ID] = "percpu_ptr_", 566 [PTR_TO_MEM] = "mem", 567 [PTR_TO_MEM_OR_NULL] = "mem_or_null", 568 [PTR_TO_RDONLY_BUF] = "rdonly_buf", 569 [PTR_TO_RDONLY_BUF_OR_NULL] = "rdonly_buf_or_null", 570 [PTR_TO_RDWR_BUF] = "rdwr_buf", 571 [PTR_TO_RDWR_BUF_OR_NULL] = "rdwr_buf_or_null", 572 [PTR_TO_FUNC] = "func", 573 [PTR_TO_MAP_KEY] = "map_key", 574 }; 575 576 static char slot_type_char[] = { 577 [STACK_INVALID] = '?', 578 [STACK_SPILL] = 'r', 579 [STACK_MISC] = 'm', 580 [STACK_ZERO] = '0', 581 }; 582 583 static void print_liveness(struct bpf_verifier_env *env, 584 enum bpf_reg_liveness live) 585 { 586 if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE)) 587 verbose(env, "_"); 588 if (live & REG_LIVE_READ) 589 verbose(env, "r"); 590 if (live & REG_LIVE_WRITTEN) 591 verbose(env, "w"); 592 if (live & REG_LIVE_DONE) 593 verbose(env, "D"); 594 } 595 596 static struct bpf_func_state *func(struct bpf_verifier_env *env, 597 const struct bpf_reg_state *reg) 598 { 599 struct 
bpf_verifier_state *cur = env->cur_state; 600 601 return cur->frame[reg->frameno]; 602 } 603 604 static const char *kernel_type_name(const struct btf* btf, u32 id) 605 { 606 return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off); 607 } 608 609 /* The reg state of a pointer or a bounded scalar was saved when 610 * it was spilled to the stack. 611 */ 612 static bool is_spilled_reg(const struct bpf_stack_state *stack) 613 { 614 return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL; 615 } 616 617 static void scrub_spilled_slot(u8 *stype) 618 { 619 if (*stype != STACK_INVALID) 620 *stype = STACK_MISC; 621 } 622 623 static void print_verifier_state(struct bpf_verifier_env *env, 624 const struct bpf_func_state *state) 625 { 626 const struct bpf_reg_state *reg; 627 enum bpf_reg_type t; 628 int i; 629 630 if (state->frameno) 631 verbose(env, " frame%d:", state->frameno); 632 for (i = 0; i < MAX_BPF_REG; i++) { 633 reg = &state->regs[i]; 634 t = reg->type; 635 if (t == NOT_INIT) 636 continue; 637 verbose(env, " R%d", i); 638 print_liveness(env, reg->live); 639 verbose(env, "=%s", reg_type_str[t]); 640 if (t == SCALAR_VALUE && reg->precise) 641 verbose(env, "P"); 642 if ((t == SCALAR_VALUE || t == PTR_TO_STACK) && 643 tnum_is_const(reg->var_off)) { 644 /* reg->off should be 0 for SCALAR_VALUE */ 645 verbose(env, "%lld", reg->var_off.value + reg->off); 646 } else { 647 if (t == PTR_TO_BTF_ID || 648 t == PTR_TO_BTF_ID_OR_NULL || 649 t == PTR_TO_PERCPU_BTF_ID) 650 verbose(env, "%s", kernel_type_name(reg->btf, reg->btf_id)); 651 verbose(env, "(id=%d", reg->id); 652 if (reg_type_may_be_refcounted_or_null(t)) 653 verbose(env, ",ref_obj_id=%d", reg->ref_obj_id); 654 if (t != SCALAR_VALUE) 655 verbose(env, ",off=%d", reg->off); 656 if (type_is_pkt_pointer(t)) 657 verbose(env, ",r=%d", reg->range); 658 else if (t == CONST_PTR_TO_MAP || 659 t == PTR_TO_MAP_KEY || 660 t == PTR_TO_MAP_VALUE || 661 t == PTR_TO_MAP_VALUE_OR_NULL) 662 verbose(env, ",ks=%d,vs=%d", 663 reg->map_ptr->key_size, 664 reg->map_ptr->value_size); 665 if (tnum_is_const(reg->var_off)) { 666 /* Typically an immediate SCALAR_VALUE, but 667 * could be a pointer whose offset is too big 668 * for reg->off 669 */ 670 verbose(env, ",imm=%llx", reg->var_off.value); 671 } else { 672 if (reg->smin_value != reg->umin_value && 673 reg->smin_value != S64_MIN) 674 verbose(env, ",smin_value=%lld", 675 (long long)reg->smin_value); 676 if (reg->smax_value != reg->umax_value && 677 reg->smax_value != S64_MAX) 678 verbose(env, ",smax_value=%lld", 679 (long long)reg->smax_value); 680 if (reg->umin_value != 0) 681 verbose(env, ",umin_value=%llu", 682 (unsigned long long)reg->umin_value); 683 if (reg->umax_value != U64_MAX) 684 verbose(env, ",umax_value=%llu", 685 (unsigned long long)reg->umax_value); 686 if (!tnum_is_unknown(reg->var_off)) { 687 char tn_buf[48]; 688 689 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 690 verbose(env, ",var_off=%s", tn_buf); 691 } 692 if (reg->s32_min_value != reg->smin_value && 693 reg->s32_min_value != S32_MIN) 694 verbose(env, ",s32_min_value=%d", 695 (int)(reg->s32_min_value)); 696 if (reg->s32_max_value != reg->smax_value && 697 reg->s32_max_value != S32_MAX) 698 verbose(env, ",s32_max_value=%d", 699 (int)(reg->s32_max_value)); 700 if (reg->u32_min_value != reg->umin_value && 701 reg->u32_min_value != U32_MIN) 702 verbose(env, ",u32_min_value=%d", 703 (int)(reg->u32_min_value)); 704 if (reg->u32_max_value != reg->umax_value && 705 reg->u32_max_value != U32_MAX) 706 verbose(env, ",u32_max_value=%d", 707 
(int)(reg->u32_max_value)); 708 } 709 verbose(env, ")"); 710 } 711 } 712 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { 713 char types_buf[BPF_REG_SIZE + 1]; 714 bool valid = false; 715 int j; 716 717 for (j = 0; j < BPF_REG_SIZE; j++) { 718 if (state->stack[i].slot_type[j] != STACK_INVALID) 719 valid = true; 720 types_buf[j] = slot_type_char[ 721 state->stack[i].slot_type[j]]; 722 } 723 types_buf[BPF_REG_SIZE] = 0; 724 if (!valid) 725 continue; 726 verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE); 727 print_liveness(env, state->stack[i].spilled_ptr.live); 728 if (is_spilled_reg(&state->stack[i])) { 729 reg = &state->stack[i].spilled_ptr; 730 t = reg->type; 731 verbose(env, "=%s", reg_type_str[t]); 732 if (t == SCALAR_VALUE && reg->precise) 733 verbose(env, "P"); 734 if (t == SCALAR_VALUE && tnum_is_const(reg->var_off)) 735 verbose(env, "%lld", reg->var_off.value + reg->off); 736 } else { 737 verbose(env, "=%s", types_buf); 738 } 739 } 740 if (state->acquired_refs && state->refs[0].id) { 741 verbose(env, " refs=%d", state->refs[0].id); 742 for (i = 1; i < state->acquired_refs; i++) 743 if (state->refs[i].id) 744 verbose(env, ",%d", state->refs[i].id); 745 } 746 if (state->in_callback_fn) 747 verbose(env, " cb"); 748 if (state->in_async_callback_fn) 749 verbose(env, " async_cb"); 750 verbose(env, "\n"); 751 } 752 753 /* copy array src of length n * size bytes to dst. dst is reallocated if it's too 754 * small to hold src. This is different from krealloc since we don't want to preserve 755 * the contents of dst. 756 * 757 * Leaves dst untouched if src is NULL or length is zero. Returns NULL if memory could 758 * not be allocated. 759 */ 760 static void *copy_array(void *dst, const void *src, size_t n, size_t size, gfp_t flags) 761 { 762 size_t bytes; 763 764 if (ZERO_OR_NULL_PTR(src)) 765 goto out; 766 767 if (unlikely(check_mul_overflow(n, size, &bytes))) 768 return NULL; 769 770 if (ksize(dst) < bytes) { 771 kfree(dst); 772 dst = kmalloc_track_caller(bytes, flags); 773 if (!dst) 774 return NULL; 775 } 776 777 memcpy(dst, src, bytes); 778 out: 779 return dst ? dst : ZERO_SIZE_PTR; 780 } 781 782 /* resize an array from old_n items to new_n items. the array is reallocated if it's too 783 * small to hold new_n items. new items are zeroed out if the array grows. 784 * 785 * Contrary to krealloc_array, does not free arr if new_n is zero. 786 */ 787 static void *realloc_array(void *arr, size_t old_n, size_t new_n, size_t size) 788 { 789 if (!new_n || old_n == new_n) 790 goto out; 791 792 arr = krealloc_array(arr, new_n, size, GFP_KERNEL); 793 if (!arr) 794 return NULL; 795 796 if (new_n > old_n) 797 memset(arr + old_n * size, 0, (new_n - old_n) * size); 798 799 out: 800 return arr ? 
arr : ZERO_SIZE_PTR; 801 } 802 803 static int copy_reference_state(struct bpf_func_state *dst, const struct bpf_func_state *src) 804 { 805 dst->refs = copy_array(dst->refs, src->refs, src->acquired_refs, 806 sizeof(struct bpf_reference_state), GFP_KERNEL); 807 if (!dst->refs) 808 return -ENOMEM; 809 810 dst->acquired_refs = src->acquired_refs; 811 return 0; 812 } 813 814 static int copy_stack_state(struct bpf_func_state *dst, const struct bpf_func_state *src) 815 { 816 size_t n = src->allocated_stack / BPF_REG_SIZE; 817 818 dst->stack = copy_array(dst->stack, src->stack, n, sizeof(struct bpf_stack_state), 819 GFP_KERNEL); 820 if (!dst->stack) 821 return -ENOMEM; 822 823 dst->allocated_stack = src->allocated_stack; 824 return 0; 825 } 826 827 static int resize_reference_state(struct bpf_func_state *state, size_t n) 828 { 829 state->refs = realloc_array(state->refs, state->acquired_refs, n, 830 sizeof(struct bpf_reference_state)); 831 if (!state->refs) 832 return -ENOMEM; 833 834 state->acquired_refs = n; 835 return 0; 836 } 837 838 static int grow_stack_state(struct bpf_func_state *state, int size) 839 { 840 size_t old_n = state->allocated_stack / BPF_REG_SIZE, n = size / BPF_REG_SIZE; 841 842 if (old_n >= n) 843 return 0; 844 845 state->stack = realloc_array(state->stack, old_n, n, sizeof(struct bpf_stack_state)); 846 if (!state->stack) 847 return -ENOMEM; 848 849 state->allocated_stack = size; 850 return 0; 851 } 852 853 /* Acquire a pointer id from the env and update the state->refs to include 854 * this new pointer reference. 855 * On success, returns a valid pointer id to associate with the register 856 * On failure, returns a negative errno. 857 */ 858 static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx) 859 { 860 struct bpf_func_state *state = cur_func(env); 861 int new_ofs = state->acquired_refs; 862 int id, err; 863 864 err = resize_reference_state(state, state->acquired_refs + 1); 865 if (err) 866 return err; 867 id = ++env->id_gen; 868 state->refs[new_ofs].id = id; 869 state->refs[new_ofs].insn_idx = insn_idx; 870 871 return id; 872 } 873 874 /* release function corresponding to acquire_reference_state(). Idempotent. 
*/ 875 static int release_reference_state(struct bpf_func_state *state, int ptr_id) 876 { 877 int i, last_idx; 878 879 last_idx = state->acquired_refs - 1; 880 for (i = 0; i < state->acquired_refs; i++) { 881 if (state->refs[i].id == ptr_id) { 882 if (last_idx && i != last_idx) 883 memcpy(&state->refs[i], &state->refs[last_idx], 884 sizeof(*state->refs)); 885 memset(&state->refs[last_idx], 0, sizeof(*state->refs)); 886 state->acquired_refs--; 887 return 0; 888 } 889 } 890 return -EINVAL; 891 } 892 893 static void free_func_state(struct bpf_func_state *state) 894 { 895 if (!state) 896 return; 897 kfree(state->refs); 898 kfree(state->stack); 899 kfree(state); 900 } 901 902 static void clear_jmp_history(struct bpf_verifier_state *state) 903 { 904 kfree(state->jmp_history); 905 state->jmp_history = NULL; 906 state->jmp_history_cnt = 0; 907 } 908 909 static void free_verifier_state(struct bpf_verifier_state *state, 910 bool free_self) 911 { 912 int i; 913 914 for (i = 0; i <= state->curframe; i++) { 915 free_func_state(state->frame[i]); 916 state->frame[i] = NULL; 917 } 918 clear_jmp_history(state); 919 if (free_self) 920 kfree(state); 921 } 922 923 /* copy verifier state from src to dst growing dst stack space 924 * when necessary to accommodate larger src stack 925 */ 926 static int copy_func_state(struct bpf_func_state *dst, 927 const struct bpf_func_state *src) 928 { 929 int err; 930 931 memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs)); 932 err = copy_reference_state(dst, src); 933 if (err) 934 return err; 935 return copy_stack_state(dst, src); 936 } 937 938 static int copy_verifier_state(struct bpf_verifier_state *dst_state, 939 const struct bpf_verifier_state *src) 940 { 941 struct bpf_func_state *dst; 942 int i, err; 943 944 dst_state->jmp_history = copy_array(dst_state->jmp_history, src->jmp_history, 945 src->jmp_history_cnt, sizeof(struct bpf_idx_pair), 946 GFP_USER); 947 if (!dst_state->jmp_history) 948 return -ENOMEM; 949 dst_state->jmp_history_cnt = src->jmp_history_cnt; 950 951 /* if dst has more stack frames then src frame, free them */ 952 for (i = src->curframe + 1; i <= dst_state->curframe; i++) { 953 free_func_state(dst_state->frame[i]); 954 dst_state->frame[i] = NULL; 955 } 956 dst_state->speculative = src->speculative; 957 dst_state->curframe = src->curframe; 958 dst_state->active_spin_lock = src->active_spin_lock; 959 dst_state->branches = src->branches; 960 dst_state->parent = src->parent; 961 dst_state->first_insn_idx = src->first_insn_idx; 962 dst_state->last_insn_idx = src->last_insn_idx; 963 for (i = 0; i <= src->curframe; i++) { 964 dst = dst_state->frame[i]; 965 if (!dst) { 966 dst = kzalloc(sizeof(*dst), GFP_KERNEL); 967 if (!dst) 968 return -ENOMEM; 969 dst_state->frame[i] = dst; 970 } 971 err = copy_func_state(dst, src->frame[i]); 972 if (err) 973 return err; 974 } 975 return 0; 976 } 977 978 static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st) 979 { 980 while (st) { 981 u32 br = --st->branches; 982 983 /* WARN_ON(br > 1) technically makes sense here, 984 * but see comment in push_stack(), hence: 985 */ 986 WARN_ONCE((int)br < 0, 987 "BUG update_branch_counts:branches_to_explore=%d\n", 988 br); 989 if (br) 990 break; 991 st = st->parent; 992 } 993 } 994 995 static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx, 996 int *insn_idx, bool pop_log) 997 { 998 struct bpf_verifier_state *cur = env->cur_state; 999 struct bpf_verifier_stack_elem *elem, *head = env->head; 1000 int err; 1001 1002 if 
(env->head == NULL) 1003 return -ENOENT; 1004 1005 if (cur) { 1006 err = copy_verifier_state(cur, &head->st); 1007 if (err) 1008 return err; 1009 } 1010 if (pop_log) 1011 bpf_vlog_reset(&env->log, head->log_pos); 1012 if (insn_idx) 1013 *insn_idx = head->insn_idx; 1014 if (prev_insn_idx) 1015 *prev_insn_idx = head->prev_insn_idx; 1016 elem = head->next; 1017 free_verifier_state(&head->st, false); 1018 kfree(head); 1019 env->head = elem; 1020 env->stack_size--; 1021 return 0; 1022 } 1023 1024 static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env, 1025 int insn_idx, int prev_insn_idx, 1026 bool speculative) 1027 { 1028 struct bpf_verifier_state *cur = env->cur_state; 1029 struct bpf_verifier_stack_elem *elem; 1030 int err; 1031 1032 elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL); 1033 if (!elem) 1034 goto err; 1035 1036 elem->insn_idx = insn_idx; 1037 elem->prev_insn_idx = prev_insn_idx; 1038 elem->next = env->head; 1039 elem->log_pos = env->log.len_used; 1040 env->head = elem; 1041 env->stack_size++; 1042 err = copy_verifier_state(&elem->st, cur); 1043 if (err) 1044 goto err; 1045 elem->st.speculative |= speculative; 1046 if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) { 1047 verbose(env, "The sequence of %d jumps is too complex.\n", 1048 env->stack_size); 1049 goto err; 1050 } 1051 if (elem->st.parent) { 1052 ++elem->st.parent->branches; 1053 /* WARN_ON(branches > 2) technically makes sense here, 1054 * but 1055 * 1. speculative states will bump 'branches' for non-branch 1056 * instructions 1057 * 2. is_state_visited() heuristics may decide not to create 1058 * a new state for a sequence of branches and all such current 1059 * and cloned states will be pointing to a single parent state 1060 * which might have large 'branches' count. 1061 */ 1062 } 1063 return &elem->st; 1064 err: 1065 free_verifier_state(env->cur_state, true); 1066 env->cur_state = NULL; 1067 /* pop all elements and return */ 1068 while (!pop_stack(env, NULL, NULL, false)); 1069 return NULL; 1070 } 1071 1072 #define CALLER_SAVED_REGS 6 1073 static const int caller_saved[CALLER_SAVED_REGS] = { 1074 BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5 1075 }; 1076 1077 static void __mark_reg_not_init(const struct bpf_verifier_env *env, 1078 struct bpf_reg_state *reg); 1079 1080 /* This helper doesn't clear reg->id */ 1081 static void ___mark_reg_known(struct bpf_reg_state *reg, u64 imm) 1082 { 1083 reg->var_off = tnum_const(imm); 1084 reg->smin_value = (s64)imm; 1085 reg->smax_value = (s64)imm; 1086 reg->umin_value = imm; 1087 reg->umax_value = imm; 1088 1089 reg->s32_min_value = (s32)imm; 1090 reg->s32_max_value = (s32)imm; 1091 reg->u32_min_value = (u32)imm; 1092 reg->u32_max_value = (u32)imm; 1093 } 1094 1095 /* Mark the unknown part of a register (variable offset or scalar value) as 1096 * known to have the value @imm. 1097 */ 1098 static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm) 1099 { 1100 /* Clear id, off, and union(map_ptr, range) */ 1101 memset(((u8 *)reg) + sizeof(reg->type), 0, 1102 offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type)); 1103 ___mark_reg_known(reg, imm); 1104 } 1105 1106 static void __mark_reg32_known(struct bpf_reg_state *reg, u64 imm) 1107 { 1108 reg->var_off = tnum_const_subreg(reg->var_off, imm); 1109 reg->s32_min_value = (s32)imm; 1110 reg->s32_max_value = (s32)imm; 1111 reg->u32_min_value = (u32)imm; 1112 reg->u32_max_value = (u32)imm; 1113 } 1114 1115 /* Mark the 'variable offset' part of a register as zero. 
This should be 1116 * used only on registers holding a pointer type. 1117 */ 1118 static void __mark_reg_known_zero(struct bpf_reg_state *reg) 1119 { 1120 __mark_reg_known(reg, 0); 1121 } 1122 1123 static void __mark_reg_const_zero(struct bpf_reg_state *reg) 1124 { 1125 __mark_reg_known(reg, 0); 1126 reg->type = SCALAR_VALUE; 1127 } 1128 1129 static void mark_reg_known_zero(struct bpf_verifier_env *env, 1130 struct bpf_reg_state *regs, u32 regno) 1131 { 1132 if (WARN_ON(regno >= MAX_BPF_REG)) { 1133 verbose(env, "mark_reg_known_zero(regs, %u)\n", regno); 1134 /* Something bad happened, let's kill all regs */ 1135 for (regno = 0; regno < MAX_BPF_REG; regno++) 1136 __mark_reg_not_init(env, regs + regno); 1137 return; 1138 } 1139 __mark_reg_known_zero(regs + regno); 1140 } 1141 1142 static void mark_ptr_not_null_reg(struct bpf_reg_state *reg) 1143 { 1144 switch (reg->type) { 1145 case PTR_TO_MAP_VALUE_OR_NULL: { 1146 const struct bpf_map *map = reg->map_ptr; 1147 1148 if (map->inner_map_meta) { 1149 reg->type = CONST_PTR_TO_MAP; 1150 reg->map_ptr = map->inner_map_meta; 1151 /* transfer reg's id which is unique for every map_lookup_elem 1152 * as UID of the inner map. 1153 */ 1154 if (map_value_has_timer(map->inner_map_meta)) 1155 reg->map_uid = reg->id; 1156 } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) { 1157 reg->type = PTR_TO_XDP_SOCK; 1158 } else if (map->map_type == BPF_MAP_TYPE_SOCKMAP || 1159 map->map_type == BPF_MAP_TYPE_SOCKHASH) { 1160 reg->type = PTR_TO_SOCKET; 1161 } else { 1162 reg->type = PTR_TO_MAP_VALUE; 1163 } 1164 break; 1165 } 1166 case PTR_TO_SOCKET_OR_NULL: 1167 reg->type = PTR_TO_SOCKET; 1168 break; 1169 case PTR_TO_SOCK_COMMON_OR_NULL: 1170 reg->type = PTR_TO_SOCK_COMMON; 1171 break; 1172 case PTR_TO_TCP_SOCK_OR_NULL: 1173 reg->type = PTR_TO_TCP_SOCK; 1174 break; 1175 case PTR_TO_BTF_ID_OR_NULL: 1176 reg->type = PTR_TO_BTF_ID; 1177 break; 1178 case PTR_TO_MEM_OR_NULL: 1179 reg->type = PTR_TO_MEM; 1180 break; 1181 case PTR_TO_RDONLY_BUF_OR_NULL: 1182 reg->type = PTR_TO_RDONLY_BUF; 1183 break; 1184 case PTR_TO_RDWR_BUF_OR_NULL: 1185 reg->type = PTR_TO_RDWR_BUF; 1186 break; 1187 default: 1188 WARN_ONCE(1, "unknown nullable register type"); 1189 } 1190 } 1191 1192 static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg) 1193 { 1194 return type_is_pkt_pointer(reg->type); 1195 } 1196 1197 static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg) 1198 { 1199 return reg_is_pkt_pointer(reg) || 1200 reg->type == PTR_TO_PACKET_END; 1201 } 1202 1203 /* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */ 1204 static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg, 1205 enum bpf_reg_type which) 1206 { 1207 /* The register can already have a range from prior markings. 1208 * This is fine as long as it hasn't been advanced from its 1209 * origin. 
1210 */ 1211 return reg->type == which && 1212 reg->id == 0 && 1213 reg->off == 0 && 1214 tnum_equals_const(reg->var_off, 0); 1215 } 1216 1217 /* Reset the min/max bounds of a register */ 1218 static void __mark_reg_unbounded(struct bpf_reg_state *reg) 1219 { 1220 reg->smin_value = S64_MIN; 1221 reg->smax_value = S64_MAX; 1222 reg->umin_value = 0; 1223 reg->umax_value = U64_MAX; 1224 1225 reg->s32_min_value = S32_MIN; 1226 reg->s32_max_value = S32_MAX; 1227 reg->u32_min_value = 0; 1228 reg->u32_max_value = U32_MAX; 1229 } 1230 1231 static void __mark_reg64_unbounded(struct bpf_reg_state *reg) 1232 { 1233 reg->smin_value = S64_MIN; 1234 reg->smax_value = S64_MAX; 1235 reg->umin_value = 0; 1236 reg->umax_value = U64_MAX; 1237 } 1238 1239 static void __mark_reg32_unbounded(struct bpf_reg_state *reg) 1240 { 1241 reg->s32_min_value = S32_MIN; 1242 reg->s32_max_value = S32_MAX; 1243 reg->u32_min_value = 0; 1244 reg->u32_max_value = U32_MAX; 1245 } 1246 1247 static void __update_reg32_bounds(struct bpf_reg_state *reg) 1248 { 1249 struct tnum var32_off = tnum_subreg(reg->var_off); 1250 1251 /* min signed is max(sign bit) | min(other bits) */ 1252 reg->s32_min_value = max_t(s32, reg->s32_min_value, 1253 var32_off.value | (var32_off.mask & S32_MIN)); 1254 /* max signed is min(sign bit) | max(other bits) */ 1255 reg->s32_max_value = min_t(s32, reg->s32_max_value, 1256 var32_off.value | (var32_off.mask & S32_MAX)); 1257 reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)var32_off.value); 1258 reg->u32_max_value = min(reg->u32_max_value, 1259 (u32)(var32_off.value | var32_off.mask)); 1260 } 1261 1262 static void __update_reg64_bounds(struct bpf_reg_state *reg) 1263 { 1264 /* min signed is max(sign bit) | min(other bits) */ 1265 reg->smin_value = max_t(s64, reg->smin_value, 1266 reg->var_off.value | (reg->var_off.mask & S64_MIN)); 1267 /* max signed is min(sign bit) | max(other bits) */ 1268 reg->smax_value = min_t(s64, reg->smax_value, 1269 reg->var_off.value | (reg->var_off.mask & S64_MAX)); 1270 reg->umin_value = max(reg->umin_value, reg->var_off.value); 1271 reg->umax_value = min(reg->umax_value, 1272 reg->var_off.value | reg->var_off.mask); 1273 } 1274 1275 static void __update_reg_bounds(struct bpf_reg_state *reg) 1276 { 1277 __update_reg32_bounds(reg); 1278 __update_reg64_bounds(reg); 1279 } 1280 1281 /* Uses signed min/max values to inform unsigned, and vice-versa */ 1282 static void __reg32_deduce_bounds(struct bpf_reg_state *reg) 1283 { 1284 /* Learn sign from signed bounds. 1285 * If we cannot cross the sign boundary, then signed and unsigned bounds 1286 * are the same, so combine. This works even in the negative case, e.g. 1287 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff. 1288 */ 1289 if (reg->s32_min_value >= 0 || reg->s32_max_value < 0) { 1290 reg->s32_min_value = reg->u32_min_value = 1291 max_t(u32, reg->s32_min_value, reg->u32_min_value); 1292 reg->s32_max_value = reg->u32_max_value = 1293 min_t(u32, reg->s32_max_value, reg->u32_max_value); 1294 return; 1295 } 1296 /* Learn sign from unsigned bounds. Signed bounds cross the sign 1297 * boundary, so we must be careful. 1298 */ 1299 if ((s32)reg->u32_max_value >= 0) { 1300 /* Positive. We can't learn anything from the smin, but smax 1301 * is positive, hence safe. 1302 */ 1303 reg->s32_min_value = reg->u32_min_value; 1304 reg->s32_max_value = reg->u32_max_value = 1305 min_t(u32, reg->s32_max_value, reg->u32_max_value); 1306 } else if ((s32)reg->u32_min_value < 0) { 1307 /* Negative. 
		 * We can't learn anything from the smax, but smin
		 * is negative, hence safe.
		 */
		reg->s32_min_value = reg->u32_min_value =
			max_t(u32, reg->s32_min_value, reg->u32_min_value);
		reg->s32_max_value = reg->u32_max_value;
	}
}

static void __reg64_deduce_bounds(struct bpf_reg_state *reg)
{
	/* Learn sign from signed bounds.
	 * If we cannot cross the sign boundary, then signed and unsigned bounds
	 * are the same, so combine. This works even in the negative case, e.g.
	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
	 */
	if (reg->smin_value >= 0 || reg->smax_value < 0) {
		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
							  reg->umin_value);
		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
							  reg->umax_value);
		return;
	}
	/* Learn sign from unsigned bounds. Signed bounds cross the sign
	 * boundary, so we must be careful.
	 */
	if ((s64)reg->umax_value >= 0) {
		/* Positive. We can't learn anything from the smin, but smax
		 * is positive, hence safe.
		 */
		reg->smin_value = reg->umin_value;
		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
							  reg->umax_value);
	} else if ((s64)reg->umin_value < 0) {
		/* Negative. We can't learn anything from the smax, but smin
		 * is negative, hence safe.
		 */
		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
							  reg->umin_value);
		reg->smax_value = reg->umax_value;
	}
}

static void __reg_deduce_bounds(struct bpf_reg_state *reg)
{
	__reg32_deduce_bounds(reg);
	__reg64_deduce_bounds(reg);
}

/* Attempts to improve var_off based on unsigned min/max information */
static void __reg_bound_offset(struct bpf_reg_state *reg)
{
	struct tnum var64_off = tnum_intersect(reg->var_off,
					       tnum_range(reg->umin_value,
							  reg->umax_value));
	struct tnum var32_off = tnum_intersect(tnum_subreg(reg->var_off),
					       tnum_range(reg->u32_min_value,
							  reg->u32_max_value));

	reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off);
}

static void __reg_assign_32_into_64(struct bpf_reg_state *reg)
{
	reg->umin_value = reg->u32_min_value;
	reg->umax_value = reg->u32_max_value;
	/* Attempt to pull 32-bit signed bounds into 64-bit bounds, but the
	 * 32-bit bounds must be positive; otherwise set to the worst-case
	 * bounds and refine later from tnum.
	 */
	if (reg->s32_min_value >= 0 && reg->s32_max_value >= 0)
		reg->smax_value = reg->s32_max_value;
	else
		reg->smax_value = U32_MAX;
	if (reg->s32_min_value >= 0)
		reg->smin_value = reg->s32_min_value;
	else
		reg->smin_value = 0;
}

static void __reg_combine_32_into_64(struct bpf_reg_state *reg)
{
	/* special case when 64-bit register has upper 32-bit register
	 * zeroed. Typically happens after zext or <<32, >>32 sequence
	 * allowing us to use 32-bit bounds directly.
	 */
	if (tnum_equals_const(tnum_clear_subreg(reg->var_off), 0)) {
		__reg_assign_32_into_64(reg);
	} else {
		/* Otherwise the best we can do is push lower 32bit known and
		 * unknown bits into register (var_off set from jmp logic)
		 * then learn as much as possible from the 64-bit tnum
		 * known and unknown bits. The previous smin/smax bounds are
		 * invalid here because of jmp32 compare so mark them unknown
		 * so they do not impact tnum bounds calculation.
1402 */ 1403 __mark_reg64_unbounded(reg); 1404 __update_reg_bounds(reg); 1405 } 1406 1407 /* Intersecting with the old var_off might have improved our bounds 1408 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), 1409 * then new var_off is (0; 0x7f...fc) which improves our umax. 1410 */ 1411 __reg_deduce_bounds(reg); 1412 __reg_bound_offset(reg); 1413 __update_reg_bounds(reg); 1414 } 1415 1416 static bool __reg64_bound_s32(s64 a) 1417 { 1418 return a >= S32_MIN && a <= S32_MAX; 1419 } 1420 1421 static bool __reg64_bound_u32(u64 a) 1422 { 1423 return a >= U32_MIN && a <= U32_MAX; 1424 } 1425 1426 static void __reg_combine_64_into_32(struct bpf_reg_state *reg) 1427 { 1428 __mark_reg32_unbounded(reg); 1429 1430 if (__reg64_bound_s32(reg->smin_value) && __reg64_bound_s32(reg->smax_value)) { 1431 reg->s32_min_value = (s32)reg->smin_value; 1432 reg->s32_max_value = (s32)reg->smax_value; 1433 } 1434 if (__reg64_bound_u32(reg->umin_value) && __reg64_bound_u32(reg->umax_value)) { 1435 reg->u32_min_value = (u32)reg->umin_value; 1436 reg->u32_max_value = (u32)reg->umax_value; 1437 } 1438 1439 /* Intersecting with the old var_off might have improved our bounds 1440 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), 1441 * then new var_off is (0; 0x7f...fc) which improves our umax. 1442 */ 1443 __reg_deduce_bounds(reg); 1444 __reg_bound_offset(reg); 1445 __update_reg_bounds(reg); 1446 } 1447 1448 /* Mark a register as having a completely unknown (scalar) value. */ 1449 static void __mark_reg_unknown(const struct bpf_verifier_env *env, 1450 struct bpf_reg_state *reg) 1451 { 1452 /* 1453 * Clear type, id, off, and union(map_ptr, range) and 1454 * padding between 'type' and union 1455 */ 1456 memset(reg, 0, offsetof(struct bpf_reg_state, var_off)); 1457 reg->type = SCALAR_VALUE; 1458 reg->var_off = tnum_unknown; 1459 reg->frameno = 0; 1460 reg->precise = env->subprog_cnt > 1 || !env->bpf_capable; 1461 __mark_reg_unbounded(reg); 1462 } 1463 1464 static void mark_reg_unknown(struct bpf_verifier_env *env, 1465 struct bpf_reg_state *regs, u32 regno) 1466 { 1467 if (WARN_ON(regno >= MAX_BPF_REG)) { 1468 verbose(env, "mark_reg_unknown(regs, %u)\n", regno); 1469 /* Something bad happened, let's kill all regs except FP */ 1470 for (regno = 0; regno < BPF_REG_FP; regno++) 1471 __mark_reg_not_init(env, regs + regno); 1472 return; 1473 } 1474 __mark_reg_unknown(env, regs + regno); 1475 } 1476 1477 static void __mark_reg_not_init(const struct bpf_verifier_env *env, 1478 struct bpf_reg_state *reg) 1479 { 1480 __mark_reg_unknown(env, reg); 1481 reg->type = NOT_INIT; 1482 } 1483 1484 static void mark_reg_not_init(struct bpf_verifier_env *env, 1485 struct bpf_reg_state *regs, u32 regno) 1486 { 1487 if (WARN_ON(regno >= MAX_BPF_REG)) { 1488 verbose(env, "mark_reg_not_init(regs, %u)\n", regno); 1489 /* Something bad happened, let's kill all regs except FP */ 1490 for (regno = 0; regno < BPF_REG_FP; regno++) 1491 __mark_reg_not_init(env, regs + regno); 1492 return; 1493 } 1494 __mark_reg_not_init(env, regs + regno); 1495 } 1496 1497 static void mark_btf_ld_reg(struct bpf_verifier_env *env, 1498 struct bpf_reg_state *regs, u32 regno, 1499 enum bpf_reg_type reg_type, 1500 struct btf *btf, u32 btf_id) 1501 { 1502 if (reg_type == SCALAR_VALUE) { 1503 mark_reg_unknown(env, regs, regno); 1504 return; 1505 } 1506 mark_reg_known_zero(env, regs, regno); 1507 regs[regno].type = PTR_TO_BTF_ID; 1508 regs[regno].btf = btf; 1509 regs[regno].btf_id = btf_id; 1510 } 1511 1512 #define DEF_NOT_SUBREG (0) 
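
/* A note on subreg_def (initialized below): DEF_NOT_SUBREG means the register
 * was last defined by a full 64-bit write. A non-zero value records the
 * instruction that wrote only the low 32 bits, which the later zero-extension
 * patching pass uses to decide where explicit zext instructions are needed on
 * JITs that request them. (Rough summary; see the uses of subreg_def further
 * down in this file.)
 */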
1513 static void init_reg_state(struct bpf_verifier_env *env, 1514 struct bpf_func_state *state) 1515 { 1516 struct bpf_reg_state *regs = state->regs; 1517 int i; 1518 1519 for (i = 0; i < MAX_BPF_REG; i++) { 1520 mark_reg_not_init(env, regs, i); 1521 regs[i].live = REG_LIVE_NONE; 1522 regs[i].parent = NULL; 1523 regs[i].subreg_def = DEF_NOT_SUBREG; 1524 } 1525 1526 /* frame pointer */ 1527 regs[BPF_REG_FP].type = PTR_TO_STACK; 1528 mark_reg_known_zero(env, regs, BPF_REG_FP); 1529 regs[BPF_REG_FP].frameno = state->frameno; 1530 } 1531 1532 #define BPF_MAIN_FUNC (-1) 1533 static void init_func_state(struct bpf_verifier_env *env, 1534 struct bpf_func_state *state, 1535 int callsite, int frameno, int subprogno) 1536 { 1537 state->callsite = callsite; 1538 state->frameno = frameno; 1539 state->subprogno = subprogno; 1540 init_reg_state(env, state); 1541 } 1542 1543 /* Similar to push_stack(), but for async callbacks */ 1544 static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env, 1545 int insn_idx, int prev_insn_idx, 1546 int subprog) 1547 { 1548 struct bpf_verifier_stack_elem *elem; 1549 struct bpf_func_state *frame; 1550 1551 elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL); 1552 if (!elem) 1553 goto err; 1554 1555 elem->insn_idx = insn_idx; 1556 elem->prev_insn_idx = prev_insn_idx; 1557 elem->next = env->head; 1558 elem->log_pos = env->log.len_used; 1559 env->head = elem; 1560 env->stack_size++; 1561 if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) { 1562 verbose(env, 1563 "The sequence of %d jumps is too complex for async cb.\n", 1564 env->stack_size); 1565 goto err; 1566 } 1567 /* Unlike push_stack() do not copy_verifier_state(). 1568 * The caller state doesn't matter. 1569 * This is async callback. It starts in a fresh stack. 1570 * Initialize it similar to do_check_common(). 
1571 */ 1572 elem->st.branches = 1; 1573 frame = kzalloc(sizeof(*frame), GFP_KERNEL); 1574 if (!frame) 1575 goto err; 1576 init_func_state(env, frame, 1577 BPF_MAIN_FUNC /* callsite */, 1578 0 /* frameno within this callchain */, 1579 subprog /* subprog number within this prog */); 1580 elem->st.frame[0] = frame; 1581 return &elem->st; 1582 err: 1583 free_verifier_state(env->cur_state, true); 1584 env->cur_state = NULL; 1585 /* pop all elements and return */ 1586 while (!pop_stack(env, NULL, NULL, false)); 1587 return NULL; 1588 } 1589 1590 1591 enum reg_arg_type { 1592 SRC_OP, /* register is used as source operand */ 1593 DST_OP, /* register is used as destination operand */ 1594 DST_OP_NO_MARK /* same as above, check only, don't mark */ 1595 }; 1596 1597 static int cmp_subprogs(const void *a, const void *b) 1598 { 1599 return ((struct bpf_subprog_info *)a)->start - 1600 ((struct bpf_subprog_info *)b)->start; 1601 } 1602 1603 static int find_subprog(struct bpf_verifier_env *env, int off) 1604 { 1605 struct bpf_subprog_info *p; 1606 1607 p = bsearch(&off, env->subprog_info, env->subprog_cnt, 1608 sizeof(env->subprog_info[0]), cmp_subprogs); 1609 if (!p) 1610 return -ENOENT; 1611 return p - env->subprog_info; 1612 1613 } 1614 1615 static int add_subprog(struct bpf_verifier_env *env, int off) 1616 { 1617 int insn_cnt = env->prog->len; 1618 int ret; 1619 1620 if (off >= insn_cnt || off < 0) { 1621 verbose(env, "call to invalid destination\n"); 1622 return -EINVAL; 1623 } 1624 ret = find_subprog(env, off); 1625 if (ret >= 0) 1626 return ret; 1627 if (env->subprog_cnt >= BPF_MAX_SUBPROGS) { 1628 verbose(env, "too many subprograms\n"); 1629 return -E2BIG; 1630 } 1631 /* determine subprog starts. The end is one before the next starts */ 1632 env->subprog_info[env->subprog_cnt++].start = off; 1633 sort(env->subprog_info, env->subprog_cnt, 1634 sizeof(env->subprog_info[0]), cmp_subprogs, NULL); 1635 return env->subprog_cnt - 1; 1636 } 1637 1638 #define MAX_KFUNC_DESCS 256 1639 #define MAX_KFUNC_BTFS 256 1640 1641 struct bpf_kfunc_desc { 1642 struct btf_func_model func_model; 1643 u32 func_id; 1644 s32 imm; 1645 u16 offset; 1646 }; 1647 1648 struct bpf_kfunc_btf { 1649 struct btf *btf; 1650 struct module *module; 1651 u16 offset; 1652 }; 1653 1654 struct bpf_kfunc_desc_tab { 1655 struct bpf_kfunc_desc descs[MAX_KFUNC_DESCS]; 1656 u32 nr_descs; 1657 }; 1658 1659 struct bpf_kfunc_btf_tab { 1660 struct bpf_kfunc_btf descs[MAX_KFUNC_BTFS]; 1661 u32 nr_descs; 1662 }; 1663 1664 static int kfunc_desc_cmp_by_id_off(const void *a, const void *b) 1665 { 1666 const struct bpf_kfunc_desc *d0 = a; 1667 const struct bpf_kfunc_desc *d1 = b; 1668 1669 /* func_id is not greater than BTF_MAX_TYPE */ 1670 return d0->func_id - d1->func_id ?: d0->offset - d1->offset; 1671 } 1672 1673 static int kfunc_btf_cmp_by_off(const void *a, const void *b) 1674 { 1675 const struct bpf_kfunc_btf *d0 = a; 1676 const struct bpf_kfunc_btf *d1 = b; 1677 1678 return d0->offset - d1->offset; 1679 } 1680 1681 static const struct bpf_kfunc_desc * 1682 find_kfunc_desc(const struct bpf_prog *prog, u32 func_id, u16 offset) 1683 { 1684 struct bpf_kfunc_desc desc = { 1685 .func_id = func_id, 1686 .offset = offset, 1687 }; 1688 struct bpf_kfunc_desc_tab *tab; 1689 1690 tab = prog->aux->kfunc_tab; 1691 return bsearch(&desc, tab->descs, tab->nr_descs, 1692 sizeof(tab->descs[0]), kfunc_desc_cmp_by_id_off); 1693 } 1694 1695 static struct btf *__find_kfunc_desc_btf(struct bpf_verifier_env *env, 1696 s16 offset, struct module **btf_modp) 1697 { 1698 
struct bpf_kfunc_btf kf_btf = { .offset = offset }; 1699 struct bpf_kfunc_btf_tab *tab; 1700 struct bpf_kfunc_btf *b; 1701 struct module *mod; 1702 struct btf *btf; 1703 int btf_fd; 1704 1705 tab = env->prog->aux->kfunc_btf_tab; 1706 b = bsearch(&kf_btf, tab->descs, tab->nr_descs, 1707 sizeof(tab->descs[0]), kfunc_btf_cmp_by_off); 1708 if (!b) { 1709 if (tab->nr_descs == MAX_KFUNC_BTFS) { 1710 verbose(env, "too many different module BTFs\n"); 1711 return ERR_PTR(-E2BIG); 1712 } 1713 1714 if (bpfptr_is_null(env->fd_array)) { 1715 verbose(env, "kfunc offset > 0 without fd_array is invalid\n"); 1716 return ERR_PTR(-EPROTO); 1717 } 1718 1719 if (copy_from_bpfptr_offset(&btf_fd, env->fd_array, 1720 offset * sizeof(btf_fd), 1721 sizeof(btf_fd))) 1722 return ERR_PTR(-EFAULT); 1723 1724 btf = btf_get_by_fd(btf_fd); 1725 if (IS_ERR(btf)) { 1726 verbose(env, "invalid module BTF fd specified\n"); 1727 return btf; 1728 } 1729 1730 if (!btf_is_module(btf)) { 1731 verbose(env, "BTF fd for kfunc is not a module BTF\n"); 1732 btf_put(btf); 1733 return ERR_PTR(-EINVAL); 1734 } 1735 1736 mod = btf_try_get_module(btf); 1737 if (!mod) { 1738 btf_put(btf); 1739 return ERR_PTR(-ENXIO); 1740 } 1741 1742 b = &tab->descs[tab->nr_descs++]; 1743 b->btf = btf; 1744 b->module = mod; 1745 b->offset = offset; 1746 1747 sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]), 1748 kfunc_btf_cmp_by_off, NULL); 1749 } 1750 if (btf_modp) 1751 *btf_modp = b->module; 1752 return b->btf; 1753 } 1754 1755 void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab) 1756 { 1757 if (!tab) 1758 return; 1759 1760 while (tab->nr_descs--) { 1761 module_put(tab->descs[tab->nr_descs].module); 1762 btf_put(tab->descs[tab->nr_descs].btf); 1763 } 1764 kfree(tab); 1765 } 1766 1767 static struct btf *find_kfunc_desc_btf(struct bpf_verifier_env *env, 1768 u32 func_id, s16 offset, 1769 struct module **btf_modp) 1770 { 1771 if (offset) { 1772 if (offset < 0) { 1773 /* In the future, this can be allowed to increase limit 1774 * of fd index into fd_array, interpreted as u16. 
1775 */ 1776 verbose(env, "negative offset disallowed for kernel module function call\n"); 1777 return ERR_PTR(-EINVAL); 1778 } 1779 1780 return __find_kfunc_desc_btf(env, offset, btf_modp); 1781 } 1782 return btf_vmlinux ?: ERR_PTR(-ENOENT); 1783 } 1784 1785 static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset) 1786 { 1787 const struct btf_type *func, *func_proto; 1788 struct bpf_kfunc_btf_tab *btf_tab; 1789 struct bpf_kfunc_desc_tab *tab; 1790 struct bpf_prog_aux *prog_aux; 1791 struct bpf_kfunc_desc *desc; 1792 const char *func_name; 1793 struct btf *desc_btf; 1794 unsigned long addr; 1795 int err; 1796 1797 prog_aux = env->prog->aux; 1798 tab = prog_aux->kfunc_tab; 1799 btf_tab = prog_aux->kfunc_btf_tab; 1800 if (!tab) { 1801 if (!btf_vmlinux) { 1802 verbose(env, "calling kernel function is not supported without CONFIG_DEBUG_INFO_BTF\n"); 1803 return -ENOTSUPP; 1804 } 1805 1806 if (!env->prog->jit_requested) { 1807 verbose(env, "JIT is required for calling kernel function\n"); 1808 return -ENOTSUPP; 1809 } 1810 1811 if (!bpf_jit_supports_kfunc_call()) { 1812 verbose(env, "JIT does not support calling kernel function\n"); 1813 return -ENOTSUPP; 1814 } 1815 1816 if (!env->prog->gpl_compatible) { 1817 verbose(env, "cannot call kernel function from non-GPL compatible program\n"); 1818 return -EINVAL; 1819 } 1820 1821 tab = kzalloc(sizeof(*tab), GFP_KERNEL); 1822 if (!tab) 1823 return -ENOMEM; 1824 prog_aux->kfunc_tab = tab; 1825 } 1826 1827 /* func_id == 0 is always invalid, but instead of returning an error, be 1828 * conservative and wait until the code elimination pass before returning 1829 * error, so that invalid calls that get pruned out can be in BPF programs 1830 * loaded from userspace. It is also required that offset be untouched 1831 * for such calls. 
1832 */ 1833 if (!func_id && !offset) 1834 return 0; 1835 1836 if (!btf_tab && offset) { 1837 btf_tab = kzalloc(sizeof(*btf_tab), GFP_KERNEL); 1838 if (!btf_tab) 1839 return -ENOMEM; 1840 prog_aux->kfunc_btf_tab = btf_tab; 1841 } 1842 1843 desc_btf = find_kfunc_desc_btf(env, func_id, offset, NULL); 1844 if (IS_ERR(desc_btf)) { 1845 verbose(env, "failed to find BTF for kernel function\n"); 1846 return PTR_ERR(desc_btf); 1847 } 1848 1849 if (find_kfunc_desc(env->prog, func_id, offset)) 1850 return 0; 1851 1852 if (tab->nr_descs == MAX_KFUNC_DESCS) { 1853 verbose(env, "too many different kernel function calls\n"); 1854 return -E2BIG; 1855 } 1856 1857 func = btf_type_by_id(desc_btf, func_id); 1858 if (!func || !btf_type_is_func(func)) { 1859 verbose(env, "kernel btf_id %u is not a function\n", 1860 func_id); 1861 return -EINVAL; 1862 } 1863 func_proto = btf_type_by_id(desc_btf, func->type); 1864 if (!func_proto || !btf_type_is_func_proto(func_proto)) { 1865 verbose(env, "kernel function btf_id %u does not have a valid func_proto\n", 1866 func_id); 1867 return -EINVAL; 1868 } 1869 1870 func_name = btf_name_by_offset(desc_btf, func->name_off); 1871 addr = kallsyms_lookup_name(func_name); 1872 if (!addr) { 1873 verbose(env, "cannot find address for kernel function %s\n", 1874 func_name); 1875 return -EINVAL; 1876 } 1877 1878 desc = &tab->descs[tab->nr_descs++]; 1879 desc->func_id = func_id; 1880 desc->imm = BPF_CALL_IMM(addr); 1881 desc->offset = offset; 1882 err = btf_distill_func_proto(&env->log, desc_btf, 1883 func_proto, func_name, 1884 &desc->func_model); 1885 if (!err) 1886 sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]), 1887 kfunc_desc_cmp_by_id_off, NULL); 1888 return err; 1889 } 1890 1891 static int kfunc_desc_cmp_by_imm(const void *a, const void *b) 1892 { 1893 const struct bpf_kfunc_desc *d0 = a; 1894 const struct bpf_kfunc_desc *d1 = b; 1895 1896 if (d0->imm > d1->imm) 1897 return 1; 1898 else if (d0->imm < d1->imm) 1899 return -1; 1900 return 0; 1901 } 1902 1903 static void sort_kfunc_descs_by_imm(struct bpf_prog *prog) 1904 { 1905 struct bpf_kfunc_desc_tab *tab; 1906 1907 tab = prog->aux->kfunc_tab; 1908 if (!tab) 1909 return; 1910 1911 sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]), 1912 kfunc_desc_cmp_by_imm, NULL); 1913 } 1914 1915 bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog) 1916 { 1917 return !!prog->aux->kfunc_tab; 1918 } 1919 1920 const struct btf_func_model * 1921 bpf_jit_find_kfunc_model(const struct bpf_prog *prog, 1922 const struct bpf_insn *insn) 1923 { 1924 const struct bpf_kfunc_desc desc = { 1925 .imm = insn->imm, 1926 }; 1927 const struct bpf_kfunc_desc *res; 1928 struct bpf_kfunc_desc_tab *tab; 1929 1930 tab = prog->aux->kfunc_tab; 1931 res = bsearch(&desc, tab->descs, tab->nr_descs, 1932 sizeof(tab->descs[0]), kfunc_desc_cmp_by_imm); 1933 1934 return res ? &res->func_model : NULL; 1935 } 1936 1937 static int add_subprog_and_kfunc(struct bpf_verifier_env *env) 1938 { 1939 struct bpf_subprog_info *subprog = env->subprog_info; 1940 struct bpf_insn *insn = env->prog->insnsi; 1941 int i, ret, insn_cnt = env->prog->len; 1942 1943 /* Add entry function. 
*/ 1944 ret = add_subprog(env, 0); 1945 if (ret) 1946 return ret; 1947 1948 for (i = 0; i < insn_cnt; i++, insn++) { 1949 if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn) && 1950 !bpf_pseudo_kfunc_call(insn)) 1951 continue; 1952 1953 if (!env->bpf_capable) { 1954 verbose(env, "loading/calling other bpf or kernel functions are allowed for CAP_BPF and CAP_SYS_ADMIN\n"); 1955 return -EPERM; 1956 } 1957 1958 if (bpf_pseudo_func(insn) || bpf_pseudo_call(insn)) 1959 ret = add_subprog(env, i + insn->imm + 1); 1960 else 1961 ret = add_kfunc_call(env, insn->imm, insn->off); 1962 1963 if (ret < 0) 1964 return ret; 1965 } 1966 1967 /* Add a fake 'exit' subprog which could simplify subprog iteration 1968 * logic. 'subprog_cnt' should not be increased. 1969 */ 1970 subprog[env->subprog_cnt].start = insn_cnt; 1971 1972 if (env->log.level & BPF_LOG_LEVEL2) 1973 for (i = 0; i < env->subprog_cnt; i++) 1974 verbose(env, "func#%d @%d\n", i, subprog[i].start); 1975 1976 return 0; 1977 } 1978 1979 static int check_subprogs(struct bpf_verifier_env *env) 1980 { 1981 int i, subprog_start, subprog_end, off, cur_subprog = 0; 1982 struct bpf_subprog_info *subprog = env->subprog_info; 1983 struct bpf_insn *insn = env->prog->insnsi; 1984 int insn_cnt = env->prog->len; 1985 1986 /* now check that all jumps are within the same subprog */ 1987 subprog_start = subprog[cur_subprog].start; 1988 subprog_end = subprog[cur_subprog + 1].start; 1989 for (i = 0; i < insn_cnt; i++) { 1990 u8 code = insn[i].code; 1991 1992 if (code == (BPF_JMP | BPF_CALL) && 1993 insn[i].imm == BPF_FUNC_tail_call && 1994 insn[i].src_reg != BPF_PSEUDO_CALL) 1995 subprog[cur_subprog].has_tail_call = true; 1996 if (BPF_CLASS(code) == BPF_LD && 1997 (BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND)) 1998 subprog[cur_subprog].has_ld_abs = true; 1999 if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32) 2000 goto next; 2001 if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL) 2002 goto next; 2003 off = i + insn[i].off + 1; 2004 if (off < subprog_start || off >= subprog_end) { 2005 verbose(env, "jump out of range from insn %d to %d\n", i, off); 2006 return -EINVAL; 2007 } 2008 next: 2009 if (i == subprog_end - 1) { 2010 /* to avoid fall-through from one subprog into another 2011 * the last insn of the subprog should be either exit 2012 * or unconditional jump back 2013 */ 2014 if (code != (BPF_JMP | BPF_EXIT) && 2015 code != (BPF_JMP | BPF_JA)) { 2016 verbose(env, "last insn is not an exit or jmp\n"); 2017 return -EINVAL; 2018 } 2019 subprog_start = subprog_end; 2020 cur_subprog++; 2021 if (cur_subprog < env->subprog_cnt) 2022 subprog_end = subprog[cur_subprog + 1].start; 2023 } 2024 } 2025 return 0; 2026 } 2027 2028 /* Parentage chain of this register (or stack slot) should take care of all 2029 * issues like callee-saved registers, stack slot allocation time, etc. 2030 */ 2031 static int mark_reg_read(struct bpf_verifier_env *env, 2032 const struct bpf_reg_state *state, 2033 struct bpf_reg_state *parent, u8 flag) 2034 { 2035 bool writes = parent == state->parent; /* Observe write marks */ 2036 int cnt = 0; 2037 2038 while (parent) { 2039 /* if read wasn't screened by an earlier write ... 
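 *
 * For illustration (register numbers are arbitrary):
 *
 *   r6 = 1          // r6 gets REG_LIVE_WRITTEN in the current state
 *   r0 = r6         // this read is screened by the write above
 *
 * Because the same state wrote r6, the walk stops immediately and the
 * parent state is never marked REG_LIVE_READ64 for r6. Without the write,
 * the read would propagate up the parentage chain until it reaches a state
 * that either wrote r6 or is already marked as read.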
*/ 2040 if (writes && state->live & REG_LIVE_WRITTEN) 2041 break; 2042 if (parent->live & REG_LIVE_DONE) { 2043 verbose(env, "verifier BUG type %s var_off %lld off %d\n", 2044 reg_type_str[parent->type], 2045 parent->var_off.value, parent->off); 2046 return -EFAULT; 2047 } 2048 /* The first condition is more likely to be true than the 2049 * second, checked it first. 2050 */ 2051 if ((parent->live & REG_LIVE_READ) == flag || 2052 parent->live & REG_LIVE_READ64) 2053 /* The parentage chain never changes and 2054 * this parent was already marked as LIVE_READ. 2055 * There is no need to keep walking the chain again and 2056 * keep re-marking all parents as LIVE_READ. 2057 * This case happens when the same register is read 2058 * multiple times without writes into it in-between. 2059 * Also, if parent has the stronger REG_LIVE_READ64 set, 2060 * then no need to set the weak REG_LIVE_READ32. 2061 */ 2062 break; 2063 /* ... then we depend on parent's value */ 2064 parent->live |= flag; 2065 /* REG_LIVE_READ64 overrides REG_LIVE_READ32. */ 2066 if (flag == REG_LIVE_READ64) 2067 parent->live &= ~REG_LIVE_READ32; 2068 state = parent; 2069 parent = state->parent; 2070 writes = true; 2071 cnt++; 2072 } 2073 2074 if (env->longest_mark_read_walk < cnt) 2075 env->longest_mark_read_walk = cnt; 2076 return 0; 2077 } 2078 2079 /* This function is supposed to be used by the following 32-bit optimization 2080 * code only. It returns TRUE if the source or destination register operates 2081 * on 64-bit, otherwise return FALSE. 2082 */ 2083 static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn, 2084 u32 regno, struct bpf_reg_state *reg, enum reg_arg_type t) 2085 { 2086 u8 code, class, op; 2087 2088 code = insn->code; 2089 class = BPF_CLASS(code); 2090 op = BPF_OP(code); 2091 if (class == BPF_JMP) { 2092 /* BPF_EXIT for "main" will reach here. Return TRUE 2093 * conservatively. 2094 */ 2095 if (op == BPF_EXIT) 2096 return true; 2097 if (op == BPF_CALL) { 2098 /* BPF to BPF call will reach here because of marking 2099 * caller saved clobber with DST_OP_NO_MARK for which we 2100 * don't care the register def because they are anyway 2101 * marked as NOT_INIT already. 2102 */ 2103 if (insn->src_reg == BPF_PSEUDO_CALL) 2104 return false; 2105 /* Helper call will reach here because of arg type 2106 * check, conservatively return TRUE. 2107 */ 2108 if (t == SRC_OP) 2109 return true; 2110 2111 return false; 2112 } 2113 } 2114 2115 if (class == BPF_ALU64 || class == BPF_JMP || 2116 /* BPF_END always use BPF_ALU class. */ 2117 (class == BPF_ALU && op == BPF_END && insn->imm == 64)) 2118 return true; 2119 2120 if (class == BPF_ALU || class == BPF_JMP32) 2121 return false; 2122 2123 if (class == BPF_LDX) { 2124 if (t != SRC_OP) 2125 return BPF_SIZE(code) == BPF_DW; 2126 /* LDX source must be ptr. */ 2127 return true; 2128 } 2129 2130 if (class == BPF_STX) { 2131 /* BPF_STX (including atomic variants) has multiple source 2132 * operands, one of which is a ptr. Check whether the caller is 2133 * asking about it. 2134 */ 2135 if (t == SRC_OP && reg->type != SCALAR_VALUE) 2136 return true; 2137 return BPF_SIZE(code) == BPF_DW; 2138 } 2139 2140 if (class == BPF_LD) { 2141 u8 mode = BPF_MODE(code); 2142 2143 /* LD_IMM64 */ 2144 if (mode == BPF_IMM) 2145 return true; 2146 2147 /* Both LD_IND and LD_ABS return 32-bit data. */ 2148 if (t != SRC_OP) 2149 return false; 2150 2151 /* Implicit ctx ptr. */ 2152 if (regno == BPF_REG_6) 2153 return true; 2154 2155 /* Explicit source could be any width. 
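 *
 * A sketch of how this classification is consumed (register numbers are
 * arbitrary):
 *
 *   w2 = w1         // BPF_ALU (32-bit) mov: a 32-bit def of r2
 *   r3 = r2         // BPF_ALU64 mov: a 64-bit read of r2
 *
 * The first insn is a sub-register def, so r2's subreg_def records it. The
 * 64-bit read in the second insn goes through mark_insn_zext(), and the
 * later opt_subreg_zext_lo32_rnd_hi32() pass inserts an explicit
 * zero-extension after the def for JITs that do not zero-extend 32-bit
 * writes on their own.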
*/ 2156 return true; 2157 } 2158 2159 if (class == BPF_ST) 2160 /* The only source register for BPF_ST is a ptr. */ 2161 return true; 2162 2163 /* Conservatively return true at default. */ 2164 return true; 2165 } 2166 2167 /* Return the regno defined by the insn, or -1. */ 2168 static int insn_def_regno(const struct bpf_insn *insn) 2169 { 2170 switch (BPF_CLASS(insn->code)) { 2171 case BPF_JMP: 2172 case BPF_JMP32: 2173 case BPF_ST: 2174 return -1; 2175 case BPF_STX: 2176 if (BPF_MODE(insn->code) == BPF_ATOMIC && 2177 (insn->imm & BPF_FETCH)) { 2178 if (insn->imm == BPF_CMPXCHG) 2179 return BPF_REG_0; 2180 else 2181 return insn->src_reg; 2182 } else { 2183 return -1; 2184 } 2185 default: 2186 return insn->dst_reg; 2187 } 2188 } 2189 2190 /* Return TRUE if INSN has defined any 32-bit value explicitly. */ 2191 static bool insn_has_def32(struct bpf_verifier_env *env, struct bpf_insn *insn) 2192 { 2193 int dst_reg = insn_def_regno(insn); 2194 2195 if (dst_reg == -1) 2196 return false; 2197 2198 return !is_reg64(env, insn, dst_reg, NULL, DST_OP); 2199 } 2200 2201 static void mark_insn_zext(struct bpf_verifier_env *env, 2202 struct bpf_reg_state *reg) 2203 { 2204 s32 def_idx = reg->subreg_def; 2205 2206 if (def_idx == DEF_NOT_SUBREG) 2207 return; 2208 2209 env->insn_aux_data[def_idx - 1].zext_dst = true; 2210 /* The dst will be zero extended, so won't be sub-register anymore. */ 2211 reg->subreg_def = DEF_NOT_SUBREG; 2212 } 2213 2214 static int check_reg_arg(struct bpf_verifier_env *env, u32 regno, 2215 enum reg_arg_type t) 2216 { 2217 struct bpf_verifier_state *vstate = env->cur_state; 2218 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 2219 struct bpf_insn *insn = env->prog->insnsi + env->insn_idx; 2220 struct bpf_reg_state *reg, *regs = state->regs; 2221 bool rw64; 2222 2223 if (regno >= MAX_BPF_REG) { 2224 verbose(env, "R%d is invalid\n", regno); 2225 return -EINVAL; 2226 } 2227 2228 reg = ®s[regno]; 2229 rw64 = is_reg64(env, insn, regno, reg, t); 2230 if (t == SRC_OP) { 2231 /* check whether register used as source operand can be read */ 2232 if (reg->type == NOT_INIT) { 2233 verbose(env, "R%d !read_ok\n", regno); 2234 return -EACCES; 2235 } 2236 /* We don't need to worry about FP liveness because it's read-only */ 2237 if (regno == BPF_REG_FP) 2238 return 0; 2239 2240 if (rw64) 2241 mark_insn_zext(env, reg); 2242 2243 return mark_reg_read(env, reg, reg->parent, 2244 rw64 ? REG_LIVE_READ64 : REG_LIVE_READ32); 2245 } else { 2246 /* check whether register used as dest operand can be written to */ 2247 if (regno == BPF_REG_FP) { 2248 verbose(env, "frame pointer is read only\n"); 2249 return -EACCES; 2250 } 2251 reg->live |= REG_LIVE_WRITTEN; 2252 reg->subreg_def = rw64 ? DEF_NOT_SUBREG : env->insn_idx + 1; 2253 if (t == DST_OP) 2254 mark_reg_unknown(env, regs, regno); 2255 } 2256 return 0; 2257 } 2258 2259 /* for any branch, call, exit record the history of jmps in the given state */ 2260 static int push_jmp_history(struct bpf_verifier_env *env, 2261 struct bpf_verifier_state *cur) 2262 { 2263 u32 cnt = cur->jmp_history_cnt; 2264 struct bpf_idx_pair *p; 2265 2266 cnt++; 2267 p = krealloc(cur->jmp_history, cnt * sizeof(*p), GFP_USER); 2268 if (!p) 2269 return -ENOMEM; 2270 p[cnt - 1].idx = env->insn_idx; 2271 p[cnt - 1].prev_idx = env->prev_insn_idx; 2272 cur->jmp_history = p; 2273 cur->jmp_history_cnt = cnt; 2274 return 0; 2275 } 2276 2277 /* Backtrack one insn at a time. 
If idx is not at the top of recorded 2278 * history then previous instruction came from straight line execution. 2279 */ 2280 static int get_prev_insn_idx(struct bpf_verifier_state *st, int i, 2281 u32 *history) 2282 { 2283 u32 cnt = *history; 2284 2285 if (cnt && st->jmp_history[cnt - 1].idx == i) { 2286 i = st->jmp_history[cnt - 1].prev_idx; 2287 (*history)--; 2288 } else { 2289 i--; 2290 } 2291 return i; 2292 } 2293 2294 static const char *disasm_kfunc_name(void *data, const struct bpf_insn *insn) 2295 { 2296 const struct btf_type *func; 2297 struct btf *desc_btf; 2298 2299 if (insn->src_reg != BPF_PSEUDO_KFUNC_CALL) 2300 return NULL; 2301 2302 desc_btf = find_kfunc_desc_btf(data, insn->imm, insn->off, NULL); 2303 if (IS_ERR(desc_btf)) 2304 return "<error>"; 2305 2306 func = btf_type_by_id(desc_btf, insn->imm); 2307 return btf_name_by_offset(desc_btf, func->name_off); 2308 } 2309 2310 /* For given verifier state backtrack_insn() is called from the last insn to 2311 * the first insn. Its purpose is to compute a bitmask of registers and 2312 * stack slots that needs precision in the parent verifier state. 2313 */ 2314 static int backtrack_insn(struct bpf_verifier_env *env, int idx, 2315 u32 *reg_mask, u64 *stack_mask) 2316 { 2317 const struct bpf_insn_cbs cbs = { 2318 .cb_call = disasm_kfunc_name, 2319 .cb_print = verbose, 2320 .private_data = env, 2321 }; 2322 struct bpf_insn *insn = env->prog->insnsi + idx; 2323 u8 class = BPF_CLASS(insn->code); 2324 u8 opcode = BPF_OP(insn->code); 2325 u8 mode = BPF_MODE(insn->code); 2326 u32 dreg = 1u << insn->dst_reg; 2327 u32 sreg = 1u << insn->src_reg; 2328 u32 spi; 2329 2330 if (insn->code == 0) 2331 return 0; 2332 if (env->log.level & BPF_LOG_LEVEL) { 2333 verbose(env, "regs=%x stack=%llx before ", *reg_mask, *stack_mask); 2334 verbose(env, "%d: ", idx); 2335 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks); 2336 } 2337 2338 if (class == BPF_ALU || class == BPF_ALU64) { 2339 if (!(*reg_mask & dreg)) 2340 return 0; 2341 if (opcode == BPF_MOV) { 2342 if (BPF_SRC(insn->code) == BPF_X) { 2343 /* dreg = sreg 2344 * dreg needs precision after this insn 2345 * sreg needs precision before this insn 2346 */ 2347 *reg_mask &= ~dreg; 2348 *reg_mask |= sreg; 2349 } else { 2350 /* dreg = K 2351 * dreg needs precision after this insn. 2352 * Corresponding register is already marked 2353 * as precise=true in this verifier state. 2354 * No further markings in parent are necessary 2355 */ 2356 *reg_mask &= ~dreg; 2357 } 2358 } else { 2359 if (BPF_SRC(insn->code) == BPF_X) { 2360 /* dreg += sreg 2361 * both dreg and sreg need precision 2362 * before this insn 2363 */ 2364 *reg_mask |= sreg; 2365 } /* else dreg += K 2366 * dreg still needs precision before this insn 2367 */ 2368 } 2369 } else if (class == BPF_LDX) { 2370 if (!(*reg_mask & dreg)) 2371 return 0; 2372 *reg_mask &= ~dreg; 2373 2374 /* scalars can only be spilled into stack w/o losing precision. 2375 * Load from any other memory can be zero extended. 2376 * The desire to keep that precision is already indicated 2377 * by 'precise' mark in corresponding register of this state. 2378 * No further tracking necessary. 2379 */ 2380 if (insn->src_reg != BPF_REG_FP) 2381 return 0; 2382 if (BPF_SIZE(insn->code) != BPF_DW) 2383 return 0; 2384 2385 /* dreg = *(u64 *)[fp - off] was a fill from the stack. 
2386 * that [fp - off] slot contains scalar that needs to be 2387 * tracked with precision 2388 */ 2389 spi = (-insn->off - 1) / BPF_REG_SIZE; 2390 if (spi >= 64) { 2391 verbose(env, "BUG spi %d\n", spi); 2392 WARN_ONCE(1, "verifier backtracking bug"); 2393 return -EFAULT; 2394 } 2395 *stack_mask |= 1ull << spi; 2396 } else if (class == BPF_STX || class == BPF_ST) { 2397 if (*reg_mask & dreg) 2398 /* stx & st shouldn't be using _scalar_ dst_reg 2399 * to access memory. It means backtracking 2400 * encountered a case of pointer subtraction. 2401 */ 2402 return -ENOTSUPP; 2403 /* scalars can only be spilled into stack */ 2404 if (insn->dst_reg != BPF_REG_FP) 2405 return 0; 2406 if (BPF_SIZE(insn->code) != BPF_DW) 2407 return 0; 2408 spi = (-insn->off - 1) / BPF_REG_SIZE; 2409 if (spi >= 64) { 2410 verbose(env, "BUG spi %d\n", spi); 2411 WARN_ONCE(1, "verifier backtracking bug"); 2412 return -EFAULT; 2413 } 2414 if (!(*stack_mask & (1ull << spi))) 2415 return 0; 2416 *stack_mask &= ~(1ull << spi); 2417 if (class == BPF_STX) 2418 *reg_mask |= sreg; 2419 } else if (class == BPF_JMP || class == BPF_JMP32) { 2420 if (opcode == BPF_CALL) { 2421 if (insn->src_reg == BPF_PSEUDO_CALL) 2422 return -ENOTSUPP; 2423 /* regular helper call sets R0 */ 2424 *reg_mask &= ~1; 2425 if (*reg_mask & 0x3f) { 2426 /* if backtracing was looking for registers R1-R5 2427 * they should have been found already. 2428 */ 2429 verbose(env, "BUG regs %x\n", *reg_mask); 2430 WARN_ONCE(1, "verifier backtracking bug"); 2431 return -EFAULT; 2432 } 2433 } else if (opcode == BPF_EXIT) { 2434 return -ENOTSUPP; 2435 } 2436 } else if (class == BPF_LD) { 2437 if (!(*reg_mask & dreg)) 2438 return 0; 2439 *reg_mask &= ~dreg; 2440 /* It's ld_imm64 or ld_abs or ld_ind. 2441 * For ld_imm64 no further tracking of precision 2442 * into parent is necessary 2443 */ 2444 if (mode == BPF_IND || mode == BPF_ABS) 2445 /* to be analyzed */ 2446 return -ENOTSUPP; 2447 } 2448 return 0; 2449 } 2450 2451 /* the scalar precision tracking algorithm: 2452 * . at the start all registers have precise=false. 2453 * . scalar ranges are tracked as normal through alu and jmp insns. 2454 * . once precise value of the scalar register is used in: 2455 * . ptr + scalar alu 2456 * . if (scalar cond K|scalar) 2457 * . helper_call(.., scalar, ...) where ARG_CONST is expected 2458 * backtrack through the verifier states and mark all registers and 2459 * stack slots with spilled constants that these scalar regisers 2460 * should be precise. 2461 * . during state pruning two registers (or spilled stack slots) 2462 * are equivalent if both are not precise. 2463 * 2464 * Note the verifier cannot simply walk register parentage chain, 2465 * since many different registers and stack slots could have been 2466 * used to compute single precise scalar. 2467 * 2468 * The approach of starting with precise=true for all registers and then 2469 * backtrack to mark a register as not precise when the verifier detects 2470 * that program doesn't care about specific value (e.g., when helper 2471 * takes register as ARG_ANYTHING parameter) is not safe. 2472 * 2473 * It's ok to walk single parentage chain of the verifier states. 2474 * It's possible that this backtracking will go all the way till 1st insn. 2475 * All other branches will be explored for needing precision later. 
2476 * 2477 * The backtracking needs to deal with cases like: 2478 * R8=map_value(id=0,off=0,ks=4,vs=1952,imm=0) R9_w=map_value(id=0,off=40,ks=4,vs=1952,imm=0) 2479 * r9 -= r8 2480 * r5 = r9 2481 * if r5 > 0x79f goto pc+7 2482 * R5_w=inv(id=0,umax_value=1951,var_off=(0x0; 0x7ff)) 2483 * r5 += 1 2484 * ... 2485 * call bpf_perf_event_output#25 2486 * where .arg5_type = ARG_CONST_SIZE_OR_ZERO 2487 * 2488 * and this case: 2489 * r6 = 1 2490 * call foo // uses callee's r6 inside to compute r0 2491 * r0 += r6 2492 * if r0 == 0 goto 2493 * 2494 * to track above reg_mask/stack_mask needs to be independent for each frame. 2495 * 2496 * Also if parent's curframe > frame where backtracking started, 2497 * the verifier need to mark registers in both frames, otherwise callees 2498 * may incorrectly prune callers. This is similar to 2499 * commit 7640ead93924 ("bpf: verifier: make sure callees don't prune with caller differences") 2500 * 2501 * For now backtracking falls back into conservative marking. 2502 */ 2503 static void mark_all_scalars_precise(struct bpf_verifier_env *env, 2504 struct bpf_verifier_state *st) 2505 { 2506 struct bpf_func_state *func; 2507 struct bpf_reg_state *reg; 2508 int i, j; 2509 2510 /* big hammer: mark all scalars precise in this path. 2511 * pop_stack may still get !precise scalars. 2512 */ 2513 for (; st; st = st->parent) 2514 for (i = 0; i <= st->curframe; i++) { 2515 func = st->frame[i]; 2516 for (j = 0; j < BPF_REG_FP; j++) { 2517 reg = &func->regs[j]; 2518 if (reg->type != SCALAR_VALUE) 2519 continue; 2520 reg->precise = true; 2521 } 2522 for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { 2523 if (!is_spilled_reg(&func->stack[j])) 2524 continue; 2525 reg = &func->stack[j].spilled_ptr; 2526 if (reg->type != SCALAR_VALUE) 2527 continue; 2528 reg->precise = true; 2529 } 2530 } 2531 } 2532 2533 static int __mark_chain_precision(struct bpf_verifier_env *env, int regno, 2534 int spi) 2535 { 2536 struct bpf_verifier_state *st = env->cur_state; 2537 int first_idx = st->first_insn_idx; 2538 int last_idx = env->insn_idx; 2539 struct bpf_func_state *func; 2540 struct bpf_reg_state *reg; 2541 u32 reg_mask = regno >= 0 ? 1u << regno : 0; 2542 u64 stack_mask = spi >= 0 ? 
1ull << spi : 0; 2543 bool skip_first = true; 2544 bool new_marks = false; 2545 int i, err; 2546 2547 if (!env->bpf_capable) 2548 return 0; 2549 2550 func = st->frame[st->curframe]; 2551 if (regno >= 0) { 2552 reg = &func->regs[regno]; 2553 if (reg->type != SCALAR_VALUE) { 2554 WARN_ONCE(1, "backtracing misuse"); 2555 return -EFAULT; 2556 } 2557 if (!reg->precise) 2558 new_marks = true; 2559 else 2560 reg_mask = 0; 2561 reg->precise = true; 2562 } 2563 2564 while (spi >= 0) { 2565 if (!is_spilled_reg(&func->stack[spi])) { 2566 stack_mask = 0; 2567 break; 2568 } 2569 reg = &func->stack[spi].spilled_ptr; 2570 if (reg->type != SCALAR_VALUE) { 2571 stack_mask = 0; 2572 break; 2573 } 2574 if (!reg->precise) 2575 new_marks = true; 2576 else 2577 stack_mask = 0; 2578 reg->precise = true; 2579 break; 2580 } 2581 2582 if (!new_marks) 2583 return 0; 2584 if (!reg_mask && !stack_mask) 2585 return 0; 2586 for (;;) { 2587 DECLARE_BITMAP(mask, 64); 2588 u32 history = st->jmp_history_cnt; 2589 2590 if (env->log.level & BPF_LOG_LEVEL) 2591 verbose(env, "last_idx %d first_idx %d\n", last_idx, first_idx); 2592 for (i = last_idx;;) { 2593 if (skip_first) { 2594 err = 0; 2595 skip_first = false; 2596 } else { 2597 err = backtrack_insn(env, i, ®_mask, &stack_mask); 2598 } 2599 if (err == -ENOTSUPP) { 2600 mark_all_scalars_precise(env, st); 2601 return 0; 2602 } else if (err) { 2603 return err; 2604 } 2605 if (!reg_mask && !stack_mask) 2606 /* Found assignment(s) into tracked register in this state. 2607 * Since this state is already marked, just return. 2608 * Nothing to be tracked further in the parent state. 2609 */ 2610 return 0; 2611 if (i == first_idx) 2612 break; 2613 i = get_prev_insn_idx(st, i, &history); 2614 if (i >= env->prog->len) { 2615 /* This can happen if backtracking reached insn 0 2616 * and there are still reg_mask or stack_mask 2617 * to backtrack. 2618 * It means the backtracking missed the spot where 2619 * particular register was initialized with a constant. 2620 */ 2621 verbose(env, "BUG backtracking idx %d\n", i); 2622 WARN_ONCE(1, "verifier backtracking bug"); 2623 return -EFAULT; 2624 } 2625 } 2626 st = st->parent; 2627 if (!st) 2628 break; 2629 2630 new_marks = false; 2631 func = st->frame[st->curframe]; 2632 bitmap_from_u64(mask, reg_mask); 2633 for_each_set_bit(i, mask, 32) { 2634 reg = &func->regs[i]; 2635 if (reg->type != SCALAR_VALUE) { 2636 reg_mask &= ~(1u << i); 2637 continue; 2638 } 2639 if (!reg->precise) 2640 new_marks = true; 2641 reg->precise = true; 2642 } 2643 2644 bitmap_from_u64(mask, stack_mask); 2645 for_each_set_bit(i, mask, 64) { 2646 if (i >= func->allocated_stack / BPF_REG_SIZE) { 2647 /* the sequence of instructions: 2648 * 2: (bf) r3 = r10 2649 * 3: (7b) *(u64 *)(r3 -8) = r0 2650 * 4: (79) r4 = *(u64 *)(r10 -8) 2651 * doesn't contain jmps. It's backtracked 2652 * as a single block. 2653 * During backtracking insn 3 is not recognized as 2654 * stack access, so at the end of backtracking 2655 * stack slot fp-8 is still marked in stack_mask. 2656 * However the parent state may not have accessed 2657 * fp-8 and it's "unallocated" stack space. 2658 * In such case fallback to conservative. 
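 *
 * For contrast, a sketch of a case that is tracked exactly (illustrative
 * only; insn indices and registers are arbitrary):
 *
 *   10: r6 = *(u32 *)(r1 + 0)    // scalar
 *   ...
 *   20: r3 += r6                 // r3 is a map value pointer, so per the
 *                                // algorithm above r6 now needs precision
 *
 * Backtracking starts at insn 20 with reg_mask containing r6, walks back
 * towards insn 10 using jmp_history, and marks r6 (and whatever it was
 * copied or computed from) precise in each parent state, so that state
 * pruning will not treat states with different r6 bounds as equivalent.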
2659 */ 2660 mark_all_scalars_precise(env, st); 2661 return 0; 2662 } 2663 2664 if (!is_spilled_reg(&func->stack[i])) { 2665 stack_mask &= ~(1ull << i); 2666 continue; 2667 } 2668 reg = &func->stack[i].spilled_ptr; 2669 if (reg->type != SCALAR_VALUE) { 2670 stack_mask &= ~(1ull << i); 2671 continue; 2672 } 2673 if (!reg->precise) 2674 new_marks = true; 2675 reg->precise = true; 2676 } 2677 if (env->log.level & BPF_LOG_LEVEL) { 2678 print_verifier_state(env, func); 2679 verbose(env, "parent %s regs=%x stack=%llx marks\n", 2680 new_marks ? "didn't have" : "already had", 2681 reg_mask, stack_mask); 2682 } 2683 2684 if (!reg_mask && !stack_mask) 2685 break; 2686 if (!new_marks) 2687 break; 2688 2689 last_idx = st->last_insn_idx; 2690 first_idx = st->first_insn_idx; 2691 } 2692 return 0; 2693 } 2694 2695 static int mark_chain_precision(struct bpf_verifier_env *env, int regno) 2696 { 2697 return __mark_chain_precision(env, regno, -1); 2698 } 2699 2700 static int mark_chain_precision_stack(struct bpf_verifier_env *env, int spi) 2701 { 2702 return __mark_chain_precision(env, -1, spi); 2703 } 2704 2705 static bool is_spillable_regtype(enum bpf_reg_type type) 2706 { 2707 switch (type) { 2708 case PTR_TO_MAP_VALUE: 2709 case PTR_TO_MAP_VALUE_OR_NULL: 2710 case PTR_TO_STACK: 2711 case PTR_TO_CTX: 2712 case PTR_TO_PACKET: 2713 case PTR_TO_PACKET_META: 2714 case PTR_TO_PACKET_END: 2715 case PTR_TO_FLOW_KEYS: 2716 case CONST_PTR_TO_MAP: 2717 case PTR_TO_SOCKET: 2718 case PTR_TO_SOCKET_OR_NULL: 2719 case PTR_TO_SOCK_COMMON: 2720 case PTR_TO_SOCK_COMMON_OR_NULL: 2721 case PTR_TO_TCP_SOCK: 2722 case PTR_TO_TCP_SOCK_OR_NULL: 2723 case PTR_TO_XDP_SOCK: 2724 case PTR_TO_BTF_ID: 2725 case PTR_TO_BTF_ID_OR_NULL: 2726 case PTR_TO_RDONLY_BUF: 2727 case PTR_TO_RDONLY_BUF_OR_NULL: 2728 case PTR_TO_RDWR_BUF: 2729 case PTR_TO_RDWR_BUF_OR_NULL: 2730 case PTR_TO_PERCPU_BTF_ID: 2731 case PTR_TO_MEM: 2732 case PTR_TO_MEM_OR_NULL: 2733 case PTR_TO_FUNC: 2734 case PTR_TO_MAP_KEY: 2735 return true; 2736 default: 2737 return false; 2738 } 2739 } 2740 2741 /* Does this register contain a constant zero? 
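 *
 * For example (illustrative): after "r1 = 0" the register is SCALAR_VALUE
 * with var_off=(0x0; 0x0), so register_is_null() below returns true; after
 * "r1 &= 0xff" the value is bounded but not constant, so
 * register_is_bounded() is true while register_is_null() and
 * register_is_const() are false.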
*/ 2742 static bool register_is_null(struct bpf_reg_state *reg) 2743 { 2744 return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0); 2745 } 2746 2747 static bool register_is_const(struct bpf_reg_state *reg) 2748 { 2749 return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off); 2750 } 2751 2752 static bool __is_scalar_unbounded(struct bpf_reg_state *reg) 2753 { 2754 return tnum_is_unknown(reg->var_off) && 2755 reg->smin_value == S64_MIN && reg->smax_value == S64_MAX && 2756 reg->umin_value == 0 && reg->umax_value == U64_MAX && 2757 reg->s32_min_value == S32_MIN && reg->s32_max_value == S32_MAX && 2758 reg->u32_min_value == 0 && reg->u32_max_value == U32_MAX; 2759 } 2760 2761 static bool register_is_bounded(struct bpf_reg_state *reg) 2762 { 2763 return reg->type == SCALAR_VALUE && !__is_scalar_unbounded(reg); 2764 } 2765 2766 static bool __is_pointer_value(bool allow_ptr_leaks, 2767 const struct bpf_reg_state *reg) 2768 { 2769 if (allow_ptr_leaks) 2770 return false; 2771 2772 return reg->type != SCALAR_VALUE; 2773 } 2774 2775 static void save_register_state(struct bpf_func_state *state, 2776 int spi, struct bpf_reg_state *reg, 2777 int size) 2778 { 2779 int i; 2780 2781 state->stack[spi].spilled_ptr = *reg; 2782 if (size == BPF_REG_SIZE) 2783 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; 2784 2785 for (i = BPF_REG_SIZE; i > BPF_REG_SIZE - size; i--) 2786 state->stack[spi].slot_type[i - 1] = STACK_SPILL; 2787 2788 /* size < 8 bytes spill */ 2789 for (; i; i--) 2790 scrub_spilled_slot(&state->stack[spi].slot_type[i - 1]); 2791 } 2792 2793 /* check_stack_{read,write}_fixed_off functions track spill/fill of registers, 2794 * stack boundary and alignment are checked in check_mem_access() 2795 */ 2796 static int check_stack_write_fixed_off(struct bpf_verifier_env *env, 2797 /* stack frame we're writing to */ 2798 struct bpf_func_state *state, 2799 int off, int size, int value_regno, 2800 int insn_idx) 2801 { 2802 struct bpf_func_state *cur; /* state of the current function */ 2803 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err; 2804 u32 dst_reg = env->prog->insnsi[insn_idx].dst_reg; 2805 struct bpf_reg_state *reg = NULL; 2806 2807 err = grow_stack_state(state, round_up(slot + 1, BPF_REG_SIZE)); 2808 if (err) 2809 return err; 2810 /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0, 2811 * so it's aligned access and [off, off + size) are within stack limits 2812 */ 2813 if (!env->allow_ptr_leaks && 2814 state->stack[spi].slot_type[0] == STACK_SPILL && 2815 size != BPF_REG_SIZE) { 2816 verbose(env, "attempt to corrupt spilled pointer on stack\n"); 2817 return -EACCES; 2818 } 2819 2820 cur = env->cur_state->frame[env->cur_state->curframe]; 2821 if (value_regno >= 0) 2822 reg = &cur->regs[value_regno]; 2823 if (!env->bypass_spec_v4) { 2824 bool sanitize = reg && is_spillable_regtype(reg->type); 2825 2826 for (i = 0; i < size; i++) { 2827 if (state->stack[spi].slot_type[i] == STACK_INVALID) { 2828 sanitize = true; 2829 break; 2830 } 2831 } 2832 2833 if (sanitize) 2834 env->insn_aux_data[insn_idx].sanitize_stack_spill = true; 2835 } 2836 2837 if (reg && !(off % BPF_REG_SIZE) && register_is_bounded(reg) && 2838 !register_is_null(reg) && env->bpf_capable) { 2839 if (dst_reg != BPF_REG_FP) { 2840 /* The backtracking logic can only recognize explicit 2841 * stack slot address like [fp - 8]. Other spill of 2842 * scalar via different register has to be conservative. 
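 * (Illustration, register numbers arbitrary: a spill written as
 * "*(u64 *)(r10 - 8) = r6" is tracked exactly, while the equivalent
 * "r3 = r10; r3 += -8; *(u64 *)(r3 + 0) = r6" hides the slot behind r3,
 * so precision for r6 has to be resolved right away.)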
2843 * Backtrack from here and mark all registers as precise 2844 * that contributed into 'reg' being a constant. 2845 */ 2846 err = mark_chain_precision(env, value_regno); 2847 if (err) 2848 return err; 2849 } 2850 save_register_state(state, spi, reg, size); 2851 } else if (reg && is_spillable_regtype(reg->type)) { 2852 /* register containing pointer is being spilled into stack */ 2853 if (size != BPF_REG_SIZE) { 2854 verbose_linfo(env, insn_idx, "; "); 2855 verbose(env, "invalid size of register spill\n"); 2856 return -EACCES; 2857 } 2858 if (state != cur && reg->type == PTR_TO_STACK) { 2859 verbose(env, "cannot spill pointers to stack into stack frame of the caller\n"); 2860 return -EINVAL; 2861 } 2862 save_register_state(state, spi, reg, size); 2863 } else { 2864 u8 type = STACK_MISC; 2865 2866 /* regular write of data into stack destroys any spilled ptr */ 2867 state->stack[spi].spilled_ptr.type = NOT_INIT; 2868 /* Mark slots as STACK_MISC if they belonged to spilled ptr. */ 2869 if (is_spilled_reg(&state->stack[spi])) 2870 for (i = 0; i < BPF_REG_SIZE; i++) 2871 scrub_spilled_slot(&state->stack[spi].slot_type[i]); 2872 2873 /* only mark the slot as written if all 8 bytes were written 2874 * otherwise read propagation may incorrectly stop too soon 2875 * when stack slots are partially written. 2876 * This heuristic means that read propagation will be 2877 * conservative, since it will add reg_live_read marks 2878 * to stack slots all the way to first state when programs 2879 * writes+reads less than 8 bytes 2880 */ 2881 if (size == BPF_REG_SIZE) 2882 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; 2883 2884 /* when we zero initialize stack slots mark them as such */ 2885 if (reg && register_is_null(reg)) { 2886 /* backtracking doesn't work for STACK_ZERO yet. */ 2887 err = mark_chain_precision(env, value_regno); 2888 if (err) 2889 return err; 2890 type = STACK_ZERO; 2891 } 2892 2893 /* Mark slots affected by this stack write. */ 2894 for (i = 0; i < size; i++) 2895 state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] = 2896 type; 2897 } 2898 return 0; 2899 } 2900 2901 /* Write the stack: 'stack[ptr_regno + off] = value_regno'. 'ptr_regno' is 2902 * known to contain a variable offset. 2903 * This function checks whether the write is permitted and conservatively 2904 * tracks the effects of the write, considering that each stack slot in the 2905 * dynamic range is potentially written to. 2906 * 2907 * 'off' includes 'regno->off'. 2908 * 'value_regno' can be -1, meaning that an unknown value is being written to 2909 * the stack. 2910 * 2911 * Spilled pointers in range are not marked as written because we don't know 2912 * what's going to be actually written. This means that read propagation for 2913 * future reads cannot be terminated by this write. 2914 * 2915 * For privileged programs, uninitialized stack slots are considered 2916 * initialized by this write (even though we don't know exactly what offsets 2917 * are going to be written to). The idea is that we don't want the verifier to 2918 * reject future reads that access slots written to through variable offsets. 
2919 */ 2920 static int check_stack_write_var_off(struct bpf_verifier_env *env, 2921 /* func where register points to */ 2922 struct bpf_func_state *state, 2923 int ptr_regno, int off, int size, 2924 int value_regno, int insn_idx) 2925 { 2926 struct bpf_func_state *cur; /* state of the current function */ 2927 int min_off, max_off; 2928 int i, err; 2929 struct bpf_reg_state *ptr_reg = NULL, *value_reg = NULL; 2930 bool writing_zero = false; 2931 /* set if the fact that we're writing a zero is used to let any 2932 * stack slots remain STACK_ZERO 2933 */ 2934 bool zero_used = false; 2935 2936 cur = env->cur_state->frame[env->cur_state->curframe]; 2937 ptr_reg = &cur->regs[ptr_regno]; 2938 min_off = ptr_reg->smin_value + off; 2939 max_off = ptr_reg->smax_value + off + size; 2940 if (value_regno >= 0) 2941 value_reg = &cur->regs[value_regno]; 2942 if (value_reg && register_is_null(value_reg)) 2943 writing_zero = true; 2944 2945 err = grow_stack_state(state, round_up(-min_off, BPF_REG_SIZE)); 2946 if (err) 2947 return err; 2948 2949 2950 /* Variable offset writes destroy any spilled pointers in range. */ 2951 for (i = min_off; i < max_off; i++) { 2952 u8 new_type, *stype; 2953 int slot, spi; 2954 2955 slot = -i - 1; 2956 spi = slot / BPF_REG_SIZE; 2957 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; 2958 2959 if (!env->allow_ptr_leaks 2960 && *stype != NOT_INIT 2961 && *stype != SCALAR_VALUE) { 2962 /* Reject the write if there's are spilled pointers in 2963 * range. If we didn't reject here, the ptr status 2964 * would be erased below (even though not all slots are 2965 * actually overwritten), possibly opening the door to 2966 * leaks. 2967 */ 2968 verbose(env, "spilled ptr in range of var-offset stack write; insn %d, ptr off: %d", 2969 insn_idx, i); 2970 return -EINVAL; 2971 } 2972 2973 /* Erase all spilled pointers. */ 2974 state->stack[spi].spilled_ptr.type = NOT_INIT; 2975 2976 /* Update the slot type. */ 2977 new_type = STACK_MISC; 2978 if (writing_zero && *stype == STACK_ZERO) { 2979 new_type = STACK_ZERO; 2980 zero_used = true; 2981 } 2982 /* If the slot is STACK_INVALID, we check whether it's OK to 2983 * pretend that it will be initialized by this write. The slot 2984 * might not actually be written to, and so if we mark it as 2985 * initialized future reads might leak uninitialized memory. 2986 * For privileged programs, we will accept such reads to slots 2987 * that may or may not be written because, if we're reject 2988 * them, the error would be too confusing. 2989 */ 2990 if (*stype == STACK_INVALID && !env->allow_uninit_stack) { 2991 verbose(env, "uninit stack in range of var-offset write prohibited for !root; insn %d, off: %d", 2992 insn_idx, i); 2993 return -EINVAL; 2994 } 2995 *stype = new_type; 2996 } 2997 if (zero_used) { 2998 /* backtracking doesn't work for STACK_ZERO yet. */ 2999 err = mark_chain_precision(env, value_regno); 3000 if (err) 3001 return err; 3002 } 3003 return 0; 3004 } 3005 3006 /* When register 'dst_regno' is assigned some values from stack[min_off, 3007 * max_off), we set the register's type according to the types of the 3008 * respective stack slots. If all the stack values are known to be zeros, then 3009 * so is the destination reg. Otherwise, the register is considered to be 3010 * SCALAR. This function does not deal with register filling; the caller must 3011 * ensure that all spilled registers in the stack range have been marked as 3012 * read. 
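 *
 * Sketch (offsets arbitrary): if all eight bytes at fp-16..fp-9 were
 * previously written with zeros (their slot_type is STACK_ZERO), then
 * "r4 = *(u64 *)(r10 - 16)" marks r4 as constant zero; if any byte in that
 * range is STACK_MISC instead, r4 becomes an unknown SCALAR_VALUE.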
3013 */ 3014 static void mark_reg_stack_read(struct bpf_verifier_env *env, 3015 /* func where src register points to */ 3016 struct bpf_func_state *ptr_state, 3017 int min_off, int max_off, int dst_regno) 3018 { 3019 struct bpf_verifier_state *vstate = env->cur_state; 3020 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 3021 int i, slot, spi; 3022 u8 *stype; 3023 int zeros = 0; 3024 3025 for (i = min_off; i < max_off; i++) { 3026 slot = -i - 1; 3027 spi = slot / BPF_REG_SIZE; 3028 stype = ptr_state->stack[spi].slot_type; 3029 if (stype[slot % BPF_REG_SIZE] != STACK_ZERO) 3030 break; 3031 zeros++; 3032 } 3033 if (zeros == max_off - min_off) { 3034 /* any access_size read into register is zero extended, 3035 * so the whole register == const_zero 3036 */ 3037 __mark_reg_const_zero(&state->regs[dst_regno]); 3038 /* backtracking doesn't support STACK_ZERO yet, 3039 * so mark it precise here, so that later 3040 * backtracking can stop here. 3041 * Backtracking may not need this if this register 3042 * doesn't participate in pointer adjustment. 3043 * Forward propagation of precise flag is not 3044 * necessary either. This mark is only to stop 3045 * backtracking. Any register that contributed 3046 * to const 0 was marked precise before spill. 3047 */ 3048 state->regs[dst_regno].precise = true; 3049 } else { 3050 /* have read misc data from the stack */ 3051 mark_reg_unknown(env, state->regs, dst_regno); 3052 } 3053 state->regs[dst_regno].live |= REG_LIVE_WRITTEN; 3054 } 3055 3056 /* Read the stack at 'off' and put the results into the register indicated by 3057 * 'dst_regno'. It handles reg filling if the addressed stack slot is a 3058 * spilled reg. 3059 * 3060 * 'dst_regno' can be -1, meaning that the read value is not going to a 3061 * register. 3062 * 3063 * The access is assumed to be within the current stack bounds. 3064 */ 3065 static int check_stack_read_fixed_off(struct bpf_verifier_env *env, 3066 /* func where src register points to */ 3067 struct bpf_func_state *reg_state, 3068 int off, int size, int dst_regno) 3069 { 3070 struct bpf_verifier_state *vstate = env->cur_state; 3071 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 3072 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE; 3073 struct bpf_reg_state *reg; 3074 u8 *stype, type; 3075 3076 stype = reg_state->stack[spi].slot_type; 3077 reg = ®_state->stack[spi].spilled_ptr; 3078 3079 if (is_spilled_reg(®_state->stack[spi])) { 3080 u8 spill_size = 1; 3081 3082 for (i = BPF_REG_SIZE - 1; i > 0 && stype[i - 1] == STACK_SPILL; i--) 3083 spill_size++; 3084 3085 if (size != BPF_REG_SIZE || spill_size != BPF_REG_SIZE) { 3086 if (reg->type != SCALAR_VALUE) { 3087 verbose_linfo(env, env->insn_idx, "; "); 3088 verbose(env, "invalid size of register fill\n"); 3089 return -EACCES; 3090 } 3091 3092 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); 3093 if (dst_regno < 0) 3094 return 0; 3095 3096 if (!(off % BPF_REG_SIZE) && size == spill_size) { 3097 /* The earlier check_reg_arg() has decided the 3098 * subreg_def for this insn. Save it first. 
3099 */ 3100 s32 subreg_def = state->regs[dst_regno].subreg_def; 3101 3102 state->regs[dst_regno] = *reg; 3103 state->regs[dst_regno].subreg_def = subreg_def; 3104 } else { 3105 for (i = 0; i < size; i++) { 3106 type = stype[(slot - i) % BPF_REG_SIZE]; 3107 if (type == STACK_SPILL) 3108 continue; 3109 if (type == STACK_MISC) 3110 continue; 3111 verbose(env, "invalid read from stack off %d+%d size %d\n", 3112 off, i, size); 3113 return -EACCES; 3114 } 3115 mark_reg_unknown(env, state->regs, dst_regno); 3116 } 3117 state->regs[dst_regno].live |= REG_LIVE_WRITTEN; 3118 return 0; 3119 } 3120 3121 if (dst_regno >= 0) { 3122 /* restore register state from stack */ 3123 state->regs[dst_regno] = *reg; 3124 /* mark reg as written since spilled pointer state likely 3125 * has its liveness marks cleared by is_state_visited() 3126 * which resets stack/reg liveness for state transitions 3127 */ 3128 state->regs[dst_regno].live |= REG_LIVE_WRITTEN; 3129 } else if (__is_pointer_value(env->allow_ptr_leaks, reg)) { 3130 /* If dst_regno==-1, the caller is asking us whether 3131 * it is acceptable to use this value as a SCALAR_VALUE 3132 * (e.g. for XADD). 3133 * We must not allow unprivileged callers to do that 3134 * with spilled pointers. 3135 */ 3136 verbose(env, "leaking pointer from stack off %d\n", 3137 off); 3138 return -EACCES; 3139 } 3140 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); 3141 } else { 3142 for (i = 0; i < size; i++) { 3143 type = stype[(slot - i) % BPF_REG_SIZE]; 3144 if (type == STACK_MISC) 3145 continue; 3146 if (type == STACK_ZERO) 3147 continue; 3148 verbose(env, "invalid read from stack off %d+%d size %d\n", 3149 off, i, size); 3150 return -EACCES; 3151 } 3152 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); 3153 if (dst_regno >= 0) 3154 mark_reg_stack_read(env, reg_state, off, off + size, dst_regno); 3155 } 3156 return 0; 3157 } 3158 3159 enum stack_access_src { 3160 ACCESS_DIRECT = 1, /* the access is performed by an instruction */ 3161 ACCESS_HELPER = 2, /* the access is performed by a helper */ 3162 }; 3163 3164 static int check_stack_range_initialized(struct bpf_verifier_env *env, 3165 int regno, int off, int access_size, 3166 bool zero_size_allowed, 3167 enum stack_access_src type, 3168 struct bpf_call_arg_meta *meta); 3169 3170 static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno) 3171 { 3172 return cur_regs(env) + regno; 3173 } 3174 3175 /* Read the stack at 'ptr_regno + off' and put the result into the register 3176 * 'dst_regno'. 3177 * 'off' includes the pointer register's fixed offset(i.e. 'ptr_regno.off'), 3178 * but not its variable offset. 3179 * 'size' is assumed to be <= reg size and the access is assumed to be aligned. 3180 * 3181 * As opposed to check_stack_read_fixed_off, this function doesn't deal with 3182 * filling registers (i.e. reads of spilled register cannot be detected when 3183 * the offset is not fixed). We conservatively mark 'dst_regno' as containing 3184 * SCALAR_VALUE. That's why we assert that the 'ptr_regno' has a variable 3185 * offset; for a fixed offset check_stack_read_fixed_off should be used 3186 * instead. 3187 */ 3188 static int check_stack_read_var_off(struct bpf_verifier_env *env, 3189 int ptr_regno, int off, int size, int dst_regno) 3190 { 3191 /* The state of the source register. 
*/ 3192 struct bpf_reg_state *reg = reg_state(env, ptr_regno); 3193 struct bpf_func_state *ptr_state = func(env, reg); 3194 int err; 3195 int min_off, max_off; 3196 3197 /* Note that we pass a NULL meta, so raw access will not be permitted. 3198 */ 3199 err = check_stack_range_initialized(env, ptr_regno, off, size, 3200 false, ACCESS_DIRECT, NULL); 3201 if (err) 3202 return err; 3203 3204 min_off = reg->smin_value + off; 3205 max_off = reg->smax_value + off; 3206 mark_reg_stack_read(env, ptr_state, min_off, max_off + size, dst_regno); 3207 return 0; 3208 } 3209 3210 /* check_stack_read dispatches to check_stack_read_fixed_off or 3211 * check_stack_read_var_off. 3212 * 3213 * The caller must ensure that the offset falls within the allocated stack 3214 * bounds. 3215 * 3216 * 'dst_regno' is a register which will receive the value from the stack. It 3217 * can be -1, meaning that the read value is not going to a register. 3218 */ 3219 static int check_stack_read(struct bpf_verifier_env *env, 3220 int ptr_regno, int off, int size, 3221 int dst_regno) 3222 { 3223 struct bpf_reg_state *reg = reg_state(env, ptr_regno); 3224 struct bpf_func_state *state = func(env, reg); 3225 int err; 3226 /* Some accesses are only permitted with a static offset. */ 3227 bool var_off = !tnum_is_const(reg->var_off); 3228 3229 /* The offset is required to be static when reads don't go to a 3230 * register, in order to not leak pointers (see 3231 * check_stack_read_fixed_off). 3232 */ 3233 if (dst_regno < 0 && var_off) { 3234 char tn_buf[48]; 3235 3236 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 3237 verbose(env, "variable offset stack pointer cannot be passed into helper function; var_off=%s off=%d size=%d\n", 3238 tn_buf, off, size); 3239 return -EACCES; 3240 } 3241 /* Variable offset is prohibited for unprivileged mode for simplicity 3242 * since it requires corresponding support in Spectre masking for stack 3243 * ALU. See also retrieve_ptr_limit(). 3244 */ 3245 if (!env->bypass_spec_v1 && var_off) { 3246 char tn_buf[48]; 3247 3248 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 3249 verbose(env, "R%d variable offset stack access prohibited for !root, var_off=%s\n", 3250 ptr_regno, tn_buf); 3251 return -EACCES; 3252 } 3253 3254 if (!var_off) { 3255 off += reg->var_off.value; 3256 err = check_stack_read_fixed_off(env, state, off, size, 3257 dst_regno); 3258 } else { 3259 /* Variable offset stack reads need more conservative handling 3260 * than fixed offset ones. Note that dst_regno >= 0 on this 3261 * branch. 3262 */ 3263 err = check_stack_read_var_off(env, ptr_regno, off, size, 3264 dst_regno); 3265 } 3266 return err; 3267 } 3268 3269 3270 /* check_stack_write dispatches to check_stack_write_fixed_off or 3271 * check_stack_write_var_off. 3272 * 3273 * 'ptr_regno' is the register used as a pointer into the stack. 3274 * 'off' includes 'ptr_regno->off', but not its variable offset (if any). 3275 * 'value_regno' is the register whose value we're writing to the stack. It can 3276 * be -1, meaning that we're not writing from a register. 3277 * 3278 * The caller must ensure that the offset falls within the maximum stack size. 
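 *
 * Illustration (privileged program assumed; registers and bounds are
 * arbitrary): with r7 known only to lie in [-16, -8],
 *
 *   *(u64 *)(r10 - 8) = r1      // const var_off: fixed-offset path
 *   r2 = r10
 *   r2 += r7
 *   *(u64 *)(r2 + 0) = r1       // unknown var_off: variable-offset path,
 *                               // every byte in [r10 - 16, r10) is
 *                               // treated as potentially written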
3279 */ 3280 static int check_stack_write(struct bpf_verifier_env *env, 3281 int ptr_regno, int off, int size, 3282 int value_regno, int insn_idx) 3283 { 3284 struct bpf_reg_state *reg = reg_state(env, ptr_regno); 3285 struct bpf_func_state *state = func(env, reg); 3286 int err; 3287 3288 if (tnum_is_const(reg->var_off)) { 3289 off += reg->var_off.value; 3290 err = check_stack_write_fixed_off(env, state, off, size, 3291 value_regno, insn_idx); 3292 } else { 3293 /* Variable offset stack reads need more conservative handling 3294 * than fixed offset ones. 3295 */ 3296 err = check_stack_write_var_off(env, state, 3297 ptr_regno, off, size, 3298 value_regno, insn_idx); 3299 } 3300 return err; 3301 } 3302 3303 static int check_map_access_type(struct bpf_verifier_env *env, u32 regno, 3304 int off, int size, enum bpf_access_type type) 3305 { 3306 struct bpf_reg_state *regs = cur_regs(env); 3307 struct bpf_map *map = regs[regno].map_ptr; 3308 u32 cap = bpf_map_flags_to_cap(map); 3309 3310 if (type == BPF_WRITE && !(cap & BPF_MAP_CAN_WRITE)) { 3311 verbose(env, "write into map forbidden, value_size=%d off=%d size=%d\n", 3312 map->value_size, off, size); 3313 return -EACCES; 3314 } 3315 3316 if (type == BPF_READ && !(cap & BPF_MAP_CAN_READ)) { 3317 verbose(env, "read from map forbidden, value_size=%d off=%d size=%d\n", 3318 map->value_size, off, size); 3319 return -EACCES; 3320 } 3321 3322 return 0; 3323 } 3324 3325 /* check read/write into memory region (e.g., map value, ringbuf sample, etc) */ 3326 static int __check_mem_access(struct bpf_verifier_env *env, int regno, 3327 int off, int size, u32 mem_size, 3328 bool zero_size_allowed) 3329 { 3330 bool size_ok = size > 0 || (size == 0 && zero_size_allowed); 3331 struct bpf_reg_state *reg; 3332 3333 if (off >= 0 && size_ok && (u64)off + size <= mem_size) 3334 return 0; 3335 3336 reg = &cur_regs(env)[regno]; 3337 switch (reg->type) { 3338 case PTR_TO_MAP_KEY: 3339 verbose(env, "invalid access to map key, key_size=%d off=%d size=%d\n", 3340 mem_size, off, size); 3341 break; 3342 case PTR_TO_MAP_VALUE: 3343 verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n", 3344 mem_size, off, size); 3345 break; 3346 case PTR_TO_PACKET: 3347 case PTR_TO_PACKET_META: 3348 case PTR_TO_PACKET_END: 3349 verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n", 3350 off, size, regno, reg->id, off, mem_size); 3351 break; 3352 case PTR_TO_MEM: 3353 default: 3354 verbose(env, "invalid access to memory, mem_size=%u off=%d size=%d\n", 3355 mem_size, off, size); 3356 } 3357 3358 return -EACCES; 3359 } 3360 3361 /* check read/write into a memory region with possible variable offset */ 3362 static int check_mem_region_access(struct bpf_verifier_env *env, u32 regno, 3363 int off, int size, u32 mem_size, 3364 bool zero_size_allowed) 3365 { 3366 struct bpf_verifier_state *vstate = env->cur_state; 3367 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 3368 struct bpf_reg_state *reg = &state->regs[regno]; 3369 int err; 3370 3371 /* We may have adjusted the register pointing to memory region, so we 3372 * need to try adding each of min_value and max_value to off 3373 * to make sure our theoretical access will be safe. 3374 */ 3375 if (env->log.level & BPF_LOG_LEVEL) 3376 print_verifier_state(env, state); 3377 3378 /* The minimum value is only important with signed 3379 * comparisons where we can't assume the floor of a 3380 * value is 0. 
If we are using signed variables for our 3381 * index'es we need to make sure that whatever we use 3382 * will have a set floor within our range. 3383 */ 3384 if (reg->smin_value < 0 && 3385 (reg->smin_value == S64_MIN || 3386 (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) || 3387 reg->smin_value + off < 0)) { 3388 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", 3389 regno); 3390 return -EACCES; 3391 } 3392 err = __check_mem_access(env, regno, reg->smin_value + off, size, 3393 mem_size, zero_size_allowed); 3394 if (err) { 3395 verbose(env, "R%d min value is outside of the allowed memory range\n", 3396 regno); 3397 return err; 3398 } 3399 3400 /* If we haven't set a max value then we need to bail since we can't be 3401 * sure we won't do bad things. 3402 * If reg->umax_value + off could overflow, treat that as unbounded too. 3403 */ 3404 if (reg->umax_value >= BPF_MAX_VAR_OFF) { 3405 verbose(env, "R%d unbounded memory access, make sure to bounds check any such access\n", 3406 regno); 3407 return -EACCES; 3408 } 3409 err = __check_mem_access(env, regno, reg->umax_value + off, size, 3410 mem_size, zero_size_allowed); 3411 if (err) { 3412 verbose(env, "R%d max value is outside of the allowed memory range\n", 3413 regno); 3414 return err; 3415 } 3416 3417 return 0; 3418 } 3419 3420 /* check read/write into a map element with possible variable offset */ 3421 static int check_map_access(struct bpf_verifier_env *env, u32 regno, 3422 int off, int size, bool zero_size_allowed) 3423 { 3424 struct bpf_verifier_state *vstate = env->cur_state; 3425 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 3426 struct bpf_reg_state *reg = &state->regs[regno]; 3427 struct bpf_map *map = reg->map_ptr; 3428 int err; 3429 3430 err = check_mem_region_access(env, regno, off, size, map->value_size, 3431 zero_size_allowed); 3432 if (err) 3433 return err; 3434 3435 if (map_value_has_spin_lock(map)) { 3436 u32 lock = map->spin_lock_off; 3437 3438 /* if any part of struct bpf_spin_lock can be touched by 3439 * load/store reject this program. 3440 * To check that [x1, x2) overlaps with [y1, y2) 3441 * it is sufficient to check x1 < y2 && y1 < x2. 3442 */ 3443 if (reg->smin_value + off < lock + sizeof(struct bpf_spin_lock) && 3444 lock < reg->umax_value + off + size) { 3445 verbose(env, "bpf_spin_lock cannot be accessed directly by load/store\n"); 3446 return -EACCES; 3447 } 3448 } 3449 if (map_value_has_timer(map)) { 3450 u32 t = map->timer_off; 3451 3452 if (reg->smin_value + off < t + sizeof(struct bpf_timer) && 3453 t < reg->umax_value + off + size) { 3454 verbose(env, "bpf_timer cannot be accessed directly by load/store\n"); 3455 return -EACCES; 3456 } 3457 } 3458 return err; 3459 } 3460 3461 #define MAX_PACKET_OFF 0xffff 3462 3463 static enum bpf_prog_type resolve_prog_type(struct bpf_prog *prog) 3464 { 3465 return prog->aux->dst_prog ? prog->aux->dst_prog->type : prog->type; 3466 } 3467 3468 static bool may_access_direct_pkt_data(struct bpf_verifier_env *env, 3469 const struct bpf_call_arg_meta *meta, 3470 enum bpf_access_type t) 3471 { 3472 enum bpf_prog_type prog_type = resolve_prog_type(env->prog); 3473 3474 switch (prog_type) { 3475 /* Program types only with direct read access go here! 
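 * For instance, a BPF_PROG_TYPE_CGROUP_SKB program may load bytes between
 * skb->data and skb->data_end, but any store through a packet pointer is
 * rejected, while BPF_PROG_TYPE_SCHED_CLS below gets both loads and stores.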
*/ 3476 case BPF_PROG_TYPE_LWT_IN: 3477 case BPF_PROG_TYPE_LWT_OUT: 3478 case BPF_PROG_TYPE_LWT_SEG6LOCAL: 3479 case BPF_PROG_TYPE_SK_REUSEPORT: 3480 case BPF_PROG_TYPE_FLOW_DISSECTOR: 3481 case BPF_PROG_TYPE_CGROUP_SKB: 3482 if (t == BPF_WRITE) 3483 return false; 3484 fallthrough; 3485 3486 /* Program types with direct read + write access go here! */ 3487 case BPF_PROG_TYPE_SCHED_CLS: 3488 case BPF_PROG_TYPE_SCHED_ACT: 3489 case BPF_PROG_TYPE_XDP: 3490 case BPF_PROG_TYPE_LWT_XMIT: 3491 case BPF_PROG_TYPE_SK_SKB: 3492 case BPF_PROG_TYPE_SK_MSG: 3493 if (meta) 3494 return meta->pkt_access; 3495 3496 env->seen_direct_write = true; 3497 return true; 3498 3499 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 3500 if (t == BPF_WRITE) 3501 env->seen_direct_write = true; 3502 3503 return true; 3504 3505 default: 3506 return false; 3507 } 3508 } 3509 3510 static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, 3511 int size, bool zero_size_allowed) 3512 { 3513 struct bpf_reg_state *regs = cur_regs(env); 3514 struct bpf_reg_state *reg = ®s[regno]; 3515 int err; 3516 3517 /* We may have added a variable offset to the packet pointer; but any 3518 * reg->range we have comes after that. We are only checking the fixed 3519 * offset. 3520 */ 3521 3522 /* We don't allow negative numbers, because we aren't tracking enough 3523 * detail to prove they're safe. 3524 */ 3525 if (reg->smin_value < 0) { 3526 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", 3527 regno); 3528 return -EACCES; 3529 } 3530 3531 err = reg->range < 0 ? -EINVAL : 3532 __check_mem_access(env, regno, off, size, reg->range, 3533 zero_size_allowed); 3534 if (err) { 3535 verbose(env, "R%d offset is outside of the packet\n", regno); 3536 return err; 3537 } 3538 3539 /* __check_mem_access has made sure "off + size - 1" is within u16. 3540 * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff, 3541 * otherwise find_good_pkt_pointers would have refused to set range info 3542 * that __check_mem_access would have rejected this pkt access. 3543 * Therefore, "off + reg->umax_value + size - 1" won't overflow u32. 3544 */ 3545 env->prog->aux->max_pkt_offset = 3546 max_t(u32, env->prog->aux->max_pkt_offset, 3547 off + reg->umax_value + size - 1); 3548 3549 return err; 3550 } 3551 3552 /* check access to 'struct bpf_context' fields. Supports fixed offsets only */ 3553 static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size, 3554 enum bpf_access_type t, enum bpf_reg_type *reg_type, 3555 struct btf **btf, u32 *btf_id) 3556 { 3557 struct bpf_insn_access_aux info = { 3558 .reg_type = *reg_type, 3559 .log = &env->log, 3560 }; 3561 3562 if (env->ops->is_valid_access && 3563 env->ops->is_valid_access(off, size, t, env->prog, &info)) { 3564 /* A non zero info.ctx_field_size indicates that this field is a 3565 * candidate for later verifier transformation to load the whole 3566 * field and then apply a mask when accessed with a narrower 3567 * access than actual ctx access size. A zero info.ctx_field_size 3568 * will only allow for whole field access and rejects any other 3569 * type of narrower access. 
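 *
 * Sketch (field choice is only an example): __sk_buff->mark is a 4-byte
 * field; with ctx_field_size == 4 a 1-byte program access such as
 * "r0 = *(u8 *)(r1 + offsetof(struct __sk_buff, mark))" is accepted here
 * and later rewritten by convert_ctx_accesses() into a load of the full
 * underlying field followed by a shift and mask that extract the
 * requested byte.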
3570 */ 3571 *reg_type = info.reg_type; 3572 3573 if (*reg_type == PTR_TO_BTF_ID || *reg_type == PTR_TO_BTF_ID_OR_NULL) { 3574 *btf = info.btf; 3575 *btf_id = info.btf_id; 3576 } else { 3577 env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size; 3578 } 3579 /* remember the offset of last byte accessed in ctx */ 3580 if (env->prog->aux->max_ctx_offset < off + size) 3581 env->prog->aux->max_ctx_offset = off + size; 3582 return 0; 3583 } 3584 3585 verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size); 3586 return -EACCES; 3587 } 3588 3589 static int check_flow_keys_access(struct bpf_verifier_env *env, int off, 3590 int size) 3591 { 3592 if (size < 0 || off < 0 || 3593 (u64)off + size > sizeof(struct bpf_flow_keys)) { 3594 verbose(env, "invalid access to flow keys off=%d size=%d\n", 3595 off, size); 3596 return -EACCES; 3597 } 3598 return 0; 3599 } 3600 3601 static int check_sock_access(struct bpf_verifier_env *env, int insn_idx, 3602 u32 regno, int off, int size, 3603 enum bpf_access_type t) 3604 { 3605 struct bpf_reg_state *regs = cur_regs(env); 3606 struct bpf_reg_state *reg = ®s[regno]; 3607 struct bpf_insn_access_aux info = {}; 3608 bool valid; 3609 3610 if (reg->smin_value < 0) { 3611 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", 3612 regno); 3613 return -EACCES; 3614 } 3615 3616 switch (reg->type) { 3617 case PTR_TO_SOCK_COMMON: 3618 valid = bpf_sock_common_is_valid_access(off, size, t, &info); 3619 break; 3620 case PTR_TO_SOCKET: 3621 valid = bpf_sock_is_valid_access(off, size, t, &info); 3622 break; 3623 case PTR_TO_TCP_SOCK: 3624 valid = bpf_tcp_sock_is_valid_access(off, size, t, &info); 3625 break; 3626 case PTR_TO_XDP_SOCK: 3627 valid = bpf_xdp_sock_is_valid_access(off, size, t, &info); 3628 break; 3629 default: 3630 valid = false; 3631 } 3632 3633 3634 if (valid) { 3635 env->insn_aux_data[insn_idx].ctx_field_size = 3636 info.ctx_field_size; 3637 return 0; 3638 } 3639 3640 verbose(env, "R%d invalid %s access off=%d size=%d\n", 3641 regno, reg_type_str[reg->type], off, size); 3642 3643 return -EACCES; 3644 } 3645 3646 static bool is_pointer_value(struct bpf_verifier_env *env, int regno) 3647 { 3648 return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno)); 3649 } 3650 3651 static bool is_ctx_reg(struct bpf_verifier_env *env, int regno) 3652 { 3653 const struct bpf_reg_state *reg = reg_state(env, regno); 3654 3655 return reg->type == PTR_TO_CTX; 3656 } 3657 3658 static bool is_sk_reg(struct bpf_verifier_env *env, int regno) 3659 { 3660 const struct bpf_reg_state *reg = reg_state(env, regno); 3661 3662 return type_is_sk_pointer(reg->type); 3663 } 3664 3665 static bool is_pkt_reg(struct bpf_verifier_env *env, int regno) 3666 { 3667 const struct bpf_reg_state *reg = reg_state(env, regno); 3668 3669 return type_is_pkt_pointer(reg->type); 3670 } 3671 3672 static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno) 3673 { 3674 const struct bpf_reg_state *reg = reg_state(env, regno); 3675 3676 /* Separate to is_ctx_reg() since we still want to allow BPF_ST here. */ 3677 return reg->type == PTR_TO_FLOW_KEYS; 3678 } 3679 3680 static int check_pkt_ptr_alignment(struct bpf_verifier_env *env, 3681 const struct bpf_reg_state *reg, 3682 int off, int size, bool strict) 3683 { 3684 struct tnum reg_off; 3685 int ip_align; 3686 3687 /* Byte size accesses are always allowed. 
*/ 3688 if (!strict || size == 1) 3689 return 0; 3690 3691 /* For platforms that do not have a Kconfig enabling 3692 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of 3693 * NET_IP_ALIGN is universally set to '2'. And on platforms 3694 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get 3695 * to this code only in strict mode where we want to emulate 3696 * the NET_IP_ALIGN==2 checking. Therefore use an 3697 * unconditional IP align value of '2'. 3698 */ 3699 ip_align = 2; 3700 3701 reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off)); 3702 if (!tnum_is_aligned(reg_off, size)) { 3703 char tn_buf[48]; 3704 3705 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 3706 verbose(env, 3707 "misaligned packet access off %d+%s+%d+%d size %d\n", 3708 ip_align, tn_buf, reg->off, off, size); 3709 return -EACCES; 3710 } 3711 3712 return 0; 3713 } 3714 3715 static int check_generic_ptr_alignment(struct bpf_verifier_env *env, 3716 const struct bpf_reg_state *reg, 3717 const char *pointer_desc, 3718 int off, int size, bool strict) 3719 { 3720 struct tnum reg_off; 3721 3722 /* Byte size accesses are always allowed. */ 3723 if (!strict || size == 1) 3724 return 0; 3725 3726 reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off)); 3727 if (!tnum_is_aligned(reg_off, size)) { 3728 char tn_buf[48]; 3729 3730 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 3731 verbose(env, "misaligned %saccess off %s+%d+%d size %d\n", 3732 pointer_desc, tn_buf, reg->off, off, size); 3733 return -EACCES; 3734 } 3735 3736 return 0; 3737 } 3738 3739 static int check_ptr_alignment(struct bpf_verifier_env *env, 3740 const struct bpf_reg_state *reg, int off, 3741 int size, bool strict_alignment_once) 3742 { 3743 bool strict = env->strict_alignment || strict_alignment_once; 3744 const char *pointer_desc = ""; 3745 3746 switch (reg->type) { 3747 case PTR_TO_PACKET: 3748 case PTR_TO_PACKET_META: 3749 /* Special case, because of NET_IP_ALIGN. Given metadata sits 3750 * right in front, treat it the very same way. 3751 */ 3752 return check_pkt_ptr_alignment(env, reg, off, size, strict); 3753 case PTR_TO_FLOW_KEYS: 3754 pointer_desc = "flow keys "; 3755 break; 3756 case PTR_TO_MAP_KEY: 3757 pointer_desc = "key "; 3758 break; 3759 case PTR_TO_MAP_VALUE: 3760 pointer_desc = "value "; 3761 break; 3762 case PTR_TO_CTX: 3763 pointer_desc = "context "; 3764 break; 3765 case PTR_TO_STACK: 3766 pointer_desc = "stack "; 3767 /* The stack spill tracking logic in check_stack_write_fixed_off() 3768 * and check_stack_read_fixed_off() relies on stack accesses being 3769 * aligned. 
3770 */ 3771 strict = true; 3772 break; 3773 case PTR_TO_SOCKET: 3774 pointer_desc = "sock "; 3775 break; 3776 case PTR_TO_SOCK_COMMON: 3777 pointer_desc = "sock_common "; 3778 break; 3779 case PTR_TO_TCP_SOCK: 3780 pointer_desc = "tcp_sock "; 3781 break; 3782 case PTR_TO_XDP_SOCK: 3783 pointer_desc = "xdp_sock "; 3784 break; 3785 default: 3786 break; 3787 } 3788 return check_generic_ptr_alignment(env, reg, pointer_desc, off, size, 3789 strict); 3790 } 3791 3792 static int update_stack_depth(struct bpf_verifier_env *env, 3793 const struct bpf_func_state *func, 3794 int off) 3795 { 3796 u16 stack = env->subprog_info[func->subprogno].stack_depth; 3797 3798 if (stack >= -off) 3799 return 0; 3800 3801 /* update known max for given subprogram */ 3802 env->subprog_info[func->subprogno].stack_depth = -off; 3803 return 0; 3804 } 3805 3806 /* starting from main bpf function walk all instructions of the function 3807 * and recursively walk all callees that given function can call. 3808 * Ignore jump and exit insns. 3809 * Since recursion is prevented by check_cfg() this algorithm 3810 * only needs a local stack of MAX_CALL_FRAMES to remember callsites 3811 */ 3812 static int check_max_stack_depth(struct bpf_verifier_env *env) 3813 { 3814 int depth = 0, frame = 0, idx = 0, i = 0, subprog_end; 3815 struct bpf_subprog_info *subprog = env->subprog_info; 3816 struct bpf_insn *insn = env->prog->insnsi; 3817 bool tail_call_reachable = false; 3818 int ret_insn[MAX_CALL_FRAMES]; 3819 int ret_prog[MAX_CALL_FRAMES]; 3820 int j; 3821 3822 process_func: 3823 /* protect against potential stack overflow that might happen when 3824 * bpf2bpf calls get combined with tailcalls. Limit the caller's stack 3825 * depth for such case down to 256 so that the worst case scenario 3826 * would result in 8k stack size (32 which is tailcall limit * 256 = 3827 * 8k). 3828 * 3829 * To get the idea what might happen, see an example: 3830 * func1 -> sub rsp, 128 3831 * subfunc1 -> sub rsp, 256 3832 * tailcall1 -> add rsp, 256 3833 * func2 -> sub rsp, 192 (total stack size = 128 + 192 = 320) 3834 * subfunc2 -> sub rsp, 64 3835 * subfunc22 -> sub rsp, 128 3836 * tailcall2 -> add rsp, 128 3837 * func3 -> sub rsp, 32 (total stack size 128 + 192 + 64 + 32 = 416) 3838 * 3839 * tailcall will unwind the current stack frame but it will not get rid 3840 * of caller's stack as shown on the example above. 3841 */ 3842 if (idx && subprog[idx].has_tail_call && depth >= 256) { 3843 verbose(env, 3844 "tail_calls are not allowed when call stack of previous frames is %d bytes. Too large\n", 3845 depth); 3846 return -EACCES; 3847 } 3848 /* round up to 32-bytes, since this is granularity 3849 * of interpreter stack size 3850 */ 3851 depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32); 3852 if (depth > MAX_BPF_STACK) { 3853 verbose(env, "combined stack size of %d calls is %d. Too large\n", 3854 frame + 1, depth); 3855 return -EACCES; 3856 } 3857 continue_func: 3858 subprog_end = subprog[idx + 1].start; 3859 for (; i < subprog_end; i++) { 3860 int next_insn; 3861 3862 if (!bpf_pseudo_call(insn + i) && !bpf_pseudo_func(insn + i)) 3863 continue; 3864 /* remember insn and function to return to */ 3865 ret_insn[frame] = i + 1; 3866 ret_prog[frame] = idx; 3867 3868 /* find the callee */ 3869 next_insn = i + insn[i].imm + 1; 3870 idx = find_subprog(env, next_insn); 3871 if (idx < 0) { 3872 WARN_ONCE(1, "verifier bug. 
No program starts at insn %d\n", 3873 next_insn); 3874 return -EFAULT; 3875 } 3876 if (subprog[idx].is_async_cb) { 3877 if (subprog[idx].has_tail_call) { 3878 verbose(env, "verifier bug. subprog has tail_call and async cb\n"); 3879 return -EFAULT; 3880 } 3881 /* async callbacks don't increase bpf prog stack size */ 3882 continue; 3883 } 3884 i = next_insn; 3885 3886 if (subprog[idx].has_tail_call) 3887 tail_call_reachable = true; 3888 3889 frame++; 3890 if (frame >= MAX_CALL_FRAMES) { 3891 verbose(env, "the call stack of %d frames is too deep !\n", 3892 frame); 3893 return -E2BIG; 3894 } 3895 goto process_func; 3896 } 3897 /* if tail call got detected across bpf2bpf calls then mark each of the 3898 * currently present subprog frames as tail call reachable subprogs; 3899 * this info will be utilized by JIT so that we will be preserving the 3900 * tail call counter throughout bpf2bpf calls combined with tailcalls 3901 */ 3902 if (tail_call_reachable) 3903 for (j = 0; j < frame; j++) 3904 subprog[ret_prog[j]].tail_call_reachable = true; 3905 if (subprog[0].tail_call_reachable) 3906 env->prog->aux->tail_call_reachable = true; 3907 3908 /* end of for() loop means the last insn of the 'subprog' 3909 * was reached. Doesn't matter whether it was JA or EXIT 3910 */ 3911 if (frame == 0) 3912 return 0; 3913 depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32); 3914 frame--; 3915 i = ret_insn[frame]; 3916 idx = ret_prog[frame]; 3917 goto continue_func; 3918 } 3919 3920 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 3921 static int get_callee_stack_depth(struct bpf_verifier_env *env, 3922 const struct bpf_insn *insn, int idx) 3923 { 3924 int start = idx + insn->imm + 1, subprog; 3925 3926 subprog = find_subprog(env, start); 3927 if (subprog < 0) { 3928 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", 3929 start); 3930 return -EFAULT; 3931 } 3932 return env->subprog_info[subprog].stack_depth; 3933 } 3934 #endif 3935 3936 int check_ctx_reg(struct bpf_verifier_env *env, 3937 const struct bpf_reg_state *reg, int regno) 3938 { 3939 /* Access to ctx or passing it to a helper is only allowed in 3940 * its original, unmodified form. 
3941 */ 3942 3943 if (reg->off) { 3944 verbose(env, "dereference of modified ctx ptr R%d off=%d disallowed\n", 3945 regno, reg->off); 3946 return -EACCES; 3947 } 3948 3949 if (!tnum_is_const(reg->var_off) || reg->var_off.value) { 3950 char tn_buf[48]; 3951 3952 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 3953 verbose(env, "variable ctx access var_off=%s disallowed\n", tn_buf); 3954 return -EACCES; 3955 } 3956 3957 return 0; 3958 } 3959 3960 static int __check_buffer_access(struct bpf_verifier_env *env, 3961 const char *buf_info, 3962 const struct bpf_reg_state *reg, 3963 int regno, int off, int size) 3964 { 3965 if (off < 0) { 3966 verbose(env, 3967 "R%d invalid %s buffer access: off=%d, size=%d\n", 3968 regno, buf_info, off, size); 3969 return -EACCES; 3970 } 3971 if (!tnum_is_const(reg->var_off) || reg->var_off.value) { 3972 char tn_buf[48]; 3973 3974 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 3975 verbose(env, 3976 "R%d invalid variable buffer offset: off=%d, var_off=%s\n", 3977 regno, off, tn_buf); 3978 return -EACCES; 3979 } 3980 3981 return 0; 3982 } 3983 3984 static int check_tp_buffer_access(struct bpf_verifier_env *env, 3985 const struct bpf_reg_state *reg, 3986 int regno, int off, int size) 3987 { 3988 int err; 3989 3990 err = __check_buffer_access(env, "tracepoint", reg, regno, off, size); 3991 if (err) 3992 return err; 3993 3994 if (off + size > env->prog->aux->max_tp_access) 3995 env->prog->aux->max_tp_access = off + size; 3996 3997 return 0; 3998 } 3999 4000 static int check_buffer_access(struct bpf_verifier_env *env, 4001 const struct bpf_reg_state *reg, 4002 int regno, int off, int size, 4003 bool zero_size_allowed, 4004 const char *buf_info, 4005 u32 *max_access) 4006 { 4007 int err; 4008 4009 err = __check_buffer_access(env, buf_info, reg, regno, off, size); 4010 if (err) 4011 return err; 4012 4013 if (off + size > *max_access) 4014 *max_access = off + size; 4015 4016 return 0; 4017 } 4018 4019 /* BPF architecture zero extends alu32 ops into 64-bit registesr */ 4020 static void zext_32_to_64(struct bpf_reg_state *reg) 4021 { 4022 reg->var_off = tnum_subreg(reg->var_off); 4023 __reg_assign_32_into_64(reg); 4024 } 4025 4026 /* truncate register to smaller size (in bytes) 4027 * must be called with size < BPF_REG_SIZE 4028 */ 4029 static void coerce_reg_to_size(struct bpf_reg_state *reg, int size) 4030 { 4031 u64 mask; 4032 4033 /* clear high bits in bit representation */ 4034 reg->var_off = tnum_cast(reg->var_off, size); 4035 4036 /* fix arithmetic bounds */ 4037 mask = ((u64)1 << (size * 8)) - 1; 4038 if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) { 4039 reg->umin_value &= mask; 4040 reg->umax_value &= mask; 4041 } else { 4042 reg->umin_value = 0; 4043 reg->umax_value = mask; 4044 } 4045 reg->smin_value = reg->umin_value; 4046 reg->smax_value = reg->umax_value; 4047 4048 /* If size is smaller than 32bit register the 32bit register 4049 * values are also truncated so we push 64-bit bounds into 4050 * 32-bit bounds. Above were truncated < 32-bits already. 4051 */ 4052 if (size >= 4) 4053 return; 4054 __reg_combine_64_into_32(reg); 4055 } 4056 4057 static bool bpf_map_is_rdonly(const struct bpf_map *map) 4058 { 4059 /* A map is considered read-only if the following condition are true: 4060 * 4061 * 1) BPF program side cannot change any of the map content. The 4062 * BPF_F_RDONLY_PROG flag is throughout the lifetime of a map 4063 * and was set at map creation time. 
4064 * 2) The map value(s) have been initialized from user space by a 4065 * loader and then "frozen", such that no new map update/delete 4066 * operations from syscall side are possible for the rest of 4067 * the map's lifetime from that point onwards. 4068 * 3) Any parallel/pending map update/delete operations from syscall 4069 * side have been completed. Only after that point, it's safe to 4070 * assume that map value(s) are immutable. 4071 */ 4072 return (map->map_flags & BPF_F_RDONLY_PROG) && 4073 READ_ONCE(map->frozen) && 4074 !bpf_map_write_active(map); 4075 } 4076 4077 static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val) 4078 { 4079 void *ptr; 4080 u64 addr; 4081 int err; 4082 4083 err = map->ops->map_direct_value_addr(map, &addr, off); 4084 if (err) 4085 return err; 4086 ptr = (void *)(long)addr + off; 4087 4088 switch (size) { 4089 case sizeof(u8): 4090 *val = (u64)*(u8 *)ptr; 4091 break; 4092 case sizeof(u16): 4093 *val = (u64)*(u16 *)ptr; 4094 break; 4095 case sizeof(u32): 4096 *val = (u64)*(u32 *)ptr; 4097 break; 4098 case sizeof(u64): 4099 *val = *(u64 *)ptr; 4100 break; 4101 default: 4102 return -EINVAL; 4103 } 4104 return 0; 4105 } 4106 4107 static int check_ptr_to_btf_access(struct bpf_verifier_env *env, 4108 struct bpf_reg_state *regs, 4109 int regno, int off, int size, 4110 enum bpf_access_type atype, 4111 int value_regno) 4112 { 4113 struct bpf_reg_state *reg = regs + regno; 4114 const struct btf_type *t = btf_type_by_id(reg->btf, reg->btf_id); 4115 const char *tname = btf_name_by_offset(reg->btf, t->name_off); 4116 u32 btf_id; 4117 int ret; 4118 4119 if (off < 0) { 4120 verbose(env, 4121 "R%d is ptr_%s invalid negative access: off=%d\n", 4122 regno, tname, off); 4123 return -EACCES; 4124 } 4125 if (!tnum_is_const(reg->var_off) || reg->var_off.value) { 4126 char tn_buf[48]; 4127 4128 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 4129 verbose(env, 4130 "R%d is ptr_%s invalid variable offset: off=%d, var_off=%s\n", 4131 regno, tname, off, tn_buf); 4132 return -EACCES; 4133 } 4134 4135 if (env->ops->btf_struct_access) { 4136 ret = env->ops->btf_struct_access(&env->log, reg->btf, t, 4137 off, size, atype, &btf_id); 4138 } else { 4139 if (atype != BPF_READ) { 4140 verbose(env, "only read is supported\n"); 4141 return -EACCES; 4142 } 4143 4144 ret = btf_struct_access(&env->log, reg->btf, t, off, size, 4145 atype, &btf_id); 4146 } 4147 4148 if (ret < 0) 4149 return ret; 4150 4151 if (atype == BPF_READ && value_regno >= 0) 4152 mark_btf_ld_reg(env, regs, value_regno, ret, reg->btf, btf_id); 4153 4154 return 0; 4155 } 4156 4157 static int check_ptr_to_map_access(struct bpf_verifier_env *env, 4158 struct bpf_reg_state *regs, 4159 int regno, int off, int size, 4160 enum bpf_access_type atype, 4161 int value_regno) 4162 { 4163 struct bpf_reg_state *reg = regs + regno; 4164 struct bpf_map *map = reg->map_ptr; 4165 const struct btf_type *t; 4166 const char *tname; 4167 u32 btf_id; 4168 int ret; 4169 4170 if (!btf_vmlinux) { 4171 verbose(env, "map_ptr access not supported without CONFIG_DEBUG_INFO_BTF\n"); 4172 return -ENOTSUPP; 4173 } 4174 4175 if (!map->ops->map_btf_id || !*map->ops->map_btf_id) { 4176 verbose(env, "map_ptr access not supported for map type %d\n", 4177 map->map_type); 4178 return -ENOTSUPP; 4179 } 4180 4181 t = btf_type_by_id(btf_vmlinux, *map->ops->map_btf_id); 4182 tname = btf_name_by_offset(btf_vmlinux, t->name_off); 4183 4184 if (!env->allow_ptr_to_map_access) { 4185 verbose(env, 4186 "%s access is allowed only to CAP_PERFMON and 
CAP_SYS_ADMIN\n", 4187 tname); 4188 return -EPERM; 4189 } 4190 4191 if (off < 0) { 4192 verbose(env, "R%d is %s invalid negative access: off=%d\n", 4193 regno, tname, off); 4194 return -EACCES; 4195 } 4196 4197 if (atype != BPF_READ) { 4198 verbose(env, "only read from %s is supported\n", tname); 4199 return -EACCES; 4200 } 4201 4202 ret = btf_struct_access(&env->log, btf_vmlinux, t, off, size, atype, &btf_id); 4203 if (ret < 0) 4204 return ret; 4205 4206 if (value_regno >= 0) 4207 mark_btf_ld_reg(env, regs, value_regno, ret, btf_vmlinux, btf_id); 4208 4209 return 0; 4210 } 4211 4212 /* Check that the stack access at the given offset is within bounds. The 4213 * maximum valid offset is -1. 4214 * 4215 * The minimum valid offset is -MAX_BPF_STACK for writes, and 4216 * -state->allocated_stack for reads. 4217 */ 4218 static int check_stack_slot_within_bounds(int off, 4219 struct bpf_func_state *state, 4220 enum bpf_access_type t) 4221 { 4222 int min_valid_off; 4223 4224 if (t == BPF_WRITE) 4225 min_valid_off = -MAX_BPF_STACK; 4226 else 4227 min_valid_off = -state->allocated_stack; 4228 4229 if (off < min_valid_off || off > -1) 4230 return -EACCES; 4231 return 0; 4232 } 4233 4234 /* Check that the stack access at 'regno + off' falls within the maximum stack 4235 * bounds. 4236 * 4237 * 'off' includes `regno->offset`, but not its dynamic part (if any). 4238 */ 4239 static int check_stack_access_within_bounds( 4240 struct bpf_verifier_env *env, 4241 int regno, int off, int access_size, 4242 enum stack_access_src src, enum bpf_access_type type) 4243 { 4244 struct bpf_reg_state *regs = cur_regs(env); 4245 struct bpf_reg_state *reg = regs + regno; 4246 struct bpf_func_state *state = func(env, reg); 4247 int min_off, max_off; 4248 int err; 4249 char *err_extra; 4250 4251 if (src == ACCESS_HELPER) 4252 /* We don't know if helpers are reading or writing (or both). 
*/ 4253 err_extra = " indirect access to"; 4254 else if (type == BPF_READ) 4255 err_extra = " read from"; 4256 else 4257 err_extra = " write to"; 4258 4259 if (tnum_is_const(reg->var_off)) { 4260 min_off = reg->var_off.value + off; 4261 if (access_size > 0) 4262 max_off = min_off + access_size - 1; 4263 else 4264 max_off = min_off; 4265 } else { 4266 if (reg->smax_value >= BPF_MAX_VAR_OFF || 4267 reg->smin_value <= -BPF_MAX_VAR_OFF) { 4268 verbose(env, "invalid unbounded variable-offset%s stack R%d\n", 4269 err_extra, regno); 4270 return -EACCES; 4271 } 4272 min_off = reg->smin_value + off; 4273 if (access_size > 0) 4274 max_off = reg->smax_value + off + access_size - 1; 4275 else 4276 max_off = min_off; 4277 } 4278 4279 err = check_stack_slot_within_bounds(min_off, state, type); 4280 if (!err) 4281 err = check_stack_slot_within_bounds(max_off, state, type); 4282 4283 if (err) { 4284 if (tnum_is_const(reg->var_off)) { 4285 verbose(env, "invalid%s stack R%d off=%d size=%d\n", 4286 err_extra, regno, off, access_size); 4287 } else { 4288 char tn_buf[48]; 4289 4290 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 4291 verbose(env, "invalid variable-offset%s stack R%d var_off=%s size=%d\n", 4292 err_extra, regno, tn_buf, access_size); 4293 } 4294 } 4295 return err; 4296 } 4297 4298 /* check whether memory at (regno + off) is accessible for t = (read | write) 4299 * if t==write, value_regno is a register which value is stored into memory 4300 * if t==read, value_regno is a register which will receive the value from memory 4301 * if t==write && value_regno==-1, some unknown value is stored into memory 4302 * if t==read && value_regno==-1, don't care what we read from memory 4303 */ 4304 static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, 4305 int off, int bpf_size, enum bpf_access_type t, 4306 int value_regno, bool strict_alignment_once) 4307 { 4308 struct bpf_reg_state *regs = cur_regs(env); 4309 struct bpf_reg_state *reg = regs + regno; 4310 struct bpf_func_state *state; 4311 int size, err = 0; 4312 4313 size = bpf_size_to_bytes(bpf_size); 4314 if (size < 0) 4315 return size; 4316 4317 /* alignment checks will add in reg->off themselves */ 4318 err = check_ptr_alignment(env, reg, off, size, strict_alignment_once); 4319 if (err) 4320 return err; 4321 4322 /* for access checks, reg->off is just part of off */ 4323 off += reg->off; 4324 4325 if (reg->type == PTR_TO_MAP_KEY) { 4326 if (t == BPF_WRITE) { 4327 verbose(env, "write to change key R%d not allowed\n", regno); 4328 return -EACCES; 4329 } 4330 4331 err = check_mem_region_access(env, regno, off, size, 4332 reg->map_ptr->key_size, false); 4333 if (err) 4334 return err; 4335 if (value_regno >= 0) 4336 mark_reg_unknown(env, regs, value_regno); 4337 } else if (reg->type == PTR_TO_MAP_VALUE) { 4338 if (t == BPF_WRITE && value_regno >= 0 && 4339 is_pointer_value(env, value_regno)) { 4340 verbose(env, "R%d leaks addr into map\n", value_regno); 4341 return -EACCES; 4342 } 4343 err = check_map_access_type(env, regno, off, size, t); 4344 if (err) 4345 return err; 4346 err = check_map_access(env, regno, off, size, false); 4347 if (!err && t == BPF_READ && value_regno >= 0) { 4348 struct bpf_map *map = reg->map_ptr; 4349 4350 /* if map is read-only, track its contents as scalars */ 4351 if (tnum_is_const(reg->var_off) && 4352 bpf_map_is_rdonly(map) && 4353 map->ops->map_direct_value_addr) { 4354 int map_off = off + reg->var_off.value; 4355 u64 val = 0; 4356 4357 err = bpf_map_direct_read(map, map_off, size, 4358 &val); 
4359 if (err) 4360 return err; 4361 4362 regs[value_regno].type = SCALAR_VALUE; 4363 __mark_reg_known(®s[value_regno], val); 4364 } else { 4365 mark_reg_unknown(env, regs, value_regno); 4366 } 4367 } 4368 } else if (reg->type == PTR_TO_MEM) { 4369 if (t == BPF_WRITE && value_regno >= 0 && 4370 is_pointer_value(env, value_regno)) { 4371 verbose(env, "R%d leaks addr into mem\n", value_regno); 4372 return -EACCES; 4373 } 4374 err = check_mem_region_access(env, regno, off, size, 4375 reg->mem_size, false); 4376 if (!err && t == BPF_READ && value_regno >= 0) 4377 mark_reg_unknown(env, regs, value_regno); 4378 } else if (reg->type == PTR_TO_CTX) { 4379 enum bpf_reg_type reg_type = SCALAR_VALUE; 4380 struct btf *btf = NULL; 4381 u32 btf_id = 0; 4382 4383 if (t == BPF_WRITE && value_regno >= 0 && 4384 is_pointer_value(env, value_regno)) { 4385 verbose(env, "R%d leaks addr into ctx\n", value_regno); 4386 return -EACCES; 4387 } 4388 4389 err = check_ctx_reg(env, reg, regno); 4390 if (err < 0) 4391 return err; 4392 4393 err = check_ctx_access(env, insn_idx, off, size, t, ®_type, &btf, &btf_id); 4394 if (err) 4395 verbose_linfo(env, insn_idx, "; "); 4396 if (!err && t == BPF_READ && value_regno >= 0) { 4397 /* ctx access returns either a scalar, or a 4398 * PTR_TO_PACKET[_META,_END]. In the latter 4399 * case, we know the offset is zero. 4400 */ 4401 if (reg_type == SCALAR_VALUE) { 4402 mark_reg_unknown(env, regs, value_regno); 4403 } else { 4404 mark_reg_known_zero(env, regs, 4405 value_regno); 4406 if (reg_type_may_be_null(reg_type)) 4407 regs[value_regno].id = ++env->id_gen; 4408 /* A load of ctx field could have different 4409 * actual load size with the one encoded in the 4410 * insn. When the dst is PTR, it is for sure not 4411 * a sub-register. 4412 */ 4413 regs[value_regno].subreg_def = DEF_NOT_SUBREG; 4414 if (reg_type == PTR_TO_BTF_ID || 4415 reg_type == PTR_TO_BTF_ID_OR_NULL) { 4416 regs[value_regno].btf = btf; 4417 regs[value_regno].btf_id = btf_id; 4418 } 4419 } 4420 regs[value_regno].type = reg_type; 4421 } 4422 4423 } else if (reg->type == PTR_TO_STACK) { 4424 /* Basic bounds checks. 
*/ 4425 err = check_stack_access_within_bounds(env, regno, off, size, ACCESS_DIRECT, t); 4426 if (err) 4427 return err; 4428 4429 state = func(env, reg); 4430 err = update_stack_depth(env, state, off); 4431 if (err) 4432 return err; 4433 4434 if (t == BPF_READ) 4435 err = check_stack_read(env, regno, off, size, 4436 value_regno); 4437 else 4438 err = check_stack_write(env, regno, off, size, 4439 value_regno, insn_idx); 4440 } else if (reg_is_pkt_pointer(reg)) { 4441 if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) { 4442 verbose(env, "cannot write into packet\n"); 4443 return -EACCES; 4444 } 4445 if (t == BPF_WRITE && value_regno >= 0 && 4446 is_pointer_value(env, value_regno)) { 4447 verbose(env, "R%d leaks addr into packet\n", 4448 value_regno); 4449 return -EACCES; 4450 } 4451 err = check_packet_access(env, regno, off, size, false); 4452 if (!err && t == BPF_READ && value_regno >= 0) 4453 mark_reg_unknown(env, regs, value_regno); 4454 } else if (reg->type == PTR_TO_FLOW_KEYS) { 4455 if (t == BPF_WRITE && value_regno >= 0 && 4456 is_pointer_value(env, value_regno)) { 4457 verbose(env, "R%d leaks addr into flow keys\n", 4458 value_regno); 4459 return -EACCES; 4460 } 4461 4462 err = check_flow_keys_access(env, off, size); 4463 if (!err && t == BPF_READ && value_regno >= 0) 4464 mark_reg_unknown(env, regs, value_regno); 4465 } else if (type_is_sk_pointer(reg->type)) { 4466 if (t == BPF_WRITE) { 4467 verbose(env, "R%d cannot write into %s\n", 4468 regno, reg_type_str[reg->type]); 4469 return -EACCES; 4470 } 4471 err = check_sock_access(env, insn_idx, regno, off, size, t); 4472 if (!err && value_regno >= 0) 4473 mark_reg_unknown(env, regs, value_regno); 4474 } else if (reg->type == PTR_TO_TP_BUFFER) { 4475 err = check_tp_buffer_access(env, reg, regno, off, size); 4476 if (!err && t == BPF_READ && value_regno >= 0) 4477 mark_reg_unknown(env, regs, value_regno); 4478 } else if (reg->type == PTR_TO_BTF_ID) { 4479 err = check_ptr_to_btf_access(env, regs, regno, off, size, t, 4480 value_regno); 4481 } else if (reg->type == CONST_PTR_TO_MAP) { 4482 err = check_ptr_to_map_access(env, regs, regno, off, size, t, 4483 value_regno); 4484 } else if (reg->type == PTR_TO_RDONLY_BUF) { 4485 if (t == BPF_WRITE) { 4486 verbose(env, "R%d cannot write into %s\n", 4487 regno, reg_type_str[reg->type]); 4488 return -EACCES; 4489 } 4490 err = check_buffer_access(env, reg, regno, off, size, false, 4491 "rdonly", 4492 &env->prog->aux->max_rdonly_access); 4493 if (!err && value_regno >= 0) 4494 mark_reg_unknown(env, regs, value_regno); 4495 } else if (reg->type == PTR_TO_RDWR_BUF) { 4496 err = check_buffer_access(env, reg, regno, off, size, false, 4497 "rdwr", 4498 &env->prog->aux->max_rdwr_access); 4499 if (!err && t == BPF_READ && value_regno >= 0) 4500 mark_reg_unknown(env, regs, value_regno); 4501 } else { 4502 verbose(env, "R%d invalid mem access '%s'\n", regno, 4503 reg_type_str[reg->type]); 4504 return -EACCES; 4505 } 4506 4507 if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ && 4508 regs[value_regno].type == SCALAR_VALUE) { 4509 /* b/h/w load zero-extends, mark upper bits as known 0 */ 4510 coerce_reg_to_size(®s[value_regno], size); 4511 } 4512 return err; 4513 } 4514 4515 static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn) 4516 { 4517 int load_reg; 4518 int err; 4519 4520 switch (insn->imm) { 4521 case BPF_ADD: 4522 case BPF_ADD | BPF_FETCH: 4523 case BPF_AND: 4524 case BPF_AND | BPF_FETCH: 4525 case BPF_OR: 4526 case BPF_OR | 
BPF_FETCH: 4527 case BPF_XOR: 4528 case BPF_XOR | BPF_FETCH: 4529 case BPF_XCHG: 4530 case BPF_CMPXCHG: 4531 break; 4532 default: 4533 verbose(env, "BPF_ATOMIC uses invalid atomic opcode %02x\n", insn->imm); 4534 return -EINVAL; 4535 } 4536 4537 if (BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) { 4538 verbose(env, "invalid atomic operand size\n"); 4539 return -EINVAL; 4540 } 4541 4542 /* check src1 operand */ 4543 err = check_reg_arg(env, insn->src_reg, SRC_OP); 4544 if (err) 4545 return err; 4546 4547 /* check src2 operand */ 4548 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 4549 if (err) 4550 return err; 4551 4552 if (insn->imm == BPF_CMPXCHG) { 4553 /* Check comparison of R0 with memory location */ 4554 err = check_reg_arg(env, BPF_REG_0, SRC_OP); 4555 if (err) 4556 return err; 4557 } 4558 4559 if (is_pointer_value(env, insn->src_reg)) { 4560 verbose(env, "R%d leaks addr into mem\n", insn->src_reg); 4561 return -EACCES; 4562 } 4563 4564 if (is_ctx_reg(env, insn->dst_reg) || 4565 is_pkt_reg(env, insn->dst_reg) || 4566 is_flow_key_reg(env, insn->dst_reg) || 4567 is_sk_reg(env, insn->dst_reg)) { 4568 verbose(env, "BPF_ATOMIC stores into R%d %s is not allowed\n", 4569 insn->dst_reg, 4570 reg_type_str[reg_state(env, insn->dst_reg)->type]); 4571 return -EACCES; 4572 } 4573 4574 if (insn->imm & BPF_FETCH) { 4575 if (insn->imm == BPF_CMPXCHG) 4576 load_reg = BPF_REG_0; 4577 else 4578 load_reg = insn->src_reg; 4579 4580 /* check and record load of old value */ 4581 err = check_reg_arg(env, load_reg, DST_OP); 4582 if (err) 4583 return err; 4584 } else { 4585 /* This instruction accesses a memory location but doesn't 4586 * actually load it into a register. 4587 */ 4588 load_reg = -1; 4589 } 4590 4591 /* check whether we can read the memory */ 4592 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, 4593 BPF_SIZE(insn->code), BPF_READ, load_reg, true); 4594 if (err) 4595 return err; 4596 4597 /* check whether we can write into the same memory */ 4598 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, 4599 BPF_SIZE(insn->code), BPF_WRITE, -1, true); 4600 if (err) 4601 return err; 4602 4603 return 0; 4604 } 4605 4606 /* When register 'regno' is used to read the stack (either directly or through 4607 * a helper function) make sure that it's within stack boundary and, depending 4608 * on the access type, that all elements of the stack are initialized. 4609 * 4610 * 'off' includes 'regno->off', but not its dynamic part (if any). 4611 * 4612 * All registers that have been spilled on the stack in the slots within the 4613 * read offsets are marked as read. 4614 */ 4615 static int check_stack_range_initialized( 4616 struct bpf_verifier_env *env, int regno, int off, 4617 int access_size, bool zero_size_allowed, 4618 enum stack_access_src type, struct bpf_call_arg_meta *meta) 4619 { 4620 struct bpf_reg_state *reg = reg_state(env, regno); 4621 struct bpf_func_state *state = func(env, reg); 4622 int err, min_off, max_off, i, j, slot, spi; 4623 char *err_extra = type == ACCESS_HELPER ? " indirect" : ""; 4624 enum bpf_access_type bounds_check_type; 4625 /* Some accesses can write anything into the stack, others are 4626 * read-only. 4627 */ 4628 bool clobber = false; 4629 4630 if (access_size == 0 && !zero_size_allowed) { 4631 verbose(env, "invalid zero-sized read\n"); 4632 return -EACCES; 4633 } 4634 4635 if (type == ACCESS_HELPER) { 4636 /* The bounds checks for writes are more permissive than for 4637 * reads. 
However, if raw_mode is not set, we'll do extra 4638 * checks below. 4639 */ 4640 bounds_check_type = BPF_WRITE; 4641 clobber = true; 4642 } else { 4643 bounds_check_type = BPF_READ; 4644 } 4645 err = check_stack_access_within_bounds(env, regno, off, access_size, 4646 type, bounds_check_type); 4647 if (err) 4648 return err; 4649 4650 4651 if (tnum_is_const(reg->var_off)) { 4652 min_off = max_off = reg->var_off.value + off; 4653 } else { 4654 /* Variable offset is prohibited for unprivileged mode for 4655 * simplicity since it requires corresponding support in 4656 * Spectre masking for stack ALU. 4657 * See also retrieve_ptr_limit(). 4658 */ 4659 if (!env->bypass_spec_v1) { 4660 char tn_buf[48]; 4661 4662 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 4663 verbose(env, "R%d%s variable offset stack access prohibited for !root, var_off=%s\n", 4664 regno, err_extra, tn_buf); 4665 return -EACCES; 4666 } 4667 /* Only initialized buffer on stack is allowed to be accessed 4668 * with variable offset. With uninitialized buffer it's hard to 4669 * guarantee that whole memory is marked as initialized on 4670 * helper return since specific bounds are unknown what may 4671 * cause uninitialized stack leaking. 4672 */ 4673 if (meta && meta->raw_mode) 4674 meta = NULL; 4675 4676 min_off = reg->smin_value + off; 4677 max_off = reg->smax_value + off; 4678 } 4679 4680 if (meta && meta->raw_mode) { 4681 meta->access_size = access_size; 4682 meta->regno = regno; 4683 return 0; 4684 } 4685 4686 for (i = min_off; i < max_off + access_size; i++) { 4687 u8 *stype; 4688 4689 slot = -i - 1; 4690 spi = slot / BPF_REG_SIZE; 4691 if (state->allocated_stack <= slot) 4692 goto err; 4693 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; 4694 if (*stype == STACK_MISC) 4695 goto mark; 4696 if (*stype == STACK_ZERO) { 4697 if (clobber) { 4698 /* helper can write anything into the stack */ 4699 *stype = STACK_MISC; 4700 } 4701 goto mark; 4702 } 4703 4704 if (is_spilled_reg(&state->stack[spi]) && 4705 state->stack[spi].spilled_ptr.type == PTR_TO_BTF_ID) 4706 goto mark; 4707 4708 if (is_spilled_reg(&state->stack[spi]) && 4709 (state->stack[spi].spilled_ptr.type == SCALAR_VALUE || 4710 env->allow_ptr_leaks)) { 4711 if (clobber) { 4712 __mark_reg_unknown(env, &state->stack[spi].spilled_ptr); 4713 for (j = 0; j < BPF_REG_SIZE; j++) 4714 scrub_spilled_slot(&state->stack[spi].slot_type[j]); 4715 } 4716 goto mark; 4717 } 4718 4719 err: 4720 if (tnum_is_const(reg->var_off)) { 4721 verbose(env, "invalid%s read from stack R%d off %d+%d size %d\n", 4722 err_extra, regno, min_off, i - min_off, access_size); 4723 } else { 4724 char tn_buf[48]; 4725 4726 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 4727 verbose(env, "invalid%s read from stack R%d var_off %s+%d size %d\n", 4728 err_extra, regno, tn_buf, i - min_off, access_size); 4729 } 4730 return -EACCES; 4731 mark: 4732 /* reading any byte out of 8-byte 'spill_slot' will cause 4733 * the whole slot to be marked as 'read' 4734 */ 4735 mark_reg_read(env, &state->stack[spi].spilled_ptr, 4736 state->stack[spi].spilled_ptr.parent, 4737 REG_LIVE_READ64); 4738 } 4739 return update_stack_depth(env, state, min_off); 4740 } 4741 4742 static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, 4743 int access_size, bool zero_size_allowed, 4744 struct bpf_call_arg_meta *meta) 4745 { 4746 struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; 4747 4748 switch (reg->type) { 4749 case PTR_TO_PACKET: 4750 case PTR_TO_PACKET_META: 4751 return 
check_packet_access(env, regno, reg->off, access_size, 4752 zero_size_allowed); 4753 case PTR_TO_MAP_KEY: 4754 return check_mem_region_access(env, regno, reg->off, access_size, 4755 reg->map_ptr->key_size, false); 4756 case PTR_TO_MAP_VALUE: 4757 if (check_map_access_type(env, regno, reg->off, access_size, 4758 meta && meta->raw_mode ? BPF_WRITE : 4759 BPF_READ)) 4760 return -EACCES; 4761 return check_map_access(env, regno, reg->off, access_size, 4762 zero_size_allowed); 4763 case PTR_TO_MEM: 4764 return check_mem_region_access(env, regno, reg->off, 4765 access_size, reg->mem_size, 4766 zero_size_allowed); 4767 case PTR_TO_RDONLY_BUF: 4768 if (meta && meta->raw_mode) 4769 return -EACCES; 4770 return check_buffer_access(env, reg, regno, reg->off, 4771 access_size, zero_size_allowed, 4772 "rdonly", 4773 &env->prog->aux->max_rdonly_access); 4774 case PTR_TO_RDWR_BUF: 4775 return check_buffer_access(env, reg, regno, reg->off, 4776 access_size, zero_size_allowed, 4777 "rdwr", 4778 &env->prog->aux->max_rdwr_access); 4779 case PTR_TO_STACK: 4780 return check_stack_range_initialized( 4781 env, 4782 regno, reg->off, access_size, 4783 zero_size_allowed, ACCESS_HELPER, meta); 4784 default: /* scalar_value or invalid ptr */ 4785 /* Allow zero-byte read from NULL, regardless of pointer type */ 4786 if (zero_size_allowed && access_size == 0 && 4787 register_is_null(reg)) 4788 return 0; 4789 4790 verbose(env, "R%d type=%s expected=%s\n", regno, 4791 reg_type_str[reg->type], 4792 reg_type_str[PTR_TO_STACK]); 4793 return -EACCES; 4794 } 4795 } 4796 4797 int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg, 4798 u32 regno, u32 mem_size) 4799 { 4800 if (register_is_null(reg)) 4801 return 0; 4802 4803 if (reg_type_may_be_null(reg->type)) { 4804 /* Assuming that the register contains a value check if the memory 4805 * access is safe. Temporarily save and restore the register's state as 4806 * the conversion shouldn't be visible to a caller. 4807 */ 4808 const struct bpf_reg_state saved_reg = *reg; 4809 int rv; 4810 4811 mark_ptr_not_null_reg(reg); 4812 rv = check_helper_mem_access(env, regno, mem_size, true, NULL); 4813 *reg = saved_reg; 4814 return rv; 4815 } 4816 4817 return check_helper_mem_access(env, regno, mem_size, true, NULL); 4818 } 4819 4820 /* Implementation details: 4821 * bpf_map_lookup returns PTR_TO_MAP_VALUE_OR_NULL 4822 * Two bpf_map_lookups (even with the same key) will have different reg->id. 4823 * For traditional PTR_TO_MAP_VALUE the verifier clears reg->id after 4824 * value_or_null->value transition, since the verifier only cares about 4825 * the range of access to valid map value pointer and doesn't care about actual 4826 * address of the map element. 4827 * For maps with 'struct bpf_spin_lock' inside map value the verifier keeps 4828 * reg->id > 0 after value_or_null->value transition. By doing so 4829 * two bpf_map_lookups will be considered two different pointers that 4830 * point to different bpf_spin_locks. 4831 * The verifier allows taking only one bpf_spin_lock at a time to avoid 4832 * dead-locks. 4833 * Since only one bpf_spin_lock is allowed the checks are simpler than 4834 * reg_is_refcounted() logic. The verifier needs to remember only 4835 * one spin_lock instead of array of acquired_refs. 4836 * cur_state->active_spin_lock remembers which map value element got locked 4837 * and clears it after bpf_spin_unlock. 
4838 */ 4839 static int process_spin_lock(struct bpf_verifier_env *env, int regno, 4840 bool is_lock) 4841 { 4842 struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; 4843 struct bpf_verifier_state *cur = env->cur_state; 4844 bool is_const = tnum_is_const(reg->var_off); 4845 struct bpf_map *map = reg->map_ptr; 4846 u64 val = reg->var_off.value; 4847 4848 if (!is_const) { 4849 verbose(env, 4850 "R%d doesn't have constant offset. bpf_spin_lock has to be at the constant offset\n", 4851 regno); 4852 return -EINVAL; 4853 } 4854 if (!map->btf) { 4855 verbose(env, 4856 "map '%s' has to have BTF in order to use bpf_spin_lock\n", 4857 map->name); 4858 return -EINVAL; 4859 } 4860 if (!map_value_has_spin_lock(map)) { 4861 if (map->spin_lock_off == -E2BIG) 4862 verbose(env, 4863 "map '%s' has more than one 'struct bpf_spin_lock'\n", 4864 map->name); 4865 else if (map->spin_lock_off == -ENOENT) 4866 verbose(env, 4867 "map '%s' doesn't have 'struct bpf_spin_lock'\n", 4868 map->name); 4869 else 4870 verbose(env, 4871 "map '%s' is not a struct type or bpf_spin_lock is mangled\n", 4872 map->name); 4873 return -EINVAL; 4874 } 4875 if (map->spin_lock_off != val + reg->off) { 4876 verbose(env, "off %lld doesn't point to 'struct bpf_spin_lock'\n", 4877 val + reg->off); 4878 return -EINVAL; 4879 } 4880 if (is_lock) { 4881 if (cur->active_spin_lock) { 4882 verbose(env, 4883 "Locking two bpf_spin_locks are not allowed\n"); 4884 return -EINVAL; 4885 } 4886 cur->active_spin_lock = reg->id; 4887 } else { 4888 if (!cur->active_spin_lock) { 4889 verbose(env, "bpf_spin_unlock without taking a lock\n"); 4890 return -EINVAL; 4891 } 4892 if (cur->active_spin_lock != reg->id) { 4893 verbose(env, "bpf_spin_unlock of different lock\n"); 4894 return -EINVAL; 4895 } 4896 cur->active_spin_lock = 0; 4897 } 4898 return 0; 4899 } 4900 4901 static int process_timer_func(struct bpf_verifier_env *env, int regno, 4902 struct bpf_call_arg_meta *meta) 4903 { 4904 struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; 4905 bool is_const = tnum_is_const(reg->var_off); 4906 struct bpf_map *map = reg->map_ptr; 4907 u64 val = reg->var_off.value; 4908 4909 if (!is_const) { 4910 verbose(env, 4911 "R%d doesn't have constant offset. bpf_timer has to be at the constant offset\n", 4912 regno); 4913 return -EINVAL; 4914 } 4915 if (!map->btf) { 4916 verbose(env, "map '%s' has to have BTF in order to use bpf_timer\n", 4917 map->name); 4918 return -EINVAL; 4919 } 4920 if (!map_value_has_timer(map)) { 4921 if (map->timer_off == -E2BIG) 4922 verbose(env, 4923 "map '%s' has more than one 'struct bpf_timer'\n", 4924 map->name); 4925 else if (map->timer_off == -ENOENT) 4926 verbose(env, 4927 "map '%s' doesn't have 'struct bpf_timer'\n", 4928 map->name); 4929 else 4930 verbose(env, 4931 "map '%s' is not a struct type or bpf_timer is mangled\n", 4932 map->name); 4933 return -EINVAL; 4934 } 4935 if (map->timer_off != val + reg->off) { 4936 verbose(env, "off %lld doesn't point to 'struct bpf_timer' that is at %d\n", 4937 val + reg->off, map->timer_off); 4938 return -EINVAL; 4939 } 4940 if (meta->map_ptr) { 4941 verbose(env, "verifier bug. 
Two map pointers in a timer helper\n"); 4942 return -EFAULT; 4943 } 4944 meta->map_uid = reg->map_uid; 4945 meta->map_ptr = map; 4946 return 0; 4947 } 4948 4949 static bool arg_type_is_mem_ptr(enum bpf_arg_type type) 4950 { 4951 return type == ARG_PTR_TO_MEM || 4952 type == ARG_PTR_TO_MEM_OR_NULL || 4953 type == ARG_PTR_TO_UNINIT_MEM; 4954 } 4955 4956 static bool arg_type_is_mem_size(enum bpf_arg_type type) 4957 { 4958 return type == ARG_CONST_SIZE || 4959 type == ARG_CONST_SIZE_OR_ZERO; 4960 } 4961 4962 static bool arg_type_is_alloc_size(enum bpf_arg_type type) 4963 { 4964 return type == ARG_CONST_ALLOC_SIZE_OR_ZERO; 4965 } 4966 4967 static bool arg_type_is_int_ptr(enum bpf_arg_type type) 4968 { 4969 return type == ARG_PTR_TO_INT || 4970 type == ARG_PTR_TO_LONG; 4971 } 4972 4973 static int int_ptr_type_to_size(enum bpf_arg_type type) 4974 { 4975 if (type == ARG_PTR_TO_INT) 4976 return sizeof(u32); 4977 else if (type == ARG_PTR_TO_LONG) 4978 return sizeof(u64); 4979 4980 return -EINVAL; 4981 } 4982 4983 static int resolve_map_arg_type(struct bpf_verifier_env *env, 4984 const struct bpf_call_arg_meta *meta, 4985 enum bpf_arg_type *arg_type) 4986 { 4987 if (!meta->map_ptr) { 4988 /* kernel subsystem misconfigured verifier */ 4989 verbose(env, "invalid map_ptr to access map->type\n"); 4990 return -EACCES; 4991 } 4992 4993 switch (meta->map_ptr->map_type) { 4994 case BPF_MAP_TYPE_SOCKMAP: 4995 case BPF_MAP_TYPE_SOCKHASH: 4996 if (*arg_type == ARG_PTR_TO_MAP_VALUE) { 4997 *arg_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON; 4998 } else { 4999 verbose(env, "invalid arg_type for sockmap/sockhash\n"); 5000 return -EINVAL; 5001 } 5002 break; 5003 case BPF_MAP_TYPE_BLOOM_FILTER: 5004 if (meta->func_id == BPF_FUNC_map_peek_elem) 5005 *arg_type = ARG_PTR_TO_MAP_VALUE; 5006 break; 5007 default: 5008 break; 5009 } 5010 return 0; 5011 } 5012 5013 struct bpf_reg_types { 5014 const enum bpf_reg_type types[10]; 5015 u32 *btf_id; 5016 }; 5017 5018 static const struct bpf_reg_types map_key_value_types = { 5019 .types = { 5020 PTR_TO_STACK, 5021 PTR_TO_PACKET, 5022 PTR_TO_PACKET_META, 5023 PTR_TO_MAP_KEY, 5024 PTR_TO_MAP_VALUE, 5025 }, 5026 }; 5027 5028 static const struct bpf_reg_types sock_types = { 5029 .types = { 5030 PTR_TO_SOCK_COMMON, 5031 PTR_TO_SOCKET, 5032 PTR_TO_TCP_SOCK, 5033 PTR_TO_XDP_SOCK, 5034 }, 5035 }; 5036 5037 #ifdef CONFIG_NET 5038 static const struct bpf_reg_types btf_id_sock_common_types = { 5039 .types = { 5040 PTR_TO_SOCK_COMMON, 5041 PTR_TO_SOCKET, 5042 PTR_TO_TCP_SOCK, 5043 PTR_TO_XDP_SOCK, 5044 PTR_TO_BTF_ID, 5045 }, 5046 .btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON], 5047 }; 5048 #endif 5049 5050 static const struct bpf_reg_types mem_types = { 5051 .types = { 5052 PTR_TO_STACK, 5053 PTR_TO_PACKET, 5054 PTR_TO_PACKET_META, 5055 PTR_TO_MAP_KEY, 5056 PTR_TO_MAP_VALUE, 5057 PTR_TO_MEM, 5058 PTR_TO_RDONLY_BUF, 5059 PTR_TO_RDWR_BUF, 5060 }, 5061 }; 5062 5063 static const struct bpf_reg_types int_ptr_types = { 5064 .types = { 5065 PTR_TO_STACK, 5066 PTR_TO_PACKET, 5067 PTR_TO_PACKET_META, 5068 PTR_TO_MAP_KEY, 5069 PTR_TO_MAP_VALUE, 5070 }, 5071 }; 5072 5073 static const struct bpf_reg_types fullsock_types = { .types = { PTR_TO_SOCKET } }; 5074 static const struct bpf_reg_types scalar_types = { .types = { SCALAR_VALUE } }; 5075 static const struct bpf_reg_types context_types = { .types = { PTR_TO_CTX } }; 5076 static const struct bpf_reg_types alloc_mem_types = { .types = { PTR_TO_MEM } }; 5077 static const struct bpf_reg_types const_map_ptr_types = { .types = { CONST_PTR_TO_MAP } }; 5078 
static const struct bpf_reg_types btf_ptr_types = { .types = { PTR_TO_BTF_ID } }; 5079 static const struct bpf_reg_types spin_lock_types = { .types = { PTR_TO_MAP_VALUE } }; 5080 static const struct bpf_reg_types percpu_btf_ptr_types = { .types = { PTR_TO_PERCPU_BTF_ID } }; 5081 static const struct bpf_reg_types func_ptr_types = { .types = { PTR_TO_FUNC } }; 5082 static const struct bpf_reg_types stack_ptr_types = { .types = { PTR_TO_STACK } }; 5083 static const struct bpf_reg_types const_str_ptr_types = { .types = { PTR_TO_MAP_VALUE } }; 5084 static const struct bpf_reg_types timer_types = { .types = { PTR_TO_MAP_VALUE } }; 5085 5086 static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = { 5087 [ARG_PTR_TO_MAP_KEY] = &map_key_value_types, 5088 [ARG_PTR_TO_MAP_VALUE] = &map_key_value_types, 5089 [ARG_PTR_TO_UNINIT_MAP_VALUE] = &map_key_value_types, 5090 [ARG_PTR_TO_MAP_VALUE_OR_NULL] = &map_key_value_types, 5091 [ARG_CONST_SIZE] = &scalar_types, 5092 [ARG_CONST_SIZE_OR_ZERO] = &scalar_types, 5093 [ARG_CONST_ALLOC_SIZE_OR_ZERO] = &scalar_types, 5094 [ARG_CONST_MAP_PTR] = &const_map_ptr_types, 5095 [ARG_PTR_TO_CTX] = &context_types, 5096 [ARG_PTR_TO_CTX_OR_NULL] = &context_types, 5097 [ARG_PTR_TO_SOCK_COMMON] = &sock_types, 5098 #ifdef CONFIG_NET 5099 [ARG_PTR_TO_BTF_ID_SOCK_COMMON] = &btf_id_sock_common_types, 5100 #endif 5101 [ARG_PTR_TO_SOCKET] = &fullsock_types, 5102 [ARG_PTR_TO_SOCKET_OR_NULL] = &fullsock_types, 5103 [ARG_PTR_TO_BTF_ID] = &btf_ptr_types, 5104 [ARG_PTR_TO_SPIN_LOCK] = &spin_lock_types, 5105 [ARG_PTR_TO_MEM] = &mem_types, 5106 [ARG_PTR_TO_MEM_OR_NULL] = &mem_types, 5107 [ARG_PTR_TO_UNINIT_MEM] = &mem_types, 5108 [ARG_PTR_TO_ALLOC_MEM] = &alloc_mem_types, 5109 [ARG_PTR_TO_ALLOC_MEM_OR_NULL] = &alloc_mem_types, 5110 [ARG_PTR_TO_INT] = &int_ptr_types, 5111 [ARG_PTR_TO_LONG] = &int_ptr_types, 5112 [ARG_PTR_TO_PERCPU_BTF_ID] = &percpu_btf_ptr_types, 5113 [ARG_PTR_TO_FUNC] = &func_ptr_types, 5114 [ARG_PTR_TO_STACK_OR_NULL] = &stack_ptr_types, 5115 [ARG_PTR_TO_CONST_STR] = &const_str_ptr_types, 5116 [ARG_PTR_TO_TIMER] = &timer_types, 5117 }; 5118 5119 static int check_reg_type(struct bpf_verifier_env *env, u32 regno, 5120 enum bpf_arg_type arg_type, 5121 const u32 *arg_btf_id) 5122 { 5123 struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; 5124 enum bpf_reg_type expected, type = reg->type; 5125 const struct bpf_reg_types *compatible; 5126 int i, j; 5127 5128 compatible = compatible_reg_types[arg_type]; 5129 if (!compatible) { 5130 verbose(env, "verifier internal error: unsupported arg type %d\n", arg_type); 5131 return -EFAULT; 5132 } 5133 5134 for (i = 0; i < ARRAY_SIZE(compatible->types); i++) { 5135 expected = compatible->types[i]; 5136 if (expected == NOT_INIT) 5137 break; 5138 5139 if (type == expected) 5140 goto found; 5141 } 5142 5143 verbose(env, "R%d type=%s expected=", regno, reg_type_str[type]); 5144 for (j = 0; j + 1 < i; j++) 5145 verbose(env, "%s, ", reg_type_str[compatible->types[j]]); 5146 verbose(env, "%s\n", reg_type_str[compatible->types[j]]); 5147 return -EACCES; 5148 5149 found: 5150 if (type == PTR_TO_BTF_ID) { 5151 if (!arg_btf_id) { 5152 if (!compatible->btf_id) { 5153 verbose(env, "verifier internal error: missing arg compatible BTF ID\n"); 5154 return -EFAULT; 5155 } 5156 arg_btf_id = compatible->btf_id; 5157 } 5158 5159 if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off, 5160 btf_vmlinux, *arg_btf_id)) { 5161 verbose(env, "R%d is of type %s but %s is expected\n", 5162 regno, kernel_type_name(reg->btf, 
reg->btf_id), 5163 kernel_type_name(btf_vmlinux, *arg_btf_id)); 5164 return -EACCES; 5165 } 5166 5167 if (!tnum_is_const(reg->var_off) || reg->var_off.value) { 5168 verbose(env, "R%d is a pointer to in-kernel struct with non-zero offset\n", 5169 regno); 5170 return -EACCES; 5171 } 5172 } 5173 5174 return 0; 5175 } 5176 5177 static int check_func_arg(struct bpf_verifier_env *env, u32 arg, 5178 struct bpf_call_arg_meta *meta, 5179 const struct bpf_func_proto *fn) 5180 { 5181 u32 regno = BPF_REG_1 + arg; 5182 struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; 5183 enum bpf_arg_type arg_type = fn->arg_type[arg]; 5184 enum bpf_reg_type type = reg->type; 5185 int err = 0; 5186 5187 if (arg_type == ARG_DONTCARE) 5188 return 0; 5189 5190 err = check_reg_arg(env, regno, SRC_OP); 5191 if (err) 5192 return err; 5193 5194 if (arg_type == ARG_ANYTHING) { 5195 if (is_pointer_value(env, regno)) { 5196 verbose(env, "R%d leaks addr into helper function\n", 5197 regno); 5198 return -EACCES; 5199 } 5200 return 0; 5201 } 5202 5203 if (type_is_pkt_pointer(type) && 5204 !may_access_direct_pkt_data(env, meta, BPF_READ)) { 5205 verbose(env, "helper access to the packet is not allowed\n"); 5206 return -EACCES; 5207 } 5208 5209 if (arg_type == ARG_PTR_TO_MAP_VALUE || 5210 arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE || 5211 arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL) { 5212 err = resolve_map_arg_type(env, meta, &arg_type); 5213 if (err) 5214 return err; 5215 } 5216 5217 if (register_is_null(reg) && arg_type_may_be_null(arg_type)) 5218 /* A NULL register has a SCALAR_VALUE type, so skip 5219 * type checking. 5220 */ 5221 goto skip_type_check; 5222 5223 err = check_reg_type(env, regno, arg_type, fn->arg_btf_id[arg]); 5224 if (err) 5225 return err; 5226 5227 if (type == PTR_TO_CTX) { 5228 err = check_ctx_reg(env, reg, regno); 5229 if (err < 0) 5230 return err; 5231 } 5232 5233 skip_type_check: 5234 if (reg->ref_obj_id) { 5235 if (meta->ref_obj_id) { 5236 verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n", 5237 regno, reg->ref_obj_id, 5238 meta->ref_obj_id); 5239 return -EFAULT; 5240 } 5241 meta->ref_obj_id = reg->ref_obj_id; 5242 } 5243 5244 if (arg_type == ARG_CONST_MAP_PTR) { 5245 /* bpf_map_xxx(map_ptr) call: remember that map_ptr */ 5246 if (meta->map_ptr) { 5247 /* Use map_uid (which is unique id of inner map) to reject: 5248 * inner_map1 = bpf_map_lookup_elem(outer_map, key1) 5249 * inner_map2 = bpf_map_lookup_elem(outer_map, key2) 5250 * if (inner_map1 && inner_map2) { 5251 * timer = bpf_map_lookup_elem(inner_map1); 5252 * if (timer) 5253 * // mismatch would have been allowed 5254 * bpf_timer_init(timer, inner_map2); 5255 * } 5256 * 5257 * Comparing map_ptr is enough to distinguish normal and outer maps. 5258 */ 5259 if (meta->map_ptr != reg->map_ptr || 5260 meta->map_uid != reg->map_uid) { 5261 verbose(env, 5262 "timer pointer in R1 map_uid=%d doesn't match map pointer in R2 map_uid=%d\n", 5263 meta->map_uid, reg->map_uid); 5264 return -EINVAL; 5265 } 5266 } 5267 meta->map_ptr = reg->map_ptr; 5268 meta->map_uid = reg->map_uid; 5269 } else if (arg_type == ARG_PTR_TO_MAP_KEY) { 5270 /* bpf_map_xxx(..., map_ptr, ..., key) call: 5271 * check that [key, key + map->key_size) are within 5272 * stack limits and initialized 5273 */ 5274 if (!meta->map_ptr) { 5275 /* in function declaration map_ptr must come before 5276 * map_key, so that it's verified and known before 5277 * we have to check map_key here. 
Otherwise it means 5278 * that kernel subsystem misconfigured verifier 5279 */ 5280 verbose(env, "invalid map_ptr to access map->key\n"); 5281 return -EACCES; 5282 } 5283 err = check_helper_mem_access(env, regno, 5284 meta->map_ptr->key_size, false, 5285 NULL); 5286 } else if (arg_type == ARG_PTR_TO_MAP_VALUE || 5287 (arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL && 5288 !register_is_null(reg)) || 5289 arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE) { 5290 /* bpf_map_xxx(..., map_ptr, ..., value) call: 5291 * check [value, value + map->value_size) validity 5292 */ 5293 if (!meta->map_ptr) { 5294 /* kernel subsystem misconfigured verifier */ 5295 verbose(env, "invalid map_ptr to access map->value\n"); 5296 return -EACCES; 5297 } 5298 meta->raw_mode = (arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE); 5299 err = check_helper_mem_access(env, regno, 5300 meta->map_ptr->value_size, false, 5301 meta); 5302 } else if (arg_type == ARG_PTR_TO_PERCPU_BTF_ID) { 5303 if (!reg->btf_id) { 5304 verbose(env, "Helper has invalid btf_id in R%d\n", regno); 5305 return -EACCES; 5306 } 5307 meta->ret_btf = reg->btf; 5308 meta->ret_btf_id = reg->btf_id; 5309 } else if (arg_type == ARG_PTR_TO_SPIN_LOCK) { 5310 if (meta->func_id == BPF_FUNC_spin_lock) { 5311 if (process_spin_lock(env, regno, true)) 5312 return -EACCES; 5313 } else if (meta->func_id == BPF_FUNC_spin_unlock) { 5314 if (process_spin_lock(env, regno, false)) 5315 return -EACCES; 5316 } else { 5317 verbose(env, "verifier internal error\n"); 5318 return -EFAULT; 5319 } 5320 } else if (arg_type == ARG_PTR_TO_TIMER) { 5321 if (process_timer_func(env, regno, meta)) 5322 return -EACCES; 5323 } else if (arg_type == ARG_PTR_TO_FUNC) { 5324 meta->subprogno = reg->subprogno; 5325 } else if (arg_type_is_mem_ptr(arg_type)) { 5326 /* The access to this pointer is only checked when we hit the 5327 * next is_mem_size argument below. 5328 */ 5329 meta->raw_mode = (arg_type == ARG_PTR_TO_UNINIT_MEM); 5330 } else if (arg_type_is_mem_size(arg_type)) { 5331 bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO); 5332 5333 /* This is used to refine r0 return value bounds for helpers 5334 * that enforce this value as an upper bound on return values. 5335 * See do_refine_retval_range() for helpers that can refine 5336 * the return value. C type of helper is u32 so we pull register 5337 * bound from umax_value however, if negative verifier errors 5338 * out. Only upper bounds can be learned because retval is an 5339 * int type and negative retvals are allowed. 5340 */ 5341 meta->msize_max_value = reg->umax_value; 5342 5343 /* The register is SCALAR_VALUE; the access check 5344 * happens using its boundaries. 5345 */ 5346 if (!tnum_is_const(reg->var_off)) 5347 /* For unprivileged variable accesses, disable raw 5348 * mode so that the program is required to 5349 * initialize all the memory that the helper could 5350 * just partially fill up. 
5351 */ 5352 meta = NULL; 5353 5354 if (reg->smin_value < 0) { 5355 verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n", 5356 regno); 5357 return -EACCES; 5358 } 5359 5360 if (reg->umin_value == 0) { 5361 err = check_helper_mem_access(env, regno - 1, 0, 5362 zero_size_allowed, 5363 meta); 5364 if (err) 5365 return err; 5366 } 5367 5368 if (reg->umax_value >= BPF_MAX_VAR_SIZ) { 5369 verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n", 5370 regno); 5371 return -EACCES; 5372 } 5373 err = check_helper_mem_access(env, regno - 1, 5374 reg->umax_value, 5375 zero_size_allowed, meta); 5376 if (!err) 5377 err = mark_chain_precision(env, regno); 5378 } else if (arg_type_is_alloc_size(arg_type)) { 5379 if (!tnum_is_const(reg->var_off)) { 5380 verbose(env, "R%d is not a known constant'\n", 5381 regno); 5382 return -EACCES; 5383 } 5384 meta->mem_size = reg->var_off.value; 5385 } else if (arg_type_is_int_ptr(arg_type)) { 5386 int size = int_ptr_type_to_size(arg_type); 5387 5388 err = check_helper_mem_access(env, regno, size, false, meta); 5389 if (err) 5390 return err; 5391 err = check_ptr_alignment(env, reg, 0, size, true); 5392 } else if (arg_type == ARG_PTR_TO_CONST_STR) { 5393 struct bpf_map *map = reg->map_ptr; 5394 int map_off; 5395 u64 map_addr; 5396 char *str_ptr; 5397 5398 if (!bpf_map_is_rdonly(map)) { 5399 verbose(env, "R%d does not point to a readonly map'\n", regno); 5400 return -EACCES; 5401 } 5402 5403 if (!tnum_is_const(reg->var_off)) { 5404 verbose(env, "R%d is not a constant address'\n", regno); 5405 return -EACCES; 5406 } 5407 5408 if (!map->ops->map_direct_value_addr) { 5409 verbose(env, "no direct value access support for this map type\n"); 5410 return -EACCES; 5411 } 5412 5413 err = check_map_access(env, regno, reg->off, 5414 map->value_size - reg->off, false); 5415 if (err) 5416 return err; 5417 5418 map_off = reg->off + reg->var_off.value; 5419 err = map->ops->map_direct_value_addr(map, &map_addr, map_off); 5420 if (err) { 5421 verbose(env, "direct value access on string failed\n"); 5422 return err; 5423 } 5424 5425 str_ptr = (char *)(long)(map_addr); 5426 if (!strnchr(str_ptr + map_off, map->value_size - map_off, 0)) { 5427 verbose(env, "string is not zero-terminated\n"); 5428 return -EINVAL; 5429 } 5430 } 5431 5432 return err; 5433 } 5434 5435 static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id) 5436 { 5437 enum bpf_attach_type eatype = env->prog->expected_attach_type; 5438 enum bpf_prog_type type = resolve_prog_type(env->prog); 5439 5440 if (func_id != BPF_FUNC_map_update_elem) 5441 return false; 5442 5443 /* It's not possible to get access to a locked struct sock in these 5444 * contexts, so updating is safe. 
5445 */ 5446 switch (type) { 5447 case BPF_PROG_TYPE_TRACING: 5448 if (eatype == BPF_TRACE_ITER) 5449 return true; 5450 break; 5451 case BPF_PROG_TYPE_SOCKET_FILTER: 5452 case BPF_PROG_TYPE_SCHED_CLS: 5453 case BPF_PROG_TYPE_SCHED_ACT: 5454 case BPF_PROG_TYPE_XDP: 5455 case BPF_PROG_TYPE_SK_REUSEPORT: 5456 case BPF_PROG_TYPE_FLOW_DISSECTOR: 5457 case BPF_PROG_TYPE_SK_LOOKUP: 5458 return true; 5459 default: 5460 break; 5461 } 5462 5463 verbose(env, "cannot update sockmap in this context\n"); 5464 return false; 5465 } 5466 5467 static bool allow_tail_call_in_subprogs(struct bpf_verifier_env *env) 5468 { 5469 return env->prog->jit_requested && IS_ENABLED(CONFIG_X86_64); 5470 } 5471 5472 static int check_map_func_compatibility(struct bpf_verifier_env *env, 5473 struct bpf_map *map, int func_id) 5474 { 5475 if (!map) 5476 return 0; 5477 5478 /* We need a two way check, first is from map perspective ... */ 5479 switch (map->map_type) { 5480 case BPF_MAP_TYPE_PROG_ARRAY: 5481 if (func_id != BPF_FUNC_tail_call) 5482 goto error; 5483 break; 5484 case BPF_MAP_TYPE_PERF_EVENT_ARRAY: 5485 if (func_id != BPF_FUNC_perf_event_read && 5486 func_id != BPF_FUNC_perf_event_output && 5487 func_id != BPF_FUNC_skb_output && 5488 func_id != BPF_FUNC_perf_event_read_value && 5489 func_id != BPF_FUNC_xdp_output) 5490 goto error; 5491 break; 5492 case BPF_MAP_TYPE_RINGBUF: 5493 if (func_id != BPF_FUNC_ringbuf_output && 5494 func_id != BPF_FUNC_ringbuf_reserve && 5495 func_id != BPF_FUNC_ringbuf_query) 5496 goto error; 5497 break; 5498 case BPF_MAP_TYPE_STACK_TRACE: 5499 if (func_id != BPF_FUNC_get_stackid) 5500 goto error; 5501 break; 5502 case BPF_MAP_TYPE_CGROUP_ARRAY: 5503 if (func_id != BPF_FUNC_skb_under_cgroup && 5504 func_id != BPF_FUNC_current_task_under_cgroup) 5505 goto error; 5506 break; 5507 case BPF_MAP_TYPE_CGROUP_STORAGE: 5508 case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: 5509 if (func_id != BPF_FUNC_get_local_storage) 5510 goto error; 5511 break; 5512 case BPF_MAP_TYPE_DEVMAP: 5513 case BPF_MAP_TYPE_DEVMAP_HASH: 5514 if (func_id != BPF_FUNC_redirect_map && 5515 func_id != BPF_FUNC_map_lookup_elem) 5516 goto error; 5517 break; 5518 /* Restrict bpf side of cpumap and xskmap, open when use-cases 5519 * appear. 
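 * E.g. lookups on a cpumap are rejected from the BPF side below even
 * though user space may perform them.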
5520 */ 5521 case BPF_MAP_TYPE_CPUMAP: 5522 if (func_id != BPF_FUNC_redirect_map) 5523 goto error; 5524 break; 5525 case BPF_MAP_TYPE_XSKMAP: 5526 if (func_id != BPF_FUNC_redirect_map && 5527 func_id != BPF_FUNC_map_lookup_elem) 5528 goto error; 5529 break; 5530 case BPF_MAP_TYPE_ARRAY_OF_MAPS: 5531 case BPF_MAP_TYPE_HASH_OF_MAPS: 5532 if (func_id != BPF_FUNC_map_lookup_elem) 5533 goto error; 5534 break; 5535 case BPF_MAP_TYPE_SOCKMAP: 5536 if (func_id != BPF_FUNC_sk_redirect_map && 5537 func_id != BPF_FUNC_sock_map_update && 5538 func_id != BPF_FUNC_map_delete_elem && 5539 func_id != BPF_FUNC_msg_redirect_map && 5540 func_id != BPF_FUNC_sk_select_reuseport && 5541 func_id != BPF_FUNC_map_lookup_elem && 5542 !may_update_sockmap(env, func_id)) 5543 goto error; 5544 break; 5545 case BPF_MAP_TYPE_SOCKHASH: 5546 if (func_id != BPF_FUNC_sk_redirect_hash && 5547 func_id != BPF_FUNC_sock_hash_update && 5548 func_id != BPF_FUNC_map_delete_elem && 5549 func_id != BPF_FUNC_msg_redirect_hash && 5550 func_id != BPF_FUNC_sk_select_reuseport && 5551 func_id != BPF_FUNC_map_lookup_elem && 5552 !may_update_sockmap(env, func_id)) 5553 goto error; 5554 break; 5555 case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY: 5556 if (func_id != BPF_FUNC_sk_select_reuseport) 5557 goto error; 5558 break; 5559 case BPF_MAP_TYPE_QUEUE: 5560 case BPF_MAP_TYPE_STACK: 5561 if (func_id != BPF_FUNC_map_peek_elem && 5562 func_id != BPF_FUNC_map_pop_elem && 5563 func_id != BPF_FUNC_map_push_elem) 5564 goto error; 5565 break; 5566 case BPF_MAP_TYPE_SK_STORAGE: 5567 if (func_id != BPF_FUNC_sk_storage_get && 5568 func_id != BPF_FUNC_sk_storage_delete) 5569 goto error; 5570 break; 5571 case BPF_MAP_TYPE_INODE_STORAGE: 5572 if (func_id != BPF_FUNC_inode_storage_get && 5573 func_id != BPF_FUNC_inode_storage_delete) 5574 goto error; 5575 break; 5576 case BPF_MAP_TYPE_TASK_STORAGE: 5577 if (func_id != BPF_FUNC_task_storage_get && 5578 func_id != BPF_FUNC_task_storage_delete) 5579 goto error; 5580 break; 5581 case BPF_MAP_TYPE_BLOOM_FILTER: 5582 if (func_id != BPF_FUNC_map_peek_elem && 5583 func_id != BPF_FUNC_map_push_elem) 5584 goto error; 5585 break; 5586 default: 5587 break; 5588 } 5589 5590 /* ... and second from the function itself. 
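 * One direction alone would not be enough: bpf_redirect_map() accepts
 * several map types (devmap, cpumap, xskmap) and a devmap in turn
 * accepts several helpers, so map type and helper id are cross-checked
 * both ways.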
*/ 5591 switch (func_id) { 5592 case BPF_FUNC_tail_call: 5593 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY) 5594 goto error; 5595 if (env->subprog_cnt > 1 && !allow_tail_call_in_subprogs(env)) { 5596 verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n"); 5597 return -EINVAL; 5598 } 5599 break; 5600 case BPF_FUNC_perf_event_read: 5601 case BPF_FUNC_perf_event_output: 5602 case BPF_FUNC_perf_event_read_value: 5603 case BPF_FUNC_skb_output: 5604 case BPF_FUNC_xdp_output: 5605 if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) 5606 goto error; 5607 break; 5608 case BPF_FUNC_ringbuf_output: 5609 case BPF_FUNC_ringbuf_reserve: 5610 case BPF_FUNC_ringbuf_query: 5611 if (map->map_type != BPF_MAP_TYPE_RINGBUF) 5612 goto error; 5613 break; 5614 case BPF_FUNC_get_stackid: 5615 if (map->map_type != BPF_MAP_TYPE_STACK_TRACE) 5616 goto error; 5617 break; 5618 case BPF_FUNC_current_task_under_cgroup: 5619 case BPF_FUNC_skb_under_cgroup: 5620 if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY) 5621 goto error; 5622 break; 5623 case BPF_FUNC_redirect_map: 5624 if (map->map_type != BPF_MAP_TYPE_DEVMAP && 5625 map->map_type != BPF_MAP_TYPE_DEVMAP_HASH && 5626 map->map_type != BPF_MAP_TYPE_CPUMAP && 5627 map->map_type != BPF_MAP_TYPE_XSKMAP) 5628 goto error; 5629 break; 5630 case BPF_FUNC_sk_redirect_map: 5631 case BPF_FUNC_msg_redirect_map: 5632 case BPF_FUNC_sock_map_update: 5633 if (map->map_type != BPF_MAP_TYPE_SOCKMAP) 5634 goto error; 5635 break; 5636 case BPF_FUNC_sk_redirect_hash: 5637 case BPF_FUNC_msg_redirect_hash: 5638 case BPF_FUNC_sock_hash_update: 5639 if (map->map_type != BPF_MAP_TYPE_SOCKHASH) 5640 goto error; 5641 break; 5642 case BPF_FUNC_get_local_storage: 5643 if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE && 5644 map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) 5645 goto error; 5646 break; 5647 case BPF_FUNC_sk_select_reuseport: 5648 if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY && 5649 map->map_type != BPF_MAP_TYPE_SOCKMAP && 5650 map->map_type != BPF_MAP_TYPE_SOCKHASH) 5651 goto error; 5652 break; 5653 case BPF_FUNC_map_pop_elem: 5654 if (map->map_type != BPF_MAP_TYPE_QUEUE && 5655 map->map_type != BPF_MAP_TYPE_STACK) 5656 goto error; 5657 break; 5658 case BPF_FUNC_map_peek_elem: 5659 case BPF_FUNC_map_push_elem: 5660 if (map->map_type != BPF_MAP_TYPE_QUEUE && 5661 map->map_type != BPF_MAP_TYPE_STACK && 5662 map->map_type != BPF_MAP_TYPE_BLOOM_FILTER) 5663 goto error; 5664 break; 5665 case BPF_FUNC_sk_storage_get: 5666 case BPF_FUNC_sk_storage_delete: 5667 if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) 5668 goto error; 5669 break; 5670 case BPF_FUNC_inode_storage_get: 5671 case BPF_FUNC_inode_storage_delete: 5672 if (map->map_type != BPF_MAP_TYPE_INODE_STORAGE) 5673 goto error; 5674 break; 5675 case BPF_FUNC_task_storage_get: 5676 case BPF_FUNC_task_storage_delete: 5677 if (map->map_type != BPF_MAP_TYPE_TASK_STORAGE) 5678 goto error; 5679 break; 5680 default: 5681 break; 5682 } 5683 5684 return 0; 5685 error: 5686 verbose(env, "cannot pass map_type %d into func %s#%d\n", 5687 map->map_type, func_id_name(func_id), func_id); 5688 return -EINVAL; 5689 } 5690 5691 static bool check_raw_mode_ok(const struct bpf_func_proto *fn) 5692 { 5693 int count = 0; 5694 5695 if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM) 5696 count++; 5697 if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM) 5698 count++; 5699 if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM) 5700 count++; 5701 if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM) 5702 count++; 5703 if (fn->arg5_type == 
ARG_PTR_TO_UNINIT_MEM) 5704 count++; 5705 5706 /* We only support one arg being in raw mode at the moment, 5707 * which is sufficient for the helper functions we have 5708 * right now. 5709 */ 5710 return count <= 1; 5711 } 5712 5713 static bool check_args_pair_invalid(enum bpf_arg_type arg_curr, 5714 enum bpf_arg_type arg_next) 5715 { 5716 return (arg_type_is_mem_ptr(arg_curr) && 5717 !arg_type_is_mem_size(arg_next)) || 5718 (!arg_type_is_mem_ptr(arg_curr) && 5719 arg_type_is_mem_size(arg_next)); 5720 } 5721 5722 static bool check_arg_pair_ok(const struct bpf_func_proto *fn) 5723 { 5724 /* bpf_xxx(..., buf, len) call will access 'len' 5725 * bytes from memory 'buf'. Both arg types need 5726 * to be paired, so make sure there's no buggy 5727 * helper function specification. 5728 */ 5729 if (arg_type_is_mem_size(fn->arg1_type) || 5730 arg_type_is_mem_ptr(fn->arg5_type) || 5731 check_args_pair_invalid(fn->arg1_type, fn->arg2_type) || 5732 check_args_pair_invalid(fn->arg2_type, fn->arg3_type) || 5733 check_args_pair_invalid(fn->arg3_type, fn->arg4_type) || 5734 check_args_pair_invalid(fn->arg4_type, fn->arg5_type)) 5735 return false; 5736 5737 return true; 5738 } 5739 5740 static bool check_refcount_ok(const struct bpf_func_proto *fn, int func_id) 5741 { 5742 int count = 0; 5743 5744 if (arg_type_may_be_refcounted(fn->arg1_type)) 5745 count++; 5746 if (arg_type_may_be_refcounted(fn->arg2_type)) 5747 count++; 5748 if (arg_type_may_be_refcounted(fn->arg3_type)) 5749 count++; 5750 if (arg_type_may_be_refcounted(fn->arg4_type)) 5751 count++; 5752 if (arg_type_may_be_refcounted(fn->arg5_type)) 5753 count++; 5754 5755 /* A reference acquiring function cannot acquire 5756 * another refcounted ptr. 5757 */ 5758 if (may_be_acquire_function(func_id) && count) 5759 return false; 5760 5761 /* We only support one arg being unreferenced at the moment, 5762 * which is sufficient for the helper functions we have right now. 5763 */ 5764 return count <= 1; 5765 } 5766 5767 static bool check_btf_id_ok(const struct bpf_func_proto *fn) 5768 { 5769 int i; 5770 5771 for (i = 0; i < ARRAY_SIZE(fn->arg_type); i++) { 5772 if (fn->arg_type[i] == ARG_PTR_TO_BTF_ID && !fn->arg_btf_id[i]) 5773 return false; 5774 5775 if (fn->arg_type[i] != ARG_PTR_TO_BTF_ID && fn->arg_btf_id[i]) 5776 return false; 5777 } 5778 5779 return true; 5780 } 5781 5782 static int check_func_proto(const struct bpf_func_proto *fn, int func_id) 5783 { 5784 return check_raw_mode_ok(fn) && 5785 check_arg_pair_ok(fn) && 5786 check_btf_id_ok(fn) && 5787 check_refcount_ok(fn, func_id) ? 0 : -EINVAL; 5788 } 5789 5790 /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END] 5791 * are now invalid, so turn them into unknown SCALAR_VALUE. 
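 *
 * Illustrative pattern (sketch):
 *   data = (void *)(long)skb->data;   // PTR_TO_PACKET
 *   bpf_skb_pull_data(skb, 0);        // helper that may change packet data
 *   // 'data' is now an unknown SCALAR_VALUE and must be re-read from
 *   // skb->data before it may be dereferenced again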
5792 */ 5793 static void __clear_all_pkt_pointers(struct bpf_verifier_env *env, 5794 struct bpf_func_state *state) 5795 { 5796 struct bpf_reg_state *regs = state->regs, *reg; 5797 int i; 5798 5799 for (i = 0; i < MAX_BPF_REG; i++) 5800 if (reg_is_pkt_pointer_any(®s[i])) 5801 mark_reg_unknown(env, regs, i); 5802 5803 bpf_for_each_spilled_reg(i, state, reg) { 5804 if (!reg) 5805 continue; 5806 if (reg_is_pkt_pointer_any(reg)) 5807 __mark_reg_unknown(env, reg); 5808 } 5809 } 5810 5811 static void clear_all_pkt_pointers(struct bpf_verifier_env *env) 5812 { 5813 struct bpf_verifier_state *vstate = env->cur_state; 5814 int i; 5815 5816 for (i = 0; i <= vstate->curframe; i++) 5817 __clear_all_pkt_pointers(env, vstate->frame[i]); 5818 } 5819 5820 enum { 5821 AT_PKT_END = -1, 5822 BEYOND_PKT_END = -2, 5823 }; 5824 5825 static void mark_pkt_end(struct bpf_verifier_state *vstate, int regn, bool range_open) 5826 { 5827 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 5828 struct bpf_reg_state *reg = &state->regs[regn]; 5829 5830 if (reg->type != PTR_TO_PACKET) 5831 /* PTR_TO_PACKET_META is not supported yet */ 5832 return; 5833 5834 /* The 'reg' is pkt > pkt_end or pkt >= pkt_end. 5835 * How far beyond pkt_end it goes is unknown. 5836 * if (!range_open) it's the case of pkt >= pkt_end 5837 * if (range_open) it's the case of pkt > pkt_end 5838 * hence this pointer is at least 1 byte bigger than pkt_end 5839 */ 5840 if (range_open) 5841 reg->range = BEYOND_PKT_END; 5842 else 5843 reg->range = AT_PKT_END; 5844 } 5845 5846 static void release_reg_references(struct bpf_verifier_env *env, 5847 struct bpf_func_state *state, 5848 int ref_obj_id) 5849 { 5850 struct bpf_reg_state *regs = state->regs, *reg; 5851 int i; 5852 5853 for (i = 0; i < MAX_BPF_REG; i++) 5854 if (regs[i].ref_obj_id == ref_obj_id) 5855 mark_reg_unknown(env, regs, i); 5856 5857 bpf_for_each_spilled_reg(i, state, reg) { 5858 if (!reg) 5859 continue; 5860 if (reg->ref_obj_id == ref_obj_id) 5861 __mark_reg_unknown(env, reg); 5862 } 5863 } 5864 5865 /* The pointer with the specified id has released its reference to kernel 5866 * resources. Identify all copies of the same pointer and clear the reference. 
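 *
 * For example (sketch):
 *   sk  = bpf_sk_lookup_tcp(...);  // acquires a reference, R0.ref_obj_id = id
 *   sk2 = sk;                      // the copy shares the same ref_obj_id
 *   bpf_sk_release(sk);            // here both sk and sk2 become unknown scalars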
5867 */ 5868 static int release_reference(struct bpf_verifier_env *env, 5869 int ref_obj_id) 5870 { 5871 struct bpf_verifier_state *vstate = env->cur_state; 5872 int err; 5873 int i; 5874 5875 err = release_reference_state(cur_func(env), ref_obj_id); 5876 if (err) 5877 return err; 5878 5879 for (i = 0; i <= vstate->curframe; i++) 5880 release_reg_references(env, vstate->frame[i], ref_obj_id); 5881 5882 return 0; 5883 } 5884 5885 static void clear_caller_saved_regs(struct bpf_verifier_env *env, 5886 struct bpf_reg_state *regs) 5887 { 5888 int i; 5889 5890 /* after the call registers r0 - r5 were scratched */ 5891 for (i = 0; i < CALLER_SAVED_REGS; i++) { 5892 mark_reg_not_init(env, regs, caller_saved[i]); 5893 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); 5894 } 5895 } 5896 5897 typedef int (*set_callee_state_fn)(struct bpf_verifier_env *env, 5898 struct bpf_func_state *caller, 5899 struct bpf_func_state *callee, 5900 int insn_idx); 5901 5902 static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, 5903 int *insn_idx, int subprog, 5904 set_callee_state_fn set_callee_state_cb) 5905 { 5906 struct bpf_verifier_state *state = env->cur_state; 5907 struct bpf_func_info_aux *func_info_aux; 5908 struct bpf_func_state *caller, *callee; 5909 int err; 5910 bool is_global = false; 5911 5912 if (state->curframe + 1 >= MAX_CALL_FRAMES) { 5913 verbose(env, "the call stack of %d frames is too deep\n", 5914 state->curframe + 2); 5915 return -E2BIG; 5916 } 5917 5918 caller = state->frame[state->curframe]; 5919 if (state->frame[state->curframe + 1]) { 5920 verbose(env, "verifier bug. Frame %d already allocated\n", 5921 state->curframe + 1); 5922 return -EFAULT; 5923 } 5924 5925 func_info_aux = env->prog->aux->func_info_aux; 5926 if (func_info_aux) 5927 is_global = func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL; 5928 err = btf_check_subprog_arg_match(env, subprog, caller->regs); 5929 if (err == -EFAULT) 5930 return err; 5931 if (is_global) { 5932 if (err) { 5933 verbose(env, "Caller passes invalid args into func#%d\n", 5934 subprog); 5935 return err; 5936 } else { 5937 if (env->log.level & BPF_LOG_LEVEL) 5938 verbose(env, 5939 "Func#%d is global and valid. Skipping.\n", 5940 subprog); 5941 clear_caller_saved_regs(env, caller->regs); 5942 5943 /* All global functions return a 64-bit SCALAR_VALUE */ 5944 mark_reg_unknown(env, caller->regs, BPF_REG_0); 5945 caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG; 5946 5947 /* continue with next insn after call */ 5948 return 0; 5949 } 5950 } 5951 5952 if (insn->code == (BPF_JMP | BPF_CALL) && 5953 insn->imm == BPF_FUNC_timer_set_callback) { 5954 struct bpf_verifier_state *async_cb; 5955 5956 /* there is no real recursion here. 
timer callbacks are async */ 5957 env->subprog_info[subprog].is_async_cb = true; 5958 async_cb = push_async_cb(env, env->subprog_info[subprog].start, 5959 *insn_idx, subprog); 5960 if (!async_cb) 5961 return -EFAULT; 5962 callee = async_cb->frame[0]; 5963 callee->async_entry_cnt = caller->async_entry_cnt + 1; 5964 5965 /* Convert bpf_timer_set_callback() args into timer callback args */ 5966 err = set_callee_state_cb(env, caller, callee, *insn_idx); 5967 if (err) 5968 return err; 5969 5970 clear_caller_saved_regs(env, caller->regs); 5971 mark_reg_unknown(env, caller->regs, BPF_REG_0); 5972 caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG; 5973 /* continue with next insn after call */ 5974 return 0; 5975 } 5976 5977 callee = kzalloc(sizeof(*callee), GFP_KERNEL); 5978 if (!callee) 5979 return -ENOMEM; 5980 state->frame[state->curframe + 1] = callee; 5981 5982 /* callee cannot access r0, r6 - r9 for reading and has to write 5983 * into its own stack before reading from it. 5984 * callee can read/write into caller's stack 5985 */ 5986 init_func_state(env, callee, 5987 /* remember the callsite, it will be used by bpf_exit */ 5988 *insn_idx /* callsite */, 5989 state->curframe + 1 /* frameno within this callchain */, 5990 subprog /* subprog number within this prog */); 5991 5992 /* Transfer references to the callee */ 5993 err = copy_reference_state(callee, caller); 5994 if (err) 5995 return err; 5996 5997 err = set_callee_state_cb(env, caller, callee, *insn_idx); 5998 if (err) 5999 return err; 6000 6001 clear_caller_saved_regs(env, caller->regs); 6002 6003 /* only increment it after check_reg_arg() finished */ 6004 state->curframe++; 6005 6006 /* and go analyze first insn of the callee */ 6007 *insn_idx = env->subprog_info[subprog].start - 1; 6008 6009 if (env->log.level & BPF_LOG_LEVEL) { 6010 verbose(env, "caller:\n"); 6011 print_verifier_state(env, caller); 6012 verbose(env, "callee:\n"); 6013 print_verifier_state(env, callee); 6014 } 6015 return 0; 6016 } 6017 6018 int map_set_for_each_callback_args(struct bpf_verifier_env *env, 6019 struct bpf_func_state *caller, 6020 struct bpf_func_state *callee) 6021 { 6022 /* bpf_for_each_map_elem(struct bpf_map *map, void *callback_fn, 6023 * void *callback_ctx, u64 flags); 6024 * callback_fn(struct bpf_map *map, void *key, void *value, 6025 * void *callback_ctx); 6026 */ 6027 callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1]; 6028 6029 callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY; 6030 __mark_reg_known_zero(&callee->regs[BPF_REG_2]); 6031 callee->regs[BPF_REG_2].map_ptr = caller->regs[BPF_REG_1].map_ptr; 6032 6033 callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE; 6034 __mark_reg_known_zero(&callee->regs[BPF_REG_3]); 6035 callee->regs[BPF_REG_3].map_ptr = caller->regs[BPF_REG_1].map_ptr; 6036 6037 /* pointer to stack or null */ 6038 callee->regs[BPF_REG_4] = caller->regs[BPF_REG_3]; 6039 6040 /* unused */ 6041 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); 6042 return 0; 6043 } 6044 6045 static int set_callee_state(struct bpf_verifier_env *env, 6046 struct bpf_func_state *caller, 6047 struct bpf_func_state *callee, int insn_idx) 6048 { 6049 int i; 6050 6051 /* copy r1 - r5 args that callee can access. 
The copy includes parent 6052 * pointers, which connects us up to the liveness chain 6053 */ 6054 for (i = BPF_REG_1; i <= BPF_REG_5; i++) 6055 callee->regs[i] = caller->regs[i]; 6056 return 0; 6057 } 6058 6059 static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, 6060 int *insn_idx) 6061 { 6062 int subprog, target_insn; 6063 6064 target_insn = *insn_idx + insn->imm + 1; 6065 subprog = find_subprog(env, target_insn); 6066 if (subprog < 0) { 6067 verbose(env, "verifier bug. No program starts at insn %d\n", 6068 target_insn); 6069 return -EFAULT; 6070 } 6071 6072 return __check_func_call(env, insn, insn_idx, subprog, set_callee_state); 6073 } 6074 6075 static int set_map_elem_callback_state(struct bpf_verifier_env *env, 6076 struct bpf_func_state *caller, 6077 struct bpf_func_state *callee, 6078 int insn_idx) 6079 { 6080 struct bpf_insn_aux_data *insn_aux = &env->insn_aux_data[insn_idx]; 6081 struct bpf_map *map; 6082 int err; 6083 6084 if (bpf_map_ptr_poisoned(insn_aux)) { 6085 verbose(env, "tail_call abusing map_ptr\n"); 6086 return -EINVAL; 6087 } 6088 6089 map = BPF_MAP_PTR(insn_aux->map_ptr_state); 6090 if (!map->ops->map_set_for_each_callback_args || 6091 !map->ops->map_for_each_callback) { 6092 verbose(env, "callback function not allowed for map\n"); 6093 return -ENOTSUPP; 6094 } 6095 6096 err = map->ops->map_set_for_each_callback_args(env, caller, callee); 6097 if (err) 6098 return err; 6099 6100 callee->in_callback_fn = true; 6101 return 0; 6102 } 6103 6104 static int set_timer_callback_state(struct bpf_verifier_env *env, 6105 struct bpf_func_state *caller, 6106 struct bpf_func_state *callee, 6107 int insn_idx) 6108 { 6109 struct bpf_map *map_ptr = caller->regs[BPF_REG_1].map_ptr; 6110 6111 /* bpf_timer_set_callback(struct bpf_timer *timer, void *callback_fn); 6112 * callback_fn(struct bpf_map *map, void *key, void *value); 6113 */ 6114 callee->regs[BPF_REG_1].type = CONST_PTR_TO_MAP; 6115 __mark_reg_known_zero(&callee->regs[BPF_REG_1]); 6116 callee->regs[BPF_REG_1].map_ptr = map_ptr; 6117 6118 callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY; 6119 __mark_reg_known_zero(&callee->regs[BPF_REG_2]); 6120 callee->regs[BPF_REG_2].map_ptr = map_ptr; 6121 6122 callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE; 6123 __mark_reg_known_zero(&callee->regs[BPF_REG_3]); 6124 callee->regs[BPF_REG_3].map_ptr = map_ptr; 6125 6126 /* unused */ 6127 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); 6128 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); 6129 callee->in_async_callback_fn = true; 6130 return 0; 6131 } 6132 6133 static int set_find_vma_callback_state(struct bpf_verifier_env *env, 6134 struct bpf_func_state *caller, 6135 struct bpf_func_state *callee, 6136 int insn_idx) 6137 { 6138 /* bpf_find_vma(struct task_struct *task, u64 addr, 6139 * void *callback_fn, void *callback_ctx, u64 flags) 6140 * (callback_fn)(struct task_struct *task, 6141 * struct vm_area_struct *vma, void *callback_ctx); 6142 */ 6143 callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1]; 6144 6145 callee->regs[BPF_REG_2].type = PTR_TO_BTF_ID; 6146 __mark_reg_known_zero(&callee->regs[BPF_REG_2]); 6147 callee->regs[BPF_REG_2].btf = btf_vmlinux; 6148 callee->regs[BPF_REG_2].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_VMA], 6149 6150 /* pointer to stack or null */ 6151 callee->regs[BPF_REG_3] = caller->regs[BPF_REG_4]; 6152 6153 /* unused */ 6154 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); 6155 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); 6156 callee->in_callback_fn = true; 6157 return 0; 
6158 } 6159 6160 static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) 6161 { 6162 struct bpf_verifier_state *state = env->cur_state; 6163 struct bpf_func_state *caller, *callee; 6164 struct bpf_reg_state *r0; 6165 int err; 6166 6167 callee = state->frame[state->curframe]; 6168 r0 = &callee->regs[BPF_REG_0]; 6169 if (r0->type == PTR_TO_STACK) { 6170 /* technically it's ok to return caller's stack pointer 6171 * (or caller's caller's pointer) back to the caller, 6172 * since these pointers are valid. Only current stack 6173 * pointer will be invalid as soon as function exits, 6174 * but let's be conservative 6175 */ 6176 verbose(env, "cannot return stack pointer to the caller\n"); 6177 return -EINVAL; 6178 } 6179 6180 state->curframe--; 6181 caller = state->frame[state->curframe]; 6182 if (callee->in_callback_fn) { 6183 /* enforce R0 return value range [0, 1]. */ 6184 struct tnum range = tnum_range(0, 1); 6185 6186 if (r0->type != SCALAR_VALUE) { 6187 verbose(env, "R0 not a scalar value\n"); 6188 return -EACCES; 6189 } 6190 if (!tnum_in(range, r0->var_off)) { 6191 verbose_invalid_scalar(env, r0, &range, "callback return", "R0"); 6192 return -EINVAL; 6193 } 6194 } else { 6195 /* return to the caller whatever r0 had in the callee */ 6196 caller->regs[BPF_REG_0] = *r0; 6197 } 6198 6199 /* Transfer references to the caller */ 6200 err = copy_reference_state(caller, callee); 6201 if (err) 6202 return err; 6203 6204 *insn_idx = callee->callsite + 1; 6205 if (env->log.level & BPF_LOG_LEVEL) { 6206 verbose(env, "returning from callee:\n"); 6207 print_verifier_state(env, callee); 6208 verbose(env, "to caller at %d:\n", *insn_idx); 6209 print_verifier_state(env, caller); 6210 } 6211 /* clear everything in the callee */ 6212 free_func_state(callee); 6213 state->frame[state->curframe + 1] = NULL; 6214 return 0; 6215 } 6216 6217 static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type, 6218 int func_id, 6219 struct bpf_call_arg_meta *meta) 6220 { 6221 struct bpf_reg_state *ret_reg = ®s[BPF_REG_0]; 6222 6223 if (ret_type != RET_INTEGER || 6224 (func_id != BPF_FUNC_get_stack && 6225 func_id != BPF_FUNC_get_task_stack && 6226 func_id != BPF_FUNC_probe_read_str && 6227 func_id != BPF_FUNC_probe_read_kernel_str && 6228 func_id != BPF_FUNC_probe_read_user_str)) 6229 return; 6230 6231 ret_reg->smax_value = meta->msize_max_value; 6232 ret_reg->s32_max_value = meta->msize_max_value; 6233 ret_reg->smin_value = -MAX_ERRNO; 6234 ret_reg->s32_min_value = -MAX_ERRNO; 6235 __reg_deduce_bounds(ret_reg); 6236 __reg_bound_offset(ret_reg); 6237 __update_reg_bounds(ret_reg); 6238 } 6239 6240 static int 6241 record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta, 6242 int func_id, int insn_idx) 6243 { 6244 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx]; 6245 struct bpf_map *map = meta->map_ptr; 6246 6247 if (func_id != BPF_FUNC_tail_call && 6248 func_id != BPF_FUNC_map_lookup_elem && 6249 func_id != BPF_FUNC_map_update_elem && 6250 func_id != BPF_FUNC_map_delete_elem && 6251 func_id != BPF_FUNC_map_push_elem && 6252 func_id != BPF_FUNC_map_pop_elem && 6253 func_id != BPF_FUNC_map_peek_elem && 6254 func_id != BPF_FUNC_for_each_map_elem && 6255 func_id != BPF_FUNC_redirect_map) 6256 return 0; 6257 6258 if (map == NULL) { 6259 verbose(env, "kernel subsystem misconfigured verifier\n"); 6260 return -EINVAL; 6261 } 6262 6263 /* In case of read-only, some additional restrictions 6264 * need to be applied in order to prevent altering the 6265 * state of the 
map from program side. 6266 */ 6267 if ((map->map_flags & BPF_F_RDONLY_PROG) && 6268 (func_id == BPF_FUNC_map_delete_elem || 6269 func_id == BPF_FUNC_map_update_elem || 6270 func_id == BPF_FUNC_map_push_elem || 6271 func_id == BPF_FUNC_map_pop_elem)) { 6272 verbose(env, "write into map forbidden\n"); 6273 return -EACCES; 6274 } 6275 6276 if (!BPF_MAP_PTR(aux->map_ptr_state)) 6277 bpf_map_ptr_store(aux, meta->map_ptr, 6278 !meta->map_ptr->bypass_spec_v1); 6279 else if (BPF_MAP_PTR(aux->map_ptr_state) != meta->map_ptr) 6280 bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON, 6281 !meta->map_ptr->bypass_spec_v1); 6282 return 0; 6283 } 6284 6285 static int 6286 record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta, 6287 int func_id, int insn_idx) 6288 { 6289 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx]; 6290 struct bpf_reg_state *regs = cur_regs(env), *reg; 6291 struct bpf_map *map = meta->map_ptr; 6292 struct tnum range; 6293 u64 val; 6294 int err; 6295 6296 if (func_id != BPF_FUNC_tail_call) 6297 return 0; 6298 if (!map || map->map_type != BPF_MAP_TYPE_PROG_ARRAY) { 6299 verbose(env, "kernel subsystem misconfigured verifier\n"); 6300 return -EINVAL; 6301 } 6302 6303 range = tnum_range(0, map->max_entries - 1); 6304 reg = ®s[BPF_REG_3]; 6305 6306 if (!register_is_const(reg) || !tnum_in(range, reg->var_off)) { 6307 bpf_map_key_store(aux, BPF_MAP_KEY_POISON); 6308 return 0; 6309 } 6310 6311 err = mark_chain_precision(env, BPF_REG_3); 6312 if (err) 6313 return err; 6314 6315 val = reg->var_off.value; 6316 if (bpf_map_key_unseen(aux)) 6317 bpf_map_key_store(aux, val); 6318 else if (!bpf_map_key_poisoned(aux) && 6319 bpf_map_key_immediate(aux) != val) 6320 bpf_map_key_store(aux, BPF_MAP_KEY_POISON); 6321 return 0; 6322 } 6323 6324 static int check_reference_leak(struct bpf_verifier_env *env) 6325 { 6326 struct bpf_func_state *state = cur_func(env); 6327 int i; 6328 6329 for (i = 0; i < state->acquired_refs; i++) { 6330 verbose(env, "Unreleased reference id=%d alloc_insn=%d\n", 6331 state->refs[i].id, state->refs[i].insn_idx); 6332 } 6333 return state->acquired_refs ? -EINVAL : 0; 6334 } 6335 6336 static int check_bpf_snprintf_call(struct bpf_verifier_env *env, 6337 struct bpf_reg_state *regs) 6338 { 6339 struct bpf_reg_state *fmt_reg = ®s[BPF_REG_3]; 6340 struct bpf_reg_state *data_len_reg = ®s[BPF_REG_5]; 6341 struct bpf_map *fmt_map = fmt_reg->map_ptr; 6342 int err, fmt_map_off, num_args; 6343 u64 fmt_addr; 6344 char *fmt; 6345 6346 /* data must be an array of u64 */ 6347 if (data_len_reg->var_off.value % 8) 6348 return -EINVAL; 6349 num_args = data_len_reg->var_off.value / 8; 6350 6351 /* fmt being ARG_PTR_TO_CONST_STR guarantees that var_off is const 6352 * and map_direct_value_addr is set. 6353 */ 6354 fmt_map_off = fmt_reg->off + fmt_reg->var_off.value; 6355 err = fmt_map->ops->map_direct_value_addr(fmt_map, &fmt_addr, 6356 fmt_map_off); 6357 if (err) { 6358 verbose(env, "verifier bug\n"); 6359 return -EFAULT; 6360 } 6361 fmt = (char *)(long)fmt_addr + fmt_map_off; 6362 6363 /* We are also guaranteed that fmt+fmt_map_off is NULL terminated, we 6364 * can focus on validating the format specifiers. 
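 * bpf_bprintf_prepare() is called with NULL data arguments, i.e. in a
 * validate-only mode that checks the conversion specifiers against
 * num_args without formatting anything.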
6365 */ 6366 err = bpf_bprintf_prepare(fmt, UINT_MAX, NULL, NULL, num_args); 6367 if (err < 0) 6368 verbose(env, "Invalid format string\n"); 6369 6370 return err; 6371 } 6372 6373 static int check_get_func_ip(struct bpf_verifier_env *env) 6374 { 6375 enum bpf_attach_type eatype = env->prog->expected_attach_type; 6376 enum bpf_prog_type type = resolve_prog_type(env->prog); 6377 int func_id = BPF_FUNC_get_func_ip; 6378 6379 if (type == BPF_PROG_TYPE_TRACING) { 6380 if (eatype != BPF_TRACE_FENTRY && eatype != BPF_TRACE_FEXIT && 6381 eatype != BPF_MODIFY_RETURN) { 6382 verbose(env, "func %s#%d supported only for fentry/fexit/fmod_ret programs\n", 6383 func_id_name(func_id), func_id); 6384 return -ENOTSUPP; 6385 } 6386 return 0; 6387 } else if (type == BPF_PROG_TYPE_KPROBE) { 6388 return 0; 6389 } 6390 6391 verbose(env, "func %s#%d not supported for program type %d\n", 6392 func_id_name(func_id), func_id, type); 6393 return -ENOTSUPP; 6394 } 6395 6396 static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn, 6397 int *insn_idx_p) 6398 { 6399 const struct bpf_func_proto *fn = NULL; 6400 struct bpf_reg_state *regs; 6401 struct bpf_call_arg_meta meta; 6402 int insn_idx = *insn_idx_p; 6403 bool changes_data; 6404 int i, err, func_id; 6405 6406 /* find function prototype */ 6407 func_id = insn->imm; 6408 if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) { 6409 verbose(env, "invalid func %s#%d\n", func_id_name(func_id), 6410 func_id); 6411 return -EINVAL; 6412 } 6413 6414 if (env->ops->get_func_proto) 6415 fn = env->ops->get_func_proto(func_id, env->prog); 6416 if (!fn) { 6417 verbose(env, "unknown func %s#%d\n", func_id_name(func_id), 6418 func_id); 6419 return -EINVAL; 6420 } 6421 6422 /* eBPF programs must be GPL compatible to use GPL-ed functions */ 6423 if (!env->prog->gpl_compatible && fn->gpl_only) { 6424 verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n"); 6425 return -EINVAL; 6426 } 6427 6428 if (fn->allowed && !fn->allowed(env->prog)) { 6429 verbose(env, "helper call is not allowed in probe\n"); 6430 return -EINVAL; 6431 } 6432 6433 /* With LD_ABS/IND some JITs save/restore skb from r1. */ 6434 changes_data = bpf_helper_changes_pkt_data(fn->func); 6435 if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) { 6436 verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n", 6437 func_id_name(func_id), func_id); 6438 return -EINVAL; 6439 } 6440 6441 memset(&meta, 0, sizeof(meta)); 6442 meta.pkt_access = fn->pkt_access; 6443 6444 err = check_func_proto(fn, func_id); 6445 if (err) { 6446 verbose(env, "kernel subsystem misconfigured func %s#%d\n", 6447 func_id_name(func_id), func_id); 6448 return err; 6449 } 6450 6451 meta.func_id = func_id; 6452 /* check args */ 6453 for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) { 6454 err = check_func_arg(env, i, &meta, fn); 6455 if (err) 6456 return err; 6457 } 6458 6459 err = record_func_map(env, &meta, func_id, insn_idx); 6460 if (err) 6461 return err; 6462 6463 err = record_func_key(env, &meta, func_id, insn_idx); 6464 if (err) 6465 return err; 6466 6467 /* Mark slots with STACK_MISC in case of raw mode, stack offset 6468 * is inferred from register state. 
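 * Raw mode means the argument was ARG_PTR_TO_UNINIT_MEM: the helper
 * itself writes the buffer, so simulate a byte-wise write here, which
 * leaves the touched stack slots as STACK_MISC and makes later reads
 * of them legal.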
6469 */ 6470 for (i = 0; i < meta.access_size; i++) { 6471 err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, 6472 BPF_WRITE, -1, false); 6473 if (err) 6474 return err; 6475 } 6476 6477 if (func_id == BPF_FUNC_tail_call) { 6478 err = check_reference_leak(env); 6479 if (err) { 6480 verbose(env, "tail_call would lead to reference leak\n"); 6481 return err; 6482 } 6483 } else if (is_release_function(func_id)) { 6484 err = release_reference(env, meta.ref_obj_id); 6485 if (err) { 6486 verbose(env, "func %s#%d reference has not been acquired before\n", 6487 func_id_name(func_id), func_id); 6488 return err; 6489 } 6490 } 6491 6492 regs = cur_regs(env); 6493 6494 /* check that flags argument in get_local_storage(map, flags) is 0, 6495 * this is required because get_local_storage() can't return an error. 6496 */ 6497 if (func_id == BPF_FUNC_get_local_storage && 6498 !register_is_null(®s[BPF_REG_2])) { 6499 verbose(env, "get_local_storage() doesn't support non-zero flags\n"); 6500 return -EINVAL; 6501 } 6502 6503 if (func_id == BPF_FUNC_for_each_map_elem) { 6504 err = __check_func_call(env, insn, insn_idx_p, meta.subprogno, 6505 set_map_elem_callback_state); 6506 if (err < 0) 6507 return -EINVAL; 6508 } 6509 6510 if (func_id == BPF_FUNC_timer_set_callback) { 6511 err = __check_func_call(env, insn, insn_idx_p, meta.subprogno, 6512 set_timer_callback_state); 6513 if (err < 0) 6514 return -EINVAL; 6515 } 6516 6517 if (func_id == BPF_FUNC_find_vma) { 6518 err = __check_func_call(env, insn, insn_idx_p, meta.subprogno, 6519 set_find_vma_callback_state); 6520 if (err < 0) 6521 return -EINVAL; 6522 } 6523 6524 if (func_id == BPF_FUNC_snprintf) { 6525 err = check_bpf_snprintf_call(env, regs); 6526 if (err < 0) 6527 return err; 6528 } 6529 6530 /* reset caller saved regs */ 6531 for (i = 0; i < CALLER_SAVED_REGS; i++) { 6532 mark_reg_not_init(env, regs, caller_saved[i]); 6533 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); 6534 } 6535 6536 /* helper call returns 64-bit value. 
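 * Mark R0's definition as covering the full 64 bits (DEF_NOT_SUBREG)
 * so that the 32-bit zero-extension pass does not insert a redundant
 * zext for it.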
*/ 6537 regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG; 6538 6539 /* update return register (already marked as written above) */ 6540 if (fn->ret_type == RET_INTEGER) { 6541 /* sets type to SCALAR_VALUE */ 6542 mark_reg_unknown(env, regs, BPF_REG_0); 6543 } else if (fn->ret_type == RET_VOID) { 6544 regs[BPF_REG_0].type = NOT_INIT; 6545 } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL || 6546 fn->ret_type == RET_PTR_TO_MAP_VALUE) { 6547 /* There is no offset yet applied, variable or fixed */ 6548 mark_reg_known_zero(env, regs, BPF_REG_0); 6549 /* remember map_ptr, so that check_map_access() 6550 * can check 'value_size' boundary of memory access 6551 * to map element returned from bpf_map_lookup_elem() 6552 */ 6553 if (meta.map_ptr == NULL) { 6554 verbose(env, 6555 "kernel subsystem misconfigured verifier\n"); 6556 return -EINVAL; 6557 } 6558 regs[BPF_REG_0].map_ptr = meta.map_ptr; 6559 regs[BPF_REG_0].map_uid = meta.map_uid; 6560 if (fn->ret_type == RET_PTR_TO_MAP_VALUE) { 6561 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE; 6562 if (map_value_has_spin_lock(meta.map_ptr)) 6563 regs[BPF_REG_0].id = ++env->id_gen; 6564 } else { 6565 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL; 6566 } 6567 } else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) { 6568 mark_reg_known_zero(env, regs, BPF_REG_0); 6569 regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL; 6570 } else if (fn->ret_type == RET_PTR_TO_SOCK_COMMON_OR_NULL) { 6571 mark_reg_known_zero(env, regs, BPF_REG_0); 6572 regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON_OR_NULL; 6573 } else if (fn->ret_type == RET_PTR_TO_TCP_SOCK_OR_NULL) { 6574 mark_reg_known_zero(env, regs, BPF_REG_0); 6575 regs[BPF_REG_0].type = PTR_TO_TCP_SOCK_OR_NULL; 6576 } else if (fn->ret_type == RET_PTR_TO_ALLOC_MEM_OR_NULL) { 6577 mark_reg_known_zero(env, regs, BPF_REG_0); 6578 regs[BPF_REG_0].type = PTR_TO_MEM_OR_NULL; 6579 regs[BPF_REG_0].mem_size = meta.mem_size; 6580 } else if (fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL || 6581 fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID) { 6582 const struct btf_type *t; 6583 6584 mark_reg_known_zero(env, regs, BPF_REG_0); 6585 t = btf_type_skip_modifiers(meta.ret_btf, meta.ret_btf_id, NULL); 6586 if (!btf_type_is_struct(t)) { 6587 u32 tsize; 6588 const struct btf_type *ret; 6589 const char *tname; 6590 6591 /* resolve the type size of ksym. */ 6592 ret = btf_resolve_size(meta.ret_btf, t, &tsize); 6593 if (IS_ERR(ret)) { 6594 tname = btf_name_by_offset(meta.ret_btf, t->name_off); 6595 verbose(env, "unable to resolve the size of type '%s': %ld\n", 6596 tname, PTR_ERR(ret)); 6597 return -EINVAL; 6598 } 6599 regs[BPF_REG_0].type = 6600 fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID ? 6601 PTR_TO_MEM : PTR_TO_MEM_OR_NULL; 6602 regs[BPF_REG_0].mem_size = tsize; 6603 } else { 6604 regs[BPF_REG_0].type = 6605 fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID ? 6606 PTR_TO_BTF_ID : PTR_TO_BTF_ID_OR_NULL; 6607 regs[BPF_REG_0].btf = meta.ret_btf; 6608 regs[BPF_REG_0].btf_id = meta.ret_btf_id; 6609 } 6610 } else if (fn->ret_type == RET_PTR_TO_BTF_ID_OR_NULL || 6611 fn->ret_type == RET_PTR_TO_BTF_ID) { 6612 int ret_btf_id; 6613 6614 mark_reg_known_zero(env, regs, BPF_REG_0); 6615 regs[BPF_REG_0].type = fn->ret_type == RET_PTR_TO_BTF_ID ? 
6616 PTR_TO_BTF_ID : 6617 PTR_TO_BTF_ID_OR_NULL; 6618 ret_btf_id = *fn->ret_btf_id; 6619 if (ret_btf_id == 0) { 6620 verbose(env, "invalid return type %d of func %s#%d\n", 6621 fn->ret_type, func_id_name(func_id), func_id); 6622 return -EINVAL; 6623 } 6624 /* current BPF helper definitions are only coming from 6625 * built-in code with type IDs from vmlinux BTF 6626 */ 6627 regs[BPF_REG_0].btf = btf_vmlinux; 6628 regs[BPF_REG_0].btf_id = ret_btf_id; 6629 } else { 6630 verbose(env, "unknown return type %d of func %s#%d\n", 6631 fn->ret_type, func_id_name(func_id), func_id); 6632 return -EINVAL; 6633 } 6634 6635 if (reg_type_may_be_null(regs[BPF_REG_0].type)) 6636 regs[BPF_REG_0].id = ++env->id_gen; 6637 6638 if (is_ptr_cast_function(func_id)) { 6639 /* For release_reference() */ 6640 regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id; 6641 } else if (is_acquire_function(func_id, meta.map_ptr)) { 6642 int id = acquire_reference_state(env, insn_idx); 6643 6644 if (id < 0) 6645 return id; 6646 /* For mark_ptr_or_null_reg() */ 6647 regs[BPF_REG_0].id = id; 6648 /* For release_reference() */ 6649 regs[BPF_REG_0].ref_obj_id = id; 6650 } 6651 6652 do_refine_retval_range(regs, fn->ret_type, func_id, &meta); 6653 6654 err = check_map_func_compatibility(env, meta.map_ptr, func_id); 6655 if (err) 6656 return err; 6657 6658 if ((func_id == BPF_FUNC_get_stack || 6659 func_id == BPF_FUNC_get_task_stack) && 6660 !env->prog->has_callchain_buf) { 6661 const char *err_str; 6662 6663 #ifdef CONFIG_PERF_EVENTS 6664 err = get_callchain_buffers(sysctl_perf_event_max_stack); 6665 err_str = "cannot get callchain buffer for func %s#%d\n"; 6666 #else 6667 err = -ENOTSUPP; 6668 err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n"; 6669 #endif 6670 if (err) { 6671 verbose(env, err_str, func_id_name(func_id), func_id); 6672 return err; 6673 } 6674 6675 env->prog->has_callchain_buf = true; 6676 } 6677 6678 if (func_id == BPF_FUNC_get_stackid || func_id == BPF_FUNC_get_stack) 6679 env->prog->call_get_stack = true; 6680 6681 if (func_id == BPF_FUNC_get_func_ip) { 6682 if (check_get_func_ip(env)) 6683 return -ENOTSUPP; 6684 env->prog->call_get_func_ip = true; 6685 } 6686 6687 if (changes_data) 6688 clear_all_pkt_pointers(env); 6689 return 0; 6690 } 6691 6692 /* mark_btf_func_reg_size() is used when the reg size is determined by 6693 * the BTF func_proto's return value size and argument. 6694 */ 6695 static void mark_btf_func_reg_size(struct bpf_verifier_env *env, u32 regno, 6696 size_t reg_size) 6697 { 6698 struct bpf_reg_state *reg = &cur_regs(env)[regno]; 6699 6700 if (regno == BPF_REG_0) { 6701 /* Function return value */ 6702 reg->live |= REG_LIVE_WRITTEN; 6703 reg->subreg_def = reg_size == sizeof(u64) ? 
6704 DEF_NOT_SUBREG : env->insn_idx + 1; 6705 } else { 6706 /* Function argument */ 6707 if (reg_size == sizeof(u64)) { 6708 mark_insn_zext(env, reg); 6709 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); 6710 } else { 6711 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ32); 6712 } 6713 } 6714 } 6715 6716 static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn) 6717 { 6718 const struct btf_type *t, *func, *func_proto, *ptr_type; 6719 struct bpf_reg_state *regs = cur_regs(env); 6720 const char *func_name, *ptr_type_name; 6721 u32 i, nargs, func_id, ptr_type_id; 6722 struct module *btf_mod = NULL; 6723 const struct btf_param *args; 6724 struct btf *desc_btf; 6725 int err; 6726 6727 /* skip for now, but return error when we find this in fixup_kfunc_call */ 6728 if (!insn->imm) 6729 return 0; 6730 6731 desc_btf = find_kfunc_desc_btf(env, insn->imm, insn->off, &btf_mod); 6732 if (IS_ERR(desc_btf)) 6733 return PTR_ERR(desc_btf); 6734 6735 func_id = insn->imm; 6736 func = btf_type_by_id(desc_btf, func_id); 6737 func_name = btf_name_by_offset(desc_btf, func->name_off); 6738 func_proto = btf_type_by_id(desc_btf, func->type); 6739 6740 if (!env->ops->check_kfunc_call || 6741 !env->ops->check_kfunc_call(func_id, btf_mod)) { 6742 verbose(env, "calling kernel function %s is not allowed\n", 6743 func_name); 6744 return -EACCES; 6745 } 6746 6747 /* Check the arguments */ 6748 err = btf_check_kfunc_arg_match(env, desc_btf, func_id, regs); 6749 if (err) 6750 return err; 6751 6752 for (i = 0; i < CALLER_SAVED_REGS; i++) 6753 mark_reg_not_init(env, regs, caller_saved[i]); 6754 6755 /* Check return type */ 6756 t = btf_type_skip_modifiers(desc_btf, func_proto->type, NULL); 6757 if (btf_type_is_scalar(t)) { 6758 mark_reg_unknown(env, regs, BPF_REG_0); 6759 mark_btf_func_reg_size(env, BPF_REG_0, t->size); 6760 } else if (btf_type_is_ptr(t)) { 6761 ptr_type = btf_type_skip_modifiers(desc_btf, t->type, 6762 &ptr_type_id); 6763 if (!btf_type_is_struct(ptr_type)) { 6764 ptr_type_name = btf_name_by_offset(desc_btf, 6765 ptr_type->name_off); 6766 verbose(env, "kernel function %s returns pointer type %s %s is not supported\n", 6767 func_name, btf_type_str(ptr_type), 6768 ptr_type_name); 6769 return -EINVAL; 6770 } 6771 mark_reg_known_zero(env, regs, BPF_REG_0); 6772 regs[BPF_REG_0].btf = desc_btf; 6773 regs[BPF_REG_0].type = PTR_TO_BTF_ID; 6774 regs[BPF_REG_0].btf_id = ptr_type_id; 6775 mark_btf_func_reg_size(env, BPF_REG_0, sizeof(void *)); 6776 } /* else { add_kfunc_call() ensures it is btf_type_is_void(t) } */ 6777 6778 nargs = btf_type_vlen(func_proto); 6779 args = (const struct btf_param *)(func_proto + 1); 6780 for (i = 0; i < nargs; i++) { 6781 u32 regno = i + 1; 6782 6783 t = btf_type_skip_modifiers(desc_btf, args[i].type, NULL); 6784 if (btf_type_is_ptr(t)) 6785 mark_btf_func_reg_size(env, regno, sizeof(void *)); 6786 else 6787 /* scalar. 
ensured by btf_check_kfunc_arg_match() */ 6788 mark_btf_func_reg_size(env, regno, t->size); 6789 } 6790 6791 return 0; 6792 } 6793 6794 static bool signed_add_overflows(s64 a, s64 b) 6795 { 6796 /* Do the add in u64, where overflow is well-defined */ 6797 s64 res = (s64)((u64)a + (u64)b); 6798 6799 if (b < 0) 6800 return res > a; 6801 return res < a; 6802 } 6803 6804 static bool signed_add32_overflows(s32 a, s32 b) 6805 { 6806 /* Do the add in u32, where overflow is well-defined */ 6807 s32 res = (s32)((u32)a + (u32)b); 6808 6809 if (b < 0) 6810 return res > a; 6811 return res < a; 6812 } 6813 6814 static bool signed_sub_overflows(s64 a, s64 b) 6815 { 6816 /* Do the sub in u64, where overflow is well-defined */ 6817 s64 res = (s64)((u64)a - (u64)b); 6818 6819 if (b < 0) 6820 return res < a; 6821 return res > a; 6822 } 6823 6824 static bool signed_sub32_overflows(s32 a, s32 b) 6825 { 6826 /* Do the sub in u32, where overflow is well-defined */ 6827 s32 res = (s32)((u32)a - (u32)b); 6828 6829 if (b < 0) 6830 return res < a; 6831 return res > a; 6832 } 6833 6834 static bool check_reg_sane_offset(struct bpf_verifier_env *env, 6835 const struct bpf_reg_state *reg, 6836 enum bpf_reg_type type) 6837 { 6838 bool known = tnum_is_const(reg->var_off); 6839 s64 val = reg->var_off.value; 6840 s64 smin = reg->smin_value; 6841 6842 if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) { 6843 verbose(env, "math between %s pointer and %lld is not allowed\n", 6844 reg_type_str[type], val); 6845 return false; 6846 } 6847 6848 if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) { 6849 verbose(env, "%s pointer offset %d is not allowed\n", 6850 reg_type_str[type], reg->off); 6851 return false; 6852 } 6853 6854 if (smin == S64_MIN) { 6855 verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n", 6856 reg_type_str[type]); 6857 return false; 6858 } 6859 6860 if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) { 6861 verbose(env, "value %lld makes %s pointer be out of bounds\n", 6862 smin, reg_type_str[type]); 6863 return false; 6864 } 6865 6866 return true; 6867 } 6868 6869 static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env) 6870 { 6871 return &env->insn_aux_data[env->insn_idx]; 6872 } 6873 6874 enum { 6875 REASON_BOUNDS = -1, 6876 REASON_TYPE = -2, 6877 REASON_PATHS = -3, 6878 REASON_LIMIT = -4, 6879 REASON_STACK = -5, 6880 }; 6881 6882 static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg, 6883 u32 *alu_limit, bool mask_to_left) 6884 { 6885 u32 max = 0, ptr_limit = 0; 6886 6887 switch (ptr_reg->type) { 6888 case PTR_TO_STACK: 6889 /* Offset 0 is out-of-bounds, but acceptable start for the 6890 * left direction, see BPF_REG_FP. Also, unknown scalar 6891 * offset where we would need to deal with min/max bounds is 6892 * currently prohibited for unprivileged. 6893 */ 6894 max = MAX_BPF_STACK + mask_to_left; 6895 ptr_limit = -(ptr_reg->var_off.value + ptr_reg->off); 6896 break; 6897 case PTR_TO_MAP_VALUE: 6898 max = ptr_reg->map_ptr->value_size; 6899 ptr_limit = (mask_to_left ? 
6900 ptr_reg->smin_value : 6901 ptr_reg->umax_value) + ptr_reg->off; 6902 break; 6903 default: 6904 return REASON_TYPE; 6905 } 6906 6907 if (ptr_limit >= max) 6908 return REASON_LIMIT; 6909 *alu_limit = ptr_limit; 6910 return 0; 6911 } 6912 6913 static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env, 6914 const struct bpf_insn *insn) 6915 { 6916 return env->bypass_spec_v1 || BPF_SRC(insn->code) == BPF_K; 6917 } 6918 6919 static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux, 6920 u32 alu_state, u32 alu_limit) 6921 { 6922 /* If we arrived here from different branches with different 6923 * state or limits to sanitize, then this won't work. 6924 */ 6925 if (aux->alu_state && 6926 (aux->alu_state != alu_state || 6927 aux->alu_limit != alu_limit)) 6928 return REASON_PATHS; 6929 6930 /* Corresponding fixup done in do_misc_fixups(). */ 6931 aux->alu_state = alu_state; 6932 aux->alu_limit = alu_limit; 6933 return 0; 6934 } 6935 6936 static int sanitize_val_alu(struct bpf_verifier_env *env, 6937 struct bpf_insn *insn) 6938 { 6939 struct bpf_insn_aux_data *aux = cur_aux(env); 6940 6941 if (can_skip_alu_sanitation(env, insn)) 6942 return 0; 6943 6944 return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0); 6945 } 6946 6947 static bool sanitize_needed(u8 opcode) 6948 { 6949 return opcode == BPF_ADD || opcode == BPF_SUB; 6950 } 6951 6952 struct bpf_sanitize_info { 6953 struct bpf_insn_aux_data aux; 6954 bool mask_to_left; 6955 }; 6956 6957 static struct bpf_verifier_state * 6958 sanitize_speculative_path(struct bpf_verifier_env *env, 6959 const struct bpf_insn *insn, 6960 u32 next_idx, u32 curr_idx) 6961 { 6962 struct bpf_verifier_state *branch; 6963 struct bpf_reg_state *regs; 6964 6965 branch = push_stack(env, next_idx, curr_idx, true); 6966 if (branch && insn) { 6967 regs = branch->frame[branch->curframe]->regs; 6968 if (BPF_SRC(insn->code) == BPF_K) { 6969 mark_reg_unknown(env, regs, insn->dst_reg); 6970 } else if (BPF_SRC(insn->code) == BPF_X) { 6971 mark_reg_unknown(env, regs, insn->dst_reg); 6972 mark_reg_unknown(env, regs, insn->src_reg); 6973 } 6974 } 6975 return branch; 6976 } 6977 6978 static int sanitize_ptr_alu(struct bpf_verifier_env *env, 6979 struct bpf_insn *insn, 6980 const struct bpf_reg_state *ptr_reg, 6981 const struct bpf_reg_state *off_reg, 6982 struct bpf_reg_state *dst_reg, 6983 struct bpf_sanitize_info *info, 6984 const bool commit_window) 6985 { 6986 struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : &info->aux; 6987 struct bpf_verifier_state *vstate = env->cur_state; 6988 bool off_is_imm = tnum_is_const(off_reg->var_off); 6989 bool off_is_neg = off_reg->smin_value < 0; 6990 bool ptr_is_dst_reg = ptr_reg == dst_reg; 6991 u8 opcode = BPF_OP(insn->code); 6992 u32 alu_state, alu_limit; 6993 struct bpf_reg_state tmp; 6994 bool ret; 6995 int err; 6996 6997 if (can_skip_alu_sanitation(env, insn)) 6998 return 0; 6999 7000 /* We already marked aux for masking from non-speculative 7001 * paths, thus we got here in the first place. We only care 7002 * to explore bad access from here. 
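 * Hence, on a speculative path, skip the limit computation and go
 * straight to the simulation below.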
7003 */ 7004 if (vstate->speculative) 7005 goto do_sim; 7006 7007 if (!commit_window) { 7008 if (!tnum_is_const(off_reg->var_off) && 7009 (off_reg->smin_value < 0) != (off_reg->smax_value < 0)) 7010 return REASON_BOUNDS; 7011 7012 info->mask_to_left = (opcode == BPF_ADD && off_is_neg) || 7013 (opcode == BPF_SUB && !off_is_neg); 7014 } 7015 7016 err = retrieve_ptr_limit(ptr_reg, &alu_limit, info->mask_to_left); 7017 if (err < 0) 7018 return err; 7019 7020 if (commit_window) { 7021 /* In commit phase we narrow the masking window based on 7022 * the observed pointer move after the simulated operation. 7023 */ 7024 alu_state = info->aux.alu_state; 7025 alu_limit = abs(info->aux.alu_limit - alu_limit); 7026 } else { 7027 alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0; 7028 alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0; 7029 alu_state |= ptr_is_dst_reg ? 7030 BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST; 7031 7032 /* Limit pruning on unknown scalars to enable deep search for 7033 * potential masking differences from other program paths. 7034 */ 7035 if (!off_is_imm) 7036 env->explore_alu_limits = true; 7037 } 7038 7039 err = update_alu_sanitation_state(aux, alu_state, alu_limit); 7040 if (err < 0) 7041 return err; 7042 do_sim: 7043 /* If we're in commit phase, we're done here given we already 7044 * pushed the truncated dst_reg into the speculative verification 7045 * stack. 7046 * 7047 * Also, when register is a known constant, we rewrite register-based 7048 * operation to immediate-based, and thus do not need masking (and as 7049 * a consequence, do not need to simulate the zero-truncation either). 7050 */ 7051 if (commit_window || off_is_imm) 7052 return 0; 7053 7054 /* Simulate and find potential out-of-bounds access under 7055 * speculative execution from truncation as a result of 7056 * masking when off was not within expected range. If off 7057 * sits in dst, then we temporarily need to move ptr there 7058 * to simulate dst (== 0) +/-= ptr. Needed, for example, 7059 * for cases where we use K-based arithmetic in one direction 7060 * and truncated reg-based in the other in order to explore 7061 * bad access. 7062 */ 7063 if (!ptr_is_dst_reg) { 7064 tmp = *dst_reg; 7065 *dst_reg = *ptr_reg; 7066 } 7067 ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1, 7068 env->insn_idx); 7069 if (!ptr_is_dst_reg && ret) 7070 *dst_reg = tmp; 7071 return !ret ? REASON_STACK : 0; 7072 } 7073 7074 static void sanitize_mark_insn_seen(struct bpf_verifier_env *env) 7075 { 7076 struct bpf_verifier_state *vstate = env->cur_state; 7077 7078 /* If we simulate paths under speculation, we don't update the 7079 * insn as 'seen' such that when we verify unreachable paths in 7080 * the non-speculative domain, sanitize_dead_code() can still 7081 * rewrite/sanitize them. 7082 */ 7083 if (!vstate->speculative) 7084 env->insn_aux_data[env->insn_idx].seen = env->pass_cnt; 7085 } 7086 7087 static int sanitize_err(struct bpf_verifier_env *env, 7088 const struct bpf_insn *insn, int reason, 7089 const struct bpf_reg_state *off_reg, 7090 const struct bpf_reg_state *dst_reg) 7091 { 7092 static const char *err = "pointer arithmetic with it prohibited for !root"; 7093 const char *op = BPF_OP(insn->code) == BPF_ADD ? "add" : "sub"; 7094 u32 dst = insn->dst_reg, src = insn->src_reg; 7095 7096 switch (reason) { 7097 case REASON_BOUNDS: 7098 verbose(env, "R%d has unknown scalar with mixed signed bounds, %s\n", 7099 off_reg == dst_reg ? 
dst : src, err); 7100 break; 7101 case REASON_TYPE: 7102 verbose(env, "R%d has pointer with unsupported alu operation, %s\n", 7103 off_reg == dst_reg ? src : dst, err); 7104 break; 7105 case REASON_PATHS: 7106 verbose(env, "R%d tried to %s from different maps, paths or scalars, %s\n", 7107 dst, op, err); 7108 break; 7109 case REASON_LIMIT: 7110 verbose(env, "R%d tried to %s beyond pointer bounds, %s\n", 7111 dst, op, err); 7112 break; 7113 case REASON_STACK: 7114 verbose(env, "R%d could not be pushed for speculative verification, %s\n", 7115 dst, err); 7116 break; 7117 default: 7118 verbose(env, "verifier internal error: unknown reason (%d)\n", 7119 reason); 7120 break; 7121 } 7122 7123 return -EACCES; 7124 } 7125 7126 /* check that stack access falls within stack limits and that 'reg' doesn't 7127 * have a variable offset. 7128 * 7129 * Variable offset is prohibited for unprivileged mode for simplicity since it 7130 * requires corresponding support in Spectre masking for stack ALU. See also 7131 * retrieve_ptr_limit(). 7132 * 7133 * 7134 * 'off' includes 'reg->off'. 7135 */ 7136 static int check_stack_access_for_ptr_arithmetic( 7137 struct bpf_verifier_env *env, 7138 int regno, 7139 const struct bpf_reg_state *reg, 7140 int off) 7141 { 7142 if (!tnum_is_const(reg->var_off)) { 7143 char tn_buf[48]; 7144 7145 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 7146 verbose(env, "R%d variable stack access prohibited for !root, var_off=%s off=%d\n", 7147 regno, tn_buf, off); 7148 return -EACCES; 7149 } 7150 7151 if (off >= 0 || off < -MAX_BPF_STACK) { 7152 verbose(env, "R%d stack pointer arithmetic goes out of range, " 7153 "prohibited for !root; off=%d\n", regno, off); 7154 return -EACCES; 7155 } 7156 7157 return 0; 7158 } 7159 7160 static int sanitize_check_bounds(struct bpf_verifier_env *env, 7161 const struct bpf_insn *insn, 7162 const struct bpf_reg_state *dst_reg) 7163 { 7164 u32 dst = insn->dst_reg; 7165 7166 /* For unprivileged we require that resulting offset must be in bounds 7167 * in order to be able to sanitize access later on. 7168 */ 7169 if (env->bypass_spec_v1) 7170 return 0; 7171 7172 switch (dst_reg->type) { 7173 case PTR_TO_STACK: 7174 if (check_stack_access_for_ptr_arithmetic(env, dst, dst_reg, 7175 dst_reg->off + dst_reg->var_off.value)) 7176 return -EACCES; 7177 break; 7178 case PTR_TO_MAP_VALUE: 7179 if (check_map_access(env, dst, dst_reg->off, 1, false)) { 7180 verbose(env, "R%d pointer arithmetic of map value goes out of range, " 7181 "prohibited for !root\n", dst); 7182 return -EACCES; 7183 } 7184 break; 7185 default: 7186 break; 7187 } 7188 7189 return 0; 7190 } 7191 7192 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off. 7193 * Caller should also handle BPF_MOV case separately. 7194 * If we return -EACCES, caller may want to try again treating pointer as a 7195 * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks. 
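 *
 * Illustrative sequence (sketch; assumes R0 is PTR_TO_MAP_VALUE and R2 an
 * unknown SCALAR_VALUE):
 *   BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),         // known scalar: only the
 *                                                  // fixed 'off' grows to 4
 *   BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),  // unknown scalar: var_off
 *                                                  // and the 64-bit bounds
 *                                                  // are recomputed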
7196 */ 7197 static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, 7198 struct bpf_insn *insn, 7199 const struct bpf_reg_state *ptr_reg, 7200 const struct bpf_reg_state *off_reg) 7201 { 7202 struct bpf_verifier_state *vstate = env->cur_state; 7203 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 7204 struct bpf_reg_state *regs = state->regs, *dst_reg; 7205 bool known = tnum_is_const(off_reg->var_off); 7206 s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value, 7207 smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value; 7208 u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value, 7209 umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value; 7210 struct bpf_sanitize_info info = {}; 7211 u8 opcode = BPF_OP(insn->code); 7212 u32 dst = insn->dst_reg; 7213 int ret; 7214 7215 dst_reg = ®s[dst]; 7216 7217 if ((known && (smin_val != smax_val || umin_val != umax_val)) || 7218 smin_val > smax_val || umin_val > umax_val) { 7219 /* Taint dst register if offset had invalid bounds derived from 7220 * e.g. dead branches. 7221 */ 7222 __mark_reg_unknown(env, dst_reg); 7223 return 0; 7224 } 7225 7226 if (BPF_CLASS(insn->code) != BPF_ALU64) { 7227 /* 32-bit ALU ops on pointers produce (meaningless) scalars */ 7228 if (opcode == BPF_SUB && env->allow_ptr_leaks) { 7229 __mark_reg_unknown(env, dst_reg); 7230 return 0; 7231 } 7232 7233 verbose(env, 7234 "R%d 32-bit pointer arithmetic prohibited\n", 7235 dst); 7236 return -EACCES; 7237 } 7238 7239 switch (ptr_reg->type) { 7240 case PTR_TO_MAP_VALUE_OR_NULL: 7241 verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n", 7242 dst, reg_type_str[ptr_reg->type]); 7243 return -EACCES; 7244 case CONST_PTR_TO_MAP: 7245 /* smin_val represents the known value */ 7246 if (known && smin_val == 0 && opcode == BPF_ADD) 7247 break; 7248 fallthrough; 7249 case PTR_TO_PACKET_END: 7250 case PTR_TO_SOCKET: 7251 case PTR_TO_SOCKET_OR_NULL: 7252 case PTR_TO_SOCK_COMMON: 7253 case PTR_TO_SOCK_COMMON_OR_NULL: 7254 case PTR_TO_TCP_SOCK: 7255 case PTR_TO_TCP_SOCK_OR_NULL: 7256 case PTR_TO_XDP_SOCK: 7257 verbose(env, "R%d pointer arithmetic on %s prohibited\n", 7258 dst, reg_type_str[ptr_reg->type]); 7259 return -EACCES; 7260 default: 7261 break; 7262 } 7263 7264 /* In case of 'scalar += pointer', dst_reg inherits pointer type and id. 7265 * The id may be overwritten later if we create a new variable offset. 7266 */ 7267 dst_reg->type = ptr_reg->type; 7268 dst_reg->id = ptr_reg->id; 7269 7270 if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) || 7271 !check_reg_sane_offset(env, ptr_reg, ptr_reg->type)) 7272 return -EINVAL; 7273 7274 /* pointer types do not carry 32-bit bounds at the moment. */ 7275 __mark_reg32_unbounded(dst_reg); 7276 7277 if (sanitize_needed(opcode)) { 7278 ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg, 7279 &info, false); 7280 if (ret < 0) 7281 return sanitize_err(env, insn, ret, off_reg, dst_reg); 7282 } 7283 7284 switch (opcode) { 7285 case BPF_ADD: 7286 /* We can take a fixed offset as long as it doesn't overflow 7287 * the s32 'off' field 7288 */ 7289 if (known && (ptr_reg->off + smin_val == 7290 (s64)(s32)(ptr_reg->off + smin_val))) { 7291 /* pointer += K. 
Accumulate it into fixed offset */ 7292 dst_reg->smin_value = smin_ptr; 7293 dst_reg->smax_value = smax_ptr; 7294 dst_reg->umin_value = umin_ptr; 7295 dst_reg->umax_value = umax_ptr; 7296 dst_reg->var_off = ptr_reg->var_off; 7297 dst_reg->off = ptr_reg->off + smin_val; 7298 dst_reg->raw = ptr_reg->raw; 7299 break; 7300 } 7301 /* A new variable offset is created. Note that off_reg->off 7302 * == 0, since it's a scalar. 7303 * dst_reg gets the pointer type and since some positive 7304 * integer value was added to the pointer, give it a new 'id' 7305 * if it's a PTR_TO_PACKET. 7306 * this creates a new 'base' pointer, off_reg (variable) gets 7307 * added into the variable offset, and we copy the fixed offset 7308 * from ptr_reg. 7309 */ 7310 if (signed_add_overflows(smin_ptr, smin_val) || 7311 signed_add_overflows(smax_ptr, smax_val)) { 7312 dst_reg->smin_value = S64_MIN; 7313 dst_reg->smax_value = S64_MAX; 7314 } else { 7315 dst_reg->smin_value = smin_ptr + smin_val; 7316 dst_reg->smax_value = smax_ptr + smax_val; 7317 } 7318 if (umin_ptr + umin_val < umin_ptr || 7319 umax_ptr + umax_val < umax_ptr) { 7320 dst_reg->umin_value = 0; 7321 dst_reg->umax_value = U64_MAX; 7322 } else { 7323 dst_reg->umin_value = umin_ptr + umin_val; 7324 dst_reg->umax_value = umax_ptr + umax_val; 7325 } 7326 dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off); 7327 dst_reg->off = ptr_reg->off; 7328 dst_reg->raw = ptr_reg->raw; 7329 if (reg_is_pkt_pointer(ptr_reg)) { 7330 dst_reg->id = ++env->id_gen; 7331 /* something was added to pkt_ptr, set range to zero */ 7332 memset(&dst_reg->raw, 0, sizeof(dst_reg->raw)); 7333 } 7334 break; 7335 case BPF_SUB: 7336 if (dst_reg == off_reg) { 7337 /* scalar -= pointer. Creates an unknown scalar */ 7338 verbose(env, "R%d tried to subtract pointer from scalar\n", 7339 dst); 7340 return -EACCES; 7341 } 7342 /* We don't allow subtraction from FP, because (according to 7343 * test_verifier.c test "invalid fp arithmetic", JITs might not 7344 * be able to deal with it. 7345 */ 7346 if (ptr_reg->type == PTR_TO_STACK) { 7347 verbose(env, "R%d subtraction from stack pointer prohibited\n", 7348 dst); 7349 return -EACCES; 7350 } 7351 if (known && (ptr_reg->off - smin_val == 7352 (s64)(s32)(ptr_reg->off - smin_val))) { 7353 /* pointer -= K. Subtract it from fixed offset */ 7354 dst_reg->smin_value = smin_ptr; 7355 dst_reg->smax_value = smax_ptr; 7356 dst_reg->umin_value = umin_ptr; 7357 dst_reg->umax_value = umax_ptr; 7358 dst_reg->var_off = ptr_reg->var_off; 7359 dst_reg->id = ptr_reg->id; 7360 dst_reg->off = ptr_reg->off - smin_val; 7361 dst_reg->raw = ptr_reg->raw; 7362 break; 7363 } 7364 /* A new variable offset is created. If the subtrahend is known 7365 * nonnegative, then any reg->range we had before is still good. 
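 *
 * e.g. (illustrative) if the scalar being subtracted is known to lie in
 * [0, 8], bytes already proven readable via an earlier pkt_end check stay
 * readable, which is why the range is only cleared below when
 * smin_val < 0.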
7366 */ 7367 if (signed_sub_overflows(smin_ptr, smax_val) || 7368 signed_sub_overflows(smax_ptr, smin_val)) { 7369 /* Overflow possible, we know nothing */ 7370 dst_reg->smin_value = S64_MIN; 7371 dst_reg->smax_value = S64_MAX; 7372 } else { 7373 dst_reg->smin_value = smin_ptr - smax_val; 7374 dst_reg->smax_value = smax_ptr - smin_val; 7375 } 7376 if (umin_ptr < umax_val) { 7377 /* Overflow possible, we know nothing */ 7378 dst_reg->umin_value = 0; 7379 dst_reg->umax_value = U64_MAX; 7380 } else { 7381 /* Cannot overflow (as long as bounds are consistent) */ 7382 dst_reg->umin_value = umin_ptr - umax_val; 7383 dst_reg->umax_value = umax_ptr - umin_val; 7384 } 7385 dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off); 7386 dst_reg->off = ptr_reg->off; 7387 dst_reg->raw = ptr_reg->raw; 7388 if (reg_is_pkt_pointer(ptr_reg)) { 7389 dst_reg->id = ++env->id_gen; 7390 /* something was added to pkt_ptr, set range to zero */ 7391 if (smin_val < 0) 7392 memset(&dst_reg->raw, 0, sizeof(dst_reg->raw)); 7393 } 7394 break; 7395 case BPF_AND: 7396 case BPF_OR: 7397 case BPF_XOR: 7398 /* bitwise ops on pointers are troublesome, prohibit. */ 7399 verbose(env, "R%d bitwise operator %s on pointer prohibited\n", 7400 dst, bpf_alu_string[opcode >> 4]); 7401 return -EACCES; 7402 default: 7403 /* other operators (e.g. MUL,LSH) produce non-pointer results */ 7404 verbose(env, "R%d pointer arithmetic with %s operator prohibited\n", 7405 dst, bpf_alu_string[opcode >> 4]); 7406 return -EACCES; 7407 } 7408 7409 if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type)) 7410 return -EINVAL; 7411 7412 __update_reg_bounds(dst_reg); 7413 __reg_deduce_bounds(dst_reg); 7414 __reg_bound_offset(dst_reg); 7415 7416 if (sanitize_check_bounds(env, insn, dst_reg) < 0) 7417 return -EACCES; 7418 if (sanitize_needed(opcode)) { 7419 ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg, 7420 &info, true); 7421 if (ret < 0) 7422 return sanitize_err(env, insn, ret, off_reg, dst_reg); 7423 } 7424 7425 return 0; 7426 } 7427 7428 static void scalar32_min_max_add(struct bpf_reg_state *dst_reg, 7429 struct bpf_reg_state *src_reg) 7430 { 7431 s32 smin_val = src_reg->s32_min_value; 7432 s32 smax_val = src_reg->s32_max_value; 7433 u32 umin_val = src_reg->u32_min_value; 7434 u32 umax_val = src_reg->u32_max_value; 7435 7436 if (signed_add32_overflows(dst_reg->s32_min_value, smin_val) || 7437 signed_add32_overflows(dst_reg->s32_max_value, smax_val)) { 7438 dst_reg->s32_min_value = S32_MIN; 7439 dst_reg->s32_max_value = S32_MAX; 7440 } else { 7441 dst_reg->s32_min_value += smin_val; 7442 dst_reg->s32_max_value += smax_val; 7443 } 7444 if (dst_reg->u32_min_value + umin_val < umin_val || 7445 dst_reg->u32_max_value + umax_val < umax_val) { 7446 dst_reg->u32_min_value = 0; 7447 dst_reg->u32_max_value = U32_MAX; 7448 } else { 7449 dst_reg->u32_min_value += umin_val; 7450 dst_reg->u32_max_value += umax_val; 7451 } 7452 } 7453 7454 static void scalar_min_max_add(struct bpf_reg_state *dst_reg, 7455 struct bpf_reg_state *src_reg) 7456 { 7457 s64 smin_val = src_reg->smin_value; 7458 s64 smax_val = src_reg->smax_value; 7459 u64 umin_val = src_reg->umin_value; 7460 u64 umax_val = src_reg->umax_value; 7461 7462 if (signed_add_overflows(dst_reg->smin_value, smin_val) || 7463 signed_add_overflows(dst_reg->smax_value, smax_val)) { 7464 dst_reg->smin_value = S64_MIN; 7465 dst_reg->smax_value = S64_MAX; 7466 } else { 7467 dst_reg->smin_value += smin_val; 7468 dst_reg->smax_value += smax_val; 7469 } 7470 if (dst_reg->umin_value + umin_val < umin_val 
|| 7471 dst_reg->umax_value + umax_val < umax_val) { 7472 dst_reg->umin_value = 0; 7473 dst_reg->umax_value = U64_MAX; 7474 } else { 7475 dst_reg->umin_value += umin_val; 7476 dst_reg->umax_value += umax_val; 7477 } 7478 } 7479 7480 static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg, 7481 struct bpf_reg_state *src_reg) 7482 { 7483 s32 smin_val = src_reg->s32_min_value; 7484 s32 smax_val = src_reg->s32_max_value; 7485 u32 umin_val = src_reg->u32_min_value; 7486 u32 umax_val = src_reg->u32_max_value; 7487 7488 if (signed_sub32_overflows(dst_reg->s32_min_value, smax_val) || 7489 signed_sub32_overflows(dst_reg->s32_max_value, smin_val)) { 7490 /* Overflow possible, we know nothing */ 7491 dst_reg->s32_min_value = S32_MIN; 7492 dst_reg->s32_max_value = S32_MAX; 7493 } else { 7494 dst_reg->s32_min_value -= smax_val; 7495 dst_reg->s32_max_value -= smin_val; 7496 } 7497 if (dst_reg->u32_min_value < umax_val) { 7498 /* Overflow possible, we know nothing */ 7499 dst_reg->u32_min_value = 0; 7500 dst_reg->u32_max_value = U32_MAX; 7501 } else { 7502 /* Cannot overflow (as long as bounds are consistent) */ 7503 dst_reg->u32_min_value -= umax_val; 7504 dst_reg->u32_max_value -= umin_val; 7505 } 7506 } 7507 7508 static void scalar_min_max_sub(struct bpf_reg_state *dst_reg, 7509 struct bpf_reg_state *src_reg) 7510 { 7511 s64 smin_val = src_reg->smin_value; 7512 s64 smax_val = src_reg->smax_value; 7513 u64 umin_val = src_reg->umin_value; 7514 u64 umax_val = src_reg->umax_value; 7515 7516 if (signed_sub_overflows(dst_reg->smin_value, smax_val) || 7517 signed_sub_overflows(dst_reg->smax_value, smin_val)) { 7518 /* Overflow possible, we know nothing */ 7519 dst_reg->smin_value = S64_MIN; 7520 dst_reg->smax_value = S64_MAX; 7521 } else { 7522 dst_reg->smin_value -= smax_val; 7523 dst_reg->smax_value -= smin_val; 7524 } 7525 if (dst_reg->umin_value < umax_val) { 7526 /* Overflow possible, we know nothing */ 7527 dst_reg->umin_value = 0; 7528 dst_reg->umax_value = U64_MAX; 7529 } else { 7530 /* Cannot overflow (as long as bounds are consistent) */ 7531 dst_reg->umin_value -= umax_val; 7532 dst_reg->umax_value -= umin_val; 7533 } 7534 } 7535 7536 static void scalar32_min_max_mul(struct bpf_reg_state *dst_reg, 7537 struct bpf_reg_state *src_reg) 7538 { 7539 s32 smin_val = src_reg->s32_min_value; 7540 u32 umin_val = src_reg->u32_min_value; 7541 u32 umax_val = src_reg->u32_max_value; 7542 7543 if (smin_val < 0 || dst_reg->s32_min_value < 0) { 7544 /* Ain't nobody got time to multiply that sign */ 7545 __mark_reg32_unbounded(dst_reg); 7546 return; 7547 } 7548 /* Both values are positive, so we can work with unsigned and 7549 * copy the result to signed (unless it exceeds S32_MAX). 
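 *
 * Worked example of the U16_MAX guard below (illustrative arithmetic):
 * 0xffff * 0xffff = 0xfffe0001, so as long as both operands stay at or
 * below U16_MAX the u32 product cannot wrap.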
7550 */ 7551 if (umax_val > U16_MAX || dst_reg->u32_max_value > U16_MAX) { 7552 /* Potential overflow, we know nothing */ 7553 __mark_reg32_unbounded(dst_reg); 7554 return; 7555 } 7556 dst_reg->u32_min_value *= umin_val; 7557 dst_reg->u32_max_value *= umax_val; 7558 if (dst_reg->u32_max_value > S32_MAX) { 7559 /* Overflow possible, we know nothing */ 7560 dst_reg->s32_min_value = S32_MIN; 7561 dst_reg->s32_max_value = S32_MAX; 7562 } else { 7563 dst_reg->s32_min_value = dst_reg->u32_min_value; 7564 dst_reg->s32_max_value = dst_reg->u32_max_value; 7565 } 7566 } 7567 7568 static void scalar_min_max_mul(struct bpf_reg_state *dst_reg, 7569 struct bpf_reg_state *src_reg) 7570 { 7571 s64 smin_val = src_reg->smin_value; 7572 u64 umin_val = src_reg->umin_value; 7573 u64 umax_val = src_reg->umax_value; 7574 7575 if (smin_val < 0 || dst_reg->smin_value < 0) { 7576 /* Ain't nobody got time to multiply that sign */ 7577 __mark_reg64_unbounded(dst_reg); 7578 return; 7579 } 7580 /* Both values are positive, so we can work with unsigned and 7581 * copy the result to signed (unless it exceeds S64_MAX). 7582 */ 7583 if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) { 7584 /* Potential overflow, we know nothing */ 7585 __mark_reg64_unbounded(dst_reg); 7586 return; 7587 } 7588 dst_reg->umin_value *= umin_val; 7589 dst_reg->umax_value *= umax_val; 7590 if (dst_reg->umax_value > S64_MAX) { 7591 /* Overflow possible, we know nothing */ 7592 dst_reg->smin_value = S64_MIN; 7593 dst_reg->smax_value = S64_MAX; 7594 } else { 7595 dst_reg->smin_value = dst_reg->umin_value; 7596 dst_reg->smax_value = dst_reg->umax_value; 7597 } 7598 } 7599 7600 static void scalar32_min_max_and(struct bpf_reg_state *dst_reg, 7601 struct bpf_reg_state *src_reg) 7602 { 7603 bool src_known = tnum_subreg_is_const(src_reg->var_off); 7604 bool dst_known = tnum_subreg_is_const(dst_reg->var_off); 7605 struct tnum var32_off = tnum_subreg(dst_reg->var_off); 7606 s32 smin_val = src_reg->s32_min_value; 7607 u32 umax_val = src_reg->u32_max_value; 7608 7609 if (src_known && dst_known) { 7610 __mark_reg32_known(dst_reg, var32_off.value); 7611 return; 7612 } 7613 7614 /* We get our minimum from the var_off, since that's inherently 7615 * bitwise. Our maximum is the minimum of the operands' maxima. 7616 */ 7617 dst_reg->u32_min_value = var32_off.value; 7618 dst_reg->u32_max_value = min(dst_reg->u32_max_value, umax_val); 7619 if (dst_reg->s32_min_value < 0 || smin_val < 0) { 7620 /* Lose signed bounds when ANDing negative numbers, 7621 * ain't nobody got time for that. 7622 */ 7623 dst_reg->s32_min_value = S32_MIN; 7624 dst_reg->s32_max_value = S32_MAX; 7625 } else { 7626 /* ANDing two positives gives a positive, so safe to 7627 * cast result into s64. 7628 */ 7629 dst_reg->s32_min_value = dst_reg->u32_min_value; 7630 dst_reg->s32_max_value = dst_reg->u32_max_value; 7631 } 7632 } 7633 7634 static void scalar_min_max_and(struct bpf_reg_state *dst_reg, 7635 struct bpf_reg_state *src_reg) 7636 { 7637 bool src_known = tnum_is_const(src_reg->var_off); 7638 bool dst_known = tnum_is_const(dst_reg->var_off); 7639 s64 smin_val = src_reg->smin_value; 7640 u64 umax_val = src_reg->umax_value; 7641 7642 if (src_known && dst_known) { 7643 __mark_reg_known(dst_reg, dst_reg->var_off.value); 7644 return; 7645 } 7646 7647 /* We get our minimum from the var_off, since that's inherently 7648 * bitwise. Our maximum is the minimum of the operands' maxima. 
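 *
 * e.g. (illustrative) ANDing a completely unknown value with the constant
 * 0xff leaves var_off with value 0 and mask 0xff, so umin becomes 0 and
 * umax is clamped to 0xff.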
7649 */ 7650 dst_reg->umin_value = dst_reg->var_off.value; 7651 dst_reg->umax_value = min(dst_reg->umax_value, umax_val); 7652 if (dst_reg->smin_value < 0 || smin_val < 0) { 7653 /* Lose signed bounds when ANDing negative numbers, 7654 * ain't nobody got time for that. 7655 */ 7656 dst_reg->smin_value = S64_MIN; 7657 dst_reg->smax_value = S64_MAX; 7658 } else { 7659 /* ANDing two positives gives a positive, so safe to 7660 * cast result into s64. 7661 */ 7662 dst_reg->smin_value = dst_reg->umin_value; 7663 dst_reg->smax_value = dst_reg->umax_value; 7664 } 7665 /* We may learn something more from the var_off */ 7666 __update_reg_bounds(dst_reg); 7667 } 7668 7669 static void scalar32_min_max_or(struct bpf_reg_state *dst_reg, 7670 struct bpf_reg_state *src_reg) 7671 { 7672 bool src_known = tnum_subreg_is_const(src_reg->var_off); 7673 bool dst_known = tnum_subreg_is_const(dst_reg->var_off); 7674 struct tnum var32_off = tnum_subreg(dst_reg->var_off); 7675 s32 smin_val = src_reg->s32_min_value; 7676 u32 umin_val = src_reg->u32_min_value; 7677 7678 if (src_known && dst_known) { 7679 __mark_reg32_known(dst_reg, var32_off.value); 7680 return; 7681 } 7682 7683 /* We get our maximum from the var_off, and our minimum is the 7684 * maximum of the operands' minima 7685 */ 7686 dst_reg->u32_min_value = max(dst_reg->u32_min_value, umin_val); 7687 dst_reg->u32_max_value = var32_off.value | var32_off.mask; 7688 if (dst_reg->s32_min_value < 0 || smin_val < 0) { 7689 /* Lose signed bounds when ORing negative numbers, 7690 * ain't nobody got time for that. 7691 */ 7692 dst_reg->s32_min_value = S32_MIN; 7693 dst_reg->s32_max_value = S32_MAX; 7694 } else { 7695 /* ORing two positives gives a positive, so safe to 7696 * cast result into s64. 7697 */ 7698 dst_reg->s32_min_value = dst_reg->u32_min_value; 7699 dst_reg->s32_max_value = dst_reg->u32_max_value; 7700 } 7701 } 7702 7703 static void scalar_min_max_or(struct bpf_reg_state *dst_reg, 7704 struct bpf_reg_state *src_reg) 7705 { 7706 bool src_known = tnum_is_const(src_reg->var_off); 7707 bool dst_known = tnum_is_const(dst_reg->var_off); 7708 s64 smin_val = src_reg->smin_value; 7709 u64 umin_val = src_reg->umin_value; 7710 7711 if (src_known && dst_known) { 7712 __mark_reg_known(dst_reg, dst_reg->var_off.value); 7713 return; 7714 } 7715 7716 /* We get our maximum from the var_off, and our minimum is the 7717 * maximum of the operands' minima 7718 */ 7719 dst_reg->umin_value = max(dst_reg->umin_value, umin_val); 7720 dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask; 7721 if (dst_reg->smin_value < 0 || smin_val < 0) { 7722 /* Lose signed bounds when ORing negative numbers, 7723 * ain't nobody got time for that. 7724 */ 7725 dst_reg->smin_value = S64_MIN; 7726 dst_reg->smax_value = S64_MAX; 7727 } else { 7728 /* ORing two positives gives a positive, so safe to 7729 * cast result into s64. 
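 *
 * e.g. (illustrative) ORing a completely unknown value with the constant
 * 1 raises umin to 1 (the low bit is now known to be set) while umax
 * stays at U64_MAX.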
7730 */ 7731 dst_reg->smin_value = dst_reg->umin_value; 7732 dst_reg->smax_value = dst_reg->umax_value; 7733 } 7734 /* We may learn something more from the var_off */ 7735 __update_reg_bounds(dst_reg); 7736 } 7737 7738 static void scalar32_min_max_xor(struct bpf_reg_state *dst_reg, 7739 struct bpf_reg_state *src_reg) 7740 { 7741 bool src_known = tnum_subreg_is_const(src_reg->var_off); 7742 bool dst_known = tnum_subreg_is_const(dst_reg->var_off); 7743 struct tnum var32_off = tnum_subreg(dst_reg->var_off); 7744 s32 smin_val = src_reg->s32_min_value; 7745 7746 if (src_known && dst_known) { 7747 __mark_reg32_known(dst_reg, var32_off.value); 7748 return; 7749 } 7750 7751 /* We get both minimum and maximum from the var32_off. */ 7752 dst_reg->u32_min_value = var32_off.value; 7753 dst_reg->u32_max_value = var32_off.value | var32_off.mask; 7754 7755 if (dst_reg->s32_min_value >= 0 && smin_val >= 0) { 7756 /* XORing two positive sign numbers gives a positive, 7757 * so safe to cast u32 result into s32. 7758 */ 7759 dst_reg->s32_min_value = dst_reg->u32_min_value; 7760 dst_reg->s32_max_value = dst_reg->u32_max_value; 7761 } else { 7762 dst_reg->s32_min_value = S32_MIN; 7763 dst_reg->s32_max_value = S32_MAX; 7764 } 7765 } 7766 7767 static void scalar_min_max_xor(struct bpf_reg_state *dst_reg, 7768 struct bpf_reg_state *src_reg) 7769 { 7770 bool src_known = tnum_is_const(src_reg->var_off); 7771 bool dst_known = tnum_is_const(dst_reg->var_off); 7772 s64 smin_val = src_reg->smin_value; 7773 7774 if (src_known && dst_known) { 7775 /* dst_reg->var_off.value has been updated earlier */ 7776 __mark_reg_known(dst_reg, dst_reg->var_off.value); 7777 return; 7778 } 7779 7780 /* We get both minimum and maximum from the var_off. */ 7781 dst_reg->umin_value = dst_reg->var_off.value; 7782 dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask; 7783 7784 if (dst_reg->smin_value >= 0 && smin_val >= 0) { 7785 /* XORing two positive sign numbers gives a positive, 7786 * so safe to cast u64 result into s64. 7787 */ 7788 dst_reg->smin_value = dst_reg->umin_value; 7789 dst_reg->smax_value = dst_reg->umax_value; 7790 } else { 7791 dst_reg->smin_value = S64_MIN; 7792 dst_reg->smax_value = S64_MAX; 7793 } 7794 7795 __update_reg_bounds(dst_reg); 7796 } 7797 7798 static void __scalar32_min_max_lsh(struct bpf_reg_state *dst_reg, 7799 u64 umin_val, u64 umax_val) 7800 { 7801 /* We lose all sign bit information (except what we can pick 7802 * up from var_off) 7803 */ 7804 dst_reg->s32_min_value = S32_MIN; 7805 dst_reg->s32_max_value = S32_MAX; 7806 /* If we might shift our top bit out, then we know nothing */ 7807 if (umax_val > 31 || dst_reg->u32_max_value > 1ULL << (31 - umax_val)) { 7808 dst_reg->u32_min_value = 0; 7809 dst_reg->u32_max_value = U32_MAX; 7810 } else { 7811 dst_reg->u32_min_value <<= umin_val; 7812 dst_reg->u32_max_value <<= umax_val; 7813 } 7814 } 7815 7816 static void scalar32_min_max_lsh(struct bpf_reg_state *dst_reg, 7817 struct bpf_reg_state *src_reg) 7818 { 7819 u32 umax_val = src_reg->u32_max_value; 7820 u32 umin_val = src_reg->u32_min_value; 7821 /* u32 alu operation will zext upper bits */ 7822 struct tnum subreg = tnum_subreg(dst_reg->var_off); 7823 7824 __scalar32_min_max_lsh(dst_reg, umin_val, umax_val); 7825 dst_reg->var_off = tnum_subreg(tnum_lshift(subreg, umin_val)); 7826 /* Not required but being careful mark reg64 bounds as unknown so 7827 * that we are forced to pick them up from tnum and zext later and 7828 * if some path skips this step we are still safe. 
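 *
 * e.g. (illustrative) a 32-bit shift of a subreg known to be in [1, 2] by
 * the constant 3 yields u32 bounds [8, 16]; the 64-bit bounds are then
 * re-derived from the zero-extended result rather than shifted directly.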
7829 */ 7830 __mark_reg64_unbounded(dst_reg); 7831 __update_reg32_bounds(dst_reg); 7832 } 7833 7834 static void __scalar64_min_max_lsh(struct bpf_reg_state *dst_reg, 7835 u64 umin_val, u64 umax_val) 7836 { 7837 /* Special case <<32 because it is a common compiler pattern to sign 7838 * extend subreg by doing <<32 s>>32. In this case if 32bit bounds are 7839 * positive we know this shift will also be positive so we can track 7840 * bounds correctly. Otherwise we lose all sign bit information except 7841 * what we can pick up from var_off. Perhaps we can generalize this 7842 * later to shifts of any length. 7843 */ 7844 if (umin_val == 32 && umax_val == 32 && dst_reg->s32_max_value >= 0) 7845 dst_reg->smax_value = (s64)dst_reg->s32_max_value << 32; 7846 else 7847 dst_reg->smax_value = S64_MAX; 7848 7849 if (umin_val == 32 && umax_val == 32 && dst_reg->s32_min_value >= 0) 7850 dst_reg->smin_value = (s64)dst_reg->s32_min_value << 32; 7851 else 7852 dst_reg->smin_value = S64_MIN; 7853 7854 /* If we might shift our top bit out, then we know nothing */ 7855 if (dst_reg->umax_value > 1ULL << (63 - umax_val)) { 7856 dst_reg->umin_value = 0; 7857 dst_reg->umax_value = U64_MAX; 7858 } else { 7859 dst_reg->umin_value <<= umin_val; 7860 dst_reg->umax_value <<= umax_val; 7861 } 7862 } 7863 7864 static void scalar_min_max_lsh(struct bpf_reg_state *dst_reg, 7865 struct bpf_reg_state *src_reg) 7866 { 7867 u64 umax_val = src_reg->umax_value; 7868 u64 umin_val = src_reg->umin_value; 7869 7870 /* scalar64 calc uses 32bit unshifted bounds so must be called first */ 7871 __scalar64_min_max_lsh(dst_reg, umin_val, umax_val); 7872 __scalar32_min_max_lsh(dst_reg, umin_val, umax_val); 7873 7874 dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val); 7875 /* We may learn something more from the var_off */ 7876 __update_reg_bounds(dst_reg); 7877 } 7878 7879 static void scalar32_min_max_rsh(struct bpf_reg_state *dst_reg, 7880 struct bpf_reg_state *src_reg) 7881 { 7882 struct tnum subreg = tnum_subreg(dst_reg->var_off); 7883 u32 umax_val = src_reg->u32_max_value; 7884 u32 umin_val = src_reg->u32_min_value; 7885 7886 /* BPF_RSH is an unsigned shift. If the value in dst_reg might 7887 * be negative, then either: 7888 * 1) src_reg might be zero, so the sign bit of the result is 7889 * unknown, so we lose our signed bounds 7890 * 2) it's known negative, thus the unsigned bounds capture the 7891 * signed bounds 7892 * 3) the signed bounds cross zero, so they tell us nothing 7893 * about the result 7894 * If the value in dst_reg is known nonnegative, then again the 7895 * unsigned bounds capture the signed bounds. 7896 * Thus, in all cases it suffices to blow away our signed bounds 7897 * and rely on inferring new ones from the unsigned bounds and 7898 * var_off of the result. 7899 */ 7900 dst_reg->s32_min_value = S32_MIN; 7901 dst_reg->s32_max_value = S32_MAX; 7902 7903 dst_reg->var_off = tnum_rshift(subreg, umin_val); 7904 dst_reg->u32_min_value >>= umax_val; 7905 dst_reg->u32_max_value >>= umin_val; 7906 7907 __mark_reg64_unbounded(dst_reg); 7908 __update_reg32_bounds(dst_reg); 7909 } 7910 7911 static void scalar_min_max_rsh(struct bpf_reg_state *dst_reg, 7912 struct bpf_reg_state *src_reg) 7913 { 7914 u64 umax_val = src_reg->umax_value; 7915 u64 umin_val = src_reg->umin_value; 7916 7917 /* BPF_RSH is an unsigned shift. 
If the value in dst_reg might 7918 * be negative, then either: 7919 * 1) src_reg might be zero, so the sign bit of the result is 7920 * unknown, so we lose our signed bounds 7921 * 2) it's known negative, thus the unsigned bounds capture the 7922 * signed bounds 7923 * 3) the signed bounds cross zero, so they tell us nothing 7924 * about the result 7925 * If the value in dst_reg is known nonnegative, then again the 7926 * unsigned bounds capture the signed bounds. 7927 * Thus, in all cases it suffices to blow away our signed bounds 7928 * and rely on inferring new ones from the unsigned bounds and 7929 * var_off of the result. 7930 */ 7931 dst_reg->smin_value = S64_MIN; 7932 dst_reg->smax_value = S64_MAX; 7933 dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val); 7934 dst_reg->umin_value >>= umax_val; 7935 dst_reg->umax_value >>= umin_val; 7936 7937 /* Its not easy to operate on alu32 bounds here because it depends 7938 * on bits being shifted in. Take easy way out and mark unbounded 7939 * so we can recalculate later from tnum. 7940 */ 7941 __mark_reg32_unbounded(dst_reg); 7942 __update_reg_bounds(dst_reg); 7943 } 7944 7945 static void scalar32_min_max_arsh(struct bpf_reg_state *dst_reg, 7946 struct bpf_reg_state *src_reg) 7947 { 7948 u64 umin_val = src_reg->u32_min_value; 7949 7950 /* Upon reaching here, src_known is true and 7951 * umax_val is equal to umin_val. 7952 */ 7953 dst_reg->s32_min_value = (u32)(((s32)dst_reg->s32_min_value) >> umin_val); 7954 dst_reg->s32_max_value = (u32)(((s32)dst_reg->s32_max_value) >> umin_val); 7955 7956 dst_reg->var_off = tnum_arshift(tnum_subreg(dst_reg->var_off), umin_val, 32); 7957 7958 /* blow away the dst_reg umin_value/umax_value and rely on 7959 * dst_reg var_off to refine the result. 7960 */ 7961 dst_reg->u32_min_value = 0; 7962 dst_reg->u32_max_value = U32_MAX; 7963 7964 __mark_reg64_unbounded(dst_reg); 7965 __update_reg32_bounds(dst_reg); 7966 } 7967 7968 static void scalar_min_max_arsh(struct bpf_reg_state *dst_reg, 7969 struct bpf_reg_state *src_reg) 7970 { 7971 u64 umin_val = src_reg->umin_value; 7972 7973 /* Upon reaching here, src_known is true and umax_val is equal 7974 * to umin_val. 7975 */ 7976 dst_reg->smin_value >>= umin_val; 7977 dst_reg->smax_value >>= umin_val; 7978 7979 dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val, 64); 7980 7981 /* blow away the dst_reg umin_value/umax_value and rely on 7982 * dst_reg var_off to refine the result. 7983 */ 7984 dst_reg->umin_value = 0; 7985 dst_reg->umax_value = U64_MAX; 7986 7987 /* Its not easy to operate on alu32 bounds here because it depends 7988 * on bits being shifted in from upper 32-bits. Take easy way out 7989 * and mark unbounded so we can recalculate later from tnum. 7990 */ 7991 __mark_reg32_unbounded(dst_reg); 7992 __update_reg_bounds(dst_reg); 7993 } 7994 7995 /* WARNING: This function does calculations on 64-bit values, but the actual 7996 * execution may occur on 32-bit values. Therefore, things like bitshifts 7997 * need extra checks in the 32-bit case. 7998 */ 7999 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, 8000 struct bpf_insn *insn, 8001 struct bpf_reg_state *dst_reg, 8002 struct bpf_reg_state src_reg) 8003 { 8004 struct bpf_reg_state *regs = cur_regs(env); 8005 u8 opcode = BPF_OP(insn->code); 8006 bool src_known; 8007 s64 smin_val, smax_val; 8008 u64 umin_val, umax_val; 8009 s32 s32_min_val, s32_max_val; 8010 u32 u32_min_val, u32_max_val; 8011 u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 
64 : 32;
8012 bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64);
8013 int ret;
8014
8015 smin_val = src_reg.smin_value;
8016 smax_val = src_reg.smax_value;
8017 umin_val = src_reg.umin_value;
8018 umax_val = src_reg.umax_value;
8019
8020 s32_min_val = src_reg.s32_min_value;
8021 s32_max_val = src_reg.s32_max_value;
8022 u32_min_val = src_reg.u32_min_value;
8023 u32_max_val = src_reg.u32_max_value;
8024
8025 if (alu32) {
8026 src_known = tnum_subreg_is_const(src_reg.var_off);
8027 if ((src_known &&
8028 (s32_min_val != s32_max_val || u32_min_val != u32_max_val)) ||
8029 s32_min_val > s32_max_val || u32_min_val > u32_max_val) {
8030 /* Taint dst register if offset had invalid bounds
8031 * derived from e.g. dead branches.
8032 */
8033 __mark_reg_unknown(env, dst_reg);
8034 return 0;
8035 }
8036 } else {
8037 src_known = tnum_is_const(src_reg.var_off);
8038 if ((src_known &&
8039 (smin_val != smax_val || umin_val != umax_val)) ||
8040 smin_val > smax_val || umin_val > umax_val) {
8041 /* Taint dst register if offset had invalid bounds
8042 * derived from e.g. dead branches.
8043 */
8044 __mark_reg_unknown(env, dst_reg);
8045 return 0;
8046 }
8047 }
8048
8049 if (!src_known &&
8050 opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
8051 __mark_reg_unknown(env, dst_reg);
8052 return 0;
8053 }
8054
8055 if (sanitize_needed(opcode)) {
8056 ret = sanitize_val_alu(env, insn);
8057 if (ret < 0)
8058 return sanitize_err(env, insn, ret, NULL, NULL);
8059 }
8060
8061 /* Calculate sign/unsigned bounds and tnum for alu32 and alu64 bit ops.
8062 * There are two classes of instructions: For the first class we track both
8063 * alu32 and alu64 sign/unsigned bounds independently; this provides the
8064 * greatest amount of precision when alu operations are mixed with jmp32
8065 * operations. These operations are BPF_ADD, BPF_SUB, BPF_MUL, BPF_AND,
8066 * and BPF_OR. This is possible because these ops have fairly easy to
8067 * understand and calculate behavior in both 32-bit and 64-bit alu ops.
8068 * See alu32 verifier tests for examples. The second class of
8069 * operations, BPF_LSH, BPF_RSH, and BPF_ARSH, however, are not so easy
8070 * with regards to tracking sign/unsigned bounds because the bits may
8071 * cross subreg boundaries in the alu64 case. When this happens we mark
8072 * the reg unbounded in the subreg bound space and use the resulting
8073 * tnum to calculate an approximation of the sign/unsigned bounds.
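 *
 * e.g. (illustrative) a 64-bit BPF_RSH shifts bits from the upper half
 * down into the lower subreg, so the alu32 bounds of the result are not
 * derived from the old alu32 bounds; they are marked unbounded and then
 * recomputed from the shifted tnum.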
8074 */ 8075 switch (opcode) { 8076 case BPF_ADD: 8077 scalar32_min_max_add(dst_reg, &src_reg); 8078 scalar_min_max_add(dst_reg, &src_reg); 8079 dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off); 8080 break; 8081 case BPF_SUB: 8082 scalar32_min_max_sub(dst_reg, &src_reg); 8083 scalar_min_max_sub(dst_reg, &src_reg); 8084 dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off); 8085 break; 8086 case BPF_MUL: 8087 dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off); 8088 scalar32_min_max_mul(dst_reg, &src_reg); 8089 scalar_min_max_mul(dst_reg, &src_reg); 8090 break; 8091 case BPF_AND: 8092 dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off); 8093 scalar32_min_max_and(dst_reg, &src_reg); 8094 scalar_min_max_and(dst_reg, &src_reg); 8095 break; 8096 case BPF_OR: 8097 dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off); 8098 scalar32_min_max_or(dst_reg, &src_reg); 8099 scalar_min_max_or(dst_reg, &src_reg); 8100 break; 8101 case BPF_XOR: 8102 dst_reg->var_off = tnum_xor(dst_reg->var_off, src_reg.var_off); 8103 scalar32_min_max_xor(dst_reg, &src_reg); 8104 scalar_min_max_xor(dst_reg, &src_reg); 8105 break; 8106 case BPF_LSH: 8107 if (umax_val >= insn_bitness) { 8108 /* Shifts greater than 31 or 63 are undefined. 8109 * This includes shifts by a negative number. 8110 */ 8111 mark_reg_unknown(env, regs, insn->dst_reg); 8112 break; 8113 } 8114 if (alu32) 8115 scalar32_min_max_lsh(dst_reg, &src_reg); 8116 else 8117 scalar_min_max_lsh(dst_reg, &src_reg); 8118 break; 8119 case BPF_RSH: 8120 if (umax_val >= insn_bitness) { 8121 /* Shifts greater than 31 or 63 are undefined. 8122 * This includes shifts by a negative number. 8123 */ 8124 mark_reg_unknown(env, regs, insn->dst_reg); 8125 break; 8126 } 8127 if (alu32) 8128 scalar32_min_max_rsh(dst_reg, &src_reg); 8129 else 8130 scalar_min_max_rsh(dst_reg, &src_reg); 8131 break; 8132 case BPF_ARSH: 8133 if (umax_val >= insn_bitness) { 8134 /* Shifts greater than 31 or 63 are undefined. 8135 * This includes shifts by a negative number. 8136 */ 8137 mark_reg_unknown(env, regs, insn->dst_reg); 8138 break; 8139 } 8140 if (alu32) 8141 scalar32_min_max_arsh(dst_reg, &src_reg); 8142 else 8143 scalar_min_max_arsh(dst_reg, &src_reg); 8144 break; 8145 default: 8146 mark_reg_unknown(env, regs, insn->dst_reg); 8147 break; 8148 } 8149 8150 /* ALU32 ops are zero extended into 64bit register */ 8151 if (alu32) 8152 zext_32_to_64(dst_reg); 8153 8154 __update_reg_bounds(dst_reg); 8155 __reg_deduce_bounds(dst_reg); 8156 __reg_bound_offset(dst_reg); 8157 return 0; 8158 } 8159 8160 /* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max 8161 * and var_off. 
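 *
 * Rough dispatch sketch (illustrative, register numbers arbitrary):
 *   r1 (pointer) += r2 (scalar)  -> adjust_ptr_min_max_vals()
 *   r2 (scalar)  += r1 (pointer) -> adjust_ptr_min_max_vals() with the
 *                                   pointer/scalar roles swapped
 *   r2 (scalar)  &= r3 (scalar)  -> adjust_scalar_min_max_vals()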
8162 */
8163 static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
8164 struct bpf_insn *insn)
8165 {
8166 struct bpf_verifier_state *vstate = env->cur_state;
8167 struct bpf_func_state *state = vstate->frame[vstate->curframe];
8168 struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
8169 struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
8170 u8 opcode = BPF_OP(insn->code);
8171 int err;
8172
8173 dst_reg = &regs[insn->dst_reg];
8174 src_reg = NULL;
8175 if (dst_reg->type != SCALAR_VALUE)
8176 ptr_reg = dst_reg;
8177 else
8178 /* Make sure ID is cleared otherwise dst_reg min/max could be
8179 * incorrectly propagated into other registers by find_equal_scalars()
8180 */
8181 dst_reg->id = 0;
8182 if (BPF_SRC(insn->code) == BPF_X) {
8183 src_reg = &regs[insn->src_reg];
8184 if (src_reg->type != SCALAR_VALUE) {
8185 if (dst_reg->type != SCALAR_VALUE) {
8186 /* Combining two pointers by any ALU op yields
8187 * an arbitrary scalar. Disallow all math except
8188 * pointer subtraction
8189 */
8190 if (opcode == BPF_SUB && env->allow_ptr_leaks) {
8191 mark_reg_unknown(env, regs, insn->dst_reg);
8192 return 0;
8193 }
8194 verbose(env, "R%d pointer %s pointer prohibited\n",
8195 insn->dst_reg,
8196 bpf_alu_string[opcode >> 4]);
8197 return -EACCES;
8198 } else {
8199 /* scalar += pointer
8200 * This is legal, but we have to reverse our
8201 * src/dest handling in computing the range
8202 */
8203 err = mark_chain_precision(env, insn->dst_reg);
8204 if (err)
8205 return err;
8206 return adjust_ptr_min_max_vals(env, insn,
8207 src_reg, dst_reg);
8208 }
8209 } else if (ptr_reg) {
8210 /* pointer += scalar */
8211 err = mark_chain_precision(env, insn->src_reg);
8212 if (err)
8213 return err;
8214 return adjust_ptr_min_max_vals(env, insn,
8215 dst_reg, src_reg);
8216 }
8217 } else {
8218 /* Pretend the src is a reg with a known value, since we only
8219 * need to be able to read from this state.
8220 */ 8221 off_reg.type = SCALAR_VALUE; 8222 __mark_reg_known(&off_reg, insn->imm); 8223 src_reg = &off_reg; 8224 if (ptr_reg) /* pointer += K */ 8225 return adjust_ptr_min_max_vals(env, insn, 8226 ptr_reg, src_reg); 8227 } 8228 8229 /* Got here implies adding two SCALAR_VALUEs */ 8230 if (WARN_ON_ONCE(ptr_reg)) { 8231 print_verifier_state(env, state); 8232 verbose(env, "verifier internal error: unexpected ptr_reg\n"); 8233 return -EINVAL; 8234 } 8235 if (WARN_ON(!src_reg)) { 8236 print_verifier_state(env, state); 8237 verbose(env, "verifier internal error: no src_reg\n"); 8238 return -EINVAL; 8239 } 8240 return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg); 8241 } 8242 8243 /* check validity of 32-bit and 64-bit arithmetic operations */ 8244 static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) 8245 { 8246 struct bpf_reg_state *regs = cur_regs(env); 8247 u8 opcode = BPF_OP(insn->code); 8248 int err; 8249 8250 if (opcode == BPF_END || opcode == BPF_NEG) { 8251 if (opcode == BPF_NEG) { 8252 if (BPF_SRC(insn->code) != 0 || 8253 insn->src_reg != BPF_REG_0 || 8254 insn->off != 0 || insn->imm != 0) { 8255 verbose(env, "BPF_NEG uses reserved fields\n"); 8256 return -EINVAL; 8257 } 8258 } else { 8259 if (insn->src_reg != BPF_REG_0 || insn->off != 0 || 8260 (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) || 8261 BPF_CLASS(insn->code) == BPF_ALU64) { 8262 verbose(env, "BPF_END uses reserved fields\n"); 8263 return -EINVAL; 8264 } 8265 } 8266 8267 /* check src operand */ 8268 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 8269 if (err) 8270 return err; 8271 8272 if (is_pointer_value(env, insn->dst_reg)) { 8273 verbose(env, "R%d pointer arithmetic prohibited\n", 8274 insn->dst_reg); 8275 return -EACCES; 8276 } 8277 8278 /* check dest operand */ 8279 err = check_reg_arg(env, insn->dst_reg, DST_OP); 8280 if (err) 8281 return err; 8282 8283 } else if (opcode == BPF_MOV) { 8284 8285 if (BPF_SRC(insn->code) == BPF_X) { 8286 if (insn->imm != 0 || insn->off != 0) { 8287 verbose(env, "BPF_MOV uses reserved fields\n"); 8288 return -EINVAL; 8289 } 8290 8291 /* check src operand */ 8292 err = check_reg_arg(env, insn->src_reg, SRC_OP); 8293 if (err) 8294 return err; 8295 } else { 8296 if (insn->src_reg != BPF_REG_0 || insn->off != 0) { 8297 verbose(env, "BPF_MOV uses reserved fields\n"); 8298 return -EINVAL; 8299 } 8300 } 8301 8302 /* check dest operand, mark as required later */ 8303 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); 8304 if (err) 8305 return err; 8306 8307 if (BPF_SRC(insn->code) == BPF_X) { 8308 struct bpf_reg_state *src_reg = regs + insn->src_reg; 8309 struct bpf_reg_state *dst_reg = regs + insn->dst_reg; 8310 8311 if (BPF_CLASS(insn->code) == BPF_ALU64) { 8312 /* case: R1 = R2 8313 * copy register state to dest reg 8314 */ 8315 if (src_reg->type == SCALAR_VALUE && !src_reg->id) 8316 /* Assign src and dst registers the same ID 8317 * that will be used by find_equal_scalars() 8318 * to propagate min/max range. 
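 *
 * e.g. (illustrative) after 'r2 = r1' both registers carry this id, so
 * when a later conditional jump narrows r2's bounds in one branch,
 * find_equal_scalars() copies the narrowed state to r1 as well.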
8319 */ 8320 src_reg->id = ++env->id_gen; 8321 *dst_reg = *src_reg; 8322 dst_reg->live |= REG_LIVE_WRITTEN; 8323 dst_reg->subreg_def = DEF_NOT_SUBREG; 8324 } else { 8325 /* R1 = (u32) R2 */ 8326 if (is_pointer_value(env, insn->src_reg)) { 8327 verbose(env, 8328 "R%d partial copy of pointer\n", 8329 insn->src_reg); 8330 return -EACCES; 8331 } else if (src_reg->type == SCALAR_VALUE) { 8332 *dst_reg = *src_reg; 8333 /* Make sure ID is cleared otherwise 8334 * dst_reg min/max could be incorrectly 8335 * propagated into src_reg by find_equal_scalars() 8336 */ 8337 dst_reg->id = 0; 8338 dst_reg->live |= REG_LIVE_WRITTEN; 8339 dst_reg->subreg_def = env->insn_idx + 1; 8340 } else { 8341 mark_reg_unknown(env, regs, 8342 insn->dst_reg); 8343 } 8344 zext_32_to_64(dst_reg); 8345 } 8346 } else { 8347 /* case: R = imm 8348 * remember the value we stored into this reg 8349 */ 8350 /* clear any state __mark_reg_known doesn't set */ 8351 mark_reg_unknown(env, regs, insn->dst_reg); 8352 regs[insn->dst_reg].type = SCALAR_VALUE; 8353 if (BPF_CLASS(insn->code) == BPF_ALU64) { 8354 __mark_reg_known(regs + insn->dst_reg, 8355 insn->imm); 8356 } else { 8357 __mark_reg_known(regs + insn->dst_reg, 8358 (u32)insn->imm); 8359 } 8360 } 8361 8362 } else if (opcode > BPF_END) { 8363 verbose(env, "invalid BPF_ALU opcode %x\n", opcode); 8364 return -EINVAL; 8365 8366 } else { /* all other ALU ops: and, sub, xor, add, ... */ 8367 8368 if (BPF_SRC(insn->code) == BPF_X) { 8369 if (insn->imm != 0 || insn->off != 0) { 8370 verbose(env, "BPF_ALU uses reserved fields\n"); 8371 return -EINVAL; 8372 } 8373 /* check src1 operand */ 8374 err = check_reg_arg(env, insn->src_reg, SRC_OP); 8375 if (err) 8376 return err; 8377 } else { 8378 if (insn->src_reg != BPF_REG_0 || insn->off != 0) { 8379 verbose(env, "BPF_ALU uses reserved fields\n"); 8380 return -EINVAL; 8381 } 8382 } 8383 8384 /* check src2 operand */ 8385 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 8386 if (err) 8387 return err; 8388 8389 if ((opcode == BPF_MOD || opcode == BPF_DIV) && 8390 BPF_SRC(insn->code) == BPF_K && insn->imm == 0) { 8391 verbose(env, "div by zero\n"); 8392 return -EINVAL; 8393 } 8394 8395 if ((opcode == BPF_LSH || opcode == BPF_RSH || 8396 opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { 8397 int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 
64 : 32; 8398 8399 if (insn->imm < 0 || insn->imm >= size) { 8400 verbose(env, "invalid shift %d\n", insn->imm); 8401 return -EINVAL; 8402 } 8403 } 8404 8405 /* check dest operand */ 8406 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); 8407 if (err) 8408 return err; 8409 8410 return adjust_reg_min_max_vals(env, insn); 8411 } 8412 8413 return 0; 8414 } 8415 8416 static void __find_good_pkt_pointers(struct bpf_func_state *state, 8417 struct bpf_reg_state *dst_reg, 8418 enum bpf_reg_type type, int new_range) 8419 { 8420 struct bpf_reg_state *reg; 8421 int i; 8422 8423 for (i = 0; i < MAX_BPF_REG; i++) { 8424 reg = &state->regs[i]; 8425 if (reg->type == type && reg->id == dst_reg->id) 8426 /* keep the maximum range already checked */ 8427 reg->range = max(reg->range, new_range); 8428 } 8429 8430 bpf_for_each_spilled_reg(i, state, reg) { 8431 if (!reg) 8432 continue; 8433 if (reg->type == type && reg->id == dst_reg->id) 8434 reg->range = max(reg->range, new_range); 8435 } 8436 } 8437 8438 static void find_good_pkt_pointers(struct bpf_verifier_state *vstate, 8439 struct bpf_reg_state *dst_reg, 8440 enum bpf_reg_type type, 8441 bool range_right_open) 8442 { 8443 int new_range, i; 8444 8445 if (dst_reg->off < 0 || 8446 (dst_reg->off == 0 && range_right_open)) 8447 /* This doesn't give us any range */ 8448 return; 8449 8450 if (dst_reg->umax_value > MAX_PACKET_OFF || 8451 dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF) 8452 /* Risk of overflow. For instance, ptr + (1<<63) may be less 8453 * than pkt_end, but that's because it's also less than pkt. 8454 */ 8455 return; 8456 8457 new_range = dst_reg->off; 8458 if (range_right_open) 8459 new_range--; 8460 8461 /* Examples for register markings: 8462 * 8463 * pkt_data in dst register: 8464 * 8465 * r2 = r3; 8466 * r2 += 8; 8467 * if (r2 > pkt_end) goto <handle exception> 8468 * <access okay> 8469 * 8470 * r2 = r3; 8471 * r2 += 8; 8472 * if (r2 < pkt_end) goto <access okay> 8473 * <handle exception> 8474 * 8475 * Where: 8476 * r2 == dst_reg, pkt_end == src_reg 8477 * r2=pkt(id=n,off=8,r=0) 8478 * r3=pkt(id=n,off=0,r=0) 8479 * 8480 * pkt_data in src register: 8481 * 8482 * r2 = r3; 8483 * r2 += 8; 8484 * if (pkt_end >= r2) goto <access okay> 8485 * <handle exception> 8486 * 8487 * r2 = r3; 8488 * r2 += 8; 8489 * if (pkt_end <= r2) goto <handle exception> 8490 * <access okay> 8491 * 8492 * Where: 8493 * pkt_end == dst_reg, r2 == src_reg 8494 * r2=pkt(id=n,off=8,r=0) 8495 * r3=pkt(id=n,off=0,r=0) 8496 * 8497 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8) 8498 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8) 8499 * and [r3, r3 + 8-1) respectively is safe to access depending on 8500 * the check. 8501 */ 8502 8503 /* If our ids match, then we must have the same max_value. And we 8504 * don't care about the other reg's fixed offset, since if it's too big 8505 * the range won't allow anything. 8506 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16. 
8507 */ 8508 for (i = 0; i <= vstate->curframe; i++) 8509 __find_good_pkt_pointers(vstate->frame[i], dst_reg, type, 8510 new_range); 8511 } 8512 8513 static int is_branch32_taken(struct bpf_reg_state *reg, u32 val, u8 opcode) 8514 { 8515 struct tnum subreg = tnum_subreg(reg->var_off); 8516 s32 sval = (s32)val; 8517 8518 switch (opcode) { 8519 case BPF_JEQ: 8520 if (tnum_is_const(subreg)) 8521 return !!tnum_equals_const(subreg, val); 8522 break; 8523 case BPF_JNE: 8524 if (tnum_is_const(subreg)) 8525 return !tnum_equals_const(subreg, val); 8526 break; 8527 case BPF_JSET: 8528 if ((~subreg.mask & subreg.value) & val) 8529 return 1; 8530 if (!((subreg.mask | subreg.value) & val)) 8531 return 0; 8532 break; 8533 case BPF_JGT: 8534 if (reg->u32_min_value > val) 8535 return 1; 8536 else if (reg->u32_max_value <= val) 8537 return 0; 8538 break; 8539 case BPF_JSGT: 8540 if (reg->s32_min_value > sval) 8541 return 1; 8542 else if (reg->s32_max_value <= sval) 8543 return 0; 8544 break; 8545 case BPF_JLT: 8546 if (reg->u32_max_value < val) 8547 return 1; 8548 else if (reg->u32_min_value >= val) 8549 return 0; 8550 break; 8551 case BPF_JSLT: 8552 if (reg->s32_max_value < sval) 8553 return 1; 8554 else if (reg->s32_min_value >= sval) 8555 return 0; 8556 break; 8557 case BPF_JGE: 8558 if (reg->u32_min_value >= val) 8559 return 1; 8560 else if (reg->u32_max_value < val) 8561 return 0; 8562 break; 8563 case BPF_JSGE: 8564 if (reg->s32_min_value >= sval) 8565 return 1; 8566 else if (reg->s32_max_value < sval) 8567 return 0; 8568 break; 8569 case BPF_JLE: 8570 if (reg->u32_max_value <= val) 8571 return 1; 8572 else if (reg->u32_min_value > val) 8573 return 0; 8574 break; 8575 case BPF_JSLE: 8576 if (reg->s32_max_value <= sval) 8577 return 1; 8578 else if (reg->s32_min_value > sval) 8579 return 0; 8580 break; 8581 } 8582 8583 return -1; 8584 } 8585 8586 8587 static int is_branch64_taken(struct bpf_reg_state *reg, u64 val, u8 opcode) 8588 { 8589 s64 sval = (s64)val; 8590 8591 switch (opcode) { 8592 case BPF_JEQ: 8593 if (tnum_is_const(reg->var_off)) 8594 return !!tnum_equals_const(reg->var_off, val); 8595 break; 8596 case BPF_JNE: 8597 if (tnum_is_const(reg->var_off)) 8598 return !tnum_equals_const(reg->var_off, val); 8599 break; 8600 case BPF_JSET: 8601 if ((~reg->var_off.mask & reg->var_off.value) & val) 8602 return 1; 8603 if (!((reg->var_off.mask | reg->var_off.value) & val)) 8604 return 0; 8605 break; 8606 case BPF_JGT: 8607 if (reg->umin_value > val) 8608 return 1; 8609 else if (reg->umax_value <= val) 8610 return 0; 8611 break; 8612 case BPF_JSGT: 8613 if (reg->smin_value > sval) 8614 return 1; 8615 else if (reg->smax_value <= sval) 8616 return 0; 8617 break; 8618 case BPF_JLT: 8619 if (reg->umax_value < val) 8620 return 1; 8621 else if (reg->umin_value >= val) 8622 return 0; 8623 break; 8624 case BPF_JSLT: 8625 if (reg->smax_value < sval) 8626 return 1; 8627 else if (reg->smin_value >= sval) 8628 return 0; 8629 break; 8630 case BPF_JGE: 8631 if (reg->umin_value >= val) 8632 return 1; 8633 else if (reg->umax_value < val) 8634 return 0; 8635 break; 8636 case BPF_JSGE: 8637 if (reg->smin_value >= sval) 8638 return 1; 8639 else if (reg->smax_value < sval) 8640 return 0; 8641 break; 8642 case BPF_JLE: 8643 if (reg->umax_value <= val) 8644 return 1; 8645 else if (reg->umin_value > val) 8646 return 0; 8647 break; 8648 case BPF_JSLE: 8649 if (reg->smax_value <= sval) 8650 return 1; 8651 else if (reg->smin_value > sval) 8652 return 0; 8653 break; 8654 } 8655 8656 return -1; 8657 } 8658 8659 /* compute branch 
direction of the expression "if (reg opcode val) goto target;" 8660 * and return: 8661 * 1 - branch will be taken and "goto target" will be executed 8662 * 0 - branch will not be taken and fall-through to next insn 8663 * -1 - unknown. Example: "if (reg < 5)" is unknown when register value 8664 * range [0,10] 8665 */ 8666 static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode, 8667 bool is_jmp32) 8668 { 8669 if (__is_pointer_value(false, reg)) { 8670 if (!reg_type_not_null(reg->type)) 8671 return -1; 8672 8673 /* If pointer is valid tests against zero will fail so we can 8674 * use this to direct branch taken. 8675 */ 8676 if (val != 0) 8677 return -1; 8678 8679 switch (opcode) { 8680 case BPF_JEQ: 8681 return 0; 8682 case BPF_JNE: 8683 return 1; 8684 default: 8685 return -1; 8686 } 8687 } 8688 8689 if (is_jmp32) 8690 return is_branch32_taken(reg, val, opcode); 8691 return is_branch64_taken(reg, val, opcode); 8692 } 8693 8694 static int flip_opcode(u32 opcode) 8695 { 8696 /* How can we transform "a <op> b" into "b <op> a"? */ 8697 static const u8 opcode_flip[16] = { 8698 /* these stay the same */ 8699 [BPF_JEQ >> 4] = BPF_JEQ, 8700 [BPF_JNE >> 4] = BPF_JNE, 8701 [BPF_JSET >> 4] = BPF_JSET, 8702 /* these swap "lesser" and "greater" (L and G in the opcodes) */ 8703 [BPF_JGE >> 4] = BPF_JLE, 8704 [BPF_JGT >> 4] = BPF_JLT, 8705 [BPF_JLE >> 4] = BPF_JGE, 8706 [BPF_JLT >> 4] = BPF_JGT, 8707 [BPF_JSGE >> 4] = BPF_JSLE, 8708 [BPF_JSGT >> 4] = BPF_JSLT, 8709 [BPF_JSLE >> 4] = BPF_JSGE, 8710 [BPF_JSLT >> 4] = BPF_JSGT 8711 }; 8712 return opcode_flip[opcode >> 4]; 8713 } 8714 8715 static int is_pkt_ptr_branch_taken(struct bpf_reg_state *dst_reg, 8716 struct bpf_reg_state *src_reg, 8717 u8 opcode) 8718 { 8719 struct bpf_reg_state *pkt; 8720 8721 if (src_reg->type == PTR_TO_PACKET_END) { 8722 pkt = dst_reg; 8723 } else if (dst_reg->type == PTR_TO_PACKET_END) { 8724 pkt = src_reg; 8725 opcode = flip_opcode(opcode); 8726 } else { 8727 return -1; 8728 } 8729 8730 if (pkt->range >= 0) 8731 return -1; 8732 8733 switch (opcode) { 8734 case BPF_JLE: 8735 /* pkt <= pkt_end */ 8736 fallthrough; 8737 case BPF_JGT: 8738 /* pkt > pkt_end */ 8739 if (pkt->range == BEYOND_PKT_END) 8740 /* pkt has at last one extra byte beyond pkt_end */ 8741 return opcode == BPF_JGT; 8742 break; 8743 case BPF_JLT: 8744 /* pkt < pkt_end */ 8745 fallthrough; 8746 case BPF_JGE: 8747 /* pkt >= pkt_end */ 8748 if (pkt->range == BEYOND_PKT_END || pkt->range == AT_PKT_END) 8749 return opcode == BPF_JGE; 8750 break; 8751 } 8752 return -1; 8753 } 8754 8755 /* Adjusts the register min/max values in the case that the dst_reg is the 8756 * variable register that we are working on, and src_reg is a constant or we're 8757 * simply doing a BPF_K check. 8758 * In JEQ/JNE cases we also adjust the var_off values. 8759 */ 8760 static void reg_set_min_max(struct bpf_reg_state *true_reg, 8761 struct bpf_reg_state *false_reg, 8762 u64 val, u32 val32, 8763 u8 opcode, bool is_jmp32) 8764 { 8765 struct tnum false_32off = tnum_subreg(false_reg->var_off); 8766 struct tnum false_64off = false_reg->var_off; 8767 struct tnum true_32off = tnum_subreg(true_reg->var_off); 8768 struct tnum true_64off = true_reg->var_off; 8769 s64 sval = (s64)val; 8770 s32 sval32 = (s32)val32; 8771 8772 /* If the dst_reg is a pointer, we can't learn anything about its 8773 * variable offset from the compare (unless src_reg were a pointer into 8774 * the same object, but we don't bother with that. 
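 * For a scalar dst_reg, e.g. 'if r1 > 5' against an immediate
 * (illustrative, assuming no prior bounds): the BPF_JGT case below raises
 * the true branch's umin to 6 and lowers the false branch's umax to 5.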
8775 * Since false_reg and true_reg have the same type by construction, we 8776 * only need to check one of them for pointerness. 8777 */ 8778 if (__is_pointer_value(false, false_reg)) 8779 return; 8780 8781 switch (opcode) { 8782 case BPF_JEQ: 8783 case BPF_JNE: 8784 { 8785 struct bpf_reg_state *reg = 8786 opcode == BPF_JEQ ? true_reg : false_reg; 8787 8788 /* JEQ/JNE comparison doesn't change the register equivalence. 8789 * r1 = r2; 8790 * if (r1 == 42) goto label; 8791 * ... 8792 * label: // here both r1 and r2 are known to be 42. 8793 * 8794 * Hence when marking register as known preserve it's ID. 8795 */ 8796 if (is_jmp32) 8797 __mark_reg32_known(reg, val32); 8798 else 8799 ___mark_reg_known(reg, val); 8800 break; 8801 } 8802 case BPF_JSET: 8803 if (is_jmp32) { 8804 false_32off = tnum_and(false_32off, tnum_const(~val32)); 8805 if (is_power_of_2(val32)) 8806 true_32off = tnum_or(true_32off, 8807 tnum_const(val32)); 8808 } else { 8809 false_64off = tnum_and(false_64off, tnum_const(~val)); 8810 if (is_power_of_2(val)) 8811 true_64off = tnum_or(true_64off, 8812 tnum_const(val)); 8813 } 8814 break; 8815 case BPF_JGE: 8816 case BPF_JGT: 8817 { 8818 if (is_jmp32) { 8819 u32 false_umax = opcode == BPF_JGT ? val32 : val32 - 1; 8820 u32 true_umin = opcode == BPF_JGT ? val32 + 1 : val32; 8821 8822 false_reg->u32_max_value = min(false_reg->u32_max_value, 8823 false_umax); 8824 true_reg->u32_min_value = max(true_reg->u32_min_value, 8825 true_umin); 8826 } else { 8827 u64 false_umax = opcode == BPF_JGT ? val : val - 1; 8828 u64 true_umin = opcode == BPF_JGT ? val + 1 : val; 8829 8830 false_reg->umax_value = min(false_reg->umax_value, false_umax); 8831 true_reg->umin_value = max(true_reg->umin_value, true_umin); 8832 } 8833 break; 8834 } 8835 case BPF_JSGE: 8836 case BPF_JSGT: 8837 { 8838 if (is_jmp32) { 8839 s32 false_smax = opcode == BPF_JSGT ? sval32 : sval32 - 1; 8840 s32 true_smin = opcode == BPF_JSGT ? sval32 + 1 : sval32; 8841 8842 false_reg->s32_max_value = min(false_reg->s32_max_value, false_smax); 8843 true_reg->s32_min_value = max(true_reg->s32_min_value, true_smin); 8844 } else { 8845 s64 false_smax = opcode == BPF_JSGT ? sval : sval - 1; 8846 s64 true_smin = opcode == BPF_JSGT ? sval + 1 : sval; 8847 8848 false_reg->smax_value = min(false_reg->smax_value, false_smax); 8849 true_reg->smin_value = max(true_reg->smin_value, true_smin); 8850 } 8851 break; 8852 } 8853 case BPF_JLE: 8854 case BPF_JLT: 8855 { 8856 if (is_jmp32) { 8857 u32 false_umin = opcode == BPF_JLT ? val32 : val32 + 1; 8858 u32 true_umax = opcode == BPF_JLT ? val32 - 1 : val32; 8859 8860 false_reg->u32_min_value = max(false_reg->u32_min_value, 8861 false_umin); 8862 true_reg->u32_max_value = min(true_reg->u32_max_value, 8863 true_umax); 8864 } else { 8865 u64 false_umin = opcode == BPF_JLT ? val : val + 1; 8866 u64 true_umax = opcode == BPF_JLT ? val - 1 : val; 8867 8868 false_reg->umin_value = max(false_reg->umin_value, false_umin); 8869 true_reg->umax_value = min(true_reg->umax_value, true_umax); 8870 } 8871 break; 8872 } 8873 case BPF_JSLE: 8874 case BPF_JSLT: 8875 { 8876 if (is_jmp32) { 8877 s32 false_smin = opcode == BPF_JSLT ? sval32 : sval32 + 1; 8878 s32 true_smax = opcode == BPF_JSLT ? sval32 - 1 : sval32; 8879 8880 false_reg->s32_min_value = max(false_reg->s32_min_value, false_smin); 8881 true_reg->s32_max_value = min(true_reg->s32_max_value, true_smax); 8882 } else { 8883 s64 false_smin = opcode == BPF_JSLT ? sval : sval + 1; 8884 s64 true_smax = opcode == BPF_JSLT ? 
sval - 1 : sval; 8885 8886 false_reg->smin_value = max(false_reg->smin_value, false_smin); 8887 true_reg->smax_value = min(true_reg->smax_value, true_smax); 8888 } 8889 break; 8890 } 8891 default: 8892 return; 8893 } 8894 8895 if (is_jmp32) { 8896 false_reg->var_off = tnum_or(tnum_clear_subreg(false_64off), 8897 tnum_subreg(false_32off)); 8898 true_reg->var_off = tnum_or(tnum_clear_subreg(true_64off), 8899 tnum_subreg(true_32off)); 8900 __reg_combine_32_into_64(false_reg); 8901 __reg_combine_32_into_64(true_reg); 8902 } else { 8903 false_reg->var_off = false_64off; 8904 true_reg->var_off = true_64off; 8905 __reg_combine_64_into_32(false_reg); 8906 __reg_combine_64_into_32(true_reg); 8907 } 8908 } 8909 8910 /* Same as above, but for the case that dst_reg holds a constant and src_reg is 8911 * the variable reg. 8912 */ 8913 static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, 8914 struct bpf_reg_state *false_reg, 8915 u64 val, u32 val32, 8916 u8 opcode, bool is_jmp32) 8917 { 8918 opcode = flip_opcode(opcode); 8919 /* This uses zero as "not present in table"; luckily the zero opcode, 8920 * BPF_JA, can't get here. 8921 */ 8922 if (opcode) 8923 reg_set_min_max(true_reg, false_reg, val, val32, opcode, is_jmp32); 8924 } 8925 8926 /* Regs are known to be equal, so intersect their min/max/var_off */ 8927 static void __reg_combine_min_max(struct bpf_reg_state *src_reg, 8928 struct bpf_reg_state *dst_reg) 8929 { 8930 src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value, 8931 dst_reg->umin_value); 8932 src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value, 8933 dst_reg->umax_value); 8934 src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value, 8935 dst_reg->smin_value); 8936 src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value, 8937 dst_reg->smax_value); 8938 src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off, 8939 dst_reg->var_off); 8940 /* We might have learned new bounds from the var_off. */ 8941 __update_reg_bounds(src_reg); 8942 __update_reg_bounds(dst_reg); 8943 /* We might have learned something about the sign bit. */ 8944 __reg_deduce_bounds(src_reg); 8945 __reg_deduce_bounds(dst_reg); 8946 /* We might have learned some bits from the bounds. */ 8947 __reg_bound_offset(src_reg); 8948 __reg_bound_offset(dst_reg); 8949 /* Intersecting with the old var_off might have improved our bounds 8950 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), 8951 * then new var_off is (0; 0x7f...fc) which improves our umax. 8952 */ 8953 __update_reg_bounds(src_reg); 8954 __update_reg_bounds(dst_reg); 8955 } 8956 8957 static void reg_combine_min_max(struct bpf_reg_state *true_src, 8958 struct bpf_reg_state *true_dst, 8959 struct bpf_reg_state *false_src, 8960 struct bpf_reg_state *false_dst, 8961 u8 opcode) 8962 { 8963 switch (opcode) { 8964 case BPF_JEQ: 8965 __reg_combine_min_max(true_src, true_dst); 8966 break; 8967 case BPF_JNE: 8968 __reg_combine_min_max(false_src, false_dst); 8969 break; 8970 } 8971 } 8972 8973 static void mark_ptr_or_null_reg(struct bpf_func_state *state, 8974 struct bpf_reg_state *reg, u32 id, 8975 bool is_null) 8976 { 8977 if (reg_type_may_be_null(reg->type) && reg->id == id && 8978 !WARN_ON_ONCE(!reg->id)) { 8979 /* Old offset (both fixed and variable parts) should 8980 * have been known-zero, because we don't allow pointer 8981 * arithmetic on pointers that might be NULL. 
8982 */ 8983 if (WARN_ON_ONCE(reg->smin_value || reg->smax_value || 8984 !tnum_equals_const(reg->var_off, 0) || 8985 reg->off)) { 8986 __mark_reg_known_zero(reg); 8987 reg->off = 0; 8988 } 8989 if (is_null) { 8990 reg->type = SCALAR_VALUE; 8991 /* We don't need id and ref_obj_id from this point 8992 * onwards anymore, thus we should better reset it, 8993 * so that state pruning has chances to take effect. 8994 */ 8995 reg->id = 0; 8996 reg->ref_obj_id = 0; 8997 8998 return; 8999 } 9000 9001 mark_ptr_not_null_reg(reg); 9002 9003 if (!reg_may_point_to_spin_lock(reg)) { 9004 /* For not-NULL ptr, reg->ref_obj_id will be reset 9005 * in release_reg_references(). 9006 * 9007 * reg->id is still used by spin_lock ptr. Other 9008 * than spin_lock ptr type, reg->id can be reset. 9009 */ 9010 reg->id = 0; 9011 } 9012 } 9013 } 9014 9015 static void __mark_ptr_or_null_regs(struct bpf_func_state *state, u32 id, 9016 bool is_null) 9017 { 9018 struct bpf_reg_state *reg; 9019 int i; 9020 9021 for (i = 0; i < MAX_BPF_REG; i++) 9022 mark_ptr_or_null_reg(state, &state->regs[i], id, is_null); 9023 9024 bpf_for_each_spilled_reg(i, state, reg) { 9025 if (!reg) 9026 continue; 9027 mark_ptr_or_null_reg(state, reg, id, is_null); 9028 } 9029 } 9030 9031 /* The logic is similar to find_good_pkt_pointers(), both could eventually 9032 * be folded together at some point. 9033 */ 9034 static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno, 9035 bool is_null) 9036 { 9037 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 9038 struct bpf_reg_state *regs = state->regs; 9039 u32 ref_obj_id = regs[regno].ref_obj_id; 9040 u32 id = regs[regno].id; 9041 int i; 9042 9043 if (ref_obj_id && ref_obj_id == id && is_null) 9044 /* regs[regno] is in the " == NULL" branch. 9045 * No one could have freed the reference state before 9046 * doing the NULL check. 9047 */ 9048 WARN_ON_ONCE(release_reference_state(state, id)); 9049 9050 for (i = 0; i <= vstate->curframe; i++) 9051 __mark_ptr_or_null_regs(vstate->frame[i], id, is_null); 9052 } 9053 9054 static bool try_match_pkt_pointers(const struct bpf_insn *insn, 9055 struct bpf_reg_state *dst_reg, 9056 struct bpf_reg_state *src_reg, 9057 struct bpf_verifier_state *this_branch, 9058 struct bpf_verifier_state *other_branch) 9059 { 9060 if (BPF_SRC(insn->code) != BPF_X) 9061 return false; 9062 9063 /* Pointers are always 64-bit. 
*/ 9064 if (BPF_CLASS(insn->code) == BPF_JMP32) 9065 return false; 9066 9067 switch (BPF_OP(insn->code)) { 9068 case BPF_JGT: 9069 if ((dst_reg->type == PTR_TO_PACKET && 9070 src_reg->type == PTR_TO_PACKET_END) || 9071 (dst_reg->type == PTR_TO_PACKET_META && 9072 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 9073 /* pkt_data' > pkt_end, pkt_meta' > pkt_data */ 9074 find_good_pkt_pointers(this_branch, dst_reg, 9075 dst_reg->type, false); 9076 mark_pkt_end(other_branch, insn->dst_reg, true); 9077 } else if ((dst_reg->type == PTR_TO_PACKET_END && 9078 src_reg->type == PTR_TO_PACKET) || 9079 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && 9080 src_reg->type == PTR_TO_PACKET_META)) { 9081 /* pkt_end > pkt_data', pkt_data > pkt_meta' */ 9082 find_good_pkt_pointers(other_branch, src_reg, 9083 src_reg->type, true); 9084 mark_pkt_end(this_branch, insn->src_reg, false); 9085 } else { 9086 return false; 9087 } 9088 break; 9089 case BPF_JLT: 9090 if ((dst_reg->type == PTR_TO_PACKET && 9091 src_reg->type == PTR_TO_PACKET_END) || 9092 (dst_reg->type == PTR_TO_PACKET_META && 9093 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 9094 /* pkt_data' < pkt_end, pkt_meta' < pkt_data */ 9095 find_good_pkt_pointers(other_branch, dst_reg, 9096 dst_reg->type, true); 9097 mark_pkt_end(this_branch, insn->dst_reg, false); 9098 } else if ((dst_reg->type == PTR_TO_PACKET_END && 9099 src_reg->type == PTR_TO_PACKET) || 9100 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && 9101 src_reg->type == PTR_TO_PACKET_META)) { 9102 /* pkt_end < pkt_data', pkt_data > pkt_meta' */ 9103 find_good_pkt_pointers(this_branch, src_reg, 9104 src_reg->type, false); 9105 mark_pkt_end(other_branch, insn->src_reg, true); 9106 } else { 9107 return false; 9108 } 9109 break; 9110 case BPF_JGE: 9111 if ((dst_reg->type == PTR_TO_PACKET && 9112 src_reg->type == PTR_TO_PACKET_END) || 9113 (dst_reg->type == PTR_TO_PACKET_META && 9114 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 9115 /* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */ 9116 find_good_pkt_pointers(this_branch, dst_reg, 9117 dst_reg->type, true); 9118 mark_pkt_end(other_branch, insn->dst_reg, false); 9119 } else if ((dst_reg->type == PTR_TO_PACKET_END && 9120 src_reg->type == PTR_TO_PACKET) || 9121 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && 9122 src_reg->type == PTR_TO_PACKET_META)) { 9123 /* pkt_end >= pkt_data', pkt_data >= pkt_meta' */ 9124 find_good_pkt_pointers(other_branch, src_reg, 9125 src_reg->type, false); 9126 mark_pkt_end(this_branch, insn->src_reg, true); 9127 } else { 9128 return false; 9129 } 9130 break; 9131 case BPF_JLE: 9132 if ((dst_reg->type == PTR_TO_PACKET && 9133 src_reg->type == PTR_TO_PACKET_END) || 9134 (dst_reg->type == PTR_TO_PACKET_META && 9135 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 9136 /* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */ 9137 find_good_pkt_pointers(other_branch, dst_reg, 9138 dst_reg->type, false); 9139 mark_pkt_end(this_branch, insn->dst_reg, true); 9140 } else if ((dst_reg->type == PTR_TO_PACKET_END && 9141 src_reg->type == PTR_TO_PACKET) || 9142 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && 9143 src_reg->type == PTR_TO_PACKET_META)) { 9144 /* pkt_end <= pkt_data', pkt_data <= pkt_meta' */ 9145 find_good_pkt_pointers(this_branch, src_reg, 9146 src_reg->type, true); 9147 mark_pkt_end(other_branch, insn->src_reg, false); 9148 } else { 9149 return false; 9150 } 9151 break; 9152 default: 9153 return false; 9154 } 9155 9156 return true; 9157 } 9158 9159 static void find_equal_scalars(struct 
bpf_verifier_state *vstate, 9160 struct bpf_reg_state *known_reg) 9161 { 9162 struct bpf_func_state *state; 9163 struct bpf_reg_state *reg; 9164 int i, j; 9165 9166 for (i = 0; i <= vstate->curframe; i++) { 9167 state = vstate->frame[i]; 9168 for (j = 0; j < MAX_BPF_REG; j++) { 9169 reg = &state->regs[j]; 9170 if (reg->type == SCALAR_VALUE && reg->id == known_reg->id) 9171 *reg = *known_reg; 9172 } 9173 9174 bpf_for_each_spilled_reg(j, state, reg) { 9175 if (!reg) 9176 continue; 9177 if (reg->type == SCALAR_VALUE && reg->id == known_reg->id) 9178 *reg = *known_reg; 9179 } 9180 } 9181 } 9182 9183 static int check_cond_jmp_op(struct bpf_verifier_env *env, 9184 struct bpf_insn *insn, int *insn_idx) 9185 { 9186 struct bpf_verifier_state *this_branch = env->cur_state; 9187 struct bpf_verifier_state *other_branch; 9188 struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs; 9189 struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL; 9190 u8 opcode = BPF_OP(insn->code); 9191 bool is_jmp32; 9192 int pred = -1; 9193 int err; 9194 9195 /* Only conditional jumps are expected to reach here. */ 9196 if (opcode == BPF_JA || opcode > BPF_JSLE) { 9197 verbose(env, "invalid BPF_JMP/JMP32 opcode %x\n", opcode); 9198 return -EINVAL; 9199 } 9200 9201 if (BPF_SRC(insn->code) == BPF_X) { 9202 if (insn->imm != 0) { 9203 verbose(env, "BPF_JMP/JMP32 uses reserved fields\n"); 9204 return -EINVAL; 9205 } 9206 9207 /* check src1 operand */ 9208 err = check_reg_arg(env, insn->src_reg, SRC_OP); 9209 if (err) 9210 return err; 9211 9212 if (is_pointer_value(env, insn->src_reg)) { 9213 verbose(env, "R%d pointer comparison prohibited\n", 9214 insn->src_reg); 9215 return -EACCES; 9216 } 9217 src_reg = &regs[insn->src_reg]; 9218 } else { 9219 if (insn->src_reg != BPF_REG_0) { 9220 verbose(env, "BPF_JMP/JMP32 uses reserved fields\n"); 9221 return -EINVAL; 9222 } 9223 } 9224 9225 /* check src2 operand */ 9226 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 9227 if (err) 9228 return err; 9229 9230 dst_reg = &regs[insn->dst_reg]; 9231 is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; 9232 9233 if (BPF_SRC(insn->code) == BPF_K) { 9234 pred = is_branch_taken(dst_reg, insn->imm, opcode, is_jmp32); 9235 } else if (src_reg->type == SCALAR_VALUE && 9236 is_jmp32 && tnum_is_const(tnum_subreg(src_reg->var_off))) { 9237 pred = is_branch_taken(dst_reg, 9238 tnum_subreg(src_reg->var_off).value, 9239 opcode, 9240 is_jmp32); 9241 } else if (src_reg->type == SCALAR_VALUE && 9242 !is_jmp32 && tnum_is_const(src_reg->var_off)) { 9243 pred = is_branch_taken(dst_reg, 9244 src_reg->var_off.value, 9245 opcode, 9246 is_jmp32); 9247 } else if (reg_is_pkt_pointer_any(dst_reg) && 9248 reg_is_pkt_pointer_any(src_reg) && 9249 !is_jmp32) { 9250 pred = is_pkt_ptr_branch_taken(dst_reg, src_reg, opcode); 9251 } 9252 9253 if (pred >= 0) { 9254 /* If we get here with a dst_reg pointer type it is because 9255 * above is_branch_taken() special cased the 0 comparison. 9256 */ 9257 if (!__is_pointer_value(false, dst_reg)) 9258 err = mark_chain_precision(env, insn->dst_reg); 9259 if (BPF_SRC(insn->code) == BPF_X && !err && 9260 !__is_pointer_value(false, src_reg)) 9261 err = mark_chain_precision(env, insn->src_reg); 9262 if (err) 9263 return err; 9264 } 9265 9266 if (pred == 1) { 9267 /* Only follow the goto, ignore fall-through. If needed, push 9268 * the fall-through branch for simulation under speculative 9269 * execution.
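 * For example (hypothetical), when R2 is already known to be 5, "if r2 == 5
 * goto +4" has pred == 1: only the jump target is walked as the real path,
 * and the dead fall-through is only simulated speculatively.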
9270 */ 9271 if (!env->bypass_spec_v1 && 9272 !sanitize_speculative_path(env, insn, *insn_idx + 1, 9273 *insn_idx)) 9274 return -EFAULT; 9275 *insn_idx += insn->off; 9276 return 0; 9277 } else if (pred == 0) { 9278 /* Only follow the fall-through branch, since that's where the 9279 * program will go. If needed, push the goto branch for 9280 * simulation under speculative execution. 9281 */ 9282 if (!env->bypass_spec_v1 && 9283 !sanitize_speculative_path(env, insn, 9284 *insn_idx + insn->off + 1, 9285 *insn_idx)) 9286 return -EFAULT; 9287 return 0; 9288 } 9289 9290 other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx, 9291 false); 9292 if (!other_branch) 9293 return -EFAULT; 9294 other_branch_regs = other_branch->frame[other_branch->curframe]->regs; 9295 9296 /* detect if we are comparing against a constant value so we can adjust 9297 * our min/max values for our dst register. 9298 * this is only legit if both are scalars (or pointers to the same 9299 * object, I suppose, but we don't support that right now), because 9300 * otherwise the different base pointers mean the offsets aren't 9301 * comparable. 9302 */ 9303 if (BPF_SRC(insn->code) == BPF_X) { 9304 struct bpf_reg_state *src_reg = &regs[insn->src_reg]; 9305 9306 if (dst_reg->type == SCALAR_VALUE && 9307 src_reg->type == SCALAR_VALUE) { 9308 if (tnum_is_const(src_reg->var_off) || 9309 (is_jmp32 && 9310 tnum_is_const(tnum_subreg(src_reg->var_off)))) 9311 reg_set_min_max(&other_branch_regs[insn->dst_reg], 9312 dst_reg, 9313 src_reg->var_off.value, 9314 tnum_subreg(src_reg->var_off).value, 9315 opcode, is_jmp32); 9316 else if (tnum_is_const(dst_reg->var_off) || 9317 (is_jmp32 && 9318 tnum_is_const(tnum_subreg(dst_reg->var_off)))) 9319 reg_set_min_max_inv(&other_branch_regs[insn->src_reg], 9320 src_reg, 9321 dst_reg->var_off.value, 9322 tnum_subreg(dst_reg->var_off).value, 9323 opcode, is_jmp32); 9324 else if (!is_jmp32 && 9325 (opcode == BPF_JEQ || opcode == BPF_JNE)) 9326 /* Comparing for equality, we can combine knowledge */ 9327 reg_combine_min_max(&other_branch_regs[insn->src_reg], 9328 &other_branch_regs[insn->dst_reg], 9329 src_reg, dst_reg, opcode); 9330 if (src_reg->id && 9331 !WARN_ON_ONCE(src_reg->id != other_branch_regs[insn->src_reg].id)) { 9332 find_equal_scalars(this_branch, src_reg); 9333 find_equal_scalars(other_branch, &other_branch_regs[insn->src_reg]); 9334 } 9335 9336 } 9337 } else if (dst_reg->type == SCALAR_VALUE) { 9338 reg_set_min_max(&other_branch_regs[insn->dst_reg], 9339 dst_reg, insn->imm, (u32)insn->imm, 9340 opcode, is_jmp32); 9341 } 9342 9343 if (dst_reg->type == SCALAR_VALUE && dst_reg->id && 9344 !WARN_ON_ONCE(dst_reg->id != other_branch_regs[insn->dst_reg].id)) { 9345 find_equal_scalars(this_branch, dst_reg); 9346 find_equal_scalars(other_branch, &other_branch_regs[insn->dst_reg]); 9347 } 9348 9349 /* detect if R == 0 where R is returned from bpf_map_lookup_elem(). 9350 * NOTE: these optimizations below are related to pointer comparison 9351 * which will never be JMP32. 9352 */ 9353 if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K && 9354 insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) && 9355 reg_type_may_be_null(dst_reg->type)) { 9356 /* Mark all identical registers in each branch as either 9357 * safe or unknown depending on the R == 0 or R != 0 conditional.
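 * For example, after "r0 = bpf_map_lookup_elem(...); r6 = r0;" both r0 and
 * r6 carry the same id, so a single "if r0 == 0" check retypes both copies
 * in each branch.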
9358 */ 9359 mark_ptr_or_null_regs(this_branch, insn->dst_reg, 9360 opcode == BPF_JNE); 9361 mark_ptr_or_null_regs(other_branch, insn->dst_reg, 9362 opcode == BPF_JEQ); 9363 } else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg], 9364 this_branch, other_branch) && 9365 is_pointer_value(env, insn->dst_reg)) { 9366 verbose(env, "R%d pointer comparison prohibited\n", 9367 insn->dst_reg); 9368 return -EACCES; 9369 } 9370 if (env->log.level & BPF_LOG_LEVEL) 9371 print_verifier_state(env, this_branch->frame[this_branch->curframe]); 9372 return 0; 9373 } 9374 9375 /* verify BPF_LD_IMM64 instruction */ 9376 static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn) 9377 { 9378 struct bpf_insn_aux_data *aux = cur_aux(env); 9379 struct bpf_reg_state *regs = cur_regs(env); 9380 struct bpf_reg_state *dst_reg; 9381 struct bpf_map *map; 9382 int err; 9383 9384 if (BPF_SIZE(insn->code) != BPF_DW) { 9385 verbose(env, "invalid BPF_LD_IMM insn\n"); 9386 return -EINVAL; 9387 } 9388 if (insn->off != 0) { 9389 verbose(env, "BPF_LD_IMM64 uses reserved fields\n"); 9390 return -EINVAL; 9391 } 9392 9393 err = check_reg_arg(env, insn->dst_reg, DST_OP); 9394 if (err) 9395 return err; 9396 9397 dst_reg = &regs[insn->dst_reg]; 9398 if (insn->src_reg == 0) { 9399 u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm; 9400 9401 dst_reg->type = SCALAR_VALUE; 9402 __mark_reg_known(&regs[insn->dst_reg], imm); 9403 return 0; 9404 } 9405 9406 if (insn->src_reg == BPF_PSEUDO_BTF_ID) { 9407 mark_reg_known_zero(env, regs, insn->dst_reg); 9408 9409 dst_reg->type = aux->btf_var.reg_type; 9410 switch (dst_reg->type) { 9411 case PTR_TO_MEM: 9412 dst_reg->mem_size = aux->btf_var.mem_size; 9413 break; 9414 case PTR_TO_BTF_ID: 9415 case PTR_TO_PERCPU_BTF_ID: 9416 dst_reg->btf = aux->btf_var.btf; 9417 dst_reg->btf_id = aux->btf_var.btf_id; 9418 break; 9419 default: 9420 verbose(env, "bpf verifier is misconfigured\n"); 9421 return -EFAULT; 9422 } 9423 return 0; 9424 } 9425 9426 if (insn->src_reg == BPF_PSEUDO_FUNC) { 9427 struct bpf_prog_aux *aux = env->prog->aux; 9428 u32 subprogno = find_subprog(env, 9429 env->insn_idx + insn->imm + 1); 9430 9431 if (!aux->func_info) { 9432 verbose(env, "missing btf func_info\n"); 9433 return -EINVAL; 9434 } 9435 if (aux->func_info_aux[subprogno].linkage != BTF_FUNC_STATIC) { 9436 verbose(env, "callback function not static\n"); 9437 return -EINVAL; 9438 } 9439 9440 dst_reg->type = PTR_TO_FUNC; 9441 dst_reg->subprogno = subprogno; 9442 return 0; 9443 } 9444 9445 map = env->used_maps[aux->map_index]; 9446 mark_reg_known_zero(env, regs, insn->dst_reg); 9447 dst_reg->map_ptr = map; 9448 9449 if (insn->src_reg == BPF_PSEUDO_MAP_VALUE || 9450 insn->src_reg == BPF_PSEUDO_MAP_IDX_VALUE) { 9451 dst_reg->type = PTR_TO_MAP_VALUE; 9452 dst_reg->off = aux->map_off; 9453 if (map_value_has_spin_lock(map)) 9454 dst_reg->id = ++env->id_gen; 9455 } else if (insn->src_reg == BPF_PSEUDO_MAP_FD || 9456 insn->src_reg == BPF_PSEUDO_MAP_IDX) { 9457 dst_reg->type = CONST_PTR_TO_MAP; 9458 } else { 9459 verbose(env, "bpf verifier is misconfigured\n"); 9460 return -EINVAL; 9461 } 9462 9463 return 0; 9464 } 9465 9466 static bool may_access_skb(enum bpf_prog_type type) 9467 { 9468 switch (type) { 9469 case BPF_PROG_TYPE_SOCKET_FILTER: 9470 case BPF_PROG_TYPE_SCHED_CLS: 9471 case BPF_PROG_TYPE_SCHED_ACT: 9472 return true; 9473 default: 9474 return false; 9475 } 9476 } 9477 9478 /* verify safety of LD_ABS|LD_IND instructions: 9479 * - they can only appear in the programs where ctx == skb 9480 * - since
they are wrappers of function calls, they scratch R1-R5 registers, 9481 * preserve R6-R9, and store return value into R0 9482 * 9483 * Implicit input: 9484 * ctx == skb == R6 == CTX 9485 * 9486 * Explicit input: 9487 * SRC == any register 9488 * IMM == 32-bit immediate 9489 * 9490 * Output: 9491 * R0 - 8/16/32-bit skb data converted to cpu endianness 9492 */ 9493 static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) 9494 { 9495 struct bpf_reg_state *regs = cur_regs(env); 9496 static const int ctx_reg = BPF_REG_6; 9497 u8 mode = BPF_MODE(insn->code); 9498 int i, err; 9499 9500 if (!may_access_skb(resolve_prog_type(env->prog))) { 9501 verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n"); 9502 return -EINVAL; 9503 } 9504 9505 if (!env->ops->gen_ld_abs) { 9506 verbose(env, "bpf verifier is misconfigured\n"); 9507 return -EINVAL; 9508 } 9509 9510 if (insn->dst_reg != BPF_REG_0 || insn->off != 0 || 9511 BPF_SIZE(insn->code) == BPF_DW || 9512 (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) { 9513 verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n"); 9514 return -EINVAL; 9515 } 9516 9517 /* check whether implicit source operand (register R6) is readable */ 9518 err = check_reg_arg(env, ctx_reg, SRC_OP); 9519 if (err) 9520 return err; 9521 9522 /* Disallow usage of BPF_LD_[ABS|IND] with reference tracking, as 9523 * gen_ld_abs() may terminate the program at runtime, leading to 9524 * reference leak. 9525 */ 9526 err = check_reference_leak(env); 9527 if (err) { 9528 verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n"); 9529 return err; 9530 } 9531 9532 if (env->cur_state->active_spin_lock) { 9533 verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_spin_lock-ed region\n"); 9534 return -EINVAL; 9535 } 9536 9537 if (regs[ctx_reg].type != PTR_TO_CTX) { 9538 verbose(env, 9539 "at the time of BPF_LD_ABS|IND R6 != pointer to skb\n"); 9540 return -EINVAL; 9541 } 9542 9543 if (mode == BPF_IND) { 9544 /* check explicit source operand */ 9545 err = check_reg_arg(env, insn->src_reg, SRC_OP); 9546 if (err) 9547 return err; 9548 } 9549 9550 err = check_ctx_reg(env, &regs[ctx_reg], ctx_reg); 9551 if (err < 0) 9552 return err; 9553 9554 /* reset caller saved regs to unreadable */ 9555 for (i = 0; i < CALLER_SAVED_REGS; i++) { 9556 mark_reg_not_init(env, regs, caller_saved[i]); 9557 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); 9558 } 9559 9560 /* mark destination R0 register as readable, since it contains 9561 * the value fetched from the packet. 9562 * Already marked as written above. 9563 */ 9564 mark_reg_unknown(env, regs, BPF_REG_0); 9565 /* ld_abs loads up to 32-bit skb data.
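 * Only the low 32 bits of R0 are defined here, so record subreg_def below;
 * the later zero-extension pass can then insert a zext if some path relies
 * on the full 64-bit value.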
*/ 9566 regs[BPF_REG_0].subreg_def = env->insn_idx + 1; 9567 return 0; 9568 } 9569 9570 static int check_return_code(struct bpf_verifier_env *env) 9571 { 9572 struct tnum enforce_attach_type_range = tnum_unknown; 9573 const struct bpf_prog *prog = env->prog; 9574 struct bpf_reg_state *reg; 9575 struct tnum range = tnum_range(0, 1); 9576 enum bpf_prog_type prog_type = resolve_prog_type(env->prog); 9577 int err; 9578 struct bpf_func_state *frame = env->cur_state->frame[0]; 9579 const bool is_subprog = frame->subprogno; 9580 9581 /* LSM and struct_ops func-ptr's return type could be "void" */ 9582 if (!is_subprog && 9583 (prog_type == BPF_PROG_TYPE_STRUCT_OPS || 9584 prog_type == BPF_PROG_TYPE_LSM) && 9585 !prog->aux->attach_func_proto->type) 9586 return 0; 9587 9588 /* eBPF calling convention is such that R0 is used 9589 * to return the value from eBPF program. 9590 * Make sure that it's readable at this time 9591 * of bpf_exit, which means that program wrote 9592 * something into it earlier 9593 */ 9594 err = check_reg_arg(env, BPF_REG_0, SRC_OP); 9595 if (err) 9596 return err; 9597 9598 if (is_pointer_value(env, BPF_REG_0)) { 9599 verbose(env, "R0 leaks addr as return value\n"); 9600 return -EACCES; 9601 } 9602 9603 reg = cur_regs(env) + BPF_REG_0; 9604 9605 if (frame->in_async_callback_fn) { 9606 /* enforce return zero from async callbacks like timer */ 9607 if (reg->type != SCALAR_VALUE) { 9608 verbose(env, "In async callback the register R0 is not a known value (%s)\n", 9609 reg_type_str[reg->type]); 9610 return -EINVAL; 9611 } 9612 9613 if (!tnum_in(tnum_const(0), reg->var_off)) { 9614 verbose_invalid_scalar(env, reg, &range, "async callback", "R0"); 9615 return -EINVAL; 9616 } 9617 return 0; 9618 } 9619 9620 if (is_subprog) { 9621 if (reg->type != SCALAR_VALUE) { 9622 verbose(env, "At subprogram exit the register R0 is not a scalar value (%s)\n", 9623 reg_type_str[reg->type]); 9624 return -EINVAL; 9625 } 9626 return 0; 9627 } 9628 9629 switch (prog_type) { 9630 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 9631 if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG || 9632 env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG || 9633 env->prog->expected_attach_type == BPF_CGROUP_INET4_GETPEERNAME || 9634 env->prog->expected_attach_type == BPF_CGROUP_INET6_GETPEERNAME || 9635 env->prog->expected_attach_type == BPF_CGROUP_INET4_GETSOCKNAME || 9636 env->prog->expected_attach_type == BPF_CGROUP_INET6_GETSOCKNAME) 9637 range = tnum_range(1, 1); 9638 if (env->prog->expected_attach_type == BPF_CGROUP_INET4_BIND || 9639 env->prog->expected_attach_type == BPF_CGROUP_INET6_BIND) 9640 range = tnum_range(0, 3); 9641 break; 9642 case BPF_PROG_TYPE_CGROUP_SKB: 9643 if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) { 9644 range = tnum_range(0, 3); 9645 enforce_attach_type_range = tnum_range(2, 3); 9646 } 9647 break; 9648 case BPF_PROG_TYPE_CGROUP_SOCK: 9649 case BPF_PROG_TYPE_SOCK_OPS: 9650 case BPF_PROG_TYPE_CGROUP_DEVICE: 9651 case BPF_PROG_TYPE_CGROUP_SYSCTL: 9652 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 9653 break; 9654 case BPF_PROG_TYPE_RAW_TRACEPOINT: 9655 if (!env->prog->aux->attach_btf_id) 9656 return 0; 9657 range = tnum_const(0); 9658 break; 9659 case BPF_PROG_TYPE_TRACING: 9660 switch (env->prog->expected_attach_type) { 9661 case BPF_TRACE_FENTRY: 9662 case BPF_TRACE_FEXIT: 9663 range = tnum_const(0); 9664 break; 9665 case BPF_TRACE_RAW_TP: 9666 case BPF_MODIFY_RETURN: 9667 return 0; 9668 case BPF_TRACE_ITER: 9669 break; 9670 default: 9671 return -ENOTSUPP; 9672 } 9673 break; 
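/* sk_lookup programs must return a verdict in the SK_DROP..SK_PASS range,
 * not an arbitrary scalar.
 */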
9674 case BPF_PROG_TYPE_SK_LOOKUP: 9675 range = tnum_range(SK_DROP, SK_PASS); 9676 break; 9677 case BPF_PROG_TYPE_EXT: 9678 /* freplace program can return anything as its return value 9679 * depends on the to-be-replaced kernel func or bpf program. 9680 */ 9681 default: 9682 return 0; 9683 } 9684 9685 if (reg->type != SCALAR_VALUE) { 9686 verbose(env, "At program exit the register R0 is not a known value (%s)\n", 9687 reg_type_str[reg->type]); 9688 return -EINVAL; 9689 } 9690 9691 if (!tnum_in(range, reg->var_off)) { 9692 verbose_invalid_scalar(env, reg, &range, "program exit", "R0"); 9693 return -EINVAL; 9694 } 9695 9696 if (!tnum_is_unknown(enforce_attach_type_range) && 9697 tnum_in(enforce_attach_type_range, reg->var_off)) 9698 env->prog->enforce_expected_attach_type = 1; 9699 return 0; 9700 } 9701 9702 /* non-recursive DFS pseudo code 9703 * 1 procedure DFS-iterative(G,v): 9704 * 2 label v as discovered 9705 * 3 let S be a stack 9706 * 4 S.push(v) 9707 * 5 while S is not empty 9708 * 6 t <- S.pop() 9709 * 7 if t is what we're looking for: 9710 * 8 return t 9711 * 9 for all edges e in G.adjacentEdges(t) do 9712 * 10 if edge e is already labelled 9713 * 11 continue with the next edge 9714 * 12 w <- G.adjacentVertex(t,e) 9715 * 13 if vertex w is not discovered and not explored 9716 * 14 label e as tree-edge 9717 * 15 label w as discovered 9718 * 16 S.push(w) 9719 * 17 continue at 5 9720 * 18 else if vertex w is discovered 9721 * 19 label e as back-edge 9722 * 20 else 9723 * 21 // vertex w is explored 9724 * 22 label e as forward- or cross-edge 9725 * 23 label t as explored 9726 * 24 S.pop() 9727 * 9728 * convention: 9729 * 0x10 - discovered 9730 * 0x11 - discovered and fall-through edge labelled 9731 * 0x12 - discovered and fall-through and branch edges labelled 9732 * 0x20 - explored 9733 */ 9734 9735 enum { 9736 DISCOVERED = 0x10, 9737 EXPLORED = 0x20, 9738 FALLTHROUGH = 1, 9739 BRANCH = 2, 9740 }; 9741 9742 static u32 state_htab_size(struct bpf_verifier_env *env) 9743 { 9744 return env->prog->len; 9745 } 9746 9747 static struct bpf_verifier_state_list **explored_state( 9748 struct bpf_verifier_env *env, 9749 int idx) 9750 { 9751 struct bpf_verifier_state *cur = env->cur_state; 9752 struct bpf_func_state *state = cur->frame[cur->curframe]; 9753 9754 return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)]; 9755 } 9756 9757 static void init_explored_state(struct bpf_verifier_env *env, int idx) 9758 { 9759 env->insn_aux_data[idx].prune_point = true; 9760 } 9761 9762 enum { 9763 DONE_EXPLORING = 0, 9764 KEEP_EXPLORING = 1, 9765 }; 9766 9767 /* t, w, e - match pseudo-code above: 9768 * t - index of current instruction 9769 * w - next instruction 9770 * e - edge 9771 */ 9772 static int push_insn(int t, int w, int e, struct bpf_verifier_env *env, 9773 bool loop_ok) 9774 { 9775 int *insn_stack = env->cfg.insn_stack; 9776 int *insn_state = env->cfg.insn_state; 9777 9778 if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH)) 9779 return DONE_EXPLORING; 9780 9781 if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH)) 9782 return DONE_EXPLORING; 9783 9784 if (w < 0 || w >= env->prog->len) { 9785 verbose_linfo(env, t, "%d: ", t); 9786 verbose(env, "jump out of range from insn %d to %d\n", t, w); 9787 return -EINVAL; 9788 } 9789 9790 if (e == BRANCH) 9791 /* mark branch target for state pruning */ 9792 init_explored_state(env, w); 9793 9794 if (insn_state[w] == 0) { 9795 /* tree-edge */ 9796 insn_state[t] = DISCOVERED | e; 9797 insn_state[w] = DISCOVERED; 9798 
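/* t keeps DISCOVERED | e so this edge is not pushed twice (the 0x11/0x12
 * states in the convention above); w is pushed so its own edges are
 * explored before t is finally marked EXPLORED.
 */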
if (env->cfg.cur_stack >= env->prog->len) 9799 return -E2BIG; 9800 insn_stack[env->cfg.cur_stack++] = w; 9801 return KEEP_EXPLORING; 9802 } else if ((insn_state[w] & 0xF0) == DISCOVERED) { 9803 if (loop_ok && env->bpf_capable) 9804 return DONE_EXPLORING; 9805 verbose_linfo(env, t, "%d: ", t); 9806 verbose_linfo(env, w, "%d: ", w); 9807 verbose(env, "back-edge from insn %d to %d\n", t, w); 9808 return -EINVAL; 9809 } else if (insn_state[w] == EXPLORED) { 9810 /* forward- or cross-edge */ 9811 insn_state[t] = DISCOVERED | e; 9812 } else { 9813 verbose(env, "insn state internal bug\n"); 9814 return -EFAULT; 9815 } 9816 return DONE_EXPLORING; 9817 } 9818 9819 static int visit_func_call_insn(int t, int insn_cnt, 9820 struct bpf_insn *insns, 9821 struct bpf_verifier_env *env, 9822 bool visit_callee) 9823 { 9824 int ret; 9825 9826 ret = push_insn(t, t + 1, FALLTHROUGH, env, false); 9827 if (ret) 9828 return ret; 9829 9830 if (t + 1 < insn_cnt) 9831 init_explored_state(env, t + 1); 9832 if (visit_callee) { 9833 init_explored_state(env, t); 9834 ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env, 9835 /* It's ok to allow recursion from CFG point of 9836 * view. __check_func_call() will do the actual 9837 * check. 9838 */ 9839 bpf_pseudo_func(insns + t)); 9840 } 9841 return ret; 9842 } 9843 9844 /* Visits the instruction at index t and returns one of the following: 9845 * < 0 - an error occurred 9846 * DONE_EXPLORING - the instruction was fully explored 9847 * KEEP_EXPLORING - there is still work to be done before it is fully explored 9848 */ 9849 static int visit_insn(int t, int insn_cnt, struct bpf_verifier_env *env) 9850 { 9851 struct bpf_insn *insns = env->prog->insnsi; 9852 int ret; 9853 9854 if (bpf_pseudo_func(insns + t)) 9855 return visit_func_call_insn(t, insn_cnt, insns, env, true); 9856 9857 /* All non-branch instructions have a single fall-through edge. */ 9858 if (BPF_CLASS(insns[t].code) != BPF_JMP && 9859 BPF_CLASS(insns[t].code) != BPF_JMP32) 9860 return push_insn(t, t + 1, FALLTHROUGH, env, false); 9861 9862 switch (BPF_OP(insns[t].code)) { 9863 case BPF_EXIT: 9864 return DONE_EXPLORING; 9865 9866 case BPF_CALL: 9867 if (insns[t].imm == BPF_FUNC_timer_set_callback) 9868 /* Mark this call insn to trigger is_state_visited() check 9869 * before call itself is processed by __check_func_call(). 9870 * Otherwise new async state will be pushed for further 9871 * exploration. 9872 */ 9873 init_explored_state(env, t); 9874 return visit_func_call_insn(t, insn_cnt, insns, env, 9875 insns[t].src_reg == BPF_PSEUDO_CALL); 9876 9877 case BPF_JA: 9878 if (BPF_SRC(insns[t].code) != BPF_K) 9879 return -EINVAL; 9880 9881 /* unconditional jump with single edge */ 9882 ret = push_insn(t, t + insns[t].off + 1, FALLTHROUGH, env, 9883 true); 9884 if (ret) 9885 return ret; 9886 9887 /* unconditional jmp is not a good pruning point, 9888 * but it's marked, since backtracking needs 9889 * to record jmp history in is_state_visited(). 
9890 */ 9891 init_explored_state(env, t + insns[t].off + 1); 9892 /* tell verifier to check for equivalent states 9893 * after every call and jump 9894 */ 9895 if (t + 1 < insn_cnt) 9896 init_explored_state(env, t + 1); 9897 9898 return ret; 9899 9900 default: 9901 /* conditional jump with two edges */ 9902 init_explored_state(env, t); 9903 ret = push_insn(t, t + 1, FALLTHROUGH, env, true); 9904 if (ret) 9905 return ret; 9906 9907 return push_insn(t, t + insns[t].off + 1, BRANCH, env, true); 9908 } 9909 } 9910 9911 /* non-recursive depth-first-search to detect loops in BPF program 9912 * loop == back-edge in directed graph 9913 */ 9914 static int check_cfg(struct bpf_verifier_env *env) 9915 { 9916 int insn_cnt = env->prog->len; 9917 int *insn_stack, *insn_state; 9918 int ret = 0; 9919 int i; 9920 9921 insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL); 9922 if (!insn_state) 9923 return -ENOMEM; 9924 9925 insn_stack = env->cfg.insn_stack = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL); 9926 if (!insn_stack) { 9927 kvfree(insn_state); 9928 return -ENOMEM; 9929 } 9930 9931 insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */ 9932 insn_stack[0] = 0; /* 0 is the first instruction */ 9933 env->cfg.cur_stack = 1; 9934 9935 while (env->cfg.cur_stack > 0) { 9936 int t = insn_stack[env->cfg.cur_stack - 1]; 9937 9938 ret = visit_insn(t, insn_cnt, env); 9939 switch (ret) { 9940 case DONE_EXPLORING: 9941 insn_state[t] = EXPLORED; 9942 env->cfg.cur_stack--; 9943 break; 9944 case KEEP_EXPLORING: 9945 break; 9946 default: 9947 if (ret > 0) { 9948 verbose(env, "visit_insn internal bug\n"); 9949 ret = -EFAULT; 9950 } 9951 goto err_free; 9952 } 9953 } 9954 9955 if (env->cfg.cur_stack < 0) { 9956 verbose(env, "pop stack internal bug\n"); 9957 ret = -EFAULT; 9958 goto err_free; 9959 } 9960 9961 for (i = 0; i < insn_cnt; i++) { 9962 if (insn_state[i] != EXPLORED) { 9963 verbose(env, "unreachable insn %d\n", i); 9964 ret = -EINVAL; 9965 goto err_free; 9966 } 9967 } 9968 ret = 0; /* cfg looks good */ 9969 9970 err_free: 9971 kvfree(insn_state); 9972 kvfree(insn_stack); 9973 env->cfg.insn_state = env->cfg.insn_stack = NULL; 9974 return ret; 9975 } 9976 9977 static int check_abnormal_return(struct bpf_verifier_env *env) 9978 { 9979 int i; 9980 9981 for (i = 1; i < env->subprog_cnt; i++) { 9982 if (env->subprog_info[i].has_ld_abs) { 9983 verbose(env, "LD_ABS is not allowed in subprogs without BTF\n"); 9984 return -EINVAL; 9985 } 9986 if (env->subprog_info[i].has_tail_call) { 9987 verbose(env, "tail_call is not allowed in subprogs without BTF\n"); 9988 return -EINVAL; 9989 } 9990 } 9991 return 0; 9992 } 9993 9994 /* The minimum supported BTF func info size */ 9995 #define MIN_BPF_FUNCINFO_SIZE 8 9996 #define MAX_FUNCINFO_REC_SIZE 252 9997 9998 static int check_btf_func(struct bpf_verifier_env *env, 9999 const union bpf_attr *attr, 10000 bpfptr_t uattr) 10001 { 10002 const struct btf_type *type, *func_proto, *ret_type; 10003 u32 i, nfuncs, urec_size, min_size; 10004 u32 krec_size = sizeof(struct bpf_func_info); 10005 struct bpf_func_info *krecord; 10006 struct bpf_func_info_aux *info_aux = NULL; 10007 struct bpf_prog *prog; 10008 const struct btf *btf; 10009 bpfptr_t urecord; 10010 u32 prev_offset = 0; 10011 bool scalar_return; 10012 int ret = -ENOMEM; 10013 10014 nfuncs = attr->func_info_cnt; 10015 if (!nfuncs) { 10016 if (check_abnormal_return(env)) 10017 return -EINVAL; 10018 return 0; 10019 } 10020 10021 if (nfuncs != env->subprog_cnt) { 10022 verbose(env, "number of funcs in 
func_info doesn't match number of subprogs\n"); 10023 return -EINVAL; 10024 } 10025 10026 urec_size = attr->func_info_rec_size; 10027 if (urec_size < MIN_BPF_FUNCINFO_SIZE || 10028 urec_size > MAX_FUNCINFO_REC_SIZE || 10029 urec_size % sizeof(u32)) { 10030 verbose(env, "invalid func info rec size %u\n", urec_size); 10031 return -EINVAL; 10032 } 10033 10034 prog = env->prog; 10035 btf = prog->aux->btf; 10036 10037 urecord = make_bpfptr(attr->func_info, uattr.is_kernel); 10038 min_size = min_t(u32, krec_size, urec_size); 10039 10040 krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN); 10041 if (!krecord) 10042 return -ENOMEM; 10043 info_aux = kcalloc(nfuncs, sizeof(*info_aux), GFP_KERNEL | __GFP_NOWARN); 10044 if (!info_aux) 10045 goto err_free; 10046 10047 for (i = 0; i < nfuncs; i++) { 10048 ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size); 10049 if (ret) { 10050 if (ret == -E2BIG) { 10051 verbose(env, "nonzero tailing record in func info"); 10052 /* set the size kernel expects so loader can zero 10053 * out the rest of the record. 10054 */ 10055 if (copy_to_bpfptr_offset(uattr, 10056 offsetof(union bpf_attr, func_info_rec_size), 10057 &min_size, sizeof(min_size))) 10058 ret = -EFAULT; 10059 } 10060 goto err_free; 10061 } 10062 10063 if (copy_from_bpfptr(&krecord[i], urecord, min_size)) { 10064 ret = -EFAULT; 10065 goto err_free; 10066 } 10067 10068 /* check insn_off */ 10069 ret = -EINVAL; 10070 if (i == 0) { 10071 if (krecord[i].insn_off) { 10072 verbose(env, 10073 "nonzero insn_off %u for the first func info record", 10074 krecord[i].insn_off); 10075 goto err_free; 10076 } 10077 } else if (krecord[i].insn_off <= prev_offset) { 10078 verbose(env, 10079 "same or smaller insn offset (%u) than previous func info record (%u)", 10080 krecord[i].insn_off, prev_offset); 10081 goto err_free; 10082 } 10083 10084 if (env->subprog_info[i].start != krecord[i].insn_off) { 10085 verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n"); 10086 goto err_free; 10087 } 10088 10089 /* check type_id */ 10090 type = btf_type_by_id(btf, krecord[i].type_id); 10091 if (!type || !btf_type_is_func(type)) { 10092 verbose(env, "invalid type id %d in func info", 10093 krecord[i].type_id); 10094 goto err_free; 10095 } 10096 info_aux[i].linkage = BTF_INFO_VLEN(type->info); 10097 10098 func_proto = btf_type_by_id(btf, type->type); 10099 if (unlikely(!func_proto || !btf_type_is_func_proto(func_proto))) 10100 /* btf_func_check() already verified it during BTF load */ 10101 goto err_free; 10102 ret_type = btf_type_skip_modifiers(btf, func_proto->type, NULL); 10103 scalar_return = 10104 btf_type_is_small_int(ret_type) || btf_type_is_enum(ret_type); 10105 if (i && !scalar_return && env->subprog_info[i].has_ld_abs) { 10106 verbose(env, "LD_ABS is only allowed in functions that return 'int'.\n"); 10107 goto err_free; 10108 } 10109 if (i && !scalar_return && env->subprog_info[i].has_tail_call) { 10110 verbose(env, "tail_call is only allowed in functions that return 'int'.\n"); 10111 goto err_free; 10112 } 10113 10114 prev_offset = krecord[i].insn_off; 10115 bpfptr_add(&urecord, urec_size); 10116 } 10117 10118 prog->aux->func_info = krecord; 10119 prog->aux->func_info_cnt = nfuncs; 10120 prog->aux->func_info_aux = info_aux; 10121 return 0; 10122 10123 err_free: 10124 kvfree(krecord); 10125 kfree(info_aux); 10126 return ret; 10127 } 10128 10129 static void adjust_btf_func(struct bpf_verifier_env *env) 10130 { 10131 struct bpf_prog_aux *aux = env->prog->aux; 10132 int i; 10133 
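/* Instruction patching may have moved the subprog start offsets, so resync
 * each func_info record's insn_off with the final subprog layout.
 */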
10134 if (!aux->func_info) 10135 return; 10136 10137 for (i = 0; i < env->subprog_cnt; i++) 10138 aux->func_info[i].insn_off = env->subprog_info[i].start; 10139 } 10140 10141 #define MIN_BPF_LINEINFO_SIZE (offsetof(struct bpf_line_info, line_col) + \ 10142 sizeof(((struct bpf_line_info *)(0))->line_col)) 10143 #define MAX_LINEINFO_REC_SIZE MAX_FUNCINFO_REC_SIZE 10144 10145 static int check_btf_line(struct bpf_verifier_env *env, 10146 const union bpf_attr *attr, 10147 bpfptr_t uattr) 10148 { 10149 u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0; 10150 struct bpf_subprog_info *sub; 10151 struct bpf_line_info *linfo; 10152 struct bpf_prog *prog; 10153 const struct btf *btf; 10154 bpfptr_t ulinfo; 10155 int err; 10156 10157 nr_linfo = attr->line_info_cnt; 10158 if (!nr_linfo) 10159 return 0; 10160 if (nr_linfo > INT_MAX / sizeof(struct bpf_line_info)) 10161 return -EINVAL; 10162 10163 rec_size = attr->line_info_rec_size; 10164 if (rec_size < MIN_BPF_LINEINFO_SIZE || 10165 rec_size > MAX_LINEINFO_REC_SIZE || 10166 rec_size & (sizeof(u32) - 1)) 10167 return -EINVAL; 10168 10169 /* Need to zero it in case the userspace may 10170 * pass in a smaller bpf_line_info object. 10171 */ 10172 linfo = kvcalloc(nr_linfo, sizeof(struct bpf_line_info), 10173 GFP_KERNEL | __GFP_NOWARN); 10174 if (!linfo) 10175 return -ENOMEM; 10176 10177 prog = env->prog; 10178 btf = prog->aux->btf; 10179 10180 s = 0; 10181 sub = env->subprog_info; 10182 ulinfo = make_bpfptr(attr->line_info, uattr.is_kernel); 10183 expected_size = sizeof(struct bpf_line_info); 10184 ncopy = min_t(u32, expected_size, rec_size); 10185 for (i = 0; i < nr_linfo; i++) { 10186 err = bpf_check_uarg_tail_zero(ulinfo, expected_size, rec_size); 10187 if (err) { 10188 if (err == -E2BIG) { 10189 verbose(env, "nonzero tailing record in line_info"); 10190 if (copy_to_bpfptr_offset(uattr, 10191 offsetof(union bpf_attr, line_info_rec_size), 10192 &expected_size, sizeof(expected_size))) 10193 err = -EFAULT; 10194 } 10195 goto err_free; 10196 } 10197 10198 if (copy_from_bpfptr(&linfo[i], ulinfo, ncopy)) { 10199 err = -EFAULT; 10200 goto err_free; 10201 } 10202 10203 /* 10204 * Check insn_off to ensure 10205 * 1) strictly increasing AND 10206 * 2) bounded by prog->len 10207 * 10208 * The linfo[0].insn_off == 0 check logically falls into 10209 * the later "missing bpf_line_info for func..." case 10210 * because the first linfo[0].insn_off must be the 10211 * first sub also and the first sub must have 10212 * subprog_info[0].start == 0. 
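 * For example (hypothetical): with subprogs starting at insn 0 and 20,
 * line_info insn_offs 0, 5, 20, 27 are accepted, while 0, 5, 25 trips the
 * "missing bpf_line_info for func#1" check further below.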
10213 */ 10214 if ((i && linfo[i].insn_off <= prev_offset) || 10215 linfo[i].insn_off >= prog->len) { 10216 verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n", 10217 i, linfo[i].insn_off, prev_offset, 10218 prog->len); 10219 err = -EINVAL; 10220 goto err_free; 10221 } 10222 10223 if (!prog->insnsi[linfo[i].insn_off].code) { 10224 verbose(env, 10225 "Invalid insn code at line_info[%u].insn_off\n", 10226 i); 10227 err = -EINVAL; 10228 goto err_free; 10229 } 10230 10231 if (!btf_name_by_offset(btf, linfo[i].line_off) || 10232 !btf_name_by_offset(btf, linfo[i].file_name_off)) { 10233 verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i); 10234 err = -EINVAL; 10235 goto err_free; 10236 } 10237 10238 if (s != env->subprog_cnt) { 10239 if (linfo[i].insn_off == sub[s].start) { 10240 sub[s].linfo_idx = i; 10241 s++; 10242 } else if (sub[s].start < linfo[i].insn_off) { 10243 verbose(env, "missing bpf_line_info for func#%u\n", s); 10244 err = -EINVAL; 10245 goto err_free; 10246 } 10247 } 10248 10249 prev_offset = linfo[i].insn_off; 10250 bpfptr_add(&ulinfo, rec_size); 10251 } 10252 10253 if (s != env->subprog_cnt) { 10254 verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n", 10255 env->subprog_cnt - s, s); 10256 err = -EINVAL; 10257 goto err_free; 10258 } 10259 10260 prog->aux->linfo = linfo; 10261 prog->aux->nr_linfo = nr_linfo; 10262 10263 return 0; 10264 10265 err_free: 10266 kvfree(linfo); 10267 return err; 10268 } 10269 10270 static int check_btf_info(struct bpf_verifier_env *env, 10271 const union bpf_attr *attr, 10272 bpfptr_t uattr) 10273 { 10274 struct btf *btf; 10275 int err; 10276 10277 if (!attr->func_info_cnt && !attr->line_info_cnt) { 10278 if (check_abnormal_return(env)) 10279 return -EINVAL; 10280 return 0; 10281 } 10282 10283 btf = btf_get_by_fd(attr->prog_btf_fd); 10284 if (IS_ERR(btf)) 10285 return PTR_ERR(btf); 10286 if (btf_is_kernel(btf)) { 10287 btf_put(btf); 10288 return -EACCES; 10289 } 10290 env->prog->aux->btf = btf; 10291 10292 err = check_btf_func(env, attr, uattr); 10293 if (err) 10294 return err; 10295 10296 err = check_btf_line(env, attr, uattr); 10297 if (err) 10298 return err; 10299 10300 return 0; 10301 } 10302 10303 /* check %cur's range satisfies %old's */ 10304 static bool range_within(struct bpf_reg_state *old, 10305 struct bpf_reg_state *cur) 10306 { 10307 return old->umin_value <= cur->umin_value && 10308 old->umax_value >= cur->umax_value && 10309 old->smin_value <= cur->smin_value && 10310 old->smax_value >= cur->smax_value && 10311 old->u32_min_value <= cur->u32_min_value && 10312 old->u32_max_value >= cur->u32_max_value && 10313 old->s32_min_value <= cur->s32_min_value && 10314 old->s32_max_value >= cur->s32_max_value; 10315 } 10316 10317 /* If in the old state two registers had the same id, then they need to have 10318 * the same id in the new state as well. But that id could be different from 10319 * the old state, so we need to track the mapping from old to new ids. 10320 * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent 10321 * regs with old id 5 must also have new id 9 for the new state to be safe. But 10322 * regs with a different old id could still have new id 9, we don't care about 10323 * that. 10324 * So we look through our idmap to see if this old id has been seen before. If 10325 * so, we require the new id to match; otherwise, we add the id pair to the map. 
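 * For example, old ids (5, 5, 7) against current ids (9, 9, 3) can match:
 * 5->9 is recorded once and then re-checked, 7->3 is a fresh pair. Old
 * (5, 5) against current (9, 8) fails, since 5 already mapped to 9.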
10326 */ 10327 static bool check_ids(u32 old_id, u32 cur_id, struct bpf_id_pair *idmap) 10328 { 10329 unsigned int i; 10330 10331 for (i = 0; i < BPF_ID_MAP_SIZE; i++) { 10332 if (!idmap[i].old) { 10333 /* Reached an empty slot; haven't seen this id before */ 10334 idmap[i].old = old_id; 10335 idmap[i].cur = cur_id; 10336 return true; 10337 } 10338 if (idmap[i].old == old_id) 10339 return idmap[i].cur == cur_id; 10340 } 10341 /* We ran out of idmap slots, which should be impossible */ 10342 WARN_ON_ONCE(1); 10343 return false; 10344 } 10345 10346 static void clean_func_state(struct bpf_verifier_env *env, 10347 struct bpf_func_state *st) 10348 { 10349 enum bpf_reg_liveness live; 10350 int i, j; 10351 10352 for (i = 0; i < BPF_REG_FP; i++) { 10353 live = st->regs[i].live; 10354 /* liveness must not touch this register anymore */ 10355 st->regs[i].live |= REG_LIVE_DONE; 10356 if (!(live & REG_LIVE_READ)) 10357 /* since the register is unused, clear its state 10358 * to make further comparison simpler 10359 */ 10360 __mark_reg_not_init(env, &st->regs[i]); 10361 } 10362 10363 for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) { 10364 live = st->stack[i].spilled_ptr.live; 10365 /* liveness must not touch this stack slot anymore */ 10366 st->stack[i].spilled_ptr.live |= REG_LIVE_DONE; 10367 if (!(live & REG_LIVE_READ)) { 10368 __mark_reg_not_init(env, &st->stack[i].spilled_ptr); 10369 for (j = 0; j < BPF_REG_SIZE; j++) 10370 st->stack[i].slot_type[j] = STACK_INVALID; 10371 } 10372 } 10373 } 10374 10375 static void clean_verifier_state(struct bpf_verifier_env *env, 10376 struct bpf_verifier_state *st) 10377 { 10378 int i; 10379 10380 if (st->frame[0]->regs[0].live & REG_LIVE_DONE) 10381 /* all regs in this state in all frames were already marked */ 10382 return; 10383 10384 for (i = 0; i <= st->curframe; i++) 10385 clean_func_state(env, st->frame[i]); 10386 } 10387 10388 /* the parentage chains form a tree. 10389 * the verifier states are added to state lists at given insn and 10390 * pushed into state stack for future exploration. 10391 * when the verifier reaches bpf_exit insn some of the verifer states 10392 * stored in the state lists have their final liveness state already, 10393 * but a lot of states will get revised from liveness point of view when 10394 * the verifier explores other branches. 10395 * Example: 10396 * 1: r0 = 1 10397 * 2: if r1 == 100 goto pc+1 10398 * 3: r0 = 2 10399 * 4: exit 10400 * when the verifier reaches exit insn the register r0 in the state list of 10401 * insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch 10402 * of insn 2 and goes exploring further. At the insn 4 it will walk the 10403 * parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ. 10404 * 10405 * Since the verifier pushes the branch states as it sees them while exploring 10406 * the program the condition of walking the branch instruction for the second 10407 * time means that all states below this branch were already explored and 10408 * their final liveness marks are already propagated. 10409 * Hence when the verifier completes the search of state list in is_state_visited() 10410 * we can call this clean_live_states() function to mark all liveness states 10411 * as REG_LIVE_DONE to indicate that 'parent' pointers of 'struct bpf_reg_state' 10412 * will not be used. 10413 * This function also clears the registers and stack for states that !READ 10414 * to simplify state merging. 
10415 * 10416 * Important note here that walking the same branch instruction in the callee 10417 * doesn't meant that the states are DONE. The verifier has to compare 10418 * the callsites 10419 */ 10420 static void clean_live_states(struct bpf_verifier_env *env, int insn, 10421 struct bpf_verifier_state *cur) 10422 { 10423 struct bpf_verifier_state_list *sl; 10424 int i; 10425 10426 sl = *explored_state(env, insn); 10427 while (sl) { 10428 if (sl->state.branches) 10429 goto next; 10430 if (sl->state.insn_idx != insn || 10431 sl->state.curframe != cur->curframe) 10432 goto next; 10433 for (i = 0; i <= cur->curframe; i++) 10434 if (sl->state.frame[i]->callsite != cur->frame[i]->callsite) 10435 goto next; 10436 clean_verifier_state(env, &sl->state); 10437 next: 10438 sl = sl->next; 10439 } 10440 } 10441 10442 /* Returns true if (rold safe implies rcur safe) */ 10443 static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold, 10444 struct bpf_reg_state *rcur, struct bpf_id_pair *idmap) 10445 { 10446 bool equal; 10447 10448 if (!(rold->live & REG_LIVE_READ)) 10449 /* explored state didn't use this */ 10450 return true; 10451 10452 equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, parent)) == 0; 10453 10454 if (rold->type == PTR_TO_STACK) 10455 /* two stack pointers are equal only if they're pointing to 10456 * the same stack frame, since fp-8 in foo != fp-8 in bar 10457 */ 10458 return equal && rold->frameno == rcur->frameno; 10459 10460 if (equal) 10461 return true; 10462 10463 if (rold->type == NOT_INIT) 10464 /* explored state can't have used this */ 10465 return true; 10466 if (rcur->type == NOT_INIT) 10467 return false; 10468 switch (rold->type) { 10469 case SCALAR_VALUE: 10470 if (env->explore_alu_limits) 10471 return false; 10472 if (rcur->type == SCALAR_VALUE) { 10473 if (!rold->precise && !rcur->precise) 10474 return true; 10475 /* new val must satisfy old val knowledge */ 10476 return range_within(rold, rcur) && 10477 tnum_in(rold->var_off, rcur->var_off); 10478 } else { 10479 /* We're trying to use a pointer in place of a scalar. 10480 * Even if the scalar was unbounded, this could lead to 10481 * pointer leaks because scalars are allowed to leak 10482 * while pointers are not. We could make this safe in 10483 * special cases if root is calling us, but it's 10484 * probably not worth the hassle. 10485 */ 10486 return false; 10487 } 10488 case PTR_TO_MAP_KEY: 10489 case PTR_TO_MAP_VALUE: 10490 /* If the new min/max/var_off satisfy the old ones and 10491 * everything else matches, we are OK. 10492 * 'id' is not compared, since it's only used for maps with 10493 * bpf_spin_lock inside map element and in such cases if 10494 * the rest of the prog is valid for one map element then 10495 * it's valid for all map elements regardless of the key 10496 * used in bpf_map_lookup() 10497 */ 10498 return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 && 10499 range_within(rold, rcur) && 10500 tnum_in(rold->var_off, rcur->var_off); 10501 case PTR_TO_MAP_VALUE_OR_NULL: 10502 /* a PTR_TO_MAP_VALUE could be safe to use as a 10503 * PTR_TO_MAP_VALUE_OR_NULL into the same map. 10504 * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL- 10505 * checked, doing so could have affected others with the same 10506 * id, and we can't check for that because we lost the id when 10507 * we converted to a PTR_TO_MAP_VALUE. 
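 * So be conservative: only accept another PTR_TO_MAP_VALUE_OR_NULL, and use
 * check_ids() below to make sure registers that shared an id in the old
 * state (i.e. would have been NULL-checked together) still share one now.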
10508 */ 10509 if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL) 10510 return false; 10511 if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id))) 10512 return false; 10513 /* Check our ids match any regs they're supposed to */ 10514 return check_ids(rold->id, rcur->id, idmap); 10515 case PTR_TO_PACKET_META: 10516 case PTR_TO_PACKET: 10517 if (rcur->type != rold->type) 10518 return false; 10519 /* We must have at least as much range as the old ptr 10520 * did, so that any accesses which were safe before are 10521 * still safe. This is true even if old range < old off, 10522 * since someone could have accessed through (ptr - k), or 10523 * even done ptr -= k in a register, to get a safe access. 10524 */ 10525 if (rold->range > rcur->range) 10526 return false; 10527 /* If the offsets don't match, we can't trust our alignment; 10528 * nor can we be sure that we won't fall out of range. 10529 */ 10530 if (rold->off != rcur->off) 10531 return false; 10532 /* id relations must be preserved */ 10533 if (rold->id && !check_ids(rold->id, rcur->id, idmap)) 10534 return false; 10535 /* new val must satisfy old val knowledge */ 10536 return range_within(rold, rcur) && 10537 tnum_in(rold->var_off, rcur->var_off); 10538 case PTR_TO_CTX: 10539 case CONST_PTR_TO_MAP: 10540 case PTR_TO_PACKET_END: 10541 case PTR_TO_FLOW_KEYS: 10542 case PTR_TO_SOCKET: 10543 case PTR_TO_SOCKET_OR_NULL: 10544 case PTR_TO_SOCK_COMMON: 10545 case PTR_TO_SOCK_COMMON_OR_NULL: 10546 case PTR_TO_TCP_SOCK: 10547 case PTR_TO_TCP_SOCK_OR_NULL: 10548 case PTR_TO_XDP_SOCK: 10549 /* Only valid matches are exact, which memcmp() above 10550 * would have accepted 10551 */ 10552 default: 10553 /* Don't know what's going on, just say it's not safe */ 10554 return false; 10555 } 10556 10557 /* Shouldn't get here; if we do, say it's not safe */ 10558 WARN_ON_ONCE(1); 10559 return false; 10560 } 10561 10562 static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old, 10563 struct bpf_func_state *cur, struct bpf_id_pair *idmap) 10564 { 10565 int i, spi; 10566 10567 /* walk slots of the explored stack and ignore any additional 10568 * slots in the current stack, since explored(safe) state 10569 * didn't use them 10570 */ 10571 for (i = 0; i < old->allocated_stack; i++) { 10572 spi = i / BPF_REG_SIZE; 10573 10574 if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) { 10575 i += BPF_REG_SIZE - 1; 10576 /* explored state didn't use this */ 10577 continue; 10578 } 10579 10580 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID) 10581 continue; 10582 10583 /* explored stack has more populated slots than current stack 10584 * and these slots were used 10585 */ 10586 if (i >= cur->allocated_stack) 10587 return false; 10588 10589 /* if old state was safe with misc data in the stack 10590 * it will be safe with zero-initialized stack. 
10591 * The opposite is not true 10592 */ 10593 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC && 10594 cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO) 10595 continue; 10596 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] != 10597 cur->stack[spi].slot_type[i % BPF_REG_SIZE]) 10598 /* Ex: old explored (safe) state has STACK_SPILL in 10599 * this stack slot, but current has STACK_MISC -> 10600 * this verifier states are not equivalent, 10601 * return false to continue verification of this path 10602 */ 10603 return false; 10604 if (i % BPF_REG_SIZE != BPF_REG_SIZE - 1) 10605 continue; 10606 if (!is_spilled_reg(&old->stack[spi])) 10607 continue; 10608 if (!regsafe(env, &old->stack[spi].spilled_ptr, 10609 &cur->stack[spi].spilled_ptr, idmap)) 10610 /* when explored and current stack slot are both storing 10611 * spilled registers, check that stored pointers types 10612 * are the same as well. 10613 * Ex: explored safe path could have stored 10614 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8} 10615 * but current path has stored: 10616 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16} 10617 * such verifier states are not equivalent. 10618 * return false to continue verification of this path 10619 */ 10620 return false; 10621 } 10622 return true; 10623 } 10624 10625 static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur) 10626 { 10627 if (old->acquired_refs != cur->acquired_refs) 10628 return false; 10629 return !memcmp(old->refs, cur->refs, 10630 sizeof(*old->refs) * old->acquired_refs); 10631 } 10632 10633 /* compare two verifier states 10634 * 10635 * all states stored in state_list are known to be valid, since 10636 * verifier reached 'bpf_exit' instruction through them 10637 * 10638 * this function is called when verifier exploring different branches of 10639 * execution popped from the state stack. If it sees an old state that has 10640 * more strict register state and more strict stack state then this execution 10641 * branch doesn't need to be explored further, since verifier already 10642 * concluded that more strict state leads to valid finish. 10643 * 10644 * Therefore two states are equivalent if register state is more conservative 10645 * and explored stack state is more conservative than the current one. 10646 * Example: 10647 * explored current 10648 * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC) 10649 * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC) 10650 * 10651 * In other words if current stack state (one being explored) has more 10652 * valid slots than old one that already passed validation, it means 10653 * the verifier can stop exploring and conclude that current state is valid too 10654 * 10655 * Similarly with registers. 
If explored state has register type as invalid 10656 * whereas register type in current state is meaningful, it means that 10657 * the current state will reach 'bpf_exit' instruction safely 10658 */ 10659 static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_state *old, 10660 struct bpf_func_state *cur) 10661 { 10662 int i; 10663 10664 memset(env->idmap_scratch, 0, sizeof(env->idmap_scratch)); 10665 for (i = 0; i < MAX_BPF_REG; i++) 10666 if (!regsafe(env, &old->regs[i], &cur->regs[i], 10667 env->idmap_scratch)) 10668 return false; 10669 10670 if (!stacksafe(env, old, cur, env->idmap_scratch)) 10671 return false; 10672 10673 if (!refsafe(old, cur)) 10674 return false; 10675 10676 return true; 10677 } 10678 10679 static bool states_equal(struct bpf_verifier_env *env, 10680 struct bpf_verifier_state *old, 10681 struct bpf_verifier_state *cur) 10682 { 10683 int i; 10684 10685 if (old->curframe != cur->curframe) 10686 return false; 10687 10688 /* Verification state from speculative execution simulation 10689 * must never prune a non-speculative execution one. 10690 */ 10691 if (old->speculative && !cur->speculative) 10692 return false; 10693 10694 if (old->active_spin_lock != cur->active_spin_lock) 10695 return false; 10696 10697 /* for states to be equal callsites have to be the same 10698 * and all frame states need to be equivalent 10699 */ 10700 for (i = 0; i <= old->curframe; i++) { 10701 if (old->frame[i]->callsite != cur->frame[i]->callsite) 10702 return false; 10703 if (!func_states_equal(env, old->frame[i], cur->frame[i])) 10704 return false; 10705 } 10706 return true; 10707 } 10708 10709 /* Return 0 if no propagation happened. Return negative error code if error 10710 * happened. Otherwise, return the propagated bit. 10711 */ 10712 static int propagate_liveness_reg(struct bpf_verifier_env *env, 10713 struct bpf_reg_state *reg, 10714 struct bpf_reg_state *parent_reg) 10715 { 10716 u8 parent_flag = parent_reg->live & REG_LIVE_READ; 10717 u8 flag = reg->live & REG_LIVE_READ; 10718 int err; 10719 10720 /* When comes here, read flags of PARENT_REG or REG could be any of 10721 * REG_LIVE_READ64, REG_LIVE_READ32, REG_LIVE_NONE. There is no need 10722 * of propagation if PARENT_REG has strongest REG_LIVE_READ64. 10723 */ 10724 if (parent_flag == REG_LIVE_READ64 || 10725 /* Or if there is no read flag from REG. */ 10726 !flag || 10727 /* Or if the read flag from REG is the same as PARENT_REG. */ 10728 parent_flag == flag) 10729 return 0; 10730 10731 err = mark_reg_read(env, reg, parent_reg, flag); 10732 if (err) 10733 return err; 10734 10735 return flag; 10736 } 10737 10738 /* A write screens off any subsequent reads; but write marks come from the 10739 * straight-line code between a state and its parent. When we arrive at an 10740 * equivalent state (jump target or such) we didn't arrive by the straight-line 10741 * code, so read marks in the state must propagate to the parent regardless 10742 * of the state's write marks. That's what 'parent == state->parent' comparison 10743 * in mark_reg_read() is for. 
10744 */ 10745 static int propagate_liveness(struct bpf_verifier_env *env, 10746 const struct bpf_verifier_state *vstate, 10747 struct bpf_verifier_state *vparent) 10748 { 10749 struct bpf_reg_state *state_reg, *parent_reg; 10750 struct bpf_func_state *state, *parent; 10751 int i, frame, err = 0; 10752 10753 if (vparent->curframe != vstate->curframe) { 10754 WARN(1, "propagate_live: parent frame %d current frame %d\n", 10755 vparent->curframe, vstate->curframe); 10756 return -EFAULT; 10757 } 10758 /* Propagate read liveness of registers... */ 10759 BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG); 10760 for (frame = 0; frame <= vstate->curframe; frame++) { 10761 parent = vparent->frame[frame]; 10762 state = vstate->frame[frame]; 10763 parent_reg = parent->regs; 10764 state_reg = state->regs; 10765 /* We don't need to worry about FP liveness, it's read-only */ 10766 for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) { 10767 err = propagate_liveness_reg(env, &state_reg[i], 10768 &parent_reg[i]); 10769 if (err < 0) 10770 return err; 10771 if (err == REG_LIVE_READ64) 10772 mark_insn_zext(env, &parent_reg[i]); 10773 } 10774 10775 /* Propagate stack slots. */ 10776 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE && 10777 i < parent->allocated_stack / BPF_REG_SIZE; i++) { 10778 parent_reg = &parent->stack[i].spilled_ptr; 10779 state_reg = &state->stack[i].spilled_ptr; 10780 err = propagate_liveness_reg(env, state_reg, 10781 parent_reg); 10782 if (err < 0) 10783 return err; 10784 } 10785 } 10786 return 0; 10787 } 10788 10789 /* find precise scalars in the previous equivalent state and 10790 * propagate them into the current state 10791 */ 10792 static int propagate_precision(struct bpf_verifier_env *env, 10793 const struct bpf_verifier_state *old) 10794 { 10795 struct bpf_reg_state *state_reg; 10796 struct bpf_func_state *state; 10797 int i, err = 0; 10798 10799 state = old->frame[old->curframe]; 10800 state_reg = state->regs; 10801 for (i = 0; i < BPF_REG_FP; i++, state_reg++) { 10802 if (state_reg->type != SCALAR_VALUE || 10803 !state_reg->precise) 10804 continue; 10805 if (env->log.level & BPF_LOG_LEVEL2) 10806 verbose(env, "propagating r%d\n", i); 10807 err = mark_chain_precision(env, i); 10808 if (err < 0) 10809 return err; 10810 } 10811 10812 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { 10813 if (!is_spilled_reg(&state->stack[i])) 10814 continue; 10815 state_reg = &state->stack[i].spilled_ptr; 10816 if (state_reg->type != SCALAR_VALUE || 10817 !state_reg->precise) 10818 continue; 10819 if (env->log.level & BPF_LOG_LEVEL2) 10820 verbose(env, "propagating fp%d\n", 10821 (-i - 1) * BPF_REG_SIZE); 10822 err = mark_chain_precision_stack(env, i); 10823 if (err < 0) 10824 return err; 10825 } 10826 return 0; 10827 } 10828 10829 static bool states_maybe_looping(struct bpf_verifier_state *old, 10830 struct bpf_verifier_state *cur) 10831 { 10832 struct bpf_func_state *fold, *fcur; 10833 int i, fr = cur->curframe; 10834 10835 if (old->curframe != fr) 10836 return false; 10837 10838 fold = old->frame[fr]; 10839 fcur = cur->frame[fr]; 10840 for (i = 0; i < MAX_BPF_REG; i++) 10841 if (memcmp(&fold->regs[i], &fcur->regs[i], 10842 offsetof(struct bpf_reg_state, parent))) 10843 return false; 10844 return true; 10845 } 10846 10847 10848 static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) 10849 { 10850 struct bpf_verifier_state_list *new_sl; 10851 struct bpf_verifier_state_list *sl, **pprev; 10852 struct bpf_verifier_state *cur = env->cur_state, *new; 
10853 int i, j, err, states_cnt = 0; 10854 bool add_new_state = env->test_state_freq ? true : false; 10855 10856 cur->last_insn_idx = env->prev_insn_idx; 10857 if (!env->insn_aux_data[insn_idx].prune_point) 10858 /* this 'insn_idx' instruction wasn't marked, so we will not 10859 * be doing state search here 10860 */ 10861 return 0; 10862 10863 /* bpf progs typically have a pruning point every 4 instructions 10864 * http://vger.kernel.org/bpfconf2019.html#session-1 10865 * Do not add new state for future pruning if the verifier hasn't seen 10866 * at least 2 jumps and at least 8 instructions. 10867 * This heuristic helps decrease the 'total_states' and 'peak_states' metrics. 10868 * In tests that amounts to up to 50% reduction in total verifier 10869 * memory consumption and 20% verifier time speedup. 10870 */ 10871 if (env->jmps_processed - env->prev_jmps_processed >= 2 && 10872 env->insn_processed - env->prev_insn_processed >= 8) 10873 add_new_state = true; 10874 10875 pprev = explored_state(env, insn_idx); 10876 sl = *pprev; 10877 10878 clean_live_states(env, insn_idx, cur); 10879 10880 while (sl) { 10881 states_cnt++; 10882 if (sl->state.insn_idx != insn_idx) 10883 goto next; 10884 10885 if (sl->state.branches) { 10886 struct bpf_func_state *frame = sl->state.frame[sl->state.curframe]; 10887 10888 if (frame->in_async_callback_fn && 10889 frame->async_entry_cnt != cur->frame[cur->curframe]->async_entry_cnt) { 10890 /* Different async_entry_cnt means that the verifier is 10891 * processing another entry into the async callback. 10892 * Seeing the same state is not an indication of an infinite 10893 * loop or infinite recursion. 10894 * But finding the same state doesn't mean that it's safe 10895 * to stop processing the current state. The previous state 10896 * hasn't yet reached bpf_exit, since state.branches > 0. 10897 * Checking in_async_callback_fn alone is not enough either, 10898 * since the verifier still needs to catch infinite loops 10899 * inside async callbacks. 10900 */ 10901 } else if (states_maybe_looping(&sl->state, cur) && 10902 states_equal(env, &sl->state, cur)) { 10903 verbose_linfo(env, insn_idx, "; "); 10904 verbose(env, "infinite loop detected at insn %d\n", insn_idx); 10905 return -EINVAL; 10906 } 10907 /* if the verifier is processing a loop, avoid adding new state 10908 * too often, since different loop iterations have distinct 10909 * states and may not help future pruning. 10910 * This threshold shouldn't be too low to make sure that 10911 * a loop with a large bound will be rejected quickly. 10912 * The most abusive loop will be: 10913 * r1 += 1 10914 * if r1 < 1000000 goto pc-2 10915 * 1M insn_processed limit / 100 == 10k peak states. 10916 * This threshold shouldn't be too high either, since states 10917 * at the end of the loop are likely to be useful in pruning. 10918 */ 10919 if (env->jmps_processed - env->prev_jmps_processed < 20 && 10920 env->insn_processed - env->prev_insn_processed < 100) 10921 add_new_state = false; 10922 goto miss; 10923 } 10924 if (states_equal(env, &sl->state, cur)) { 10925 sl->hit_cnt++; 10926 /* reached equivalent register/stack state, 10927 * prune the search. 10928 * Registers read by the continuation are read by us. 10929 * If we have any write marks in env->cur_state, they 10930 * will prevent corresponding reads in the continuation 10931 * from reaching our parent (an explored_state).
Our 10932 * own state will get the read marks recorded, but 10933 * they'll be immediately forgotten as we're pruning 10934 * this state and will pop a new one. 10935 */ 10936 err = propagate_liveness(env, &sl->state, cur); 10937 10938 /* if the previous state reached the exit with precision and 10939 * the current state is equivalent to it (except precision marks), 10940 * the precision needs to be propagated back into 10941 * the current state. 10942 */ 10943 err = err ? : push_jmp_history(env, cur); 10944 err = err ? : propagate_precision(env, &sl->state); 10945 if (err) 10946 return err; 10947 return 1; 10948 } 10949 miss: 10950 /* when the new state is not going to be added do not increase the miss count. 10951 * Otherwise several loop iterations will remove the state 10952 * recorded earlier. The goal of these heuristics is to have 10953 * states from some iterations of the loop (some in the beginning 10954 * and some at the end) to help pruning. 10955 */ 10956 if (add_new_state) 10957 sl->miss_cnt++; 10958 /* heuristic to determine whether this state is beneficial 10959 * to keep checking from the state equivalence point of view. 10960 * Higher numbers increase max_states_per_insn and verification time, 10961 * but do not meaningfully decrease insn_processed. 10962 */ 10963 if (sl->miss_cnt > sl->hit_cnt * 3 + 3) { 10964 /* the state is unlikely to be useful. Remove it to 10965 * speed up verification 10966 */ 10967 *pprev = sl->next; 10968 if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE) { 10969 u32 br = sl->state.branches; 10970 10971 WARN_ONCE(br, 10972 "BUG live_done but branches_to_explore %d\n", 10973 br); 10974 free_verifier_state(&sl->state, false); 10975 kfree(sl); 10976 env->peak_states--; 10977 } else { 10978 /* cannot free this state, since the parentage chain may 10979 * walk it later. Add it to the free_list instead, to 10980 * be freed at the end of verification 10981 */ 10982 sl->next = env->free_list; 10983 env->free_list = sl; 10984 } 10985 sl = *pprev; 10986 continue; 10987 } 10988 next: 10989 pprev = &sl->next; 10990 sl = *pprev; 10991 } 10992 10993 if (env->max_states_per_insn < states_cnt) 10994 env->max_states_per_insn = states_cnt; 10995 10996 if (!env->bpf_capable && states_cnt > BPF_COMPLEXITY_LIMIT_STATES) 10997 return push_jmp_history(env, cur); 10998 10999 if (!add_new_state) 11000 return push_jmp_history(env, cur); 11001 11002 /* There were no equivalent states, remember the current one. 11003 * Technically the current state is not proven to be safe yet, 11004 * but it will either reach the outermost bpf_exit (which means it's safe) 11005 * or it will be rejected. When there are no loops the verifier won't be 11006 * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx) 11007 * again on the way to bpf_exit. 11008 * When looping the sl->state.branches will be > 0 and this state 11009 * will not be considered for equivalence until branches == 0.
11010 */ 11011 new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL); 11012 if (!new_sl) 11013 return -ENOMEM; 11014 env->total_states++; 11015 env->peak_states++; 11016 env->prev_jmps_processed = env->jmps_processed; 11017 env->prev_insn_processed = env->insn_processed; 11018 11019 /* add new state to the head of linked list */ 11020 new = &new_sl->state; 11021 err = copy_verifier_state(new, cur); 11022 if (err) { 11023 free_verifier_state(new, false); 11024 kfree(new_sl); 11025 return err; 11026 } 11027 new->insn_idx = insn_idx; 11028 WARN_ONCE(new->branches != 1, 11029 "BUG is_state_visited:branches_to_explore=%d insn %d\n", new->branches, insn_idx); 11030 11031 cur->parent = new; 11032 cur->first_insn_idx = insn_idx; 11033 clear_jmp_history(cur); 11034 new_sl->next = *explored_state(env, insn_idx); 11035 *explored_state(env, insn_idx) = new_sl; 11036 /* connect new state to parentage chain. Current frame needs all 11037 * registers connected. Only r6 - r9 of the callers are alive (pushed 11038 * to the stack implicitly by JITs) so in callers' frames connect just 11039 * r6 - r9 as an optimization. Callers will have r1 - r5 connected to 11040 * the state of the call instruction (with WRITTEN set), and r0 comes 11041 * from callee with its full parentage chain, anyway. 11042 */ 11043 /* clear write marks in current state: the writes we did are not writes 11044 * our child did, so they don't screen off its reads from us. 11045 * (There are no read marks in current state, because reads always mark 11046 * their parent and current state never has children yet. Only 11047 * explored_states can get read marks.) 11048 */ 11049 for (j = 0; j <= cur->curframe; j++) { 11050 for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) 11051 cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i]; 11052 for (i = 0; i < BPF_REG_FP; i++) 11053 cur->frame[j]->regs[i].live = REG_LIVE_NONE; 11054 } 11055 11056 /* all stack frames are accessible from callee, clear them all */ 11057 for (j = 0; j <= cur->curframe; j++) { 11058 struct bpf_func_state *frame = cur->frame[j]; 11059 struct bpf_func_state *newframe = new->frame[j]; 11060 11061 for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) { 11062 frame->stack[i].spilled_ptr.live = REG_LIVE_NONE; 11063 frame->stack[i].spilled_ptr.parent = 11064 &newframe->stack[i].spilled_ptr; 11065 } 11066 } 11067 return 0; 11068 } 11069 11070 /* Return true if it's OK to have the same insn return a different type. */ 11071 static bool reg_type_mismatch_ok(enum bpf_reg_type type) 11072 { 11073 switch (type) { 11074 case PTR_TO_CTX: 11075 case PTR_TO_SOCKET: 11076 case PTR_TO_SOCKET_OR_NULL: 11077 case PTR_TO_SOCK_COMMON: 11078 case PTR_TO_SOCK_COMMON_OR_NULL: 11079 case PTR_TO_TCP_SOCK: 11080 case PTR_TO_TCP_SOCK_OR_NULL: 11081 case PTR_TO_XDP_SOCK: 11082 case PTR_TO_BTF_ID: 11083 case PTR_TO_BTF_ID_OR_NULL: 11084 return false; 11085 default: 11086 return true; 11087 } 11088 } 11089 11090 /* If an instruction was previously used with particular pointer types, then we 11091 * need to be careful to avoid cases such as the below, where it may be ok 11092 * for one branch accessing the pointer, but not ok for the other branch: 11093 * 11094 * R1 = sock_ptr 11095 * goto X; 11096 * ... 11097 * R1 = some_other_valid_ptr; 11098 * goto X; 11099 * ... 
11100 * R2 = *(u32 *)(R1 + 0); 11101 */ 11102 static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev) 11103 { 11104 return src != prev && (!reg_type_mismatch_ok(src) || 11105 !reg_type_mismatch_ok(prev)); 11106 } 11107 11108 static int do_check(struct bpf_verifier_env *env) 11109 { 11110 bool pop_log = !(env->log.level & BPF_LOG_LEVEL2); 11111 struct bpf_verifier_state *state = env->cur_state; 11112 struct bpf_insn *insns = env->prog->insnsi; 11113 struct bpf_reg_state *regs; 11114 int insn_cnt = env->prog->len; 11115 bool do_print_state = false; 11116 int prev_insn_idx = -1; 11117 11118 for (;;) { 11119 struct bpf_insn *insn; 11120 u8 class; 11121 int err; 11122 11123 env->prev_insn_idx = prev_insn_idx; 11124 if (env->insn_idx >= insn_cnt) { 11125 verbose(env, "invalid insn idx %d insn_cnt %d\n", 11126 env->insn_idx, insn_cnt); 11127 return -EFAULT; 11128 } 11129 11130 insn = &insns[env->insn_idx]; 11131 class = BPF_CLASS(insn->code); 11132 11133 if (++env->insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) { 11134 verbose(env, 11135 "BPF program is too large. Processed %d insn\n", 11136 env->insn_processed); 11137 return -E2BIG; 11138 } 11139 11140 err = is_state_visited(env, env->insn_idx); 11141 if (err < 0) 11142 return err; 11143 if (err == 1) { 11144 /* found equivalent state, can prune the search */ 11145 if (env->log.level & BPF_LOG_LEVEL) { 11146 if (do_print_state) 11147 verbose(env, "\nfrom %d to %d%s: safe\n", 11148 env->prev_insn_idx, env->insn_idx, 11149 env->cur_state->speculative ? 11150 " (speculative execution)" : ""); 11151 else 11152 verbose(env, "%d: safe\n", env->insn_idx); 11153 } 11154 goto process_bpf_exit; 11155 } 11156 11157 if (signal_pending(current)) 11158 return -EAGAIN; 11159 11160 if (need_resched()) 11161 cond_resched(); 11162 11163 if (env->log.level & BPF_LOG_LEVEL2 || 11164 (env->log.level & BPF_LOG_LEVEL && do_print_state)) { 11165 if (env->log.level & BPF_LOG_LEVEL2) 11166 verbose(env, "%d:", env->insn_idx); 11167 else 11168 verbose(env, "\nfrom %d to %d%s:", 11169 env->prev_insn_idx, env->insn_idx, 11170 env->cur_state->speculative ? 
11171 " (speculative execution)" : ""); 11172 print_verifier_state(env, state->frame[state->curframe]); 11173 do_print_state = false; 11174 } 11175 11176 if (env->log.level & BPF_LOG_LEVEL) { 11177 const struct bpf_insn_cbs cbs = { 11178 .cb_call = disasm_kfunc_name, 11179 .cb_print = verbose, 11180 .private_data = env, 11181 }; 11182 11183 verbose_linfo(env, env->insn_idx, "; "); 11184 verbose(env, "%d: ", env->insn_idx); 11185 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks); 11186 } 11187 11188 if (bpf_prog_is_dev_bound(env->prog->aux)) { 11189 err = bpf_prog_offload_verify_insn(env, env->insn_idx, 11190 env->prev_insn_idx); 11191 if (err) 11192 return err; 11193 } 11194 11195 regs = cur_regs(env); 11196 sanitize_mark_insn_seen(env); 11197 prev_insn_idx = env->insn_idx; 11198 11199 if (class == BPF_ALU || class == BPF_ALU64) { 11200 err = check_alu_op(env, insn); 11201 if (err) 11202 return err; 11203 11204 } else if (class == BPF_LDX) { 11205 enum bpf_reg_type *prev_src_type, src_reg_type; 11206 11207 /* check for reserved fields is already done */ 11208 11209 /* check src operand */ 11210 err = check_reg_arg(env, insn->src_reg, SRC_OP); 11211 if (err) 11212 return err; 11213 11214 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); 11215 if (err) 11216 return err; 11217 11218 src_reg_type = regs[insn->src_reg].type; 11219 11220 /* check that memory (src_reg + off) is readable, 11221 * the state of dst_reg will be updated by this func 11222 */ 11223 err = check_mem_access(env, env->insn_idx, insn->src_reg, 11224 insn->off, BPF_SIZE(insn->code), 11225 BPF_READ, insn->dst_reg, false); 11226 if (err) 11227 return err; 11228 11229 prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type; 11230 11231 if (*prev_src_type == NOT_INIT) { 11232 /* saw a valid insn 11233 * dst_reg = *(u32 *)(src_reg + off) 11234 * save type to validate intersecting paths 11235 */ 11236 *prev_src_type = src_reg_type; 11237 11238 } else if (reg_type_mismatch(src_reg_type, *prev_src_type)) { 11239 /* ABuser program is trying to use the same insn 11240 * dst_reg = *(u32*) (src_reg + off) 11241 * with different pointer types: 11242 * src_reg == ctx in one branch and 11243 * src_reg == stack|map in some other branch. 11244 * Reject it. 
11245 */ 11246 verbose(env, "same insn cannot be used with different pointers\n"); 11247 return -EINVAL; 11248 } 11249 11250 } else if (class == BPF_STX) { 11251 enum bpf_reg_type *prev_dst_type, dst_reg_type; 11252 11253 if (BPF_MODE(insn->code) == BPF_ATOMIC) { 11254 err = check_atomic(env, env->insn_idx, insn); 11255 if (err) 11256 return err; 11257 env->insn_idx++; 11258 continue; 11259 } 11260 11261 if (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0) { 11262 verbose(env, "BPF_STX uses reserved fields\n"); 11263 return -EINVAL; 11264 } 11265 11266 /* check src1 operand */ 11267 err = check_reg_arg(env, insn->src_reg, SRC_OP); 11268 if (err) 11269 return err; 11270 /* check src2 operand */ 11271 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 11272 if (err) 11273 return err; 11274 11275 dst_reg_type = regs[insn->dst_reg].type; 11276 11277 /* check that memory (dst_reg + off) is writeable */ 11278 err = check_mem_access(env, env->insn_idx, insn->dst_reg, 11279 insn->off, BPF_SIZE(insn->code), 11280 BPF_WRITE, insn->src_reg, false); 11281 if (err) 11282 return err; 11283 11284 prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type; 11285 11286 if (*prev_dst_type == NOT_INIT) { 11287 *prev_dst_type = dst_reg_type; 11288 } else if (reg_type_mismatch(dst_reg_type, *prev_dst_type)) { 11289 verbose(env, "same insn cannot be used with different pointers\n"); 11290 return -EINVAL; 11291 } 11292 11293 } else if (class == BPF_ST) { 11294 if (BPF_MODE(insn->code) != BPF_MEM || 11295 insn->src_reg != BPF_REG_0) { 11296 verbose(env, "BPF_ST uses reserved fields\n"); 11297 return -EINVAL; 11298 } 11299 /* check src operand */ 11300 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 11301 if (err) 11302 return err; 11303 11304 if (is_ctx_reg(env, insn->dst_reg)) { 11305 verbose(env, "BPF_ST stores into R%d %s is not allowed\n", 11306 insn->dst_reg, 11307 reg_type_str[reg_state(env, insn->dst_reg)->type]); 11308 return -EACCES; 11309 } 11310 11311 /* check that memory (dst_reg + off) is writeable */ 11312 err = check_mem_access(env, env->insn_idx, insn->dst_reg, 11313 insn->off, BPF_SIZE(insn->code), 11314 BPF_WRITE, -1, false); 11315 if (err) 11316 return err; 11317 11318 } else if (class == BPF_JMP || class == BPF_JMP32) { 11319 u8 opcode = BPF_OP(insn->code); 11320 11321 env->jmps_processed++; 11322 if (opcode == BPF_CALL) { 11323 if (BPF_SRC(insn->code) != BPF_K || 11324 (insn->src_reg != BPF_PSEUDO_KFUNC_CALL 11325 && insn->off != 0) || 11326 (insn->src_reg != BPF_REG_0 && 11327 insn->src_reg != BPF_PSEUDO_CALL && 11328 insn->src_reg != BPF_PSEUDO_KFUNC_CALL) || 11329 insn->dst_reg != BPF_REG_0 || 11330 class == BPF_JMP32) { 11331 verbose(env, "BPF_CALL uses reserved fields\n"); 11332 return -EINVAL; 11333 } 11334 11335 if (env->cur_state->active_spin_lock && 11336 (insn->src_reg == BPF_PSEUDO_CALL || 11337 insn->imm != BPF_FUNC_spin_unlock)) { 11338 verbose(env, "function calls are not allowed while holding a lock\n"); 11339 return -EINVAL; 11340 } 11341 if (insn->src_reg == BPF_PSEUDO_CALL) 11342 err = check_func_call(env, insn, &env->insn_idx); 11343 else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) 11344 err = check_kfunc_call(env, insn); 11345 else 11346 err = check_helper_call(env, insn, &env->insn_idx); 11347 if (err) 11348 return err; 11349 } else if (opcode == BPF_JA) { 11350 if (BPF_SRC(insn->code) != BPF_K || 11351 insn->imm != 0 || 11352 insn->src_reg != BPF_REG_0 || 11353 insn->dst_reg != BPF_REG_0 || 11354 class == BPF_JMP32) { 11355 verbose(env, "BPF_JA uses reserved 
fields\n"); 11356 return -EINVAL; 11357 } 11358 11359 env->insn_idx += insn->off + 1; 11360 continue; 11361 11362 } else if (opcode == BPF_EXIT) { 11363 if (BPF_SRC(insn->code) != BPF_K || 11364 insn->imm != 0 || 11365 insn->src_reg != BPF_REG_0 || 11366 insn->dst_reg != BPF_REG_0 || 11367 class == BPF_JMP32) { 11368 verbose(env, "BPF_EXIT uses reserved fields\n"); 11369 return -EINVAL; 11370 } 11371 11372 if (env->cur_state->active_spin_lock) { 11373 verbose(env, "bpf_spin_unlock is missing\n"); 11374 return -EINVAL; 11375 } 11376 11377 if (state->curframe) { 11378 /* exit from nested function */ 11379 err = prepare_func_exit(env, &env->insn_idx); 11380 if (err) 11381 return err; 11382 do_print_state = true; 11383 continue; 11384 } 11385 11386 err = check_reference_leak(env); 11387 if (err) 11388 return err; 11389 11390 err = check_return_code(env); 11391 if (err) 11392 return err; 11393 process_bpf_exit: 11394 update_branch_counts(env, env->cur_state); 11395 err = pop_stack(env, &prev_insn_idx, 11396 &env->insn_idx, pop_log); 11397 if (err < 0) { 11398 if (err != -ENOENT) 11399 return err; 11400 break; 11401 } else { 11402 do_print_state = true; 11403 continue; 11404 } 11405 } else { 11406 err = check_cond_jmp_op(env, insn, &env->insn_idx); 11407 if (err) 11408 return err; 11409 } 11410 } else if (class == BPF_LD) { 11411 u8 mode = BPF_MODE(insn->code); 11412 11413 if (mode == BPF_ABS || mode == BPF_IND) { 11414 err = check_ld_abs(env, insn); 11415 if (err) 11416 return err; 11417 11418 } else if (mode == BPF_IMM) { 11419 err = check_ld_imm(env, insn); 11420 if (err) 11421 return err; 11422 11423 env->insn_idx++; 11424 sanitize_mark_insn_seen(env); 11425 } else { 11426 verbose(env, "invalid BPF_LD mode\n"); 11427 return -EINVAL; 11428 } 11429 } else { 11430 verbose(env, "unknown insn class %d\n", class); 11431 return -EINVAL; 11432 } 11433 11434 env->insn_idx++; 11435 } 11436 11437 return 0; 11438 } 11439 11440 static int find_btf_percpu_datasec(struct btf *btf) 11441 { 11442 const struct btf_type *t; 11443 const char *tname; 11444 int i, n; 11445 11446 /* 11447 * Both vmlinux and module each have their own ".data..percpu" 11448 * DATASECs in BTF. So for module's case, we need to skip vmlinux BTF 11449 * types to look at only module's own BTF types. 
11450 */ 11451 n = btf_nr_types(btf); 11452 if (btf_is_module(btf)) 11453 i = btf_nr_types(btf_vmlinux); 11454 else 11455 i = 1; 11456 11457 for(; i < n; i++) { 11458 t = btf_type_by_id(btf, i); 11459 if (BTF_INFO_KIND(t->info) != BTF_KIND_DATASEC) 11460 continue; 11461 11462 tname = btf_name_by_offset(btf, t->name_off); 11463 if (!strcmp(tname, ".data..percpu")) 11464 return i; 11465 } 11466 11467 return -ENOENT; 11468 } 11469 11470 /* replace pseudo btf_id with kernel symbol address */ 11471 static int check_pseudo_btf_id(struct bpf_verifier_env *env, 11472 struct bpf_insn *insn, 11473 struct bpf_insn_aux_data *aux) 11474 { 11475 const struct btf_var_secinfo *vsi; 11476 const struct btf_type *datasec; 11477 struct btf_mod_pair *btf_mod; 11478 const struct btf_type *t; 11479 const char *sym_name; 11480 bool percpu = false; 11481 u32 type, id = insn->imm; 11482 struct btf *btf; 11483 s32 datasec_id; 11484 u64 addr; 11485 int i, btf_fd, err; 11486 11487 btf_fd = insn[1].imm; 11488 if (btf_fd) { 11489 btf = btf_get_by_fd(btf_fd); 11490 if (IS_ERR(btf)) { 11491 verbose(env, "invalid module BTF object FD specified.\n"); 11492 return -EINVAL; 11493 } 11494 } else { 11495 if (!btf_vmlinux) { 11496 verbose(env, "kernel is missing BTF, make sure CONFIG_DEBUG_INFO_BTF=y is specified in Kconfig.\n"); 11497 return -EINVAL; 11498 } 11499 btf = btf_vmlinux; 11500 btf_get(btf); 11501 } 11502 11503 t = btf_type_by_id(btf, id); 11504 if (!t) { 11505 verbose(env, "ldimm64 insn specifies invalid btf_id %d.\n", id); 11506 err = -ENOENT; 11507 goto err_put; 11508 } 11509 11510 if (!btf_type_is_var(t)) { 11511 verbose(env, "pseudo btf_id %d in ldimm64 isn't KIND_VAR.\n", id); 11512 err = -EINVAL; 11513 goto err_put; 11514 } 11515 11516 sym_name = btf_name_by_offset(btf, t->name_off); 11517 addr = kallsyms_lookup_name(sym_name); 11518 if (!addr) { 11519 verbose(env, "ldimm64 failed to find the address for kernel symbol '%s'.\n", 11520 sym_name); 11521 err = -ENOENT; 11522 goto err_put; 11523 } 11524 11525 datasec_id = find_btf_percpu_datasec(btf); 11526 if (datasec_id > 0) { 11527 datasec = btf_type_by_id(btf, datasec_id); 11528 for_each_vsi(i, datasec, vsi) { 11529 if (vsi->type == id) { 11530 percpu = true; 11531 break; 11532 } 11533 } 11534 } 11535 11536 insn[0].imm = (u32)addr; 11537 insn[1].imm = addr >> 32; 11538 11539 type = t->type; 11540 t = btf_type_skip_modifiers(btf, type, NULL); 11541 if (percpu) { 11542 aux->btf_var.reg_type = PTR_TO_PERCPU_BTF_ID; 11543 aux->btf_var.btf = btf; 11544 aux->btf_var.btf_id = type; 11545 } else if (!btf_type_is_struct(t)) { 11546 const struct btf_type *ret; 11547 const char *tname; 11548 u32 tsize; 11549 11550 /* resolve the type size of ksym. 
*/ 11551 ret = btf_resolve_size(btf, t, &tsize); 11552 if (IS_ERR(ret)) { 11553 tname = btf_name_by_offset(btf, t->name_off); 11554 verbose(env, "ldimm64 unable to resolve the size of type '%s': %ld\n", 11555 tname, PTR_ERR(ret)); 11556 err = -EINVAL; 11557 goto err_put; 11558 } 11559 aux->btf_var.reg_type = PTR_TO_MEM; 11560 aux->btf_var.mem_size = tsize; 11561 } else { 11562 aux->btf_var.reg_type = PTR_TO_BTF_ID; 11563 aux->btf_var.btf = btf; 11564 aux->btf_var.btf_id = type; 11565 } 11566 11567 /* check whether we recorded this BTF (and maybe module) already */ 11568 for (i = 0; i < env->used_btf_cnt; i++) { 11569 if (env->used_btfs[i].btf == btf) { 11570 btf_put(btf); 11571 return 0; 11572 } 11573 } 11574 11575 if (env->used_btf_cnt >= MAX_USED_BTFS) { 11576 err = -E2BIG; 11577 goto err_put; 11578 } 11579 11580 btf_mod = &env->used_btfs[env->used_btf_cnt]; 11581 btf_mod->btf = btf; 11582 btf_mod->module = NULL; 11583 11584 /* if we reference variables from a kernel module, bump its refcount */ 11585 if (btf_is_module(btf)) { 11586 btf_mod->module = btf_try_get_module(btf); 11587 if (!btf_mod->module) { 11588 err = -ENXIO; 11589 goto err_put; 11590 } 11591 } 11592 11593 env->used_btf_cnt++; 11594 11595 return 0; 11596 err_put: 11597 btf_put(btf); 11598 return err; 11599 } 11600 11601 static int check_map_prealloc(struct bpf_map *map) 11602 { 11603 return (map->map_type != BPF_MAP_TYPE_HASH && 11604 map->map_type != BPF_MAP_TYPE_PERCPU_HASH && 11605 map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) || 11606 !(map->map_flags & BPF_F_NO_PREALLOC); 11607 } 11608 11609 static bool is_tracing_prog_type(enum bpf_prog_type type) 11610 { 11611 switch (type) { 11612 case BPF_PROG_TYPE_KPROBE: 11613 case BPF_PROG_TYPE_TRACEPOINT: 11614 case BPF_PROG_TYPE_PERF_EVENT: 11615 case BPF_PROG_TYPE_RAW_TRACEPOINT: 11616 return true; 11617 default: 11618 return false; 11619 } 11620 } 11621 11622 static bool is_preallocated_map(struct bpf_map *map) 11623 { 11624 if (!check_map_prealloc(map)) 11625 return false; 11626 if (map->inner_map_meta && !check_map_prealloc(map->inner_map_meta)) 11627 return false; 11628 return true; 11629 } 11630 11631 static int check_map_prog_compatibility(struct bpf_verifier_env *env, 11632 struct bpf_map *map, 11633 struct bpf_prog *prog) 11634 11635 { 11636 enum bpf_prog_type prog_type = resolve_prog_type(prog); 11637 /* 11638 * Validate that trace type programs use preallocated hash maps. 11639 * 11640 * For programs attached to PERF events this is mandatory as the 11641 * perf NMI can hit any arbitrary code sequence. 11642 * 11643 * All other trace types using run-time allocated hash maps are unsafe as 11644 * well because tracepoints or kprobes can be inside locked regions 11645 * of the memory allocator or at a place where a recursion into the 11646 * memory allocator would see inconsistent state. 11647 * 11648 * On RT enabled kernels run-time allocation of all trace type 11649 * programs is strictly prohibited due to lock type constraints. On 11650 * !RT kernels it is allowed for backwards compatibility reasons for 11651 * now, but warnings are emitted so developers are made aware of 11652 * the unsafety and can fix their programs before this is enforced.
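 * From the program author's side this typically means a libbpf-style map
 * definition without BPF_F_NO_PREALLOC (illustrative sketch only; the map
 * name and key/value types here are arbitrary):
 *
 *   struct {
 *           __uint(type, BPF_MAP_TYPE_HASH);
 *           __uint(max_entries, 1024);
 *           __type(key, u32);
 *           __type(value, u64);
 *   } counts SEC(".maps");
 *
 * Hash maps are preallocated by default; adding
 * __uint(map_flags, BPF_F_NO_PREALLOC) would make the map run-time allocated
 * and trip the checks below for tracing programs.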
11653 */ 11654 if (is_tracing_prog_type(prog_type) && !is_preallocated_map(map)) { 11655 if (prog_type == BPF_PROG_TYPE_PERF_EVENT) { 11656 verbose(env, "perf_event programs can only use preallocated hash map\n"); 11657 return -EINVAL; 11658 } 11659 if (IS_ENABLED(CONFIG_PREEMPT_RT)) { 11660 verbose(env, "trace type programs can only use preallocated hash map\n"); 11661 return -EINVAL; 11662 } 11663 WARN_ONCE(1, "trace type BPF program uses run-time allocation\n"); 11664 verbose(env, "trace type programs with run-time allocated hash maps are unsafe. Switch to preallocated hash maps.\n"); 11665 } 11666 11667 if (map_value_has_spin_lock(map)) { 11668 if (prog_type == BPF_PROG_TYPE_SOCKET_FILTER) { 11669 verbose(env, "socket filter progs cannot use bpf_spin_lock yet\n"); 11670 return -EINVAL; 11671 } 11672 11673 if (is_tracing_prog_type(prog_type)) { 11674 verbose(env, "tracing progs cannot use bpf_spin_lock yet\n"); 11675 return -EINVAL; 11676 } 11677 11678 if (prog->aux->sleepable) { 11679 verbose(env, "sleepable progs cannot use bpf_spin_lock yet\n"); 11680 return -EINVAL; 11681 } 11682 } 11683 11684 if (map_value_has_timer(map)) { 11685 if (is_tracing_prog_type(prog_type)) { 11686 verbose(env, "tracing progs cannot use bpf_timer yet\n"); 11687 return -EINVAL; 11688 } 11689 } 11690 11691 if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) && 11692 !bpf_offload_prog_map_match(prog, map)) { 11693 verbose(env, "offload device mismatch between prog and map\n"); 11694 return -EINVAL; 11695 } 11696 11697 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { 11698 verbose(env, "bpf_struct_ops map cannot be used in prog\n"); 11699 return -EINVAL; 11700 } 11701 11702 if (prog->aux->sleepable) 11703 switch (map->map_type) { 11704 case BPF_MAP_TYPE_HASH: 11705 case BPF_MAP_TYPE_LRU_HASH: 11706 case BPF_MAP_TYPE_ARRAY: 11707 case BPF_MAP_TYPE_PERCPU_HASH: 11708 case BPF_MAP_TYPE_PERCPU_ARRAY: 11709 case BPF_MAP_TYPE_LRU_PERCPU_HASH: 11710 case BPF_MAP_TYPE_ARRAY_OF_MAPS: 11711 case BPF_MAP_TYPE_HASH_OF_MAPS: 11712 if (!is_preallocated_map(map)) { 11713 verbose(env, 11714 "Sleepable programs can only use preallocated maps\n"); 11715 return -EINVAL; 11716 } 11717 break; 11718 case BPF_MAP_TYPE_RINGBUF: 11719 break; 11720 default: 11721 verbose(env, 11722 "Sleepable programs can only use array, hash, and ringbuf maps\n"); 11723 return -EINVAL; 11724 } 11725 11726 return 0; 11727 } 11728 11729 static bool bpf_map_is_cgroup_storage(struct bpf_map *map) 11730 { 11731 return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE || 11732 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE); 11733 } 11734 11735 /* find and rewrite pseudo imm in ld_imm64 instructions: 11736 * 11737 * 1. if it accesses map FD, replace it with actual map pointer. 11738 * 2. if it accesses btf_id of a VAR, replace it with pointer to the var. 11739 * 11740 * NOTE: btf_vmlinux is required for converting pseudo btf_id. 
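 * For example, a map access emitted as
 *   BPF_LD_MAP_FD(BPF_REG_1, map_fd)
 * is a single ld_imm64 (occupying two insn slots) with
 * src_reg == BPF_PSEUDO_MAP_FD and the user-supplied map FD in insn[0].imm.
 * Below, that FD is looked up and insn[0].imm/insn[1].imm are rewritten to
 * hold the low/high 32 bits of the kernel's 'struct bpf_map *' address.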
11741 */ 11742 static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env) 11743 { 11744 struct bpf_insn *insn = env->prog->insnsi; 11745 int insn_cnt = env->prog->len; 11746 int i, j, err; 11747 11748 err = bpf_prog_calc_tag(env->prog); 11749 if (err) 11750 return err; 11751 11752 for (i = 0; i < insn_cnt; i++, insn++) { 11753 if (BPF_CLASS(insn->code) == BPF_LDX && 11754 (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) { 11755 verbose(env, "BPF_LDX uses reserved fields\n"); 11756 return -EINVAL; 11757 } 11758 11759 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) { 11760 struct bpf_insn_aux_data *aux; 11761 struct bpf_map *map; 11762 struct fd f; 11763 u64 addr; 11764 u32 fd; 11765 11766 if (i == insn_cnt - 1 || insn[1].code != 0 || 11767 insn[1].dst_reg != 0 || insn[1].src_reg != 0 || 11768 insn[1].off != 0) { 11769 verbose(env, "invalid bpf_ld_imm64 insn\n"); 11770 return -EINVAL; 11771 } 11772 11773 if (insn[0].src_reg == 0) 11774 /* valid generic load 64-bit imm */ 11775 goto next_insn; 11776 11777 if (insn[0].src_reg == BPF_PSEUDO_BTF_ID) { 11778 aux = &env->insn_aux_data[i]; 11779 err = check_pseudo_btf_id(env, insn, aux); 11780 if (err) 11781 return err; 11782 goto next_insn; 11783 } 11784 11785 if (insn[0].src_reg == BPF_PSEUDO_FUNC) { 11786 aux = &env->insn_aux_data[i]; 11787 aux->ptr_type = PTR_TO_FUNC; 11788 goto next_insn; 11789 } 11790 11791 /* In final convert_pseudo_ld_imm64() step, this is 11792 * converted into regular 64-bit imm load insn. 11793 */ 11794 switch (insn[0].src_reg) { 11795 case BPF_PSEUDO_MAP_VALUE: 11796 case BPF_PSEUDO_MAP_IDX_VALUE: 11797 break; 11798 case BPF_PSEUDO_MAP_FD: 11799 case BPF_PSEUDO_MAP_IDX: 11800 if (insn[1].imm == 0) 11801 break; 11802 fallthrough; 11803 default: 11804 verbose(env, "unrecognized bpf_ld_imm64 insn\n"); 11805 return -EINVAL; 11806 } 11807 11808 switch (insn[0].src_reg) { 11809 case BPF_PSEUDO_MAP_IDX_VALUE: 11810 case BPF_PSEUDO_MAP_IDX: 11811 if (bpfptr_is_null(env->fd_array)) { 11812 verbose(env, "fd_idx without fd_array is invalid\n"); 11813 return -EPROTO; 11814 } 11815 if (copy_from_bpfptr_offset(&fd, env->fd_array, 11816 insn[0].imm * sizeof(fd), 11817 sizeof(fd))) 11818 return -EFAULT; 11819 break; 11820 default: 11821 fd = insn[0].imm; 11822 break; 11823 } 11824 11825 f = fdget(fd); 11826 map = __bpf_map_get(f); 11827 if (IS_ERR(map)) { 11828 verbose(env, "fd %d is not pointing to valid bpf_map\n", 11829 insn[0].imm); 11830 return PTR_ERR(map); 11831 } 11832 11833 err = check_map_prog_compatibility(env, map, env->prog); 11834 if (err) { 11835 fdput(f); 11836 return err; 11837 } 11838 11839 aux = &env->insn_aux_data[i]; 11840 if (insn[0].src_reg == BPF_PSEUDO_MAP_FD || 11841 insn[0].src_reg == BPF_PSEUDO_MAP_IDX) { 11842 addr = (unsigned long)map; 11843 } else { 11844 u32 off = insn[1].imm; 11845 11846 if (off >= BPF_MAX_VAR_OFF) { 11847 verbose(env, "direct value offset of %u is not allowed\n", off); 11848 fdput(f); 11849 return -EINVAL; 11850 } 11851 11852 if (!map->ops->map_direct_value_addr) { 11853 verbose(env, "no direct value access support for this map type\n"); 11854 fdput(f); 11855 return -EINVAL; 11856 } 11857 11858 err = map->ops->map_direct_value_addr(map, &addr, off); 11859 if (err) { 11860 verbose(env, "invalid access to map value pointer, value_size=%u off=%u\n", 11861 map->value_size, off); 11862 fdput(f); 11863 return err; 11864 } 11865 11866 aux->map_off = off; 11867 addr += off; 11868 } 11869 11870 insn[0].imm = (u32)addr; 11871 insn[1].imm = addr >> 32; 11872 11873 /* check whether we 
recorded this map already */ 11874 for (j = 0; j < env->used_map_cnt; j++) { 11875 if (env->used_maps[j] == map) { 11876 aux->map_index = j; 11877 fdput(f); 11878 goto next_insn; 11879 } 11880 } 11881 11882 if (env->used_map_cnt >= MAX_USED_MAPS) { 11883 fdput(f); 11884 return -E2BIG; 11885 } 11886 11887 /* hold the map. If the program is rejected by the verifier, 11888 * the map will be released by release_maps() or it 11889 * will be used by the valid program until it's unloaded 11890 * and all maps are released in free_used_maps() 11891 */ 11892 bpf_map_inc(map); 11893 11894 aux->map_index = env->used_map_cnt; 11895 env->used_maps[env->used_map_cnt++] = map; 11896 11897 if (bpf_map_is_cgroup_storage(map) && 11898 bpf_cgroup_storage_assign(env->prog->aux, map)) { 11899 verbose(env, "only one cgroup storage of each type is allowed\n"); 11900 fdput(f); 11901 return -EBUSY; 11902 } 11903 11904 fdput(f); 11905 next_insn: 11906 insn++; 11907 i++; 11908 continue; 11909 } 11910 11911 /* Basic sanity check before we invest more work here. */ 11912 if (!bpf_opcode_in_insntable(insn->code)) { 11913 verbose(env, "unknown opcode %02x\n", insn->code); 11914 return -EINVAL; 11915 } 11916 } 11917 11918 /* now all pseudo BPF_LD_IMM64 instructions load a valid 11919 * 'struct bpf_map *' into a register instead of a user map_fd. 11920 * These pointers will be used later by the verifier to validate map access. 11921 */ 11922 return 0; 11923 } 11924 11925 /* drop refcnt of maps used by the rejected program */ 11926 static void release_maps(struct bpf_verifier_env *env) 11927 { 11928 __bpf_free_used_maps(env->prog->aux, env->used_maps, 11929 env->used_map_cnt); 11930 } 11931 11932 /* drop refcnt of btfs used by the rejected program */ 11933 static void release_btfs(struct bpf_verifier_env *env) 11934 { 11935 __bpf_free_used_btfs(env->prog->aux, env->used_btfs, 11936 env->used_btf_cnt); 11937 } 11938 11939 /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */ 11940 static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env) 11941 { 11942 struct bpf_insn *insn = env->prog->insnsi; 11943 int insn_cnt = env->prog->len; 11944 int i; 11945 11946 for (i = 0; i < insn_cnt; i++, insn++) { 11947 if (insn->code != (BPF_LD | BPF_IMM | BPF_DW)) 11948 continue; 11949 if (insn->src_reg == BPF_PSEUDO_FUNC) 11950 continue; 11951 insn->src_reg = 0; 11952 } 11953 } 11954 11955 /* a single env->prog->insnsi[off] instruction was replaced with the range 11956 * insnsi[off, off + cnt). Adjust the corresponding insn_aux_data by copying 11957 * [0, off) and [off, end) to new locations, so the patched range stays zeroed 11958 */ 11959 static void adjust_insn_aux_data(struct bpf_verifier_env *env, 11960 struct bpf_insn_aux_data *new_data, 11961 struct bpf_prog *new_prog, u32 off, u32 cnt) 11962 { 11963 struct bpf_insn_aux_data *old_data = env->insn_aux_data; 11964 struct bpf_insn *insn = new_prog->insnsi; 11965 u32 old_seen = old_data[off].seen; 11966 u32 prog_len; 11967 int i; 11968 11969 /* aux info at OFF always needs adjustment, no matter whether the fast path 11970 * (cnt == 1) is taken or not. There is no guarantee that the INSN at OFF is the 11971 * original insn in the old prog.
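 * As a sketch with made-up numbers: for off == 5 and cnt == 3,
 * old_data[0..4] is copied to new_data[0..4], old_data[5..] lands at
 * new_data[7..], and the two freshly created slots new_data[5..6] stay
 * zeroed except for 'seen' (inherited from the patched insn) and
 * 'zext_dst' (recomputed), as done right below.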
11972 */ 11973 old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1); 11974 11975 if (cnt == 1) 11976 return; 11977 prog_len = new_prog->len; 11978 11979 memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off); 11980 memcpy(new_data + off + cnt - 1, old_data + off, 11981 sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1)); 11982 for (i = off; i < off + cnt - 1; i++) { 11983 /* Expand insni[off]'s seen count to the patched range. */ 11984 new_data[i].seen = old_seen; 11985 new_data[i].zext_dst = insn_has_def32(env, insn + i); 11986 } 11987 env->insn_aux_data = new_data; 11988 vfree(old_data); 11989 } 11990 11991 static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len) 11992 { 11993 int i; 11994 11995 if (len == 1) 11996 return; 11997 /* NOTE: fake 'exit' subprog should be updated as well. */ 11998 for (i = 0; i <= env->subprog_cnt; i++) { 11999 if (env->subprog_info[i].start <= off) 12000 continue; 12001 env->subprog_info[i].start += len - 1; 12002 } 12003 } 12004 12005 static void adjust_poke_descs(struct bpf_prog *prog, u32 off, u32 len) 12006 { 12007 struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab; 12008 int i, sz = prog->aux->size_poke_tab; 12009 struct bpf_jit_poke_descriptor *desc; 12010 12011 for (i = 0; i < sz; i++) { 12012 desc = &tab[i]; 12013 if (desc->insn_idx <= off) 12014 continue; 12015 desc->insn_idx += len - 1; 12016 } 12017 } 12018 12019 static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off, 12020 const struct bpf_insn *patch, u32 len) 12021 { 12022 struct bpf_prog *new_prog; 12023 struct bpf_insn_aux_data *new_data = NULL; 12024 12025 if (len > 1) { 12026 new_data = vzalloc(array_size(env->prog->len + len - 1, 12027 sizeof(struct bpf_insn_aux_data))); 12028 if (!new_data) 12029 return NULL; 12030 } 12031 12032 new_prog = bpf_patch_insn_single(env->prog, off, patch, len); 12033 if (IS_ERR(new_prog)) { 12034 if (PTR_ERR(new_prog) == -ERANGE) 12035 verbose(env, 12036 "insn %d cannot be patched due to 16-bit range\n", 12037 env->insn_aux_data[off].orig_idx); 12038 vfree(new_data); 12039 return NULL; 12040 } 12041 adjust_insn_aux_data(env, new_data, new_prog, off, len); 12042 adjust_subprog_starts(env, off, len); 12043 adjust_poke_descs(new_prog, off, len); 12044 return new_prog; 12045 } 12046 12047 static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env, 12048 u32 off, u32 cnt) 12049 { 12050 int i, j; 12051 12052 /* find first prog starting at or after off (first to remove) */ 12053 for (i = 0; i < env->subprog_cnt; i++) 12054 if (env->subprog_info[i].start >= off) 12055 break; 12056 /* find first prog starting at or after off + cnt (first to stay) */ 12057 for (j = i; j < env->subprog_cnt; j++) 12058 if (env->subprog_info[j].start >= off + cnt) 12059 break; 12060 /* if j doesn't start exactly at off + cnt, we are just removing 12061 * the front of previous prog 12062 */ 12063 if (env->subprog_info[j].start != off + cnt) 12064 j--; 12065 12066 if (j > i) { 12067 struct bpf_prog_aux *aux = env->prog->aux; 12068 int move; 12069 12070 /* move fake 'exit' subprog as well */ 12071 move = env->subprog_cnt + 1 - j; 12072 12073 memmove(env->subprog_info + i, 12074 env->subprog_info + j, 12075 sizeof(*env->subprog_info) * move); 12076 env->subprog_cnt -= j - i; 12077 12078 /* remove func_info */ 12079 if (aux->func_info) { 12080 move = aux->func_info_cnt - j; 12081 12082 memmove(aux->func_info + i, 12083 aux->func_info + j, 12084 sizeof(*aux->func_info) * move); 12085 
aux->func_info_cnt -= j - i; 12086 /* func_info->insn_off is set after all code rewrites, 12087 * in adjust_btf_func() - no need to adjust 12088 */ 12089 } 12090 } else { 12091 /* convert i from "first prog to remove" to "first to adjust" */ 12092 if (env->subprog_info[i].start == off) 12093 i++; 12094 } 12095 12096 /* update fake 'exit' subprog as well */ 12097 for (; i <= env->subprog_cnt; i++) 12098 env->subprog_info[i].start -= cnt; 12099 12100 return 0; 12101 } 12102 12103 static int bpf_adj_linfo_after_remove(struct bpf_verifier_env *env, u32 off, 12104 u32 cnt) 12105 { 12106 struct bpf_prog *prog = env->prog; 12107 u32 i, l_off, l_cnt, nr_linfo; 12108 struct bpf_line_info *linfo; 12109 12110 nr_linfo = prog->aux->nr_linfo; 12111 if (!nr_linfo) 12112 return 0; 12113 12114 linfo = prog->aux->linfo; 12115 12116 /* find first line info to remove, count lines to be removed */ 12117 for (i = 0; i < nr_linfo; i++) 12118 if (linfo[i].insn_off >= off) 12119 break; 12120 12121 l_off = i; 12122 l_cnt = 0; 12123 for (; i < nr_linfo; i++) 12124 if (linfo[i].insn_off < off + cnt) 12125 l_cnt++; 12126 else 12127 break; 12128 12129 /* First live insn doesn't match first live linfo, it needs to "inherit" 12130 * last removed linfo. prog is already modified, so prog->len == off 12131 * means no live instructions after (tail of the program was removed). 12132 */ 12133 if (prog->len != off && l_cnt && 12134 (i == nr_linfo || linfo[i].insn_off != off + cnt)) { 12135 l_cnt--; 12136 linfo[--i].insn_off = off + cnt; 12137 } 12138 12139 /* remove the line info which refer to the removed instructions */ 12140 if (l_cnt) { 12141 memmove(linfo + l_off, linfo + i, 12142 sizeof(*linfo) * (nr_linfo - i)); 12143 12144 prog->aux->nr_linfo -= l_cnt; 12145 nr_linfo = prog->aux->nr_linfo; 12146 } 12147 12148 /* pull all linfo[i].insn_off >= off + cnt in by cnt */ 12149 for (i = l_off; i < nr_linfo; i++) 12150 linfo[i].insn_off -= cnt; 12151 12152 /* fix up all subprogs (incl. 'exit') which start >= off */ 12153 for (i = 0; i <= env->subprog_cnt; i++) 12154 if (env->subprog_info[i].linfo_idx > l_off) { 12155 /* program may have started in the removed region but 12156 * may not be fully removed 12157 */ 12158 if (env->subprog_info[i].linfo_idx >= l_off + l_cnt) 12159 env->subprog_info[i].linfo_idx -= l_cnt; 12160 else 12161 env->subprog_info[i].linfo_idx = l_off; 12162 } 12163 12164 return 0; 12165 } 12166 12167 static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt) 12168 { 12169 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; 12170 unsigned int orig_prog_len = env->prog->len; 12171 int err; 12172 12173 if (bpf_prog_is_dev_bound(env->prog->aux)) 12174 bpf_prog_offload_remove_insns(env, off, cnt); 12175 12176 err = bpf_remove_insns(env->prog, off, cnt); 12177 if (err) 12178 return err; 12179 12180 err = adjust_subprog_starts_after_remove(env, off, cnt); 12181 if (err) 12182 return err; 12183 12184 err = bpf_adj_linfo_after_remove(env, off, cnt); 12185 if (err) 12186 return err; 12187 12188 memmove(aux_data + off, aux_data + off + cnt, 12189 sizeof(*aux_data) * (orig_prog_len - off - cnt)); 12190 12191 return 0; 12192 } 12193 12194 /* The verifier does more data flow analysis than llvm and will not 12195 * explore branches that are dead at run time. Malicious programs can 12196 * have dead code too. Therefore replace all dead at-run-time code 12197 * with 'ja -1'. 12198 * 12199 * Just nops are not optimal, e.g. 
if they would sit at the end of the 12200 * program and through another bug we would manage to jump there, then 12201 * we'd execute beyond program memory otherwise. Returning exception 12202 * code also wouldn't work since we can have subprogs where the dead 12203 * code could be located. 12204 */ 12205 static void sanitize_dead_code(struct bpf_verifier_env *env) 12206 { 12207 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; 12208 struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1); 12209 struct bpf_insn *insn = env->prog->insnsi; 12210 const int insn_cnt = env->prog->len; 12211 int i; 12212 12213 for (i = 0; i < insn_cnt; i++) { 12214 if (aux_data[i].seen) 12215 continue; 12216 memcpy(insn + i, &trap, sizeof(trap)); 12217 aux_data[i].zext_dst = false; 12218 } 12219 } 12220 12221 static bool insn_is_cond_jump(u8 code) 12222 { 12223 u8 op; 12224 12225 if (BPF_CLASS(code) == BPF_JMP32) 12226 return true; 12227 12228 if (BPF_CLASS(code) != BPF_JMP) 12229 return false; 12230 12231 op = BPF_OP(code); 12232 return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL; 12233 } 12234 12235 static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env) 12236 { 12237 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; 12238 struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0); 12239 struct bpf_insn *insn = env->prog->insnsi; 12240 const int insn_cnt = env->prog->len; 12241 int i; 12242 12243 for (i = 0; i < insn_cnt; i++, insn++) { 12244 if (!insn_is_cond_jump(insn->code)) 12245 continue; 12246 12247 if (!aux_data[i + 1].seen) 12248 ja.off = insn->off; 12249 else if (!aux_data[i + 1 + insn->off].seen) 12250 ja.off = 0; 12251 else 12252 continue; 12253 12254 if (bpf_prog_is_dev_bound(env->prog->aux)) 12255 bpf_prog_offload_replace_insn(env, i, &ja); 12256 12257 memcpy(insn, &ja, sizeof(ja)); 12258 } 12259 } 12260 12261 static int opt_remove_dead_code(struct bpf_verifier_env *env) 12262 { 12263 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; 12264 int insn_cnt = env->prog->len; 12265 int i, err; 12266 12267 for (i = 0; i < insn_cnt; i++) { 12268 int j; 12269 12270 j = 0; 12271 while (i + j < insn_cnt && !aux_data[i + j].seen) 12272 j++; 12273 if (!j) 12274 continue; 12275 12276 err = verifier_remove_insns(env, i, j); 12277 if (err) 12278 return err; 12279 insn_cnt = env->prog->len; 12280 } 12281 12282 return 0; 12283 } 12284 12285 static int opt_remove_nops(struct bpf_verifier_env *env) 12286 { 12287 const struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0); 12288 struct bpf_insn *insn = env->prog->insnsi; 12289 int insn_cnt = env->prog->len; 12290 int i, err; 12291 12292 for (i = 0; i < insn_cnt; i++) { 12293 if (memcmp(&insn[i], &ja, sizeof(ja))) 12294 continue; 12295 12296 err = verifier_remove_insns(env, i, 1); 12297 if (err) 12298 return err; 12299 insn_cnt--; 12300 i--; 12301 } 12302 12303 return 0; 12304 } 12305 12306 static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env, 12307 const union bpf_attr *attr) 12308 { 12309 struct bpf_insn *patch, zext_patch[2], rnd_hi32_patch[4]; 12310 struct bpf_insn_aux_data *aux = env->insn_aux_data; 12311 int i, patch_len, delta = 0, len = env->prog->len; 12312 struct bpf_insn *insns = env->prog->insnsi; 12313 struct bpf_prog *new_prog; 12314 bool rnd_hi32; 12315 12316 rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32; 12317 zext_patch[1] = BPF_ZEXT_REG(0); 12318 rnd_hi32_patch[1] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0); 12319 rnd_hi32_patch[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32); 12320 rnd_hi32_patch[3] = 
BPF_ALU64_REG(BPF_OR, 0, BPF_REG_AX); 12321 for (i = 0; i < len; i++) { 12322 int adj_idx = i + delta; 12323 struct bpf_insn insn; 12324 int load_reg; 12325 12326 insn = insns[adj_idx]; 12327 load_reg = insn_def_regno(&insn); 12328 if (!aux[adj_idx].zext_dst) { 12329 u8 code, class; 12330 u32 imm_rnd; 12331 12332 if (!rnd_hi32) 12333 continue; 12334 12335 code = insn.code; 12336 class = BPF_CLASS(code); 12337 if (load_reg == -1) 12338 continue; 12339 12340 /* NOTE: arg "reg" (the fourth one) is only used for 12341 * BPF_STX + SRC_OP, so it is safe to pass NULL 12342 * here. 12343 */ 12344 if (is_reg64(env, &insn, load_reg, NULL, DST_OP)) { 12345 if (class == BPF_LD && 12346 BPF_MODE(code) == BPF_IMM) 12347 i++; 12348 continue; 12349 } 12350 12351 /* ctx load could be transformed into wider load. */ 12352 if (class == BPF_LDX && 12353 aux[adj_idx].ptr_type == PTR_TO_CTX) 12354 continue; 12355 12356 imm_rnd = get_random_int(); 12357 rnd_hi32_patch[0] = insn; 12358 rnd_hi32_patch[1].imm = imm_rnd; 12359 rnd_hi32_patch[3].dst_reg = load_reg; 12360 patch = rnd_hi32_patch; 12361 patch_len = 4; 12362 goto apply_patch_buffer; 12363 } 12364 12365 /* Add in an zero-extend instruction if a) the JIT has requested 12366 * it or b) it's a CMPXCHG. 12367 * 12368 * The latter is because: BPF_CMPXCHG always loads a value into 12369 * R0, therefore always zero-extends. However some archs' 12370 * equivalent instruction only does this load when the 12371 * comparison is successful. This detail of CMPXCHG is 12372 * orthogonal to the general zero-extension behaviour of the 12373 * CPU, so it's treated independently of bpf_jit_needs_zext. 12374 */ 12375 if (!bpf_jit_needs_zext() && !is_cmpxchg_insn(&insn)) 12376 continue; 12377 12378 if (WARN_ON(load_reg == -1)) { 12379 verbose(env, "verifier bug. 
zext_dst is set, but no reg is defined\n"); 12380 return -EFAULT; 12381 } 12382 12383 zext_patch[0] = insn; 12384 zext_patch[1].dst_reg = load_reg; 12385 zext_patch[1].src_reg = load_reg; 12386 patch = zext_patch; 12387 patch_len = 2; 12388 apply_patch_buffer: 12389 new_prog = bpf_patch_insn_data(env, adj_idx, patch, patch_len); 12390 if (!new_prog) 12391 return -ENOMEM; 12392 env->prog = new_prog; 12393 insns = new_prog->insnsi; 12394 aux = env->insn_aux_data; 12395 delta += patch_len - 1; 12396 } 12397 12398 return 0; 12399 } 12400 12401 /* convert load instructions that access fields of a context type into a 12402 * sequence of instructions that access fields of the underlying structure: 12403 * struct __sk_buff -> struct sk_buff 12404 * struct bpf_sock_ops -> struct sock 12405 */ 12406 static int convert_ctx_accesses(struct bpf_verifier_env *env) 12407 { 12408 const struct bpf_verifier_ops *ops = env->ops; 12409 int i, cnt, size, ctx_field_size, delta = 0; 12410 const int insn_cnt = env->prog->len; 12411 struct bpf_insn insn_buf[16], *insn; 12412 u32 target_size, size_default, off; 12413 struct bpf_prog *new_prog; 12414 enum bpf_access_type type; 12415 bool is_narrower_load; 12416 12417 if (ops->gen_prologue || env->seen_direct_write) { 12418 if (!ops->gen_prologue) { 12419 verbose(env, "bpf verifier is misconfigured\n"); 12420 return -EINVAL; 12421 } 12422 cnt = ops->gen_prologue(insn_buf, env->seen_direct_write, 12423 env->prog); 12424 if (cnt >= ARRAY_SIZE(insn_buf)) { 12425 verbose(env, "bpf verifier is misconfigured\n"); 12426 return -EINVAL; 12427 } else if (cnt) { 12428 new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt); 12429 if (!new_prog) 12430 return -ENOMEM; 12431 12432 env->prog = new_prog; 12433 delta += cnt - 1; 12434 } 12435 } 12436 12437 if (bpf_prog_is_dev_bound(env->prog->aux)) 12438 return 0; 12439 12440 insn = env->prog->insnsi + delta; 12441 12442 for (i = 0; i < insn_cnt; i++, insn++) { 12443 bpf_convert_ctx_access_t convert_ctx_access; 12444 bool ctx_access; 12445 12446 if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) || 12447 insn->code == (BPF_LDX | BPF_MEM | BPF_H) || 12448 insn->code == (BPF_LDX | BPF_MEM | BPF_W) || 12449 insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) { 12450 type = BPF_READ; 12451 ctx_access = true; 12452 } else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) || 12453 insn->code == (BPF_STX | BPF_MEM | BPF_H) || 12454 insn->code == (BPF_STX | BPF_MEM | BPF_W) || 12455 insn->code == (BPF_STX | BPF_MEM | BPF_DW) || 12456 insn->code == (BPF_ST | BPF_MEM | BPF_B) || 12457 insn->code == (BPF_ST | BPF_MEM | BPF_H) || 12458 insn->code == (BPF_ST | BPF_MEM | BPF_W) || 12459 insn->code == (BPF_ST | BPF_MEM | BPF_DW)) { 12460 type = BPF_WRITE; 12461 ctx_access = BPF_CLASS(insn->code) == BPF_STX; 12462 } else { 12463 continue; 12464 } 12465 12466 if (type == BPF_WRITE && 12467 env->insn_aux_data[i + delta].sanitize_stack_spill) { 12468 struct bpf_insn patch[] = { 12469 *insn, 12470 BPF_ST_NOSPEC(), 12471 }; 12472 12473 cnt = ARRAY_SIZE(patch); 12474 new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt); 12475 if (!new_prog) 12476 return -ENOMEM; 12477 12478 delta += cnt - 1; 12479 env->prog = new_prog; 12480 insn = new_prog->insnsi + i + delta; 12481 continue; 12482 } 12483 12484 if (!ctx_access) 12485 continue; 12486 12487 switch (env->insn_aux_data[i + delta].ptr_type) { 12488 case PTR_TO_CTX: 12489 if (!ops->convert_ctx_access) 12490 continue; 12491 convert_ctx_access = ops->convert_ctx_access; 12492 break; 12493 case PTR_TO_SOCKET: 12494 case 
PTR_TO_SOCK_COMMON: 12495 convert_ctx_access = bpf_sock_convert_ctx_access; 12496 break; 12497 case PTR_TO_TCP_SOCK: 12498 convert_ctx_access = bpf_tcp_sock_convert_ctx_access; 12499 break; 12500 case PTR_TO_XDP_SOCK: 12501 convert_ctx_access = bpf_xdp_sock_convert_ctx_access; 12502 break; 12503 case PTR_TO_BTF_ID: 12504 if (type == BPF_READ) { 12505 insn->code = BPF_LDX | BPF_PROBE_MEM | 12506 BPF_SIZE((insn)->code); 12507 env->prog->aux->num_exentries++; 12508 } else if (resolve_prog_type(env->prog) != BPF_PROG_TYPE_STRUCT_OPS) { 12509 verbose(env, "Writes through BTF pointers are not allowed\n"); 12510 return -EINVAL; 12511 } 12512 continue; 12513 default: 12514 continue; 12515 } 12516 12517 ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size; 12518 size = BPF_LDST_BYTES(insn); 12519 12520 /* If the read access is a narrower load of the field, 12521 * convert to a 4/8-byte load, to minimum program type specific 12522 * convert_ctx_access changes. If conversion is successful, 12523 * we will apply proper mask to the result. 12524 */ 12525 is_narrower_load = size < ctx_field_size; 12526 size_default = bpf_ctx_off_adjust_machine(ctx_field_size); 12527 off = insn->off; 12528 if (is_narrower_load) { 12529 u8 size_code; 12530 12531 if (type == BPF_WRITE) { 12532 verbose(env, "bpf verifier narrow ctx access misconfigured\n"); 12533 return -EINVAL; 12534 } 12535 12536 size_code = BPF_H; 12537 if (ctx_field_size == 4) 12538 size_code = BPF_W; 12539 else if (ctx_field_size == 8) 12540 size_code = BPF_DW; 12541 12542 insn->off = off & ~(size_default - 1); 12543 insn->code = BPF_LDX | BPF_MEM | size_code; 12544 } 12545 12546 target_size = 0; 12547 cnt = convert_ctx_access(type, insn, insn_buf, env->prog, 12548 &target_size); 12549 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) || 12550 (ctx_field_size && !target_size)) { 12551 verbose(env, "bpf verifier is misconfigured\n"); 12552 return -EINVAL; 12553 } 12554 12555 if (is_narrower_load && size < target_size) { 12556 u8 shift = bpf_ctx_narrow_access_offset( 12557 off, size, size_default) * 8; 12558 if (shift && cnt + 1 >= ARRAY_SIZE(insn_buf)) { 12559 verbose(env, "bpf verifier narrow ctx load misconfigured\n"); 12560 return -EINVAL; 12561 } 12562 if (ctx_field_size <= 4) { 12563 if (shift) 12564 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH, 12565 insn->dst_reg, 12566 shift); 12567 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg, 12568 (1 << size * 8) - 1); 12569 } else { 12570 if (shift) 12571 insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH, 12572 insn->dst_reg, 12573 shift); 12574 insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg, 12575 (1ULL << size * 8) - 1); 12576 } 12577 } 12578 12579 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 12580 if (!new_prog) 12581 return -ENOMEM; 12582 12583 delta += cnt - 1; 12584 12585 /* keep walking new program and skip insns we just inserted */ 12586 env->prog = new_prog; 12587 insn = new_prog->insnsi + i + delta; 12588 } 12589 12590 return 0; 12591 } 12592 12593 static int jit_subprogs(struct bpf_verifier_env *env) 12594 { 12595 struct bpf_prog *prog = env->prog, **func, *tmp; 12596 int i, j, subprog_start, subprog_end = 0, len, subprog; 12597 struct bpf_map *map_ptr; 12598 struct bpf_insn *insn; 12599 void *old_bpf_func; 12600 int err, num_exentries; 12601 12602 if (env->subprog_cnt <= 1) 12603 return 0; 12604 12605 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { 12606 if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn)) 12607 continue; 12608 12609 /* Upon error here we 
cannot fall back to interpreter but 12610 * need a hard reject of the program. Thus -EFAULT is 12611 * propagated in any case. 12612 */ 12613 subprog = find_subprog(env, i + insn->imm + 1); 12614 if (subprog < 0) { 12615 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", 12616 i + insn->imm + 1); 12617 return -EFAULT; 12618 } 12619 /* temporarily remember subprog id inside insn instead of 12620 * aux_data, since next loop will split up all insns into funcs 12621 */ 12622 insn->off = subprog; 12623 /* remember original imm in case JIT fails and fallback 12624 * to interpreter will be needed 12625 */ 12626 env->insn_aux_data[i].call_imm = insn->imm; 12627 /* point imm to __bpf_call_base+1 from JITs point of view */ 12628 insn->imm = 1; 12629 if (bpf_pseudo_func(insn)) 12630 /* jit (e.g. x86_64) may emit fewer instructions 12631 * if it learns a u32 imm is the same as a u64 imm. 12632 * Force a non zero here. 12633 */ 12634 insn[1].imm = 1; 12635 } 12636 12637 err = bpf_prog_alloc_jited_linfo(prog); 12638 if (err) 12639 goto out_undo_insn; 12640 12641 err = -ENOMEM; 12642 func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL); 12643 if (!func) 12644 goto out_undo_insn; 12645 12646 for (i = 0; i < env->subprog_cnt; i++) { 12647 subprog_start = subprog_end; 12648 subprog_end = env->subprog_info[i + 1].start; 12649 12650 len = subprog_end - subprog_start; 12651 /* bpf_prog_run() doesn't call subprogs directly, 12652 * hence main prog stats include the runtime of subprogs. 12653 * subprogs don't have IDs and not reachable via prog_get_next_id 12654 * func[i]->stats will never be accessed and stays NULL 12655 */ 12656 func[i] = bpf_prog_alloc_no_stats(bpf_prog_size(len), GFP_USER); 12657 if (!func[i]) 12658 goto out_free; 12659 memcpy(func[i]->insnsi, &prog->insnsi[subprog_start], 12660 len * sizeof(struct bpf_insn)); 12661 func[i]->type = prog->type; 12662 func[i]->len = len; 12663 if (bpf_prog_calc_tag(func[i])) 12664 goto out_free; 12665 func[i]->is_func = 1; 12666 func[i]->aux->func_idx = i; 12667 /* Below members will be freed only at prog->aux */ 12668 func[i]->aux->btf = prog->aux->btf; 12669 func[i]->aux->func_info = prog->aux->func_info; 12670 func[i]->aux->poke_tab = prog->aux->poke_tab; 12671 func[i]->aux->size_poke_tab = prog->aux->size_poke_tab; 12672 12673 for (j = 0; j < prog->aux->size_poke_tab; j++) { 12674 struct bpf_jit_poke_descriptor *poke; 12675 12676 poke = &prog->aux->poke_tab[j]; 12677 if (poke->insn_idx < subprog_end && 12678 poke->insn_idx >= subprog_start) 12679 poke->aux = func[i]->aux; 12680 } 12681 12682 /* Use bpf_prog_F_tag to indicate functions in stack traces. 
12683 * Long term we would need debug info to populate names 12684 */ 12685 func[i]->aux->name[0] = 'F'; 12686 func[i]->aux->stack_depth = env->subprog_info[i].stack_depth; 12687 func[i]->jit_requested = 1; 12688 func[i]->aux->kfunc_tab = prog->aux->kfunc_tab; 12689 func[i]->aux->kfunc_btf_tab = prog->aux->kfunc_btf_tab; 12690 func[i]->aux->linfo = prog->aux->linfo; 12691 func[i]->aux->nr_linfo = prog->aux->nr_linfo; 12692 func[i]->aux->jited_linfo = prog->aux->jited_linfo; 12693 func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx; 12694 num_exentries = 0; 12695 insn = func[i]->insnsi; 12696 for (j = 0; j < func[i]->len; j++, insn++) { 12697 if (BPF_CLASS(insn->code) == BPF_LDX && 12698 BPF_MODE(insn->code) == BPF_PROBE_MEM) 12699 num_exentries++; 12700 } 12701 func[i]->aux->num_exentries = num_exentries; 12702 func[i]->aux->tail_call_reachable = env->subprog_info[i].tail_call_reachable; 12703 func[i] = bpf_int_jit_compile(func[i]); 12704 if (!func[i]->jited) { 12705 err = -ENOTSUPP; 12706 goto out_free; 12707 } 12708 cond_resched(); 12709 } 12710 12711 /* at this point all bpf functions were successfully JITed, 12712 * now populate all bpf_calls with correct addresses and 12713 * run last pass of JIT 12714 */ 12715 for (i = 0; i < env->subprog_cnt; i++) { 12716 insn = func[i]->insnsi; 12717 for (j = 0; j < func[i]->len; j++, insn++) { 12718 if (bpf_pseudo_func(insn)) { 12719 subprog = insn->off; 12720 insn[0].imm = (u32)(long)func[subprog]->bpf_func; 12721 insn[1].imm = ((u64)(long)func[subprog]->bpf_func) >> 32; 12722 continue; 12723 } 12724 if (!bpf_pseudo_call(insn)) 12725 continue; 12726 subprog = insn->off; 12727 insn->imm = BPF_CALL_IMM(func[subprog]->bpf_func); 12728 } 12729 12730 /* we use the aux data to keep a list of the start addresses 12731 * of the JITed images for each function in the program 12732 * 12733 * for some architectures, such as powerpc64, the imm field 12734 * might not be large enough to hold the offset of the start 12735 * address of the callee's JITed image from __bpf_call_base 12736 * 12737 * in such cases, we can look up the start address of a callee 12738 * by using its subprog id, available from the off field of 12739 * the call instruction, as an index for this list 12740 */ 12741 func[i]->aux->func = func; 12742 func[i]->aux->func_cnt = env->subprog_cnt; 12743 } 12744 for (i = 0; i < env->subprog_cnt; i++) { 12745 old_bpf_func = func[i]->bpf_func; 12746 tmp = bpf_int_jit_compile(func[i]); 12747 if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) { 12748 verbose(env, "JIT doesn't support bpf-to-bpf calls\n"); 12749 err = -ENOTSUPP; 12750 goto out_free; 12751 } 12752 cond_resched(); 12753 } 12754 12755 /* finally lock prog and jit images for all functions and 12756 * populate kallsyms 12757 */ 12758 for (i = 0; i < env->subprog_cnt; i++) { 12759 bpf_prog_lock_ro(func[i]); 12760 bpf_prog_kallsyms_add(func[i]); 12761 } 12762 12763 /* Last step: make now unused interpreter insns from main 12764 * prog consistent for later dump requests, so they can 12765 * later look the same as if they were interpreted only. 
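 * For illustration: each bpf-to-bpf call below keeps the callee's
 * subprog index in insn->imm and the original relative offset in
 * insn->off, so no JITed addresses leak into instruction dumps.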
12766 */ 12767 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { 12768 if (bpf_pseudo_func(insn)) { 12769 insn[0].imm = env->insn_aux_data[i].call_imm; 12770 insn[1].imm = insn->off; 12771 insn->off = 0; 12772 continue; 12773 } 12774 if (!bpf_pseudo_call(insn)) 12775 continue; 12776 insn->off = env->insn_aux_data[i].call_imm; 12777 subprog = find_subprog(env, i + insn->off + 1); 12778 insn->imm = subprog; 12779 } 12780 12781 prog->jited = 1; 12782 prog->bpf_func = func[0]->bpf_func; 12783 prog->aux->func = func; 12784 prog->aux->func_cnt = env->subprog_cnt; 12785 bpf_prog_jit_attempt_done(prog); 12786 return 0; 12787 out_free: 12788 /* We failed JIT'ing, so at this point we need to unregister poke 12789 * descriptors from subprogs, so that kernel is not attempting to 12790 * patch it anymore as we're freeing the subprog JIT memory. 12791 */ 12792 for (i = 0; i < prog->aux->size_poke_tab; i++) { 12793 map_ptr = prog->aux->poke_tab[i].tail_call.map; 12794 map_ptr->ops->map_poke_untrack(map_ptr, prog->aux); 12795 } 12796 /* At this point we're guaranteed that poke descriptors are not 12797 * live anymore. We can just unlink its descriptor table as it's 12798 * released with the main prog. 12799 */ 12800 for (i = 0; i < env->subprog_cnt; i++) { 12801 if (!func[i]) 12802 continue; 12803 func[i]->aux->poke_tab = NULL; 12804 bpf_jit_free(func[i]); 12805 } 12806 kfree(func); 12807 out_undo_insn: 12808 /* cleanup main prog to be interpreted */ 12809 prog->jit_requested = 0; 12810 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { 12811 if (!bpf_pseudo_call(insn)) 12812 continue; 12813 insn->off = 0; 12814 insn->imm = env->insn_aux_data[i].call_imm; 12815 } 12816 bpf_prog_jit_attempt_done(prog); 12817 return err; 12818 } 12819 12820 static int fixup_call_args(struct bpf_verifier_env *env) 12821 { 12822 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 12823 struct bpf_prog *prog = env->prog; 12824 struct bpf_insn *insn = prog->insnsi; 12825 bool has_kfunc_call = bpf_prog_has_kfunc_call(prog); 12826 int i, depth; 12827 #endif 12828 int err = 0; 12829 12830 if (env->prog->jit_requested && 12831 !bpf_prog_is_dev_bound(env->prog->aux)) { 12832 err = jit_subprogs(env); 12833 if (err == 0) 12834 return 0; 12835 if (err == -EFAULT) 12836 return err; 12837 } 12838 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 12839 if (has_kfunc_call) { 12840 verbose(env, "calling kernel functions are not allowed in non-JITed programs\n"); 12841 return -EINVAL; 12842 } 12843 if (env->subprog_cnt > 1 && env->prog->aux->tail_call_reachable) { 12844 /* When JIT fails the progs with bpf2bpf calls and tail_calls 12845 * have to be rejected, since interpreter doesn't support them yet. 12846 */ 12847 verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n"); 12848 return -EINVAL; 12849 } 12850 for (i = 0; i < prog->len; i++, insn++) { 12851 if (bpf_pseudo_func(insn)) { 12852 /* When JIT fails the progs with callback calls 12853 * have to be rejected, since interpreter doesn't support them yet. 
*/ 12855 verbose(env, "callbacks are not allowed in non-JITed programs\n"); 12856 return -EINVAL; 12857 } 12858 12859 if (!bpf_pseudo_call(insn)) 12860 continue; 12861 depth = get_callee_stack_depth(env, insn, i); 12862 if (depth < 0) 12863 return depth; 12864 bpf_patch_call_args(insn, depth); 12865 } 12866 err = 0; 12867 #endif 12868 return err; 12869 } 12870 12871 static int fixup_kfunc_call(struct bpf_verifier_env *env, 12872 struct bpf_insn *insn) 12873 { 12874 const struct bpf_kfunc_desc *desc; 12875 12876 if (!insn->imm) { 12877 verbose(env, "invalid kernel function call not eliminated in verifier pass\n"); 12878 return -EINVAL; 12879 } 12880 12881 /* insn->imm has the btf func_id. Replace it with 12882 * an address (relative to __bpf_call_base). 12883 */ 12884 desc = find_kfunc_desc(env->prog, insn->imm, insn->off); 12885 if (!desc) { 12886 verbose(env, "verifier internal error: kernel function descriptor not found for func_id %u\n", 12887 insn->imm); 12888 return -EFAULT; 12889 } 12890 12891 insn->imm = desc->imm; 12892 12893 return 0; 12894 } 12895 12896 /* Do various post-verification rewrites in a single program pass. 12897 * These rewrites simplify JIT and interpreter implementations. 12898 */ 12899 static int do_misc_fixups(struct bpf_verifier_env *env) 12900 { 12901 struct bpf_prog *prog = env->prog; 12902 bool expect_blinding = bpf_jit_blinding_enabled(prog); 12903 enum bpf_prog_type prog_type = resolve_prog_type(prog); 12904 struct bpf_insn *insn = prog->insnsi; 12905 const struct bpf_func_proto *fn; 12906 const int insn_cnt = prog->len; 12907 const struct bpf_map_ops *ops; 12908 struct bpf_insn_aux_data *aux; 12909 struct bpf_insn insn_buf[16]; 12910 struct bpf_prog *new_prog; 12911 struct bpf_map *map_ptr; 12912 int i, ret, cnt, delta = 0; 12913 12914 for (i = 0; i < insn_cnt; i++, insn++) { 12915 /* Make divide-by-zero exceptions impossible. */ 12916 if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) || 12917 insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) || 12918 insn->code == (BPF_ALU | BPF_MOD | BPF_X) || 12919 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) { 12920 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64; 12921 bool isdiv = BPF_OP(insn->code) == BPF_DIV; 12922 struct bpf_insn *patchlet; 12923 struct bpf_insn chk_and_div[] = { 12924 /* [R,W]x div 0 -> 0 */ 12925 BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) | 12926 BPF_JNE | BPF_K, insn->src_reg, 12927 0, 2, 0), 12928 BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg), 12929 BPF_JMP_IMM(BPF_JA, 0, 0, 1), 12930 *insn, 12931 }; 12932 struct bpf_insn chk_and_mod[] = { 12933 /* [R,W]x mod 0 -> [R,W]x */ 12934 BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) | 12935 BPF_JEQ | BPF_K, insn->src_reg, 12936 0, 1 + (is64 ? 0 : 1), 0), 12937 *insn, 12938 BPF_JMP_IMM(BPF_JA, 0, 0, 1), 12939 BPF_MOV32_REG(insn->dst_reg, insn->dst_reg), 12940 }; 12941 12942 patchlet = isdiv ? chk_and_div : chk_and_mod; 12943 cnt = isdiv ? ARRAY_SIZE(chk_and_div) : 12944 ARRAY_SIZE(chk_and_mod) - (is64 ? 2 : 0); 12945 12946 new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt); 12947 if (!new_prog) 12948 return -ENOMEM; 12949 12950 delta += cnt - 1; 12951 env->prog = prog = new_prog; 12952 insn = new_prog->insnsi + i + delta; 12953 continue; 12954 } 12955 12956 /* Implement LD_ABS and LD_IND with a rewrite, if supported by the program type. 
*/ 12957 if (BPF_CLASS(insn->code) == BPF_LD && 12958 (BPF_MODE(insn->code) == BPF_ABS || 12959 BPF_MODE(insn->code) == BPF_IND)) { 12960 cnt = env->ops->gen_ld_abs(insn, insn_buf); 12961 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) { 12962 verbose(env, "bpf verifier is misconfigured\n"); 12963 return -EINVAL; 12964 } 12965 12966 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 12967 if (!new_prog) 12968 return -ENOMEM; 12969 12970 delta += cnt - 1; 12971 env->prog = prog = new_prog; 12972 insn = new_prog->insnsi + i + delta; 12973 continue; 12974 } 12975 12976 /* Rewrite pointer arithmetic to mitigate speculation attacks. */ 12977 if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) || 12978 insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) { 12979 const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X; 12980 const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X; 12981 struct bpf_insn *patch = &insn_buf[0]; 12982 bool issrc, isneg, isimm; 12983 u32 off_reg; 12984 12985 aux = &env->insn_aux_data[i + delta]; 12986 if (!aux->alu_state || 12987 aux->alu_state == BPF_ALU_NON_POINTER) 12988 continue; 12989 12990 isneg = aux->alu_state & BPF_ALU_NEG_VALUE; 12991 issrc = (aux->alu_state & BPF_ALU_SANITIZE) == 12992 BPF_ALU_SANITIZE_SRC; 12993 isimm = aux->alu_state & BPF_ALU_IMMEDIATE; 12994 12995 off_reg = issrc ? insn->src_reg : insn->dst_reg; 12996 if (isimm) { 12997 *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit); 12998 } else { 12999 if (isneg) 13000 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); 13001 *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit); 13002 *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg); 13003 *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg); 13004 *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0); 13005 *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63); 13006 *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, off_reg); 13007 } 13008 if (!issrc) 13009 *patch++ = BPF_MOV64_REG(insn->dst_reg, insn->src_reg); 13010 insn->src_reg = BPF_REG_AX; 13011 if (isneg) 13012 insn->code = insn->code == code_add ? 13013 code_sub : code_add; 13014 *patch++ = *insn; 13015 if (issrc && isneg && !isimm) 13016 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); 13017 cnt = patch - insn_buf; 13018 13019 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 13020 if (!new_prog) 13021 return -ENOMEM; 13022 13023 delta += cnt - 1; 13024 env->prog = prog = new_prog; 13025 insn = new_prog->insnsi + i + delta; 13026 continue; 13027 } 13028 13029 if (insn->code != (BPF_JMP | BPF_CALL)) 13030 continue; 13031 if (insn->src_reg == BPF_PSEUDO_CALL) 13032 continue; 13033 if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) { 13034 ret = fixup_kfunc_call(env, insn); 13035 if (ret) 13036 return ret; 13037 continue; 13038 } 13039 13040 if (insn->imm == BPF_FUNC_get_route_realm) 13041 prog->dst_needed = 1; 13042 if (insn->imm == BPF_FUNC_get_prandom_u32) 13043 bpf_user_rnd_init_once(); 13044 if (insn->imm == BPF_FUNC_override_return) 13045 prog->kprobe_override = 1; 13046 if (insn->imm == BPF_FUNC_tail_call) { 13047 /* If we tail call into other programs, we 13048 * cannot make any assumptions since they can 13049 * be replaced dynamically during runtime in 13050 * the program array. 
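 * Hence the conservative assumptions made right below: the target may
 * touch the skb callback area (cb_access), may need up to MAX_BPF_STACK
 * bytes of stack, and may read up to MAX_PACKET_OFF bytes of packet.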
13051 */ 13052 prog->cb_access = 1; 13053 if (!allow_tail_call_in_subprogs(env)) 13054 prog->aux->stack_depth = MAX_BPF_STACK; 13055 prog->aux->max_pkt_offset = MAX_PACKET_OFF; 13056 13057 /* mark bpf_tail_call as different opcode to avoid 13058 * conditional branch in the interpreter for every normal 13059 * call and to prevent accidental JITing by JIT compiler 13060 * that doesn't support bpf_tail_call yet 13061 */ 13062 insn->imm = 0; 13063 insn->code = BPF_JMP | BPF_TAIL_CALL; 13064 13065 aux = &env->insn_aux_data[i + delta]; 13066 if (env->bpf_capable && !expect_blinding && 13067 prog->jit_requested && 13068 !bpf_map_key_poisoned(aux) && 13069 !bpf_map_ptr_poisoned(aux) && 13070 !bpf_map_ptr_unpriv(aux)) { 13071 struct bpf_jit_poke_descriptor desc = { 13072 .reason = BPF_POKE_REASON_TAIL_CALL, 13073 .tail_call.map = BPF_MAP_PTR(aux->map_ptr_state), 13074 .tail_call.key = bpf_map_key_immediate(aux), 13075 .insn_idx = i + delta, 13076 }; 13077 13078 ret = bpf_jit_add_poke_descriptor(prog, &desc); 13079 if (ret < 0) { 13080 verbose(env, "adding tail call poke descriptor failed\n"); 13081 return ret; 13082 } 13083 13084 insn->imm = ret + 1; 13085 continue; 13086 } 13087 13088 if (!bpf_map_ptr_unpriv(aux)) 13089 continue; 13090 13091 /* instead of changing every JIT dealing with tail_call 13092 * emit two extra insns: 13093 * if (index >= max_entries) goto out; 13094 * index &= array->index_mask; 13095 * to avoid out-of-bounds cpu speculation 13096 */ 13097 if (bpf_map_ptr_poisoned(aux)) { 13098 verbose(env, "tail_call abusing map_ptr\n"); 13099 return -EINVAL; 13100 } 13101 13102 map_ptr = BPF_MAP_PTR(aux->map_ptr_state); 13103 insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3, 13104 map_ptr->max_entries, 2); 13105 insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3, 13106 container_of(map_ptr, 13107 struct bpf_array, 13108 map)->index_mask); 13109 insn_buf[2] = *insn; 13110 cnt = 3; 13111 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 13112 if (!new_prog) 13113 return -ENOMEM; 13114 13115 delta += cnt - 1; 13116 env->prog = prog = new_prog; 13117 insn = new_prog->insnsi + i + delta; 13118 continue; 13119 } 13120 13121 if (insn->imm == BPF_FUNC_timer_set_callback) { 13122 /* The verifier will process callback_fn as many times as necessary 13123 * with different maps and the register states prepared by 13124 * set_timer_callback_state will be accurate. 13125 * 13126 * The following use case is valid: 13127 * map1 is shared by prog1, prog2, prog3. 13128 * prog1 calls bpf_timer_init for some map1 elements 13129 * prog2 calls bpf_timer_set_callback for some map1 elements. 13130 * Those that were not bpf_timer_init-ed will return -EINVAL. 13131 * prog3 calls bpf_timer_start for some map1 elements. 13132 * Those that were not both bpf_timer_init-ed and 13133 * bpf_timer_set_callback-ed will return -EINVAL. 13134 */ 13135 struct bpf_insn ld_addrs[2] = { 13136 BPF_LD_IMM64(BPF_REG_3, (long)prog->aux), 13137 }; 13138 13139 insn_buf[0] = ld_addrs[0]; 13140 insn_buf[1] = ld_addrs[1]; 13141 insn_buf[2] = *insn; 13142 cnt = 3; 13143 13144 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 13145 if (!new_prog) 13146 return -ENOMEM; 13147 13148 delta += cnt - 1; 13149 env->prog = prog = new_prog; 13150 insn = new_prog->insnsi + i + delta; 13151 goto patch_call_imm; 13152 } 13153 13154 /* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup 13155 * and other inlining handlers are currently limited to 64 bit 13156 * only. 
13157 */ 13158 if (prog->jit_requested && BITS_PER_LONG == 64 && 13159 (insn->imm == BPF_FUNC_map_lookup_elem || 13160 insn->imm == BPF_FUNC_map_update_elem || 13161 insn->imm == BPF_FUNC_map_delete_elem || 13162 insn->imm == BPF_FUNC_map_push_elem || 13163 insn->imm == BPF_FUNC_map_pop_elem || 13164 insn->imm == BPF_FUNC_map_peek_elem || 13165 insn->imm == BPF_FUNC_redirect_map || 13166 insn->imm == BPF_FUNC_for_each_map_elem)) { 13167 aux = &env->insn_aux_data[i + delta]; 13168 if (bpf_map_ptr_poisoned(aux)) 13169 goto patch_call_imm; 13170 13171 map_ptr = BPF_MAP_PTR(aux->map_ptr_state); 13172 ops = map_ptr->ops; 13173 if (insn->imm == BPF_FUNC_map_lookup_elem && 13174 ops->map_gen_lookup) { 13175 cnt = ops->map_gen_lookup(map_ptr, insn_buf); 13176 if (cnt == -EOPNOTSUPP) 13177 goto patch_map_ops_generic; 13178 if (cnt <= 0 || cnt >= ARRAY_SIZE(insn_buf)) { 13179 verbose(env, "bpf verifier is misconfigured\n"); 13180 return -EINVAL; 13181 } 13182 13183 new_prog = bpf_patch_insn_data(env, i + delta, 13184 insn_buf, cnt); 13185 if (!new_prog) 13186 return -ENOMEM; 13187 13188 delta += cnt - 1; 13189 env->prog = prog = new_prog; 13190 insn = new_prog->insnsi + i + delta; 13191 continue; 13192 } 13193 13194 BUILD_BUG_ON(!__same_type(ops->map_lookup_elem, 13195 (void *(*)(struct bpf_map *map, void *key))NULL)); 13196 BUILD_BUG_ON(!__same_type(ops->map_delete_elem, 13197 (int (*)(struct bpf_map *map, void *key))NULL)); 13198 BUILD_BUG_ON(!__same_type(ops->map_update_elem, 13199 (int (*)(struct bpf_map *map, void *key, void *value, 13200 u64 flags))NULL)); 13201 BUILD_BUG_ON(!__same_type(ops->map_push_elem, 13202 (int (*)(struct bpf_map *map, void *value, 13203 u64 flags))NULL)); 13204 BUILD_BUG_ON(!__same_type(ops->map_pop_elem, 13205 (int (*)(struct bpf_map *map, void *value))NULL)); 13206 BUILD_BUG_ON(!__same_type(ops->map_peek_elem, 13207 (int (*)(struct bpf_map *map, void *value))NULL)); 13208 BUILD_BUG_ON(!__same_type(ops->map_redirect, 13209 (int (*)(struct bpf_map *map, u32 ifindex, u64 flags))NULL)); 13210 BUILD_BUG_ON(!__same_type(ops->map_for_each_callback, 13211 (int (*)(struct bpf_map *map, 13212 bpf_callback_t callback_fn, 13213 void *callback_ctx, 13214 u64 flags))NULL)); 13215 13216 patch_map_ops_generic: 13217 switch (insn->imm) { 13218 case BPF_FUNC_map_lookup_elem: 13219 insn->imm = BPF_CALL_IMM(ops->map_lookup_elem); 13220 continue; 13221 case BPF_FUNC_map_update_elem: 13222 insn->imm = BPF_CALL_IMM(ops->map_update_elem); 13223 continue; 13224 case BPF_FUNC_map_delete_elem: 13225 insn->imm = BPF_CALL_IMM(ops->map_delete_elem); 13226 continue; 13227 case BPF_FUNC_map_push_elem: 13228 insn->imm = BPF_CALL_IMM(ops->map_push_elem); 13229 continue; 13230 case BPF_FUNC_map_pop_elem: 13231 insn->imm = BPF_CALL_IMM(ops->map_pop_elem); 13232 continue; 13233 case BPF_FUNC_map_peek_elem: 13234 insn->imm = BPF_CALL_IMM(ops->map_peek_elem); 13235 continue; 13236 case BPF_FUNC_redirect_map: 13237 insn->imm = BPF_CALL_IMM(ops->map_redirect); 13238 continue; 13239 case BPF_FUNC_for_each_map_elem: 13240 insn->imm = BPF_CALL_IMM(ops->map_for_each_callback); 13241 continue; 13242 } 13243 13244 goto patch_call_imm; 13245 } 13246 13247 /* Implement bpf_jiffies64 inline. 
*/ 13248 if (prog->jit_requested && BITS_PER_LONG == 64 && 13249 insn->imm == BPF_FUNC_jiffies64) { 13250 struct bpf_insn ld_jiffies_addr[2] = { 13251 BPF_LD_IMM64(BPF_REG_0, 13252 (unsigned long)&jiffies), 13253 }; 13254 13255 insn_buf[0] = ld_jiffies_addr[0]; 13256 insn_buf[1] = ld_jiffies_addr[1]; 13257 insn_buf[2] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, 13258 BPF_REG_0, 0); 13259 cnt = 3; 13260 13261 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 13262 cnt); 13263 if (!new_prog) 13264 return -ENOMEM; 13265 13266 delta += cnt - 1; 13267 env->prog = prog = new_prog; 13268 insn = new_prog->insnsi + i + delta; 13269 continue; 13270 } 13271 13272 /* Implement bpf_get_func_ip inline. */ 13273 if (prog_type == BPF_PROG_TYPE_TRACING && 13274 insn->imm == BPF_FUNC_get_func_ip) { 13275 /* Load IP address from ctx - 8 */ 13276 insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8); 13277 13278 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1); 13279 if (!new_prog) 13280 return -ENOMEM; 13281 13282 env->prog = prog = new_prog; 13283 insn = new_prog->insnsi + i + delta; 13284 continue; 13285 } 13286 13287 patch_call_imm: 13288 fn = env->ops->get_func_proto(insn->imm, env->prog); 13289 /* all functions that have prototype and verifier allowed 13290 * programs to call them, must be real in-kernel functions 13291 */ 13292 if (!fn->func) { 13293 verbose(env, 13294 "kernel subsystem misconfigured func %s#%d\n", 13295 func_id_name(insn->imm), insn->imm); 13296 return -EFAULT; 13297 } 13298 insn->imm = fn->func - __bpf_call_base; 13299 } 13300 13301 /* Since poke tab is now finalized, publish aux to tracker. */ 13302 for (i = 0; i < prog->aux->size_poke_tab; i++) { 13303 map_ptr = prog->aux->poke_tab[i].tail_call.map; 13304 if (!map_ptr->ops->map_poke_track || 13305 !map_ptr->ops->map_poke_untrack || 13306 !map_ptr->ops->map_poke_run) { 13307 verbose(env, "bpf verifier is misconfigured\n"); 13308 return -EINVAL; 13309 } 13310 13311 ret = map_ptr->ops->map_poke_track(map_ptr, prog->aux); 13312 if (ret < 0) { 13313 verbose(env, "tracking tail call prog failed\n"); 13314 return ret; 13315 } 13316 } 13317 13318 sort_kfunc_descs_by_imm(env->prog); 13319 13320 return 0; 13321 } 13322 13323 static void free_states(struct bpf_verifier_env *env) 13324 { 13325 struct bpf_verifier_state_list *sl, *sln; 13326 int i; 13327 13328 sl = env->free_list; 13329 while (sl) { 13330 sln = sl->next; 13331 free_verifier_state(&sl->state, false); 13332 kfree(sl); 13333 sl = sln; 13334 } 13335 env->free_list = NULL; 13336 13337 if (!env->explored_states) 13338 return; 13339 13340 for (i = 0; i < state_htab_size(env); i++) { 13341 sl = env->explored_states[i]; 13342 13343 while (sl) { 13344 sln = sl->next; 13345 free_verifier_state(&sl->state, false); 13346 kfree(sl); 13347 sl = sln; 13348 } 13349 env->explored_states[i] = NULL; 13350 } 13351 } 13352 13353 static int do_check_common(struct bpf_verifier_env *env, int subprog) 13354 { 13355 bool pop_log = !(env->log.level & BPF_LOG_LEVEL2); 13356 struct bpf_verifier_state *state; 13357 struct bpf_reg_state *regs; 13358 int ret, i; 13359 13360 env->prev_linfo = NULL; 13361 env->pass_cnt++; 13362 13363 state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL); 13364 if (!state) 13365 return -ENOMEM; 13366 state->curframe = 0; 13367 state->speculative = false; 13368 state->branches = 1; 13369 state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL); 13370 if (!state->frame[0]) { 13371 kfree(state); 13372 return -ENOMEM; 13373 } 13374 env->cur_state = 
state; 13375 init_func_state(env, state->frame[0], 13376 BPF_MAIN_FUNC /* callsite */, 13377 0 /* frameno */, 13378 subprog); 13379 13380 regs = state->frame[state->curframe]->regs; 13381 if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) { 13382 ret = btf_prepare_func_args(env, subprog, regs); 13383 if (ret) 13384 goto out; 13385 for (i = BPF_REG_1; i <= BPF_REG_5; i++) { 13386 if (regs[i].type == PTR_TO_CTX) 13387 mark_reg_known_zero(env, regs, i); 13388 else if (regs[i].type == SCALAR_VALUE) 13389 mark_reg_unknown(env, regs, i); 13390 else if (regs[i].type == PTR_TO_MEM_OR_NULL) { 13391 const u32 mem_size = regs[i].mem_size; 13392 13393 mark_reg_known_zero(env, regs, i); 13394 regs[i].mem_size = mem_size; 13395 regs[i].id = ++env->id_gen; 13396 } 13397 } 13398 } else { 13399 /* 1st arg to a function */ 13400 regs[BPF_REG_1].type = PTR_TO_CTX; 13401 mark_reg_known_zero(env, regs, BPF_REG_1); 13402 ret = btf_check_subprog_arg_match(env, subprog, regs); 13403 if (ret == -EFAULT) 13404 /* unlikely verifier bug. abort. 13405 * ret == 0 and ret < 0 are sadly acceptable for 13406 * main() function due to backward compatibility. 13407 * Like socket filter program may be written as: 13408 * int bpf_prog(struct pt_regs *ctx) 13409 * and never dereference that ctx in the program. 13410 * 'struct pt_regs' is a type mismatch for socket 13411 * filter that should be using 'struct __sk_buff'. 13412 */ 13413 goto out; 13414 } 13415 13416 ret = do_check(env); 13417 out: 13418 /* check for NULL is necessary, since cur_state can be freed inside 13419 * do_check() under memory pressure. 13420 */ 13421 if (env->cur_state) { 13422 free_verifier_state(env->cur_state, true); 13423 env->cur_state = NULL; 13424 } 13425 while (!pop_stack(env, NULL, NULL, false)); 13426 if (!ret && pop_log) 13427 bpf_vlog_reset(&env->log, 0); 13428 free_states(env); 13429 return ret; 13430 } 13431 13432 /* Verify all global functions in a BPF program one by one based on their BTF. 13433 * All global functions must pass verification. Otherwise the whole program is rejected. 13434 * Consider: 13435 * int bar(int); 13436 * int foo(int f) 13437 * { 13438 * return bar(f); 13439 * } 13440 * int bar(int b) 13441 * { 13442 * ... 13443 * } 13444 * foo() will be verified first for R1=any_scalar_value. During verification it 13445 * will be assumed that bar() already verified successfully and call to bar() 13446 * from foo() will be checked for type match only. Later bar() will be verified 13447 * independently to check that it's safe for R1=any_scalar_value. 
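 * In other words, a global function is verified against its BTF
 * prototype rather than against the register state at any particular
 * call site, which is what makes this per-function verification sound.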
13448 */ 13449 static int do_check_subprogs(struct bpf_verifier_env *env) 13450 { 13451 struct bpf_prog_aux *aux = env->prog->aux; 13452 int i, ret; 13453 13454 if (!aux->func_info) 13455 return 0; 13456 13457 for (i = 1; i < env->subprog_cnt; i++) { 13458 if (aux->func_info_aux[i].linkage != BTF_FUNC_GLOBAL) 13459 continue; 13460 env->insn_idx = env->subprog_info[i].start; 13461 WARN_ON_ONCE(env->insn_idx == 0); 13462 ret = do_check_common(env, i); 13463 if (ret) { 13464 return ret; 13465 } else if (env->log.level & BPF_LOG_LEVEL) { 13466 verbose(env, 13467 "Func#%d is safe for any args that match its prototype\n", 13468 i); 13469 } 13470 } 13471 return 0; 13472 } 13473 13474 static int do_check_main(struct bpf_verifier_env *env) 13475 { 13476 int ret; 13477 13478 env->insn_idx = 0; 13479 ret = do_check_common(env, 0); 13480 if (!ret) 13481 env->prog->aux->stack_depth = env->subprog_info[0].stack_depth; 13482 return ret; 13483 } 13484 13485 13486 static void print_verification_stats(struct bpf_verifier_env *env) 13487 { 13488 int i; 13489 13490 if (env->log.level & BPF_LOG_STATS) { 13491 verbose(env, "verification time %lld usec\n", 13492 div_u64(env->verification_time, 1000)); 13493 verbose(env, "stack depth "); 13494 for (i = 0; i < env->subprog_cnt; i++) { 13495 u32 depth = env->subprog_info[i].stack_depth; 13496 13497 verbose(env, "%d", depth); 13498 if (i + 1 < env->subprog_cnt) 13499 verbose(env, "+"); 13500 } 13501 verbose(env, "\n"); 13502 } 13503 verbose(env, "processed %d insns (limit %d) max_states_per_insn %d " 13504 "total_states %d peak_states %d mark_read %d\n", 13505 env->insn_processed, BPF_COMPLEXITY_LIMIT_INSNS, 13506 env->max_states_per_insn, env->total_states, 13507 env->peak_states, env->longest_mark_read_walk); 13508 } 13509 13510 static int check_struct_ops_btf_id(struct bpf_verifier_env *env) 13511 { 13512 const struct btf_type *t, *func_proto; 13513 const struct bpf_struct_ops *st_ops; 13514 const struct btf_member *member; 13515 struct bpf_prog *prog = env->prog; 13516 u32 btf_id, member_idx; 13517 const char *mname; 13518 13519 if (!prog->gpl_compatible) { 13520 verbose(env, "struct ops programs must have a GPL compatible license\n"); 13521 return -EINVAL; 13522 } 13523 13524 btf_id = prog->aux->attach_btf_id; 13525 st_ops = bpf_struct_ops_find(btf_id); 13526 if (!st_ops) { 13527 verbose(env, "attach_btf_id %u is not a supported struct\n", 13528 btf_id); 13529 return -ENOTSUPP; 13530 } 13531 13532 t = st_ops->type; 13533 member_idx = prog->expected_attach_type; 13534 if (member_idx >= btf_type_vlen(t)) { 13535 verbose(env, "attach to invalid member idx %u of struct %s\n", 13536 member_idx, st_ops->name); 13537 return -EINVAL; 13538 } 13539 13540 member = &btf_type_member(t)[member_idx]; 13541 mname = btf_name_by_offset(btf_vmlinux, member->name_off); 13542 func_proto = btf_type_resolve_func_ptr(btf_vmlinux, member->type, 13543 NULL); 13544 if (!func_proto) { 13545 verbose(env, "attach to invalid member %s(@idx %u) of struct %s\n", 13546 mname, member_idx, st_ops->name); 13547 return -EINVAL; 13548 } 13549 13550 if (st_ops->check_member) { 13551 int err = st_ops->check_member(t, member); 13552 13553 if (err) { 13554 verbose(env, "attach to unsupported member %s of struct %s\n", 13555 mname, st_ops->name); 13556 return err; 13557 } 13558 } 13559 13560 prog->aux->attach_func_proto = func_proto; 13561 prog->aux->attach_func_name = mname; 13562 env->ops = st_ops->verifier_ops; 13563 13564 return 0; 13565 } 13566 #define SECURITY_PREFIX "security_" 13567 13568 static 
int check_attach_modify_return(unsigned long addr, const char *func_name) 13569 { 13570 if (within_error_injection_list(addr) || 13571 !strncmp(SECURITY_PREFIX, func_name, sizeof(SECURITY_PREFIX) - 1)) 13572 return 0; 13573 13574 return -EINVAL; 13575 } 13576 13577 /* list of non-sleepable functions that are otherwise on 13578 * ALLOW_ERROR_INJECTION list 13579 */ 13580 BTF_SET_START(btf_non_sleepable_error_inject) 13581 /* Three functions below can be called from sleepable and non-sleepable context. 13582 * Assume non-sleepable from bpf safety point of view. 13583 */ 13584 BTF_ID(func, __filemap_add_folio) 13585 BTF_ID(func, should_fail_alloc_page) 13586 BTF_ID(func, should_failslab) 13587 BTF_SET_END(btf_non_sleepable_error_inject) 13588 13589 static int check_non_sleepable_error_inject(u32 btf_id) 13590 { 13591 return btf_id_set_contains(&btf_non_sleepable_error_inject, btf_id); 13592 } 13593 13594 int bpf_check_attach_target(struct bpf_verifier_log *log, 13595 const struct bpf_prog *prog, 13596 const struct bpf_prog *tgt_prog, 13597 u32 btf_id, 13598 struct bpf_attach_target_info *tgt_info) 13599 { 13600 bool prog_extension = prog->type == BPF_PROG_TYPE_EXT; 13601 const char prefix[] = "btf_trace_"; 13602 int ret = 0, subprog = -1, i; 13603 const struct btf_type *t; 13604 bool conservative = true; 13605 const char *tname; 13606 struct btf *btf; 13607 long addr = 0; 13608 13609 if (!btf_id) { 13610 bpf_log(log, "Tracing programs must provide btf_id\n"); 13611 return -EINVAL; 13612 } 13613 btf = tgt_prog ? tgt_prog->aux->btf : prog->aux->attach_btf; 13614 if (!btf) { 13615 bpf_log(log, 13616 "FENTRY/FEXIT program can only be attached to another program annotated with BTF\n"); 13617 return -EINVAL; 13618 } 13619 t = btf_type_by_id(btf, btf_id); 13620 if (!t) { 13621 bpf_log(log, "attach_btf_id %u is invalid\n", btf_id); 13622 return -EINVAL; 13623 } 13624 tname = btf_name_by_offset(btf, t->name_off); 13625 if (!tname) { 13626 bpf_log(log, "attach_btf_id %u doesn't have a name\n", btf_id); 13627 return -EINVAL; 13628 } 13629 if (tgt_prog) { 13630 struct bpf_prog_aux *aux = tgt_prog->aux; 13631 13632 for (i = 0; i < aux->func_info_cnt; i++) 13633 if (aux->func_info[i].type_id == btf_id) { 13634 subprog = i; 13635 break; 13636 } 13637 if (subprog == -1) { 13638 bpf_log(log, "Subprog %s doesn't exist\n", tname); 13639 return -EINVAL; 13640 } 13641 conservative = aux->func_info_aux[subprog].unreliable; 13642 if (prog_extension) { 13643 if (conservative) { 13644 bpf_log(log, 13645 "Cannot replace static functions\n"); 13646 return -EINVAL; 13647 } 13648 if (!prog->jit_requested) { 13649 bpf_log(log, 13650 "Extension programs should be JITed\n"); 13651 return -EINVAL; 13652 } 13653 } 13654 if (!tgt_prog->jited) { 13655 bpf_log(log, "Can attach to only JITed progs\n"); 13656 return -EINVAL; 13657 } 13658 if (tgt_prog->type == prog->type) { 13659 /* Cannot fentry/fexit another fentry/fexit program. 13660 * Cannot attach program extension to another extension. 13661 * It's ok to attach fentry/fexit to extension program. 13662 */ 13663 bpf_log(log, "Cannot recursively attach\n"); 13664 return -EINVAL; 13665 } 13666 if (tgt_prog->type == BPF_PROG_TYPE_TRACING && 13667 prog_extension && 13668 (tgt_prog->expected_attach_type == BPF_TRACE_FENTRY || 13669 tgt_prog->expected_attach_type == BPF_TRACE_FEXIT)) { 13670 /* Program extensions can extend all program types 13671 * except fentry/fexit. The reason is the following. 
13672 * The fentry/fexit programs are used for performance 13673 * analysis, stats and can be attached to any program 13674 * type except themselves. When extension program is 13675 * replacing XDP function it is necessary to allow 13676 * performance analysis of all functions. Both original 13677 * XDP program and its program extension. Hence 13678 * attaching fentry/fexit to BPF_PROG_TYPE_EXT is 13679 * allowed. If extending of fentry/fexit was allowed it 13680 * would be possible to create long call chain 13681 * fentry->extension->fentry->extension beyond 13682 * reasonable stack size. Hence extending fentry is not 13683 * allowed. 13684 */ 13685 bpf_log(log, "Cannot extend fentry/fexit\n"); 13686 return -EINVAL; 13687 } 13688 } else { 13689 if (prog_extension) { 13690 bpf_log(log, "Cannot replace kernel functions\n"); 13691 return -EINVAL; 13692 } 13693 } 13694 13695 switch (prog->expected_attach_type) { 13696 case BPF_TRACE_RAW_TP: 13697 if (tgt_prog) { 13698 bpf_log(log, 13699 "Only FENTRY/FEXIT progs are attachable to another BPF prog\n"); 13700 return -EINVAL; 13701 } 13702 if (!btf_type_is_typedef(t)) { 13703 bpf_log(log, "attach_btf_id %u is not a typedef\n", 13704 btf_id); 13705 return -EINVAL; 13706 } 13707 if (strncmp(prefix, tname, sizeof(prefix) - 1)) { 13708 bpf_log(log, "attach_btf_id %u points to wrong type name %s\n", 13709 btf_id, tname); 13710 return -EINVAL; 13711 } 13712 tname += sizeof(prefix) - 1; 13713 t = btf_type_by_id(btf, t->type); 13714 if (!btf_type_is_ptr(t)) 13715 /* should never happen in valid vmlinux build */ 13716 return -EINVAL; 13717 t = btf_type_by_id(btf, t->type); 13718 if (!btf_type_is_func_proto(t)) 13719 /* should never happen in valid vmlinux build */ 13720 return -EINVAL; 13721 13722 break; 13723 case BPF_TRACE_ITER: 13724 if (!btf_type_is_func(t)) { 13725 bpf_log(log, "attach_btf_id %u is not a function\n", 13726 btf_id); 13727 return -EINVAL; 13728 } 13729 t = btf_type_by_id(btf, t->type); 13730 if (!btf_type_is_func_proto(t)) 13731 return -EINVAL; 13732 ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel); 13733 if (ret) 13734 return ret; 13735 break; 13736 default: 13737 if (!prog_extension) 13738 return -EINVAL; 13739 fallthrough; 13740 case BPF_MODIFY_RETURN: 13741 case BPF_LSM_MAC: 13742 case BPF_TRACE_FENTRY: 13743 case BPF_TRACE_FEXIT: 13744 if (!btf_type_is_func(t)) { 13745 bpf_log(log, "attach_btf_id %u is not a function\n", 13746 btf_id); 13747 return -EINVAL; 13748 } 13749 if (prog_extension && 13750 btf_check_type_match(log, prog, btf, t)) 13751 return -EINVAL; 13752 t = btf_type_by_id(btf, t->type); 13753 if (!btf_type_is_func_proto(t)) 13754 return -EINVAL; 13755 13756 if ((prog->aux->saved_dst_prog_type || prog->aux->saved_dst_attach_type) && 13757 (!tgt_prog || prog->aux->saved_dst_prog_type != tgt_prog->type || 13758 prog->aux->saved_dst_attach_type != tgt_prog->expected_attach_type)) 13759 return -EINVAL; 13760 13761 if (tgt_prog && conservative) 13762 t = NULL; 13763 13764 ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel); 13765 if (ret < 0) 13766 return ret; 13767 13768 if (tgt_prog) { 13769 if (subprog == 0) 13770 addr = (long) tgt_prog->bpf_func; 13771 else 13772 addr = (long) tgt_prog->aux->func[subprog]->bpf_func; 13773 } else { 13774 addr = kallsyms_lookup_name(tname); 13775 if (!addr) { 13776 bpf_log(log, 13777 "The address of function %s cannot be found\n", 13778 tname); 13779 return -ENOENT; 13780 } 13781 } 13782 13783 if (prog->aux->sleepable) { 13784 ret = -EINVAL; 13785 switch 
(prog->type) { 13786 case BPF_PROG_TYPE_TRACING: 13787 /* fentry/fexit/fmod_ret progs can be sleepable only if they are 13788 * attached to ALLOW_ERROR_INJECTION and are not in denylist. 13789 */ 13790 if (!check_non_sleepable_error_inject(btf_id) && 13791 within_error_injection_list(addr)) 13792 ret = 0; 13793 break; 13794 case BPF_PROG_TYPE_LSM: 13795 /* LSM progs check that they are attached to bpf_lsm_*() funcs. 13796 * Only some of them are sleepable. 13797 */ 13798 if (bpf_lsm_is_sleepable_hook(btf_id)) 13799 ret = 0; 13800 break; 13801 default: 13802 break; 13803 } 13804 if (ret) { 13805 bpf_log(log, "%s is not sleepable\n", tname); 13806 return ret; 13807 } 13808 } else if (prog->expected_attach_type == BPF_MODIFY_RETURN) { 13809 if (tgt_prog) { 13810 bpf_log(log, "can't modify return codes of BPF programs\n"); 13811 return -EINVAL; 13812 } 13813 ret = check_attach_modify_return(addr, tname); 13814 if (ret) { 13815 bpf_log(log, "%s() is not modifiable\n", tname); 13816 return ret; 13817 } 13818 } 13819 13820 break; 13821 } 13822 tgt_info->tgt_addr = addr; 13823 tgt_info->tgt_name = tname; 13824 tgt_info->tgt_type = t; 13825 return 0; 13826 } 13827 13828 BTF_SET_START(btf_id_deny) 13829 BTF_ID_UNUSED 13830 #ifdef CONFIG_SMP 13831 BTF_ID(func, migrate_disable) 13832 BTF_ID(func, migrate_enable) 13833 #endif 13834 #if !defined CONFIG_PREEMPT_RCU && !defined CONFIG_TINY_RCU 13835 BTF_ID(func, rcu_read_unlock_strict) 13836 #endif 13837 BTF_SET_END(btf_id_deny) 13838 13839 static int check_attach_btf_id(struct bpf_verifier_env *env) 13840 { 13841 struct bpf_prog *prog = env->prog; 13842 struct bpf_prog *tgt_prog = prog->aux->dst_prog; 13843 struct bpf_attach_target_info tgt_info = {}; 13844 u32 btf_id = prog->aux->attach_btf_id; 13845 struct bpf_trampoline *tr; 13846 int ret; 13847 u64 key; 13848 13849 if (prog->type == BPF_PROG_TYPE_SYSCALL) { 13850 if (prog->aux->sleepable) 13851 /* attach_btf_id checked to be zero already */ 13852 return 0; 13853 verbose(env, "Syscall programs can only be sleepable\n"); 13854 return -EINVAL; 13855 } 13856 13857 if (prog->aux->sleepable && prog->type != BPF_PROG_TYPE_TRACING && 13858 prog->type != BPF_PROG_TYPE_LSM) { 13859 verbose(env, "Only fentry/fexit/fmod_ret and lsm programs can be sleepable\n"); 13860 return -EINVAL; 13861 } 13862 13863 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS) 13864 return check_struct_ops_btf_id(env); 13865 13866 if (prog->type != BPF_PROG_TYPE_TRACING && 13867 prog->type != BPF_PROG_TYPE_LSM && 13868 prog->type != BPF_PROG_TYPE_EXT) 13869 return 0; 13870 13871 ret = bpf_check_attach_target(&env->log, prog, tgt_prog, btf_id, &tgt_info); 13872 if (ret) 13873 return ret; 13874 13875 if (tgt_prog && prog->type == BPF_PROG_TYPE_EXT) { 13876 /* to make freplace equivalent to their targets, they need to 13877 * inherit env->ops and expected_attach_type for the rest of the 13878 * verification 13879 */ 13880 env->ops = bpf_verifier_ops[tgt_prog->type]; 13881 prog->expected_attach_type = tgt_prog->expected_attach_type; 13882 } 13883 13884 /* store info about the attachment target that will be used later */ 13885 prog->aux->attach_func_proto = tgt_info.tgt_type; 13886 prog->aux->attach_func_name = tgt_info.tgt_name; 13887 13888 if (tgt_prog) { 13889 prog->aux->saved_dst_prog_type = tgt_prog->type; 13890 prog->aux->saved_dst_attach_type = tgt_prog->expected_attach_type; 13891 } 13892 13893 if (prog->expected_attach_type == BPF_TRACE_RAW_TP) { 13894 prog->aux->attach_btf_trace = true; 13895 return 0; 13896 } else if 
(prog->expected_attach_type == BPF_TRACE_ITER) { 13897 if (!bpf_iter_prog_supported(prog)) 13898 return -EINVAL; 13899 return 0; 13900 } 13901 13902 if (prog->type == BPF_PROG_TYPE_LSM) { 13903 ret = bpf_lsm_verify_prog(&env->log, prog); 13904 if (ret < 0) 13905 return ret; 13906 } else if (prog->type == BPF_PROG_TYPE_TRACING && 13907 btf_id_set_contains(&btf_id_deny, btf_id)) { 13908 return -EINVAL; 13909 } 13910 13911 key = bpf_trampoline_compute_key(tgt_prog, prog->aux->attach_btf, btf_id); 13912 tr = bpf_trampoline_get(key, &tgt_info); 13913 if (!tr) 13914 return -ENOMEM; 13915 13916 prog->aux->dst_trampoline = tr; 13917 return 0; 13918 } 13919 13920 struct btf *bpf_get_btf_vmlinux(void) 13921 { 13922 if (!btf_vmlinux && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) { 13923 mutex_lock(&bpf_verifier_lock); 13924 if (!btf_vmlinux) 13925 btf_vmlinux = btf_parse_vmlinux(); 13926 mutex_unlock(&bpf_verifier_lock); 13927 } 13928 return btf_vmlinux; 13929 } 13930 13931 int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr) 13932 { 13933 u64 start_time = ktime_get_ns(); 13934 struct bpf_verifier_env *env; 13935 struct bpf_verifier_log *log; 13936 int i, len, ret = -EINVAL; 13937 bool is_priv; 13938 13939 /* no program is valid */ 13940 if (ARRAY_SIZE(bpf_verifier_ops) == 0) 13941 return -EINVAL; 13942 13943 /* 'struct bpf_verifier_env' can be global, but since it's not small, 13944 * allocate/free it every time bpf_check() is called 13945 */ 13946 env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL); 13947 if (!env) 13948 return -ENOMEM; 13949 log = &env->log; 13950 13951 len = (*prog)->len; 13952 env->insn_aux_data = 13953 vzalloc(array_size(sizeof(struct bpf_insn_aux_data), len)); 13954 ret = -ENOMEM; 13955 if (!env->insn_aux_data) 13956 goto err_free_env; 13957 for (i = 0; i < len; i++) 13958 env->insn_aux_data[i].orig_idx = i; 13959 env->prog = *prog; 13960 env->ops = bpf_verifier_ops[env->prog->type]; 13961 env->fd_array = make_bpfptr(attr->fd_array, uattr.is_kernel); 13962 is_priv = bpf_capable(); 13963 13964 bpf_get_btf_vmlinux(); 13965 13966 /* grab the mutex to protect few globals used by verifier */ 13967 if (!is_priv) 13968 mutex_lock(&bpf_verifier_lock); 13969 13970 if (attr->log_level || attr->log_buf || attr->log_size) { 13971 /* user requested verbose verifier output 13972 * and supplied buffer to store the verification trace 13973 */ 13974 log->level = attr->log_level; 13975 log->ubuf = (char __user *) (unsigned long) attr->log_buf; 13976 log->len_total = attr->log_size; 13977 13978 ret = -EINVAL; 13979 /* log attributes have to be sane */ 13980 if (log->len_total < 128 || log->len_total > UINT_MAX >> 2 || 13981 !log->level || !log->ubuf || log->level & ~BPF_LOG_MASK) 13982 goto err_unlock; 13983 } 13984 13985 if (IS_ERR(btf_vmlinux)) { 13986 /* Either gcc or pahole or kernel are broken. 
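 * (btf_parse_vmlinux() returned an error, i.e. the BTF section embedded
 *  in the running kernel image could not be parsed.)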
*/ 13987 verbose(env, "in-kernel BTF is malformed\n"); 13988 ret = PTR_ERR(btf_vmlinux); 13989 goto skip_full_check; 13990 } 13991 13992 env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT); 13993 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) 13994 env->strict_alignment = true; 13995 if (attr->prog_flags & BPF_F_ANY_ALIGNMENT) 13996 env->strict_alignment = false; 13997 13998 env->allow_ptr_leaks = bpf_allow_ptr_leaks(); 13999 env->allow_uninit_stack = bpf_allow_uninit_stack(); 14000 env->allow_ptr_to_map_access = bpf_allow_ptr_to_map_access(); 14001 env->bypass_spec_v1 = bpf_bypass_spec_v1(); 14002 env->bypass_spec_v4 = bpf_bypass_spec_v4(); 14003 env->bpf_capable = bpf_capable(); 14004 14005 if (is_priv) 14006 env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ; 14007 14008 env->explored_states = kvcalloc(state_htab_size(env), 14009 sizeof(struct bpf_verifier_state_list *), 14010 GFP_USER); 14011 ret = -ENOMEM; 14012 if (!env->explored_states) 14013 goto skip_full_check; 14014 14015 ret = add_subprog_and_kfunc(env); 14016 if (ret < 0) 14017 goto skip_full_check; 14018 14019 ret = check_subprogs(env); 14020 if (ret < 0) 14021 goto skip_full_check; 14022 14023 ret = check_btf_info(env, attr, uattr); 14024 if (ret < 0) 14025 goto skip_full_check; 14026 14027 ret = check_attach_btf_id(env); 14028 if (ret) 14029 goto skip_full_check; 14030 14031 ret = resolve_pseudo_ldimm64(env); 14032 if (ret < 0) 14033 goto skip_full_check; 14034 14035 if (bpf_prog_is_dev_bound(env->prog->aux)) { 14036 ret = bpf_prog_offload_verifier_prep(env->prog); 14037 if (ret) 14038 goto skip_full_check; 14039 } 14040 14041 ret = check_cfg(env); 14042 if (ret < 0) 14043 goto skip_full_check; 14044 14045 ret = do_check_subprogs(env); 14046 ret = ret ?: do_check_main(env); 14047 14048 if (ret == 0 && bpf_prog_is_dev_bound(env->prog->aux)) 14049 ret = bpf_prog_offload_finalize(env); 14050 14051 skip_full_check: 14052 kvfree(env->explored_states); 14053 14054 if (ret == 0) 14055 ret = check_max_stack_depth(env); 14056 14057 /* instruction rewrites happen after this point */ 14058 if (is_priv) { 14059 if (ret == 0) 14060 opt_hard_wire_dead_code_branches(env); 14061 if (ret == 0) 14062 ret = opt_remove_dead_code(env); 14063 if (ret == 0) 14064 ret = opt_remove_nops(env); 14065 } else { 14066 if (ret == 0) 14067 sanitize_dead_code(env); 14068 } 14069 14070 if (ret == 0) 14071 /* program is valid, convert *(u32*)(ctx + off) accesses */ 14072 ret = convert_ctx_accesses(env); 14073 14074 if (ret == 0) 14075 ret = do_misc_fixups(env); 14076 14077 /* do 32-bit optimization after insn patching has done so those patched 14078 * insns could be handled correctly. 14079 */ 14080 if (ret == 0 && !bpf_prog_is_dev_bound(env->prog->aux)) { 14081 ret = opt_subreg_zext_lo32_rnd_hi32(env, attr); 14082 env->prog->aux->verifier_zext = bpf_jit_needs_zext() ? 
!ret 14083 : false; 14084 } 14085 14086 if (ret == 0) 14087 ret = fixup_call_args(env); 14088 14089 env->verification_time = ktime_get_ns() - start_time; 14090 print_verification_stats(env); 14091 env->prog->aux->verified_insns = env->insn_processed; 14092 14093 if (log->level && bpf_verifier_log_full(log)) 14094 ret = -ENOSPC; 14095 if (log->level && !log->ubuf) { 14096 ret = -EFAULT; 14097 goto err_release_maps; 14098 } 14099 14100 if (ret) 14101 goto err_release_maps; 14102 14103 if (env->used_map_cnt) { 14104 /* if program passed verifier, update used_maps in bpf_prog_info */ 14105 env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt, 14106 sizeof(env->used_maps[0]), 14107 GFP_KERNEL); 14108 14109 if (!env->prog->aux->used_maps) { 14110 ret = -ENOMEM; 14111 goto err_release_maps; 14112 } 14113 14114 memcpy(env->prog->aux->used_maps, env->used_maps, 14115 sizeof(env->used_maps[0]) * env->used_map_cnt); 14116 env->prog->aux->used_map_cnt = env->used_map_cnt; 14117 } 14118 if (env->used_btf_cnt) { 14119 /* if program passed verifier, update used_btfs in bpf_prog_aux */ 14120 env->prog->aux->used_btfs = kmalloc_array(env->used_btf_cnt, 14121 sizeof(env->used_btfs[0]), 14122 GFP_KERNEL); 14123 if (!env->prog->aux->used_btfs) { 14124 ret = -ENOMEM; 14125 goto err_release_maps; 14126 } 14127 14128 memcpy(env->prog->aux->used_btfs, env->used_btfs, 14129 sizeof(env->used_btfs[0]) * env->used_btf_cnt); 14130 env->prog->aux->used_btf_cnt = env->used_btf_cnt; 14131 } 14132 if (env->used_map_cnt || env->used_btf_cnt) { 14133 /* program is valid. Convert pseudo bpf_ld_imm64 into generic 14134 * bpf_ld_imm64 instructions 14135 */ 14136 convert_pseudo_ld_imm64(env); 14137 } 14138 14139 adjust_btf_func(env); 14140 14141 err_release_maps: 14142 if (!env->prog->aux->used_maps) 14143 /* if we didn't copy map pointers into bpf_prog_info, release 14144 * them now. Otherwise free_used_maps() will release them. 14145 */ 14146 release_maps(env); 14147 if (!env->prog->aux->used_btfs) 14148 release_btfs(env); 14149 14150 /* extension progs temporarily inherit the attach_type of their targets 14151 for verification purposes, so set it back to zero before returning 14152 */ 14153 if (env->prog->type == BPF_PROG_TYPE_EXT) 14154 env->prog->expected_attach_type = 0; 14155 14156 *prog = env->prog; 14157 err_unlock: 14158 if (!is_priv) 14159 mutex_unlock(&bpf_verifier_lock); 14160 vfree(env->insn_aux_data); 14161 err_free_env: 14162 kfree(env); 14163 return ret; 14164 } 14165
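/* For reference, bpf_check() is invoked from the BPF_PROG_LOAD path in
 * kernel/bpf/syscall.c, roughly as
 *
 *	err = bpf_check(&prog, attr, uattr);
 *
 * A negative return rejects the load; on success *prog may point to a
 * patched program that replaces the one passed in.
 */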