// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
 */
#include <uapi/linux/btf.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bpf_verifier.h>
#include <linux/filter.h>
#include <net/netlink.h>
#include <linux/file.h>
#include <linux/vmalloc.h>
#include <linux/stringify.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/perf_event.h>
#include <linux/ctype.h>
#include <linux/error-injection.h>
#include <linux/bpf_lsm.h>
#include <linux/btf_ids.h>

#include "disasm.h"

static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	[_id] = & _name ## _verifier_ops,
#define BPF_MAP_TYPE(_id, _ops)
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};

/* bpf_check() is a static code analyzer that walks eBPF program
 * instruction by instruction and updates register/stack state.
 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
 *
 * The first pass is depth-first-search to check that the program is a DAG.
 * It rejects the following programs:
 * - larger than BPF_MAXINSNS insns
 * - if loop is present (detected via back-edge)
 * - unreachable insns exist (shouldn't be a forest. program = one function)
 * - out of bounds or malformed jumps
 * The second pass is all possible path descent from the 1st insn.
 * Since it's analyzing all paths through the program, the length of the
 * analysis is limited to 64k insn, which may be hit even if total number of
 * insn is less than 4K, but there are too many branches that change
 * stack/regs. Number of 'branches to be analyzed' is limited to 1k.
 *
 * On entry to each instruction, each register has a type, and the instruction
 * changes the types of the registers depending on instruction semantics.
 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
 * copied to R1.
 *
 * All registers are 64-bit.
 * R0 - return register
 * R1-R5 argument passing registers
 * R6-R9 callee saved registers
 * R10 - frame pointer read-only
 *
 * At the start of BPF program the register R1 contains a pointer to bpf_context
 * and has type PTR_TO_CTX.
 *
 * Verifier tracks arithmetic operations on pointers in case:
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
 * 1st insn copies R10 (which has FRAME_PTR) type into R1
 * and 2nd arithmetic instruction is pattern matched to recognize
 * that it wants to construct a pointer to some element within stack.
 * So after 2nd insn, the register R1 has type PTR_TO_STACK
 * (and -20 constant is saved for further stack bounds checking).
 * Meaning that this reg is a pointer to stack plus known immediate constant.
 *
 * Most of the time the registers have SCALAR_VALUE type, which
 * means the register has some value, but it's not a valid pointer.
 * (like pointer plus pointer becomes SCALAR_VALUE type)
 *
 * When verifier sees load or store instructions the type of base register
 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET.
 * These are four pointer types recognized by check_mem_access() function.
 *
 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
 * and the range of [ptr, ptr + map's value_size) is accessible.
 *
 * Registers used to pass values to function calls are checked against
 * function argument constraints.
 *
 * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
 * It means that the register type passed to this function must be
 * PTR_TO_STACK and it will be used inside the function as
 * 'pointer to map element key'.
 *
 * For example the argument constraints for bpf_map_lookup_elem():
 *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
 *   .arg1_type = ARG_CONST_MAP_PTR,
 *   .arg2_type = ARG_PTR_TO_MAP_KEY,
 *
 * ret_type says that this function returns 'pointer to map elem value or null'
 * function expects 1st argument to be a const pointer to 'struct bpf_map' and
 * 2nd argument should be a pointer to stack, which will be used inside
 * the helper function as a pointer to map element key.
 *
 * On the kernel side the helper function looks like:
 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 * {
 *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
 *    void *key = (void *) (unsigned long) r2;
 *    void *value;
 *
 *    here kernel can access 'key' and 'map' pointers safely, knowing that
 *    [key, key + map->key_size) bytes are valid and were initialized on
 *    the stack of eBPF program.
 * }
 *
 * Corresponding eBPF program may look like:
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 * here verifier looks at prototype of map_lookup_elem() and sees:
 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
 *
 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
 * and were initialized prior to this call.
 * If it's ok, then verifier allows this BPF_CALL insn and looks at
 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
 * returns either a pointer to map value or NULL.
 *
 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
 * insn, the register holding that pointer in the true branch changes state to
 * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
 * branch. See check_cond_jmp_op().
 *
 * After the call R0 is set to return type of the function and registers R1-R5
 * are set to NOT_INIT to indicate that they are no longer readable.
 *
 * The following reference types represent a potential reference to a kernel
 * resource which, after first being allocated, must be checked and freed by
 * the BPF program:
 * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
 *
 * When the verifier sees a helper call return a reference type, it allocates a
 * pointer id for the reference and stores it in the current function state.
 * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into
 * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type
 * passes through a NULL-check conditional. For the branch wherein the state is
 * changed to CONST_IMM, the verifier releases the reference.
 *
 * For each helper function that allocates a reference, such as
 * bpf_sk_lookup_tcp(), there is a corresponding release function, such as
 * bpf_sk_release(). When a reference type passes into the release function,
 * the verifier also releases the reference. If any unchecked or unreleased
 * reference remains at the end of the program, the verifier rejects it.
 */
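
/* To illustrate the acquire/release pattern described above, a minimal
 * sketch (not taken from any particular test program; the jump offset and
 * register choices here are illustrative only):
 *
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_sk_lookup_tcp),
 *        // R0 is now PTR_TO_SOCKET_OR_NULL with a fresh reference id
 *    BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
 *        // taken branch: R0 is known NULL and the reference is released
 *        // fall-through: R0 becomes PTR_TO_SOCKET and must be released
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_sk_release),
 *        // the reference id held by R1 is dropped from the function state
 */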

/* verifier_state + insn_idx are pushed to stack when branch is encountered */
struct bpf_verifier_stack_elem {
	/* verifier state is 'st'
	 * before processing instruction 'insn_idx'
	 * and after processing instruction 'prev_insn_idx'
	 */
	struct bpf_verifier_state st;
	int insn_idx;
	int prev_insn_idx;
	struct bpf_verifier_stack_elem *next;
	/* length of verifier log at the time this state was pushed on stack */
	u32 log_pos;
};

#define BPF_COMPLEXITY_LIMIT_JMP_SEQ	8192
#define BPF_COMPLEXITY_LIMIT_STATES	64

#define BPF_MAP_KEY_POISON	(1ULL << 63)
#define BPF_MAP_KEY_SEEN	(1ULL << 62)

#define BPF_MAP_PTR_UNPRIV	1UL
#define BPF_MAP_PTR_POISON	((void *)((0xeB9FUL << 1) +	\
					  POISON_POINTER_DELTA))
#define BPF_MAP_PTR(X)		((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))

static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
{
	return BPF_MAP_PTR(aux->map_ptr_state) == BPF_MAP_PTR_POISON;
}

static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
{
	return aux->map_ptr_state & BPF_MAP_PTR_UNPRIV;
}

static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
			      const struct bpf_map *map, bool unpriv)
{
	BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
	unpriv |= bpf_map_ptr_unpriv(aux);
	aux->map_ptr_state = (unsigned long)map |
			     (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
}

static bool bpf_map_key_poisoned(const struct bpf_insn_aux_data *aux)
{
	return aux->map_key_state & BPF_MAP_KEY_POISON;
}

static bool bpf_map_key_unseen(const struct bpf_insn_aux_data *aux)
{
	return !(aux->map_key_state & BPF_MAP_KEY_SEEN);
}

static u64 bpf_map_key_immediate(const struct bpf_insn_aux_data *aux)
{
	return aux->map_key_state & ~(BPF_MAP_KEY_SEEN | BPF_MAP_KEY_POISON);
}

static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state)
{
	bool poisoned = bpf_map_key_poisoned(aux);

	aux->map_key_state = state | BPF_MAP_KEY_SEEN |
			     (poisoned ? BPF_MAP_KEY_POISON : 0ULL);
}
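
/* A quick worked example of the packing above (illustrative values only):
 * bpf_map_ptr_store(aux, map, true) stores the map pointer with the low bit
 * set, so BPF_MAP_PTR(aux->map_ptr_state) recovers 'map' while
 * bpf_map_ptr_unpriv(aux) reads back true. Likewise bpf_map_key_store()
 * keeps the key immediate in the low bits, ORs in BPF_MAP_KEY_SEEN and
 * preserves an earlier BPF_MAP_KEY_POISON mark, so that
 * bpf_map_key_immediate() can later strip both flag bits.
 */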

struct bpf_call_arg_meta {
	struct bpf_map *map_ptr;
	bool raw_mode;
	bool pkt_access;
	int regno;
	int access_size;
	int mem_size;
	u64 msize_max_value;
	int ref_obj_id;
	int func_id;
	struct btf *btf;
	u32 btf_id;
	struct btf *ret_btf;
	u32 ret_btf_id;
};

struct btf *btf_vmlinux;

static DEFINE_MUTEX(bpf_verifier_lock);

static const struct bpf_line_info *
find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
{
	const struct bpf_line_info *linfo;
	const struct bpf_prog *prog;
	u32 i, nr_linfo;

	prog = env->prog;
	nr_linfo = prog->aux->nr_linfo;

	if (!nr_linfo || insn_off >= prog->len)
		return NULL;

	linfo = prog->aux->linfo;
	for (i = 1; i < nr_linfo; i++)
		if (insn_off < linfo[i].insn_off)
			break;

	return &linfo[i - 1];
}

void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
		       va_list args)
{
	unsigned int n;

	n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);

	WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
		  "verifier log line truncated - local buffer too short\n");

	n = min(log->len_total - log->len_used - 1, n);
	log->kbuf[n] = '\0';

	if (log->level == BPF_LOG_KERNEL) {
		pr_err("BPF:%s\n", log->kbuf);
		return;
	}
	if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
		log->len_used += n;
	else
		log->ubuf = NULL;
}

static void bpf_vlog_reset(struct bpf_verifier_log *log, u32 new_pos)
{
	char zero = 0;

	if (!bpf_verifier_log_needed(log))
		return;

	log->len_used = new_pos;
	if (put_user(zero, log->ubuf + new_pos))
		log->ubuf = NULL;
}

/* log_level controls verbosity level of eBPF verifier.
 * bpf_verifier_log_write() is used to dump the verification trace to the log,
 * so the user can figure out what's wrong with the program
 */
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
					   const char *fmt, ...)
{
	va_list args;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(&env->log, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(bpf_verifier_log_write);

__printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
{
	struct bpf_verifier_env *env = private_data;
	va_list args;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(&env->log, fmt, args);
	va_end(args);
}

__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
			    const char *fmt, ...)
{
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(log, fmt, args);
	va_end(args);
}
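
/* Typical usage of the log helpers above (a sketch; the message text is
 * illustrative): verifier-internal code calls verbose() with the env as
 * private_data, e.g.
 *
 *	verbose(env, "R%d !read_ok\n", regno);
 *
 * while code shared with BTF checking uses bpf_log() on a bare log pointer.
 * With log->level == BPF_LOG_KERNEL the line lands in the kernel log via
 * pr_err(); otherwise it is appended to the user-supplied log buffer.
 */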

static const char *ltrim(const char *s)
{
	while (isspace(*s))
		s++;

	return s;
}

__printf(3, 4) static void verbose_linfo(struct bpf_verifier_env *env,
					 u32 insn_off,
					 const char *prefix_fmt, ...)
{
	const struct bpf_line_info *linfo;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	linfo = find_linfo(env, insn_off);
	if (!linfo || linfo == env->prev_linfo)
		return;

	if (prefix_fmt) {
		va_list args;

		va_start(args, prefix_fmt);
		bpf_verifier_vlog(&env->log, prefix_fmt, args);
		va_end(args);
	}

	verbose(env, "%s\n",
		ltrim(btf_name_by_offset(env->prog->aux->btf,
					 linfo->line_off)));

	env->prev_linfo = linfo;
}

static bool type_is_pkt_pointer(enum bpf_reg_type type)
{
	return type == PTR_TO_PACKET ||
	       type == PTR_TO_PACKET_META;
}

static bool type_is_sk_pointer(enum bpf_reg_type type)
{
	return type == PTR_TO_SOCKET ||
		type == PTR_TO_SOCK_COMMON ||
		type == PTR_TO_TCP_SOCK ||
		type == PTR_TO_XDP_SOCK;
}

static bool reg_type_not_null(enum bpf_reg_type type)
{
	return type == PTR_TO_SOCKET ||
		type == PTR_TO_TCP_SOCK ||
		type == PTR_TO_MAP_VALUE ||
		type == PTR_TO_SOCK_COMMON;
}

static bool reg_type_may_be_null(enum bpf_reg_type type)
{
	return type == PTR_TO_MAP_VALUE_OR_NULL ||
	       type == PTR_TO_SOCKET_OR_NULL ||
	       type == PTR_TO_SOCK_COMMON_OR_NULL ||
	       type == PTR_TO_TCP_SOCK_OR_NULL ||
	       type == PTR_TO_BTF_ID_OR_NULL ||
	       type == PTR_TO_MEM_OR_NULL ||
	       type == PTR_TO_RDONLY_BUF_OR_NULL ||
	       type == PTR_TO_RDWR_BUF_OR_NULL;
}

static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
{
	return reg->type == PTR_TO_MAP_VALUE &&
		map_value_has_spin_lock(reg->map_ptr);
}

static bool reg_type_may_be_refcounted_or_null(enum bpf_reg_type type)
{
	return type == PTR_TO_SOCKET ||
		type == PTR_TO_SOCKET_OR_NULL ||
		type == PTR_TO_TCP_SOCK ||
		type == PTR_TO_TCP_SOCK_OR_NULL ||
		type == PTR_TO_MEM ||
		type == PTR_TO_MEM_OR_NULL;
}

static bool arg_type_may_be_refcounted(enum bpf_arg_type type)
{
	return type == ARG_PTR_TO_SOCK_COMMON;
}

static bool arg_type_may_be_null(enum bpf_arg_type type)
{
	return type == ARG_PTR_TO_MAP_VALUE_OR_NULL ||
	       type == ARG_PTR_TO_MEM_OR_NULL ||
	       type == ARG_PTR_TO_CTX_OR_NULL ||
	       type == ARG_PTR_TO_SOCKET_OR_NULL ||
	       type == ARG_PTR_TO_ALLOC_MEM_OR_NULL;
}

/* Determine whether the function releases some resources allocated by another
 * function call. The first reference type argument will be assumed to be
 * released by release_reference().
 */
static bool is_release_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_sk_release ||
	       func_id == BPF_FUNC_ringbuf_submit ||
	       func_id == BPF_FUNC_ringbuf_discard;
}

static bool may_be_acquire_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_sk_lookup_tcp ||
		func_id == BPF_FUNC_sk_lookup_udp ||
		func_id == BPF_FUNC_skc_lookup_tcp ||
		func_id == BPF_FUNC_map_lookup_elem ||
		func_id == BPF_FUNC_ringbuf_reserve;
}

static bool is_acquire_function(enum bpf_func_id func_id,
				const struct bpf_map *map)
{
	enum bpf_map_type map_type = map ? map->map_type : BPF_MAP_TYPE_UNSPEC;

	if (func_id == BPF_FUNC_sk_lookup_tcp ||
	    func_id == BPF_FUNC_sk_lookup_udp ||
	    func_id == BPF_FUNC_skc_lookup_tcp ||
	    func_id == BPF_FUNC_ringbuf_reserve)
		return true;

	if (func_id == BPF_FUNC_map_lookup_elem &&
	    (map_type == BPF_MAP_TYPE_SOCKMAP ||
	     map_type == BPF_MAP_TYPE_SOCKHASH))
		return true;

	return false;
}

static bool is_ptr_cast_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_tcp_sock ||
		func_id == BPF_FUNC_sk_fullsock ||
		func_id == BPF_FUNC_skc_to_tcp_sock ||
		func_id == BPF_FUNC_skc_to_tcp6_sock ||
		func_id == BPF_FUNC_skc_to_udp6_sock ||
		func_id == BPF_FUNC_skc_to_tcp_timewait_sock ||
		func_id == BPF_FUNC_skc_to_tcp_request_sock;
}

/* string representation of 'enum bpf_reg_type' */
static const char * const reg_type_str[] = {
	[NOT_INIT]		= "?",
	[SCALAR_VALUE]		= "inv",
	[PTR_TO_CTX]		= "ctx",
	[CONST_PTR_TO_MAP]	= "map_ptr",
	[PTR_TO_MAP_VALUE]	= "map_value",
	[PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
	[PTR_TO_STACK]		= "fp",
	[PTR_TO_PACKET]		= "pkt",
	[PTR_TO_PACKET_META]	= "pkt_meta",
	[PTR_TO_PACKET_END]	= "pkt_end",
	[PTR_TO_FLOW_KEYS]	= "flow_keys",
	[PTR_TO_SOCKET]		= "sock",
	[PTR_TO_SOCKET_OR_NULL] = "sock_or_null",
	[PTR_TO_SOCK_COMMON]	= "sock_common",
	[PTR_TO_SOCK_COMMON_OR_NULL] = "sock_common_or_null",
	[PTR_TO_TCP_SOCK]	= "tcp_sock",
	[PTR_TO_TCP_SOCK_OR_NULL] = "tcp_sock_or_null",
	[PTR_TO_TP_BUFFER]	= "tp_buffer",
	[PTR_TO_XDP_SOCK]	= "xdp_sock",
	[PTR_TO_BTF_ID]		= "ptr_",
	[PTR_TO_BTF_ID_OR_NULL]	= "ptr_or_null_",
	[PTR_TO_PERCPU_BTF_ID]	= "percpu_ptr_",
	[PTR_TO_MEM]		= "mem",
	[PTR_TO_MEM_OR_NULL]	= "mem_or_null",
	[PTR_TO_RDONLY_BUF]	= "rdonly_buf",
	[PTR_TO_RDONLY_BUF_OR_NULL] = "rdonly_buf_or_null",
	[PTR_TO_RDWR_BUF]	= "rdwr_buf",
	[PTR_TO_RDWR_BUF_OR_NULL] = "rdwr_buf_or_null",
};

static char slot_type_char[] = {
	[STACK_INVALID]	= '?',
	[STACK_SPILL]	= 'r',
	[STACK_MISC]	= 'm',
	[STACK_ZERO]	= '0',
};

static void print_liveness(struct bpf_verifier_env *env,
			   enum bpf_reg_liveness live)
{
	if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE))
		verbose(env, "_");
	if (live & REG_LIVE_READ)
		verbose(env, "r");
	if (live & REG_LIVE_WRITTEN)
		verbose(env, "w");
	if (live & REG_LIVE_DONE)
		verbose(env, "D");
}

static struct bpf_func_state *func(struct bpf_verifier_env *env,
				   const struct bpf_reg_state *reg)
{
	struct bpf_verifier_state *cur = env->cur_state;

	return cur->frame[reg->frameno];
}

static const char *kernel_type_name(const struct btf* btf, u32 id)
{
	return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off);
}

static void print_verifier_state(struct bpf_verifier_env *env,
				 const struct bpf_func_state *state)
{
	const struct bpf_reg_state *reg;
	enum bpf_reg_type t;
	int i;

	if (state->frameno)
		verbose(env, " frame%d:", state->frameno);
	for (i = 0; i < MAX_BPF_REG; i++) {
		reg = &state->regs[i];
		t = reg->type;
		if (t == NOT_INIT)
			continue;
		verbose(env, " R%d", i);
		print_liveness(env, reg->live);
		verbose(env, "=%s", reg_type_str[t]);
		if (t == SCALAR_VALUE && reg->precise)
			verbose(env, "P");
		if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
		    tnum_is_const(reg->var_off)) {
			/* reg->off should be 0 for SCALAR_VALUE */
			verbose(env, "%lld", reg->var_off.value + reg->off);
		} else {
			if (t == PTR_TO_BTF_ID ||
			    t == PTR_TO_BTF_ID_OR_NULL ||
			    t == PTR_TO_PERCPU_BTF_ID)
				verbose(env, "%s", kernel_type_name(reg->btf, reg->btf_id));
			verbose(env, "(id=%d", reg->id);
			if (reg_type_may_be_refcounted_or_null(t))
				verbose(env, ",ref_obj_id=%d", reg->ref_obj_id);
			if (t != SCALAR_VALUE)
				verbose(env, ",off=%d", reg->off);
			if (type_is_pkt_pointer(t))
				verbose(env, ",r=%d", reg->range);
			else if (t == CONST_PTR_TO_MAP ||
				 t == PTR_TO_MAP_VALUE ||
				 t == PTR_TO_MAP_VALUE_OR_NULL)
				verbose(env, ",ks=%d,vs=%d",
					reg->map_ptr->key_size,
					reg->map_ptr->value_size);
			if (tnum_is_const(reg->var_off)) {
				/* Typically an immediate SCALAR_VALUE, but
				 * could be a pointer whose offset is too big
				 * for reg->off
				 */
				verbose(env, ",imm=%llx", reg->var_off.value);
			} else {
				if (reg->smin_value != reg->umin_value &&
				    reg->smin_value != S64_MIN)
					verbose(env, ",smin_value=%lld",
						(long long)reg->smin_value);
				if (reg->smax_value != reg->umax_value &&
				    reg->smax_value != S64_MAX)
					verbose(env, ",smax_value=%lld",
						(long long)reg->smax_value);
				if (reg->umin_value != 0)
					verbose(env, ",umin_value=%llu",
						(unsigned long long)reg->umin_value);
				if (reg->umax_value != U64_MAX)
					verbose(env, ",umax_value=%llu",
						(unsigned long long)reg->umax_value);
				if (!tnum_is_unknown(reg->var_off)) {
					char tn_buf[48];

					tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
					verbose(env, ",var_off=%s", tn_buf);
				}
				if (reg->s32_min_value != reg->smin_value &&
				    reg->s32_min_value != S32_MIN)
					verbose(env, ",s32_min_value=%d",
						(int)(reg->s32_min_value));
				if (reg->s32_max_value != reg->smax_value &&
				    reg->s32_max_value != S32_MAX)
					verbose(env, ",s32_max_value=%d",
						(int)(reg->s32_max_value));
				if (reg->u32_min_value != reg->umin_value &&
				    reg->u32_min_value != U32_MIN)
					verbose(env, ",u32_min_value=%d",
						(int)(reg->u32_min_value));
				if (reg->u32_max_value != reg->umax_value &&
				    reg->u32_max_value != U32_MAX)
					verbose(env, ",u32_max_value=%d",
						(int)(reg->u32_max_value));
			}
			verbose(env, ")");
		}
	}
	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
		char types_buf[BPF_REG_SIZE + 1];
		bool valid = false;
		int j;

		for (j = 0; j < BPF_REG_SIZE; j++) {
			if (state->stack[i].slot_type[j] != STACK_INVALID)
				valid = true;
			types_buf[j] = slot_type_char[
					state->stack[i].slot_type[j]];
		}
		types_buf[BPF_REG_SIZE] = 0;
		if (!valid)
			continue;
		verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
		print_liveness(env, state->stack[i].spilled_ptr.live);
		if (state->stack[i].slot_type[0] == STACK_SPILL) {
			reg = &state->stack[i].spilled_ptr;
			t = reg->type;
			verbose(env, "=%s", reg_type_str[t]);
			if (t == SCALAR_VALUE && reg->precise)
				verbose(env, "P");
			if (t == SCALAR_VALUE && tnum_is_const(reg->var_off))
				verbose(env, "%lld", reg->var_off.value + reg->off);
		} else {
			verbose(env, "=%s", types_buf);
		}
	}
	if (state->acquired_refs && state->refs[0].id) {
		verbose(env, " refs=%d", state->refs[0].id);
		for (i = 1; i < state->acquired_refs; i++)
			if (state->refs[i].id)
				verbose(env, ",%d", state->refs[i].id);
	}
	verbose(env, "\n");
}
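
/* Putting the pieces above together, a state line in the log looks roughly
 * like (an illustrative sample, not output captured from a real run):
 *
 *   R0_w=inv1 R1=ctx(id=0,off=0,imm=0) R6_w=pkt(id=0,off=0,r=8,imm=0) R10=fp0 fp-8=mmmm????
 *
 * "_w" after a register name is the REG_LIVE_WRITTEN mark printed by
 * print_liveness(), "inv1" is a SCALAR_VALUE known to be 1, and the "fp-8"
 * entry shows the per-byte stack slot types from slot_type_char[].
 */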

#define COPY_STATE_FN(NAME, COUNT, FIELD, SIZE)				\
static int copy_##NAME##_state(struct bpf_func_state *dst,		\
			       const struct bpf_func_state *src)	\
{									\
	if (!src->FIELD)						\
		return 0;						\
	if (WARN_ON_ONCE(dst->COUNT < src->COUNT)) {			\
		/* internal bug, make state invalid to reject the program */ \
		memset(dst, 0, sizeof(*dst));				\
		return -EFAULT;						\
	}								\
	memcpy(dst->FIELD, src->FIELD,					\
	       sizeof(*src->FIELD) * (src->COUNT / SIZE));		\
	return 0;							\
}
/* copy_reference_state() */
COPY_STATE_FN(reference, acquired_refs, refs, 1)
/* copy_stack_state() */
COPY_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
#undef COPY_STATE_FN
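
/* For readers unfamiliar with the macro, COPY_STATE_FN(reference,
 * acquired_refs, refs, 1) above expands to (roughly):
 *
 *	static int copy_reference_state(struct bpf_func_state *dst,
 *					const struct bpf_func_state *src)
 *	{
 *		if (!src->refs)
 *			return 0;
 *		if (WARN_ON_ONCE(dst->acquired_refs < src->acquired_refs)) {
 *			memset(dst, 0, sizeof(*dst));
 *			return -EFAULT;
 *		}
 *		memcpy(dst->refs, src->refs,
 *		       sizeof(*src->refs) * (src->acquired_refs / 1));
 *		return 0;
 *	}
 */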

#define REALLOC_STATE_FN(NAME, COUNT, FIELD, SIZE)			\
static int realloc_##NAME##_state(struct bpf_func_state *state, int size, \
				  bool copy_old)			\
{									\
	u32 old_size = state->COUNT;					\
	struct bpf_##NAME##_state *new_##FIELD;				\
	int slot = size / SIZE;						\
									\
	if (size <= old_size || !size) {				\
		if (copy_old)						\
			return 0;					\
		state->COUNT = slot * SIZE;				\
		if (!size && old_size) {				\
			kfree(state->FIELD);				\
			state->FIELD = NULL;				\
		}							\
		return 0;						\
	}								\
	new_##FIELD = kmalloc_array(slot, sizeof(struct bpf_##NAME##_state), \
				    GFP_KERNEL);			\
	if (!new_##FIELD)						\
		return -ENOMEM;						\
	if (copy_old) {							\
		if (state->FIELD)					\
			memcpy(new_##FIELD, state->FIELD,		\
			       sizeof(*new_##FIELD) * (old_size / SIZE)); \
		memset(new_##FIELD + old_size / SIZE, 0,		\
		       sizeof(*new_##FIELD) * (size - old_size) / SIZE); \
	}								\
	state->COUNT = slot * SIZE;					\
	kfree(state->FIELD);						\
	state->FIELD = new_##FIELD;					\
	return 0;							\
}
/* realloc_reference_state() */
REALLOC_STATE_FN(reference, acquired_refs, refs, 1)
/* realloc_stack_state() */
REALLOC_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
#undef REALLOC_STATE_FN

/* do_check() starts with zero-sized stack in struct bpf_verifier_state to
 * make it consume minimal amount of memory. check_stack_write() access from
 * the program calls into realloc_func_state() to grow the stack size.
 * Note there is a non-zero 'parent' pointer inside bpf_verifier_state
 * which realloc_stack_state() copies over. It points to the previous
 * bpf_verifier_state which is never reallocated.
 */
static int realloc_func_state(struct bpf_func_state *state, int stack_size,
			      int refs_size, bool copy_old)
{
	int err = realloc_reference_state(state, refs_size, copy_old);
	if (err)
		return err;
	return realloc_stack_state(state, stack_size, copy_old);
}

/* Acquire a pointer id from the env and update the state->refs to include
 * this new pointer reference.
 * On success, returns a valid pointer id to associate with the register
 * On failure, returns a negative errno.
 */
static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
{
	struct bpf_func_state *state = cur_func(env);
	int new_ofs = state->acquired_refs;
	int id, err;

	err = realloc_reference_state(state, state->acquired_refs + 1, true);
	if (err)
		return err;
	id = ++env->id_gen;
	state->refs[new_ofs].id = id;
	state->refs[new_ofs].insn_idx = insn_idx;

	return id;
}

/* release function corresponding to acquire_reference_state(). Idempotent. */
static int release_reference_state(struct bpf_func_state *state, int ptr_id)
{
	int i, last_idx;

	last_idx = state->acquired_refs - 1;
	for (i = 0; i < state->acquired_refs; i++) {
		if (state->refs[i].id == ptr_id) {
			if (last_idx && i != last_idx)
				memcpy(&state->refs[i], &state->refs[last_idx],
				       sizeof(*state->refs));
			memset(&state->refs[last_idx], 0, sizeof(*state->refs));
			state->acquired_refs--;
			return 0;
		}
	}
	return -EINVAL;
}

static int transfer_reference_state(struct bpf_func_state *dst,
				    struct bpf_func_state *src)
{
	int err = realloc_reference_state(dst, src->acquired_refs, false);
	if (err)
		return err;
	err = copy_reference_state(dst, src);
	if (err)
		return err;
	return 0;
}

static void free_func_state(struct bpf_func_state *state)
{
	if (!state)
		return;
	kfree(state->refs);
	kfree(state->stack);
	kfree(state);
}

static void clear_jmp_history(struct bpf_verifier_state *state)
{
	kfree(state->jmp_history);
	state->jmp_history = NULL;
	state->jmp_history_cnt = 0;
}

static void free_verifier_state(struct bpf_verifier_state *state,
				bool free_self)
{
	int i;

	for (i = 0; i <= state->curframe; i++) {
		free_func_state(state->frame[i]);
		state->frame[i] = NULL;
	}
	clear_jmp_history(state);
	if (free_self)
		kfree(state);
}

/* copy verifier state from src to dst growing dst stack space
 * when necessary to accommodate larger src stack
 */
static int copy_func_state(struct bpf_func_state *dst,
			   const struct bpf_func_state *src)
{
	int err;

	err = realloc_func_state(dst, src->allocated_stack, src->acquired_refs,
				 false);
	if (err)
		return err;
	memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs));
	err = copy_reference_state(dst, src);
	if (err)
		return err;
	return copy_stack_state(dst, src);
}

static int copy_verifier_state(struct bpf_verifier_state *dst_state,
			       const struct bpf_verifier_state *src)
{
	struct bpf_func_state *dst;
	u32 jmp_sz = sizeof(struct bpf_idx_pair) * src->jmp_history_cnt;
	int i, err;

	if (dst_state->jmp_history_cnt < src->jmp_history_cnt) {
		kfree(dst_state->jmp_history);
		dst_state->jmp_history = kmalloc(jmp_sz, GFP_USER);
		if (!dst_state->jmp_history)
			return -ENOMEM;
	}
	memcpy(dst_state->jmp_history, src->jmp_history, jmp_sz);
	dst_state->jmp_history_cnt = src->jmp_history_cnt;

	/* if dst has more stack frames than src, free them */
	for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
		free_func_state(dst_state->frame[i]);
		dst_state->frame[i] = NULL;
	}
	dst_state->speculative = src->speculative;
	dst_state->curframe = src->curframe;
	dst_state->active_spin_lock = src->active_spin_lock;
	dst_state->branches = src->branches;
	dst_state->parent = src->parent;
	dst_state->first_insn_idx = src->first_insn_idx;
	dst_state->last_insn_idx = src->last_insn_idx;
	for (i = 0; i <= src->curframe; i++) {
		dst = dst_state->frame[i];
		if (!dst) {
			dst = kzalloc(sizeof(*dst), GFP_KERNEL);
			if (!dst)
				return -ENOMEM;
			dst_state->frame[i] = dst;
		}
		err = copy_func_state(dst, src->frame[i]);
		if (err)
			return err;
	}
	return 0;
}

static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
{
	while (st) {
		u32 br = --st->branches;

		/* WARN_ON(br > 1) technically makes sense here,
		 * but see comment in push_stack(), hence:
		 */
		WARN_ONCE((int)br < 0,
			  "BUG update_branch_counts:branches_to_explore=%d\n",
			  br);
		if (br)
			break;
		st = st->parent;
	}
}

static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
		     int *insn_idx, bool pop_log)
{
	struct bpf_verifier_state *cur = env->cur_state;
	struct bpf_verifier_stack_elem *elem, *head = env->head;
	int err;

	if (env->head == NULL)
		return -ENOENT;

	if (cur) {
		err = copy_verifier_state(cur, &head->st);
		if (err)
			return err;
	}
	if (pop_log)
		bpf_vlog_reset(&env->log, head->log_pos);
	if (insn_idx)
		*insn_idx = head->insn_idx;
	if (prev_insn_idx)
		*prev_insn_idx = head->prev_insn_idx;
	elem = head->next;
	free_verifier_state(&head->st, false);
	kfree(head);
	env->head = elem;
	env->stack_size--;
	return 0;
}

static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
					     int insn_idx, int prev_insn_idx,
					     bool speculative)
{
	struct bpf_verifier_state *cur = env->cur_state;
	struct bpf_verifier_stack_elem *elem;
	int err;

	elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
	if (!elem)
		goto err;

	elem->insn_idx = insn_idx;
	elem->prev_insn_idx = prev_insn_idx;
	elem->next = env->head;
	elem->log_pos = env->log.len_used;
	env->head = elem;
	env->stack_size++;
	err = copy_verifier_state(&elem->st, cur);
	if (err)
		goto err;
	elem->st.speculative |= speculative;
	if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
		verbose(env, "The sequence of %d jumps is too complex.\n",
			env->stack_size);
		goto err;
	}
	if (elem->st.parent) {
		++elem->st.parent->branches;
		/* WARN_ON(branches > 2) technically makes sense here,
		 * but
		 * 1. speculative states will bump 'branches' for non-branch
		 * instructions
		 * 2. is_state_visited() heuristics may decide not to create
		 * a new state for a sequence of branches and all such current
		 * and cloned states will be pointing to a single parent state
		 * which might have large 'branches' count.
		 */
	}
	return &elem->st;
err:
	free_verifier_state(env->cur_state, true);
	env->cur_state = NULL;
	/* pop all elements and return */
	while (!pop_stack(env, NULL, NULL, false));
	return NULL;
}

#define CALLER_SAVED_REGS 6
static const int caller_saved[CALLER_SAVED_REGS] = {
	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
};

static void __mark_reg_not_init(const struct bpf_verifier_env *env,
				struct bpf_reg_state *reg);

/* This helper doesn't clear reg->id */
static void ___mark_reg_known(struct bpf_reg_state *reg, u64 imm)
{
	reg->var_off = tnum_const(imm);
	reg->smin_value = (s64)imm;
	reg->smax_value = (s64)imm;
	reg->umin_value = imm;
	reg->umax_value = imm;

	reg->s32_min_value = (s32)imm;
	reg->s32_max_value = (s32)imm;
	reg->u32_min_value = (u32)imm;
	reg->u32_max_value = (u32)imm;
}
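
/* A worked example of the above (values chosen for illustration):
 * ___mark_reg_known(reg, 0x100000002ULL) sets var_off to the constant tnum
 * {.value = 0x100000002, .mask = 0}, the 64-bit bounds to exactly
 * 0x100000002, and the 32-bit bounds to the truncated subregister value 2,
 * which is why the 32-bit bounds are tracked separately from the 64-bit
 * ones.
 */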

/* Mark the unknown part of a register (variable offset or scalar value) as
 * known to have the value @imm.
 */
static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
{
	/* Clear id, off, and union(map_ptr, range) */
	memset(((u8 *)reg) + sizeof(reg->type), 0,
	       offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type));
	___mark_reg_known(reg, imm);
}

static void __mark_reg32_known(struct bpf_reg_state *reg, u64 imm)
{
	reg->var_off = tnum_const_subreg(reg->var_off, imm);
	reg->s32_min_value = (s32)imm;
	reg->s32_max_value = (s32)imm;
	reg->u32_min_value = (u32)imm;
	reg->u32_max_value = (u32)imm;
}

/* Mark the 'variable offset' part of a register as zero. This should be
 * used only on registers holding a pointer type.
 */
static void __mark_reg_known_zero(struct bpf_reg_state *reg)
{
	__mark_reg_known(reg, 0);
}

static void __mark_reg_const_zero(struct bpf_reg_state *reg)
{
	__mark_reg_known(reg, 0);
	reg->type = SCALAR_VALUE;
}

static void mark_reg_known_zero(struct bpf_verifier_env *env,
				struct bpf_reg_state *regs, u32 regno)
{
	if (WARN_ON(regno >= MAX_BPF_REG)) {
		verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
		/* Something bad happened, let's kill all regs */
		for (regno = 0; regno < MAX_BPF_REG; regno++)
			__mark_reg_not_init(env, regs + regno);
		return;
	}
	__mark_reg_known_zero(regs + regno);
}

static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
{
	return type_is_pkt_pointer(reg->type);
}

static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
{
	return reg_is_pkt_pointer(reg) ||
	       reg->type == PTR_TO_PACKET_END;
}

/* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */
static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
				    enum bpf_reg_type which)
{
	/* The register can already have a range from prior markings.
	 * This is fine as long as it hasn't been advanced from its
	 * origin.
	 */
	return reg->type == which &&
	       reg->id == 0 &&
	       reg->off == 0 &&
	       tnum_equals_const(reg->var_off, 0);
}

/* Reset the min/max bounds of a register */
static void __mark_reg_unbounded(struct bpf_reg_state *reg)
{
	reg->smin_value = S64_MIN;
	reg->smax_value = S64_MAX;
	reg->umin_value = 0;
	reg->umax_value = U64_MAX;

	reg->s32_min_value = S32_MIN;
	reg->s32_max_value = S32_MAX;
	reg->u32_min_value = 0;
	reg->u32_max_value = U32_MAX;
}

static void __mark_reg64_unbounded(struct bpf_reg_state *reg)
{
	reg->smin_value = S64_MIN;
	reg->smax_value = S64_MAX;
	reg->umin_value = 0;
	reg->umax_value = U64_MAX;
}

static void __mark_reg32_unbounded(struct bpf_reg_state *reg)
{
	reg->s32_min_value = S32_MIN;
	reg->s32_max_value = S32_MAX;
	reg->u32_min_value = 0;
	reg->u32_max_value = U32_MAX;
}

static void __update_reg32_bounds(struct bpf_reg_state *reg)
{
	struct tnum var32_off = tnum_subreg(reg->var_off);

	/* min signed is max(sign bit) | min(other bits) */
	reg->s32_min_value = max_t(s32, reg->s32_min_value,
			var32_off.value | (var32_off.mask & S32_MIN));
	/* max signed is min(sign bit) | max(other bits) */
	reg->s32_max_value = min_t(s32, reg->s32_max_value,
			var32_off.value | (var32_off.mask & S32_MAX));
	reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)var32_off.value);
	reg->u32_max_value = min(reg->u32_max_value,
				 (u32)(var32_off.value | var32_off.mask));
}

static void __update_reg64_bounds(struct bpf_reg_state *reg)
{
	/* min signed is max(sign bit) | min(other bits) */
	reg->smin_value = max_t(s64, reg->smin_value,
				reg->var_off.value | (reg->var_off.mask & S64_MIN));
	/* max signed is min(sign bit) | max(other bits) */
	reg->smax_value = min_t(s64, reg->smax_value,
				reg->var_off.value | (reg->var_off.mask & S64_MAX));
	reg->umin_value = max(reg->umin_value, reg->var_off.value);
	reg->umax_value = min(reg->umax_value,
			      reg->var_off.value | reg->var_off.mask);
}

static void __update_reg_bounds(struct bpf_reg_state *reg)
{
	__update_reg32_bounds(reg);
	__update_reg64_bounds(reg);
}
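
/* A small worked example of the unsigned part of the update above
 * (illustrative numbers): with var_off = {.value = 0x4, .mask = 0x3},
 * i.e. bit 2 known set and bits 0-1 unknown, umin_value can be raised to
 * var_off.value = 0x4 (all unknown bits zero) and umax_value can be
 * lowered to var_off.value | var_off.mask = 0x7 (all unknown bits one).
 * The signed cases treat the sign bit separately because setting it
 * lowers, rather than raises, the value.
 */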

/* Uses signed min/max values to inform unsigned, and vice-versa */
static void __reg32_deduce_bounds(struct bpf_reg_state *reg)
{
	/* Learn sign from signed bounds.
	 * If we cannot cross the sign boundary, then signed and unsigned bounds
	 * are the same, so combine.  This works even in the negative case, e.g.
	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
	 */
	if (reg->s32_min_value >= 0 || reg->s32_max_value < 0) {
		reg->s32_min_value = reg->u32_min_value =
			max_t(u32, reg->s32_min_value, reg->u32_min_value);
		reg->s32_max_value = reg->u32_max_value =
			min_t(u32, reg->s32_max_value, reg->u32_max_value);
		return;
	}
	/* Learn sign from unsigned bounds.  Signed bounds cross the sign
	 * boundary, so we must be careful.
	 */
	if ((s32)reg->u32_max_value >= 0) {
		/* Positive.  We can't learn anything from the smin, but smax
		 * is positive, hence safe.
		 */
		reg->s32_min_value = reg->u32_min_value;
		reg->s32_max_value = reg->u32_max_value =
			min_t(u32, reg->s32_max_value, reg->u32_max_value);
	} else if ((s32)reg->u32_min_value < 0) {
		/* Negative.  We can't learn anything from the smax, but smin
		 * is negative, hence safe.
		 */
		reg->s32_min_value = reg->u32_min_value =
			max_t(u32, reg->s32_min_value, reg->u32_min_value);
		reg->s32_max_value = reg->u32_max_value;
	}
}

static void __reg64_deduce_bounds(struct bpf_reg_state *reg)
{
	/* Learn sign from signed bounds.
	 * If we cannot cross the sign boundary, then signed and unsigned bounds
	 * are the same, so combine.  This works even in the negative case, e.g.
	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
	 */
	if (reg->smin_value >= 0 || reg->smax_value < 0) {
		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
							  reg->umin_value);
		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
							  reg->umax_value);
		return;
	}
	/* Learn sign from unsigned bounds.  Signed bounds cross the sign
	 * boundary, so we must be careful.
	 */
	if ((s64)reg->umax_value >= 0) {
		/* Positive.  We can't learn anything from the smin, but smax
		 * is positive, hence safe.
		 */
		reg->smin_value = reg->umin_value;
		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
							  reg->umax_value);
	} else if ((s64)reg->umin_value < 0) {
		/* Negative.  We can't learn anything from the smax, but smin
		 * is negative, hence safe.
		 */
		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
							  reg->umin_value);
		reg->smax_value = reg->umax_value;
	}
}

static void __reg_deduce_bounds(struct bpf_reg_state *reg)
{
	__reg32_deduce_bounds(reg);
	__reg64_deduce_bounds(reg);
}

/* Attempts to improve var_off based on unsigned min/max information */
static void __reg_bound_offset(struct bpf_reg_state *reg)
{
	struct tnum var64_off = tnum_intersect(reg->var_off,
					       tnum_range(reg->umin_value,
							  reg->umax_value));
	struct tnum var32_off = tnum_intersect(tnum_subreg(reg->var_off),
					       tnum_range(reg->u32_min_value,
							  reg->u32_max_value));

	reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off);
}

static void __reg_assign_32_into_64(struct bpf_reg_state *reg)
{
	reg->umin_value = reg->u32_min_value;
	reg->umax_value = reg->u32_max_value;
	/* Attempt to pull 32-bit signed bounds into 64-bit bounds
	 * but must be positive otherwise set to worst-case bounds
	 * and refine later from tnum.
	 */
	if (reg->s32_min_value >= 0 && reg->s32_max_value >= 0)
		reg->smax_value = reg->s32_max_value;
	else
		reg->smax_value = U32_MAX;
	if (reg->s32_min_value >= 0)
		reg->smin_value = reg->s32_min_value;
	else
		reg->smin_value = 0;
}

static void __reg_combine_32_into_64(struct bpf_reg_state *reg)
{
	/* special case when 64-bit register has upper 32-bit register
	 * zeroed. Typically happens after zext or <<32, >>32 sequence
	 * allowing us to use 32-bit bounds directly.
	 */
	if (tnum_equals_const(tnum_clear_subreg(reg->var_off), 0)) {
		__reg_assign_32_into_64(reg);
	} else {
		/* Otherwise the best we can do is push lower 32bit known and
		 * unknown bits into register (var_off set from jmp logic)
		 * then learn as much as possible from the 64-bit tnum
		 * known and unknown bits. The previous smin/smax bounds are
		 * invalid here because of jmp32 compare so mark them unknown
		 * so they do not impact tnum bounds calculation.
		 */
		__mark_reg64_unbounded(reg);
		__update_reg_bounds(reg);
	}

	/* Intersecting with the old var_off might have improved our bounds
	 * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
	 * then new var_off is (0; 0x7f...fc) which improves our umax.
	 */
	__reg_deduce_bounds(reg);
	__reg_bound_offset(reg);
	__update_reg_bounds(reg);
}

static bool __reg64_bound_s32(s64 a)
{
	return a > S32_MIN && a < S32_MAX;
}

static bool __reg64_bound_u32(u64 a)
{
	if (a > U32_MIN && a < U32_MAX)
		return true;
	return false;
}

static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
{
	__mark_reg32_unbounded(reg);

	if (__reg64_bound_s32(reg->smin_value) && __reg64_bound_s32(reg->smax_value)) {
		reg->s32_min_value = (s32)reg->smin_value;
		reg->s32_max_value = (s32)reg->smax_value;
	}
	if (__reg64_bound_u32(reg->umin_value))
		reg->u32_min_value = (u32)reg->umin_value;
	if (__reg64_bound_u32(reg->umax_value))
		reg->u32_max_value = (u32)reg->umax_value;

	/* Intersecting with the old var_off might have improved our bounds
	 * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
	 * then new var_off is (0; 0x7f...fc) which improves our umax.
	 */
	__reg_deduce_bounds(reg);
	__reg_bound_offset(reg);
	__update_reg_bounds(reg);
}

/* Mark a register as having a completely unknown (scalar) value. */
static void __mark_reg_unknown(const struct bpf_verifier_env *env,
			       struct bpf_reg_state *reg)
{
	/*
	 * Clear type, id, off, and union(map_ptr, range) and
	 * padding between 'type' and union
	 */
	memset(reg, 0, offsetof(struct bpf_reg_state, var_off));
	reg->type = SCALAR_VALUE;
	reg->var_off = tnum_unknown;
	reg->frameno = 0;
	reg->precise = env->subprog_cnt > 1 || !env->bpf_capable;
	__mark_reg_unbounded(reg);
}

static void mark_reg_unknown(struct bpf_verifier_env *env,
			     struct bpf_reg_state *regs, u32 regno)
{
	if (WARN_ON(regno >= MAX_BPF_REG)) {
		verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
		/* Something bad happened, let's kill all regs except FP */
		for (regno = 0; regno < BPF_REG_FP; regno++)
			__mark_reg_not_init(env, regs + regno);
		return;
	}
	__mark_reg_unknown(env, regs + regno);
}

static void __mark_reg_not_init(const struct bpf_verifier_env *env,
				struct bpf_reg_state *reg)
{
	__mark_reg_unknown(env, reg);
	reg->type = NOT_INIT;
}

static void mark_reg_not_init(struct bpf_verifier_env *env,
			      struct bpf_reg_state *regs, u32 regno)
{
	if (WARN_ON(regno >= MAX_BPF_REG)) {
		verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
		/* Something bad happened, let's kill all regs except FP */
		for (regno = 0; regno < BPF_REG_FP; regno++)
			__mark_reg_not_init(env, regs + regno);
		return;
	}
	__mark_reg_not_init(env, regs + regno);
}

static void mark_btf_ld_reg(struct bpf_verifier_env *env,
			    struct bpf_reg_state *regs, u32 regno,
			    enum bpf_reg_type reg_type,
			    struct btf *btf, u32 btf_id)
{
	if (reg_type == SCALAR_VALUE) {
		mark_reg_unknown(env, regs, regno);
		return;
	}
	mark_reg_known_zero(env, regs, regno);
	regs[regno].type = PTR_TO_BTF_ID;
	regs[regno].btf = btf;
	regs[regno].btf_id = btf_id;
}

#define DEF_NOT_SUBREG	(0)
static void init_reg_state(struct bpf_verifier_env *env,
			   struct bpf_func_state *state)
{
	struct bpf_reg_state *regs = state->regs;
	int i;

	for (i = 0; i < MAX_BPF_REG; i++) {
		mark_reg_not_init(env, regs, i);
		regs[i].live = REG_LIVE_NONE;
		regs[i].parent = NULL;
		regs[i].subreg_def = DEF_NOT_SUBREG;
	}

	/* frame pointer */
	regs[BPF_REG_FP].type = PTR_TO_STACK;
	mark_reg_known_zero(env, regs, BPF_REG_FP);
	regs[BPF_REG_FP].frameno = state->frameno;
}

#define BPF_MAIN_FUNC (-1)
static void init_func_state(struct bpf_verifier_env *env,
			    struct bpf_func_state *state,
			    int callsite, int frameno, int subprogno)
{
	state->callsite = callsite;
	state->frameno = frameno;
	state->subprogno = subprogno;
	init_reg_state(env, state);
}

enum reg_arg_type {
	SRC_OP,		/* register is used as source operand */
	DST_OP,		/* register is used as destination operand */
	DST_OP_NO_MARK	/* same as above, check only, don't mark */
};

static int cmp_subprogs(const void *a, const void *b)
{
	return ((struct bpf_subprog_info *)a)->start -
	       ((struct bpf_subprog_info *)b)->start;
}

static int find_subprog(struct bpf_verifier_env *env, int off)
{
	struct bpf_subprog_info *p;

	p = bsearch(&off, env->subprog_info, env->subprog_cnt,
		    sizeof(env->subprog_info[0]), cmp_subprogs);
	if (!p)
		return -ENOENT;
	return p - env->subprog_info;
}

static int add_subprog(struct bpf_verifier_env *env, int off)
{
	int insn_cnt = env->prog->len;
	int ret;

	if (off >= insn_cnt || off < 0) {
		verbose(env, "call to invalid destination\n");
		return -EINVAL;
	}
	ret = find_subprog(env, off);
	if (ret >= 0)
		return 0;
	if (env->subprog_cnt >= BPF_MAX_SUBPROGS) {
		verbose(env, "too many subprograms\n");
		return -E2BIG;
	}
	env->subprog_info[env->subprog_cnt++].start = off;
	sort(env->subprog_info, env->subprog_cnt,
	     sizeof(env->subprog_info[0]), cmp_subprogs, NULL);
	return 0;
}
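
/* For example (an illustrative program layout, not a real test case): if a
 * 20-insn program has a single BPF_PSEUDO_CALL at insn 5 with imm = 10, the
 * callee starts at insn 5 + 10 + 1 = 16, so after check_subprogs() below
 * runs add_subprog() for it, subprog_info[].start is {0, 16} and subprog 0
 * spans insns [0, 16) while subprog 1 spans [16, 20).
 */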

static int check_subprogs(struct bpf_verifier_env *env)
{
	int i, ret, subprog_start, subprog_end, off, cur_subprog = 0;
	struct bpf_subprog_info *subprog = env->subprog_info;
	struct bpf_insn *insn = env->prog->insnsi;
	int insn_cnt = env->prog->len;

	/* Add entry function. */
	ret = add_subprog(env, 0);
	if (ret < 0)
		return ret;

	/* determine subprog starts. The end is one before the next starts */
	for (i = 0; i < insn_cnt; i++) {
		if (insn[i].code != (BPF_JMP | BPF_CALL))
			continue;
		if (insn[i].src_reg != BPF_PSEUDO_CALL)
			continue;
		if (!env->bpf_capable) {
			verbose(env,
				"function calls to other bpf functions are allowed for CAP_BPF and CAP_SYS_ADMIN\n");
			return -EPERM;
		}
		ret = add_subprog(env, i + insn[i].imm + 1);
		if (ret < 0)
			return ret;
	}

	/* Add a fake 'exit' subprog which could simplify subprog iteration
	 * logic. 'subprog_cnt' should not be increased.
	 */
	subprog[env->subprog_cnt].start = insn_cnt;

	if (env->log.level & BPF_LOG_LEVEL2)
		for (i = 0; i < env->subprog_cnt; i++)
			verbose(env, "func#%d @%d\n", i, subprog[i].start);

	/* now check that all jumps are within the same subprog */
	subprog_start = subprog[cur_subprog].start;
	subprog_end = subprog[cur_subprog + 1].start;
	for (i = 0; i < insn_cnt; i++) {
		u8 code = insn[i].code;

		if (code == (BPF_JMP | BPF_CALL) &&
		    insn[i].imm == BPF_FUNC_tail_call &&
		    insn[i].src_reg != BPF_PSEUDO_CALL)
			subprog[cur_subprog].has_tail_call = true;
		if (BPF_CLASS(code) == BPF_LD &&
		    (BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND))
			subprog[cur_subprog].has_ld_abs = true;
		if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32)
			goto next;
		if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
			goto next;
		off = i + insn[i].off + 1;
		if (off < subprog_start || off >= subprog_end) {
			verbose(env, "jump out of range from insn %d to %d\n", i, off);
			return -EINVAL;
		}
next:
		if (i == subprog_end - 1) {
			/* to avoid fall-through from one subprog into another
			 * the last insn of the subprog should be either exit
			 * or unconditional jump back
			 */
			if (code != (BPF_JMP | BPF_EXIT) &&
			    code != (BPF_JMP | BPF_JA)) {
				verbose(env, "last insn is not an exit or jmp\n");
				return -EINVAL;
			}
			subprog_start = subprog_end;
			cur_subprog++;
			if (cur_subprog < env->subprog_cnt)
				subprog_end = subprog[cur_subprog + 1].start;
		}
	}
	return 0;
}

/* Parentage chain of this register (or stack slot) should take care of all
 * issues like callee-saved registers, stack slot allocation time, etc.
 */
static int mark_reg_read(struct bpf_verifier_env *env,
			 const struct bpf_reg_state *state,
			 struct bpf_reg_state *parent, u8 flag)
{
	bool writes = parent == state->parent; /* Observe write marks */
	int cnt = 0;

	while (parent) {
		/* if read wasn't screened by an earlier write ... */
		if (writes && state->live & REG_LIVE_WRITTEN)
			break;
		if (parent->live & REG_LIVE_DONE) {
			verbose(env, "verifier BUG type %s var_off %lld off %d\n",
				reg_type_str[parent->type],
				parent->var_off.value, parent->off);
			return -EFAULT;
		}
		/* The first condition is more likely to be true than the
		 * second, so check it first.
		 */
		if ((parent->live & REG_LIVE_READ) == flag ||
		    parent->live & REG_LIVE_READ64)
			/* The parentage chain never changes and
			 * this parent was already marked as LIVE_READ.
			 * There is no need to keep walking the chain again and
			 * keep re-marking all parents as LIVE_READ.
			 * This case happens when the same register is read
			 * multiple times without writes into it in-between.
			 * Also, if parent has the stronger REG_LIVE_READ64 set,
			 * then no need to set the weak REG_LIVE_READ32.
			 */
			break;
		/* ... then we depend on parent's value */
		parent->live |= flag;
		/* REG_LIVE_READ64 overrides REG_LIVE_READ32. */
		if (flag == REG_LIVE_READ64)
			parent->live &= ~REG_LIVE_READ32;
		state = parent;
		parent = state->parent;
		writes = true;
		cnt++;
	}

	if (env->longest_mark_read_walk < cnt)
		env->longest_mark_read_walk = cnt;
	return 0;
}

/* This function is supposed to be used by the following 32-bit optimization
 * code only. It returns TRUE if the source or destination register operates
 * on 64-bit, otherwise returns FALSE.
 */
static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn,
		     u32 regno, struct bpf_reg_state *reg, enum reg_arg_type t)
{
	u8 code, class, op;

	code = insn->code;
	class = BPF_CLASS(code);
	op = BPF_OP(code);
	if (class == BPF_JMP) {
		/* BPF_EXIT for "main" will reach here. Return TRUE
		 * conservatively.
		 */
		if (op == BPF_EXIT)
			return true;
		if (op == BPF_CALL) {
			/* BPF to BPF call will reach here because of marking
			 * caller saved clobber with DST_OP_NO_MARK for which we
			 * don't care about the register def because they are
			 * anyway marked as NOT_INIT already.
			 */
			if (insn->src_reg == BPF_PSEUDO_CALL)
				return false;
			/* Helper call will reach here because of arg type
			 * check, conservatively return TRUE.
			 */
			if (t == SRC_OP)
				return true;

			return false;
		}
	}

	if (class == BPF_ALU64 || class == BPF_JMP ||
	    /* BPF_END always uses BPF_ALU class. */
	    (class == BPF_ALU && op == BPF_END && insn->imm == 64))
		return true;

	if (class == BPF_ALU || class == BPF_JMP32)
		return false;

	if (class == BPF_LDX) {
		if (t != SRC_OP)
			return BPF_SIZE(code) == BPF_DW;
		/* LDX source must be ptr. */
		return true;
	}

	if (class == BPF_STX) {
		if (reg->type != SCALAR_VALUE)
			return true;
		return BPF_SIZE(code) == BPF_DW;
	}

	if (class == BPF_LD) {
		u8 mode = BPF_MODE(code);

		/* LD_IMM64 */
		if (mode == BPF_IMM)
			return true;

		/* Both LD_IND and LD_ABS return 32-bit data. */
		if (t != SRC_OP)
			return false;

		/* Implicit ctx ptr. */
		if (regno == BPF_REG_6)
			return true;

		/* Explicit source could be any width. */
		return true;
	}

	if (class == BPF_ST)
		/* The only source register for BPF_ST is a ptr. */
		return true;

	/* Conservatively return true at default. */
	return true;
}

/* Return TRUE if INSN doesn't have explicit value definition. */
static bool insn_no_def(struct bpf_insn *insn)
{
	u8 class = BPF_CLASS(insn->code);

	return (class == BPF_JMP || class == BPF_JMP32 ||
		class == BPF_STX || class == BPF_ST);
}

/* Return TRUE if INSN has defined any 32-bit value explicitly. */
static bool insn_has_def32(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
	if (insn_no_def(insn))
		return false;

	return !is_reg64(env, insn, insn->dst_reg, NULL, DST_OP);
}

static void mark_insn_zext(struct bpf_verifier_env *env,
			   struct bpf_reg_state *reg)
{
	s32 def_idx = reg->subreg_def;

	if (def_idx == DEF_NOT_SUBREG)
		return;

	env->insn_aux_data[def_idx - 1].zext_dst = true;
	/* The dst will be zero extended, so won't be sub-register anymore. */
	reg->subreg_def = DEF_NOT_SUBREG;
}
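
/* To sketch how the pieces above fit together (an illustrative sequence,
 * not from a real program): a 32-bit ALU insn like "w1 = w2" defines a
 * sub-register, so check_reg_arg() below records subreg_def = insn_idx + 1
 * for r1. If a later insn reads r1 in a 64-bit context, mark_insn_zext()
 * flags the defining insn's zext_dst so an explicit zero-extension can be
 * emitted for architectures whose 32-bit ops don't implicitly zero the
 * upper half.
 */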

static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
			 enum reg_arg_type t)
{
	struct bpf_verifier_state *vstate = env->cur_state;
	struct bpf_func_state *state = vstate->frame[vstate->curframe];
	struct bpf_insn *insn = env->prog->insnsi + env->insn_idx;
	struct bpf_reg_state *reg, *regs = state->regs;
	bool rw64;

	if (regno >= MAX_BPF_REG) {
		verbose(env, "R%d is invalid\n", regno);
		return -EINVAL;
	}

	reg = &regs[regno];
	rw64 = is_reg64(env, insn, regno, reg, t);
	if (t == SRC_OP) {
		/* check whether register used as source operand can be read */
		if (reg->type == NOT_INIT) {
			verbose(env, "R%d !read_ok\n", regno);
			return -EACCES;
		}
		/* We don't need to worry about FP liveness because it's read-only */
		if (regno == BPF_REG_FP)
			return 0;

		if (rw64)
			mark_insn_zext(env, reg);

		return mark_reg_read(env, reg, reg->parent,
				     rw64 ? REG_LIVE_READ64 : REG_LIVE_READ32);
	} else {
		/* check whether register used as dest operand can be written to */
		if (regno == BPF_REG_FP) {
			verbose(env, "frame pointer is read only\n");
			return -EACCES;
		}
		reg->live |= REG_LIVE_WRITTEN;
		reg->subreg_def = rw64 ? DEF_NOT_SUBREG : env->insn_idx + 1;
		if (t == DST_OP)
			mark_reg_unknown(env, regs, regno);
	}
	return 0;
}

/* for any branch, call, exit record the history of jmps in the given state */
static int push_jmp_history(struct bpf_verifier_env *env,
			    struct bpf_verifier_state *cur)
{
	u32 cnt = cur->jmp_history_cnt;
	struct bpf_idx_pair *p;

	cnt++;
	p = krealloc(cur->jmp_history, cnt * sizeof(*p), GFP_USER);
	if (!p)
		return -ENOMEM;
	p[cnt - 1].idx = env->insn_idx;
	p[cnt - 1].prev_idx = env->prev_insn_idx;
	cur->jmp_history = p;
	cur->jmp_history_cnt = cnt;
	return 0;
}

/* Backtrack one insn at a time. If idx is not at the top of recorded
 * history then previous instruction came from straight line execution.
 */
static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
			     u32 *history)
{
	u32 cnt = *history;

	if (cnt && st->jmp_history[cnt - 1].idx == i) {
		i = st->jmp_history[cnt - 1].prev_idx;
		(*history)--;
	} else {
		i--;
	}
	return i;
}
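
/* For instance (a made-up walk for illustration): when backtrack_insn()
 * below processes "r5 = r9" (a BPF_MOV with BPF_X source) while looking for
 * the precision of r5, it clears bit 5 and sets bit 9 in *reg_mask, because
 * r5's value after the insn is exactly r9's value before it. Spills and
 * fills move bits between *reg_mask and the frame-pointer-relative slots in
 * *stack_mask in the same way.
 */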
1803 */ 1804 static int backtrack_insn(struct bpf_verifier_env *env, int idx, 1805 u32 *reg_mask, u64 *stack_mask) 1806 { 1807 const struct bpf_insn_cbs cbs = { 1808 .cb_print = verbose, 1809 .private_data = env, 1810 }; 1811 struct bpf_insn *insn = env->prog->insnsi + idx; 1812 u8 class = BPF_CLASS(insn->code); 1813 u8 opcode = BPF_OP(insn->code); 1814 u8 mode = BPF_MODE(insn->code); 1815 u32 dreg = 1u << insn->dst_reg; 1816 u32 sreg = 1u << insn->src_reg; 1817 u32 spi; 1818 1819 if (insn->code == 0) 1820 return 0; 1821 if (env->log.level & BPF_LOG_LEVEL) { 1822 verbose(env, "regs=%x stack=%llx before ", *reg_mask, *stack_mask); 1823 verbose(env, "%d: ", idx); 1824 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks); 1825 } 1826 1827 if (class == BPF_ALU || class == BPF_ALU64) { 1828 if (!(*reg_mask & dreg)) 1829 return 0; 1830 if (opcode == BPF_MOV) { 1831 if (BPF_SRC(insn->code) == BPF_X) { 1832 /* dreg = sreg 1833 * dreg needs precision after this insn 1834 * sreg needs precision before this insn 1835 */ 1836 *reg_mask &= ~dreg; 1837 *reg_mask |= sreg; 1838 } else { 1839 /* dreg = K 1840 * dreg needs precision after this insn. 1841 * Corresponding register is already marked 1842 * as precise=true in this verifier state. 1843 * No further markings in parent are necessary 1844 */ 1845 *reg_mask &= ~dreg; 1846 } 1847 } else { 1848 if (BPF_SRC(insn->code) == BPF_X) { 1849 /* dreg += sreg 1850 * both dreg and sreg need precision 1851 * before this insn 1852 */ 1853 *reg_mask |= sreg; 1854 } /* else dreg += K 1855 * dreg still needs precision before this insn 1856 */ 1857 } 1858 } else if (class == BPF_LDX) { 1859 if (!(*reg_mask & dreg)) 1860 return 0; 1861 *reg_mask &= ~dreg; 1862 1863 /* scalars can only be spilled into stack w/o losing precision. 1864 * Load from any other memory can be zero extended. 1865 * The desire to keep that precision is already indicated 1866 * by 'precise' mark in corresponding register of this state. 1867 * No further tracking necessary. 1868 */ 1869 if (insn->src_reg != BPF_REG_FP) 1870 return 0; 1871 if (BPF_SIZE(insn->code) != BPF_DW) 1872 return 0; 1873 1874 /* dreg = *(u64 *)[fp - off] was a fill from the stack. 1875 * that [fp - off] slot contains scalar that needs to be 1876 * tracked with precision 1877 */ 1878 spi = (-insn->off - 1) / BPF_REG_SIZE; 1879 if (spi >= 64) { 1880 verbose(env, "BUG spi %d\n", spi); 1881 WARN_ONCE(1, "verifier backtracking bug"); 1882 return -EFAULT; 1883 } 1884 *stack_mask |= 1ull << spi; 1885 } else if (class == BPF_STX || class == BPF_ST) { 1886 if (*reg_mask & dreg) 1887 /* stx & st shouldn't be using _scalar_ dst_reg 1888 * to access memory. It means backtracking 1889 * encountered a case of pointer subtraction. 
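			 * A hypothetical sequence that ends up here:
			 *   r2 = r10                // r2 is a pointer
			 *   *(u64 *)(r2 - 8) = r3   // store through r2
			 *   ...
			 *   r2 -= r10               // ptr - ptr: r2 becomes a scalar
			 * If r2 later needs precision, it is still in reg_mask
			 * when backtracking reaches the store, so we give up.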
1890			 */
1891			return -ENOTSUPP;
1892		/* scalars can only be spilled into stack */
1893		if (insn->dst_reg != BPF_REG_FP)
1894			return 0;
1895		if (BPF_SIZE(insn->code) != BPF_DW)
1896			return 0;
1897		spi = (-insn->off - 1) / BPF_REG_SIZE;
1898		if (spi >= 64) {
1899			verbose(env, "BUG spi %d\n", spi);
1900			WARN_ONCE(1, "verifier backtracking bug");
1901			return -EFAULT;
1902		}
1903		if (!(*stack_mask & (1ull << spi)))
1904			return 0;
1905		*stack_mask &= ~(1ull << spi);
1906		if (class == BPF_STX)
1907			*reg_mask |= sreg;
1908	} else if (class == BPF_JMP || class == BPF_JMP32) {
1909		if (opcode == BPF_CALL) {
1910			if (insn->src_reg == BPF_PSEUDO_CALL)
1911				return -ENOTSUPP;
1912			/* regular helper call sets R0 */
1913			*reg_mask &= ~1;
1914			if (*reg_mask & 0x3f) {
1915				/* if backtracing was looking for registers R1-R5
1916				 * they should have been found already.
1917				 */
1918				verbose(env, "BUG regs %x\n", *reg_mask);
1919				WARN_ONCE(1, "verifier backtracking bug");
1920				return -EFAULT;
1921			}
1922		} else if (opcode == BPF_EXIT) {
1923			return -ENOTSUPP;
1924		}
1925	} else if (class == BPF_LD) {
1926		if (!(*reg_mask & dreg))
1927			return 0;
1928		*reg_mask &= ~dreg;
1929		/* It's ld_imm64 or ld_abs or ld_ind.
1930		 * For ld_imm64 no further tracking of precision
1931		 * into parent is necessary
1932		 */
1933		if (mode == BPF_IND || mode == BPF_ABS)
1934			/* to be analyzed */
1935			return -ENOTSUPP;
1936	}
1937	return 0;
1938 }
1939
1940 /* the scalar precision tracking algorithm:
1941  * . at the start all registers have precise=false.
1942  * . scalar ranges are tracked as normal through alu and jmp insns.
1943  * . once precise value of the scalar register is used in:
1944  *   . ptr + scalar alu
1945  *   . if (scalar cond K|scalar)
1946  *   . helper_call(.., scalar, ...) where ARG_CONST is expected
1947  *   backtrack through the verifier states and mark as precise all the
1948  *   registers and the stack slots with spilled constants that
1949  *   contributed to the value of these scalar registers.
1950  * . during state pruning two registers (or spilled stack slots)
1951  *   are equivalent if both are not precise.
1952  *
1953  * Note the verifier cannot simply walk the register parentage chain,
1954  * since many different registers and stack slots could have been
1955  * used to compute a single precise scalar.
1956  *
1957  * The approach of starting with precise=true for all registers and then
1958  * backtracking to mark a register as not precise when the verifier detects
1959  * that the program doesn't care about the specific value (e.g., when a helper
1960  * takes a register as an ARG_ANYTHING parameter) is not safe.
1961  *
1962  * It's ok to walk a single parentage chain of the verifier states.
1963  * It's possible that this backtracking will go all the way to the 1st insn.
1964  * All other branches will be explored for needing precision later.
1965  *
1966  * The backtracking needs to deal with cases like:
1967  * R8=map_value(id=0,off=0,ks=4,vs=1952,imm=0) R9_w=map_value(id=0,off=40,ks=4,vs=1952,imm=0)
1968  * r9 -= r8
1969  * r5 = r9
1970  * if r5 > 0x79f goto pc+7
1971  *    R5_w=inv(id=0,umax_value=1951,var_off=(0x0; 0x7ff))
1972  * r5 += 1
1973  * ...
1974  * call bpf_perf_event_output#25
1975  *   where .arg5_type = ARG_CONST_SIZE_OR_ZERO
1976  *
1977  * and this case:
1978  * r6 = 1
1979  * call foo // uses callee's r6 inside to compute r0
1980  * r0 += r6
1981  * if r0 == 0 goto
1982  *
1983  * To track the above, reg_mask/stack_mask need to be kept independent for each frame.
1984  *
1985  * Also if parent's curframe > frame where backtracking started,
1986  * the verifier needs to mark registers in both frames, otherwise callees
1987  * may incorrectly prune callers. This is similar to
1988  * commit 7640ead93924 ("bpf: verifier: make sure callees don't prune with caller differences")
1989  *
1990  * For now backtracking falls back to conservative marking.
1991  */
1992 static void mark_all_scalars_precise(struct bpf_verifier_env *env,
1993				      struct bpf_verifier_state *st)
1994 {
1995	struct bpf_func_state *func;
1996	struct bpf_reg_state *reg;
1997	int i, j;
1998
1999	/* big hammer: mark all scalars precise in this path.
2000	 * pop_stack may still get !precise scalars.
2001	 */
2002	for (; st; st = st->parent)
2003		for (i = 0; i <= st->curframe; i++) {
2004			func = st->frame[i];
2005			for (j = 0; j < BPF_REG_FP; j++) {
2006				reg = &func->regs[j];
2007				if (reg->type != SCALAR_VALUE)
2008					continue;
2009				reg->precise = true;
2010			}
2011			for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
2012				if (func->stack[j].slot_type[0] != STACK_SPILL)
2013					continue;
2014				reg = &func->stack[j].spilled_ptr;
2015				if (reg->type != SCALAR_VALUE)
2016					continue;
2017				reg->precise = true;
2018			}
2019		}
2020 }
2021
2022 static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
2023				   int spi)
2024 {
2025	struct bpf_verifier_state *st = env->cur_state;
2026	int first_idx = st->first_insn_idx;
2027	int last_idx = env->insn_idx;
2028	struct bpf_func_state *func;
2029	struct bpf_reg_state *reg;
2030	u32 reg_mask = regno >= 0 ? 1u << regno : 0;
2031	u64 stack_mask = spi >= 0 ? 1ull << spi : 0;
2032	bool skip_first = true;
2033	bool new_marks = false;
2034	int i, err;
2035
2036	if (!env->bpf_capable)
2037		return 0;
2038
2039	func = st->frame[st->curframe];
2040	if (regno >= 0) {
2041		reg = &func->regs[regno];
2042		if (reg->type != SCALAR_VALUE) {
2043			WARN_ONCE(1, "backtracing misuse");
2044			return -EFAULT;
2045		}
2046		if (!reg->precise)
2047			new_marks = true;
2048		else
2049			reg_mask = 0;
2050		reg->precise = true;
2051	}
2052
2053	while (spi >= 0) {
2054		if (func->stack[spi].slot_type[0] != STACK_SPILL) {
2055			stack_mask = 0;
2056			break;
2057		}
2058		reg = &func->stack[spi].spilled_ptr;
2059		if (reg->type != SCALAR_VALUE) {
2060			stack_mask = 0;
2061			break;
2062		}
2063		if (!reg->precise)
2064			new_marks = true;
2065		else
2066			stack_mask = 0;
2067		reg->precise = true;
2068		break;
2069	}
2070
2071	if (!new_marks)
2072		return 0;
2073	if (!reg_mask && !stack_mask)
2074		return 0;
2075	for (;;) {
2076		DECLARE_BITMAP(mask, 64);
2077		u32 history = st->jmp_history_cnt;
2078
2079		if (env->log.level & BPF_LOG_LEVEL)
2080			verbose(env, "last_idx %d first_idx %d\n", last_idx, first_idx);
2081		for (i = last_idx;;) {
2082			if (skip_first) {
2083				err = 0;
2084				skip_first = false;
2085			} else {
2086				err = backtrack_insn(env, i, &reg_mask, &stack_mask);
2087			}
2088			if (err == -ENOTSUPP) {
2089				mark_all_scalars_precise(env, st);
2090				return 0;
2091			} else if (err) {
2092				return err;
2093			}
2094			if (!reg_mask && !stack_mask)
2095				/* Found assignment(s) into tracked register in this state.
2096				 * Since this state is already marked, just return.
2097				 * Nothing to be tracked further in the parent state.
2098				 */
2099				return 0;
2100			if (i == first_idx)
2101				break;
2102			i = get_prev_insn_idx(st, i, &history);
2103			if (i >= env->prog->len) {
2104				/* This can happen if backtracking reached insn 0
2105				 * and there are still reg_mask or stack_mask
2106				 * to backtrack.
2107				 * It means the backtracking missed the spot where
2108				 * a particular register was initialized with a constant.
2109				 */
2110				verbose(env, "BUG backtracking idx %d\n", i);
2111				WARN_ONCE(1, "verifier backtracking bug");
2112				return -EFAULT;
2113			}
2114		}
2115		st = st->parent;
2116		if (!st)
2117			break;
2118
2119		new_marks = false;
2120		func = st->frame[st->curframe];
2121		bitmap_from_u64(mask, reg_mask);
2122		for_each_set_bit(i, mask, 32) {
2123			reg = &func->regs[i];
2124			if (reg->type != SCALAR_VALUE) {
2125				reg_mask &= ~(1u << i);
2126				continue;
2127			}
2128			if (!reg->precise)
2129				new_marks = true;
2130			reg->precise = true;
2131		}
2132
2133		bitmap_from_u64(mask, stack_mask);
2134		for_each_set_bit(i, mask, 64) {
2135			if (i >= func->allocated_stack / BPF_REG_SIZE) {
2136				/* The sequence of instructions:
2137				 *  2: (bf) r3 = r10
2138				 *  3: (7b) *(u64 *)(r3 -8) = r0
2139				 *  4: (79) r4 = *(u64 *)(r10 -8)
2140				 * doesn't contain jmps. It's backtracked
2141				 * as a single block.
2142				 * During backtracking insn 3 is not recognized as a
2143				 * stack access, so at the end of backtracking
2144				 * stack slot fp-8 is still marked in stack_mask.
2145				 * However the parent state may not have accessed
2146				 * fp-8, which is "unallocated" stack space for it.
2147				 * In such a case, fall back to conservative marking.
2148				 */
2149				mark_all_scalars_precise(env, st);
2150				return 0;
2151			}
2152
2153			if (func->stack[i].slot_type[0] != STACK_SPILL) {
2154				stack_mask &= ~(1ull << i);
2155				continue;
2156			}
2157			reg = &func->stack[i].spilled_ptr;
2158			if (reg->type != SCALAR_VALUE) {
2159				stack_mask &= ~(1ull << i);
2160				continue;
2161			}
2162			if (!reg->precise)
2163				new_marks = true;
2164			reg->precise = true;
2165		}
2166		if (env->log.level & BPF_LOG_LEVEL) {
2167			print_verifier_state(env, func);
2168			verbose(env, "parent %s regs=%x stack=%llx marks\n",
2169				new_marks ? "didn't have" : "already had",
2170				reg_mask, stack_mask);
2171		}
2172
2173		if (!reg_mask && !stack_mask)
2174			break;
2175		if (!new_marks)
2176			break;
2177
2178		last_idx = st->last_insn_idx;
2179		first_idx = st->first_insn_idx;
2180	}
2181	return 0;
2182 }
2183
2184 static int mark_chain_precision(struct bpf_verifier_env *env, int regno)
2185 {
2186	return __mark_chain_precision(env, regno, -1);
2187 }
2188
2189 static int mark_chain_precision_stack(struct bpf_verifier_env *env, int spi)
2190 {
2191	return __mark_chain_precision(env, -1, spi);
2192 }
2193
2194 static bool is_spillable_regtype(enum bpf_reg_type type)
2195 {
2196	switch (type) {
2197	case PTR_TO_MAP_VALUE:
2198	case PTR_TO_MAP_VALUE_OR_NULL:
2199	case PTR_TO_STACK:
2200	case PTR_TO_CTX:
2201	case PTR_TO_PACKET:
2202	case PTR_TO_PACKET_META:
2203	case PTR_TO_PACKET_END:
2204	case PTR_TO_FLOW_KEYS:
2205	case CONST_PTR_TO_MAP:
2206	case PTR_TO_SOCKET:
2207	case PTR_TO_SOCKET_OR_NULL:
2208	case PTR_TO_SOCK_COMMON:
2209	case PTR_TO_SOCK_COMMON_OR_NULL:
2210	case PTR_TO_TCP_SOCK:
2211	case PTR_TO_TCP_SOCK_OR_NULL:
2212	case PTR_TO_XDP_SOCK:
2213	case PTR_TO_BTF_ID:
2214	case PTR_TO_BTF_ID_OR_NULL:
2215	case PTR_TO_RDONLY_BUF:
2216	case PTR_TO_RDONLY_BUF_OR_NULL:
2217	case PTR_TO_RDWR_BUF:
2218	case PTR_TO_RDWR_BUF_OR_NULL:
2219	case PTR_TO_PERCPU_BTF_ID:
2220	case PTR_TO_MEM:
2221	case PTR_TO_MEM_OR_NULL:
2222		return true;
2223	default:
2224		return false;
2225	}
2226 }
2227
2228 /* Does this register contain a constant zero?
 */
2229 static bool register_is_null(struct bpf_reg_state *reg)
2230 {
2231	return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
2232 }
2233
2234 static bool register_is_const(struct bpf_reg_state *reg)
2235 {
2236	return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off);
2237 }
2238
2239 static bool __is_scalar_unbounded(struct bpf_reg_state *reg)
2240 {
2241	return tnum_is_unknown(reg->var_off) &&
2242	       reg->smin_value == S64_MIN && reg->smax_value == S64_MAX &&
2243	       reg->umin_value == 0 && reg->umax_value == U64_MAX &&
2244	       reg->s32_min_value == S32_MIN && reg->s32_max_value == S32_MAX &&
2245	       reg->u32_min_value == 0 && reg->u32_max_value == U32_MAX;
2246 }
2247
2248 static bool register_is_bounded(struct bpf_reg_state *reg)
2249 {
2250	return reg->type == SCALAR_VALUE && !__is_scalar_unbounded(reg);
2251 }
2252
2253 static bool __is_pointer_value(bool allow_ptr_leaks,
2254			       const struct bpf_reg_state *reg)
2255 {
2256	if (allow_ptr_leaks)
2257		return false;
2258
2259	return reg->type != SCALAR_VALUE;
2260 }
2261
2262 static void save_register_state(struct bpf_func_state *state,
2263				 int spi, struct bpf_reg_state *reg)
2264 {
2265	int i;
2266
2267	state->stack[spi].spilled_ptr = *reg;
2268	state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
2269
2270	for (i = 0; i < BPF_REG_SIZE; i++)
2271		state->stack[spi].slot_type[i] = STACK_SPILL;
2272 }
2273
2274 /* check_stack_read/write functions track spill/fill of registers;
2275  * stack boundary and alignment are checked in check_mem_access()
2276  */
2277 static int check_stack_write(struct bpf_verifier_env *env,
2278			      struct bpf_func_state *state, /* func where register points to */
2279			      int off, int size, int value_regno, int insn_idx)
2280 {
2281	struct bpf_func_state *cur; /* state of the current function */
2282	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
2283	u32 dst_reg = env->prog->insnsi[insn_idx].dst_reg;
2284	struct bpf_reg_state *reg = NULL;
2285
2286	err = realloc_func_state(state, round_up(slot + 1, BPF_REG_SIZE),
2287				 state->acquired_refs, true);
2288	if (err)
2289		return err;
2290	/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
2291	 * so it's an aligned access and [off, off + size) are within stack limits
2292	 */
2293	if (!env->allow_ptr_leaks &&
2294	    state->stack[spi].slot_type[0] == STACK_SPILL &&
2295	    size != BPF_REG_SIZE) {
2296		verbose(env, "attempt to corrupt spilled pointer on stack\n");
2297		return -EACCES;
2298	}
2299
2300	cur = env->cur_state->frame[env->cur_state->curframe];
2301	if (value_regno >= 0)
2302		reg = &cur->regs[value_regno];
2303
2304	if (reg && size == BPF_REG_SIZE && register_is_bounded(reg) &&
2305	    !register_is_null(reg) && env->bpf_capable) {
2306		if (dst_reg != BPF_REG_FP) {
2307			/* The backtracking logic can only recognize explicit
2308			 * stack slot addresses like [fp - 8]. Other spills of a
2309			 * scalar via a different register have to be conservative.
2310			 * Backtrack from here and mark all registers as precise
2311			 * that contributed to 'reg' being a constant.
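			 * A hypothetical spill of that kind:
			 *   r1 = 42
			 *   r2 = r10
			 *   r2 += -8
			 *   *(u64 *)(r2 + 0) = r1   // spill via r2 rather than fp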
2312			 */
2313			err = mark_chain_precision(env, value_regno);
2314			if (err)
2315				return err;
2316		}
2317		save_register_state(state, spi, reg);
2318	} else if (reg && is_spillable_regtype(reg->type)) {
2319		/* register containing pointer is being spilled into stack */
2320		if (size != BPF_REG_SIZE) {
2321			verbose_linfo(env, insn_idx, "; ");
2322			verbose(env, "invalid size of register spill\n");
2323			return -EACCES;
2324		}
2325
2326		if (state != cur && reg->type == PTR_TO_STACK) {
2327			verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
2328			return -EINVAL;
2329		}
2330
2331		if (!env->bypass_spec_v4) {
2332			bool sanitize = false;
2333
2334			if (state->stack[spi].slot_type[0] == STACK_SPILL &&
2335			    register_is_const(&state->stack[spi].spilled_ptr))
2336				sanitize = true;
2337			for (i = 0; i < BPF_REG_SIZE; i++)
2338				if (state->stack[spi].slot_type[i] == STACK_MISC) {
2339					sanitize = true;
2340					break;
2341				}
2342			if (sanitize) {
2343				int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off;
2344				int soff = (-spi - 1) * BPF_REG_SIZE;
2345
2346				/* detected reuse of an integer stack slot with a
2347				 * pointer, which means either llvm is reusing the
2348				 * stack slot or an attacker is trying to exploit
2349				 * CVE-2018-3639 (speculative store bypass).
2350				 * Have to sanitize that slot with a preemptive
2351				 * store of zero.
2352				 */
2353				if (*poff && *poff != soff) {
2354					/* disallow programs where a single insn stores
2355					 * into two different stack slots, since the
2356					 * verifier cannot sanitize them
2357					 */
2358					verbose(env,
2359						"insn %d cannot access two stack slots fp%d and fp%d",
2360						insn_idx, *poff, soff);
2361					return -EINVAL;
2362				}
2363				*poff = soff;
2364			}
2365		}
2366		save_register_state(state, spi, reg);
2367	} else {
2368		u8 type = STACK_MISC;
2369
2370		/* regular write of data into stack destroys any spilled ptr */
2371		state->stack[spi].spilled_ptr.type = NOT_INIT;
2372		/* Mark slots as STACK_MISC if they belonged to spilled ptr. */
2373		if (state->stack[spi].slot_type[0] == STACK_SPILL)
2374			for (i = 0; i < BPF_REG_SIZE; i++)
2375				state->stack[spi].slot_type[i] = STACK_MISC;
2376
2377		/* only mark the slot as written if all 8 bytes were written,
2378		 * otherwise read propagation may incorrectly stop too soon
2379		 * when stack slots are partially written.
2380		 * This heuristic means that read propagation will be
2381		 * conservative, since it will add reg_live_read marks
2382		 * to stack slots all the way to the first state when a program
2383		 * writes+reads less than 8 bytes
2384		 */
2385		if (size == BPF_REG_SIZE)
2386			state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
2387
2388		/* when we zero initialize stack slots mark them as such */
2389		if (reg && register_is_null(reg)) {
2390			/* backtracking doesn't work for STACK_ZERO yet. */
2391			err = mark_chain_precision(env, value_regno);
2392			if (err)
2393				return err;
2394			type = STACK_ZERO;
2395		}
2396
2397		/* Mark slots affected by this stack write.
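		 * For instance, a 4-byte write at fp-4 (slot = 3, spi = 0) marks
		 * slot_type[3], [2], [1] and [0] of the first 8-byte slot.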
		 */
2398		for (i = 0; i < size; i++)
2399			state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
2400				type;
2401	}
2402	return 0;
2403 }
2404
2405 static int check_stack_read(struct bpf_verifier_env *env,
2406			     struct bpf_func_state *reg_state /* func where register points to */,
2407			     int off, int size, int value_regno)
2408 {
2409	struct bpf_verifier_state *vstate = env->cur_state;
2410	struct bpf_func_state *state = vstate->frame[vstate->curframe];
2411	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
2412	struct bpf_reg_state *reg;
2413	u8 *stype;
2414
2415	if (reg_state->allocated_stack <= slot) {
2416		verbose(env, "invalid read from stack off %d+0 size %d\n",
2417			off, size);
2418		return -EACCES;
2419	}
2420	stype = reg_state->stack[spi].slot_type;
2421	reg = &reg_state->stack[spi].spilled_ptr;
2422
2423	if (stype[0] == STACK_SPILL) {
2424		if (size != BPF_REG_SIZE) {
2425			if (reg->type != SCALAR_VALUE) {
2426				verbose_linfo(env, env->insn_idx, "; ");
2427				verbose(env, "invalid size of register fill\n");
2428				return -EACCES;
2429			}
2430			if (value_regno >= 0) {
2431				mark_reg_unknown(env, state->regs, value_regno);
2432				state->regs[value_regno].live |= REG_LIVE_WRITTEN;
2433			}
2434			mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
2435			return 0;
2436		}
2437		for (i = 1; i < BPF_REG_SIZE; i++) {
2438			if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) {
2439				verbose(env, "corrupted spill memory\n");
2440				return -EACCES;
2441			}
2442		}
2443
2444		if (value_regno >= 0) {
2445			/* restore register state from stack */
2446			state->regs[value_regno] = *reg;
2447			/* mark reg as written since spilled pointer state likely
2448			 * has its liveness marks cleared by is_state_visited()
2449			 * which resets stack/reg liveness for state transitions
2450			 */
2451			state->regs[value_regno].live |= REG_LIVE_WRITTEN;
2452		} else if (__is_pointer_value(env->allow_ptr_leaks, reg)) {
2453			/* If value_regno==-1, the caller is asking us whether
2454			 * it is acceptable to use this value as a SCALAR_VALUE
2455			 * (e.g. for XADD).
2456			 * We must not allow unprivileged callers to do that
2457			 * with spilled pointers.
2458			 */
2459			verbose(env, "leaking pointer from stack off %d\n",
2460				off);
2461			return -EACCES;
2462		}
2463		mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
2464	} else {
2465		int zeros = 0;
2466
2467		for (i = 0; i < size; i++) {
2468			if (stype[(slot - i) % BPF_REG_SIZE] == STACK_MISC)
2469				continue;
2470			if (stype[(slot - i) % BPF_REG_SIZE] == STACK_ZERO) {
2471				zeros++;
2472				continue;
2473			}
2474			verbose(env, "invalid read from stack off %d+%d size %d\n",
2475				off, i, size);
2476			return -EACCES;
2477		}
2478		mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
2479		if (value_regno >= 0) {
2480			if (zeros == size) {
2481				/* any size read into register is zero extended,
2482				 * so the whole register == const_zero
2483				 */
2484				__mark_reg_const_zero(&state->regs[value_regno]);
2485				/* backtracking doesn't support STACK_ZERO yet,
2486				 * so mark it precise here, so that later
2487				 * backtracking can stop here.
2488				 * Backtracking may not need this if this register
2489				 * doesn't participate in pointer adjustment.
2490				 * Forward propagation of precise flag is not
2491				 * necessary either. This mark is only to stop
2492				 * backtracking. Any register that contributed
2493				 * to const 0 was marked precise before spill.
2494 */ 2495 state->regs[value_regno].precise = true; 2496 } else { 2497 /* have read misc data from the stack */ 2498 mark_reg_unknown(env, state->regs, value_regno); 2499 } 2500 state->regs[value_regno].live |= REG_LIVE_WRITTEN; 2501 } 2502 } 2503 return 0; 2504 } 2505 2506 static int check_stack_access(struct bpf_verifier_env *env, 2507 const struct bpf_reg_state *reg, 2508 int off, int size) 2509 { 2510 /* Stack accesses must be at a fixed offset, so that we 2511 * can determine what type of data were returned. See 2512 * check_stack_read(). 2513 */ 2514 if (!tnum_is_const(reg->var_off)) { 2515 char tn_buf[48]; 2516 2517 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 2518 verbose(env, "variable stack access var_off=%s off=%d size=%d\n", 2519 tn_buf, off, size); 2520 return -EACCES; 2521 } 2522 2523 if (off >= 0 || off < -MAX_BPF_STACK) { 2524 verbose(env, "invalid stack off=%d size=%d\n", off, size); 2525 return -EACCES; 2526 } 2527 2528 return 0; 2529 } 2530 2531 static int check_map_access_type(struct bpf_verifier_env *env, u32 regno, 2532 int off, int size, enum bpf_access_type type) 2533 { 2534 struct bpf_reg_state *regs = cur_regs(env); 2535 struct bpf_map *map = regs[regno].map_ptr; 2536 u32 cap = bpf_map_flags_to_cap(map); 2537 2538 if (type == BPF_WRITE && !(cap & BPF_MAP_CAN_WRITE)) { 2539 verbose(env, "write into map forbidden, value_size=%d off=%d size=%d\n", 2540 map->value_size, off, size); 2541 return -EACCES; 2542 } 2543 2544 if (type == BPF_READ && !(cap & BPF_MAP_CAN_READ)) { 2545 verbose(env, "read from map forbidden, value_size=%d off=%d size=%d\n", 2546 map->value_size, off, size); 2547 return -EACCES; 2548 } 2549 2550 return 0; 2551 } 2552 2553 /* check read/write into memory region (e.g., map value, ringbuf sample, etc) */ 2554 static int __check_mem_access(struct bpf_verifier_env *env, int regno, 2555 int off, int size, u32 mem_size, 2556 bool zero_size_allowed) 2557 { 2558 bool size_ok = size > 0 || (size == 0 && zero_size_allowed); 2559 struct bpf_reg_state *reg; 2560 2561 if (off >= 0 && size_ok && (u64)off + size <= mem_size) 2562 return 0; 2563 2564 reg = &cur_regs(env)[regno]; 2565 switch (reg->type) { 2566 case PTR_TO_MAP_VALUE: 2567 verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n", 2568 mem_size, off, size); 2569 break; 2570 case PTR_TO_PACKET: 2571 case PTR_TO_PACKET_META: 2572 case PTR_TO_PACKET_END: 2573 verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n", 2574 off, size, regno, reg->id, off, mem_size); 2575 break; 2576 case PTR_TO_MEM: 2577 default: 2578 verbose(env, "invalid access to memory, mem_size=%u off=%d size=%d\n", 2579 mem_size, off, size); 2580 } 2581 2582 return -EACCES; 2583 } 2584 2585 /* check read/write into a memory region with possible variable offset */ 2586 static int check_mem_region_access(struct bpf_verifier_env *env, u32 regno, 2587 int off, int size, u32 mem_size, 2588 bool zero_size_allowed) 2589 { 2590 struct bpf_verifier_state *vstate = env->cur_state; 2591 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 2592 struct bpf_reg_state *reg = &state->regs[regno]; 2593 int err; 2594 2595 /* We may have adjusted the register pointing to memory region, so we 2596 * need to try adding each of min_value and max_value to off 2597 * to make sure our theoretical access will be safe. 
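	 * A hypothetical sequence for a 16-byte memory region:
	 *   r0 = ...                  // pointer to the region, off=0
	 *   r1 = *(u32 *)(r6 + 0)     // unknown scalar (r6 assumed to be ctx)
	 *   r1 &= 7                   // now 0 <= r1 <= 7
	 *   r0 += r1
	 *   r2 = *(u8 *)(r0 + 8)      // checked at smin_value + 8 and at
	 *                             // umax_value + 8 with the access size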
2598	 */
2599	if (env->log.level & BPF_LOG_LEVEL)
2600		print_verifier_state(env, state);
2601
2602	/* The minimum value is only important with signed
2603	 * comparisons where we can't assume the floor of a
2604	 * value is 0. If we are using signed variables for our
2605	 * indexes we need to make sure that whatever we use
2606	 * will have a set floor within our range.
2607	 */
2608	if (reg->smin_value < 0 &&
2609	    (reg->smin_value == S64_MIN ||
2610	     (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) ||
2611	     reg->smin_value + off < 0)) {
2612		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
2613			regno);
2614		return -EACCES;
2615	}
2616	err = __check_mem_access(env, regno, reg->smin_value + off, size,
2617				 mem_size, zero_size_allowed);
2618	if (err) {
2619		verbose(env, "R%d min value is outside of the allowed memory range\n",
2620			regno);
2621		return err;
2622	}
2623
2624	/* If we haven't set a max value then we need to bail since we can't be
2625	 * sure we won't do bad things.
2626	 * If reg->umax_value + off could overflow, treat that as unbounded too.
2627	 */
2628	if (reg->umax_value >= BPF_MAX_VAR_OFF) {
2629		verbose(env, "R%d unbounded memory access, make sure to bounds check any such access\n",
2630			regno);
2631		return -EACCES;
2632	}
2633	err = __check_mem_access(env, regno, reg->umax_value + off, size,
2634				 mem_size, zero_size_allowed);
2635	if (err) {
2636		verbose(env, "R%d max value is outside of the allowed memory range\n",
2637			regno);
2638		return err;
2639	}
2640
2641	return 0;
2642 }
2643
2644 /* check read/write into a map element with possible variable offset */
2645 static int check_map_access(struct bpf_verifier_env *env, u32 regno,
2646			     int off, int size, bool zero_size_allowed)
2647 {
2648	struct bpf_verifier_state *vstate = env->cur_state;
2649	struct bpf_func_state *state = vstate->frame[vstate->curframe];
2650	struct bpf_reg_state *reg = &state->regs[regno];
2651	struct bpf_map *map = reg->map_ptr;
2652	int err;
2653
2654	err = check_mem_region_access(env, regno, off, size, map->value_size,
2655				      zero_size_allowed);
2656	if (err)
2657		return err;
2658
2659	if (map_value_has_spin_lock(map)) {
2660		u32 lock = map->spin_lock_off;
2661
2662		/* if any part of struct bpf_spin_lock can be touched by
2663		 * load/store, reject this program.
2664		 * To check that [x1, x2) overlaps with [y1, y2)
2665		 * it is sufficient to check x1 < y2 && y1 < x2.
2666		 */
2667		if (reg->smin_value + off < lock + sizeof(struct bpf_spin_lock) &&
2668		    lock < reg->umax_value + off + size) {
2669			verbose(env, "bpf_spin_lock cannot be accessed directly by load/store\n");
2670			return -EACCES;
2671		}
2672	}
2673	return err;
2674 }
2675
2676 #define MAX_PACKET_OFF 0xffff
2677
2678 static enum bpf_prog_type resolve_prog_type(struct bpf_prog *prog)
2679 {
2680	return prog->aux->dst_prog ? prog->aux->dst_prog->type : prog->type;
2681 }
2682
2683 static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
2684				       const struct bpf_call_arg_meta *meta,
2685				       enum bpf_access_type t)
2686 {
2687	enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
2688
2689	switch (prog_type) {
2690	/* Program types only with direct read access go here!
	 */
2691	case BPF_PROG_TYPE_LWT_IN:
2692	case BPF_PROG_TYPE_LWT_OUT:
2693	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
2694	case BPF_PROG_TYPE_SK_REUSEPORT:
2695	case BPF_PROG_TYPE_FLOW_DISSECTOR:
2696	case BPF_PROG_TYPE_CGROUP_SKB:
2697		if (t == BPF_WRITE)
2698			return false;
2699		fallthrough;
2700
2701	/* Program types with direct read + write access go here! */
2702	case BPF_PROG_TYPE_SCHED_CLS:
2703	case BPF_PROG_TYPE_SCHED_ACT:
2704	case BPF_PROG_TYPE_XDP:
2705	case BPF_PROG_TYPE_LWT_XMIT:
2706	case BPF_PROG_TYPE_SK_SKB:
2707	case BPF_PROG_TYPE_SK_MSG:
2708		if (meta)
2709			return meta->pkt_access;
2710
2711		env->seen_direct_write = true;
2712		return true;
2713
2714	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2715		if (t == BPF_WRITE)
2716			env->seen_direct_write = true;
2717
2718		return true;
2719
2720	default:
2721		return false;
2722	}
2723 }
2724
2725 static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
2726			       int size, bool zero_size_allowed)
2727 {
2728	struct bpf_reg_state *regs = cur_regs(env);
2729	struct bpf_reg_state *reg = &regs[regno];
2730	int err;
2731
2732	/* We may have added a variable offset to the packet pointer; but any
2733	 * reg->range we have comes after that. We are only checking the fixed
2734	 * offset.
2735	 */
2736
2737	/* We don't allow negative numbers, because we aren't tracking enough
2738	 * detail to prove they're safe.
2739	 */
2740	if (reg->smin_value < 0) {
2741		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
2742			regno);
2743		return -EACCES;
2744	}
2745
2746	err = reg->range < 0 ? -EINVAL :
2747	      __check_mem_access(env, regno, off, size, reg->range,
2748				 zero_size_allowed);
2749	if (err) {
2750		verbose(env, "R%d offset is outside of the packet\n", regno);
2751		return err;
2752	}
2753
2754	/* __check_mem_access has made sure "off + size - 1" is within u16.
2755	 * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff,
2756	 * otherwise find_good_pkt_pointers would have refused to set a range
2757	 * and __check_mem_access would have rejected this pkt access.
2758	 * Therefore, "off + reg->umax_value + size - 1" won't overflow u32.
2759	 */
2760	env->prog->aux->max_pkt_offset =
2761		max_t(u32, env->prog->aux->max_pkt_offset,
2762		      off + reg->umax_value + size - 1);
2763
2764	return err;
2765 }
2766
2767 /* check access to 'struct bpf_context' fields. Supports fixed offsets only */
2768 static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
2769			     enum bpf_access_type t, enum bpf_reg_type *reg_type,
2770			     struct btf **btf, u32 *btf_id)
2771 {
2772	struct bpf_insn_access_aux info = {
2773		.reg_type = *reg_type,
2774		.log = &env->log,
2775	};
2776
2777	if (env->ops->is_valid_access &&
2778	    env->ops->is_valid_access(off, size, t, env->prog, &info)) {
2779		/* A non zero info.ctx_field_size indicates that this field is a
2780		 * candidate for later verifier transformation to load the whole
2781		 * field and then apply a mask when accessed with a narrower
2782		 * access than actual ctx access size. A zero info.ctx_field_size
2783		 * will only allow for whole field access and rejects any other
2784		 * type of narrower access.
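		 * For instance, a 1-byte read of a 4-byte ctx field may be
		 * rewritten by the verifier to load the whole 4-byte field and
		 * mask out the unwanted bytes when info.ctx_field_size == 4.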
2785		 */
2786		*reg_type = info.reg_type;
2787
2788		if (*reg_type == PTR_TO_BTF_ID || *reg_type == PTR_TO_BTF_ID_OR_NULL) {
2789			*btf = info.btf;
2790			*btf_id = info.btf_id;
2791		} else {
2792			env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
2793		}
2794		/* remember the offset of last byte accessed in ctx */
2795		if (env->prog->aux->max_ctx_offset < off + size)
2796			env->prog->aux->max_ctx_offset = off + size;
2797		return 0;
2798	}
2799
2800	verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size);
2801	return -EACCES;
2802 }
2803
2804 static int check_flow_keys_access(struct bpf_verifier_env *env, int off,
2805				   int size)
2806 {
2807	if (size < 0 || off < 0 ||
2808	    (u64)off + size > sizeof(struct bpf_flow_keys)) {
2809		verbose(env, "invalid access to flow keys off=%d size=%d\n",
2810			off, size);
2811		return -EACCES;
2812	}
2813	return 0;
2814 }
2815
2816 static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
2817			     u32 regno, int off, int size,
2818			     enum bpf_access_type t)
2819 {
2820	struct bpf_reg_state *regs = cur_regs(env);
2821	struct bpf_reg_state *reg = &regs[regno];
2822	struct bpf_insn_access_aux info = {};
2823	bool valid;
2824
2825	if (reg->smin_value < 0) {
2826		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
2827			regno);
2828		return -EACCES;
2829	}
2830
2831	switch (reg->type) {
2832	case PTR_TO_SOCK_COMMON:
2833		valid = bpf_sock_common_is_valid_access(off, size, t, &info);
2834		break;
2835	case PTR_TO_SOCKET:
2836		valid = bpf_sock_is_valid_access(off, size, t, &info);
2837		break;
2838	case PTR_TO_TCP_SOCK:
2839		valid = bpf_tcp_sock_is_valid_access(off, size, t, &info);
2840		break;
2841	case PTR_TO_XDP_SOCK:
2842		valid = bpf_xdp_sock_is_valid_access(off, size, t, &info);
2843		break;
2844	default:
2845		valid = false;
2846	}
2847
2848
2849	if (valid) {
2850		env->insn_aux_data[insn_idx].ctx_field_size =
2851			info.ctx_field_size;
2852		return 0;
2853	}
2854
2855	verbose(env, "R%d invalid %s access off=%d size=%d\n",
2856		regno, reg_type_str[reg->type], off, size);
2857
2858	return -EACCES;
2859 }
2860
2861 static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
2862 {
2863	return cur_regs(env) + regno;
2864 }
2865
2866 static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
2867 {
2868	return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno));
2869 }
2870
2871 static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
2872 {
2873	const struct bpf_reg_state *reg = reg_state(env, regno);
2874
2875	return reg->type == PTR_TO_CTX;
2876 }
2877
2878 static bool is_sk_reg(struct bpf_verifier_env *env, int regno)
2879 {
2880	const struct bpf_reg_state *reg = reg_state(env, regno);
2881
2882	return type_is_sk_pointer(reg->type);
2883 }
2884
2885 static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
2886 {
2887	const struct bpf_reg_state *reg = reg_state(env, regno);
2888
2889	return type_is_pkt_pointer(reg->type);
2890 }
2891
2892 static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno)
2893 {
2894	const struct bpf_reg_state *reg = reg_state(env, regno);
2895
2896	/* Kept separate from is_ctx_reg() since we still want to allow BPF_ST here.
*/ 2897 return reg->type == PTR_TO_FLOW_KEYS; 2898 } 2899 2900 static int check_pkt_ptr_alignment(struct bpf_verifier_env *env, 2901 const struct bpf_reg_state *reg, 2902 int off, int size, bool strict) 2903 { 2904 struct tnum reg_off; 2905 int ip_align; 2906 2907 /* Byte size accesses are always allowed. */ 2908 if (!strict || size == 1) 2909 return 0; 2910 2911 /* For platforms that do not have a Kconfig enabling 2912 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of 2913 * NET_IP_ALIGN is universally set to '2'. And on platforms 2914 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get 2915 * to this code only in strict mode where we want to emulate 2916 * the NET_IP_ALIGN==2 checking. Therefore use an 2917 * unconditional IP align value of '2'. 2918 */ 2919 ip_align = 2; 2920 2921 reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off)); 2922 if (!tnum_is_aligned(reg_off, size)) { 2923 char tn_buf[48]; 2924 2925 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 2926 verbose(env, 2927 "misaligned packet access off %d+%s+%d+%d size %d\n", 2928 ip_align, tn_buf, reg->off, off, size); 2929 return -EACCES; 2930 } 2931 2932 return 0; 2933 } 2934 2935 static int check_generic_ptr_alignment(struct bpf_verifier_env *env, 2936 const struct bpf_reg_state *reg, 2937 const char *pointer_desc, 2938 int off, int size, bool strict) 2939 { 2940 struct tnum reg_off; 2941 2942 /* Byte size accesses are always allowed. */ 2943 if (!strict || size == 1) 2944 return 0; 2945 2946 reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off)); 2947 if (!tnum_is_aligned(reg_off, size)) { 2948 char tn_buf[48]; 2949 2950 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 2951 verbose(env, "misaligned %saccess off %s+%d+%d size %d\n", 2952 pointer_desc, tn_buf, reg->off, off, size); 2953 return -EACCES; 2954 } 2955 2956 return 0; 2957 } 2958 2959 static int check_ptr_alignment(struct bpf_verifier_env *env, 2960 const struct bpf_reg_state *reg, int off, 2961 int size, bool strict_alignment_once) 2962 { 2963 bool strict = env->strict_alignment || strict_alignment_once; 2964 const char *pointer_desc = ""; 2965 2966 switch (reg->type) { 2967 case PTR_TO_PACKET: 2968 case PTR_TO_PACKET_META: 2969 /* Special case, because of NET_IP_ALIGN. Given metadata sits 2970 * right in front, treat it the very same way. 2971 */ 2972 return check_pkt_ptr_alignment(env, reg, off, size, strict); 2973 case PTR_TO_FLOW_KEYS: 2974 pointer_desc = "flow keys "; 2975 break; 2976 case PTR_TO_MAP_VALUE: 2977 pointer_desc = "value "; 2978 break; 2979 case PTR_TO_CTX: 2980 pointer_desc = "context "; 2981 break; 2982 case PTR_TO_STACK: 2983 pointer_desc = "stack "; 2984 /* The stack spill tracking logic in check_stack_write() 2985 * and check_stack_read() relies on stack accesses being 2986 * aligned. 
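		 * An unaligned 8-byte spill straddling two slots could not
		 * be tracked as a single STACK_SPILL, hence strict mode.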
2987 */ 2988 strict = true; 2989 break; 2990 case PTR_TO_SOCKET: 2991 pointer_desc = "sock "; 2992 break; 2993 case PTR_TO_SOCK_COMMON: 2994 pointer_desc = "sock_common "; 2995 break; 2996 case PTR_TO_TCP_SOCK: 2997 pointer_desc = "tcp_sock "; 2998 break; 2999 case PTR_TO_XDP_SOCK: 3000 pointer_desc = "xdp_sock "; 3001 break; 3002 default: 3003 break; 3004 } 3005 return check_generic_ptr_alignment(env, reg, pointer_desc, off, size, 3006 strict); 3007 } 3008 3009 static int update_stack_depth(struct bpf_verifier_env *env, 3010 const struct bpf_func_state *func, 3011 int off) 3012 { 3013 u16 stack = env->subprog_info[func->subprogno].stack_depth; 3014 3015 if (stack >= -off) 3016 return 0; 3017 3018 /* update known max for given subprogram */ 3019 env->subprog_info[func->subprogno].stack_depth = -off; 3020 return 0; 3021 } 3022 3023 /* starting from main bpf function walk all instructions of the function 3024 * and recursively walk all callees that given function can call. 3025 * Ignore jump and exit insns. 3026 * Since recursion is prevented by check_cfg() this algorithm 3027 * only needs a local stack of MAX_CALL_FRAMES to remember callsites 3028 */ 3029 static int check_max_stack_depth(struct bpf_verifier_env *env) 3030 { 3031 int depth = 0, frame = 0, idx = 0, i = 0, subprog_end; 3032 struct bpf_subprog_info *subprog = env->subprog_info; 3033 struct bpf_insn *insn = env->prog->insnsi; 3034 bool tail_call_reachable = false; 3035 int ret_insn[MAX_CALL_FRAMES]; 3036 int ret_prog[MAX_CALL_FRAMES]; 3037 int j; 3038 3039 process_func: 3040 /* protect against potential stack overflow that might happen when 3041 * bpf2bpf calls get combined with tailcalls. Limit the caller's stack 3042 * depth for such case down to 256 so that the worst case scenario 3043 * would result in 8k stack size (32 which is tailcall limit * 256 = 3044 * 8k). 3045 * 3046 * To get the idea what might happen, see an example: 3047 * func1 -> sub rsp, 128 3048 * subfunc1 -> sub rsp, 256 3049 * tailcall1 -> add rsp, 256 3050 * func2 -> sub rsp, 192 (total stack size = 128 + 192 = 320) 3051 * subfunc2 -> sub rsp, 64 3052 * subfunc22 -> sub rsp, 128 3053 * tailcall2 -> add rsp, 128 3054 * func3 -> sub rsp, 32 (total stack size 128 + 192 + 64 + 32 = 416) 3055 * 3056 * tailcall will unwind the current stack frame but it will not get rid 3057 * of caller's stack as shown on the example above. 3058 */ 3059 if (idx && subprog[idx].has_tail_call && depth >= 256) { 3060 verbose(env, 3061 "tail_calls are not allowed when call stack of previous frames is %d bytes. Too large\n", 3062 depth); 3063 return -EACCES; 3064 } 3065 /* round up to 32-bytes, since this is granularity 3066 * of interpreter stack size 3067 */ 3068 depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32); 3069 if (depth > MAX_BPF_STACK) { 3070 verbose(env, "combined stack size of %d calls is %d. Too large\n", 3071 frame + 1, depth); 3072 return -EACCES; 3073 } 3074 continue_func: 3075 subprog_end = subprog[idx + 1].start; 3076 for (; i < subprog_end; i++) { 3077 if (insn[i].code != (BPF_JMP | BPF_CALL)) 3078 continue; 3079 if (insn[i].src_reg != BPF_PSEUDO_CALL) 3080 continue; 3081 /* remember insn and function to return to */ 3082 ret_insn[frame] = i + 1; 3083 ret_prog[frame] = idx; 3084 3085 /* find the callee */ 3086 i = i + insn[i].imm + 1; 3087 idx = find_subprog(env, i); 3088 if (idx < 0) { 3089 WARN_ONCE(1, "verifier bug. 
No program starts at insn %d\n", 3090 i); 3091 return -EFAULT; 3092 } 3093 3094 if (subprog[idx].has_tail_call) 3095 tail_call_reachable = true; 3096 3097 frame++; 3098 if (frame >= MAX_CALL_FRAMES) { 3099 verbose(env, "the call stack of %d frames is too deep !\n", 3100 frame); 3101 return -E2BIG; 3102 } 3103 goto process_func; 3104 } 3105 /* if tail call got detected across bpf2bpf calls then mark each of the 3106 * currently present subprog frames as tail call reachable subprogs; 3107 * this info will be utilized by JIT so that we will be preserving the 3108 * tail call counter throughout bpf2bpf calls combined with tailcalls 3109 */ 3110 if (tail_call_reachable) 3111 for (j = 0; j < frame; j++) 3112 subprog[ret_prog[j]].tail_call_reachable = true; 3113 3114 /* end of for() loop means the last insn of the 'subprog' 3115 * was reached. Doesn't matter whether it was JA or EXIT 3116 */ 3117 if (frame == 0) 3118 return 0; 3119 depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32); 3120 frame--; 3121 i = ret_insn[frame]; 3122 idx = ret_prog[frame]; 3123 goto continue_func; 3124 } 3125 3126 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 3127 static int get_callee_stack_depth(struct bpf_verifier_env *env, 3128 const struct bpf_insn *insn, int idx) 3129 { 3130 int start = idx + insn->imm + 1, subprog; 3131 3132 subprog = find_subprog(env, start); 3133 if (subprog < 0) { 3134 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", 3135 start); 3136 return -EFAULT; 3137 } 3138 return env->subprog_info[subprog].stack_depth; 3139 } 3140 #endif 3141 3142 int check_ctx_reg(struct bpf_verifier_env *env, 3143 const struct bpf_reg_state *reg, int regno) 3144 { 3145 /* Access to ctx or passing it to a helper is only allowed in 3146 * its original, unmodified form. 
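 * A hypothetical sequence that gets rejected:
 *   r1 += 8                  // ctx pointer modified
 *   r2 = *(u32 *)(r1 + 0)    // "dereference of modified ctx ptr"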
3147  */
3148
3149	if (reg->off) {
3150		verbose(env, "dereference of modified ctx ptr R%d off=%d disallowed\n",
3151			regno, reg->off);
3152		return -EACCES;
3153	}
3154
3155	if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
3156		char tn_buf[48];
3157
3158		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3159		verbose(env, "variable ctx access var_off=%s disallowed\n", tn_buf);
3160		return -EACCES;
3161	}
3162
3163	return 0;
3164 }
3165
3166 static int __check_buffer_access(struct bpf_verifier_env *env,
3167				  const char *buf_info,
3168				  const struct bpf_reg_state *reg,
3169				  int regno, int off, int size)
3170 {
3171	if (off < 0) {
3172		verbose(env,
3173			"R%d invalid %s buffer access: off=%d, size=%d\n",
3174			regno, buf_info, off, size);
3175		return -EACCES;
3176	}
3177	if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
3178		char tn_buf[48];
3179
3180		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3181		verbose(env,
3182			"R%d invalid variable buffer offset: off=%d, var_off=%s\n",
3183			regno, off, tn_buf);
3184		return -EACCES;
3185	}
3186
3187	return 0;
3188 }
3189
3190 static int check_tp_buffer_access(struct bpf_verifier_env *env,
3191				   const struct bpf_reg_state *reg,
3192				   int regno, int off, int size)
3193 {
3194	int err;
3195
3196	err = __check_buffer_access(env, "tracepoint", reg, regno, off, size);
3197	if (err)
3198		return err;
3199
3200	if (off + size > env->prog->aux->max_tp_access)
3201		env->prog->aux->max_tp_access = off + size;
3202
3203	return 0;
3204 }
3205
3206 static int check_buffer_access(struct bpf_verifier_env *env,
3207				const struct bpf_reg_state *reg,
3208				int regno, int off, int size,
3209				bool zero_size_allowed,
3210				const char *buf_info,
3211				u32 *max_access)
3212 {
3213	int err;
3214
3215	err = __check_buffer_access(env, buf_info, reg, regno, off, size);
3216	if (err)
3217		return err;
3218
3219	if (off + size > *max_access)
3220		*max_access = off + size;
3221
3222	return 0;
3223 }
3224
3225 /* BPF architecture zero extends alu32 ops into 64-bit registers */
3226 static void zext_32_to_64(struct bpf_reg_state *reg)
3227 {
3228	reg->var_off = tnum_subreg(reg->var_off);
3229	__reg_assign_32_into_64(reg);
3230 }
3231
3232 /* truncate register to smaller size (in bytes)
3233  * must be called with size < BPF_REG_SIZE
3234  */
3235 static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
3236 {
3237	u64 mask;
3238
3239	/* clear high bits in bit representation */
3240	reg->var_off = tnum_cast(reg->var_off, size);
3241
3242	/* fix arithmetic bounds */
3243	mask = ((u64)1 << (size * 8)) - 1;
3244	if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
3245		reg->umin_value &= mask;
3246		reg->umax_value &= mask;
3247	} else {
3248		reg->umin_value = 0;
3249		reg->umax_value = mask;
3250	}
3251	reg->smin_value = reg->umin_value;
3252	reg->smax_value = reg->umax_value;
3253
3254	/* If size is smaller than the 32-bit register, the 32-bit register
3255	 * values are also truncated, so we push the 64-bit bounds into the
3256	 * 32-bit bounds. Sizes < 32 bits were truncated above already.
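	 * For instance, after a 2-byte truncation umax_value <= 0xffff, so
	 * the 32-bit bounds can safely be derived from the 64-bit ones.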
3257 */ 3258 if (size >= 4) 3259 return; 3260 __reg_combine_64_into_32(reg); 3261 } 3262 3263 static bool bpf_map_is_rdonly(const struct bpf_map *map) 3264 { 3265 return (map->map_flags & BPF_F_RDONLY_PROG) && map->frozen; 3266 } 3267 3268 static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val) 3269 { 3270 void *ptr; 3271 u64 addr; 3272 int err; 3273 3274 err = map->ops->map_direct_value_addr(map, &addr, off); 3275 if (err) 3276 return err; 3277 ptr = (void *)(long)addr + off; 3278 3279 switch (size) { 3280 case sizeof(u8): 3281 *val = (u64)*(u8 *)ptr; 3282 break; 3283 case sizeof(u16): 3284 *val = (u64)*(u16 *)ptr; 3285 break; 3286 case sizeof(u32): 3287 *val = (u64)*(u32 *)ptr; 3288 break; 3289 case sizeof(u64): 3290 *val = *(u64 *)ptr; 3291 break; 3292 default: 3293 return -EINVAL; 3294 } 3295 return 0; 3296 } 3297 3298 static int check_ptr_to_btf_access(struct bpf_verifier_env *env, 3299 struct bpf_reg_state *regs, 3300 int regno, int off, int size, 3301 enum bpf_access_type atype, 3302 int value_regno) 3303 { 3304 struct bpf_reg_state *reg = regs + regno; 3305 const struct btf_type *t = btf_type_by_id(reg->btf, reg->btf_id); 3306 const char *tname = btf_name_by_offset(reg->btf, t->name_off); 3307 u32 btf_id; 3308 int ret; 3309 3310 if (off < 0) { 3311 verbose(env, 3312 "R%d is ptr_%s invalid negative access: off=%d\n", 3313 regno, tname, off); 3314 return -EACCES; 3315 } 3316 if (!tnum_is_const(reg->var_off) || reg->var_off.value) { 3317 char tn_buf[48]; 3318 3319 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 3320 verbose(env, 3321 "R%d is ptr_%s invalid variable offset: off=%d, var_off=%s\n", 3322 regno, tname, off, tn_buf); 3323 return -EACCES; 3324 } 3325 3326 if (env->ops->btf_struct_access) { 3327 ret = env->ops->btf_struct_access(&env->log, reg->btf, t, 3328 off, size, atype, &btf_id); 3329 } else { 3330 if (atype != BPF_READ) { 3331 verbose(env, "only read is supported\n"); 3332 return -EACCES; 3333 } 3334 3335 ret = btf_struct_access(&env->log, reg->btf, t, off, size, 3336 atype, &btf_id); 3337 } 3338 3339 if (ret < 0) 3340 return ret; 3341 3342 if (atype == BPF_READ && value_regno >= 0) 3343 mark_btf_ld_reg(env, regs, value_regno, ret, reg->btf, btf_id); 3344 3345 return 0; 3346 } 3347 3348 static int check_ptr_to_map_access(struct bpf_verifier_env *env, 3349 struct bpf_reg_state *regs, 3350 int regno, int off, int size, 3351 enum bpf_access_type atype, 3352 int value_regno) 3353 { 3354 struct bpf_reg_state *reg = regs + regno; 3355 struct bpf_map *map = reg->map_ptr; 3356 const struct btf_type *t; 3357 const char *tname; 3358 u32 btf_id; 3359 int ret; 3360 3361 if (!btf_vmlinux) { 3362 verbose(env, "map_ptr access not supported without CONFIG_DEBUG_INFO_BTF\n"); 3363 return -ENOTSUPP; 3364 } 3365 3366 if (!map->ops->map_btf_id || !*map->ops->map_btf_id) { 3367 verbose(env, "map_ptr access not supported for map type %d\n", 3368 map->map_type); 3369 return -ENOTSUPP; 3370 } 3371 3372 t = btf_type_by_id(btf_vmlinux, *map->ops->map_btf_id); 3373 tname = btf_name_by_offset(btf_vmlinux, t->name_off); 3374 3375 if (!env->allow_ptr_to_map_access) { 3376 verbose(env, 3377 "%s access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN\n", 3378 tname); 3379 return -EPERM; 3380 } 3381 3382 if (off < 0) { 3383 verbose(env, "R%d is %s invalid negative access: off=%d\n", 3384 regno, tname, off); 3385 return -EACCES; 3386 } 3387 3388 if (atype != BPF_READ) { 3389 verbose(env, "only read from %s is supported\n", tname); 3390 return -EACCES; 3391 } 3392 3393 ret = 
btf_struct_access(&env->log, btf_vmlinux, t, off, size, atype, &btf_id);
3394	if (ret < 0)
3395		return ret;
3396
3397	if (value_regno >= 0)
3398		mark_btf_ld_reg(env, regs, value_regno, ret, btf_vmlinux, btf_id);
3399
3400	return 0;
3401 }
3402
3403
3404 /* check whether memory at (regno + off) is accessible for t = (read | write)
3405  * if t==write, value_regno is a register whose value is stored into memory
3406  * if t==read, value_regno is a register which will receive the value from memory
3407  * if t==write && value_regno==-1, some unknown value is stored into memory
3408  * if t==read && value_regno==-1, don't care what we read from memory
3409  */
3410 static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
3411			     int off, int bpf_size, enum bpf_access_type t,
3412			     int value_regno, bool strict_alignment_once)
3413 {
3414	struct bpf_reg_state *regs = cur_regs(env);
3415	struct bpf_reg_state *reg = regs + regno;
3416	struct bpf_func_state *state;
3417	int size, err = 0;
3418
3419	size = bpf_size_to_bytes(bpf_size);
3420	if (size < 0)
3421		return size;
3422
3423	/* alignment checks will add in reg->off themselves */
3424	err = check_ptr_alignment(env, reg, off, size, strict_alignment_once);
3425	if (err)
3426		return err;
3427
3428	/* for access checks, reg->off is just part of off */
3429	off += reg->off;
3430
3431	if (reg->type == PTR_TO_MAP_VALUE) {
3432		if (t == BPF_WRITE && value_regno >= 0 &&
3433		    is_pointer_value(env, value_regno)) {
3434			verbose(env, "R%d leaks addr into map\n", value_regno);
3435			return -EACCES;
3436		}
3437		err = check_map_access_type(env, regno, off, size, t);
3438		if (err)
3439			return err;
3440		err = check_map_access(env, regno, off, size, false);
3441		if (!err && t == BPF_READ && value_regno >= 0) {
3442			struct bpf_map *map = reg->map_ptr;
3443
3444			/* if map is read-only, track its contents as scalars */
3445			if (tnum_is_const(reg->var_off) &&
3446			    bpf_map_is_rdonly(map) &&
3447			    map->ops->map_direct_value_addr) {
3448				int map_off = off + reg->var_off.value;
3449				u64 val = 0;
3450
3451				err = bpf_map_direct_read(map, map_off, size,
3452							  &val);
3453				if (err)
3454					return err;
3455
3456				regs[value_regno].type = SCALAR_VALUE;
3457				__mark_reg_known(&regs[value_regno], val);
3458			} else {
3459				mark_reg_unknown(env, regs, value_regno);
3460			}
3461		}
3462	} else if (reg->type == PTR_TO_MEM) {
3463		if (t == BPF_WRITE && value_regno >= 0 &&
3464		    is_pointer_value(env, value_regno)) {
3465			verbose(env, "R%d leaks addr into mem\n", value_regno);
3466			return -EACCES;
3467		}
3468		err = check_mem_region_access(env, regno, off, size,
3469					      reg->mem_size, false);
3470		if (!err && t == BPF_READ && value_regno >= 0)
3471			mark_reg_unknown(env, regs, value_regno);
3472	} else if (reg->type == PTR_TO_CTX) {
3473		enum bpf_reg_type reg_type = SCALAR_VALUE;
3474		struct btf *btf = NULL;
3475		u32 btf_id = 0;
3476
3477		if (t == BPF_WRITE && value_regno >= 0 &&
3478		    is_pointer_value(env, value_regno)) {
3479			verbose(env, "R%d leaks addr into ctx\n", value_regno);
3480			return -EACCES;
3481		}
3482
3483		err = check_ctx_reg(env, reg, regno);
3484		if (err < 0)
3485			return err;
3486
3487		err = check_ctx_access(env, insn_idx, off, size, t, &reg_type, &btf, &btf_id);
3488		if (err)
3489			verbose_linfo(env, insn_idx, "; ");
3490		if (!err && t == BPF_READ && value_regno >= 0) {
3491			/* ctx access returns either a scalar, or a
3492			 * PTR_TO_PACKET[_META,_END]. In the latter
3493			 * case, we know the offset is zero.
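			 * (e.g. a load of skb->data yields PTR_TO_PACKET
			 * with a known-zero offset)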
3494 */ 3495 if (reg_type == SCALAR_VALUE) { 3496 mark_reg_unknown(env, regs, value_regno); 3497 } else { 3498 mark_reg_known_zero(env, regs, 3499 value_regno); 3500 if (reg_type_may_be_null(reg_type)) 3501 regs[value_regno].id = ++env->id_gen; 3502 /* A load of ctx field could have different 3503 * actual load size with the one encoded in the 3504 * insn. When the dst is PTR, it is for sure not 3505 * a sub-register. 3506 */ 3507 regs[value_regno].subreg_def = DEF_NOT_SUBREG; 3508 if (reg_type == PTR_TO_BTF_ID || 3509 reg_type == PTR_TO_BTF_ID_OR_NULL) { 3510 regs[value_regno].btf = btf; 3511 regs[value_regno].btf_id = btf_id; 3512 } 3513 } 3514 regs[value_regno].type = reg_type; 3515 } 3516 3517 } else if (reg->type == PTR_TO_STACK) { 3518 off += reg->var_off.value; 3519 err = check_stack_access(env, reg, off, size); 3520 if (err) 3521 return err; 3522 3523 state = func(env, reg); 3524 err = update_stack_depth(env, state, off); 3525 if (err) 3526 return err; 3527 3528 if (t == BPF_WRITE) 3529 err = check_stack_write(env, state, off, size, 3530 value_regno, insn_idx); 3531 else 3532 err = check_stack_read(env, state, off, size, 3533 value_regno); 3534 } else if (reg_is_pkt_pointer(reg)) { 3535 if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) { 3536 verbose(env, "cannot write into packet\n"); 3537 return -EACCES; 3538 } 3539 if (t == BPF_WRITE && value_regno >= 0 && 3540 is_pointer_value(env, value_regno)) { 3541 verbose(env, "R%d leaks addr into packet\n", 3542 value_regno); 3543 return -EACCES; 3544 } 3545 err = check_packet_access(env, regno, off, size, false); 3546 if (!err && t == BPF_READ && value_regno >= 0) 3547 mark_reg_unknown(env, regs, value_regno); 3548 } else if (reg->type == PTR_TO_FLOW_KEYS) { 3549 if (t == BPF_WRITE && value_regno >= 0 && 3550 is_pointer_value(env, value_regno)) { 3551 verbose(env, "R%d leaks addr into flow keys\n", 3552 value_regno); 3553 return -EACCES; 3554 } 3555 3556 err = check_flow_keys_access(env, off, size); 3557 if (!err && t == BPF_READ && value_regno >= 0) 3558 mark_reg_unknown(env, regs, value_regno); 3559 } else if (type_is_sk_pointer(reg->type)) { 3560 if (t == BPF_WRITE) { 3561 verbose(env, "R%d cannot write into %s\n", 3562 regno, reg_type_str[reg->type]); 3563 return -EACCES; 3564 } 3565 err = check_sock_access(env, insn_idx, regno, off, size, t); 3566 if (!err && value_regno >= 0) 3567 mark_reg_unknown(env, regs, value_regno); 3568 } else if (reg->type == PTR_TO_TP_BUFFER) { 3569 err = check_tp_buffer_access(env, reg, regno, off, size); 3570 if (!err && t == BPF_READ && value_regno >= 0) 3571 mark_reg_unknown(env, regs, value_regno); 3572 } else if (reg->type == PTR_TO_BTF_ID) { 3573 err = check_ptr_to_btf_access(env, regs, regno, off, size, t, 3574 value_regno); 3575 } else if (reg->type == CONST_PTR_TO_MAP) { 3576 err = check_ptr_to_map_access(env, regs, regno, off, size, t, 3577 value_regno); 3578 } else if (reg->type == PTR_TO_RDONLY_BUF) { 3579 if (t == BPF_WRITE) { 3580 verbose(env, "R%d cannot write into %s\n", 3581 regno, reg_type_str[reg->type]); 3582 return -EACCES; 3583 } 3584 err = check_buffer_access(env, reg, regno, off, size, false, 3585 "rdonly", 3586 &env->prog->aux->max_rdonly_access); 3587 if (!err && value_regno >= 0) 3588 mark_reg_unknown(env, regs, value_regno); 3589 } else if (reg->type == PTR_TO_RDWR_BUF) { 3590 err = check_buffer_access(env, reg, regno, off, size, false, 3591 "rdwr", 3592 &env->prog->aux->max_rdwr_access); 3593 if (!err && t == BPF_READ && value_regno >= 0) 3594 
mark_reg_unknown(env, regs, value_regno);
3595 } else {
3596 verbose(env, "R%d invalid mem access '%s'\n", regno,
3597 reg_type_str[reg->type]);
3598 return -EACCES;
3599 }
3600
3601 if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
3602 regs[value_regno].type == SCALAR_VALUE) {
3603 /* b/h/w load zero-extends, mark upper bits as known 0 */
3604 coerce_reg_to_size(&regs[value_regno], size);
3605 }
3606 return err;
3607 }
3608
3609 static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
3610 {
3611 int load_reg;
3612 int err;
3613
3614 switch (insn->imm) {
3615 case BPF_ADD:
3616 case BPF_ADD | BPF_FETCH:
3617 case BPF_AND:
3618 case BPF_AND | BPF_FETCH:
3619 case BPF_OR:
3620 case BPF_OR | BPF_FETCH:
3621 case BPF_XOR:
3622 case BPF_XOR | BPF_FETCH:
3623 case BPF_XCHG:
3624 case BPF_CMPXCHG:
3625 break;
3626 default:
3627 verbose(env, "BPF_ATOMIC uses invalid atomic opcode %02x\n", insn->imm);
3628 return -EINVAL;
3629 }
3630
3631 if (BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) {
3632 verbose(env, "invalid atomic operand size\n");
3633 return -EINVAL;
3634 }
3635
3636 /* check src1 operand */
3637 err = check_reg_arg(env, insn->src_reg, SRC_OP);
3638 if (err)
3639 return err;
3640
3641 /* check src2 operand */
3642 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
3643 if (err)
3644 return err;
3645
3646 if (insn->imm == BPF_CMPXCHG) {
3647 /* Check comparison of R0 with memory location */
3648 err = check_reg_arg(env, BPF_REG_0, SRC_OP);
3649 if (err)
3650 return err;
3651 }
3652
3653 if (is_pointer_value(env, insn->src_reg)) {
3654 verbose(env, "R%d leaks addr into mem\n", insn->src_reg);
3655 return -EACCES;
3656 }
3657
3658 if (is_ctx_reg(env, insn->dst_reg) ||
3659 is_pkt_reg(env, insn->dst_reg) ||
3660 is_flow_key_reg(env, insn->dst_reg) ||
3661 is_sk_reg(env, insn->dst_reg)) {
3662 verbose(env, "BPF_ATOMIC stores into R%d %s is not allowed\n",
3663 insn->dst_reg,
3664 reg_type_str[reg_state(env, insn->dst_reg)->type]);
3665 return -EACCES;
3666 }
3667
3668 /* check whether we can read the memory */
3669 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
3670 BPF_SIZE(insn->code), BPF_READ, -1, true);
3671 if (err)
3672 return err;
3673
3674 /* check whether we can write into the same memory */
3675 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
3676 BPF_SIZE(insn->code), BPF_WRITE, -1, true);
3677 if (err)
3678 return err;
3679
3680 if (!(insn->imm & BPF_FETCH))
3681 return 0;
3682
3683 if (insn->imm == BPF_CMPXCHG)
3684 load_reg = BPF_REG_0;
3685 else
3686 load_reg = insn->src_reg;
3687
3688 /* check and record load of old value */
3689 err = check_reg_arg(env, load_reg, DST_OP);
3690 if (err)
3691 return err;
3692
3693 return 0;
3694 }
3695
3696 static int __check_stack_boundary(struct bpf_verifier_env *env, u32 regno,
3697 int off, int access_size,
3698 bool zero_size_allowed)
3699 {
3700 struct bpf_reg_state *reg = reg_state(env, regno);
3701
3702 if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
3703 access_size < 0 || (access_size == 0 && !zero_size_allowed)) {
3704 if (tnum_is_const(reg->var_off)) {
3705 verbose(env, "invalid stack type R%d off=%d access_size=%d\n",
3706 regno, off, access_size);
3707 } else {
3708 char tn_buf[48];
3709
3710 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3711 verbose(env, "invalid stack type R%d var_off=%s access_size=%d\n",
3712 regno, tn_buf, access_size);
3713 }
3714 return -EACCES;
3715 }
3716 return 0;
3717 }
3718
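/* For illustration only (a sketch, not part of the verifier itself):
 * an atomic fetch-add on an initialized stack slot that check_atomic()
 * above accepts:
 *
 *   BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), // init slot, so BPF_READ check passes
 *   BPF_MOV64_IMM(BPF_REG_1, 1),
 *   BPF_RAW_INSN(BPF_STX | BPF_DW | BPF_ATOMIC, BPF_REG_10, BPF_REG_1, -8,
 *                BPF_ADD | BPF_FETCH),
 *
 * The dst reg R10 is PTR_TO_STACK (not a ctx/pkt/flow-keys/sock pointer),
 * the slot is both readable and writable, and because BPF_FETCH is set
 * the src reg R1 is re-marked as a destination holding the old memory value.
 */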
/* when register 'regno' is passed into function that will read 'access_size' 3720 * bytes from that pointer, make sure that it's within stack boundary 3721 * and all elements of stack are initialized. 3722 * Unlike most pointer bounds-checking functions, this one doesn't take an 3723 * 'off' argument, so it has to add in reg->off itself. 3724 */ 3725 static int check_stack_boundary(struct bpf_verifier_env *env, int regno, 3726 int access_size, bool zero_size_allowed, 3727 struct bpf_call_arg_meta *meta) 3728 { 3729 struct bpf_reg_state *reg = reg_state(env, regno); 3730 struct bpf_func_state *state = func(env, reg); 3731 int err, min_off, max_off, i, j, slot, spi; 3732 3733 if (tnum_is_const(reg->var_off)) { 3734 min_off = max_off = reg->var_off.value + reg->off; 3735 err = __check_stack_boundary(env, regno, min_off, access_size, 3736 zero_size_allowed); 3737 if (err) 3738 return err; 3739 } else { 3740 /* Variable offset is prohibited for unprivileged mode for 3741 * simplicity since it requires corresponding support in 3742 * Spectre masking for stack ALU. 3743 * See also retrieve_ptr_limit(). 3744 */ 3745 if (!env->bypass_spec_v1) { 3746 char tn_buf[48]; 3747 3748 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 3749 verbose(env, "R%d indirect variable offset stack access prohibited for !root, var_off=%s\n", 3750 regno, tn_buf); 3751 return -EACCES; 3752 } 3753 /* Only initialized buffer on stack is allowed to be accessed 3754 * with variable offset. With uninitialized buffer it's hard to 3755 * guarantee that whole memory is marked as initialized on 3756 * helper return since specific bounds are unknown what may 3757 * cause uninitialized stack leaking. 3758 */ 3759 if (meta && meta->raw_mode) 3760 meta = NULL; 3761 3762 if (reg->smax_value >= BPF_MAX_VAR_OFF || 3763 reg->smax_value <= -BPF_MAX_VAR_OFF) { 3764 verbose(env, "R%d unbounded indirect variable offset stack access\n", 3765 regno); 3766 return -EACCES; 3767 } 3768 min_off = reg->smin_value + reg->off; 3769 max_off = reg->smax_value + reg->off; 3770 err = __check_stack_boundary(env, regno, min_off, access_size, 3771 zero_size_allowed); 3772 if (err) { 3773 verbose(env, "R%d min value is outside of stack bound\n", 3774 regno); 3775 return err; 3776 } 3777 err = __check_stack_boundary(env, regno, max_off, access_size, 3778 zero_size_allowed); 3779 if (err) { 3780 verbose(env, "R%d max value is outside of stack bound\n", 3781 regno); 3782 return err; 3783 } 3784 } 3785 3786 if (meta && meta->raw_mode) { 3787 meta->access_size = access_size; 3788 meta->regno = regno; 3789 return 0; 3790 } 3791 3792 for (i = min_off; i < max_off + access_size; i++) { 3793 u8 *stype; 3794 3795 slot = -i - 1; 3796 spi = slot / BPF_REG_SIZE; 3797 if (state->allocated_stack <= slot) 3798 goto err; 3799 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; 3800 if (*stype == STACK_MISC) 3801 goto mark; 3802 if (*stype == STACK_ZERO) { 3803 /* helper can write anything into the stack */ 3804 *stype = STACK_MISC; 3805 goto mark; 3806 } 3807 3808 if (state->stack[spi].slot_type[0] == STACK_SPILL && 3809 state->stack[spi].spilled_ptr.type == PTR_TO_BTF_ID) 3810 goto mark; 3811 3812 if (state->stack[spi].slot_type[0] == STACK_SPILL && 3813 (state->stack[spi].spilled_ptr.type == SCALAR_VALUE || 3814 env->allow_ptr_leaks)) { 3815 __mark_reg_unknown(env, &state->stack[spi].spilled_ptr); 3816 for (j = 0; j < BPF_REG_SIZE; j++) 3817 state->stack[spi].slot_type[j] = STACK_MISC; 3818 goto mark; 3819 } 3820 3821 err: 3822 if (tnum_is_const(reg->var_off)) { 
3823 verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
3824 min_off, i - min_off, access_size);
3825 } else {
3826 char tn_buf[48];
3827
3828 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3829 verbose(env, "invalid indirect read from stack var_off %s+%d size %d\n",
3830 tn_buf, i - min_off, access_size);
3831 }
3832 return -EACCES;
3833 mark:
3834 /* reading any byte out of 8-byte 'spill_slot' will cause
3835 * the whole slot to be marked as 'read'
3836 */
3837 mark_reg_read(env, &state->stack[spi].spilled_ptr,
3838 state->stack[spi].spilled_ptr.parent,
3839 REG_LIVE_READ64);
3840 }
3841 return update_stack_depth(env, state, min_off);
3842 }
3843
3844 static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
3845 int access_size, bool zero_size_allowed,
3846 struct bpf_call_arg_meta *meta)
3847 {
3848 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
3849
3850 switch (reg->type) {
3851 case PTR_TO_PACKET:
3852 case PTR_TO_PACKET_META:
3853 return check_packet_access(env, regno, reg->off, access_size,
3854 zero_size_allowed);
3855 case PTR_TO_MAP_VALUE:
3856 if (check_map_access_type(env, regno, reg->off, access_size,
3857 meta && meta->raw_mode ? BPF_WRITE :
3858 BPF_READ))
3859 return -EACCES;
3860 return check_map_access(env, regno, reg->off, access_size,
3861 zero_size_allowed);
3862 case PTR_TO_MEM:
3863 return check_mem_region_access(env, regno, reg->off,
3864 access_size, reg->mem_size,
3865 zero_size_allowed);
3866 case PTR_TO_RDONLY_BUF:
3867 if (meta && meta->raw_mode)
3868 return -EACCES;
3869 return check_buffer_access(env, reg, regno, reg->off,
3870 access_size, zero_size_allowed,
3871 "rdonly",
3872 &env->prog->aux->max_rdonly_access);
3873 case PTR_TO_RDWR_BUF:
3874 return check_buffer_access(env, reg, regno, reg->off,
3875 access_size, zero_size_allowed,
3876 "rdwr",
3877 &env->prog->aux->max_rdwr_access);
3878 case PTR_TO_STACK:
3879 return check_stack_boundary(env, regno, access_size,
3880 zero_size_allowed, meta);
3881 default: /* scalar_value or invalid ptr */
3882 /* Allow zero-byte read from NULL, regardless of pointer type */
3883 if (zero_size_allowed && access_size == 0 &&
3884 register_is_null(reg))
3885 return 0;
3886
3887 verbose(env, "R%d type=%s expected=%s\n", regno,
3888 reg_type_str[reg->type],
3889 reg_type_str[PTR_TO_STACK]);
3890 return -EACCES;
3891 }
3892 }
3893
3894 /* Implementation details:
3895 * bpf_map_lookup returns PTR_TO_MAP_VALUE_OR_NULL
3896 * Two bpf_map_lookups (even with the same key) will have different reg->id.
3897 * For traditional PTR_TO_MAP_VALUE the verifier clears reg->id after
3898 * value_or_null->value transition, since the verifier only cares about
3899 * the range of access to valid map value pointer and doesn't care about actual
3900 * address of the map element.
3901 * For maps with 'struct bpf_spin_lock' inside map value the verifier keeps
3902 * reg->id > 0 after value_or_null->value transition. By doing so
3903 * two bpf_map_lookups will be considered two different pointers that
3904 * point to different bpf_spin_locks.
3905 * The verifier allows taking only one bpf_spin_lock at a time to avoid
3906 * dead-locks.
3907 * Since only one bpf_spin_lock is allowed the checks are simpler than
3908 * reg_is_refcounted() logic. The verifier needs to remember only
3909 * one spin_lock instead of array of acquired_refs.
3910 * cur_state->active_spin_lock remembers which map value element got locked
3911 * and clears it after bpf_spin_unlock.
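 *
 * For illustration, a (sketched) pattern these rules admit, assuming a
 * map value layout of { struct bpf_spin_lock lock; int counter; } with
 * hypothetical field names:
 *
 *   val = bpf_map_lookup_elem(&map, &key); // PTR_TO_MAP_VALUE_OR_NULL, id=N
 *   if (!val)
 *           return 0;                      // now PTR_TO_MAP_VALUE, id N kept
 *   bpf_spin_lock(&val->lock);             // active_spin_lock = N
 *   val->counter++;
 *   bpf_spin_unlock(&val->lock);           // active_spin_lock = 0
 *
 * A second lookup gets a different id, so taking its lock while id N is
 * still held is rejected.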
3912 */ 3913 static int process_spin_lock(struct bpf_verifier_env *env, int regno, 3914 bool is_lock) 3915 { 3916 struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; 3917 struct bpf_verifier_state *cur = env->cur_state; 3918 bool is_const = tnum_is_const(reg->var_off); 3919 struct bpf_map *map = reg->map_ptr; 3920 u64 val = reg->var_off.value; 3921 3922 if (!is_const) { 3923 verbose(env, 3924 "R%d doesn't have constant offset. bpf_spin_lock has to be at the constant offset\n", 3925 regno); 3926 return -EINVAL; 3927 } 3928 if (!map->btf) { 3929 verbose(env, 3930 "map '%s' has to have BTF in order to use bpf_spin_lock\n", 3931 map->name); 3932 return -EINVAL; 3933 } 3934 if (!map_value_has_spin_lock(map)) { 3935 if (map->spin_lock_off == -E2BIG) 3936 verbose(env, 3937 "map '%s' has more than one 'struct bpf_spin_lock'\n", 3938 map->name); 3939 else if (map->spin_lock_off == -ENOENT) 3940 verbose(env, 3941 "map '%s' doesn't have 'struct bpf_spin_lock'\n", 3942 map->name); 3943 else 3944 verbose(env, 3945 "map '%s' is not a struct type or bpf_spin_lock is mangled\n", 3946 map->name); 3947 return -EINVAL; 3948 } 3949 if (map->spin_lock_off != val + reg->off) { 3950 verbose(env, "off %lld doesn't point to 'struct bpf_spin_lock'\n", 3951 val + reg->off); 3952 return -EINVAL; 3953 } 3954 if (is_lock) { 3955 if (cur->active_spin_lock) { 3956 verbose(env, 3957 "Locking two bpf_spin_locks are not allowed\n"); 3958 return -EINVAL; 3959 } 3960 cur->active_spin_lock = reg->id; 3961 } else { 3962 if (!cur->active_spin_lock) { 3963 verbose(env, "bpf_spin_unlock without taking a lock\n"); 3964 return -EINVAL; 3965 } 3966 if (cur->active_spin_lock != reg->id) { 3967 verbose(env, "bpf_spin_unlock of different lock\n"); 3968 return -EINVAL; 3969 } 3970 cur->active_spin_lock = 0; 3971 } 3972 return 0; 3973 } 3974 3975 static bool arg_type_is_mem_ptr(enum bpf_arg_type type) 3976 { 3977 return type == ARG_PTR_TO_MEM || 3978 type == ARG_PTR_TO_MEM_OR_NULL || 3979 type == ARG_PTR_TO_UNINIT_MEM; 3980 } 3981 3982 static bool arg_type_is_mem_size(enum bpf_arg_type type) 3983 { 3984 return type == ARG_CONST_SIZE || 3985 type == ARG_CONST_SIZE_OR_ZERO; 3986 } 3987 3988 static bool arg_type_is_alloc_size(enum bpf_arg_type type) 3989 { 3990 return type == ARG_CONST_ALLOC_SIZE_OR_ZERO; 3991 } 3992 3993 static bool arg_type_is_int_ptr(enum bpf_arg_type type) 3994 { 3995 return type == ARG_PTR_TO_INT || 3996 type == ARG_PTR_TO_LONG; 3997 } 3998 3999 static int int_ptr_type_to_size(enum bpf_arg_type type) 4000 { 4001 if (type == ARG_PTR_TO_INT) 4002 return sizeof(u32); 4003 else if (type == ARG_PTR_TO_LONG) 4004 return sizeof(u64); 4005 4006 return -EINVAL; 4007 } 4008 4009 static int resolve_map_arg_type(struct bpf_verifier_env *env, 4010 const struct bpf_call_arg_meta *meta, 4011 enum bpf_arg_type *arg_type) 4012 { 4013 if (!meta->map_ptr) { 4014 /* kernel subsystem misconfigured verifier */ 4015 verbose(env, "invalid map_ptr to access map->type\n"); 4016 return -EACCES; 4017 } 4018 4019 switch (meta->map_ptr->map_type) { 4020 case BPF_MAP_TYPE_SOCKMAP: 4021 case BPF_MAP_TYPE_SOCKHASH: 4022 if (*arg_type == ARG_PTR_TO_MAP_VALUE) { 4023 *arg_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON; 4024 } else { 4025 verbose(env, "invalid arg_type for sockmap/sockhash\n"); 4026 return -EINVAL; 4027 } 4028 break; 4029 4030 default: 4031 break; 4032 } 4033 return 0; 4034 } 4035 4036 struct bpf_reg_types { 4037 const enum bpf_reg_type types[10]; 4038 u32 *btf_id; 4039 }; 4040 4041 static const struct bpf_reg_types map_key_value_types = 
{
4042 .types = {
4043 PTR_TO_STACK,
4044 PTR_TO_PACKET,
4045 PTR_TO_PACKET_META,
4046 PTR_TO_MAP_VALUE,
4047 },
4048 };
4049
4050 static const struct bpf_reg_types sock_types = {
4051 .types = {
4052 PTR_TO_SOCK_COMMON,
4053 PTR_TO_SOCKET,
4054 PTR_TO_TCP_SOCK,
4055 PTR_TO_XDP_SOCK,
4056 },
4057 };
4058
4059 #ifdef CONFIG_NET
4060 static const struct bpf_reg_types btf_id_sock_common_types = {
4061 .types = {
4062 PTR_TO_SOCK_COMMON,
4063 PTR_TO_SOCKET,
4064 PTR_TO_TCP_SOCK,
4065 PTR_TO_XDP_SOCK,
4066 PTR_TO_BTF_ID,
4067 },
4068 .btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
4069 };
4070 #endif
4071
4072 static const struct bpf_reg_types mem_types = {
4073 .types = {
4074 PTR_TO_STACK,
4075 PTR_TO_PACKET,
4076 PTR_TO_PACKET_META,
4077 PTR_TO_MAP_VALUE,
4078 PTR_TO_MEM,
4079 PTR_TO_RDONLY_BUF,
4080 PTR_TO_RDWR_BUF,
4081 },
4082 };
4083
4084 static const struct bpf_reg_types int_ptr_types = {
4085 .types = {
4086 PTR_TO_STACK,
4087 PTR_TO_PACKET,
4088 PTR_TO_PACKET_META,
4089 PTR_TO_MAP_VALUE,
4090 },
4091 };
4092
4093 static const struct bpf_reg_types fullsock_types = { .types = { PTR_TO_SOCKET } };
4094 static const struct bpf_reg_types scalar_types = { .types = { SCALAR_VALUE } };
4095 static const struct bpf_reg_types context_types = { .types = { PTR_TO_CTX } };
4096 static const struct bpf_reg_types alloc_mem_types = { .types = { PTR_TO_MEM } };
4097 static const struct bpf_reg_types const_map_ptr_types = { .types = { CONST_PTR_TO_MAP } };
4098 static const struct bpf_reg_types btf_ptr_types = { .types = { PTR_TO_BTF_ID } };
4099 static const struct bpf_reg_types spin_lock_types = { .types = { PTR_TO_MAP_VALUE } };
4100 static const struct bpf_reg_types percpu_btf_ptr_types = { .types = { PTR_TO_PERCPU_BTF_ID } };
4101
4102 static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = {
4103 [ARG_PTR_TO_MAP_KEY] = &map_key_value_types,
4104 [ARG_PTR_TO_MAP_VALUE] = &map_key_value_types,
4105 [ARG_PTR_TO_UNINIT_MAP_VALUE] = &map_key_value_types,
4106 [ARG_PTR_TO_MAP_VALUE_OR_NULL] = &map_key_value_types,
4107 [ARG_CONST_SIZE] = &scalar_types,
4108 [ARG_CONST_SIZE_OR_ZERO] = &scalar_types,
4109 [ARG_CONST_ALLOC_SIZE_OR_ZERO] = &scalar_types,
4110 [ARG_CONST_MAP_PTR] = &const_map_ptr_types,
4111 [ARG_PTR_TO_CTX] = &context_types,
4112 [ARG_PTR_TO_CTX_OR_NULL] = &context_types,
4113 [ARG_PTR_TO_SOCK_COMMON] = &sock_types,
4114 #ifdef CONFIG_NET
4115 [ARG_PTR_TO_BTF_ID_SOCK_COMMON] = &btf_id_sock_common_types,
4116 #endif
4117 [ARG_PTR_TO_SOCKET] = &fullsock_types,
4118 [ARG_PTR_TO_SOCKET_OR_NULL] = &fullsock_types,
4119 [ARG_PTR_TO_BTF_ID] = &btf_ptr_types,
4120 [ARG_PTR_TO_SPIN_LOCK] = &spin_lock_types,
4121 [ARG_PTR_TO_MEM] = &mem_types,
4122 [ARG_PTR_TO_MEM_OR_NULL] = &mem_types,
4123 [ARG_PTR_TO_UNINIT_MEM] = &mem_types,
4124 [ARG_PTR_TO_ALLOC_MEM] = &alloc_mem_types,
4125 [ARG_PTR_TO_ALLOC_MEM_OR_NULL] = &alloc_mem_types,
4126 [ARG_PTR_TO_INT] = &int_ptr_types,
4127 [ARG_PTR_TO_LONG] = &int_ptr_types,
4128 [ARG_PTR_TO_PERCPU_BTF_ID] = &percpu_btf_ptr_types,
4129 };
4130
4131 static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
4132 enum bpf_arg_type arg_type,
4133 const u32 *arg_btf_id)
4134 {
4135 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
4136 enum bpf_reg_type expected, type = reg->type;
4137 const struct bpf_reg_types *compatible;
4138 int i, j;
4139
4140 compatible = compatible_reg_types[arg_type];
4141 if (!compatible) {
4142 verbose(env, "verifier internal error: unsupported arg type %d\n", arg_type);
4143 return -EFAULT;
4144 }
4145
4146 for (i = 0; i < ARRAY_SIZE(compatible->types); i++) {
4147 expected = compatible->types[i];
4148 if (expected == NOT_INIT)
4149 break;
4150
4151 if (type == expected)
4152 goto found;
4153 }
4154
4155 verbose(env, "R%d type=%s expected=", regno, reg_type_str[type]);
4156 for (j = 0; j + 1 < i; j++)
4157 verbose(env, "%s, ", reg_type_str[compatible->types[j]]);
4158 verbose(env, "%s\n", reg_type_str[compatible->types[j]]);
4159 return -EACCES;
4160
4161 found:
4162 if (type == PTR_TO_BTF_ID) {
4163 if (!arg_btf_id) {
4164 if (!compatible->btf_id) {
4165 verbose(env, "verifier internal error: missing arg compatible BTF ID\n");
4166 return -EFAULT;
4167 }
4168 arg_btf_id = compatible->btf_id;
4169 }
4170
4171 if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off,
4172 btf_vmlinux, *arg_btf_id)) {
4173 verbose(env, "R%d is of type %s but %s is expected\n",
4174 regno, kernel_type_name(reg->btf, reg->btf_id),
4175 kernel_type_name(btf_vmlinux, *arg_btf_id));
4176 return -EACCES;
4177 }
4178
4179 if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
4180 verbose(env, "R%d is a pointer to in-kernel struct with non-zero offset\n",
4181 regno);
4182 return -EACCES;
4183 }
4184 }
4185
4186 return 0;
4187 }
4188
4189 static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
4190 struct bpf_call_arg_meta *meta,
4191 const struct bpf_func_proto *fn)
4192 {
4193 u32 regno = BPF_REG_1 + arg;
4194 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
4195 enum bpf_arg_type arg_type = fn->arg_type[arg];
4196 enum bpf_reg_type type = reg->type;
4197 int err = 0;
4198
4199 if (arg_type == ARG_DONTCARE)
4200 return 0;
4201
4202 err = check_reg_arg(env, regno, SRC_OP);
4203 if (err)
4204 return err;
4205
4206 if (arg_type == ARG_ANYTHING) {
4207 if (is_pointer_value(env, regno)) {
4208 verbose(env, "R%d leaks addr into helper function\n",
4209 regno);
4210 return -EACCES;
4211 }
4212 return 0;
4213 }
4214
4215 if (type_is_pkt_pointer(type) &&
4216 !may_access_direct_pkt_data(env, meta, BPF_READ)) {
4217 verbose(env, "helper access to the packet is not allowed\n");
4218 return -EACCES;
4219 }
4220
4221 if (arg_type == ARG_PTR_TO_MAP_VALUE ||
4222 arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE ||
4223 arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL) {
4224 err = resolve_map_arg_type(env, meta, &arg_type);
4225 if (err)
4226 return err;
4227 }
4228
4229 if (register_is_null(reg) && arg_type_may_be_null(arg_type))
4230 /* A NULL register has a SCALAR_VALUE type, so skip
4231 * type checking.
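 *
 * E.g. (a sketch): passing a known-NULL scalar (r2 = 0) for an
 * ARG_PTR_TO_MEM_OR_NULL argument is accepted, and check_reg_type()
 * below is bypassed for that argument.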
4232 */ 4233 goto skip_type_check; 4234 4235 err = check_reg_type(env, regno, arg_type, fn->arg_btf_id[arg]); 4236 if (err) 4237 return err; 4238 4239 if (type == PTR_TO_CTX) { 4240 err = check_ctx_reg(env, reg, regno); 4241 if (err < 0) 4242 return err; 4243 } 4244 4245 skip_type_check: 4246 if (reg->ref_obj_id) { 4247 if (meta->ref_obj_id) { 4248 verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n", 4249 regno, reg->ref_obj_id, 4250 meta->ref_obj_id); 4251 return -EFAULT; 4252 } 4253 meta->ref_obj_id = reg->ref_obj_id; 4254 } 4255 4256 if (arg_type == ARG_CONST_MAP_PTR) { 4257 /* bpf_map_xxx(map_ptr) call: remember that map_ptr */ 4258 meta->map_ptr = reg->map_ptr; 4259 } else if (arg_type == ARG_PTR_TO_MAP_KEY) { 4260 /* bpf_map_xxx(..., map_ptr, ..., key) call: 4261 * check that [key, key + map->key_size) are within 4262 * stack limits and initialized 4263 */ 4264 if (!meta->map_ptr) { 4265 /* in function declaration map_ptr must come before 4266 * map_key, so that it's verified and known before 4267 * we have to check map_key here. Otherwise it means 4268 * that kernel subsystem misconfigured verifier 4269 */ 4270 verbose(env, "invalid map_ptr to access map->key\n"); 4271 return -EACCES; 4272 } 4273 err = check_helper_mem_access(env, regno, 4274 meta->map_ptr->key_size, false, 4275 NULL); 4276 } else if (arg_type == ARG_PTR_TO_MAP_VALUE || 4277 (arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL && 4278 !register_is_null(reg)) || 4279 arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE) { 4280 /* bpf_map_xxx(..., map_ptr, ..., value) call: 4281 * check [value, value + map->value_size) validity 4282 */ 4283 if (!meta->map_ptr) { 4284 /* kernel subsystem misconfigured verifier */ 4285 verbose(env, "invalid map_ptr to access map->value\n"); 4286 return -EACCES; 4287 } 4288 meta->raw_mode = (arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE); 4289 err = check_helper_mem_access(env, regno, 4290 meta->map_ptr->value_size, false, 4291 meta); 4292 } else if (arg_type == ARG_PTR_TO_PERCPU_BTF_ID) { 4293 if (!reg->btf_id) { 4294 verbose(env, "Helper has invalid btf_id in R%d\n", regno); 4295 return -EACCES; 4296 } 4297 meta->ret_btf = reg->btf; 4298 meta->ret_btf_id = reg->btf_id; 4299 } else if (arg_type == ARG_PTR_TO_SPIN_LOCK) { 4300 if (meta->func_id == BPF_FUNC_spin_lock) { 4301 if (process_spin_lock(env, regno, true)) 4302 return -EACCES; 4303 } else if (meta->func_id == BPF_FUNC_spin_unlock) { 4304 if (process_spin_lock(env, regno, false)) 4305 return -EACCES; 4306 } else { 4307 verbose(env, "verifier internal error\n"); 4308 return -EFAULT; 4309 } 4310 } else if (arg_type_is_mem_ptr(arg_type)) { 4311 /* The access to this pointer is only checked when we hit the 4312 * next is_mem_size argument below. 4313 */ 4314 meta->raw_mode = (arg_type == ARG_PTR_TO_UNINIT_MEM); 4315 } else if (arg_type_is_mem_size(arg_type)) { 4316 bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO); 4317 4318 /* This is used to refine r0 return value bounds for helpers 4319 * that enforce this value as an upper bound on return values. 4320 * See do_refine_retval_range() for helpers that can refine 4321 * the return value. C type of helper is u32 so we pull register 4322 * bound from umax_value however, if negative verifier errors 4323 * out. Only upper bounds can be learned because retval is an 4324 * int type and negative retvals are allowed. 4325 */ 4326 meta->msize_max_value = reg->umax_value; 4327 4328 /* The register is SCALAR_VALUE; the access check 4329 * happens using its boundaries. 
4330 */ 4331 if (!tnum_is_const(reg->var_off)) 4332 /* For unprivileged variable accesses, disable raw 4333 * mode so that the program is required to 4334 * initialize all the memory that the helper could 4335 * just partially fill up. 4336 */ 4337 meta = NULL; 4338 4339 if (reg->smin_value < 0) { 4340 verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n", 4341 regno); 4342 return -EACCES; 4343 } 4344 4345 if (reg->umin_value == 0) { 4346 err = check_helper_mem_access(env, regno - 1, 0, 4347 zero_size_allowed, 4348 meta); 4349 if (err) 4350 return err; 4351 } 4352 4353 if (reg->umax_value >= BPF_MAX_VAR_SIZ) { 4354 verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n", 4355 regno); 4356 return -EACCES; 4357 } 4358 err = check_helper_mem_access(env, regno - 1, 4359 reg->umax_value, 4360 zero_size_allowed, meta); 4361 if (!err) 4362 err = mark_chain_precision(env, regno); 4363 } else if (arg_type_is_alloc_size(arg_type)) { 4364 if (!tnum_is_const(reg->var_off)) { 4365 verbose(env, "R%d is not a known constant'\n", 4366 regno); 4367 return -EACCES; 4368 } 4369 meta->mem_size = reg->var_off.value; 4370 } else if (arg_type_is_int_ptr(arg_type)) { 4371 int size = int_ptr_type_to_size(arg_type); 4372 4373 err = check_helper_mem_access(env, regno, size, false, meta); 4374 if (err) 4375 return err; 4376 err = check_ptr_alignment(env, reg, 0, size, true); 4377 } 4378 4379 return err; 4380 } 4381 4382 static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id) 4383 { 4384 enum bpf_attach_type eatype = env->prog->expected_attach_type; 4385 enum bpf_prog_type type = resolve_prog_type(env->prog); 4386 4387 if (func_id != BPF_FUNC_map_update_elem) 4388 return false; 4389 4390 /* It's not possible to get access to a locked struct sock in these 4391 * contexts, so updating is safe. 4392 */ 4393 switch (type) { 4394 case BPF_PROG_TYPE_TRACING: 4395 if (eatype == BPF_TRACE_ITER) 4396 return true; 4397 break; 4398 case BPF_PROG_TYPE_SOCKET_FILTER: 4399 case BPF_PROG_TYPE_SCHED_CLS: 4400 case BPF_PROG_TYPE_SCHED_ACT: 4401 case BPF_PROG_TYPE_XDP: 4402 case BPF_PROG_TYPE_SK_REUSEPORT: 4403 case BPF_PROG_TYPE_FLOW_DISSECTOR: 4404 case BPF_PROG_TYPE_SK_LOOKUP: 4405 return true; 4406 default: 4407 break; 4408 } 4409 4410 verbose(env, "cannot update sockmap in this context\n"); 4411 return false; 4412 } 4413 4414 static bool allow_tail_call_in_subprogs(struct bpf_verifier_env *env) 4415 { 4416 return env->prog->jit_requested && IS_ENABLED(CONFIG_X86_64); 4417 } 4418 4419 static int check_map_func_compatibility(struct bpf_verifier_env *env, 4420 struct bpf_map *map, int func_id) 4421 { 4422 if (!map) 4423 return 0; 4424 4425 /* We need a two way check, first is from map perspective ... 
*/ 4426 switch (map->map_type) { 4427 case BPF_MAP_TYPE_PROG_ARRAY: 4428 if (func_id != BPF_FUNC_tail_call) 4429 goto error; 4430 break; 4431 case BPF_MAP_TYPE_PERF_EVENT_ARRAY: 4432 if (func_id != BPF_FUNC_perf_event_read && 4433 func_id != BPF_FUNC_perf_event_output && 4434 func_id != BPF_FUNC_skb_output && 4435 func_id != BPF_FUNC_perf_event_read_value && 4436 func_id != BPF_FUNC_xdp_output) 4437 goto error; 4438 break; 4439 case BPF_MAP_TYPE_RINGBUF: 4440 if (func_id != BPF_FUNC_ringbuf_output && 4441 func_id != BPF_FUNC_ringbuf_reserve && 4442 func_id != BPF_FUNC_ringbuf_submit && 4443 func_id != BPF_FUNC_ringbuf_discard && 4444 func_id != BPF_FUNC_ringbuf_query) 4445 goto error; 4446 break; 4447 case BPF_MAP_TYPE_STACK_TRACE: 4448 if (func_id != BPF_FUNC_get_stackid) 4449 goto error; 4450 break; 4451 case BPF_MAP_TYPE_CGROUP_ARRAY: 4452 if (func_id != BPF_FUNC_skb_under_cgroup && 4453 func_id != BPF_FUNC_current_task_under_cgroup) 4454 goto error; 4455 break; 4456 case BPF_MAP_TYPE_CGROUP_STORAGE: 4457 case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: 4458 if (func_id != BPF_FUNC_get_local_storage) 4459 goto error; 4460 break; 4461 case BPF_MAP_TYPE_DEVMAP: 4462 case BPF_MAP_TYPE_DEVMAP_HASH: 4463 if (func_id != BPF_FUNC_redirect_map && 4464 func_id != BPF_FUNC_map_lookup_elem) 4465 goto error; 4466 break; 4467 /* Restrict bpf side of cpumap and xskmap, open when use-cases 4468 * appear. 4469 */ 4470 case BPF_MAP_TYPE_CPUMAP: 4471 if (func_id != BPF_FUNC_redirect_map) 4472 goto error; 4473 break; 4474 case BPF_MAP_TYPE_XSKMAP: 4475 if (func_id != BPF_FUNC_redirect_map && 4476 func_id != BPF_FUNC_map_lookup_elem) 4477 goto error; 4478 break; 4479 case BPF_MAP_TYPE_ARRAY_OF_MAPS: 4480 case BPF_MAP_TYPE_HASH_OF_MAPS: 4481 if (func_id != BPF_FUNC_map_lookup_elem) 4482 goto error; 4483 break; 4484 case BPF_MAP_TYPE_SOCKMAP: 4485 if (func_id != BPF_FUNC_sk_redirect_map && 4486 func_id != BPF_FUNC_sock_map_update && 4487 func_id != BPF_FUNC_map_delete_elem && 4488 func_id != BPF_FUNC_msg_redirect_map && 4489 func_id != BPF_FUNC_sk_select_reuseport && 4490 func_id != BPF_FUNC_map_lookup_elem && 4491 !may_update_sockmap(env, func_id)) 4492 goto error; 4493 break; 4494 case BPF_MAP_TYPE_SOCKHASH: 4495 if (func_id != BPF_FUNC_sk_redirect_hash && 4496 func_id != BPF_FUNC_sock_hash_update && 4497 func_id != BPF_FUNC_map_delete_elem && 4498 func_id != BPF_FUNC_msg_redirect_hash && 4499 func_id != BPF_FUNC_sk_select_reuseport && 4500 func_id != BPF_FUNC_map_lookup_elem && 4501 !may_update_sockmap(env, func_id)) 4502 goto error; 4503 break; 4504 case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY: 4505 if (func_id != BPF_FUNC_sk_select_reuseport) 4506 goto error; 4507 break; 4508 case BPF_MAP_TYPE_QUEUE: 4509 case BPF_MAP_TYPE_STACK: 4510 if (func_id != BPF_FUNC_map_peek_elem && 4511 func_id != BPF_FUNC_map_pop_elem && 4512 func_id != BPF_FUNC_map_push_elem) 4513 goto error; 4514 break; 4515 case BPF_MAP_TYPE_SK_STORAGE: 4516 if (func_id != BPF_FUNC_sk_storage_get && 4517 func_id != BPF_FUNC_sk_storage_delete) 4518 goto error; 4519 break; 4520 case BPF_MAP_TYPE_INODE_STORAGE: 4521 if (func_id != BPF_FUNC_inode_storage_get && 4522 func_id != BPF_FUNC_inode_storage_delete) 4523 goto error; 4524 break; 4525 case BPF_MAP_TYPE_TASK_STORAGE: 4526 if (func_id != BPF_FUNC_task_storage_get && 4527 func_id != BPF_FUNC_task_storage_delete) 4528 goto error; 4529 break; 4530 default: 4531 break; 4532 } 4533 4534 /* ... and second from the function itself. 
*/ 4535 switch (func_id) { 4536 case BPF_FUNC_tail_call: 4537 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY) 4538 goto error; 4539 if (env->subprog_cnt > 1 && !allow_tail_call_in_subprogs(env)) { 4540 verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n"); 4541 return -EINVAL; 4542 } 4543 break; 4544 case BPF_FUNC_perf_event_read: 4545 case BPF_FUNC_perf_event_output: 4546 case BPF_FUNC_perf_event_read_value: 4547 case BPF_FUNC_skb_output: 4548 case BPF_FUNC_xdp_output: 4549 if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) 4550 goto error; 4551 break; 4552 case BPF_FUNC_get_stackid: 4553 if (map->map_type != BPF_MAP_TYPE_STACK_TRACE) 4554 goto error; 4555 break; 4556 case BPF_FUNC_current_task_under_cgroup: 4557 case BPF_FUNC_skb_under_cgroup: 4558 if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY) 4559 goto error; 4560 break; 4561 case BPF_FUNC_redirect_map: 4562 if (map->map_type != BPF_MAP_TYPE_DEVMAP && 4563 map->map_type != BPF_MAP_TYPE_DEVMAP_HASH && 4564 map->map_type != BPF_MAP_TYPE_CPUMAP && 4565 map->map_type != BPF_MAP_TYPE_XSKMAP) 4566 goto error; 4567 break; 4568 case BPF_FUNC_sk_redirect_map: 4569 case BPF_FUNC_msg_redirect_map: 4570 case BPF_FUNC_sock_map_update: 4571 if (map->map_type != BPF_MAP_TYPE_SOCKMAP) 4572 goto error; 4573 break; 4574 case BPF_FUNC_sk_redirect_hash: 4575 case BPF_FUNC_msg_redirect_hash: 4576 case BPF_FUNC_sock_hash_update: 4577 if (map->map_type != BPF_MAP_TYPE_SOCKHASH) 4578 goto error; 4579 break; 4580 case BPF_FUNC_get_local_storage: 4581 if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE && 4582 map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) 4583 goto error; 4584 break; 4585 case BPF_FUNC_sk_select_reuseport: 4586 if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY && 4587 map->map_type != BPF_MAP_TYPE_SOCKMAP && 4588 map->map_type != BPF_MAP_TYPE_SOCKHASH) 4589 goto error; 4590 break; 4591 case BPF_FUNC_map_peek_elem: 4592 case BPF_FUNC_map_pop_elem: 4593 case BPF_FUNC_map_push_elem: 4594 if (map->map_type != BPF_MAP_TYPE_QUEUE && 4595 map->map_type != BPF_MAP_TYPE_STACK) 4596 goto error; 4597 break; 4598 case BPF_FUNC_sk_storage_get: 4599 case BPF_FUNC_sk_storage_delete: 4600 if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) 4601 goto error; 4602 break; 4603 case BPF_FUNC_inode_storage_get: 4604 case BPF_FUNC_inode_storage_delete: 4605 if (map->map_type != BPF_MAP_TYPE_INODE_STORAGE) 4606 goto error; 4607 break; 4608 case BPF_FUNC_task_storage_get: 4609 case BPF_FUNC_task_storage_delete: 4610 if (map->map_type != BPF_MAP_TYPE_TASK_STORAGE) 4611 goto error; 4612 break; 4613 default: 4614 break; 4615 } 4616 4617 return 0; 4618 error: 4619 verbose(env, "cannot pass map_type %d into func %s#%d\n", 4620 map->map_type, func_id_name(func_id), func_id); 4621 return -EINVAL; 4622 } 4623 4624 static bool check_raw_mode_ok(const struct bpf_func_proto *fn) 4625 { 4626 int count = 0; 4627 4628 if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM) 4629 count++; 4630 if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM) 4631 count++; 4632 if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM) 4633 count++; 4634 if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM) 4635 count++; 4636 if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM) 4637 count++; 4638 4639 /* We only support one arg being in raw mode at the moment, 4640 * which is sufficient for the helper functions we have 4641 * right now. 
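 *
 * E.g. bpf_probe_read_kernel() declares only its first argument as
 * ARG_PTR_TO_UNINIT_MEM: the helper itself fills the buffer, so the
 * verifier does not require those bytes to be initialized beforehand.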
4642 */ 4643 return count <= 1; 4644 } 4645 4646 static bool check_args_pair_invalid(enum bpf_arg_type arg_curr, 4647 enum bpf_arg_type arg_next) 4648 { 4649 return (arg_type_is_mem_ptr(arg_curr) && 4650 !arg_type_is_mem_size(arg_next)) || 4651 (!arg_type_is_mem_ptr(arg_curr) && 4652 arg_type_is_mem_size(arg_next)); 4653 } 4654 4655 static bool check_arg_pair_ok(const struct bpf_func_proto *fn) 4656 { 4657 /* bpf_xxx(..., buf, len) call will access 'len' 4658 * bytes from memory 'buf'. Both arg types need 4659 * to be paired, so make sure there's no buggy 4660 * helper function specification. 4661 */ 4662 if (arg_type_is_mem_size(fn->arg1_type) || 4663 arg_type_is_mem_ptr(fn->arg5_type) || 4664 check_args_pair_invalid(fn->arg1_type, fn->arg2_type) || 4665 check_args_pair_invalid(fn->arg2_type, fn->arg3_type) || 4666 check_args_pair_invalid(fn->arg3_type, fn->arg4_type) || 4667 check_args_pair_invalid(fn->arg4_type, fn->arg5_type)) 4668 return false; 4669 4670 return true; 4671 } 4672 4673 static bool check_refcount_ok(const struct bpf_func_proto *fn, int func_id) 4674 { 4675 int count = 0; 4676 4677 if (arg_type_may_be_refcounted(fn->arg1_type)) 4678 count++; 4679 if (arg_type_may_be_refcounted(fn->arg2_type)) 4680 count++; 4681 if (arg_type_may_be_refcounted(fn->arg3_type)) 4682 count++; 4683 if (arg_type_may_be_refcounted(fn->arg4_type)) 4684 count++; 4685 if (arg_type_may_be_refcounted(fn->arg5_type)) 4686 count++; 4687 4688 /* A reference acquiring function cannot acquire 4689 * another refcounted ptr. 4690 */ 4691 if (may_be_acquire_function(func_id) && count) 4692 return false; 4693 4694 /* We only support one arg being unreferenced at the moment, 4695 * which is sufficient for the helper functions we have right now. 4696 */ 4697 return count <= 1; 4698 } 4699 4700 static bool check_btf_id_ok(const struct bpf_func_proto *fn) 4701 { 4702 int i; 4703 4704 for (i = 0; i < ARRAY_SIZE(fn->arg_type); i++) { 4705 if (fn->arg_type[i] == ARG_PTR_TO_BTF_ID && !fn->arg_btf_id[i]) 4706 return false; 4707 4708 if (fn->arg_type[i] != ARG_PTR_TO_BTF_ID && fn->arg_btf_id[i]) 4709 return false; 4710 } 4711 4712 return true; 4713 } 4714 4715 static int check_func_proto(const struct bpf_func_proto *fn, int func_id) 4716 { 4717 return check_raw_mode_ok(fn) && 4718 check_arg_pair_ok(fn) && 4719 check_btf_id_ok(fn) && 4720 check_refcount_ok(fn, func_id) ? 0 : -EINVAL; 4721 } 4722 4723 /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END] 4724 * are now invalid, so turn them into unknown SCALAR_VALUE. 
4725 */ 4726 static void __clear_all_pkt_pointers(struct bpf_verifier_env *env, 4727 struct bpf_func_state *state) 4728 { 4729 struct bpf_reg_state *regs = state->regs, *reg; 4730 int i; 4731 4732 for (i = 0; i < MAX_BPF_REG; i++) 4733 if (reg_is_pkt_pointer_any(®s[i])) 4734 mark_reg_unknown(env, regs, i); 4735 4736 bpf_for_each_spilled_reg(i, state, reg) { 4737 if (!reg) 4738 continue; 4739 if (reg_is_pkt_pointer_any(reg)) 4740 __mark_reg_unknown(env, reg); 4741 } 4742 } 4743 4744 static void clear_all_pkt_pointers(struct bpf_verifier_env *env) 4745 { 4746 struct bpf_verifier_state *vstate = env->cur_state; 4747 int i; 4748 4749 for (i = 0; i <= vstate->curframe; i++) 4750 __clear_all_pkt_pointers(env, vstate->frame[i]); 4751 } 4752 4753 enum { 4754 AT_PKT_END = -1, 4755 BEYOND_PKT_END = -2, 4756 }; 4757 4758 static void mark_pkt_end(struct bpf_verifier_state *vstate, int regn, bool range_open) 4759 { 4760 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 4761 struct bpf_reg_state *reg = &state->regs[regn]; 4762 4763 if (reg->type != PTR_TO_PACKET) 4764 /* PTR_TO_PACKET_META is not supported yet */ 4765 return; 4766 4767 /* The 'reg' is pkt > pkt_end or pkt >= pkt_end. 4768 * How far beyond pkt_end it goes is unknown. 4769 * if (!range_open) it's the case of pkt >= pkt_end 4770 * if (range_open) it's the case of pkt > pkt_end 4771 * hence this pointer is at least 1 byte bigger than pkt_end 4772 */ 4773 if (range_open) 4774 reg->range = BEYOND_PKT_END; 4775 else 4776 reg->range = AT_PKT_END; 4777 } 4778 4779 static void release_reg_references(struct bpf_verifier_env *env, 4780 struct bpf_func_state *state, 4781 int ref_obj_id) 4782 { 4783 struct bpf_reg_state *regs = state->regs, *reg; 4784 int i; 4785 4786 for (i = 0; i < MAX_BPF_REG; i++) 4787 if (regs[i].ref_obj_id == ref_obj_id) 4788 mark_reg_unknown(env, regs, i); 4789 4790 bpf_for_each_spilled_reg(i, state, reg) { 4791 if (!reg) 4792 continue; 4793 if (reg->ref_obj_id == ref_obj_id) 4794 __mark_reg_unknown(env, reg); 4795 } 4796 } 4797 4798 /* The pointer with the specified id has released its reference to kernel 4799 * resources. Identify all copies of the same pointer and clear the reference. 
4800 */ 4801 static int release_reference(struct bpf_verifier_env *env, 4802 int ref_obj_id) 4803 { 4804 struct bpf_verifier_state *vstate = env->cur_state; 4805 int err; 4806 int i; 4807 4808 err = release_reference_state(cur_func(env), ref_obj_id); 4809 if (err) 4810 return err; 4811 4812 for (i = 0; i <= vstate->curframe; i++) 4813 release_reg_references(env, vstate->frame[i], ref_obj_id); 4814 4815 return 0; 4816 } 4817 4818 static void clear_caller_saved_regs(struct bpf_verifier_env *env, 4819 struct bpf_reg_state *regs) 4820 { 4821 int i; 4822 4823 /* after the call registers r0 - r5 were scratched */ 4824 for (i = 0; i < CALLER_SAVED_REGS; i++) { 4825 mark_reg_not_init(env, regs, caller_saved[i]); 4826 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); 4827 } 4828 } 4829 4830 static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, 4831 int *insn_idx) 4832 { 4833 struct bpf_verifier_state *state = env->cur_state; 4834 struct bpf_func_info_aux *func_info_aux; 4835 struct bpf_func_state *caller, *callee; 4836 int i, err, subprog, target_insn; 4837 bool is_global = false; 4838 4839 if (state->curframe + 1 >= MAX_CALL_FRAMES) { 4840 verbose(env, "the call stack of %d frames is too deep\n", 4841 state->curframe + 2); 4842 return -E2BIG; 4843 } 4844 4845 target_insn = *insn_idx + insn->imm; 4846 subprog = find_subprog(env, target_insn + 1); 4847 if (subprog < 0) { 4848 verbose(env, "verifier bug. No program starts at insn %d\n", 4849 target_insn + 1); 4850 return -EFAULT; 4851 } 4852 4853 caller = state->frame[state->curframe]; 4854 if (state->frame[state->curframe + 1]) { 4855 verbose(env, "verifier bug. Frame %d already allocated\n", 4856 state->curframe + 1); 4857 return -EFAULT; 4858 } 4859 4860 func_info_aux = env->prog->aux->func_info_aux; 4861 if (func_info_aux) 4862 is_global = func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL; 4863 err = btf_check_func_arg_match(env, subprog, caller->regs); 4864 if (err == -EFAULT) 4865 return err; 4866 if (is_global) { 4867 if (err) { 4868 verbose(env, "Caller passes invalid args into func#%d\n", 4869 subprog); 4870 return err; 4871 } else { 4872 if (env->log.level & BPF_LOG_LEVEL) 4873 verbose(env, 4874 "Func#%d is global and valid. Skipping.\n", 4875 subprog); 4876 clear_caller_saved_regs(env, caller->regs); 4877 4878 /* All global functions return SCALAR_VALUE */ 4879 mark_reg_unknown(env, caller->regs, BPF_REG_0); 4880 4881 /* continue with next insn after call */ 4882 return 0; 4883 } 4884 } 4885 4886 callee = kzalloc(sizeof(*callee), GFP_KERNEL); 4887 if (!callee) 4888 return -ENOMEM; 4889 state->frame[state->curframe + 1] = callee; 4890 4891 /* callee cannot access r0, r6 - r9 for reading and has to write 4892 * into its own stack before reading from it. 4893 * callee can read/write into caller's stack 4894 */ 4895 init_func_state(env, callee, 4896 /* remember the callsite, it will be used by bpf_exit */ 4897 *insn_idx /* callsite */, 4898 state->curframe + 1 /* frameno within this callchain */, 4899 subprog /* subprog number within this prog */); 4900 4901 /* Transfer references to the callee */ 4902 err = transfer_reference_state(callee, caller); 4903 if (err) 4904 return err; 4905 4906 /* copy r1 - r5 args that callee can access. 
The copy includes parent
4907 * pointers, which connects us up to the liveness chain
4908 */
4909 for (i = BPF_REG_1; i <= BPF_REG_5; i++)
4910 callee->regs[i] = caller->regs[i];
4911
4912 clear_caller_saved_regs(env, caller->regs);
4913
4914 /* only increment it after check_reg_arg() finished */
4915 state->curframe++;
4916
4917 /* and go analyze first insn of the callee */
4918 *insn_idx = target_insn;
4919
4920 if (env->log.level & BPF_LOG_LEVEL) {
4921 verbose(env, "caller:\n");
4922 print_verifier_state(env, caller);
4923 verbose(env, "callee:\n");
4924 print_verifier_state(env, callee);
4925 }
4926 return 0;
4927 }
4928
4929 static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
4930 {
4931 struct bpf_verifier_state *state = env->cur_state;
4932 struct bpf_func_state *caller, *callee;
4933 struct bpf_reg_state *r0;
4934 int err;
4935
4936 callee = state->frame[state->curframe];
4937 r0 = &callee->regs[BPF_REG_0];
4938 if (r0->type == PTR_TO_STACK) {
4939 /* technically it's ok to return caller's stack pointer
4940 * (or caller's caller's pointer) back to the caller,
4941 * since these pointers are valid. Only current stack
4942 * pointer will be invalid as soon as function exits,
4943 * but let's be conservative
4944 */
4945 verbose(env, "cannot return stack pointer to the caller\n");
4946 return -EINVAL;
4947 }
4948
4949 state->curframe--;
4950 caller = state->frame[state->curframe];
4951 /* return to the caller whatever r0 had in the callee */
4952 caller->regs[BPF_REG_0] = *r0;
4953
4954 /* Transfer references to the caller */
4955 err = transfer_reference_state(caller, callee);
4956 if (err)
4957 return err;
4958
4959 *insn_idx = callee->callsite + 1;
4960 if (env->log.level & BPF_LOG_LEVEL) {
4961 verbose(env, "returning from callee:\n");
4962 print_verifier_state(env, callee);
4963 verbose(env, "to caller at %d:\n", *insn_idx);
4964 print_verifier_state(env, caller);
4965 }
4966 /* clear everything in the callee */
4967 free_func_state(callee);
4968 state->frame[state->curframe + 1] = NULL;
4969 return 0;
4970 }
4971
4972 static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
4973 int func_id,
4974 struct bpf_call_arg_meta *meta)
4975 {
4976 struct bpf_reg_state *ret_reg = &regs[BPF_REG_0];
4977
4978 if (ret_type != RET_INTEGER ||
4979 (func_id != BPF_FUNC_get_stack &&
4980 func_id != BPF_FUNC_probe_read_str &&
4981 func_id != BPF_FUNC_probe_read_kernel_str &&
4982 func_id != BPF_FUNC_probe_read_user_str))
4983 return;
4984
4985 ret_reg->smax_value = meta->msize_max_value;
4986 ret_reg->s32_max_value = meta->msize_max_value;
4987 ret_reg->smin_value = -MAX_ERRNO;
4988 ret_reg->s32_min_value = -MAX_ERRNO;
4989 __reg_deduce_bounds(ret_reg);
4990 __reg_bound_offset(ret_reg);
4991 __update_reg_bounds(ret_reg);
4992 }
4993
4994 static int
4995 record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
4996 int func_id, int insn_idx)
4997 {
4998 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
4999 struct bpf_map *map = meta->map_ptr;
5000
5001 if (func_id != BPF_FUNC_tail_call &&
5002 func_id != BPF_FUNC_map_lookup_elem &&
5003 func_id != BPF_FUNC_map_update_elem &&
5004 func_id != BPF_FUNC_map_delete_elem &&
5005 func_id != BPF_FUNC_map_push_elem &&
5006 func_id != BPF_FUNC_map_pop_elem &&
5007 func_id != BPF_FUNC_map_peek_elem)
5008 return 0;
5009
5010 if (map == NULL) {
5011 verbose(env, "kernel subsystem misconfigured verifier\n");
5012 return -EINVAL;
5013 }
5014
5015 /* In case of read-only, some additional restrictions
5016 * need to be applied in order to prevent altering the
5017 * state of the map from program side.
5018 */
5019 if ((map->map_flags & BPF_F_RDONLY_PROG) &&
5020 (func_id == BPF_FUNC_map_delete_elem ||
5021 func_id == BPF_FUNC_map_update_elem ||
5022 func_id == BPF_FUNC_map_push_elem ||
5023 func_id == BPF_FUNC_map_pop_elem)) {
5024 verbose(env, "write into map forbidden\n");
5025 return -EACCES;
5026 }
5027
5028 if (!BPF_MAP_PTR(aux->map_ptr_state))
5029 bpf_map_ptr_store(aux, meta->map_ptr,
5030 !meta->map_ptr->bypass_spec_v1);
5031 else if (BPF_MAP_PTR(aux->map_ptr_state) != meta->map_ptr)
5032 bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON,
5033 !meta->map_ptr->bypass_spec_v1);
5034 return 0;
5035 }
5036
5037 static int
5038 record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
5039 int func_id, int insn_idx)
5040 {
5041 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
5042 struct bpf_reg_state *regs = cur_regs(env), *reg;
5043 struct bpf_map *map = meta->map_ptr;
5044 struct tnum range;
5045 u64 val;
5046 int err;
5047
5048 if (func_id != BPF_FUNC_tail_call)
5049 return 0;
5050 if (!map || map->map_type != BPF_MAP_TYPE_PROG_ARRAY) {
5051 verbose(env, "kernel subsystem misconfigured verifier\n");
5052 return -EINVAL;
5053 }
5054
5055 range = tnum_range(0, map->max_entries - 1);
5056 reg = &regs[BPF_REG_3];
5057
5058 if (!register_is_const(reg) || !tnum_in(range, reg->var_off)) {
5059 bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
5060 return 0;
5061 }
5062
5063 err = mark_chain_precision(env, BPF_REG_3);
5064 if (err)
5065 return err;
5066
5067 val = reg->var_off.value;
5068 if (bpf_map_key_unseen(aux))
5069 bpf_map_key_store(aux, val);
5070 else if (!bpf_map_key_poisoned(aux) &&
5071 bpf_map_key_immediate(aux) != val)
5072 bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
5073 return 0;
5074 }
5075
5076 static int check_reference_leak(struct bpf_verifier_env *env)
5077 {
5078 struct bpf_func_state *state = cur_func(env);
5079 int i;
5080
5081 for (i = 0; i < state->acquired_refs; i++) {
5082 verbose(env, "Unreleased reference id=%d alloc_insn=%d\n",
5083 state->refs[i].id, state->refs[i].insn_idx);
5084 }
5085 return state->acquired_refs ? -EINVAL : 0;
5086 }
5087
5088 static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
5089 {
5090 const struct bpf_func_proto *fn = NULL;
5091 struct bpf_reg_state *regs;
5092 struct bpf_call_arg_meta meta;
5093 bool changes_data;
5094 int i, err;
5095
5096 /* find function prototype */
5097 if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
5098 verbose(env, "invalid func %s#%d\n", func_id_name(func_id),
5099 func_id);
5100 return -EINVAL;
5101 }
5102
5103 if (env->ops->get_func_proto)
5104 fn = env->ops->get_func_proto(func_id, env->prog);
5105 if (!fn) {
5106 verbose(env, "unknown func %s#%d\n", func_id_name(func_id),
5107 func_id);
5108 return -EINVAL;
5109 }
5110
5111 /* eBPF programs must be GPL compatible to use GPL-ed functions */
5112 if (!env->prog->gpl_compatible && fn->gpl_only) {
5113 verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n");
5114 return -EINVAL;
5115 }
5116
5117 if (fn->allowed && !fn->allowed(env->prog)) {
5118 verbose(env, "helper call is not allowed in probe\n");
5119 return -EINVAL;
5120 }
5121
5122 /* With LD_ABS/IND some JITs save/restore skb from r1.
*/
5123 changes_data = bpf_helper_changes_pkt_data(fn->func);
5124 if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
5125 verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
5126 func_id_name(func_id), func_id);
5127 return -EINVAL;
5128 }
5129
5130 memset(&meta, 0, sizeof(meta));
5131 meta.pkt_access = fn->pkt_access;
5132
5133 err = check_func_proto(fn, func_id);
5134 if (err) {
5135 verbose(env, "kernel subsystem misconfigured func %s#%d\n",
5136 func_id_name(func_id), func_id);
5137 return err;
5138 }
5139
5140 meta.func_id = func_id;
5141 /* check args */
5142 for (i = 0; i < 5; i++) {
5143 err = check_func_arg(env, i, &meta, fn);
5144 if (err)
5145 return err;
5146 }
5147
5148 err = record_func_map(env, &meta, func_id, insn_idx);
5149 if (err)
5150 return err;
5151
5152 err = record_func_key(env, &meta, func_id, insn_idx);
5153 if (err)
5154 return err;
5155
5156 /* Mark slots with STACK_MISC in case of raw mode, stack offset
5157 * is inferred from register state.
5158 */
5159 for (i = 0; i < meta.access_size; i++) {
5160 err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
5161 BPF_WRITE, -1, false);
5162 if (err)
5163 return err;
5164 }
5165
5166 if (func_id == BPF_FUNC_tail_call) {
5167 err = check_reference_leak(env);
5168 if (err) {
5169 verbose(env, "tail_call would lead to reference leak\n");
5170 return err;
5171 }
5172 } else if (is_release_function(func_id)) {
5173 err = release_reference(env, meta.ref_obj_id);
5174 if (err) {
5175 verbose(env, "func %s#%d reference has not been acquired before\n",
5176 func_id_name(func_id), func_id);
5177 return err;
5178 }
5179 }
5180
5181 regs = cur_regs(env);
5182
5183 /* check that flags argument in get_local_storage(map, flags) is 0,
5184 * this is required because get_local_storage() can't return an error.
5185 */
5186 if (func_id == BPF_FUNC_get_local_storage &&
5187 !register_is_null(&regs[BPF_REG_2])) {
5188 verbose(env, "get_local_storage() doesn't support non-zero flags\n");
5189 return -EINVAL;
5190 }
5191
5192 /* reset caller saved regs */
5193 for (i = 0; i < CALLER_SAVED_REGS; i++) {
5194 mark_reg_not_init(env, regs, caller_saved[i]);
5195 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
5196 }
5197
5198 /* helper call returns 64-bit value.
*/ 5199 regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG; 5200 5201 /* update return register (already marked as written above) */ 5202 if (fn->ret_type == RET_INTEGER) { 5203 /* sets type to SCALAR_VALUE */ 5204 mark_reg_unknown(env, regs, BPF_REG_0); 5205 } else if (fn->ret_type == RET_VOID) { 5206 regs[BPF_REG_0].type = NOT_INIT; 5207 } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL || 5208 fn->ret_type == RET_PTR_TO_MAP_VALUE) { 5209 /* There is no offset yet applied, variable or fixed */ 5210 mark_reg_known_zero(env, regs, BPF_REG_0); 5211 /* remember map_ptr, so that check_map_access() 5212 * can check 'value_size' boundary of memory access 5213 * to map element returned from bpf_map_lookup_elem() 5214 */ 5215 if (meta.map_ptr == NULL) { 5216 verbose(env, 5217 "kernel subsystem misconfigured verifier\n"); 5218 return -EINVAL; 5219 } 5220 regs[BPF_REG_0].map_ptr = meta.map_ptr; 5221 if (fn->ret_type == RET_PTR_TO_MAP_VALUE) { 5222 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE; 5223 if (map_value_has_spin_lock(meta.map_ptr)) 5224 regs[BPF_REG_0].id = ++env->id_gen; 5225 } else { 5226 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL; 5227 } 5228 } else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) { 5229 mark_reg_known_zero(env, regs, BPF_REG_0); 5230 regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL; 5231 } else if (fn->ret_type == RET_PTR_TO_SOCK_COMMON_OR_NULL) { 5232 mark_reg_known_zero(env, regs, BPF_REG_0); 5233 regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON_OR_NULL; 5234 } else if (fn->ret_type == RET_PTR_TO_TCP_SOCK_OR_NULL) { 5235 mark_reg_known_zero(env, regs, BPF_REG_0); 5236 regs[BPF_REG_0].type = PTR_TO_TCP_SOCK_OR_NULL; 5237 } else if (fn->ret_type == RET_PTR_TO_ALLOC_MEM_OR_NULL) { 5238 mark_reg_known_zero(env, regs, BPF_REG_0); 5239 regs[BPF_REG_0].type = PTR_TO_MEM_OR_NULL; 5240 regs[BPF_REG_0].mem_size = meta.mem_size; 5241 } else if (fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL || 5242 fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID) { 5243 const struct btf_type *t; 5244 5245 mark_reg_known_zero(env, regs, BPF_REG_0); 5246 t = btf_type_skip_modifiers(meta.ret_btf, meta.ret_btf_id, NULL); 5247 if (!btf_type_is_struct(t)) { 5248 u32 tsize; 5249 const struct btf_type *ret; 5250 const char *tname; 5251 5252 /* resolve the type size of ksym. */ 5253 ret = btf_resolve_size(meta.ret_btf, t, &tsize); 5254 if (IS_ERR(ret)) { 5255 tname = btf_name_by_offset(meta.ret_btf, t->name_off); 5256 verbose(env, "unable to resolve the size of type '%s': %ld\n", 5257 tname, PTR_ERR(ret)); 5258 return -EINVAL; 5259 } 5260 regs[BPF_REG_0].type = 5261 fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID ? 5262 PTR_TO_MEM : PTR_TO_MEM_OR_NULL; 5263 regs[BPF_REG_0].mem_size = tsize; 5264 } else { 5265 regs[BPF_REG_0].type = 5266 fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID ? 5267 PTR_TO_BTF_ID : PTR_TO_BTF_ID_OR_NULL; 5268 regs[BPF_REG_0].btf = meta.ret_btf; 5269 regs[BPF_REG_0].btf_id = meta.ret_btf_id; 5270 } 5271 } else if (fn->ret_type == RET_PTR_TO_BTF_ID_OR_NULL || 5272 fn->ret_type == RET_PTR_TO_BTF_ID) { 5273 int ret_btf_id; 5274 5275 mark_reg_known_zero(env, regs, BPF_REG_0); 5276 regs[BPF_REG_0].type = fn->ret_type == RET_PTR_TO_BTF_ID ? 
5277 PTR_TO_BTF_ID : 5278 PTR_TO_BTF_ID_OR_NULL; 5279 ret_btf_id = *fn->ret_btf_id; 5280 if (ret_btf_id == 0) { 5281 verbose(env, "invalid return type %d of func %s#%d\n", 5282 fn->ret_type, func_id_name(func_id), func_id); 5283 return -EINVAL; 5284 } 5285 /* current BPF helper definitions are only coming from 5286 * built-in code with type IDs from vmlinux BTF 5287 */ 5288 regs[BPF_REG_0].btf = btf_vmlinux; 5289 regs[BPF_REG_0].btf_id = ret_btf_id; 5290 } else { 5291 verbose(env, "unknown return type %d of func %s#%d\n", 5292 fn->ret_type, func_id_name(func_id), func_id); 5293 return -EINVAL; 5294 } 5295 5296 if (reg_type_may_be_null(regs[BPF_REG_0].type)) 5297 regs[BPF_REG_0].id = ++env->id_gen; 5298 5299 if (is_ptr_cast_function(func_id)) { 5300 /* For release_reference() */ 5301 regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id; 5302 } else if (is_acquire_function(func_id, meta.map_ptr)) { 5303 int id = acquire_reference_state(env, insn_idx); 5304 5305 if (id < 0) 5306 return id; 5307 /* For mark_ptr_or_null_reg() */ 5308 regs[BPF_REG_0].id = id; 5309 /* For release_reference() */ 5310 regs[BPF_REG_0].ref_obj_id = id; 5311 } 5312 5313 do_refine_retval_range(regs, fn->ret_type, func_id, &meta); 5314 5315 err = check_map_func_compatibility(env, meta.map_ptr, func_id); 5316 if (err) 5317 return err; 5318 5319 if ((func_id == BPF_FUNC_get_stack || 5320 func_id == BPF_FUNC_get_task_stack) && 5321 !env->prog->has_callchain_buf) { 5322 const char *err_str; 5323 5324 #ifdef CONFIG_PERF_EVENTS 5325 err = get_callchain_buffers(sysctl_perf_event_max_stack); 5326 err_str = "cannot get callchain buffer for func %s#%d\n"; 5327 #else 5328 err = -ENOTSUPP; 5329 err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n"; 5330 #endif 5331 if (err) { 5332 verbose(env, err_str, func_id_name(func_id), func_id); 5333 return err; 5334 } 5335 5336 env->prog->has_callchain_buf = true; 5337 } 5338 5339 if (func_id == BPF_FUNC_get_stackid || func_id == BPF_FUNC_get_stack) 5340 env->prog->call_get_stack = true; 5341 5342 if (changes_data) 5343 clear_all_pkt_pointers(env); 5344 return 0; 5345 } 5346 5347 static bool signed_add_overflows(s64 a, s64 b) 5348 { 5349 /* Do the add in u64, where overflow is well-defined */ 5350 s64 res = (s64)((u64)a + (u64)b); 5351 5352 if (b < 0) 5353 return res > a; 5354 return res < a; 5355 } 5356 5357 static bool signed_add32_overflows(s32 a, s32 b) 5358 { 5359 /* Do the add in u32, where overflow is well-defined */ 5360 s32 res = (s32)((u32)a + (u32)b); 5361 5362 if (b < 0) 5363 return res > a; 5364 return res < a; 5365 } 5366 5367 static bool signed_sub_overflows(s64 a, s64 b) 5368 { 5369 /* Do the sub in u64, where overflow is well-defined */ 5370 s64 res = (s64)((u64)a - (u64)b); 5371 5372 if (b < 0) 5373 return res < a; 5374 return res > a; 5375 } 5376 5377 static bool signed_sub32_overflows(s32 a, s32 b) 5378 { 5379 /* Do the sub in u32, where overflow is well-defined */ 5380 s32 res = (s32)((u32)a - (u32)b); 5381 5382 if (b < 0) 5383 return res < a; 5384 return res > a; 5385 } 5386 5387 static bool check_reg_sane_offset(struct bpf_verifier_env *env, 5388 const struct bpf_reg_state *reg, 5389 enum bpf_reg_type type) 5390 { 5391 bool known = tnum_is_const(reg->var_off); 5392 s64 val = reg->var_off.value; 5393 s64 smin = reg->smin_value; 5394 5395 if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) { 5396 verbose(env, "math between %s pointer and %lld is not allowed\n", 5397 reg_type_str[type], val); 5398 return false; 5399 } 5400 5401 if (reg->off >= 
BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) { 5402 verbose(env, "%s pointer offset %d is not allowed\n", 5403 reg_type_str[type], reg->off); 5404 return false; 5405 } 5406 5407 if (smin == S64_MIN) { 5408 verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n", 5409 reg_type_str[type]); 5410 return false; 5411 } 5412 5413 if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) { 5414 verbose(env, "value %lld makes %s pointer be out of bounds\n", 5415 smin, reg_type_str[type]); 5416 return false; 5417 } 5418 5419 return true; 5420 } 5421 5422 static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env) 5423 { 5424 return &env->insn_aux_data[env->insn_idx]; 5425 } 5426 5427 static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg, 5428 u32 *ptr_limit, u8 opcode, bool off_is_neg) 5429 { 5430 bool mask_to_left = (opcode == BPF_ADD && off_is_neg) || 5431 (opcode == BPF_SUB && !off_is_neg); 5432 u32 off; 5433 5434 switch (ptr_reg->type) { 5435 case PTR_TO_STACK: 5436 /* Indirect variable offset stack access is prohibited in 5437 * unprivileged mode so it's not handled here. 5438 */ 5439 off = ptr_reg->off + ptr_reg->var_off.value; 5440 if (mask_to_left) 5441 *ptr_limit = MAX_BPF_STACK + off; 5442 else 5443 *ptr_limit = -off; 5444 return 0; 5445 case PTR_TO_MAP_VALUE: 5446 if (mask_to_left) { 5447 *ptr_limit = ptr_reg->umax_value + ptr_reg->off; 5448 } else { 5449 off = ptr_reg->smin_value + ptr_reg->off; 5450 *ptr_limit = ptr_reg->map_ptr->value_size - off; 5451 } 5452 return 0; 5453 default: 5454 return -EINVAL; 5455 } 5456 } 5457 5458 static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env, 5459 const struct bpf_insn *insn) 5460 { 5461 return env->bypass_spec_v1 || BPF_SRC(insn->code) == BPF_K; 5462 } 5463 5464 static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux, 5465 u32 alu_state, u32 alu_limit) 5466 { 5467 /* If we arrived here from different branches with different 5468 * state or limits to sanitize, then this won't work. 5469 */ 5470 if (aux->alu_state && 5471 (aux->alu_state != alu_state || 5472 aux->alu_limit != alu_limit)) 5473 return -EACCES; 5474 5475 /* Corresponding fixup done in fixup_bpf_calls(). */ 5476 aux->alu_state = alu_state; 5477 aux->alu_limit = alu_limit; 5478 return 0; 5479 } 5480 5481 static int sanitize_val_alu(struct bpf_verifier_env *env, 5482 struct bpf_insn *insn) 5483 { 5484 struct bpf_insn_aux_data *aux = cur_aux(env); 5485 5486 if (can_skip_alu_sanitation(env, insn)) 5487 return 0; 5488 5489 return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0); 5490 } 5491 5492 static int sanitize_ptr_alu(struct bpf_verifier_env *env, 5493 struct bpf_insn *insn, 5494 const struct bpf_reg_state *ptr_reg, 5495 struct bpf_reg_state *dst_reg, 5496 bool off_is_neg) 5497 { 5498 struct bpf_verifier_state *vstate = env->cur_state; 5499 struct bpf_insn_aux_data *aux = cur_aux(env); 5500 bool ptr_is_dst_reg = ptr_reg == dst_reg; 5501 u8 opcode = BPF_OP(insn->code); 5502 u32 alu_state, alu_limit; 5503 struct bpf_reg_state tmp; 5504 bool ret; 5505 5506 if (can_skip_alu_sanitation(env, insn)) 5507 return 0; 5508 5509 /* We already marked aux for masking from non-speculative 5510 * paths, thus we got here in the first place. We only care 5511 * to explore bad access from here. 5512 */ 5513 if (vstate->speculative) 5514 goto do_sim; 5515 5516 alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0; 5517 alu_state |= ptr_is_dst_reg ? 
5518 BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST; 5519 5520 if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg)) 5521 return 0; 5522 if (update_alu_sanitation_state(aux, alu_state, alu_limit)) 5523 return -EACCES; 5524 do_sim: 5525 /* Simulate and find potential out-of-bounds access under 5526 * speculative execution from truncation as a result of 5527 * masking when off was not within expected range. If off 5528 * sits in dst, then we temporarily need to move ptr there 5529 * to simulate dst (== 0) +/-= ptr. Needed, for example, 5530 * for cases where we use K-based arithmetic in one direction 5531 * and truncated reg-based in the other in order to explore 5532 * bad access. 5533 */ 5534 if (!ptr_is_dst_reg) { 5535 tmp = *dst_reg; 5536 *dst_reg = *ptr_reg; 5537 } 5538 ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true); 5539 if (!ptr_is_dst_reg && ret) 5540 *dst_reg = tmp; 5541 return !ret ? -EFAULT : 0; 5542 } 5543 5544 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off. 5545 * Caller should also handle BPF_MOV case separately. 5546 * If we return -EACCES, caller may want to try again treating pointer as a 5547 * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks. 5548 */ 5549 static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, 5550 struct bpf_insn *insn, 5551 const struct bpf_reg_state *ptr_reg, 5552 const struct bpf_reg_state *off_reg) 5553 { 5554 struct bpf_verifier_state *vstate = env->cur_state; 5555 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 5556 struct bpf_reg_state *regs = state->regs, *dst_reg; 5557 bool known = tnum_is_const(off_reg->var_off); 5558 s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value, 5559 smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value; 5560 u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value, 5561 umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value; 5562 u32 dst = insn->dst_reg, src = insn->src_reg; 5563 u8 opcode = BPF_OP(insn->code); 5564 int ret; 5565 5566 dst_reg = &regs[dst]; 5567 5568 if ((known && (smin_val != smax_val || umin_val != umax_val)) || 5569 smin_val > smax_val || umin_val > umax_val) { 5570 /* Taint dst register if offset had invalid bounds derived from 5571 * e.g. dead branches.
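 * For example, a path that is dead at runtime may imply both R2 > 5
 * and R2 < 3, leaving contradictory bounds (smin_val > smax_val);
 * such a path can never execute, so marking the result unknown is
 * sound rather than an error.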
5572 */ 5573 __mark_reg_unknown(env, dst_reg); 5574 return 0; 5575 } 5576 5577 if (BPF_CLASS(insn->code) != BPF_ALU64) { 5578 /* 32-bit ALU ops on pointers produce (meaningless) scalars */ 5579 if (opcode == BPF_SUB && env->allow_ptr_leaks) { 5580 __mark_reg_unknown(env, dst_reg); 5581 return 0; 5582 } 5583 5584 verbose(env, 5585 "R%d 32-bit pointer arithmetic prohibited\n", 5586 dst); 5587 return -EACCES; 5588 } 5589 5590 switch (ptr_reg->type) { 5591 case PTR_TO_MAP_VALUE_OR_NULL: 5592 verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n", 5593 dst, reg_type_str[ptr_reg->type]); 5594 return -EACCES; 5595 case CONST_PTR_TO_MAP: 5596 /* smin_val represents the known value */ 5597 if (known && smin_val == 0 && opcode == BPF_ADD) 5598 break; 5599 fallthrough; 5600 case PTR_TO_PACKET_END: 5601 case PTR_TO_SOCKET: 5602 case PTR_TO_SOCKET_OR_NULL: 5603 case PTR_TO_SOCK_COMMON: 5604 case PTR_TO_SOCK_COMMON_OR_NULL: 5605 case PTR_TO_TCP_SOCK: 5606 case PTR_TO_TCP_SOCK_OR_NULL: 5607 case PTR_TO_XDP_SOCK: 5608 verbose(env, "R%d pointer arithmetic on %s prohibited\n", 5609 dst, reg_type_str[ptr_reg->type]); 5610 return -EACCES; 5611 case PTR_TO_MAP_VALUE: 5612 if (!env->allow_ptr_leaks && !known && (smin_val < 0) != (smax_val < 0)) { 5613 verbose(env, "R%d has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root\n", 5614 off_reg == dst_reg ? dst : src); 5615 return -EACCES; 5616 } 5617 fallthrough; 5618 default: 5619 break; 5620 } 5621 5622 /* In case of 'scalar += pointer', dst_reg inherits pointer type and id. 5623 * The id may be overwritten later if we create a new variable offset. 5624 */ 5625 dst_reg->type = ptr_reg->type; 5626 dst_reg->id = ptr_reg->id; 5627 5628 if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) || 5629 !check_reg_sane_offset(env, ptr_reg, ptr_reg->type)) 5630 return -EINVAL; 5631 5632 /* pointer types do not carry 32-bit bounds at the moment. */ 5633 __mark_reg32_unbounded(dst_reg); 5634 5635 switch (opcode) { 5636 case BPF_ADD: 5637 ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0); 5638 if (ret < 0) { 5639 verbose(env, "R%d tried to add from different maps or paths\n", dst); 5640 return ret; 5641 } 5642 /* We can take a fixed offset as long as it doesn't overflow 5643 * the s32 'off' field 5644 */ 5645 if (known && (ptr_reg->off + smin_val == 5646 (s64)(s32)(ptr_reg->off + smin_val))) { 5647 /* pointer += K. Accumulate it into fixed offset */ 5648 dst_reg->smin_value = smin_ptr; 5649 dst_reg->smax_value = smax_ptr; 5650 dst_reg->umin_value = umin_ptr; 5651 dst_reg->umax_value = umax_ptr; 5652 dst_reg->var_off = ptr_reg->var_off; 5653 dst_reg->off = ptr_reg->off + smin_val; 5654 dst_reg->raw = ptr_reg->raw; 5655 break; 5656 } 5657 /* A new variable offset is created. Note that off_reg->off 5658 * == 0, since it's a scalar. 5659 * dst_reg gets the pointer type and since some positive 5660 * integer value was added to the pointer, give it a new 'id' 5661 * if it's a PTR_TO_PACKET. 5662 * this creates a new 'base' pointer, off_reg (variable) gets 5663 * added into the variable offset, and we copy the fixed offset 5664 * from ptr_reg. 
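 * A sketch of the resulting state, assuming ptr_reg is a packet
 * pointer with fixed offset 4 and off_reg is a scalar in [0, 10]:
 *   r1 = pkt(id=n, off=4, r=X)   // ptr_reg
 *   r1 += r2                     // r2 = scalar, umin=0, umax=10
 *   r1 == pkt(id=m, off=4, r=0)  // fresh id, variable part in [0, 10]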
5665 */ 5666 if (signed_add_overflows(smin_ptr, smin_val) || 5667 signed_add_overflows(smax_ptr, smax_val)) { 5668 dst_reg->smin_value = S64_MIN; 5669 dst_reg->smax_value = S64_MAX; 5670 } else { 5671 dst_reg->smin_value = smin_ptr + smin_val; 5672 dst_reg->smax_value = smax_ptr + smax_val; 5673 } 5674 if (umin_ptr + umin_val < umin_ptr || 5675 umax_ptr + umax_val < umax_ptr) { 5676 dst_reg->umin_value = 0; 5677 dst_reg->umax_value = U64_MAX; 5678 } else { 5679 dst_reg->umin_value = umin_ptr + umin_val; 5680 dst_reg->umax_value = umax_ptr + umax_val; 5681 } 5682 dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off); 5683 dst_reg->off = ptr_reg->off; 5684 dst_reg->raw = ptr_reg->raw; 5685 if (reg_is_pkt_pointer(ptr_reg)) { 5686 dst_reg->id = ++env->id_gen; 5687 /* something was added to pkt_ptr, set range to zero */ 5688 memset(&dst_reg->raw, 0, sizeof(dst_reg->raw)); 5689 } 5690 break; 5691 case BPF_SUB: 5692 ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0); 5693 if (ret < 0) { 5694 verbose(env, "R%d tried to sub from different maps or paths\n", dst); 5695 return ret; 5696 } 5697 if (dst_reg == off_reg) { 5698 /* scalar -= pointer. Creates an unknown scalar */ 5699 verbose(env, "R%d tried to subtract pointer from scalar\n", 5700 dst); 5701 return -EACCES; 5702 } 5703 /* We don't allow subtraction from FP, because (according to 5704 * test_verifier.c test "invalid fp arithmetic") JITs might not 5705 * be able to deal with it. 5706 */ 5707 if (ptr_reg->type == PTR_TO_STACK) { 5708 verbose(env, "R%d subtraction from stack pointer prohibited\n", 5709 dst); 5710 return -EACCES; 5711 } 5712 if (known && (ptr_reg->off - smin_val == 5713 (s64)(s32)(ptr_reg->off - smin_val))) { 5714 /* pointer -= K. Subtract it from fixed offset */ 5715 dst_reg->smin_value = smin_ptr; 5716 dst_reg->smax_value = smax_ptr; 5717 dst_reg->umin_value = umin_ptr; 5718 dst_reg->umax_value = umax_ptr; 5719 dst_reg->var_off = ptr_reg->var_off; 5720 dst_reg->id = ptr_reg->id; 5721 dst_reg->off = ptr_reg->off - smin_val; 5722 dst_reg->raw = ptr_reg->raw; 5723 break; 5724 } 5725 /* A new variable offset is created. If the subtrahend is known 5726 * nonnegative, then any reg->range we had before is still good. 5727 */ 5728 if (signed_sub_overflows(smin_ptr, smax_val) || 5729 signed_sub_overflows(smax_ptr, smin_val)) { 5730 /* Overflow possible, we know nothing */ 5731 dst_reg->smin_value = S64_MIN; 5732 dst_reg->smax_value = S64_MAX; 5733 } else { 5734 dst_reg->smin_value = smin_ptr - smax_val; 5735 dst_reg->smax_value = smax_ptr - smin_val; 5736 } 5737 if (umin_ptr < umax_val) { 5738 /* Overflow possible, we know nothing */ 5739 dst_reg->umin_value = 0; 5740 dst_reg->umax_value = U64_MAX; 5741 } else { 5742 /* Cannot overflow (as long as bounds are consistent) */ 5743 dst_reg->umin_value = umin_ptr - umax_val; 5744 dst_reg->umax_value = umax_ptr - umin_val; 5745 } 5746 dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off); 5747 dst_reg->off = ptr_reg->off; 5748 dst_reg->raw = ptr_reg->raw; 5749 if (reg_is_pkt_pointer(ptr_reg)) { 5750 dst_reg->id = ++env->id_gen; 5751 /* a negative value may have been subtracted from pkt_ptr, set range to zero */ 5752 if (smin_val < 0) 5753 memset(&dst_reg->raw, 0, sizeof(dst_reg->raw)); 5754 } 5755 break; 5756 case BPF_AND: 5757 case BPF_OR: 5758 case BPF_XOR: 5759 /* bitwise ops on pointers are troublesome, prohibit.
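 * For example, 'r1 &= 0xfff' on a map value pointer would produce a
 * value whose relation to the original allocation the verifier could
 * no longer track, so such programs are rejected outright.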
*/ 5760 verbose(env, "R%d bitwise operator %s on pointer prohibited\n", 5761 dst, bpf_alu_string[opcode >> 4]); 5762 return -EACCES; 5763 default: 5764 /* other operators (e.g. MUL, LSH) produce non-pointer results */ 5765 verbose(env, "R%d pointer arithmetic with %s operator prohibited\n", 5766 dst, bpf_alu_string[opcode >> 4]); 5767 return -EACCES; 5768 } 5769 5770 if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type)) 5771 return -EINVAL; 5772 5773 __update_reg_bounds(dst_reg); 5774 __reg_deduce_bounds(dst_reg); 5775 __reg_bound_offset(dst_reg); 5776 5777 /* For unprivileged we require that the resulting offset be in bounds 5778 * in order to be able to sanitize access later on. 5779 */ 5780 if (!env->bypass_spec_v1) { 5781 if (dst_reg->type == PTR_TO_MAP_VALUE && 5782 check_map_access(env, dst, dst_reg->off, 1, false)) { 5783 verbose(env, "R%d pointer arithmetic of map value goes out of range, " 5784 "prohibited for !root\n", dst); 5785 return -EACCES; 5786 } else if (dst_reg->type == PTR_TO_STACK && 5787 check_stack_access(env, dst_reg, dst_reg->off + 5788 dst_reg->var_off.value, 1)) { 5789 verbose(env, "R%d stack pointer arithmetic goes out of range, " 5790 "prohibited for !root\n", dst); 5791 return -EACCES; 5792 } 5793 } 5794 5795 return 0; 5796 } 5797 5798 static void scalar32_min_max_add(struct bpf_reg_state *dst_reg, 5799 struct bpf_reg_state *src_reg) 5800 { 5801 s32 smin_val = src_reg->s32_min_value; 5802 s32 smax_val = src_reg->s32_max_value; 5803 u32 umin_val = src_reg->u32_min_value; 5804 u32 umax_val = src_reg->u32_max_value; 5805 5806 if (signed_add32_overflows(dst_reg->s32_min_value, smin_val) || 5807 signed_add32_overflows(dst_reg->s32_max_value, smax_val)) { 5808 dst_reg->s32_min_value = S32_MIN; 5809 dst_reg->s32_max_value = S32_MAX; 5810 } else { 5811 dst_reg->s32_min_value += smin_val; 5812 dst_reg->s32_max_value += smax_val; 5813 } 5814 if (dst_reg->u32_min_value + umin_val < umin_val || 5815 dst_reg->u32_max_value + umax_val < umax_val) { 5816 dst_reg->u32_min_value = 0; 5817 dst_reg->u32_max_value = U32_MAX; 5818 } else { 5819 dst_reg->u32_min_value += umin_val; 5820 dst_reg->u32_max_value += umax_val; 5821 } 5822 } 5823 5824 static void scalar_min_max_add(struct bpf_reg_state *dst_reg, 5825 struct bpf_reg_state *src_reg) 5826 { 5827 s64 smin_val = src_reg->smin_value; 5828 s64 smax_val = src_reg->smax_value; 5829 u64 umin_val = src_reg->umin_value; 5830 u64 umax_val = src_reg->umax_value; 5831 5832 if (signed_add_overflows(dst_reg->smin_value, smin_val) || 5833 signed_add_overflows(dst_reg->smax_value, smax_val)) { 5834 dst_reg->smin_value = S64_MIN; 5835 dst_reg->smax_value = S64_MAX; 5836 } else { 5837 dst_reg->smin_value += smin_val; 5838 dst_reg->smax_value += smax_val; 5839 } 5840 if (dst_reg->umin_value + umin_val < umin_val || 5841 dst_reg->umax_value + umax_val < umax_val) { 5842 dst_reg->umin_value = 0; 5843 dst_reg->umax_value = U64_MAX; 5844 } else { 5845 dst_reg->umin_value += umin_val; 5846 dst_reg->umax_value += umax_val; 5847 } 5848 } 5849 5850 static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg, 5851 struct bpf_reg_state *src_reg) 5852 { 5853 s32 smin_val = src_reg->s32_min_value; 5854 s32 smax_val = src_reg->s32_max_value; 5855 u32 umin_val = src_reg->u32_min_value; 5856 u32 umax_val = src_reg->u32_max_value; 5857 5858 if (signed_sub32_overflows(dst_reg->s32_min_value, smax_val) || 5859 signed_sub32_overflows(dst_reg->s32_max_value, smin_val)) { 5860 /* Overflow possible, we know nothing */ 5861 dst_reg->s32_min_value = S32_MIN;
5862 dst_reg->s32_max_value = S32_MAX; 5863 } else { 5864 dst_reg->s32_min_value -= smax_val; 5865 dst_reg->s32_max_value -= smin_val; 5866 } 5867 if (dst_reg->u32_min_value < umax_val) { 5868 /* Overflow possible, we know nothing */ 5869 dst_reg->u32_min_value = 0; 5870 dst_reg->u32_max_value = U32_MAX; 5871 } else { 5872 /* Cannot overflow (as long as bounds are consistent) */ 5873 dst_reg->u32_min_value -= umax_val; 5874 dst_reg->u32_max_value -= umin_val; 5875 } 5876 } 5877 5878 static void scalar_min_max_sub(struct bpf_reg_state *dst_reg, 5879 struct bpf_reg_state *src_reg) 5880 { 5881 s64 smin_val = src_reg->smin_value; 5882 s64 smax_val = src_reg->smax_value; 5883 u64 umin_val = src_reg->umin_value; 5884 u64 umax_val = src_reg->umax_value; 5885 5886 if (signed_sub_overflows(dst_reg->smin_value, smax_val) || 5887 signed_sub_overflows(dst_reg->smax_value, smin_val)) { 5888 /* Overflow possible, we know nothing */ 5889 dst_reg->smin_value = S64_MIN; 5890 dst_reg->smax_value = S64_MAX; 5891 } else { 5892 dst_reg->smin_value -= smax_val; 5893 dst_reg->smax_value -= smin_val; 5894 } 5895 if (dst_reg->umin_value < umax_val) { 5896 /* Overflow possible, we know nothing */ 5897 dst_reg->umin_value = 0; 5898 dst_reg->umax_value = U64_MAX; 5899 } else { 5900 /* Cannot overflow (as long as bounds are consistent) */ 5901 dst_reg->umin_value -= umax_val; 5902 dst_reg->umax_value -= umin_val; 5903 } 5904 } 5905 5906 static void scalar32_min_max_mul(struct bpf_reg_state *dst_reg, 5907 struct bpf_reg_state *src_reg) 5908 { 5909 s32 smin_val = src_reg->s32_min_value; 5910 u32 umin_val = src_reg->u32_min_value; 5911 u32 umax_val = src_reg->u32_max_value; 5912 5913 if (smin_val < 0 || dst_reg->s32_min_value < 0) { 5914 /* Ain't nobody got time to multiply that sign */ 5915 __mark_reg32_unbounded(dst_reg); 5916 return; 5917 } 5918 /* Both values are positive, so we can work with unsigned and 5919 * copy the result to signed (unless it exceeds S32_MAX). 5920 */ 5921 if (umax_val > U16_MAX || dst_reg->u32_max_value > U16_MAX) { 5922 /* Potential overflow, we know nothing */ 5923 __mark_reg32_unbounded(dst_reg); 5924 return; 5925 } 5926 dst_reg->u32_min_value *= umin_val; 5927 dst_reg->u32_max_value *= umax_val; 5928 if (dst_reg->u32_max_value > S32_MAX) { 5929 /* Overflow possible, we know nothing */ 5930 dst_reg->s32_min_value = S32_MIN; 5931 dst_reg->s32_max_value = S32_MAX; 5932 } else { 5933 dst_reg->s32_min_value = dst_reg->u32_min_value; 5934 dst_reg->s32_max_value = dst_reg->u32_max_value; 5935 } 5936 } 5937 5938 static void scalar_min_max_mul(struct bpf_reg_state *dst_reg, 5939 struct bpf_reg_state *src_reg) 5940 { 5941 s64 smin_val = src_reg->smin_value; 5942 u64 umin_val = src_reg->umin_value; 5943 u64 umax_val = src_reg->umax_value; 5944 5945 if (smin_val < 0 || dst_reg->smin_value < 0) { 5946 /* Ain't nobody got time to multiply that sign */ 5947 __mark_reg64_unbounded(dst_reg); 5948 return; 5949 } 5950 /* Both values are positive, so we can work with unsigned and 5951 * copy the result to signed (unless it exceeds S64_MAX). 
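 * The guard below is what makes this safe: with both operands capped
 * at U32_MAX, the product is at most (2^32 - 1)^2 = 2^64 - 2^33 + 1,
 * which still fits in a u64, so the unsigned multiplies cannot wrap.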
5952 */ 5953 if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) { 5954 /* Potential overflow, we know nothing */ 5955 __mark_reg64_unbounded(dst_reg); 5956 return; 5957 } 5958 dst_reg->umin_value *= umin_val; 5959 dst_reg->umax_value *= umax_val; 5960 if (dst_reg->umax_value > S64_MAX) { 5961 /* Overflow possible, we know nothing */ 5962 dst_reg->smin_value = S64_MIN; 5963 dst_reg->smax_value = S64_MAX; 5964 } else { 5965 dst_reg->smin_value = dst_reg->umin_value; 5966 dst_reg->smax_value = dst_reg->umax_value; 5967 } 5968 } 5969 5970 static void scalar32_min_max_and(struct bpf_reg_state *dst_reg, 5971 struct bpf_reg_state *src_reg) 5972 { 5973 bool src_known = tnum_subreg_is_const(src_reg->var_off); 5974 bool dst_known = tnum_subreg_is_const(dst_reg->var_off); 5975 struct tnum var32_off = tnum_subreg(dst_reg->var_off); 5976 s32 smin_val = src_reg->s32_min_value; 5977 u32 umax_val = src_reg->u32_max_value; 5978 5979 /* Assuming scalar_min_max_and() will be called so it's safe 5980 * to skip updating register for known 32-bit case. 5981 */ 5982 if (src_known && dst_known) 5983 return; 5984 5985 /* We get our minimum from the var_off, since that's inherently 5986 * bitwise. Our maximum is the minimum of the operands' maxima. 5987 */ 5988 dst_reg->u32_min_value = var32_off.value; 5989 dst_reg->u32_max_value = min(dst_reg->u32_max_value, umax_val); 5990 if (dst_reg->s32_min_value < 0 || smin_val < 0) { 5991 /* Lose signed bounds when ANDing negative numbers, 5992 * ain't nobody got time for that. 5993 */ 5994 dst_reg->s32_min_value = S32_MIN; 5995 dst_reg->s32_max_value = S32_MAX; 5996 } else { 5997 /* ANDing two positives gives a positive, so safe to 5998 * cast result into s32. 5999 */ 6000 dst_reg->s32_min_value = dst_reg->u32_min_value; 6001 dst_reg->s32_max_value = dst_reg->u32_max_value; 6002 } 6003 6004 } 6005 6006 static void scalar_min_max_and(struct bpf_reg_state *dst_reg, 6007 struct bpf_reg_state *src_reg) 6008 { 6009 bool src_known = tnum_is_const(src_reg->var_off); 6010 bool dst_known = tnum_is_const(dst_reg->var_off); 6011 s64 smin_val = src_reg->smin_value; 6012 u64 umax_val = src_reg->umax_value; 6013 6014 if (src_known && dst_known) { 6015 __mark_reg_known(dst_reg, dst_reg->var_off.value); 6016 return; 6017 } 6018 6019 /* We get our minimum from the var_off, since that's inherently 6020 * bitwise. Our maximum is the minimum of the operands' maxima. 6021 */ 6022 dst_reg->umin_value = dst_reg->var_off.value; 6023 dst_reg->umax_value = min(dst_reg->umax_value, umax_val); 6024 if (dst_reg->smin_value < 0 || smin_val < 0) { 6025 /* Lose signed bounds when ANDing negative numbers, 6026 * ain't nobody got time for that. 6027 */ 6028 dst_reg->smin_value = S64_MIN; 6029 dst_reg->smax_value = S64_MAX; 6030 } else { 6031 /* ANDing two positives gives a positive, so safe to 6032 * cast result into s64.
6033 */ 6034 dst_reg->smin_value = dst_reg->umin_value; 6035 dst_reg->smax_value = dst_reg->umax_value; 6036 } 6037 /* We may learn something more from the var_off */ 6038 __update_reg_bounds(dst_reg); 6039 } 6040 6041 static void scalar32_min_max_or(struct bpf_reg_state *dst_reg, 6042 struct bpf_reg_state *src_reg) 6043 { 6044 bool src_known = tnum_subreg_is_const(src_reg->var_off); 6045 bool dst_known = tnum_subreg_is_const(dst_reg->var_off); 6046 struct tnum var32_off = tnum_subreg(dst_reg->var_off); 6047 s32 smin_val = src_reg->s32_min_value; 6048 u32 umin_val = src_reg->u32_min_value; 6049 6050 /* Assuming scalar_min_max_or() will be called so it is safe 6051 * to skip updating register for known case. 6052 */ 6053 if (src_known && dst_known) 6054 return; 6055 6056 /* We get our maximum from the var_off, and our minimum is the 6057 * maximum of the operands' minima. 6058 */ 6059 dst_reg->u32_min_value = max(dst_reg->u32_min_value, umin_val); 6060 dst_reg->u32_max_value = var32_off.value | var32_off.mask; 6061 if (dst_reg->s32_min_value < 0 || smin_val < 0) { 6062 /* Lose signed bounds when ORing negative numbers, 6063 * ain't nobody got time for that. 6064 */ 6065 dst_reg->s32_min_value = S32_MIN; 6066 dst_reg->s32_max_value = S32_MAX; 6067 } else { 6068 /* ORing two positives gives a positive, so safe to 6069 * cast result into s32. 6070 */ 6071 dst_reg->s32_min_value = dst_reg->u32_min_value; 6072 dst_reg->s32_max_value = dst_reg->u32_max_value; 6073 } 6074 } 6075 6076 static void scalar_min_max_or(struct bpf_reg_state *dst_reg, 6077 struct bpf_reg_state *src_reg) 6078 { 6079 bool src_known = tnum_is_const(src_reg->var_off); 6080 bool dst_known = tnum_is_const(dst_reg->var_off); 6081 s64 smin_val = src_reg->smin_value; 6082 u64 umin_val = src_reg->umin_value; 6083 6084 if (src_known && dst_known) { 6085 __mark_reg_known(dst_reg, dst_reg->var_off.value); 6086 return; 6087 } 6088 6089 /* We get our maximum from the var_off, and our minimum is the 6090 * maximum of the operands' minima. 6091 */ 6092 dst_reg->umin_value = max(dst_reg->umin_value, umin_val); 6093 dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask; 6094 if (dst_reg->smin_value < 0 || smin_val < 0) { 6095 /* Lose signed bounds when ORing negative numbers, 6096 * ain't nobody got time for that. 6097 */ 6098 dst_reg->smin_value = S64_MIN; 6099 dst_reg->smax_value = S64_MAX; 6100 } else { 6101 /* ORing two positives gives a positive, so safe to 6102 * cast result into s64. 6103 */ 6104 dst_reg->smin_value = dst_reg->umin_value; 6105 dst_reg->smax_value = dst_reg->umax_value; 6106 } 6107 /* We may learn something more from the var_off */ 6108 __update_reg_bounds(dst_reg); 6109 } 6110 6111 static void scalar32_min_max_xor(struct bpf_reg_state *dst_reg, 6112 struct bpf_reg_state *src_reg) 6113 { 6114 bool src_known = tnum_subreg_is_const(src_reg->var_off); 6115 bool dst_known = tnum_subreg_is_const(dst_reg->var_off); 6116 struct tnum var32_off = tnum_subreg(dst_reg->var_off); 6117 s32 smin_val = src_reg->s32_min_value; 6118 6119 /* Assuming scalar_min_max_xor() will be called so it is safe 6120 * to skip updating register for known case. 6121 */ 6122 if (src_known && dst_known) 6123 return; 6124 6125 /* We get both minimum and maximum from the var32_off.
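 * For example, if var32_off has value 0x10 and mask 0x0f (bit 4 known
 * set, bits 0-3 unknown), the result is known to lie in
 * [0x10, 0x10 | 0x0f], i.e. [0x10, 0x1f].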
*/ 6126 dst_reg->u32_min_value = var32_off.value; 6127 dst_reg->u32_max_value = var32_off.value | var32_off.mask; 6128 6129 if (dst_reg->s32_min_value >= 0 && smin_val >= 0) { 6130 /* XORing two positive sign numbers gives a positive, 6131 * so safe to cast u32 result into s32. 6132 */ 6133 dst_reg->s32_min_value = dst_reg->u32_min_value; 6134 dst_reg->s32_max_value = dst_reg->u32_max_value; 6135 } else { 6136 dst_reg->s32_min_value = S32_MIN; 6137 dst_reg->s32_max_value = S32_MAX; 6138 } 6139 } 6140 6141 static void scalar_min_max_xor(struct bpf_reg_state *dst_reg, 6142 struct bpf_reg_state *src_reg) 6143 { 6144 bool src_known = tnum_is_const(src_reg->var_off); 6145 bool dst_known = tnum_is_const(dst_reg->var_off); 6146 s64 smin_val = src_reg->smin_value; 6147 6148 if (src_known && dst_known) { 6149 /* dst_reg->var_off.value has been updated earlier */ 6150 __mark_reg_known(dst_reg, dst_reg->var_off.value); 6151 return; 6152 } 6153 6154 /* We get both minimum and maximum from the var_off. */ 6155 dst_reg->umin_value = dst_reg->var_off.value; 6156 dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask; 6157 6158 if (dst_reg->smin_value >= 0 && smin_val >= 0) { 6159 /* XORing two positive sign numbers gives a positive, 6160 * so safe to cast u64 result into s64. 6161 */ 6162 dst_reg->smin_value = dst_reg->umin_value; 6163 dst_reg->smax_value = dst_reg->umax_value; 6164 } else { 6165 dst_reg->smin_value = S64_MIN; 6166 dst_reg->smax_value = S64_MAX; 6167 } 6168 6169 __update_reg_bounds(dst_reg); 6170 } 6171 6172 static void __scalar32_min_max_lsh(struct bpf_reg_state *dst_reg, 6173 u64 umin_val, u64 umax_val) 6174 { 6175 /* We lose all sign bit information (except what we can pick 6176 * up from var_off) 6177 */ 6178 dst_reg->s32_min_value = S32_MIN; 6179 dst_reg->s32_max_value = S32_MAX; 6180 /* If we might shift our top bit out, then we know nothing */ 6181 if (umax_val > 31 || dst_reg->u32_max_value > 1ULL << (31 - umax_val)) { 6182 dst_reg->u32_min_value = 0; 6183 dst_reg->u32_max_value = U32_MAX; 6184 } else { 6185 dst_reg->u32_min_value <<= umin_val; 6186 dst_reg->u32_max_value <<= umax_val; 6187 } 6188 } 6189 6190 static void scalar32_min_max_lsh(struct bpf_reg_state *dst_reg, 6191 struct bpf_reg_state *src_reg) 6192 { 6193 u32 umax_val = src_reg->u32_max_value; 6194 u32 umin_val = src_reg->u32_min_value; 6195 /* u32 alu operation will zext upper bits */ 6196 struct tnum subreg = tnum_subreg(dst_reg->var_off); 6197 6198 __scalar32_min_max_lsh(dst_reg, umin_val, umax_val); 6199 dst_reg->var_off = tnum_subreg(tnum_lshift(subreg, umin_val)); 6200 /* Not strictly required, but to be careful mark the reg64 bounds as 6201 * unknown, so that we are forced to pick them up from the tnum and 6202 * zext later; if some path skips this step we are still safe. 6203 */ 6204 __mark_reg64_unbounded(dst_reg); 6205 __update_reg32_bounds(dst_reg); 6206 } 6207 6208 static void __scalar64_min_max_lsh(struct bpf_reg_state *dst_reg, 6209 u64 umin_val, u64 umax_val) 6210 { 6211 /* Special case <<32 because it is a common compiler pattern to sign 6212 * extend subreg by doing <<32 s>>32. In this case if 32bit bounds are 6213 * positive we know this shift will also be positive so we can track 6214 * bounds correctly. Otherwise we lose all sign bit information except 6215 * what we can pick up from var_off. Perhaps we can generalize this 6216 * later to shifts of any length.
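 * For example, a compiler widening a 32-bit signed value to 64 bits
 * typically emits the pair
 *   BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
 *   BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 32),
 * and this special case keeps the bounds tight across the first insn.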
6217 */ 6218 if (umin_val == 32 && umax_val == 32 && dst_reg->s32_max_value >= 0) 6219 dst_reg->smax_value = (s64)dst_reg->s32_max_value << 32; 6220 else 6221 dst_reg->smax_value = S64_MAX; 6222 6223 if (umin_val == 32 && umax_val == 32 && dst_reg->s32_min_value >= 0) 6224 dst_reg->smin_value = (s64)dst_reg->s32_min_value << 32; 6225 else 6226 dst_reg->smin_value = S64_MIN; 6227 6228 /* If we might shift our top bit out, then we know nothing */ 6229 if (dst_reg->umax_value > 1ULL << (63 - umax_val)) { 6230 dst_reg->umin_value = 0; 6231 dst_reg->umax_value = U64_MAX; 6232 } else { 6233 dst_reg->umin_value <<= umin_val; 6234 dst_reg->umax_value <<= umax_val; 6235 } 6236 } 6237 6238 static void scalar_min_max_lsh(struct bpf_reg_state *dst_reg, 6239 struct bpf_reg_state *src_reg) 6240 { 6241 u64 umax_val = src_reg->umax_value; 6242 u64 umin_val = src_reg->umin_value; 6243 6244 /* scalar64 calc uses 32bit unshifted bounds so must be called first */ 6245 __scalar64_min_max_lsh(dst_reg, umin_val, umax_val); 6246 __scalar32_min_max_lsh(dst_reg, umin_val, umax_val); 6247 6248 dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val); 6249 /* We may learn something more from the var_off */ 6250 __update_reg_bounds(dst_reg); 6251 } 6252 6253 static void scalar32_min_max_rsh(struct bpf_reg_state *dst_reg, 6254 struct bpf_reg_state *src_reg) 6255 { 6256 struct tnum subreg = tnum_subreg(dst_reg->var_off); 6257 u32 umax_val = src_reg->u32_max_value; 6258 u32 umin_val = src_reg->u32_min_value; 6259 6260 /* BPF_RSH is an unsigned shift. If the value in dst_reg might 6261 * be negative, then either: 6262 * 1) src_reg might be zero, so the sign bit of the result is 6263 * unknown, so we lose our signed bounds 6264 * 2) it's known negative, thus the unsigned bounds capture the 6265 * signed bounds 6266 * 3) the signed bounds cross zero, so they tell us nothing 6267 * about the result 6268 * If the value in dst_reg is known nonnegative, then again the 6269 * unsigned bounds capture the signed bounds. 6270 * Thus, in all cases it suffices to blow away our signed bounds 6271 * and rely on inferring new ones from the unsigned bounds and 6272 * var_off of the result. 6273 */ 6274 dst_reg->s32_min_value = S32_MIN; 6275 dst_reg->s32_max_value = S32_MAX; 6276 6277 dst_reg->var_off = tnum_rshift(subreg, umin_val); 6278 dst_reg->u32_min_value >>= umax_val; 6279 dst_reg->u32_max_value >>= umin_val; 6280 6281 __mark_reg64_unbounded(dst_reg); 6282 __update_reg32_bounds(dst_reg); 6283 } 6284 6285 static void scalar_min_max_rsh(struct bpf_reg_state *dst_reg, 6286 struct bpf_reg_state *src_reg) 6287 { 6288 u64 umax_val = src_reg->umax_value; 6289 u64 umin_val = src_reg->umin_value; 6290 6291 /* BPF_RSH is an unsigned shift. If the value in dst_reg might 6292 * be negative, then either: 6293 * 1) src_reg might be zero, so the sign bit of the result is 6294 * unknown, so we lose our signed bounds 6295 * 2) it's known negative, thus the unsigned bounds capture the 6296 * signed bounds 6297 * 3) the signed bounds cross zero, so they tell us nothing 6298 * about the result 6299 * If the value in dst_reg is known nonnegative, then again the 6300 * unsigned bounds capture the signed bounds. 6301 * Thus, in all cases it suffices to blow away our signed bounds 6302 * and rely on inferring new ones from the unsigned bounds and 6303 * var_off of the result.
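 * As a concrete instance of case 2: a value known to lie in [-4, -1]
 * occupies [U64_MAX - 3, U64_MAX] as unsigned, so the unsigned shifts
 * below still yield sound bounds even though the signed ones are lost.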
6304 */ 6305 dst_reg->smin_value = S64_MIN; 6306 dst_reg->smax_value = S64_MAX; 6307 dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val); 6308 dst_reg->umin_value >>= umax_val; 6309 dst_reg->umax_value >>= umin_val; 6310 6311 /* It's not easy to operate on alu32 bounds here because it depends 6312 * on bits being shifted in. Take the easy way out and mark unbounded 6313 * so we can recalculate later from tnum. 6314 */ 6315 __mark_reg32_unbounded(dst_reg); 6316 __update_reg_bounds(dst_reg); 6317 } 6318 6319 static void scalar32_min_max_arsh(struct bpf_reg_state *dst_reg, 6320 struct bpf_reg_state *src_reg) 6321 { 6322 u64 umin_val = src_reg->u32_min_value; 6323 6324 /* Upon reaching here, src_known is true and 6325 * umax_val is equal to umin_val. 6326 */ 6327 dst_reg->s32_min_value = (u32)(((s32)dst_reg->s32_min_value) >> umin_val); 6328 dst_reg->s32_max_value = (u32)(((s32)dst_reg->s32_max_value) >> umin_val); 6329 6330 dst_reg->var_off = tnum_arshift(tnum_subreg(dst_reg->var_off), umin_val, 32); 6331 6332 /* blow away the dst_reg umin_value/umax_value and rely on 6333 * dst_reg var_off to refine the result. 6334 */ 6335 dst_reg->u32_min_value = 0; 6336 dst_reg->u32_max_value = U32_MAX; 6337 6338 __mark_reg64_unbounded(dst_reg); 6339 __update_reg32_bounds(dst_reg); 6340 } 6341 6342 static void scalar_min_max_arsh(struct bpf_reg_state *dst_reg, 6343 struct bpf_reg_state *src_reg) 6344 { 6345 u64 umin_val = src_reg->umin_value; 6346 6347 /* Upon reaching here, src_known is true and umax_val is equal 6348 * to umin_val. 6349 */ 6350 dst_reg->smin_value >>= umin_val; 6351 dst_reg->smax_value >>= umin_val; 6352 6353 dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val, 64); 6354 6355 /* blow away the dst_reg umin_value/umax_value and rely on 6356 * dst_reg var_off to refine the result. 6357 */ 6358 dst_reg->umin_value = 0; 6359 dst_reg->umax_value = U64_MAX; 6360 6361 /* It's not easy to operate on alu32 bounds here because it depends 6362 * on bits being shifted in from upper 32-bits. Take the easy way out 6363 * and mark unbounded so we can recalculate later from tnum. 6364 */ 6365 __mark_reg32_unbounded(dst_reg); 6366 __update_reg_bounds(dst_reg); 6367 } 6368 6369 /* WARNING: This function does calculations on 64-bit values, but the actual 6370 * execution may occur on 32-bit values. Therefore, things like bitshifts 6371 * need extra checks in the 32-bit case. 6372 */ 6373 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, 6374 struct bpf_insn *insn, 6375 struct bpf_reg_state *dst_reg, 6376 struct bpf_reg_state src_reg) 6377 { 6378 struct bpf_reg_state *regs = cur_regs(env); 6379 u8 opcode = BPF_OP(insn->code); 6380 bool src_known; 6381 s64 smin_val, smax_val; 6382 u64 umin_val, umax_val; 6383 s32 s32_min_val, s32_max_val; 6384 u32 u32_min_val, u32_max_val; 6385 u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ?
64 : 32; 6386 u32 dst = insn->dst_reg; 6387 int ret; 6388 bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64); 6389 6390 smin_val = src_reg.smin_value; 6391 smax_val = src_reg.smax_value; 6392 umin_val = src_reg.umin_value; 6393 umax_val = src_reg.umax_value; 6394 6395 s32_min_val = src_reg.s32_min_value; 6396 s32_max_val = src_reg.s32_max_value; 6397 u32_min_val = src_reg.u32_min_value; 6398 u32_max_val = src_reg.u32_max_value; 6399 6400 if (alu32) { 6401 src_known = tnum_subreg_is_const(src_reg.var_off); 6402 if ((src_known && 6403 (s32_min_val != s32_max_val || u32_min_val != u32_max_val)) || 6404 s32_min_val > s32_max_val || u32_min_val > u32_max_val) { 6405 /* Taint dst register if offset had invalid bounds 6406 * derived from e.g. dead branches. 6407 */ 6408 __mark_reg_unknown(env, dst_reg); 6409 return 0; 6410 } 6411 } else { 6412 src_known = tnum_is_const(src_reg.var_off); 6413 if ((src_known && 6414 (smin_val != smax_val || umin_val != umax_val)) || 6415 smin_val > smax_val || umin_val > umax_val) { 6416 /* Taint dst register if offset had invalid bounds 6417 * derived from e.g. dead branches. 6418 */ 6419 __mark_reg_unknown(env, dst_reg); 6420 return 0; 6421 } 6422 } 6423 6424 if (!src_known && 6425 opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) { 6426 __mark_reg_unknown(env, dst_reg); 6427 return 0; 6428 } 6429 6430 /* Calculate sign/unsigned bounds and tnum for alu32 and alu64 bit ops. 6431 * There are two classes of instructions: For the first class we track both 6432 * alu32 and alu64 sign/unsigned bounds independently; this provides the 6433 * greatest amount of precision when alu operations are mixed with jmp32 6434 * operations. These operations are BPF_ADD, BPF_SUB, BPF_MUL, BPF_AND, 6435 * BPF_OR, and BPF_XOR. This is possible because these ops have fairly easy to 6436 * understand and calculate behavior in both 32-bit and 64-bit alu ops. 6437 * See alu32 verifier tests for examples. The second class of 6438 * operations, BPF_LSH, BPF_RSH, and BPF_ARSH, however, are not so easy 6439 * with regards to tracking sign/unsigned bounds because the bits may 6440 * cross subreg boundaries in the alu64 case. When this happens we mark 6441 * the reg unbounded in the subreg bound space and use the resulting 6442 * tnum to calculate an approximation of the sign/unsigned bounds.
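 * For example, an alu64 BPF_LSH by 3 can move a bit from position 30
 * into position 33, crossing the 32-bit subreg boundary; this is why
 * the shift ops fall into the second class and have their subreg
 * bounds re-derived from the tnum instead.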
6443 */ 6444 switch (opcode) { 6445 case BPF_ADD: 6446 ret = sanitize_val_alu(env, insn); 6447 if (ret < 0) { 6448 verbose(env, "R%d tried to add from different pointers or scalars\n", dst); 6449 return ret; 6450 } 6451 scalar32_min_max_add(dst_reg, &src_reg); 6452 scalar_min_max_add(dst_reg, &src_reg); 6453 dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off); 6454 break; 6455 case BPF_SUB: 6456 ret = sanitize_val_alu(env, insn); 6457 if (ret < 0) { 6458 verbose(env, "R%d tried to sub from different pointers or scalars\n", dst); 6459 return ret; 6460 } 6461 scalar32_min_max_sub(dst_reg, &src_reg); 6462 scalar_min_max_sub(dst_reg, &src_reg); 6463 dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off); 6464 break; 6465 case BPF_MUL: 6466 dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off); 6467 scalar32_min_max_mul(dst_reg, &src_reg); 6468 scalar_min_max_mul(dst_reg, &src_reg); 6469 break; 6470 case BPF_AND: 6471 dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off); 6472 scalar32_min_max_and(dst_reg, &src_reg); 6473 scalar_min_max_and(dst_reg, &src_reg); 6474 break; 6475 case BPF_OR: 6476 dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off); 6477 scalar32_min_max_or(dst_reg, &src_reg); 6478 scalar_min_max_or(dst_reg, &src_reg); 6479 break; 6480 case BPF_XOR: 6481 dst_reg->var_off = tnum_xor(dst_reg->var_off, src_reg.var_off); 6482 scalar32_min_max_xor(dst_reg, &src_reg); 6483 scalar_min_max_xor(dst_reg, &src_reg); 6484 break; 6485 case BPF_LSH: 6486 if (umax_val >= insn_bitness) { 6487 /* Shifts greater than 31 or 63 are undefined. 6488 * This includes shifts by a negative number. 6489 */ 6490 mark_reg_unknown(env, regs, insn->dst_reg); 6491 break; 6492 } 6493 if (alu32) 6494 scalar32_min_max_lsh(dst_reg, &src_reg); 6495 else 6496 scalar_min_max_lsh(dst_reg, &src_reg); 6497 break; 6498 case BPF_RSH: 6499 if (umax_val >= insn_bitness) { 6500 /* Shifts greater than 31 or 63 are undefined. 6501 * This includes shifts by a negative number. 6502 */ 6503 mark_reg_unknown(env, regs, insn->dst_reg); 6504 break; 6505 } 6506 if (alu32) 6507 scalar32_min_max_rsh(dst_reg, &src_reg); 6508 else 6509 scalar_min_max_rsh(dst_reg, &src_reg); 6510 break; 6511 case BPF_ARSH: 6512 if (umax_val >= insn_bitness) { 6513 /* Shifts greater than 31 or 63 are undefined. 6514 * This includes shifts by a negative number. 6515 */ 6516 mark_reg_unknown(env, regs, insn->dst_reg); 6517 break; 6518 } 6519 if (alu32) 6520 scalar32_min_max_arsh(dst_reg, &src_reg); 6521 else 6522 scalar_min_max_arsh(dst_reg, &src_reg); 6523 break; 6524 default: 6525 mark_reg_unknown(env, regs, insn->dst_reg); 6526 break; 6527 } 6528 6529 /* ALU32 ops are zero extended into 64bit register */ 6530 if (alu32) 6531 zext_32_to_64(dst_reg); 6532 6533 __update_reg_bounds(dst_reg); 6534 __reg_deduce_bounds(dst_reg); 6535 __reg_bound_offset(dst_reg); 6536 return 0; 6537 } 6538 6539 /* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max 6540 * and var_off. 
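 * Dispatches 'pointer op scalar' to adjust_ptr_min_max_vals() and
 * 'scalar op scalar' to adjust_scalar_min_max_vals() below.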
6541 */ 6542 static int adjust_reg_min_max_vals(struct bpf_verifier_env *env, 6543 struct bpf_insn *insn) 6544 { 6545 struct bpf_verifier_state *vstate = env->cur_state; 6546 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 6547 struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg; 6548 struct bpf_reg_state *ptr_reg = NULL, off_reg = {0}; 6549 u8 opcode = BPF_OP(insn->code); 6550 int err; 6551 6552 dst_reg = &regs[insn->dst_reg]; 6553 src_reg = NULL; 6554 if (dst_reg->type != SCALAR_VALUE) 6555 ptr_reg = dst_reg; 6556 else 6557 /* Make sure ID is cleared, otherwise dst_reg min/max could be 6558 * incorrectly propagated into other registers by find_equal_scalars() 6559 */ 6560 dst_reg->id = 0; 6561 if (BPF_SRC(insn->code) == BPF_X) { 6562 src_reg = &regs[insn->src_reg]; 6563 if (src_reg->type != SCALAR_VALUE) { 6564 if (dst_reg->type != SCALAR_VALUE) { 6565 /* Combining two pointers by any ALU op yields 6566 * an arbitrary scalar. Disallow all math except 6567 * pointer subtraction 6568 */ 6569 if (opcode == BPF_SUB && env->allow_ptr_leaks) { 6570 mark_reg_unknown(env, regs, insn->dst_reg); 6571 return 0; 6572 } 6573 verbose(env, "R%d pointer %s pointer prohibited\n", 6574 insn->dst_reg, 6575 bpf_alu_string[opcode >> 4]); 6576 return -EACCES; 6577 } else { 6578 /* scalar += pointer 6579 * This is legal, but we have to reverse our 6580 * src/dest handling in computing the range 6581 */ 6582 err = mark_chain_precision(env, insn->dst_reg); 6583 if (err) 6584 return err; 6585 return adjust_ptr_min_max_vals(env, insn, 6586 src_reg, dst_reg); 6587 } 6588 } else if (ptr_reg) { 6589 /* pointer += scalar */ 6590 err = mark_chain_precision(env, insn->src_reg); 6591 if (err) 6592 return err; 6593 return adjust_ptr_min_max_vals(env, insn, 6594 dst_reg, src_reg); 6595 } 6596 } else { 6597 /* Pretend the src is a reg with a known value, since we only 6598 * need to be able to read from this state.
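 * For example, for BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7) the immediate
 * 7 is modeled as a throwaway scalar register known to hold the
 * constant 7, so the code below needs no separate BPF_K handling.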
6599 */ 6600 off_reg.type = SCALAR_VALUE; 6601 __mark_reg_known(&off_reg, insn->imm); 6602 src_reg = &off_reg; 6603 if (ptr_reg) /* pointer += K */ 6604 return adjust_ptr_min_max_vals(env, insn, 6605 ptr_reg, src_reg); 6606 } 6607 6608 /* Got here implies adding two SCALAR_VALUEs */ 6609 if (WARN_ON_ONCE(ptr_reg)) { 6610 print_verifier_state(env, state); 6611 verbose(env, "verifier internal error: unexpected ptr_reg\n"); 6612 return -EINVAL; 6613 } 6614 if (WARN_ON(!src_reg)) { 6615 print_verifier_state(env, state); 6616 verbose(env, "verifier internal error: no src_reg\n"); 6617 return -EINVAL; 6618 } 6619 return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg); 6620 } 6621 6622 /* check validity of 32-bit and 64-bit arithmetic operations */ 6623 static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) 6624 { 6625 struct bpf_reg_state *regs = cur_regs(env); 6626 u8 opcode = BPF_OP(insn->code); 6627 int err; 6628 6629 if (opcode == BPF_END || opcode == BPF_NEG) { 6630 if (opcode == BPF_NEG) { 6631 if (BPF_SRC(insn->code) != 0 || 6632 insn->src_reg != BPF_REG_0 || 6633 insn->off != 0 || insn->imm != 0) { 6634 verbose(env, "BPF_NEG uses reserved fields\n"); 6635 return -EINVAL; 6636 } 6637 } else { 6638 if (insn->src_reg != BPF_REG_0 || insn->off != 0 || 6639 (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) || 6640 BPF_CLASS(insn->code) == BPF_ALU64) { 6641 verbose(env, "BPF_END uses reserved fields\n"); 6642 return -EINVAL; 6643 } 6644 } 6645 6646 /* check src operand */ 6647 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 6648 if (err) 6649 return err; 6650 6651 if (is_pointer_value(env, insn->dst_reg)) { 6652 verbose(env, "R%d pointer arithmetic prohibited\n", 6653 insn->dst_reg); 6654 return -EACCES; 6655 } 6656 6657 /* check dest operand */ 6658 err = check_reg_arg(env, insn->dst_reg, DST_OP); 6659 if (err) 6660 return err; 6661 6662 } else if (opcode == BPF_MOV) { 6663 6664 if (BPF_SRC(insn->code) == BPF_X) { 6665 if (insn->imm != 0 || insn->off != 0) { 6666 verbose(env, "BPF_MOV uses reserved fields\n"); 6667 return -EINVAL; 6668 } 6669 6670 /* check src operand */ 6671 err = check_reg_arg(env, insn->src_reg, SRC_OP); 6672 if (err) 6673 return err; 6674 } else { 6675 if (insn->src_reg != BPF_REG_0 || insn->off != 0) { 6676 verbose(env, "BPF_MOV uses reserved fields\n"); 6677 return -EINVAL; 6678 } 6679 } 6680 6681 /* check dest operand, mark as required later */ 6682 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); 6683 if (err) 6684 return err; 6685 6686 if (BPF_SRC(insn->code) == BPF_X) { 6687 struct bpf_reg_state *src_reg = regs + insn->src_reg; 6688 struct bpf_reg_state *dst_reg = regs + insn->dst_reg; 6689 6690 if (BPF_CLASS(insn->code) == BPF_ALU64) { 6691 /* case: R1 = R2 6692 * copy register state to dest reg 6693 */ 6694 if (src_reg->type == SCALAR_VALUE && !src_reg->id) 6695 /* Assign src and dst registers the same ID 6696 * that will be used by find_equal_scalars() 6697 * to propagate min/max range. 
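 * A sketch of the effect (hypothetical program):
 *   r2 = r1;             // r1 and r2 now share an ID
 *   if (r2 > 8) goto l;  // bounds learned on r2 in either branch
 *                        // are copied to r1 via the shared ID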
6698 */ 6699 src_reg->id = ++env->id_gen; 6700 *dst_reg = *src_reg; 6701 dst_reg->live |= REG_LIVE_WRITTEN; 6702 dst_reg->subreg_def = DEF_NOT_SUBREG; 6703 } else { 6704 /* R1 = (u32) R2 */ 6705 if (is_pointer_value(env, insn->src_reg)) { 6706 verbose(env, 6707 "R%d partial copy of pointer\n", 6708 insn->src_reg); 6709 return -EACCES; 6710 } else if (src_reg->type == SCALAR_VALUE) { 6711 *dst_reg = *src_reg; 6712 /* Make sure ID is cleared otherwise 6713 * dst_reg min/max could be incorrectly 6714 * propagated into src_reg by find_equal_scalars() 6715 */ 6716 dst_reg->id = 0; 6717 dst_reg->live |= REG_LIVE_WRITTEN; 6718 dst_reg->subreg_def = env->insn_idx + 1; 6719 } else { 6720 mark_reg_unknown(env, regs, 6721 insn->dst_reg); 6722 } 6723 zext_32_to_64(dst_reg); 6724 } 6725 } else { 6726 /* case: R = imm 6727 * remember the value we stored into this reg 6728 */ 6729 /* clear any state __mark_reg_known doesn't set */ 6730 mark_reg_unknown(env, regs, insn->dst_reg); 6731 regs[insn->dst_reg].type = SCALAR_VALUE; 6732 if (BPF_CLASS(insn->code) == BPF_ALU64) { 6733 __mark_reg_known(regs + insn->dst_reg, 6734 insn->imm); 6735 } else { 6736 __mark_reg_known(regs + insn->dst_reg, 6737 (u32)insn->imm); 6738 } 6739 } 6740 6741 } else if (opcode > BPF_END) { 6742 verbose(env, "invalid BPF_ALU opcode %x\n", opcode); 6743 return -EINVAL; 6744 6745 } else { /* all other ALU ops: and, sub, xor, add, ... */ 6746 6747 if (BPF_SRC(insn->code) == BPF_X) { 6748 if (insn->imm != 0 || insn->off != 0) { 6749 verbose(env, "BPF_ALU uses reserved fields\n"); 6750 return -EINVAL; 6751 } 6752 /* check src1 operand */ 6753 err = check_reg_arg(env, insn->src_reg, SRC_OP); 6754 if (err) 6755 return err; 6756 } else { 6757 if (insn->src_reg != BPF_REG_0 || insn->off != 0) { 6758 verbose(env, "BPF_ALU uses reserved fields\n"); 6759 return -EINVAL; 6760 } 6761 } 6762 6763 /* check src2 operand */ 6764 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 6765 if (err) 6766 return err; 6767 6768 if ((opcode == BPF_MOD || opcode == BPF_DIV) && 6769 BPF_SRC(insn->code) == BPF_K && insn->imm == 0) { 6770 verbose(env, "div by zero\n"); 6771 return -EINVAL; 6772 } 6773 6774 if ((opcode == BPF_LSH || opcode == BPF_RSH || 6775 opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { 6776 int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 
64 : 32; 6777 6778 if (insn->imm < 0 || insn->imm >= size) { 6779 verbose(env, "invalid shift %d\n", insn->imm); 6780 return -EINVAL; 6781 } 6782 } 6783 6784 /* check dest operand */ 6785 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); 6786 if (err) 6787 return err; 6788 6789 return adjust_reg_min_max_vals(env, insn); 6790 } 6791 6792 return 0; 6793 } 6794 6795 static void __find_good_pkt_pointers(struct bpf_func_state *state, 6796 struct bpf_reg_state *dst_reg, 6797 enum bpf_reg_type type, int new_range) 6798 { 6799 struct bpf_reg_state *reg; 6800 int i; 6801 6802 for (i = 0; i < MAX_BPF_REG; i++) { 6803 reg = &state->regs[i]; 6804 if (reg->type == type && reg->id == dst_reg->id) 6805 /* keep the maximum range already checked */ 6806 reg->range = max(reg->range, new_range); 6807 } 6808 6809 bpf_for_each_spilled_reg(i, state, reg) { 6810 if (!reg) 6811 continue; 6812 if (reg->type == type && reg->id == dst_reg->id) 6813 reg->range = max(reg->range, new_range); 6814 } 6815 } 6816 6817 static void find_good_pkt_pointers(struct bpf_verifier_state *vstate, 6818 struct bpf_reg_state *dst_reg, 6819 enum bpf_reg_type type, 6820 bool range_right_open) 6821 { 6822 int new_range, i; 6823 6824 if (dst_reg->off < 0 || 6825 (dst_reg->off == 0 && range_right_open)) 6826 /* This doesn't give us any range */ 6827 return; 6828 6829 if (dst_reg->umax_value > MAX_PACKET_OFF || 6830 dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF) 6831 /* Risk of overflow. For instance, ptr + (1<<63) may be less 6832 * than pkt_end, but that's because it's also less than pkt. 6833 */ 6834 return; 6835 6836 new_range = dst_reg->off; 6837 if (range_right_open) 6838 new_range--; 6839 6840 /* Examples for register markings: 6841 * 6842 * pkt_data in dst register: 6843 * 6844 * r2 = r3; 6845 * r2 += 8; 6846 * if (r2 > pkt_end) goto <handle exception> 6847 * <access okay> 6848 * 6849 * r2 = r3; 6850 * r2 += 8; 6851 * if (r2 < pkt_end) goto <access okay> 6852 * <handle exception> 6853 * 6854 * Where: 6855 * r2 == dst_reg, pkt_end == src_reg 6856 * r2=pkt(id=n,off=8,r=0) 6857 * r3=pkt(id=n,off=0,r=0) 6858 * 6859 * pkt_data in src register: 6860 * 6861 * r2 = r3; 6862 * r2 += 8; 6863 * if (pkt_end >= r2) goto <access okay> 6864 * <handle exception> 6865 * 6866 * r2 = r3; 6867 * r2 += 8; 6868 * if (pkt_end <= r2) goto <handle exception> 6869 * <access okay> 6870 * 6871 * Where: 6872 * pkt_end == dst_reg, r2 == src_reg 6873 * r2=pkt(id=n,off=8,r=0) 6874 * r3=pkt(id=n,off=0,r=0) 6875 * 6876 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8) 6877 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8) 6878 * and [r3, r3 + 8-1) respectively is safe to access depending on 6879 * the check. 6880 */ 6881 6882 /* If our ids match, then we must have the same max_value. And we 6883 * don't care about the other reg's fixed offset, since if it's too big 6884 * the range won't allow anything. 6885 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16. 
6886 */ 6887 for (i = 0; i <= vstate->curframe; i++) 6888 __find_good_pkt_pointers(vstate->frame[i], dst_reg, type, 6889 new_range); 6890 } 6891 6892 static int is_branch32_taken(struct bpf_reg_state *reg, u32 val, u8 opcode) 6893 { 6894 struct tnum subreg = tnum_subreg(reg->var_off); 6895 s32 sval = (s32)val; 6896 6897 switch (opcode) { 6898 case BPF_JEQ: 6899 if (tnum_is_const(subreg)) 6900 return !!tnum_equals_const(subreg, val); 6901 break; 6902 case BPF_JNE: 6903 if (tnum_is_const(subreg)) 6904 return !tnum_equals_const(subreg, val); 6905 break; 6906 case BPF_JSET: 6907 if ((~subreg.mask & subreg.value) & val) 6908 return 1; 6909 if (!((subreg.mask | subreg.value) & val)) 6910 return 0; 6911 break; 6912 case BPF_JGT: 6913 if (reg->u32_min_value > val) 6914 return 1; 6915 else if (reg->u32_max_value <= val) 6916 return 0; 6917 break; 6918 case BPF_JSGT: 6919 if (reg->s32_min_value > sval) 6920 return 1; 6921 else if (reg->s32_max_value < sval) 6922 return 0; 6923 break; 6924 case BPF_JLT: 6925 if (reg->u32_max_value < val) 6926 return 1; 6927 else if (reg->u32_min_value >= val) 6928 return 0; 6929 break; 6930 case BPF_JSLT: 6931 if (reg->s32_max_value < sval) 6932 return 1; 6933 else if (reg->s32_min_value >= sval) 6934 return 0; 6935 break; 6936 case BPF_JGE: 6937 if (reg->u32_min_value >= val) 6938 return 1; 6939 else if (reg->u32_max_value < val) 6940 return 0; 6941 break; 6942 case BPF_JSGE: 6943 if (reg->s32_min_value >= sval) 6944 return 1; 6945 else if (reg->s32_max_value < sval) 6946 return 0; 6947 break; 6948 case BPF_JLE: 6949 if (reg->u32_max_value <= val) 6950 return 1; 6951 else if (reg->u32_min_value > val) 6952 return 0; 6953 break; 6954 case BPF_JSLE: 6955 if (reg->s32_max_value <= sval) 6956 return 1; 6957 else if (reg->s32_min_value > sval) 6958 return 0; 6959 break; 6960 } 6961 6962 return -1; 6963 } 6964 6965 6966 static int is_branch64_taken(struct bpf_reg_state *reg, u64 val, u8 opcode) 6967 { 6968 s64 sval = (s64)val; 6969 6970 switch (opcode) { 6971 case BPF_JEQ: 6972 if (tnum_is_const(reg->var_off)) 6973 return !!tnum_equals_const(reg->var_off, val); 6974 break; 6975 case BPF_JNE: 6976 if (tnum_is_const(reg->var_off)) 6977 return !tnum_equals_const(reg->var_off, val); 6978 break; 6979 case BPF_JSET: 6980 if ((~reg->var_off.mask & reg->var_off.value) & val) 6981 return 1; 6982 if (!((reg->var_off.mask | reg->var_off.value) & val)) 6983 return 0; 6984 break; 6985 case BPF_JGT: 6986 if (reg->umin_value > val) 6987 return 1; 6988 else if (reg->umax_value <= val) 6989 return 0; 6990 break; 6991 case BPF_JSGT: 6992 if (reg->smin_value > sval) 6993 return 1; 6994 else if (reg->smax_value < sval) 6995 return 0; 6996 break; 6997 case BPF_JLT: 6998 if (reg->umax_value < val) 6999 return 1; 7000 else if (reg->umin_value >= val) 7001 return 0; 7002 break; 7003 case BPF_JSLT: 7004 if (reg->smax_value < sval) 7005 return 1; 7006 else if (reg->smin_value >= sval) 7007 return 0; 7008 break; 7009 case BPF_JGE: 7010 if (reg->umin_value >= val) 7011 return 1; 7012 else if (reg->umax_value < val) 7013 return 0; 7014 break; 7015 case BPF_JSGE: 7016 if (reg->smin_value >= sval) 7017 return 1; 7018 else if (reg->smax_value < sval) 7019 return 0; 7020 break; 7021 case BPF_JLE: 7022 if (reg->umax_value <= val) 7023 return 1; 7024 else if (reg->umin_value > val) 7025 return 0; 7026 break; 7027 case BPF_JSLE: 7028 if (reg->smax_value <= sval) 7029 return 1; 7030 else if (reg->smin_value > sval) 7031 return 0; 7032 break; 7033 } 7034 7035 return -1; 7036 } 7037 7038 /* compute branch 
direction of the expression "if (reg opcode val) goto target;" 7039 * and return: 7040 * 1 - branch will be taken and "goto target" will be executed 7041 * 0 - branch will not be taken and fall-through to next insn 7042 * -1 - unknown. Example: "if (reg < 5)" is unknown when the register value 7043 * is in the range [0,10] 7044 */ 7045 static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode, 7046 bool is_jmp32) 7047 { 7048 if (__is_pointer_value(false, reg)) { 7049 if (!reg_type_not_null(reg->type)) 7050 return -1; 7051 7052 /* If the pointer is valid, tests against zero will fail, so we 7053 * can use this to determine the branch direction. 7054 */ 7055 if (val != 0) 7056 return -1; 7057 7058 switch (opcode) { 7059 case BPF_JEQ: 7060 return 0; 7061 case BPF_JNE: 7062 return 1; 7063 default: 7064 return -1; 7065 } 7066 } 7067 7068 if (is_jmp32) 7069 return is_branch32_taken(reg, val, opcode); 7070 return is_branch64_taken(reg, val, opcode); 7071 } 7072 7073 static int flip_opcode(u32 opcode) 7074 { 7075 /* How can we transform "a <op> b" into "b <op> a"? */ 7076 static const u8 opcode_flip[16] = { 7077 /* these stay the same */ 7078 [BPF_JEQ >> 4] = BPF_JEQ, 7079 [BPF_JNE >> 4] = BPF_JNE, 7080 [BPF_JSET >> 4] = BPF_JSET, 7081 /* these swap "lesser" and "greater" (L and G in the opcodes) */ 7082 [BPF_JGE >> 4] = BPF_JLE, 7083 [BPF_JGT >> 4] = BPF_JLT, 7084 [BPF_JLE >> 4] = BPF_JGE, 7085 [BPF_JLT >> 4] = BPF_JGT, 7086 [BPF_JSGE >> 4] = BPF_JSLE, 7087 [BPF_JSGT >> 4] = BPF_JSLT, 7088 [BPF_JSLE >> 4] = BPF_JSGE, 7089 [BPF_JSLT >> 4] = BPF_JSGT 7090 }; 7091 return opcode_flip[opcode >> 4]; 7092 } 7093 7094 static int is_pkt_ptr_branch_taken(struct bpf_reg_state *dst_reg, 7095 struct bpf_reg_state *src_reg, 7096 u8 opcode) 7097 { 7098 struct bpf_reg_state *pkt; 7099 7100 if (src_reg->type == PTR_TO_PACKET_END) { 7101 pkt = dst_reg; 7102 } else if (dst_reg->type == PTR_TO_PACKET_END) { 7103 pkt = src_reg; 7104 opcode = flip_opcode(opcode); 7105 } else { 7106 return -1; 7107 } 7108 7109 if (pkt->range >= 0) 7110 return -1; 7111 7112 switch (opcode) { 7113 case BPF_JLE: 7114 /* pkt <= pkt_end */ 7115 fallthrough; 7116 case BPF_JGT: 7117 /* pkt > pkt_end */ 7118 if (pkt->range == BEYOND_PKT_END) 7119 /* pkt has at least one extra byte beyond pkt_end */ 7120 return opcode == BPF_JGT; 7121 break; 7122 case BPF_JLT: 7123 /* pkt < pkt_end */ 7124 fallthrough; 7125 case BPF_JGE: 7126 /* pkt >= pkt_end */ 7127 if (pkt->range == BEYOND_PKT_END || pkt->range == AT_PKT_END) 7128 return opcode == BPF_JGE; 7129 break; 7130 } 7131 return -1; 7132 } 7133 7134 /* Adjusts the register min/max values in the case that the dst_reg is the 7135 * variable register that we are working on, and src_reg is a constant or we're 7136 * simply doing a BPF_K check. 7137 * In JEQ/JNE cases we also adjust the var_off values. 7138 */ 7139 static void reg_set_min_max(struct bpf_reg_state *true_reg, 7140 struct bpf_reg_state *false_reg, 7141 u64 val, u32 val32, 7142 u8 opcode, bool is_jmp32) 7143 { 7144 struct tnum false_32off = tnum_subreg(false_reg->var_off); 7145 struct tnum false_64off = false_reg->var_off; 7146 struct tnum true_32off = tnum_subreg(true_reg->var_off); 7147 struct tnum true_64off = true_reg->var_off; 7148 s64 sval = (s64)val; 7149 s32 sval32 = (s32)val32; 7150 7151 /* If the dst_reg is a pointer, we can't learn anything about its 7152 * variable offset from the compare (unless src_reg were a pointer into 7153 * the same object, but we don't bother with that).
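 * A pointer compared against a constant mostly tells us about
 * NULL-ness, and that is handled separately (see
 * mark_ptr_or_null_reg()).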
7154 * Since false_reg and true_reg have the same type by construction, we 7155 * only need to check one of them for pointerness. 7156 */ 7157 if (__is_pointer_value(false, false_reg)) 7158 return; 7159 7160 switch (opcode) { 7161 case BPF_JEQ: 7162 case BPF_JNE: 7163 { 7164 struct bpf_reg_state *reg = 7165 opcode == BPF_JEQ ? true_reg : false_reg; 7166 7167 /* JEQ/JNE comparison doesn't change the register equivalence. 7168 * r1 = r2; 7169 * if (r1 == 42) goto label; 7170 * ... 7171 * label: // here both r1 and r2 are known to be 42. 7172 * 7173 * Hence when marking a register as known, preserve its ID. 7174 */ 7175 if (is_jmp32) 7176 __mark_reg32_known(reg, val32); 7177 else 7178 ___mark_reg_known(reg, val); 7179 break; 7180 } 7181 case BPF_JSET: 7182 if (is_jmp32) { 7183 false_32off = tnum_and(false_32off, tnum_const(~val32)); 7184 if (is_power_of_2(val32)) 7185 true_32off = tnum_or(true_32off, 7186 tnum_const(val32)); 7187 } else { 7188 false_64off = tnum_and(false_64off, tnum_const(~val)); 7189 if (is_power_of_2(val)) 7190 true_64off = tnum_or(true_64off, 7191 tnum_const(val)); 7192 } 7193 break; 7194 case BPF_JGE: 7195 case BPF_JGT: 7196 { 7197 if (is_jmp32) { 7198 u32 false_umax = opcode == BPF_JGT ? val32 : val32 - 1; 7199 u32 true_umin = opcode == BPF_JGT ? val32 + 1 : val32; 7200 7201 false_reg->u32_max_value = min(false_reg->u32_max_value, 7202 false_umax); 7203 true_reg->u32_min_value = max(true_reg->u32_min_value, 7204 true_umin); 7205 } else { 7206 u64 false_umax = opcode == BPF_JGT ? val : val - 1; 7207 u64 true_umin = opcode == BPF_JGT ? val + 1 : val; 7208 7209 false_reg->umax_value = min(false_reg->umax_value, false_umax); 7210 true_reg->umin_value = max(true_reg->umin_value, true_umin); 7211 } 7212 break; 7213 } 7214 case BPF_JSGE: 7215 case BPF_JSGT: 7216 { 7217 if (is_jmp32) { 7218 s32 false_smax = opcode == BPF_JSGT ? sval32 : sval32 - 1; 7219 s32 true_smin = opcode == BPF_JSGT ? sval32 + 1 : sval32; 7220 7221 false_reg->s32_max_value = min(false_reg->s32_max_value, false_smax); 7222 true_reg->s32_min_value = max(true_reg->s32_min_value, true_smin); 7223 } else { 7224 s64 false_smax = opcode == BPF_JSGT ? sval : sval - 1; 7225 s64 true_smin = opcode == BPF_JSGT ? sval + 1 : sval; 7226 7227 false_reg->smax_value = min(false_reg->smax_value, false_smax); 7228 true_reg->smin_value = max(true_reg->smin_value, true_smin); 7229 } 7230 break; 7231 } 7232 case BPF_JLE: 7233 case BPF_JLT: 7234 { 7235 if (is_jmp32) { 7236 u32 false_umin = opcode == BPF_JLT ? val32 : val32 + 1; 7237 u32 true_umax = opcode == BPF_JLT ? val32 - 1 : val32; 7238 7239 false_reg->u32_min_value = max(false_reg->u32_min_value, 7240 false_umin); 7241 true_reg->u32_max_value = min(true_reg->u32_max_value, 7242 true_umax); 7243 } else { 7244 u64 false_umin = opcode == BPF_JLT ? val : val + 1; 7245 u64 true_umax = opcode == BPF_JLT ? val - 1 : val; 7246 7247 false_reg->umin_value = max(false_reg->umin_value, false_umin); 7248 true_reg->umax_value = min(true_reg->umax_value, true_umax); 7249 } 7250 break; 7251 } 7252 case BPF_JSLE: 7253 case BPF_JSLT: 7254 { 7255 if (is_jmp32) { 7256 s32 false_smin = opcode == BPF_JSLT ? sval32 : sval32 + 1; 7257 s32 true_smax = opcode == BPF_JSLT ? sval32 - 1 : sval32; 7258 7259 false_reg->s32_min_value = max(false_reg->s32_min_value, false_smin); 7260 true_reg->s32_max_value = min(true_reg->s32_max_value, true_smax); 7261 } else { 7262 s64 false_smin = opcode == BPF_JSLT ? sval : sval + 1; 7263 s64 true_smax = opcode == BPF_JSLT ?
sval - 1 : sval; 7264 7265 false_reg->smin_value = max(false_reg->smin_value, false_smin); 7266 true_reg->smax_value = min(true_reg->smax_value, true_smax); 7267 } 7268 break; 7269 } 7270 default: 7271 return; 7272 } 7273 7274 if (is_jmp32) { 7275 false_reg->var_off = tnum_or(tnum_clear_subreg(false_64off), 7276 tnum_subreg(false_32off)); 7277 true_reg->var_off = tnum_or(tnum_clear_subreg(true_64off), 7278 tnum_subreg(true_32off)); 7279 __reg_combine_32_into_64(false_reg); 7280 __reg_combine_32_into_64(true_reg); 7281 } else { 7282 false_reg->var_off = false_64off; 7283 true_reg->var_off = true_64off; 7284 __reg_combine_64_into_32(false_reg); 7285 __reg_combine_64_into_32(true_reg); 7286 } 7287 } 7288 7289 /* Same as above, but for the case that dst_reg holds a constant and src_reg is 7290 * the variable reg. 7291 */ 7292 static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, 7293 struct bpf_reg_state *false_reg, 7294 u64 val, u32 val32, 7295 u8 opcode, bool is_jmp32) 7296 { 7297 opcode = flip_opcode(opcode); 7298 /* This uses zero as "not present in table"; luckily the zero opcode, 7299 * BPF_JA, can't get here. 7300 */ 7301 if (opcode) 7302 reg_set_min_max(true_reg, false_reg, val, val32, opcode, is_jmp32); 7303 } 7304 7305 /* Regs are known to be equal, so intersect their min/max/var_off */ 7306 static void __reg_combine_min_max(struct bpf_reg_state *src_reg, 7307 struct bpf_reg_state *dst_reg) 7308 { 7309 src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value, 7310 dst_reg->umin_value); 7311 src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value, 7312 dst_reg->umax_value); 7313 src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value, 7314 dst_reg->smin_value); 7315 src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value, 7316 dst_reg->smax_value); 7317 src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off, 7318 dst_reg->var_off); 7319 /* We might have learned new bounds from the var_off. */ 7320 __update_reg_bounds(src_reg); 7321 __update_reg_bounds(dst_reg); 7322 /* We might have learned something about the sign bit. */ 7323 __reg_deduce_bounds(src_reg); 7324 __reg_deduce_bounds(dst_reg); 7325 /* We might have learned some bits from the bounds. */ 7326 __reg_bound_offset(src_reg); 7327 __reg_bound_offset(dst_reg); 7328 /* Intersecting with the old var_off might have improved our bounds 7329 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), 7330 * then new var_off is (0; 0x7f...fc) which improves our umax. 7331 */ 7332 __update_reg_bounds(src_reg); 7333 __update_reg_bounds(dst_reg); 7334 } 7335 7336 static void reg_combine_min_max(struct bpf_reg_state *true_src, 7337 struct bpf_reg_state *true_dst, 7338 struct bpf_reg_state *false_src, 7339 struct bpf_reg_state *false_dst, 7340 u8 opcode) 7341 { 7342 switch (opcode) { 7343 case BPF_JEQ: 7344 __reg_combine_min_max(true_src, true_dst); 7345 break; 7346 case BPF_JNE: 7347 __reg_combine_min_max(false_src, false_dst); 7348 break; 7349 } 7350 } 7351 7352 static void mark_ptr_or_null_reg(struct bpf_func_state *state, 7353 struct bpf_reg_state *reg, u32 id, 7354 bool is_null) 7355 { 7356 if (reg_type_may_be_null(reg->type) && reg->id == id && 7357 !WARN_ON_ONCE(!reg->id)) { 7358 /* Old offset (both fixed and variable parts) should 7359 * have been known-zero, because we don't allow pointer 7360 * arithmetic on pointers that might be NULL. 
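 *
 * A minimal sketch of the pattern handled here (informal illustration,
 * not taken from the surrounding code):
 *
 *   r0 = bpf_map_lookup_elem(...)   // R0: PTR_TO_MAP_VALUE_OR_NULL, id = N
 *   if r0 == 0 goto out             // is_null = true on the taken branch
 *   *(u32 *)(r0 + 0) = 1            // R0 is PTR_TO_MAP_VALUE here
 *
 * Note that offset 0 is the only offset a maybe-NULL pointer may carry
 * at the time of the check, which is what the WARN_ON_ONCE below asserts.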
7361 */ 7362 if (WARN_ON_ONCE(reg->smin_value || reg->smax_value || 7363 !tnum_equals_const(reg->var_off, 0) || 7364 reg->off)) { 7365 __mark_reg_known_zero(reg); 7366 reg->off = 0; 7367 } 7368 if (is_null) { 7369 reg->type = SCALAR_VALUE; 7370 } else if (reg->type == PTR_TO_MAP_VALUE_OR_NULL) { 7371 const struct bpf_map *map = reg->map_ptr; 7372 7373 if (map->inner_map_meta) { 7374 reg->type = CONST_PTR_TO_MAP; 7375 reg->map_ptr = map->inner_map_meta; 7376 } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) { 7377 reg->type = PTR_TO_XDP_SOCK; 7378 } else if (map->map_type == BPF_MAP_TYPE_SOCKMAP || 7379 map->map_type == BPF_MAP_TYPE_SOCKHASH) { 7380 reg->type = PTR_TO_SOCKET; 7381 } else { 7382 reg->type = PTR_TO_MAP_VALUE; 7383 } 7384 } else if (reg->type == PTR_TO_SOCKET_OR_NULL) { 7385 reg->type = PTR_TO_SOCKET; 7386 } else if (reg->type == PTR_TO_SOCK_COMMON_OR_NULL) { 7387 reg->type = PTR_TO_SOCK_COMMON; 7388 } else if (reg->type == PTR_TO_TCP_SOCK_OR_NULL) { 7389 reg->type = PTR_TO_TCP_SOCK; 7390 } else if (reg->type == PTR_TO_BTF_ID_OR_NULL) { 7391 reg->type = PTR_TO_BTF_ID; 7392 } else if (reg->type == PTR_TO_MEM_OR_NULL) { 7393 reg->type = PTR_TO_MEM; 7394 } else if (reg->type == PTR_TO_RDONLY_BUF_OR_NULL) { 7395 reg->type = PTR_TO_RDONLY_BUF; 7396 } else if (reg->type == PTR_TO_RDWR_BUF_OR_NULL) { 7397 reg->type = PTR_TO_RDWR_BUF; 7398 } 7399 if (is_null) { 7400 /* We don't need id and ref_obj_id from this point 7401 * onwards anymore, thus we should better reset it, 7402 * so that state pruning has chances to take effect. 7403 */ 7404 reg->id = 0; 7405 reg->ref_obj_id = 0; 7406 } else if (!reg_may_point_to_spin_lock(reg)) { 7407 /* For not-NULL ptr, reg->ref_obj_id will be reset 7408 * in release_reg_references(). 7409 * 7410 * reg->id is still used by spin_lock ptr. Other 7411 * than spin_lock ptr type, reg->id can be reset. 7412 */ 7413 reg->id = 0; 7414 } 7415 } 7416 } 7417 7418 static void __mark_ptr_or_null_regs(struct bpf_func_state *state, u32 id, 7419 bool is_null) 7420 { 7421 struct bpf_reg_state *reg; 7422 int i; 7423 7424 for (i = 0; i < MAX_BPF_REG; i++) 7425 mark_ptr_or_null_reg(state, &state->regs[i], id, is_null); 7426 7427 bpf_for_each_spilled_reg(i, state, reg) { 7428 if (!reg) 7429 continue; 7430 mark_ptr_or_null_reg(state, reg, id, is_null); 7431 } 7432 } 7433 7434 /* The logic is similar to find_good_pkt_pointers(), both could eventually 7435 * be folded together at some point. 7436 */ 7437 static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno, 7438 bool is_null) 7439 { 7440 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 7441 struct bpf_reg_state *regs = state->regs; 7442 u32 ref_obj_id = regs[regno].ref_obj_id; 7443 u32 id = regs[regno].id; 7444 int i; 7445 7446 if (ref_obj_id && ref_obj_id == id && is_null) 7447 /* regs[regno] is in the " == NULL" branch. 7448 * No one could have freed the reference state before 7449 * doing the NULL check. 7450 */ 7451 WARN_ON_ONCE(release_reference_state(state, id)); 7452 7453 for (i = 0; i <= vstate->curframe; i++) 7454 __mark_ptr_or_null_regs(vstate->frame[i], id, is_null); 7455 } 7456 7457 static bool try_match_pkt_pointers(const struct bpf_insn *insn, 7458 struct bpf_reg_state *dst_reg, 7459 struct bpf_reg_state *src_reg, 7460 struct bpf_verifier_state *this_branch, 7461 struct bpf_verifier_state *other_branch) 7462 { 7463 if (BPF_SRC(insn->code) != BPF_X) 7464 return false; 7465 7466 /* Pointers are always 64-bit. 
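 * so a JMP32 comparison can never be one of the packet-pointer patterns
 * matched below. The shape being recognized is the classic bounds check,
 * e.g. (informal illustration):
 *
 *   if (pkt_data + 8 > pkt_end) goto drop;
 *
 * after which 8 bytes at pkt_data are known in-bounds on the fall-through
 * path, letting find_good_pkt_pointers() extend the usable range.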
*/ 7467 if (BPF_CLASS(insn->code) == BPF_JMP32) 7468 return false; 7469 7470 switch (BPF_OP(insn->code)) { 7471 case BPF_JGT: 7472 if ((dst_reg->type == PTR_TO_PACKET && 7473 src_reg->type == PTR_TO_PACKET_END) || 7474 (dst_reg->type == PTR_TO_PACKET_META && 7475 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 7476 /* pkt_data' > pkt_end, pkt_meta' > pkt_data */ 7477 find_good_pkt_pointers(this_branch, dst_reg, 7478 dst_reg->type, false); 7479 mark_pkt_end(other_branch, insn->dst_reg, true); 7480 } else if ((dst_reg->type == PTR_TO_PACKET_END && 7481 src_reg->type == PTR_TO_PACKET) || 7482 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && 7483 src_reg->type == PTR_TO_PACKET_META)) { 7484 /* pkt_end > pkt_data', pkt_data > pkt_meta' */ 7485 find_good_pkt_pointers(other_branch, src_reg, 7486 src_reg->type, true); 7487 mark_pkt_end(this_branch, insn->src_reg, false); 7488 } else { 7489 return false; 7490 } 7491 break; 7492 case BPF_JLT: 7493 if ((dst_reg->type == PTR_TO_PACKET && 7494 src_reg->type == PTR_TO_PACKET_END) || 7495 (dst_reg->type == PTR_TO_PACKET_META && 7496 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 7497 /* pkt_data' < pkt_end, pkt_meta' < pkt_data */ 7498 find_good_pkt_pointers(other_branch, dst_reg, 7499 dst_reg->type, true); 7500 mark_pkt_end(this_branch, insn->dst_reg, false); 7501 } else if ((dst_reg->type == PTR_TO_PACKET_END && 7502 src_reg->type == PTR_TO_PACKET) || 7503 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && 7504 src_reg->type == PTR_TO_PACKET_META)) { 7505 /* pkt_end < pkt_data', pkt_data < pkt_meta' */ 7506 find_good_pkt_pointers(this_branch, src_reg, 7507 src_reg->type, false); 7508 mark_pkt_end(other_branch, insn->src_reg, true); 7509 } else { 7510 return false; 7511 } 7512 break; 7513 case BPF_JGE: 7514 if ((dst_reg->type == PTR_TO_PACKET && 7515 src_reg->type == PTR_TO_PACKET_END) || 7516 (dst_reg->type == PTR_TO_PACKET_META && 7517 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 7518 /* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */ 7519 find_good_pkt_pointers(this_branch, dst_reg, 7520 dst_reg->type, true); 7521 mark_pkt_end(other_branch, insn->dst_reg, false); 7522 } else if ((dst_reg->type == PTR_TO_PACKET_END && 7523 src_reg->type == PTR_TO_PACKET) || 7524 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && 7525 src_reg->type == PTR_TO_PACKET_META)) { 7526 /* pkt_end >= pkt_data', pkt_data >= pkt_meta' */ 7527 find_good_pkt_pointers(other_branch, src_reg, 7528 src_reg->type, false); 7529 mark_pkt_end(this_branch, insn->src_reg, true); 7530 } else { 7531 return false; 7532 } 7533 break; 7534 case BPF_JLE: 7535 if ((dst_reg->type == PTR_TO_PACKET && 7536 src_reg->type == PTR_TO_PACKET_END) || 7537 (dst_reg->type == PTR_TO_PACKET_META && 7538 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { 7539 /* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */ 7540 find_good_pkt_pointers(other_branch, dst_reg, 7541 dst_reg->type, false); 7542 mark_pkt_end(this_branch, insn->dst_reg, true); 7543 } else if ((dst_reg->type == PTR_TO_PACKET_END && 7544 src_reg->type == PTR_TO_PACKET) || 7545 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && 7546 src_reg->type == PTR_TO_PACKET_META)) { 7547 /* pkt_end <= pkt_data', pkt_data <= pkt_meta' */ 7548 find_good_pkt_pointers(this_branch, src_reg, 7549 src_reg->type, true); 7550 mark_pkt_end(other_branch, insn->src_reg, false); 7551 } else { 7552 return false; 7553 } 7554 break; 7555 default: 7556 return false; 7557 } 7558 7559 return true; 7560 } 7561 7562 static void find_equal_scalars(struct
bpf_verifier_state *vstate, 7563 struct bpf_reg_state *known_reg) 7564 { 7565 struct bpf_func_state *state; 7566 struct bpf_reg_state *reg; 7567 int i, j; 7568 7569 for (i = 0; i <= vstate->curframe; i++) { 7570 state = vstate->frame[i]; 7571 for (j = 0; j < MAX_BPF_REG; j++) { 7572 reg = &state->regs[j]; 7573 if (reg->type == SCALAR_VALUE && reg->id == known_reg->id) 7574 *reg = *known_reg; 7575 } 7576 7577 bpf_for_each_spilled_reg(j, state, reg) { 7578 if (!reg) 7579 continue; 7580 if (reg->type == SCALAR_VALUE && reg->id == known_reg->id) 7581 *reg = *known_reg; 7582 } 7583 } 7584 } 7585 7586 static int check_cond_jmp_op(struct bpf_verifier_env *env, 7587 struct bpf_insn *insn, int *insn_idx) 7588 { 7589 struct bpf_verifier_state *this_branch = env->cur_state; 7590 struct bpf_verifier_state *other_branch; 7591 struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs; 7592 struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL; 7593 u8 opcode = BPF_OP(insn->code); 7594 bool is_jmp32; 7595 int pred = -1; 7596 int err; 7597 7598 /* Only conditional jumps are expected to reach here. */ 7599 if (opcode == BPF_JA || opcode > BPF_JSLE) { 7600 verbose(env, "invalid BPF_JMP/JMP32 opcode %x\n", opcode); 7601 return -EINVAL; 7602 } 7603 7604 if (BPF_SRC(insn->code) == BPF_X) { 7605 if (insn->imm != 0) { 7606 verbose(env, "BPF_JMP/JMP32 uses reserved fields\n"); 7607 return -EINVAL; 7608 } 7609 7610 /* check src1 operand */ 7611 err = check_reg_arg(env, insn->src_reg, SRC_OP); 7612 if (err) 7613 return err; 7614 7615 if (is_pointer_value(env, insn->src_reg)) { 7616 verbose(env, "R%d pointer comparison prohibited\n", 7617 insn->src_reg); 7618 return -EACCES; 7619 } 7620 src_reg = &regs[insn->src_reg]; 7621 } else { 7622 if (insn->src_reg != BPF_REG_0) { 7623 verbose(env, "BPF_JMP/JMP32 uses reserved fields\n"); 7624 return -EINVAL; 7625 } 7626 } 7627 7628 /* check src2 operand */ 7629 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 7630 if (err) 7631 return err; 7632 7633 dst_reg = &regs[insn->dst_reg]; 7634 is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; 7635 7636 if (BPF_SRC(insn->code) == BPF_K) { 7637 pred = is_branch_taken(dst_reg, insn->imm, opcode, is_jmp32); 7638 } else if (src_reg->type == SCALAR_VALUE && 7639 is_jmp32 && tnum_is_const(tnum_subreg(src_reg->var_off))) { 7640 pred = is_branch_taken(dst_reg, 7641 tnum_subreg(src_reg->var_off).value, 7642 opcode, 7643 is_jmp32); 7644 } else if (src_reg->type == SCALAR_VALUE && 7645 !is_jmp32 && tnum_is_const(src_reg->var_off)) { 7646 pred = is_branch_taken(dst_reg, 7647 src_reg->var_off.value, 7648 opcode, 7649 is_jmp32); 7650 } else if (reg_is_pkt_pointer_any(dst_reg) && 7651 reg_is_pkt_pointer_any(src_reg) && 7652 !is_jmp32) { 7653 pred = is_pkt_ptr_branch_taken(dst_reg, src_reg, opcode); 7654 } 7655 7656 if (pred >= 0) { 7657 /* If we get here with a dst_reg pointer type it is because 7658 * above is_branch_taken() special cased the 0 comparison.
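 *
 * Informal illustration: once r0 = bpf_sk_fullsock(...) has been
 * NULL-checked, r0 carries a reg_type_not_null() type such as
 * PTR_TO_SOCKET, so a later "if r0 != 0" yields pred == 1; only the
 * taken branch is pushed and no precision mark is needed for r0 itself.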
*/ 7660 if (!__is_pointer_value(false, dst_reg)) 7661 err = mark_chain_precision(env, insn->dst_reg); 7662 if (BPF_SRC(insn->code) == BPF_X && !err && 7663 !__is_pointer_value(false, src_reg)) 7664 err = mark_chain_precision(env, insn->src_reg); 7665 if (err) 7666 return err; 7667 } 7668 if (pred == 1) { 7669 /* only follow the goto, ignore fall-through */ 7670 *insn_idx += insn->off; 7671 return 0; 7672 } else if (pred == 0) { 7673 /* only follow fall-through branch, since 7674 * that's where the program will go 7675 */ 7676 return 0; 7677 } 7678 7679 other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx, 7680 false); 7681 if (!other_branch) 7682 return -EFAULT; 7683 other_branch_regs = other_branch->frame[other_branch->curframe]->regs; 7684 7685 /* detect if we are comparing against a constant value so we can adjust 7686 * our min/max values for our dst register. 7687 * this is only legit if both are scalars (or pointers to the same 7688 * object, I suppose, but we don't support that right now), because 7689 * otherwise the different base pointers mean the offsets aren't 7690 * comparable. 7691 */ 7692 if (BPF_SRC(insn->code) == BPF_X) { 7693 struct bpf_reg_state *src_reg = &regs[insn->src_reg]; 7694 7695 if (dst_reg->type == SCALAR_VALUE && 7696 src_reg->type == SCALAR_VALUE) { 7697 if (tnum_is_const(src_reg->var_off) || 7698 (is_jmp32 && 7699 tnum_is_const(tnum_subreg(src_reg->var_off)))) 7700 reg_set_min_max(&other_branch_regs[insn->dst_reg], 7701 dst_reg, 7702 src_reg->var_off.value, 7703 tnum_subreg(src_reg->var_off).value, 7704 opcode, is_jmp32); 7705 else if (tnum_is_const(dst_reg->var_off) || 7706 (is_jmp32 && 7707 tnum_is_const(tnum_subreg(dst_reg->var_off)))) 7708 reg_set_min_max_inv(&other_branch_regs[insn->src_reg], 7709 src_reg, 7710 dst_reg->var_off.value, 7711 tnum_subreg(dst_reg->var_off).value, 7712 opcode, is_jmp32); 7713 else if (!is_jmp32 && 7714 (opcode == BPF_JEQ || opcode == BPF_JNE)) 7715 /* Comparing for equality, we can combine knowledge */ 7716 reg_combine_min_max(&other_branch_regs[insn->src_reg], 7717 &other_branch_regs[insn->dst_reg], 7718 src_reg, dst_reg, opcode); 7719 if (src_reg->id && 7720 !WARN_ON_ONCE(src_reg->id != other_branch_regs[insn->src_reg].id)) { 7721 find_equal_scalars(this_branch, src_reg); 7722 find_equal_scalars(other_branch, &other_branch_regs[insn->src_reg]); 7723 } 7724 7725 } 7726 } else if (dst_reg->type == SCALAR_VALUE) { 7727 reg_set_min_max(&other_branch_regs[insn->dst_reg], 7728 dst_reg, insn->imm, (u32)insn->imm, 7729 opcode, is_jmp32); 7730 } 7731 7732 if (dst_reg->type == SCALAR_VALUE && dst_reg->id && 7733 !WARN_ON_ONCE(dst_reg->id != other_branch_regs[insn->dst_reg].id)) { 7734 find_equal_scalars(this_branch, dst_reg); 7735 find_equal_scalars(other_branch, &other_branch_regs[insn->dst_reg]); 7736 } 7737 7738 /* detect if R == 0 where R is returned from bpf_map_lookup_elem(). 7739 * NOTE: these optimizations below are related to pointer comparison 7740 * which will never be JMP32. 7741 */ 7742 if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K && 7743 insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) && 7744 reg_type_may_be_null(dst_reg->type)) { 7745 /* Mark all identical registers in each branch as either 7746 * safe or unknown depending on the R == 0 or R != 0 condition.
*/ 7748 mark_ptr_or_null_regs(this_branch, insn->dst_reg, 7749 opcode == BPF_JNE); 7750 mark_ptr_or_null_regs(other_branch, insn->dst_reg, 7751 opcode == BPF_JEQ); 7752 } else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg], 7753 this_branch, other_branch) && 7754 is_pointer_value(env, insn->dst_reg)) { 7755 verbose(env, "R%d pointer comparison prohibited\n", 7756 insn->dst_reg); 7757 return -EACCES; 7758 } 7759 if (env->log.level & BPF_LOG_LEVEL) 7760 print_verifier_state(env, this_branch->frame[this_branch->curframe]); 7761 return 0; 7762 } 7763 7764 /* verify BPF_LD_IMM64 instruction */ 7765 static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn) 7766 { 7767 struct bpf_insn_aux_data *aux = cur_aux(env); 7768 struct bpf_reg_state *regs = cur_regs(env); 7769 struct bpf_reg_state *dst_reg; 7770 struct bpf_map *map; 7771 int err; 7772 7773 if (BPF_SIZE(insn->code) != BPF_DW) { 7774 verbose(env, "invalid BPF_LD_IMM insn\n"); 7775 return -EINVAL; 7776 } 7777 if (insn->off != 0) { 7778 verbose(env, "BPF_LD_IMM64 uses reserved fields\n"); 7779 return -EINVAL; 7780 } 7781 7782 err = check_reg_arg(env, insn->dst_reg, DST_OP); 7783 if (err) 7784 return err; 7785 7786 dst_reg = &regs[insn->dst_reg]; 7787 if (insn->src_reg == 0) { 7788 u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm; 7789 7790 dst_reg->type = SCALAR_VALUE; 7791 __mark_reg_known(&regs[insn->dst_reg], imm); 7792 return 0; 7793 } 7794 7795 if (insn->src_reg == BPF_PSEUDO_BTF_ID) { 7796 mark_reg_known_zero(env, regs, insn->dst_reg); 7797 7798 dst_reg->type = aux->btf_var.reg_type; 7799 switch (dst_reg->type) { 7800 case PTR_TO_MEM: 7801 dst_reg->mem_size = aux->btf_var.mem_size; 7802 break; 7803 case PTR_TO_BTF_ID: 7804 case PTR_TO_PERCPU_BTF_ID: 7805 dst_reg->btf = aux->btf_var.btf; 7806 dst_reg->btf_id = aux->btf_var.btf_id; 7807 break; 7808 default: 7809 verbose(env, "bpf verifier is misconfigured\n"); 7810 return -EFAULT; 7811 } 7812 return 0; 7813 } 7814 7815 map = env->used_maps[aux->map_index]; 7816 mark_reg_known_zero(env, regs, insn->dst_reg); 7817 dst_reg->map_ptr = map; 7818 7819 if (insn->src_reg == BPF_PSEUDO_MAP_VALUE) { 7820 dst_reg->type = PTR_TO_MAP_VALUE; 7821 dst_reg->off = aux->map_off; 7822 if (map_value_has_spin_lock(map)) 7823 dst_reg->id = ++env->id_gen; 7824 } else if (insn->src_reg == BPF_PSEUDO_MAP_FD) { 7825 dst_reg->type = CONST_PTR_TO_MAP; 7826 } else { 7827 verbose(env, "bpf verifier is misconfigured\n"); 7828 return -EINVAL; 7829 } 7830 7831 return 0; 7832 } 7833 7834 static bool may_access_skb(enum bpf_prog_type type) 7835 { 7836 switch (type) { 7837 case BPF_PROG_TYPE_SOCKET_FILTER: 7838 case BPF_PROG_TYPE_SCHED_CLS: 7839 case BPF_PROG_TYPE_SCHED_ACT: 7840 return true; 7841 default: 7842 return false; 7843 } 7844 } 7845 7846 /* verify safety of LD_ABS|LD_IND instructions: 7847 * - they can only appear in the programs where ctx == skb 7848 * - since they are wrappers of function calls, they scratch R1-R5 registers, 7849 * preserve R6-R9, and store return value into R0 7850 * 7851 * Implicit input: 7852 * ctx == skb == R6 == CTX 7853 * 7854 * Explicit input: 7855 * SRC == any register 7856 * IMM == 32-bit immediate 7857 * 7858 * Output: 7859 * R0 - 8/16/32-bit skb data converted to cpu endianness 7860 */ 7861 static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) 7862 { 7863 struct bpf_reg_state *regs = cur_regs(env); 7864 static const int ctx_reg = BPF_REG_6; 7865 u8 mode = BPF_MODE(insn->code); 7866 int i, err; 7867 7868 if
(!may_access_skb(resolve_prog_type(env->prog))) { 7869 verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n"); 7870 return -EINVAL; 7871 } 7872 7873 if (!env->ops->gen_ld_abs) { 7874 verbose(env, "bpf verifier is misconfigured\n"); 7875 return -EINVAL; 7876 } 7877 7878 if (insn->dst_reg != BPF_REG_0 || insn->off != 0 || 7879 BPF_SIZE(insn->code) == BPF_DW || 7880 (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) { 7881 verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n"); 7882 return -EINVAL; 7883 } 7884 7885 /* check whether implicit source operand (register R6) is readable */ 7886 err = check_reg_arg(env, ctx_reg, SRC_OP); 7887 if (err) 7888 return err; 7889 7890 /* Disallow usage of BPF_LD_[ABS|IND] with reference tracking, as 7891 * gen_ld_abs() may terminate the program at runtime, leading to 7892 * reference leak. 7893 */ 7894 err = check_reference_leak(env); 7895 if (err) { 7896 verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n"); 7897 return err; 7898 } 7899 7900 if (env->cur_state->active_spin_lock) { 7901 verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_spin_lock-ed region\n"); 7902 return -EINVAL; 7903 } 7904 7905 if (regs[ctx_reg].type != PTR_TO_CTX) { 7906 verbose(env, 7907 "at the time of BPF_LD_ABS|IND R6 != pointer to skb\n"); 7908 return -EINVAL; 7909 } 7910 7911 if (mode == BPF_IND) { 7912 /* check explicit source operand */ 7913 err = check_reg_arg(env, insn->src_reg, SRC_OP); 7914 if (err) 7915 return err; 7916 } 7917 7918 err = check_ctx_reg(env, &regs[ctx_reg], ctx_reg); 7919 if (err < 0) 7920 return err; 7921 7922 /* reset caller saved regs to unreadable */ 7923 for (i = 0; i < CALLER_SAVED_REGS; i++) { 7924 mark_reg_not_init(env, regs, caller_saved[i]); 7925 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); 7926 } 7927 7928 /* mark destination R0 register as readable, since it contains 7929 * the value fetched from the packet. 7930 * Already marked as written above. 7931 */ 7932 mark_reg_unknown(env, regs, BPF_REG_0); 7933 /* ld_abs loads up to 32-bit skb data. */ 7934 regs[BPF_REG_0].subreg_def = env->insn_idx + 1; 7935 return 0; 7936 } 7937 7938 static int check_return_code(struct bpf_verifier_env *env) 7939 { 7940 struct tnum enforce_attach_type_range = tnum_unknown; 7941 const struct bpf_prog *prog = env->prog; 7942 struct bpf_reg_state *reg; 7943 struct tnum range = tnum_range(0, 1); 7944 enum bpf_prog_type prog_type = resolve_prog_type(env->prog); 7945 int err; 7946 const bool is_subprog = env->cur_state->frame[0]->subprogno; 7947 7948 /* LSM and struct_ops func-ptr's return type could be "void" */ 7949 if (!is_subprog && 7950 (prog_type == BPF_PROG_TYPE_STRUCT_OPS || 7951 prog_type == BPF_PROG_TYPE_LSM) && 7952 !prog->aux->attach_func_proto->type) 7953 return 0; 7954 7955 /* eBPF calling convention is such that R0 is used 7956 * to return the value from eBPF program.
7957 * Make sure that it's readable at this time 7958 * of bpf_exit, which means that program wrote 7959 * something into it earlier 7960 */ 7961 err = check_reg_arg(env, BPF_REG_0, SRC_OP); 7962 if (err) 7963 return err; 7964 7965 if (is_pointer_value(env, BPF_REG_0)) { 7966 verbose(env, "R0 leaks addr as return value\n"); 7967 return -EACCES; 7968 } 7969 7970 reg = cur_regs(env) + BPF_REG_0; 7971 if (is_subprog) { 7972 if (reg->type != SCALAR_VALUE) { 7973 verbose(env, "At subprogram exit the register R0 is not a scalar value (%s)\n", 7974 reg_type_str[reg->type]); 7975 return -EINVAL; 7976 } 7977 return 0; 7978 } 7979 7980 switch (prog_type) { 7981 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 7982 if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG || 7983 env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG || 7984 env->prog->expected_attach_type == BPF_CGROUP_INET4_GETPEERNAME || 7985 env->prog->expected_attach_type == BPF_CGROUP_INET6_GETPEERNAME || 7986 env->prog->expected_attach_type == BPF_CGROUP_INET4_GETSOCKNAME || 7987 env->prog->expected_attach_type == BPF_CGROUP_INET6_GETSOCKNAME) 7988 range = tnum_range(1, 1); 7989 break; 7990 case BPF_PROG_TYPE_CGROUP_SKB: 7991 if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) { 7992 range = tnum_range(0, 3); 7993 enforce_attach_type_range = tnum_range(2, 3); 7994 } 7995 break; 7996 case BPF_PROG_TYPE_CGROUP_SOCK: 7997 case BPF_PROG_TYPE_SOCK_OPS: 7998 case BPF_PROG_TYPE_CGROUP_DEVICE: 7999 case BPF_PROG_TYPE_CGROUP_SYSCTL: 8000 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 8001 break; 8002 case BPF_PROG_TYPE_RAW_TRACEPOINT: 8003 if (!env->prog->aux->attach_btf_id) 8004 return 0; 8005 range = tnum_const(0); 8006 break; 8007 case BPF_PROG_TYPE_TRACING: 8008 switch (env->prog->expected_attach_type) { 8009 case BPF_TRACE_FENTRY: 8010 case BPF_TRACE_FEXIT: 8011 range = tnum_const(0); 8012 break; 8013 case BPF_TRACE_RAW_TP: 8014 case BPF_MODIFY_RETURN: 8015 return 0; 8016 case BPF_TRACE_ITER: 8017 break; 8018 default: 8019 return -ENOTSUPP; 8020 } 8021 break; 8022 case BPF_PROG_TYPE_SK_LOOKUP: 8023 range = tnum_range(SK_DROP, SK_PASS); 8024 break; 8025 case BPF_PROG_TYPE_EXT: 8026 /* freplace program can return anything as its return value 8027 * depends on the to-be-replaced kernel func or bpf program. 
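 *
 * (Informal illustration of the enforcement below for the range-checked
 * types above: a CGROUP_SKB egress program whose R0 is only known to lie
 * in [0, 10] is rejected, since tnum_in() requires the whole tracked
 * range to fit within the allowed range [0, 3].)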
8028 */ 8029 default: 8030 return 0; 8031 } 8032 8033 if (reg->type != SCALAR_VALUE) { 8034 verbose(env, "At program exit the register R0 is not a known value (%s)\n", 8035 reg_type_str[reg->type]); 8036 return -EINVAL; 8037 } 8038 8039 if (!tnum_in(range, reg->var_off)) { 8040 char tn_buf[48]; 8041 8042 verbose(env, "At program exit the register R0 "); 8043 if (!tnum_is_unknown(reg->var_off)) { 8044 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 8045 verbose(env, "has value %s", tn_buf); 8046 } else { 8047 verbose(env, "has unknown scalar value"); 8048 } 8049 tnum_strn(tn_buf, sizeof(tn_buf), range); 8050 verbose(env, " should have been in %s\n", tn_buf); 8051 return -EINVAL; 8052 } 8053 8054 if (!tnum_is_unknown(enforce_attach_type_range) && 8055 tnum_in(enforce_attach_type_range, reg->var_off)) 8056 env->prog->enforce_expected_attach_type = 1; 8057 return 0; 8058 } 8059 8060 /* non-recursive DFS pseudo code 8061 * 1 procedure DFS-iterative(G,v): 8062 * 2 label v as discovered 8063 * 3 let S be a stack 8064 * 4 S.push(v) 8065 * 5 while S is not empty 8066 * 6 t <- S.pop() 8067 * 7 if t is what we're looking for: 8068 * 8 return t 8069 * 9 for all edges e in G.adjacentEdges(t) do 8070 * 10 if edge e is already labelled 8071 * 11 continue with the next edge 8072 * 12 w <- G.adjacentVertex(t,e) 8073 * 13 if vertex w is not discovered and not explored 8074 * 14 label e as tree-edge 8075 * 15 label w as discovered 8076 * 16 S.push(w) 8077 * 17 continue at 5 8078 * 18 else if vertex w is discovered 8079 * 19 label e as back-edge 8080 * 20 else 8081 * 21 // vertex w is explored 8082 * 22 label e as forward- or cross-edge 8083 * 23 label t as explored 8084 * 24 S.pop() 8085 * 8086 * convention: 8087 * 0x10 - discovered 8088 * 0x11 - discovered and fall-through edge labelled 8089 * 0x12 - discovered and fall-through and branch edges labelled 8090 * 0x20 - explored 8091 */ 8092 8093 enum { 8094 DISCOVERED = 0x10, 8095 EXPLORED = 0x20, 8096 FALLTHROUGH = 1, 8097 BRANCH = 2, 8098 }; 8099 8100 static u32 state_htab_size(struct bpf_verifier_env *env) 8101 { 8102 return env->prog->len; 8103 } 8104 8105 static struct bpf_verifier_state_list **explored_state( 8106 struct bpf_verifier_env *env, 8107 int idx) 8108 { 8109 struct bpf_verifier_state *cur = env->cur_state; 8110 struct bpf_func_state *state = cur->frame[cur->curframe]; 8111 8112 return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)]; 8113 } 8114 8115 static void init_explored_state(struct bpf_verifier_env *env, int idx) 8116 { 8117 env->insn_aux_data[idx].prune_point = true; 8118 } 8119 8120 enum { 8121 DONE_EXPLORING = 0, 8122 KEEP_EXPLORING = 1, 8123 }; 8124 8125 /* t, w, e - match pseudo-code above: 8126 * t - index of current instruction 8127 * w - next instruction 8128 * e - edge 8129 */ 8130 static int push_insn(int t, int w, int e, struct bpf_verifier_env *env, 8131 bool loop_ok) 8132 { 8133 int *insn_stack = env->cfg.insn_stack; 8134 int *insn_state = env->cfg.insn_state; 8135 8136 if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH)) 8137 return DONE_EXPLORING; 8138 8139 if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH)) 8140 return DONE_EXPLORING; 8141 8142 if (w < 0 || w >= env->prog->len) { 8143 verbose_linfo(env, t, "%d: ", t); 8144 verbose(env, "jump out of range from insn %d to %d\n", t, w); 8145 return -EINVAL; 8146 } 8147 8148 if (e == BRANCH) 8149 /* mark branch target for state pruning */ 8150 init_explored_state(env, w); 8151 8152 if (insn_state[w] == 0) { 8153 /* tree-edge */ 
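/* Informal worked example of the encoding above: for a conditional jump t,
 * pushing its fall-through edge sets insn_state[t] to 0x11
 * (DISCOVERED | FALLTHROUGH); pushing its branch edge later sets it to
 * 0x12 (DISCOVERED | BRANCH); once both edges are done, visit_insn()
 * reports DONE_EXPLORING and check_cfg() marks t as 0x20 (EXPLORED).
 */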
8154 insn_state[t] = DISCOVERED | e; 8155 insn_state[w] = DISCOVERED; 8156 if (env->cfg.cur_stack >= env->prog->len) 8157 return -E2BIG; 8158 insn_stack[env->cfg.cur_stack++] = w; 8159 return KEEP_EXPLORING; 8160 } else if ((insn_state[w] & 0xF0) == DISCOVERED) { 8161 if (loop_ok && env->bpf_capable) 8162 return DONE_EXPLORING; 8163 verbose_linfo(env, t, "%d: ", t); 8164 verbose_linfo(env, w, "%d: ", w); 8165 verbose(env, "back-edge from insn %d to %d\n", t, w); 8166 return -EINVAL; 8167 } else if (insn_state[w] == EXPLORED) { 8168 /* forward- or cross-edge */ 8169 insn_state[t] = DISCOVERED | e; 8170 } else { 8171 verbose(env, "insn state internal bug\n"); 8172 return -EFAULT; 8173 } 8174 return DONE_EXPLORING; 8175 } 8176 8177 /* Visits the instruction at index t and returns one of the following: 8178 * < 0 - an error occurred 8179 * DONE_EXPLORING - the instruction was fully explored 8180 * KEEP_EXPLORING - there is still work to be done before it is fully explored 8181 */ 8182 static int visit_insn(int t, int insn_cnt, struct bpf_verifier_env *env) 8183 { 8184 struct bpf_insn *insns = env->prog->insnsi; 8185 int ret; 8186 8187 /* All non-branch instructions have a single fall-through edge. */ 8188 if (BPF_CLASS(insns[t].code) != BPF_JMP && 8189 BPF_CLASS(insns[t].code) != BPF_JMP32) 8190 return push_insn(t, t + 1, FALLTHROUGH, env, false); 8191 8192 switch (BPF_OP(insns[t].code)) { 8193 case BPF_EXIT: 8194 return DONE_EXPLORING; 8195 8196 case BPF_CALL: 8197 ret = push_insn(t, t + 1, FALLTHROUGH, env, false); 8198 if (ret) 8199 return ret; 8200 8201 if (t + 1 < insn_cnt) 8202 init_explored_state(env, t + 1); 8203 if (insns[t].src_reg == BPF_PSEUDO_CALL) { 8204 init_explored_state(env, t); 8205 ret = push_insn(t, t + insns[t].imm + 1, BRANCH, 8206 env, false); 8207 } 8208 return ret; 8209 8210 case BPF_JA: 8211 if (BPF_SRC(insns[t].code) != BPF_K) 8212 return -EINVAL; 8213 8214 /* unconditional jump with single edge */ 8215 ret = push_insn(t, t + insns[t].off + 1, FALLTHROUGH, env, 8216 true); 8217 if (ret) 8218 return ret; 8219 8220 /* unconditional jmp is not a good pruning point, 8221 * but it's marked, since backtracking needs 8222 * to record jmp history in is_state_visited(). 
8223 */ 8224 init_explored_state(env, t + insns[t].off + 1); 8225 /* tell verifier to check for equivalent states 8226 * after every call and jump 8227 */ 8228 if (t + 1 < insn_cnt) 8229 init_explored_state(env, t + 1); 8230 8231 return ret; 8232 8233 default: 8234 /* conditional jump with two edges */ 8235 init_explored_state(env, t); 8236 ret = push_insn(t, t + 1, FALLTHROUGH, env, true); 8237 if (ret) 8238 return ret; 8239 8240 return push_insn(t, t + insns[t].off + 1, BRANCH, env, true); 8241 } 8242 } 8243 8244 /* non-recursive depth-first-search to detect loops in BPF program 8245 * loop == back-edge in directed graph 8246 */ 8247 static int check_cfg(struct bpf_verifier_env *env) 8248 { 8249 int insn_cnt = env->prog->len; 8250 int *insn_stack, *insn_state; 8251 int ret = 0; 8252 int i; 8253 8254 insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL); 8255 if (!insn_state) 8256 return -ENOMEM; 8257 8258 insn_stack = env->cfg.insn_stack = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL); 8259 if (!insn_stack) { 8260 kvfree(insn_state); 8261 return -ENOMEM; 8262 } 8263 8264 insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */ 8265 insn_stack[0] = 0; /* 0 is the first instruction */ 8266 env->cfg.cur_stack = 1; 8267 8268 while (env->cfg.cur_stack > 0) { 8269 int t = insn_stack[env->cfg.cur_stack - 1]; 8270 8271 ret = visit_insn(t, insn_cnt, env); 8272 switch (ret) { 8273 case DONE_EXPLORING: 8274 insn_state[t] = EXPLORED; 8275 env->cfg.cur_stack--; 8276 break; 8277 case KEEP_EXPLORING: 8278 break; 8279 default: 8280 if (ret > 0) { 8281 verbose(env, "visit_insn internal bug\n"); 8282 ret = -EFAULT; 8283 } 8284 goto err_free; 8285 } 8286 } 8287 8288 if (env->cfg.cur_stack < 0) { 8289 verbose(env, "pop stack internal bug\n"); 8290 ret = -EFAULT; 8291 goto err_free; 8292 } 8293 8294 for (i = 0; i < insn_cnt; i++) { 8295 if (insn_state[i] != EXPLORED) { 8296 verbose(env, "unreachable insn %d\n", i); 8297 ret = -EINVAL; 8298 goto err_free; 8299 } 8300 } 8301 ret = 0; /* cfg looks good */ 8302 8303 err_free: 8304 kvfree(insn_state); 8305 kvfree(insn_stack); 8306 env->cfg.insn_state = env->cfg.insn_stack = NULL; 8307 return ret; 8308 } 8309 8310 static int check_abnormal_return(struct bpf_verifier_env *env) 8311 { 8312 int i; 8313 8314 for (i = 1; i < env->subprog_cnt; i++) { 8315 if (env->subprog_info[i].has_ld_abs) { 8316 verbose(env, "LD_ABS is not allowed in subprogs without BTF\n"); 8317 return -EINVAL; 8318 } 8319 if (env->subprog_info[i].has_tail_call) { 8320 verbose(env, "tail_call is not allowed in subprogs without BTF\n"); 8321 return -EINVAL; 8322 } 8323 } 8324 return 0; 8325 } 8326 8327 /* The minimum supported BTF func info size */ 8328 #define MIN_BPF_FUNCINFO_SIZE 8 8329 #define MAX_FUNCINFO_REC_SIZE 252 8330 8331 static int check_btf_func(struct bpf_verifier_env *env, 8332 const union bpf_attr *attr, 8333 union bpf_attr __user *uattr) 8334 { 8335 const struct btf_type *type, *func_proto, *ret_type; 8336 u32 i, nfuncs, urec_size, min_size; 8337 u32 krec_size = sizeof(struct bpf_func_info); 8338 struct bpf_func_info *krecord; 8339 struct bpf_func_info_aux *info_aux = NULL; 8340 struct bpf_prog *prog; 8341 const struct btf *btf; 8342 void __user *urecord; 8343 u32 prev_offset = 0; 8344 bool scalar_return; 8345 int ret = -ENOMEM; 8346 8347 nfuncs = attr->func_info_cnt; 8348 if (!nfuncs) { 8349 if (check_abnormal_return(env)) 8350 return -EINVAL; 8351 return 0; 8352 } 8353 8354 if (nfuncs != env->subprog_cnt) { 8355 verbose(env, "number of funcs in 
func_info doesn't match number of subprogs\n"); 8356 return -EINVAL; 8357 } 8358 8359 urec_size = attr->func_info_rec_size; 8360 if (urec_size < MIN_BPF_FUNCINFO_SIZE || 8361 urec_size > MAX_FUNCINFO_REC_SIZE || 8362 urec_size % sizeof(u32)) { 8363 verbose(env, "invalid func info rec size %u\n", urec_size); 8364 return -EINVAL; 8365 } 8366 8367 prog = env->prog; 8368 btf = prog->aux->btf; 8369 8370 urecord = u64_to_user_ptr(attr->func_info); 8371 min_size = min_t(u32, krec_size, urec_size); 8372 8373 krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN); 8374 if (!krecord) 8375 return -ENOMEM; 8376 info_aux = kcalloc(nfuncs, sizeof(*info_aux), GFP_KERNEL | __GFP_NOWARN); 8377 if (!info_aux) 8378 goto err_free; 8379 8380 for (i = 0; i < nfuncs; i++) { 8381 ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size); 8382 if (ret) { 8383 if (ret == -E2BIG) { 8384 verbose(env, "nonzero tailing record in func info"); 8385 /* set the size kernel expects so loader can zero 8386 * out the rest of the record. 8387 */ 8388 if (put_user(min_size, &uattr->func_info_rec_size)) 8389 ret = -EFAULT; 8390 } 8391 goto err_free; 8392 } 8393 8394 if (copy_from_user(&krecord[i], urecord, min_size)) { 8395 ret = -EFAULT; 8396 goto err_free; 8397 } 8398 8399 /* check insn_off */ 8400 ret = -EINVAL; 8401 if (i == 0) { 8402 if (krecord[i].insn_off) { 8403 verbose(env, 8404 "nonzero insn_off %u for the first func info record", 8405 krecord[i].insn_off); 8406 goto err_free; 8407 } 8408 } else if (krecord[i].insn_off <= prev_offset) { 8409 verbose(env, 8410 "same or smaller insn offset (%u) than previous func info record (%u)", 8411 krecord[i].insn_off, prev_offset); 8412 goto err_free; 8413 } 8414 8415 if (env->subprog_info[i].start != krecord[i].insn_off) { 8416 verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n"); 8417 goto err_free; 8418 } 8419 8420 /* check type_id */ 8421 type = btf_type_by_id(btf, krecord[i].type_id); 8422 if (!type || !btf_type_is_func(type)) { 8423 verbose(env, "invalid type id %d in func info", 8424 krecord[i].type_id); 8425 goto err_free; 8426 } 8427 info_aux[i].linkage = BTF_INFO_VLEN(type->info); 8428 8429 func_proto = btf_type_by_id(btf, type->type); 8430 if (unlikely(!func_proto || !btf_type_is_func_proto(func_proto))) 8431 /* btf_func_check() already verified it during BTF load */ 8432 goto err_free; 8433 ret_type = btf_type_skip_modifiers(btf, func_proto->type, NULL); 8434 scalar_return = 8435 btf_type_is_small_int(ret_type) || btf_type_is_enum(ret_type); 8436 if (i && !scalar_return && env->subprog_info[i].has_ld_abs) { 8437 verbose(env, "LD_ABS is only allowed in functions that return 'int'.\n"); 8438 goto err_free; 8439 } 8440 if (i && !scalar_return && env->subprog_info[i].has_tail_call) { 8441 verbose(env, "tail_call is only allowed in functions that return 'int'.\n"); 8442 goto err_free; 8443 } 8444 8445 prev_offset = krecord[i].insn_off; 8446 urecord += urec_size; 8447 } 8448 8449 prog->aux->func_info = krecord; 8450 prog->aux->func_info_cnt = nfuncs; 8451 prog->aux->func_info_aux = info_aux; 8452 return 0; 8453 8454 err_free: 8455 kvfree(krecord); 8456 kfree(info_aux); 8457 return ret; 8458 } 8459 8460 static void adjust_btf_func(struct bpf_verifier_env *env) 8461 { 8462 struct bpf_prog_aux *aux = env->prog->aux; 8463 int i; 8464 8465 if (!aux->func_info) 8466 return; 8467 8468 for (i = 0; i < env->subprog_cnt; i++) 8469 aux->func_info[i].insn_off = env->subprog_info[i].start; 8470 } 8471 8472 #define MIN_BPF_LINEINFO_SIZE 
(offsetof(struct bpf_line_info, line_col) + \ 8473 sizeof(((struct bpf_line_info *)(0))->line_col)) 8474 #define MAX_LINEINFO_REC_SIZE MAX_FUNCINFO_REC_SIZE 8475 8476 static int check_btf_line(struct bpf_verifier_env *env, 8477 const union bpf_attr *attr, 8478 union bpf_attr __user *uattr) 8479 { 8480 u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0; 8481 struct bpf_subprog_info *sub; 8482 struct bpf_line_info *linfo; 8483 struct bpf_prog *prog; 8484 const struct btf *btf; 8485 void __user *ulinfo; 8486 int err; 8487 8488 nr_linfo = attr->line_info_cnt; 8489 if (!nr_linfo) 8490 return 0; 8491 8492 rec_size = attr->line_info_rec_size; 8493 if (rec_size < MIN_BPF_LINEINFO_SIZE || 8494 rec_size > MAX_LINEINFO_REC_SIZE || 8495 rec_size & (sizeof(u32) - 1)) 8496 return -EINVAL; 8497 8498 /* Need to zero it in case the userspace may 8499 * pass in a smaller bpf_line_info object. 8500 */ 8501 linfo = kvcalloc(nr_linfo, sizeof(struct bpf_line_info), 8502 GFP_KERNEL | __GFP_NOWARN); 8503 if (!linfo) 8504 return -ENOMEM; 8505 8506 prog = env->prog; 8507 btf = prog->aux->btf; 8508 8509 s = 0; 8510 sub = env->subprog_info; 8511 ulinfo = u64_to_user_ptr(attr->line_info); 8512 expected_size = sizeof(struct bpf_line_info); 8513 ncopy = min_t(u32, expected_size, rec_size); 8514 for (i = 0; i < nr_linfo; i++) { 8515 err = bpf_check_uarg_tail_zero(ulinfo, expected_size, rec_size); 8516 if (err) { 8517 if (err == -E2BIG) { 8518 verbose(env, "nonzero tailing record in line_info"); 8519 if (put_user(expected_size, 8520 &uattr->line_info_rec_size)) 8521 err = -EFAULT; 8522 } 8523 goto err_free; 8524 } 8525 8526 if (copy_from_user(&linfo[i], ulinfo, ncopy)) { 8527 err = -EFAULT; 8528 goto err_free; 8529 } 8530 8531 /* 8532 * Check insn_off to ensure 8533 * 1) strictly increasing AND 8534 * 2) bounded by prog->len 8535 * 8536 * The linfo[0].insn_off == 0 check logically falls into 8537 * the later "missing bpf_line_info for func..." case 8538 * because the first linfo[0].insn_off must be the 8539 * first sub also and the first sub must have 8540 * subprog_info[0].start == 0. 
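 *
 * Informal example: with two subprogs starting at insns 0 and 20, a valid
 * layout is insn_off 0, 4, 20, 25 (strictly increasing, with a record
 * landing exactly on each subprog start); insn_off 0, 4, 25 would fail
 * below with "missing bpf_line_info for func#1".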
8541 */ 8542 if ((i && linfo[i].insn_off <= prev_offset) || 8543 linfo[i].insn_off >= prog->len) { 8544 verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n", 8545 i, linfo[i].insn_off, prev_offset, 8546 prog->len); 8547 err = -EINVAL; 8548 goto err_free; 8549 } 8550 8551 if (!prog->insnsi[linfo[i].insn_off].code) { 8552 verbose(env, 8553 "Invalid insn code at line_info[%u].insn_off\n", 8554 i); 8555 err = -EINVAL; 8556 goto err_free; 8557 } 8558 8559 if (!btf_name_by_offset(btf, linfo[i].line_off) || 8560 !btf_name_by_offset(btf, linfo[i].file_name_off)) { 8561 verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i); 8562 err = -EINVAL; 8563 goto err_free; 8564 } 8565 8566 if (s != env->subprog_cnt) { 8567 if (linfo[i].insn_off == sub[s].start) { 8568 sub[s].linfo_idx = i; 8569 s++; 8570 } else if (sub[s].start < linfo[i].insn_off) { 8571 verbose(env, "missing bpf_line_info for func#%u\n", s); 8572 err = -EINVAL; 8573 goto err_free; 8574 } 8575 } 8576 8577 prev_offset = linfo[i].insn_off; 8578 ulinfo += rec_size; 8579 } 8580 8581 if (s != env->subprog_cnt) { 8582 verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n", 8583 env->subprog_cnt - s, s); 8584 err = -EINVAL; 8585 goto err_free; 8586 } 8587 8588 prog->aux->linfo = linfo; 8589 prog->aux->nr_linfo = nr_linfo; 8590 8591 return 0; 8592 8593 err_free: 8594 kvfree(linfo); 8595 return err; 8596 } 8597 8598 static int check_btf_info(struct bpf_verifier_env *env, 8599 const union bpf_attr *attr, 8600 union bpf_attr __user *uattr) 8601 { 8602 struct btf *btf; 8603 int err; 8604 8605 if (!attr->func_info_cnt && !attr->line_info_cnt) { 8606 if (check_abnormal_return(env)) 8607 return -EINVAL; 8608 return 0; 8609 } 8610 8611 btf = btf_get_by_fd(attr->prog_btf_fd); 8612 if (IS_ERR(btf)) 8613 return PTR_ERR(btf); 8614 env->prog->aux->btf = btf; 8615 8616 err = check_btf_func(env, attr, uattr); 8617 if (err) 8618 return err; 8619 8620 err = check_btf_line(env, attr, uattr); 8621 if (err) 8622 return err; 8623 8624 return 0; 8625 } 8626 8627 /* check %cur's range satisfies %old's */ 8628 static bool range_within(struct bpf_reg_state *old, 8629 struct bpf_reg_state *cur) 8630 { 8631 return old->umin_value <= cur->umin_value && 8632 old->umax_value >= cur->umax_value && 8633 old->smin_value <= cur->smin_value && 8634 old->smax_value >= cur->smax_value; 8635 } 8636 8637 /* Maximum number of register states that can exist at once */ 8638 #define ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) 8639 struct idpair { 8640 u32 old; 8641 u32 cur; 8642 }; 8643 8644 /* If in the old state two registers had the same id, then they need to have 8645 * the same id in the new state as well. But that id could be different from 8646 * the old state, so we need to track the mapping from old to new ids. 8647 * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent 8648 * regs with old id 5 must also have new id 9 for the new state to be safe. But 8649 * regs with a different old id could still have new id 9, we don't care about 8650 * that. 8651 * So we look through our idmap to see if this old id has been seen before. If 8652 * so, we require the new id to match; otherwise, we add the id pair to the map. 
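 *
 * Informal example: the old state had r1.id == 5 and r2.id == 5. If the
 * current state has r1.id == 9, the pair (5, 9) is recorded and r2 is
 * then required to have id 9 as well; r2.id == 10 would be an aliasing
 * pattern the verified path never exercised, so the states don't match.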
*/ 8654 static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap) 8655 { 8656 unsigned int i; 8657 8658 for (i = 0; i < ID_MAP_SIZE; i++) { 8659 if (!idmap[i].old) { 8660 /* Reached an empty slot; haven't seen this id before */ 8661 idmap[i].old = old_id; 8662 idmap[i].cur = cur_id; 8663 return true; 8664 } 8665 if (idmap[i].old == old_id) 8666 return idmap[i].cur == cur_id; 8667 } 8668 /* We ran out of idmap slots, which should be impossible */ 8669 WARN_ON_ONCE(1); 8670 return false; 8671 } 8672 8673 static void clean_func_state(struct bpf_verifier_env *env, 8674 struct bpf_func_state *st) 8675 { 8676 enum bpf_reg_liveness live; 8677 int i, j; 8678 8679 for (i = 0; i < BPF_REG_FP; i++) { 8680 live = st->regs[i].live; 8681 /* liveness must not touch this register anymore */ 8682 st->regs[i].live |= REG_LIVE_DONE; 8683 if (!(live & REG_LIVE_READ)) 8684 /* since the register is unused, clear its state 8685 * to make further comparison simpler 8686 */ 8687 __mark_reg_not_init(env, &st->regs[i]); 8688 } 8689 8690 for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) { 8691 live = st->stack[i].spilled_ptr.live; 8692 /* liveness must not touch this stack slot anymore */ 8693 st->stack[i].spilled_ptr.live |= REG_LIVE_DONE; 8694 if (!(live & REG_LIVE_READ)) { 8695 __mark_reg_not_init(env, &st->stack[i].spilled_ptr); 8696 for (j = 0; j < BPF_REG_SIZE; j++) 8697 st->stack[i].slot_type[j] = STACK_INVALID; 8698 } 8699 } 8700 } 8701 8702 static void clean_verifier_state(struct bpf_verifier_env *env, 8703 struct bpf_verifier_state *st) 8704 { 8705 int i; 8706 8707 if (st->frame[0]->regs[0].live & REG_LIVE_DONE) 8708 /* all regs in this state in all frames were already marked */ 8709 return; 8710 8711 for (i = 0; i <= st->curframe; i++) 8712 clean_func_state(env, st->frame[i]); 8713 } 8714 8715 /* the parentage chains form a tree. 8716 * the verifier states are added to state lists at given insn and 8717 * pushed into state stack for future exploration. 8718 * when the verifier reaches bpf_exit insn some of the verifier states 8719 * stored in the state lists have their final liveness state already, 8720 * but a lot of states will get revised from liveness point of view when 8721 * the verifier explores other branches. 8722 * Example: 8723 * 1: r0 = 1 8724 * 2: if r1 == 100 goto pc+1 8725 * 3: r0 = 2 8726 * 4: exit 8727 * when the verifier reaches exit insn the register r0 in the state list of 8728 * insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch 8729 * of insn 2 and goes exploring further. At the insn 4 it will walk the 8730 * parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ. 8731 * 8732 * Since the verifier pushes the branch states as it sees them while exploring 8733 * the program the condition of walking the branch instruction for the second 8734 * time means that all states below this branch were already explored and 8735 * their final liveness marks are already propagated. 8736 * Hence when the verifier completes the search of state list in is_state_visited() 8737 * we can call this clean_live_states() function to mark all liveness states 8738 * as REG_LIVE_DONE to indicate that 'parent' pointers of 'struct bpf_reg_state' 8739 * will not be used. 8740 * This function also clears the registers and stack for states that !READ 8741 * to simplify state merging. 8742 * 8743 * An important note here is that walking the same branch instruction in the callee 8744 * doesn't mean that the states are DONE.
The verifier has to compare 8745 * the callsites 8746 */ 8747 static void clean_live_states(struct bpf_verifier_env *env, int insn, 8748 struct bpf_verifier_state *cur) 8749 { 8750 struct bpf_verifier_state_list *sl; 8751 int i; 8752 8753 sl = *explored_state(env, insn); 8754 while (sl) { 8755 if (sl->state.branches) 8756 goto next; 8757 if (sl->state.insn_idx != insn || 8758 sl->state.curframe != cur->curframe) 8759 goto next; 8760 for (i = 0; i <= cur->curframe; i++) 8761 if (sl->state.frame[i]->callsite != cur->frame[i]->callsite) 8762 goto next; 8763 clean_verifier_state(env, &sl->state); 8764 next: 8765 sl = sl->next; 8766 } 8767 } 8768 8769 /* Returns true if (rold safe implies rcur safe) */ 8770 static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur, 8771 struct idpair *idmap) 8772 { 8773 bool equal; 8774 8775 if (!(rold->live & REG_LIVE_READ)) 8776 /* explored state didn't use this */ 8777 return true; 8778 8779 equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, parent)) == 0; 8780 8781 if (rold->type == PTR_TO_STACK) 8782 /* two stack pointers are equal only if they're pointing to 8783 * the same stack frame, since fp-8 in foo != fp-8 in bar 8784 */ 8785 return equal && rold->frameno == rcur->frameno; 8786 8787 if (equal) 8788 return true; 8789 8790 if (rold->type == NOT_INIT) 8791 /* explored state can't have used this */ 8792 return true; 8793 if (rcur->type == NOT_INIT) 8794 return false; 8795 switch (rold->type) { 8796 case SCALAR_VALUE: 8797 if (rcur->type == SCALAR_VALUE) { 8798 if (!rold->precise && !rcur->precise) 8799 return true; 8800 /* new val must satisfy old val knowledge */ 8801 return range_within(rold, rcur) && 8802 tnum_in(rold->var_off, rcur->var_off); 8803 } else { 8804 /* We're trying to use a pointer in place of a scalar. 8805 * Even if the scalar was unbounded, this could lead to 8806 * pointer leaks because scalars are allowed to leak 8807 * while pointers are not. We could make this safe in 8808 * special cases if root is calling us, but it's 8809 * probably not worth the hassle. 8810 */ 8811 return false; 8812 } 8813 case PTR_TO_MAP_VALUE: 8814 /* If the new min/max/var_off satisfy the old ones and 8815 * everything else matches, we are OK. 8816 * 'id' is not compared, since it's only used for maps with 8817 * bpf_spin_lock inside map element and in such cases if 8818 * the rest of the prog is valid for one map element then 8819 * it's valid for all map elements regardless of the key 8820 * used in bpf_map_lookup() 8821 */ 8822 return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 && 8823 range_within(rold, rcur) && 8824 tnum_in(rold->var_off, rcur->var_off); 8825 case PTR_TO_MAP_VALUE_OR_NULL: 8826 /* a PTR_TO_MAP_VALUE could be safe to use as a 8827 * PTR_TO_MAP_VALUE_OR_NULL into the same map. 8828 * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL- 8829 * checked, doing so could have affected others with the same 8830 * id, and we can't check for that because we lost the id when 8831 * we converted to a PTR_TO_MAP_VALUE. 
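 *
 * Informal example: in the old state r1 and r2 were both
 * PTR_TO_MAP_VALUE_OR_NULL with id == 3, and the path was proven safe by
 * NULL-checking r1 (which implicitly proved r2 != NULL as well). A
 * current state where r1 and r2 carry different ids must not be pruned
 * against it, which is what the check_ids() call below enforces.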
8832 */ 8833 if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL) 8834 return false; 8835 if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id))) 8836 return false; 8837 /* Check our ids match any regs they're supposed to */ 8838 return check_ids(rold->id, rcur->id, idmap); 8839 case PTR_TO_PACKET_META: 8840 case PTR_TO_PACKET: 8841 if (rcur->type != rold->type) 8842 return false; 8843 /* We must have at least as much range as the old ptr 8844 * did, so that any accesses which were safe before are 8845 * still safe. This is true even if old range < old off, 8846 * since someone could have accessed through (ptr - k), or 8847 * even done ptr -= k in a register, to get a safe access. 8848 */ 8849 if (rold->range > rcur->range) 8850 return false; 8851 /* If the offsets don't match, we can't trust our alignment; 8852 * nor can we be sure that we won't fall out of range. 8853 */ 8854 if (rold->off != rcur->off) 8855 return false; 8856 /* id relations must be preserved */ 8857 if (rold->id && !check_ids(rold->id, rcur->id, idmap)) 8858 return false; 8859 /* new val must satisfy old val knowledge */ 8860 return range_within(rold, rcur) && 8861 tnum_in(rold->var_off, rcur->var_off); 8862 case PTR_TO_CTX: 8863 case CONST_PTR_TO_MAP: 8864 case PTR_TO_PACKET_END: 8865 case PTR_TO_FLOW_KEYS: 8866 case PTR_TO_SOCKET: 8867 case PTR_TO_SOCKET_OR_NULL: 8868 case PTR_TO_SOCK_COMMON: 8869 case PTR_TO_SOCK_COMMON_OR_NULL: 8870 case PTR_TO_TCP_SOCK: 8871 case PTR_TO_TCP_SOCK_OR_NULL: 8872 case PTR_TO_XDP_SOCK: 8873 /* Only valid matches are exact, which memcmp() above 8874 * would have accepted 8875 */ 8876 default: 8877 /* Don't know what's going on, just say it's not safe */ 8878 return false; 8879 } 8880 8881 /* Shouldn't get here; if we do, say it's not safe */ 8882 WARN_ON_ONCE(1); 8883 return false; 8884 } 8885 8886 static bool stacksafe(struct bpf_func_state *old, 8887 struct bpf_func_state *cur, 8888 struct idpair *idmap) 8889 { 8890 int i, spi; 8891 8892 /* walk slots of the explored stack and ignore any additional 8893 * slots in the current stack, since explored(safe) state 8894 * didn't use them 8895 */ 8896 for (i = 0; i < old->allocated_stack; i++) { 8897 spi = i / BPF_REG_SIZE; 8898 8899 if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) { 8900 i += BPF_REG_SIZE - 1; 8901 /* explored state didn't use this */ 8902 continue; 8903 } 8904 8905 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID) 8906 continue; 8907 8908 /* explored stack has more populated slots than current stack 8909 * and these slots were used 8910 */ 8911 if (i >= cur->allocated_stack) 8912 return false; 8913 8914 /* if old state was safe with misc data in the stack 8915 * it will be safe with zero-initialized stack. 
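 * (e.g. a path verified while the slot held STACK_MISC, i.e. arbitrary
 * bytes, is certainly still safe when the slot holds STACK_ZERO, which
 * only narrows what a load from the slot can observe).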
The opposite is not true 8917 */ 8918 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC && 8919 cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO) 8920 continue; 8921 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] != 8922 cur->stack[spi].slot_type[i % BPF_REG_SIZE]) 8923 /* Ex: old explored (safe) state has STACK_SPILL in 8924 * this stack slot, but current has STACK_MISC -> 8925 * these verifier states are not equivalent; 8926 * return false to continue verification of this path 8927 */ 8928 return false; 8929 if (i % BPF_REG_SIZE) 8930 continue; 8931 if (old->stack[spi].slot_type[0] != STACK_SPILL) 8932 continue; 8933 if (!regsafe(&old->stack[spi].spilled_ptr, 8934 &cur->stack[spi].spilled_ptr, 8935 idmap)) 8936 /* when explored and current stack slot are both storing 8937 * spilled registers, check that stored pointers types 8938 * are the same as well. 8939 * Ex: explored safe path could have stored 8940 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8} 8941 * but current path has stored: 8942 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16} 8943 * such verifier states are not equivalent. 8944 * return false to continue verification of this path 8945 */ 8946 return false; 8947 } 8948 return true; 8949 } 8950 8951 static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur) 8952 { 8953 if (old->acquired_refs != cur->acquired_refs) 8954 return false; 8955 return !memcmp(old->refs, cur->refs, 8956 sizeof(*old->refs) * old->acquired_refs); 8957 } 8958 8959 /* compare two verifier states 8960 * 8961 * all states stored in state_list are known to be valid, since 8962 * verifier reached 'bpf_exit' instruction through them 8963 * 8964 * this function is called when the verifier explores different branches of 8965 * execution popped from the state stack. If it sees an old state that has 8966 * more strict register state and more strict stack state then this execution 8967 * branch doesn't need to be explored further, since verifier already 8968 * concluded that more strict state leads to valid finish. 8969 * 8970 * Therefore two states are equivalent if register state is more conservative 8971 * and explored stack state is more conservative than the current one. 8972 * Example: 8973 * explored current 8974 * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC) 8975 * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC) 8976 * 8977 * In other words if current stack state (one being explored) has more 8978 * valid slots than the old one that already passed validation, it means 8979 * the verifier can stop exploring and conclude that current state is valid too 8980 * 8981 * Similarly with registers.
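 * (e.g. explored r1 == NOT_INIT vs. current r1 == SCALAR_VALUE is fine,
 * since the already-verified path proved it never needed to read r1.)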
If the explored state has a register type as invalid
8982 * whereas the register type in the current state is meaningful, it means
8983 * the current state will reach the 'bpf_exit' instruction safely
8984 */
8985 static bool func_states_equal(struct bpf_func_state *old,
8986 struct bpf_func_state *cur)
8987 {
8988 struct idpair *idmap;
8989 bool ret = false;
8990 int i;
8991
8992 idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL);
8993 /* If we failed to allocate the idmap, just say it's not safe */
8994 if (!idmap)
8995 return false;
8996
8997 for (i = 0; i < MAX_BPF_REG; i++) {
8998 if (!regsafe(&old->regs[i], &cur->regs[i], idmap))
8999 goto out_free;
9000 }
9001
9002 if (!stacksafe(old, cur, idmap))
9003 goto out_free;
9004
9005 if (!refsafe(old, cur))
9006 goto out_free;
9007 ret = true;
9008 out_free:
9009 kfree(idmap);
9010 return ret;
9011 }
9012
9013 static bool states_equal(struct bpf_verifier_env *env,
9014 struct bpf_verifier_state *old,
9015 struct bpf_verifier_state *cur)
9016 {
9017 int i;
9018
9019 if (old->curframe != cur->curframe)
9020 return false;
9021
9022 /* Verification state from speculative execution simulation
9023 * must never prune a non-speculative execution one.
9024 */
9025 if (old->speculative && !cur->speculative)
9026 return false;
9027
9028 if (old->active_spin_lock != cur->active_spin_lock)
9029 return false;
9030
9031 /* for states to be equal, callsites have to be the same
9032 * and all frame states need to be equivalent
9033 */
9034 for (i = 0; i <= old->curframe; i++) {
9035 if (old->frame[i]->callsite != cur->frame[i]->callsite)
9036 return false;
9037 if (!func_states_equal(old->frame[i], cur->frame[i]))
9038 return false;
9039 }
9040 return true;
9041 }
9042
9043 /* Return 0 if no propagation happened. Return negative error code if error
9044 * happened. Otherwise, return the propagated bit.
9045 */
9046 static int propagate_liveness_reg(struct bpf_verifier_env *env,
9047 struct bpf_reg_state *reg,
9048 struct bpf_reg_state *parent_reg)
9049 {
9050 u8 parent_flag = parent_reg->live & REG_LIVE_READ;
9051 u8 flag = reg->live & REG_LIVE_READ;
9052 int err;
9053
9054 /* When we reach here, the read flags of PARENT_REG or REG can be any
9055 * of REG_LIVE_READ64, REG_LIVE_READ32 or REG_LIVE_NONE. No propagation
9056 * is needed if PARENT_REG already has the strongest, REG_LIVE_READ64.
9057 */
9058 if (parent_flag == REG_LIVE_READ64 ||
9059 /* Or if there is no read flag from REG. */
9060 !flag ||
9061 /* Or if the read flag from REG is the same as PARENT_REG. */
9062 parent_flag == flag)
9063 return 0;
9064
9065 err = mark_reg_read(env, reg, parent_reg, flag);
9066 if (err)
9067 return err;
9068
9069 return flag;
9070 }
9071
9072 /* A write screens off any subsequent reads; but write marks come from the
9073 * straight-line code between a state and its parent. When we arrive at an
9074 * equivalent state (jump target or such) we didn't arrive by the straight-line
9075 * code, so read marks in the state must propagate to the parent regardless
9076 * of the state's write marks. That's what the 'parent == state->parent'
9077 * comparison in mark_reg_read() is for.
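 *
 * For example, in the straight-line code
 *   r6 = 1                  // write mark on r6 in the current state
 *   r0 = r6                 // read satisfied locally, parent not marked
 * the write to r6 screens off the later read. But when the current state is
 * found equivalent to an explored state reached via a different path, the
 * explored state's read marks must still flow up to our parent, because the
 * parent was not on that straight-line path.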
9078 */ 9079 static int propagate_liveness(struct bpf_verifier_env *env, 9080 const struct bpf_verifier_state *vstate, 9081 struct bpf_verifier_state *vparent) 9082 { 9083 struct bpf_reg_state *state_reg, *parent_reg; 9084 struct bpf_func_state *state, *parent; 9085 int i, frame, err = 0; 9086 9087 if (vparent->curframe != vstate->curframe) { 9088 WARN(1, "propagate_live: parent frame %d current frame %d\n", 9089 vparent->curframe, vstate->curframe); 9090 return -EFAULT; 9091 } 9092 /* Propagate read liveness of registers... */ 9093 BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG); 9094 for (frame = 0; frame <= vstate->curframe; frame++) { 9095 parent = vparent->frame[frame]; 9096 state = vstate->frame[frame]; 9097 parent_reg = parent->regs; 9098 state_reg = state->regs; 9099 /* We don't need to worry about FP liveness, it's read-only */ 9100 for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) { 9101 err = propagate_liveness_reg(env, &state_reg[i], 9102 &parent_reg[i]); 9103 if (err < 0) 9104 return err; 9105 if (err == REG_LIVE_READ64) 9106 mark_insn_zext(env, &parent_reg[i]); 9107 } 9108 9109 /* Propagate stack slots. */ 9110 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE && 9111 i < parent->allocated_stack / BPF_REG_SIZE; i++) { 9112 parent_reg = &parent->stack[i].spilled_ptr; 9113 state_reg = &state->stack[i].spilled_ptr; 9114 err = propagate_liveness_reg(env, state_reg, 9115 parent_reg); 9116 if (err < 0) 9117 return err; 9118 } 9119 } 9120 return 0; 9121 } 9122 9123 /* find precise scalars in the previous equivalent state and 9124 * propagate them into the current state 9125 */ 9126 static int propagate_precision(struct bpf_verifier_env *env, 9127 const struct bpf_verifier_state *old) 9128 { 9129 struct bpf_reg_state *state_reg; 9130 struct bpf_func_state *state; 9131 int i, err = 0; 9132 9133 state = old->frame[old->curframe]; 9134 state_reg = state->regs; 9135 for (i = 0; i < BPF_REG_FP; i++, state_reg++) { 9136 if (state_reg->type != SCALAR_VALUE || 9137 !state_reg->precise) 9138 continue; 9139 if (env->log.level & BPF_LOG_LEVEL2) 9140 verbose(env, "propagating r%d\n", i); 9141 err = mark_chain_precision(env, i); 9142 if (err < 0) 9143 return err; 9144 } 9145 9146 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { 9147 if (state->stack[i].slot_type[0] != STACK_SPILL) 9148 continue; 9149 state_reg = &state->stack[i].spilled_ptr; 9150 if (state_reg->type != SCALAR_VALUE || 9151 !state_reg->precise) 9152 continue; 9153 if (env->log.level & BPF_LOG_LEVEL2) 9154 verbose(env, "propagating fp%d\n", 9155 (-i - 1) * BPF_REG_SIZE); 9156 err = mark_chain_precision_stack(env, i); 9157 if (err < 0) 9158 return err; 9159 } 9160 return 0; 9161 } 9162 9163 static bool states_maybe_looping(struct bpf_verifier_state *old, 9164 struct bpf_verifier_state *cur) 9165 { 9166 struct bpf_func_state *fold, *fcur; 9167 int i, fr = cur->curframe; 9168 9169 if (old->curframe != fr) 9170 return false; 9171 9172 fold = old->frame[fr]; 9173 fcur = cur->frame[fr]; 9174 for (i = 0; i < MAX_BPF_REG; i++) 9175 if (memcmp(&fold->regs[i], &fcur->regs[i], 9176 offsetof(struct bpf_reg_state, parent))) 9177 return false; 9178 return true; 9179 } 9180 9181 9182 static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) 9183 { 9184 struct bpf_verifier_state_list *new_sl; 9185 struct bpf_verifier_state_list *sl, **pprev; 9186 struct bpf_verifier_state *cur = env->cur_state, *new; 9187 int i, j, err, states_cnt = 0; 9188 bool add_new_state = env->test_state_freq ? 
true : false;
9189
9190 cur->last_insn_idx = env->prev_insn_idx;
9191 if (!env->insn_aux_data[insn_idx].prune_point)
9192 /* this 'insn_idx' instruction wasn't marked, so we will not
9193 * be doing state search here
9194 */
9195 return 0;
9196
9197 /* bpf progs typically have a pruning point every 4 instructions
9198 * http://vger.kernel.org/bpfconf2019.html#session-1
9199 * Do not add new state for future pruning if the verifier hasn't seen
9200 * at least 2 jumps and at least 8 instructions.
9201 * This heuristic helps decrease the 'total_states' and 'peak_states' metrics.
9202 * In tests that amounts to up to a 50% reduction in total verifier
9203 * memory consumption and a 20% verifier time speedup.
9204 */
9205 if (env->jmps_processed - env->prev_jmps_processed >= 2 &&
9206 env->insn_processed - env->prev_insn_processed >= 8)
9207 add_new_state = true;
9208
9209 pprev = explored_state(env, insn_idx);
9210 sl = *pprev;
9211
9212 clean_live_states(env, insn_idx, cur);
9213
9214 while (sl) {
9215 states_cnt++;
9216 if (sl->state.insn_idx != insn_idx)
9217 goto next;
9218 if (sl->state.branches) {
9219 if (states_maybe_looping(&sl->state, cur) &&
9220 states_equal(env, &sl->state, cur)) {
9221 verbose_linfo(env, insn_idx, "; ");
9222 verbose(env, "infinite loop detected at insn %d\n", insn_idx);
9223 return -EINVAL;
9224 }
9225 /* if the verifier is processing a loop, avoid adding new state
9226 * too often, since different loop iterations have distinct
9227 * states and may not help future pruning.
9228 * This threshold shouldn't be too low, to make sure that
9229 * a loop with a large bound will be rejected quickly.
9230 * The most abusive loop will be:
9231 * r1 += 1
9232 * if r1 < 1000000 goto pc-2
9233 * 1M insn_processed limit / 100 == 10k peak states.
9234 * This threshold shouldn't be too high either, since states
9235 * at the end of the loop are likely to be useful in pruning.
9236 */
9237 if (env->jmps_processed - env->prev_jmps_processed < 20 &&
9238 env->insn_processed - env->prev_insn_processed < 100)
9239 add_new_state = false;
9240 goto miss;
9241 }
9242 if (states_equal(env, &sl->state, cur)) {
9243 sl->hit_cnt++;
9244 /* reached equivalent register/stack state,
9245 * prune the search.
9246 * Registers read by the continuation are read by us.
9247 * If we have any write marks in env->cur_state, they
9248 * will prevent corresponding reads in the continuation
9249 * from reaching our parent (an explored_state). Our
9250 * own state will get the read marks recorded, but
9251 * they'll be immediately forgotten as we're pruning
9252 * this state and will pop a new one.
9253 */
9254 err = propagate_liveness(env, &sl->state, cur);
9255
9256 /* if the previous state reached the exit with precision and
9257 * the current state is equivalent to it (except precision marks)
9258 * the precision needs to be propagated back in
9259 * the current state.
9260 */
9261 err = err ? : push_jmp_history(env, cur);
9262 err = err ? : propagate_precision(env, &sl->state);
9263 if (err)
9264 return err;
9265 return 1;
9266 }
9267 miss:
9268 /* when a new state is not going to be added, do not increase the miss
9269 * count. Otherwise several loop iterations will remove the state
9270 * recorded earlier. The goal of these heuristics is to have
9271 * states from some iterations of the loop (some in the beginning
9272 * and some at the end) to help pruning.
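 * E.g. with the check below, a state that was hit twice is only evicted
 * after more than 9 misses (hit_cnt * 3 + 3 == 2 * 3 + 3), so states that
 * keep proving useful for pruning are retained.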
9273 */
9274 if (add_new_state)
9275 sl->miss_cnt++;
9276 /* heuristic to determine whether this state is beneficial
9277 * to keep checking from the state equivalence point of view.
9278 * Higher numbers increase max_states_per_insn and verification time,
9279 * but do not meaningfully decrease insn_processed.
9280 */
9281 if (sl->miss_cnt > sl->hit_cnt * 3 + 3) {
9282 /* the state is unlikely to be useful. Remove it to
9283 * speed up verification
9284 */
9285 *pprev = sl->next;
9286 if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE) {
9287 u32 br = sl->state.branches;
9288
9289 WARN_ONCE(br,
9290 "BUG live_done but branches_to_explore %d\n",
9291 br);
9292 free_verifier_state(&sl->state, false);
9293 kfree(sl);
9294 env->peak_states--;
9295 } else {
9296 /* cannot free this state, since the parentage chain may
9297 * walk it later. Add it to the free_list instead, to
9298 * be freed at the end of verification
9299 */
9300 sl->next = env->free_list;
9301 env->free_list = sl;
9302 }
9303 sl = *pprev;
9304 continue;
9305 }
9306 next:
9307 pprev = &sl->next;
9308 sl = *pprev;
9309 }
9310
9311 if (env->max_states_per_insn < states_cnt)
9312 env->max_states_per_insn = states_cnt;
9313
9314 if (!env->bpf_capable && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
9315 return push_jmp_history(env, cur);
9316
9317 if (!add_new_state)
9318 return push_jmp_history(env, cur);
9319
9320 /* There were no equivalent states, remember the current one.
9321 * Technically the current state is not proven to be safe yet,
9322 * but it will either reach the outermost bpf_exit (which means it's safe)
9323 * or it will be rejected. When there are no loops the verifier won't be
9324 * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx)
9325 * again on the way to bpf_exit.
9326 * When looping, sl->state.branches will be > 0 and this state
9327 * will not be considered for equivalence until branches == 0.
9328 */
9329 new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
9330 if (!new_sl)
9331 return -ENOMEM;
9332 env->total_states++;
9333 env->peak_states++;
9334 env->prev_jmps_processed = env->jmps_processed;
9335 env->prev_insn_processed = env->insn_processed;
9336
9337 /* add new state to the head of linked list */
9338 new = &new_sl->state;
9339 err = copy_verifier_state(new, cur);
9340 if (err) {
9341 free_verifier_state(new, false);
9342 kfree(new_sl);
9343 return err;
9344 }
9345 new->insn_idx = insn_idx;
9346 WARN_ONCE(new->branches != 1,
9347 "BUG is_state_visited:branches_to_explore=%d insn %d\n", new->branches, insn_idx);
9348
9349 cur->parent = new;
9350 cur->first_insn_idx = insn_idx;
9351 clear_jmp_history(cur);
9352 new_sl->next = *explored_state(env, insn_idx);
9353 *explored_state(env, insn_idx) = new_sl;
9354 /* connect new state to the parentage chain. The current frame needs all
9355 * registers connected. Only r6 - r9 of the callers are alive (pushed
9356 * to the stack implicitly by JITs) so in callers' frames connect just
9357 * r6 - r9 as an optimization. Callers will have r1 - r5 connected to
9358 * the state of the call instruction (with WRITTEN set), and r0 comes
9359 * from the callee with its full parentage chain, anyway.
9360 */
9361 /* clear write marks in the current state: the writes we did are not writes
9362 * our child did, so they don't screen off its reads from us.
9363 * (There are no read marks in the current state, because reads always mark
9364 * their parent and the current state never has children yet. Only
9365 * explored_states can get read marks.)
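 * After this point cur is effectively a fresh child of new: any read in
 * cur's continuation will mark new, its parent, live via the parentage
 * pointers established below.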
9366 */ 9367 for (j = 0; j <= cur->curframe; j++) { 9368 for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) 9369 cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i]; 9370 for (i = 0; i < BPF_REG_FP; i++) 9371 cur->frame[j]->regs[i].live = REG_LIVE_NONE; 9372 } 9373 9374 /* all stack frames are accessible from callee, clear them all */ 9375 for (j = 0; j <= cur->curframe; j++) { 9376 struct bpf_func_state *frame = cur->frame[j]; 9377 struct bpf_func_state *newframe = new->frame[j]; 9378 9379 for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) { 9380 frame->stack[i].spilled_ptr.live = REG_LIVE_NONE; 9381 frame->stack[i].spilled_ptr.parent = 9382 &newframe->stack[i].spilled_ptr; 9383 } 9384 } 9385 return 0; 9386 } 9387 9388 /* Return true if it's OK to have the same insn return a different type. */ 9389 static bool reg_type_mismatch_ok(enum bpf_reg_type type) 9390 { 9391 switch (type) { 9392 case PTR_TO_CTX: 9393 case PTR_TO_SOCKET: 9394 case PTR_TO_SOCKET_OR_NULL: 9395 case PTR_TO_SOCK_COMMON: 9396 case PTR_TO_SOCK_COMMON_OR_NULL: 9397 case PTR_TO_TCP_SOCK: 9398 case PTR_TO_TCP_SOCK_OR_NULL: 9399 case PTR_TO_XDP_SOCK: 9400 case PTR_TO_BTF_ID: 9401 case PTR_TO_BTF_ID_OR_NULL: 9402 return false; 9403 default: 9404 return true; 9405 } 9406 } 9407 9408 /* If an instruction was previously used with particular pointer types, then we 9409 * need to be careful to avoid cases such as the below, where it may be ok 9410 * for one branch accessing the pointer, but not ok for the other branch: 9411 * 9412 * R1 = sock_ptr 9413 * goto X; 9414 * ... 9415 * R1 = some_other_valid_ptr; 9416 * goto X; 9417 * ... 9418 * R2 = *(u32 *)(R1 + 0); 9419 */ 9420 static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev) 9421 { 9422 return src != prev && (!reg_type_mismatch_ok(src) || 9423 !reg_type_mismatch_ok(prev)); 9424 } 9425 9426 static int do_check(struct bpf_verifier_env *env) 9427 { 9428 bool pop_log = !(env->log.level & BPF_LOG_LEVEL2); 9429 struct bpf_verifier_state *state = env->cur_state; 9430 struct bpf_insn *insns = env->prog->insnsi; 9431 struct bpf_reg_state *regs; 9432 int insn_cnt = env->prog->len; 9433 bool do_print_state = false; 9434 int prev_insn_idx = -1; 9435 9436 for (;;) { 9437 struct bpf_insn *insn; 9438 u8 class; 9439 int err; 9440 9441 env->prev_insn_idx = prev_insn_idx; 9442 if (env->insn_idx >= insn_cnt) { 9443 verbose(env, "invalid insn idx %d insn_cnt %d\n", 9444 env->insn_idx, insn_cnt); 9445 return -EFAULT; 9446 } 9447 9448 insn = &insns[env->insn_idx]; 9449 class = BPF_CLASS(insn->code); 9450 9451 if (++env->insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) { 9452 verbose(env, 9453 "BPF program is too large. Processed %d insn\n", 9454 env->insn_processed); 9455 return -E2BIG; 9456 } 9457 9458 err = is_state_visited(env, env->insn_idx); 9459 if (err < 0) 9460 return err; 9461 if (err == 1) { 9462 /* found equivalent state, can prune the search */ 9463 if (env->log.level & BPF_LOG_LEVEL) { 9464 if (do_print_state) 9465 verbose(env, "\nfrom %d to %d%s: safe\n", 9466 env->prev_insn_idx, env->insn_idx, 9467 env->cur_state->speculative ? 
9468 " (speculative execution)" : ""); 9469 else 9470 verbose(env, "%d: safe\n", env->insn_idx); 9471 } 9472 goto process_bpf_exit; 9473 } 9474 9475 if (signal_pending(current)) 9476 return -EAGAIN; 9477 9478 if (need_resched()) 9479 cond_resched(); 9480 9481 if (env->log.level & BPF_LOG_LEVEL2 || 9482 (env->log.level & BPF_LOG_LEVEL && do_print_state)) { 9483 if (env->log.level & BPF_LOG_LEVEL2) 9484 verbose(env, "%d:", env->insn_idx); 9485 else 9486 verbose(env, "\nfrom %d to %d%s:", 9487 env->prev_insn_idx, env->insn_idx, 9488 env->cur_state->speculative ? 9489 " (speculative execution)" : ""); 9490 print_verifier_state(env, state->frame[state->curframe]); 9491 do_print_state = false; 9492 } 9493 9494 if (env->log.level & BPF_LOG_LEVEL) { 9495 const struct bpf_insn_cbs cbs = { 9496 .cb_print = verbose, 9497 .private_data = env, 9498 }; 9499 9500 verbose_linfo(env, env->insn_idx, "; "); 9501 verbose(env, "%d: ", env->insn_idx); 9502 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks); 9503 } 9504 9505 if (bpf_prog_is_dev_bound(env->prog->aux)) { 9506 err = bpf_prog_offload_verify_insn(env, env->insn_idx, 9507 env->prev_insn_idx); 9508 if (err) 9509 return err; 9510 } 9511 9512 regs = cur_regs(env); 9513 env->insn_aux_data[env->insn_idx].seen = env->pass_cnt; 9514 prev_insn_idx = env->insn_idx; 9515 9516 if (class == BPF_ALU || class == BPF_ALU64) { 9517 err = check_alu_op(env, insn); 9518 if (err) 9519 return err; 9520 9521 } else if (class == BPF_LDX) { 9522 enum bpf_reg_type *prev_src_type, src_reg_type; 9523 9524 /* check for reserved fields is already done */ 9525 9526 /* check src operand */ 9527 err = check_reg_arg(env, insn->src_reg, SRC_OP); 9528 if (err) 9529 return err; 9530 9531 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); 9532 if (err) 9533 return err; 9534 9535 src_reg_type = regs[insn->src_reg].type; 9536 9537 /* check that memory (src_reg + off) is readable, 9538 * the state of dst_reg will be updated by this func 9539 */ 9540 err = check_mem_access(env, env->insn_idx, insn->src_reg, 9541 insn->off, BPF_SIZE(insn->code), 9542 BPF_READ, insn->dst_reg, false); 9543 if (err) 9544 return err; 9545 9546 prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type; 9547 9548 if (*prev_src_type == NOT_INIT) { 9549 /* saw a valid insn 9550 * dst_reg = *(u32 *)(src_reg + off) 9551 * save type to validate intersecting paths 9552 */ 9553 *prev_src_type = src_reg_type; 9554 9555 } else if (reg_type_mismatch(src_reg_type, *prev_src_type)) { 9556 /* ABuser program is trying to use the same insn 9557 * dst_reg = *(u32*) (src_reg + off) 9558 * with different pointer types: 9559 * src_reg == ctx in one branch and 9560 * src_reg == stack|map in some other branch. 9561 * Reject it. 
9562 */ 9563 verbose(env, "same insn cannot be used with different pointers\n"); 9564 return -EINVAL; 9565 } 9566 9567 } else if (class == BPF_STX) { 9568 enum bpf_reg_type *prev_dst_type, dst_reg_type; 9569 9570 if (BPF_MODE(insn->code) == BPF_ATOMIC) { 9571 err = check_atomic(env, env->insn_idx, insn); 9572 if (err) 9573 return err; 9574 env->insn_idx++; 9575 continue; 9576 } 9577 9578 if (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0) { 9579 verbose(env, "BPF_STX uses reserved fields\n"); 9580 return -EINVAL; 9581 } 9582 9583 /* check src1 operand */ 9584 err = check_reg_arg(env, insn->src_reg, SRC_OP); 9585 if (err) 9586 return err; 9587 /* check src2 operand */ 9588 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 9589 if (err) 9590 return err; 9591 9592 dst_reg_type = regs[insn->dst_reg].type; 9593 9594 /* check that memory (dst_reg + off) is writeable */ 9595 err = check_mem_access(env, env->insn_idx, insn->dst_reg, 9596 insn->off, BPF_SIZE(insn->code), 9597 BPF_WRITE, insn->src_reg, false); 9598 if (err) 9599 return err; 9600 9601 prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type; 9602 9603 if (*prev_dst_type == NOT_INIT) { 9604 *prev_dst_type = dst_reg_type; 9605 } else if (reg_type_mismatch(dst_reg_type, *prev_dst_type)) { 9606 verbose(env, "same insn cannot be used with different pointers\n"); 9607 return -EINVAL; 9608 } 9609 9610 } else if (class == BPF_ST) { 9611 if (BPF_MODE(insn->code) != BPF_MEM || 9612 insn->src_reg != BPF_REG_0) { 9613 verbose(env, "BPF_ST uses reserved fields\n"); 9614 return -EINVAL; 9615 } 9616 /* check src operand */ 9617 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 9618 if (err) 9619 return err; 9620 9621 if (is_ctx_reg(env, insn->dst_reg)) { 9622 verbose(env, "BPF_ST stores into R%d %s is not allowed\n", 9623 insn->dst_reg, 9624 reg_type_str[reg_state(env, insn->dst_reg)->type]); 9625 return -EACCES; 9626 } 9627 9628 /* check that memory (dst_reg + off) is writeable */ 9629 err = check_mem_access(env, env->insn_idx, insn->dst_reg, 9630 insn->off, BPF_SIZE(insn->code), 9631 BPF_WRITE, -1, false); 9632 if (err) 9633 return err; 9634 9635 } else if (class == BPF_JMP || class == BPF_JMP32) { 9636 u8 opcode = BPF_OP(insn->code); 9637 9638 env->jmps_processed++; 9639 if (opcode == BPF_CALL) { 9640 if (BPF_SRC(insn->code) != BPF_K || 9641 insn->off != 0 || 9642 (insn->src_reg != BPF_REG_0 && 9643 insn->src_reg != BPF_PSEUDO_CALL) || 9644 insn->dst_reg != BPF_REG_0 || 9645 class == BPF_JMP32) { 9646 verbose(env, "BPF_CALL uses reserved fields\n"); 9647 return -EINVAL; 9648 } 9649 9650 if (env->cur_state->active_spin_lock && 9651 (insn->src_reg == BPF_PSEUDO_CALL || 9652 insn->imm != BPF_FUNC_spin_unlock)) { 9653 verbose(env, "function calls are not allowed while holding a lock\n"); 9654 return -EINVAL; 9655 } 9656 if (insn->src_reg == BPF_PSEUDO_CALL) 9657 err = check_func_call(env, insn, &env->insn_idx); 9658 else 9659 err = check_helper_call(env, insn->imm, env->insn_idx); 9660 if (err) 9661 return err; 9662 9663 } else if (opcode == BPF_JA) { 9664 if (BPF_SRC(insn->code) != BPF_K || 9665 insn->imm != 0 || 9666 insn->src_reg != BPF_REG_0 || 9667 insn->dst_reg != BPF_REG_0 || 9668 class == BPF_JMP32) { 9669 verbose(env, "BPF_JA uses reserved fields\n"); 9670 return -EINVAL; 9671 } 9672 9673 env->insn_idx += insn->off + 1; 9674 continue; 9675 9676 } else if (opcode == BPF_EXIT) { 9677 if (BPF_SRC(insn->code) != BPF_K || 9678 insn->imm != 0 || 9679 insn->src_reg != BPF_REG_0 || 9680 insn->dst_reg != BPF_REG_0 || 9681 class == BPF_JMP32) { 
9682 verbose(env, "BPF_EXIT uses reserved fields\n"); 9683 return -EINVAL; 9684 } 9685 9686 if (env->cur_state->active_spin_lock) { 9687 verbose(env, "bpf_spin_unlock is missing\n"); 9688 return -EINVAL; 9689 } 9690 9691 if (state->curframe) { 9692 /* exit from nested function */ 9693 err = prepare_func_exit(env, &env->insn_idx); 9694 if (err) 9695 return err; 9696 do_print_state = true; 9697 continue; 9698 } 9699 9700 err = check_reference_leak(env); 9701 if (err) 9702 return err; 9703 9704 err = check_return_code(env); 9705 if (err) 9706 return err; 9707 process_bpf_exit: 9708 update_branch_counts(env, env->cur_state); 9709 err = pop_stack(env, &prev_insn_idx, 9710 &env->insn_idx, pop_log); 9711 if (err < 0) { 9712 if (err != -ENOENT) 9713 return err; 9714 break; 9715 } else { 9716 do_print_state = true; 9717 continue; 9718 } 9719 } else { 9720 err = check_cond_jmp_op(env, insn, &env->insn_idx); 9721 if (err) 9722 return err; 9723 } 9724 } else if (class == BPF_LD) { 9725 u8 mode = BPF_MODE(insn->code); 9726 9727 if (mode == BPF_ABS || mode == BPF_IND) { 9728 err = check_ld_abs(env, insn); 9729 if (err) 9730 return err; 9731 9732 } else if (mode == BPF_IMM) { 9733 err = check_ld_imm(env, insn); 9734 if (err) 9735 return err; 9736 9737 env->insn_idx++; 9738 env->insn_aux_data[env->insn_idx].seen = env->pass_cnt; 9739 } else { 9740 verbose(env, "invalid BPF_LD mode\n"); 9741 return -EINVAL; 9742 } 9743 } else { 9744 verbose(env, "unknown insn class %d\n", class); 9745 return -EINVAL; 9746 } 9747 9748 env->insn_idx++; 9749 } 9750 9751 return 0; 9752 } 9753 9754 static int find_btf_percpu_datasec(struct btf *btf) 9755 { 9756 const struct btf_type *t; 9757 const char *tname; 9758 int i, n; 9759 9760 /* 9761 * Both vmlinux and module each have their own ".data..percpu" 9762 * DATASECs in BTF. So for module's case, we need to skip vmlinux BTF 9763 * types to look at only module's own BTF types. 
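 *
 * A module's BTF type IDs continue right after the last vmlinux BTF type
 * ID (type ID 0 is reserved for void), which is why the search below
 * starts at btf_nr_types(btf_vmlinux) for modules and at 1 otherwise.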
9764 */
9765 n = btf_nr_types(btf);
9766 if (btf_is_module(btf))
9767 i = btf_nr_types(btf_vmlinux);
9768 else
9769 i = 1;
9770
9771 for (; i < n; i++) {
9772 t = btf_type_by_id(btf, i);
9773 if (BTF_INFO_KIND(t->info) != BTF_KIND_DATASEC)
9774 continue;
9775
9776 tname = btf_name_by_offset(btf, t->name_off);
9777 if (!strcmp(tname, ".data..percpu"))
9778 return i;
9779 }
9780
9781 return -ENOENT;
9782 }
9783
9784 /* replace pseudo btf_id with kernel symbol address */
9785 static int check_pseudo_btf_id(struct bpf_verifier_env *env,
9786 struct bpf_insn *insn,
9787 struct bpf_insn_aux_data *aux)
9788 {
9789 const struct btf_var_secinfo *vsi;
9790 const struct btf_type *datasec;
9791 struct btf_mod_pair *btf_mod;
9792 const struct btf_type *t;
9793 const char *sym_name;
9794 bool percpu = false;
9795 u32 type, id = insn->imm;
9796 struct btf *btf;
9797 s32 datasec_id;
9798 u64 addr;
9799 int i, btf_fd, err;
9800
9801 btf_fd = insn[1].imm;
9802 if (btf_fd) {
9803 btf = btf_get_by_fd(btf_fd);
9804 if (IS_ERR(btf)) {
9805 verbose(env, "invalid module BTF object FD specified.\n");
9806 return -EINVAL;
9807 }
9808 } else {
9809 if (!btf_vmlinux) {
9810 verbose(env, "kernel is missing BTF, make sure CONFIG_DEBUG_INFO_BTF=y is specified in Kconfig.\n");
9811 return -EINVAL;
9812 }
9813 btf = btf_vmlinux;
9814 btf_get(btf);
9815 }
9816
9817 t = btf_type_by_id(btf, id);
9818 if (!t) {
9819 verbose(env, "ldimm64 insn specifies invalid btf_id %d.\n", id);
9820 err = -ENOENT;
9821 goto err_put;
9822 }
9823
9824 if (!btf_type_is_var(t)) {
9825 verbose(env, "pseudo btf_id %d in ldimm64 isn't KIND_VAR.\n", id);
9826 err = -EINVAL;
9827 goto err_put;
9828 }
9829
9830 sym_name = btf_name_by_offset(btf, t->name_off);
9831 addr = kallsyms_lookup_name(sym_name);
9832 if (!addr) {
9833 verbose(env, "ldimm64 failed to find the address for kernel symbol '%s'.\n",
9834 sym_name);
9835 err = -ENOENT;
9836 goto err_put;
9837 }
9838
9839 datasec_id = find_btf_percpu_datasec(btf);
9840 if (datasec_id > 0) {
9841 datasec = btf_type_by_id(btf, datasec_id);
9842 for_each_vsi(i, datasec, vsi) {
9843 if (vsi->type == id) {
9844 percpu = true;
9845 break;
9846 }
9847 }
9848 }
9849
9850 insn[0].imm = (u32)addr;
9851 insn[1].imm = addr >> 32;
9852
9853 type = t->type;
9854 t = btf_type_skip_modifiers(btf, type, NULL);
9855 if (percpu) {
9856 aux->btf_var.reg_type = PTR_TO_PERCPU_BTF_ID;
9857 aux->btf_var.btf = btf;
9858 aux->btf_var.btf_id = type;
9859 } else if (!btf_type_is_struct(t)) {
9860 const struct btf_type *ret;
9861 const char *tname;
9862 u32 tsize;
9863
9864 /* resolve the type size of ksym.
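 * E.g. an 'int' ksym resolves to tsize == 4 and is exposed to the
 * program as a 4-byte PTR_TO_MEM below.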
 */
9865 ret = btf_resolve_size(btf, t, &tsize);
9866 if (IS_ERR(ret)) {
9867 tname = btf_name_by_offset(btf, t->name_off);
9868 verbose(env, "ldimm64 unable to resolve the size of type '%s': %ld\n",
9869 tname, PTR_ERR(ret));
9870 err = -EINVAL;
9871 goto err_put;
9872 }
9873 aux->btf_var.reg_type = PTR_TO_MEM;
9874 aux->btf_var.mem_size = tsize;
9875 } else {
9876 aux->btf_var.reg_type = PTR_TO_BTF_ID;
9877 aux->btf_var.btf = btf;
9878 aux->btf_var.btf_id = type;
9879 }
9880
9881 /* check whether we recorded this BTF (and maybe module) already */
9882 for (i = 0; i < env->used_btf_cnt; i++) {
9883 if (env->used_btfs[i].btf == btf) {
9884 btf_put(btf);
9885 return 0;
9886 }
9887 }
9888
9889 if (env->used_btf_cnt >= MAX_USED_BTFS) {
9890 err = -E2BIG;
9891 goto err_put;
9892 }
9893
9894 btf_mod = &env->used_btfs[env->used_btf_cnt];
9895 btf_mod->btf = btf;
9896 btf_mod->module = NULL;
9897
9898 /* if we reference variables from a kernel module, bump its refcount */
9899 if (btf_is_module(btf)) {
9900 btf_mod->module = btf_try_get_module(btf);
9901 if (!btf_mod->module) {
9902 err = -ENXIO;
9903 goto err_put;
9904 }
9905 }
9906
9907 env->used_btf_cnt++;
9908
9909 return 0;
9910 err_put:
9911 btf_put(btf);
9912 return err;
9913 }
9914
9915 static int check_map_prealloc(struct bpf_map *map)
9916 {
9917 return (map->map_type != BPF_MAP_TYPE_HASH &&
9918 map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
9919 map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) ||
9920 !(map->map_flags & BPF_F_NO_PREALLOC);
9921 }
9922
9923 static bool is_tracing_prog_type(enum bpf_prog_type type)
9924 {
9925 switch (type) {
9926 case BPF_PROG_TYPE_KPROBE:
9927 case BPF_PROG_TYPE_TRACEPOINT:
9928 case BPF_PROG_TYPE_PERF_EVENT:
9929 case BPF_PROG_TYPE_RAW_TRACEPOINT:
9930 return true;
9931 default:
9932 return false;
9933 }
9934 }
9935
9936 static bool is_preallocated_map(struct bpf_map *map)
9937 {
9938 if (!check_map_prealloc(map))
9939 return false;
9940 if (map->inner_map_meta && !check_map_prealloc(map->inner_map_meta))
9941 return false;
9942 return true;
9943 }
9944
9945 static int check_map_prog_compatibility(struct bpf_verifier_env *env,
9946 struct bpf_map *map,
9947 struct bpf_prog *prog)
9948
9949 {
9950 enum bpf_prog_type prog_type = resolve_prog_type(prog);
9951 /*
9952 * Validate that trace type programs use preallocated hash maps.
9953 *
9954 * For programs attached to PERF events this is mandatory as the
9955 * perf NMI can hit any arbitrary code sequence.
9956 *
9957 * All other trace types using non-preallocated hash maps are unsafe
9958 * as well because tracepoints or kprobes can be inside locked regions
9959 * of the memory allocator or at a place where a recursion into the
9960 * memory allocator would see inconsistent state.
9961 *
9962 * On RT enabled kernels run-time allocation is strictly prohibited
9963 * for all trace type programs due to lock type constraints. On
9964 * !RT kernels it is allowed for backwards compatibility reasons for
9965 * now, but warnings are emitted so developers are made aware of
9966 * the unsafety and can fix their programs before this is enforced.
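 *
 * E.g. a kprobe placed inside the allocator's slow path could invoke
 * a BPF program whose non-preallocated hash map update re-enters the
 * allocator, deadlocking or observing inconsistent allocator state.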
9967 */ 9968 if (is_tracing_prog_type(prog_type) && !is_preallocated_map(map)) { 9969 if (prog_type == BPF_PROG_TYPE_PERF_EVENT) { 9970 verbose(env, "perf_event programs can only use preallocated hash map\n"); 9971 return -EINVAL; 9972 } 9973 if (IS_ENABLED(CONFIG_PREEMPT_RT)) { 9974 verbose(env, "trace type programs can only use preallocated hash map\n"); 9975 return -EINVAL; 9976 } 9977 WARN_ONCE(1, "trace type BPF program uses run-time allocation\n"); 9978 verbose(env, "trace type programs with run-time allocated hash maps are unsafe. Switch to preallocated hash maps.\n"); 9979 } 9980 9981 if (map_value_has_spin_lock(map)) { 9982 if (prog_type == BPF_PROG_TYPE_SOCKET_FILTER) { 9983 verbose(env, "socket filter progs cannot use bpf_spin_lock yet\n"); 9984 return -EINVAL; 9985 } 9986 9987 if (is_tracing_prog_type(prog_type)) { 9988 verbose(env, "tracing progs cannot use bpf_spin_lock yet\n"); 9989 return -EINVAL; 9990 } 9991 9992 if (prog->aux->sleepable) { 9993 verbose(env, "sleepable progs cannot use bpf_spin_lock yet\n"); 9994 return -EINVAL; 9995 } 9996 } 9997 9998 if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) && 9999 !bpf_offload_prog_map_match(prog, map)) { 10000 verbose(env, "offload device mismatch between prog and map\n"); 10001 return -EINVAL; 10002 } 10003 10004 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { 10005 verbose(env, "bpf_struct_ops map cannot be used in prog\n"); 10006 return -EINVAL; 10007 } 10008 10009 if (prog->aux->sleepable) 10010 switch (map->map_type) { 10011 case BPF_MAP_TYPE_HASH: 10012 case BPF_MAP_TYPE_LRU_HASH: 10013 case BPF_MAP_TYPE_ARRAY: 10014 if (!is_preallocated_map(map)) { 10015 verbose(env, 10016 "Sleepable programs can only use preallocated hash maps\n"); 10017 return -EINVAL; 10018 } 10019 break; 10020 default: 10021 verbose(env, 10022 "Sleepable programs can only use array and hash maps\n"); 10023 return -EINVAL; 10024 } 10025 10026 return 0; 10027 } 10028 10029 static bool bpf_map_is_cgroup_storage(struct bpf_map *map) 10030 { 10031 return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE || 10032 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE); 10033 } 10034 10035 /* find and rewrite pseudo imm in ld_imm64 instructions: 10036 * 10037 * 1. if it accesses map FD, replace it with actual map pointer. 10038 * 2. if it accesses btf_id of a VAR, replace it with pointer to the var. 10039 * 10040 * NOTE: btf_vmlinux is required for converting pseudo btf_id. 
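 *
 * A ld_imm64 occupies two insns. E.g. for a map FD the pair arrives as
 *   insn[0]: code=BPF_LD|BPF_IMM|BPF_DW, src_reg=BPF_PSEUDO_MAP_FD, imm=map_fd
 *   insn[1]: imm=0
 * and leaves this pass with insn[0].imm/insn[1].imm holding the low/high
 * 32 bits of the 'struct bpf_map *' pointer.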
10041 */ 10042 static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env) 10043 { 10044 struct bpf_insn *insn = env->prog->insnsi; 10045 int insn_cnt = env->prog->len; 10046 int i, j, err; 10047 10048 err = bpf_prog_calc_tag(env->prog); 10049 if (err) 10050 return err; 10051 10052 for (i = 0; i < insn_cnt; i++, insn++) { 10053 if (BPF_CLASS(insn->code) == BPF_LDX && 10054 (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) { 10055 verbose(env, "BPF_LDX uses reserved fields\n"); 10056 return -EINVAL; 10057 } 10058 10059 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) { 10060 struct bpf_insn_aux_data *aux; 10061 struct bpf_map *map; 10062 struct fd f; 10063 u64 addr; 10064 10065 if (i == insn_cnt - 1 || insn[1].code != 0 || 10066 insn[1].dst_reg != 0 || insn[1].src_reg != 0 || 10067 insn[1].off != 0) { 10068 verbose(env, "invalid bpf_ld_imm64 insn\n"); 10069 return -EINVAL; 10070 } 10071 10072 if (insn[0].src_reg == 0) 10073 /* valid generic load 64-bit imm */ 10074 goto next_insn; 10075 10076 if (insn[0].src_reg == BPF_PSEUDO_BTF_ID) { 10077 aux = &env->insn_aux_data[i]; 10078 err = check_pseudo_btf_id(env, insn, aux); 10079 if (err) 10080 return err; 10081 goto next_insn; 10082 } 10083 10084 /* In final convert_pseudo_ld_imm64() step, this is 10085 * converted into regular 64-bit imm load insn. 10086 */ 10087 if ((insn[0].src_reg != BPF_PSEUDO_MAP_FD && 10088 insn[0].src_reg != BPF_PSEUDO_MAP_VALUE) || 10089 (insn[0].src_reg == BPF_PSEUDO_MAP_FD && 10090 insn[1].imm != 0)) { 10091 verbose(env, 10092 "unrecognized bpf_ld_imm64 insn\n"); 10093 return -EINVAL; 10094 } 10095 10096 f = fdget(insn[0].imm); 10097 map = __bpf_map_get(f); 10098 if (IS_ERR(map)) { 10099 verbose(env, "fd %d is not pointing to valid bpf_map\n", 10100 insn[0].imm); 10101 return PTR_ERR(map); 10102 } 10103 10104 err = check_map_prog_compatibility(env, map, env->prog); 10105 if (err) { 10106 fdput(f); 10107 return err; 10108 } 10109 10110 aux = &env->insn_aux_data[i]; 10111 if (insn->src_reg == BPF_PSEUDO_MAP_FD) { 10112 addr = (unsigned long)map; 10113 } else { 10114 u32 off = insn[1].imm; 10115 10116 if (off >= BPF_MAX_VAR_OFF) { 10117 verbose(env, "direct value offset of %u is not allowed\n", off); 10118 fdput(f); 10119 return -EINVAL; 10120 } 10121 10122 if (!map->ops->map_direct_value_addr) { 10123 verbose(env, "no direct value access support for this map type\n"); 10124 fdput(f); 10125 return -EINVAL; 10126 } 10127 10128 err = map->ops->map_direct_value_addr(map, &addr, off); 10129 if (err) { 10130 verbose(env, "invalid access to map value pointer, value_size=%u off=%u\n", 10131 map->value_size, off); 10132 fdput(f); 10133 return err; 10134 } 10135 10136 aux->map_off = off; 10137 addr += off; 10138 } 10139 10140 insn[0].imm = (u32)addr; 10141 insn[1].imm = addr >> 32; 10142 10143 /* check whether we recorded this map already */ 10144 for (j = 0; j < env->used_map_cnt; j++) { 10145 if (env->used_maps[j] == map) { 10146 aux->map_index = j; 10147 fdput(f); 10148 goto next_insn; 10149 } 10150 } 10151 10152 if (env->used_map_cnt >= MAX_USED_MAPS) { 10153 fdput(f); 10154 return -E2BIG; 10155 } 10156 10157 /* hold the map. 
If the program is rejected by the verifier,
10158 * the map will be released by release_maps() or it
10159 * will be used by the valid program until it's unloaded
10160 * and all maps are released in free_used_maps()
10161 */
10162 bpf_map_inc(map);
10163
10164 aux->map_index = env->used_map_cnt;
10165 env->used_maps[env->used_map_cnt++] = map;
10166
10167 if (bpf_map_is_cgroup_storage(map) &&
10168 bpf_cgroup_storage_assign(env->prog->aux, map)) {
10169 verbose(env, "only one cgroup storage of each type is allowed\n");
10170 fdput(f);
10171 return -EBUSY;
10172 }
10173
10174 fdput(f);
10175 next_insn:
10176 insn++;
10177 i++;
10178 continue;
10179 }
10180
10181 /* Basic sanity check before we invest more work here. */
10182 if (!bpf_opcode_in_insntable(insn->code)) {
10183 verbose(env, "unknown opcode %02x\n", insn->code);
10184 return -EINVAL;
10185 }
10186 }
10187
10188 /* now all pseudo BPF_LD_IMM64 instructions load a valid
10189 * 'struct bpf_map *' into a register instead of a user map_fd.
10190 * These pointers will be used later by the verifier to validate map access.
10191 */
10192 return 0;
10193 }
10194
10195 /* drop refcnt of maps used by the rejected program */
10196 static void release_maps(struct bpf_verifier_env *env)
10197 {
10198 __bpf_free_used_maps(env->prog->aux, env->used_maps,
10199 env->used_map_cnt);
10200 }
10201
10202 /* drop refcnt of btf objects used by the rejected program */
10203 static void release_btfs(struct bpf_verifier_env *env)
10204 {
10205 __bpf_free_used_btfs(env->prog->aux, env->used_btfs,
10206 env->used_btf_cnt);
10207 }
10208
10209 /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
10210 static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
10211 {
10212 struct bpf_insn *insn = env->prog->insnsi;
10213 int insn_cnt = env->prog->len;
10214 int i;
10215
10216 for (i = 0; i < insn_cnt; i++, insn++)
10217 if (insn->code == (BPF_LD | BPF_IMM | BPF_DW))
10218 insn->src_reg = 0;
10219 }
10220
10221 /* a single env->prog->insnsi[off] instruction was replaced with the range
10222 * insnsi[off, off + cnt). Adjust corresponding insn_aux_data by copying
10223 * [0, off) and [off, end) to new locations, so the patched range stays zero
10224 */
10225 static int adjust_insn_aux_data(struct bpf_verifier_env *env,
10226 struct bpf_prog *new_prog, u32 off, u32 cnt)
10227 {
10228 struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
10229 struct bpf_insn *insn = new_prog->insnsi;
10230 u32 prog_len;
10231 int i;
10232
10233 /* the aux info at OFF always needs adjustment, no matter whether the
10234 * fast path (cnt == 1) is taken or not. There is no guarantee the insn
10235 * at OFF is the original insn of the old prog.
10236 */ 10237 old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1); 10238 10239 if (cnt == 1) 10240 return 0; 10241 prog_len = new_prog->len; 10242 new_data = vzalloc(array_size(prog_len, 10243 sizeof(struct bpf_insn_aux_data))); 10244 if (!new_data) 10245 return -ENOMEM; 10246 memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off); 10247 memcpy(new_data + off + cnt - 1, old_data + off, 10248 sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1)); 10249 for (i = off; i < off + cnt - 1; i++) { 10250 new_data[i].seen = env->pass_cnt; 10251 new_data[i].zext_dst = insn_has_def32(env, insn + i); 10252 } 10253 env->insn_aux_data = new_data; 10254 vfree(old_data); 10255 return 0; 10256 } 10257 10258 static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len) 10259 { 10260 int i; 10261 10262 if (len == 1) 10263 return; 10264 /* NOTE: fake 'exit' subprog should be updated as well. */ 10265 for (i = 0; i <= env->subprog_cnt; i++) { 10266 if (env->subprog_info[i].start <= off) 10267 continue; 10268 env->subprog_info[i].start += len - 1; 10269 } 10270 } 10271 10272 static void adjust_poke_descs(struct bpf_prog *prog, u32 len) 10273 { 10274 struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab; 10275 int i, sz = prog->aux->size_poke_tab; 10276 struct bpf_jit_poke_descriptor *desc; 10277 10278 for (i = 0; i < sz; i++) { 10279 desc = &tab[i]; 10280 desc->insn_idx += len - 1; 10281 } 10282 } 10283 10284 static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off, 10285 const struct bpf_insn *patch, u32 len) 10286 { 10287 struct bpf_prog *new_prog; 10288 10289 new_prog = bpf_patch_insn_single(env->prog, off, patch, len); 10290 if (IS_ERR(new_prog)) { 10291 if (PTR_ERR(new_prog) == -ERANGE) 10292 verbose(env, 10293 "insn %d cannot be patched due to 16-bit range\n", 10294 env->insn_aux_data[off].orig_idx); 10295 return NULL; 10296 } 10297 if (adjust_insn_aux_data(env, new_prog, off, len)) 10298 return NULL; 10299 adjust_subprog_starts(env, off, len); 10300 adjust_poke_descs(new_prog, len); 10301 return new_prog; 10302 } 10303 10304 static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env, 10305 u32 off, u32 cnt) 10306 { 10307 int i, j; 10308 10309 /* find first prog starting at or after off (first to remove) */ 10310 for (i = 0; i < env->subprog_cnt; i++) 10311 if (env->subprog_info[i].start >= off) 10312 break; 10313 /* find first prog starting at or after off + cnt (first to stay) */ 10314 for (j = i; j < env->subprog_cnt; j++) 10315 if (env->subprog_info[j].start >= off + cnt) 10316 break; 10317 /* if j doesn't start exactly at off + cnt, we are just removing 10318 * the front of previous prog 10319 */ 10320 if (env->subprog_info[j].start != off + cnt) 10321 j--; 10322 10323 if (j > i) { 10324 struct bpf_prog_aux *aux = env->prog->aux; 10325 int move; 10326 10327 /* move fake 'exit' subprog as well */ 10328 move = env->subprog_cnt + 1 - j; 10329 10330 memmove(env->subprog_info + i, 10331 env->subprog_info + j, 10332 sizeof(*env->subprog_info) * move); 10333 env->subprog_cnt -= j - i; 10334 10335 /* remove func_info */ 10336 if (aux->func_info) { 10337 move = aux->func_info_cnt - j; 10338 10339 memmove(aux->func_info + i, 10340 aux->func_info + j, 10341 sizeof(*aux->func_info) * move); 10342 aux->func_info_cnt -= j - i; 10343 /* func_info->insn_off is set after all code rewrites, 10344 * in adjust_btf_func() - no need to adjust 10345 */ 10346 } 10347 } else { 10348 /* convert i from "first prog to remove" 
to "first to adjust" */ 10349 if (env->subprog_info[i].start == off) 10350 i++; 10351 } 10352 10353 /* update fake 'exit' subprog as well */ 10354 for (; i <= env->subprog_cnt; i++) 10355 env->subprog_info[i].start -= cnt; 10356 10357 return 0; 10358 } 10359 10360 static int bpf_adj_linfo_after_remove(struct bpf_verifier_env *env, u32 off, 10361 u32 cnt) 10362 { 10363 struct bpf_prog *prog = env->prog; 10364 u32 i, l_off, l_cnt, nr_linfo; 10365 struct bpf_line_info *linfo; 10366 10367 nr_linfo = prog->aux->nr_linfo; 10368 if (!nr_linfo) 10369 return 0; 10370 10371 linfo = prog->aux->linfo; 10372 10373 /* find first line info to remove, count lines to be removed */ 10374 for (i = 0; i < nr_linfo; i++) 10375 if (linfo[i].insn_off >= off) 10376 break; 10377 10378 l_off = i; 10379 l_cnt = 0; 10380 for (; i < nr_linfo; i++) 10381 if (linfo[i].insn_off < off + cnt) 10382 l_cnt++; 10383 else 10384 break; 10385 10386 /* First live insn doesn't match first live linfo, it needs to "inherit" 10387 * last removed linfo. prog is already modified, so prog->len == off 10388 * means no live instructions after (tail of the program was removed). 10389 */ 10390 if (prog->len != off && l_cnt && 10391 (i == nr_linfo || linfo[i].insn_off != off + cnt)) { 10392 l_cnt--; 10393 linfo[--i].insn_off = off + cnt; 10394 } 10395 10396 /* remove the line info which refer to the removed instructions */ 10397 if (l_cnt) { 10398 memmove(linfo + l_off, linfo + i, 10399 sizeof(*linfo) * (nr_linfo - i)); 10400 10401 prog->aux->nr_linfo -= l_cnt; 10402 nr_linfo = prog->aux->nr_linfo; 10403 } 10404 10405 /* pull all linfo[i].insn_off >= off + cnt in by cnt */ 10406 for (i = l_off; i < nr_linfo; i++) 10407 linfo[i].insn_off -= cnt; 10408 10409 /* fix up all subprogs (incl. 'exit') which start >= off */ 10410 for (i = 0; i <= env->subprog_cnt; i++) 10411 if (env->subprog_info[i].linfo_idx > l_off) { 10412 /* program may have started in the removed region but 10413 * may not be fully removed 10414 */ 10415 if (env->subprog_info[i].linfo_idx >= l_off + l_cnt) 10416 env->subprog_info[i].linfo_idx -= l_cnt; 10417 else 10418 env->subprog_info[i].linfo_idx = l_off; 10419 } 10420 10421 return 0; 10422 } 10423 10424 static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt) 10425 { 10426 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; 10427 unsigned int orig_prog_len = env->prog->len; 10428 int err; 10429 10430 if (bpf_prog_is_dev_bound(env->prog->aux)) 10431 bpf_prog_offload_remove_insns(env, off, cnt); 10432 10433 err = bpf_remove_insns(env->prog, off, cnt); 10434 if (err) 10435 return err; 10436 10437 err = adjust_subprog_starts_after_remove(env, off, cnt); 10438 if (err) 10439 return err; 10440 10441 err = bpf_adj_linfo_after_remove(env, off, cnt); 10442 if (err) 10443 return err; 10444 10445 memmove(aux_data + off, aux_data + off + cnt, 10446 sizeof(*aux_data) * (orig_prog_len - off - cnt)); 10447 10448 return 0; 10449 } 10450 10451 /* The verifier does more data flow analysis than llvm and will not 10452 * explore branches that are dead at run time. Malicious programs can 10453 * have dead code too. Therefore replace all dead at-run-time code 10454 * with 'ja -1'. 10455 * 10456 * Just nops are not optimal, e.g. if they would sit at the end of the 10457 * program and through another bug we would manage to jump there, then 10458 * we'd execute beyond program memory otherwise. Returning exception 10459 * code also wouldn't work since we can have subprogs where the dead 10460 * code could be located. 
10461 */ 10462 static void sanitize_dead_code(struct bpf_verifier_env *env) 10463 { 10464 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; 10465 struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1); 10466 struct bpf_insn *insn = env->prog->insnsi; 10467 const int insn_cnt = env->prog->len; 10468 int i; 10469 10470 for (i = 0; i < insn_cnt; i++) { 10471 if (aux_data[i].seen) 10472 continue; 10473 memcpy(insn + i, &trap, sizeof(trap)); 10474 } 10475 } 10476 10477 static bool insn_is_cond_jump(u8 code) 10478 { 10479 u8 op; 10480 10481 if (BPF_CLASS(code) == BPF_JMP32) 10482 return true; 10483 10484 if (BPF_CLASS(code) != BPF_JMP) 10485 return false; 10486 10487 op = BPF_OP(code); 10488 return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL; 10489 } 10490 10491 static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env) 10492 { 10493 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; 10494 struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0); 10495 struct bpf_insn *insn = env->prog->insnsi; 10496 const int insn_cnt = env->prog->len; 10497 int i; 10498 10499 for (i = 0; i < insn_cnt; i++, insn++) { 10500 if (!insn_is_cond_jump(insn->code)) 10501 continue; 10502 10503 if (!aux_data[i + 1].seen) 10504 ja.off = insn->off; 10505 else if (!aux_data[i + 1 + insn->off].seen) 10506 ja.off = 0; 10507 else 10508 continue; 10509 10510 if (bpf_prog_is_dev_bound(env->prog->aux)) 10511 bpf_prog_offload_replace_insn(env, i, &ja); 10512 10513 memcpy(insn, &ja, sizeof(ja)); 10514 } 10515 } 10516 10517 static int opt_remove_dead_code(struct bpf_verifier_env *env) 10518 { 10519 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; 10520 int insn_cnt = env->prog->len; 10521 int i, err; 10522 10523 for (i = 0; i < insn_cnt; i++) { 10524 int j; 10525 10526 j = 0; 10527 while (i + j < insn_cnt && !aux_data[i + j].seen) 10528 j++; 10529 if (!j) 10530 continue; 10531 10532 err = verifier_remove_insns(env, i, j); 10533 if (err) 10534 return err; 10535 insn_cnt = env->prog->len; 10536 } 10537 10538 return 0; 10539 } 10540 10541 static int opt_remove_nops(struct bpf_verifier_env *env) 10542 { 10543 const struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0); 10544 struct bpf_insn *insn = env->prog->insnsi; 10545 int insn_cnt = env->prog->len; 10546 int i, err; 10547 10548 for (i = 0; i < insn_cnt; i++) { 10549 if (memcmp(&insn[i], &ja, sizeof(ja))) 10550 continue; 10551 10552 err = verifier_remove_insns(env, i, 1); 10553 if (err) 10554 return err; 10555 insn_cnt--; 10556 i--; 10557 } 10558 10559 return 0; 10560 } 10561 10562 static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env, 10563 const union bpf_attr *attr) 10564 { 10565 struct bpf_insn *patch, zext_patch[2], rnd_hi32_patch[4]; 10566 struct bpf_insn_aux_data *aux = env->insn_aux_data; 10567 int i, patch_len, delta = 0, len = env->prog->len; 10568 struct bpf_insn *insns = env->prog->insnsi; 10569 struct bpf_prog *new_prog; 10570 bool rnd_hi32; 10571 10572 rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32; 10573 zext_patch[1] = BPF_ZEXT_REG(0); 10574 rnd_hi32_patch[1] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0); 10575 rnd_hi32_patch[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32); 10576 rnd_hi32_patch[3] = BPF_ALU64_REG(BPF_OR, 0, BPF_REG_AX); 10577 for (i = 0; i < len; i++) { 10578 int adj_idx = i + delta; 10579 struct bpf_insn insn; 10580 10581 insn = insns[adj_idx]; 10582 if (!aux[adj_idx].zext_dst) { 10583 u8 code, class; 10584 u32 imm_rnd; 10585 10586 if (!rnd_hi32) 10587 continue; 10588 10589 code = insn.code; 10590 class = 
BPF_CLASS(code); 10591 if (insn_no_def(&insn)) 10592 continue; 10593 10594 /* NOTE: arg "reg" (the fourth one) is only used for 10595 * BPF_STX which has been ruled out in above 10596 * check, it is safe to pass NULL here. 10597 */ 10598 if (is_reg64(env, &insn, insn.dst_reg, NULL, DST_OP)) { 10599 if (class == BPF_LD && 10600 BPF_MODE(code) == BPF_IMM) 10601 i++; 10602 continue; 10603 } 10604 10605 /* ctx load could be transformed into wider load. */ 10606 if (class == BPF_LDX && 10607 aux[adj_idx].ptr_type == PTR_TO_CTX) 10608 continue; 10609 10610 imm_rnd = get_random_int(); 10611 rnd_hi32_patch[0] = insn; 10612 rnd_hi32_patch[1].imm = imm_rnd; 10613 rnd_hi32_patch[3].dst_reg = insn.dst_reg; 10614 patch = rnd_hi32_patch; 10615 patch_len = 4; 10616 goto apply_patch_buffer; 10617 } 10618 10619 if (!bpf_jit_needs_zext()) 10620 continue; 10621 10622 zext_patch[0] = insn; 10623 zext_patch[1].dst_reg = insn.dst_reg; 10624 zext_patch[1].src_reg = insn.dst_reg; 10625 patch = zext_patch; 10626 patch_len = 2; 10627 apply_patch_buffer: 10628 new_prog = bpf_patch_insn_data(env, adj_idx, patch, patch_len); 10629 if (!new_prog) 10630 return -ENOMEM; 10631 env->prog = new_prog; 10632 insns = new_prog->insnsi; 10633 aux = env->insn_aux_data; 10634 delta += patch_len - 1; 10635 } 10636 10637 return 0; 10638 } 10639 10640 /* convert load instructions that access fields of a context type into a 10641 * sequence of instructions that access fields of the underlying structure: 10642 * struct __sk_buff -> struct sk_buff 10643 * struct bpf_sock_ops -> struct sock 10644 */ 10645 static int convert_ctx_accesses(struct bpf_verifier_env *env) 10646 { 10647 const struct bpf_verifier_ops *ops = env->ops; 10648 int i, cnt, size, ctx_field_size, delta = 0; 10649 const int insn_cnt = env->prog->len; 10650 struct bpf_insn insn_buf[16], *insn; 10651 u32 target_size, size_default, off; 10652 struct bpf_prog *new_prog; 10653 enum bpf_access_type type; 10654 bool is_narrower_load; 10655 10656 if (ops->gen_prologue || env->seen_direct_write) { 10657 if (!ops->gen_prologue) { 10658 verbose(env, "bpf verifier is misconfigured\n"); 10659 return -EINVAL; 10660 } 10661 cnt = ops->gen_prologue(insn_buf, env->seen_direct_write, 10662 env->prog); 10663 if (cnt >= ARRAY_SIZE(insn_buf)) { 10664 verbose(env, "bpf verifier is misconfigured\n"); 10665 return -EINVAL; 10666 } else if (cnt) { 10667 new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt); 10668 if (!new_prog) 10669 return -ENOMEM; 10670 10671 env->prog = new_prog; 10672 delta += cnt - 1; 10673 } 10674 } 10675 10676 if (bpf_prog_is_dev_bound(env->prog->aux)) 10677 return 0; 10678 10679 insn = env->prog->insnsi + delta; 10680 10681 for (i = 0; i < insn_cnt; i++, insn++) { 10682 bpf_convert_ctx_access_t convert_ctx_access; 10683 10684 if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) || 10685 insn->code == (BPF_LDX | BPF_MEM | BPF_H) || 10686 insn->code == (BPF_LDX | BPF_MEM | BPF_W) || 10687 insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) 10688 type = BPF_READ; 10689 else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) || 10690 insn->code == (BPF_STX | BPF_MEM | BPF_H) || 10691 insn->code == (BPF_STX | BPF_MEM | BPF_W) || 10692 insn->code == (BPF_STX | BPF_MEM | BPF_DW)) 10693 type = BPF_WRITE; 10694 else 10695 continue; 10696 10697 if (type == BPF_WRITE && 10698 env->insn_aux_data[i + delta].sanitize_stack_off) { 10699 struct bpf_insn patch[] = { 10700 /* Sanitize suspicious stack slot with zero. 
10701 * There are no memory dependencies for this store,
10702 * since it's only using the frame pointer and an immediate
10703 * constant of zero
10704 */
10705 BPF_ST_MEM(BPF_DW, BPF_REG_FP,
10706 env->insn_aux_data[i + delta].sanitize_stack_off,
10707 0),
10708 /* the original STX instruction will immediately
10709 * overwrite the same stack slot with the appropriate value
10710 */
10711 *insn,
10712 };
10713
10714 cnt = ARRAY_SIZE(patch);
10715 new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
10716 if (!new_prog)
10717 return -ENOMEM;
10718
10719 delta += cnt - 1;
10720 env->prog = new_prog;
10721 insn = new_prog->insnsi + i + delta;
10722 continue;
10723 }
10724
10725 switch (env->insn_aux_data[i + delta].ptr_type) {
10726 case PTR_TO_CTX:
10727 if (!ops->convert_ctx_access)
10728 continue;
10729 convert_ctx_access = ops->convert_ctx_access;
10730 break;
10731 case PTR_TO_SOCKET:
10732 case PTR_TO_SOCK_COMMON:
10733 convert_ctx_access = bpf_sock_convert_ctx_access;
10734 break;
10735 case PTR_TO_TCP_SOCK:
10736 convert_ctx_access = bpf_tcp_sock_convert_ctx_access;
10737 break;
10738 case PTR_TO_XDP_SOCK:
10739 convert_ctx_access = bpf_xdp_sock_convert_ctx_access;
10740 break;
10741 case PTR_TO_BTF_ID:
10742 if (type == BPF_READ) {
10743 insn->code = BPF_LDX | BPF_PROBE_MEM |
10744 BPF_SIZE((insn)->code);
10745 env->prog->aux->num_exentries++;
10746 } else if (resolve_prog_type(env->prog) != BPF_PROG_TYPE_STRUCT_OPS) {
10747 verbose(env, "Writes through BTF pointers are not allowed\n");
10748 return -EINVAL;
10749 }
10750 continue;
10751 default:
10752 continue;
10753 }
10754
10755 ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
10756 size = BPF_LDST_BYTES(insn);
10757
10758 /* If the read access is a narrower load of the field,
10759 * convert it to a 4/8-byte load, to minimize program type specific
10760 * convert_ctx_access changes. If the conversion is successful,
10761 * we will apply the proper mask to the result.
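 * E.g. a 1-byte read of a 4-byte context field becomes a 4-byte load
 * of the aligned field; the shift and mask emitted further below then
 * extract the requested byte from the loaded value.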
10762 */ 10763 is_narrower_load = size < ctx_field_size; 10764 size_default = bpf_ctx_off_adjust_machine(ctx_field_size); 10765 off = insn->off; 10766 if (is_narrower_load) { 10767 u8 size_code; 10768 10769 if (type == BPF_WRITE) { 10770 verbose(env, "bpf verifier narrow ctx access misconfigured\n"); 10771 return -EINVAL; 10772 } 10773 10774 size_code = BPF_H; 10775 if (ctx_field_size == 4) 10776 size_code = BPF_W; 10777 else if (ctx_field_size == 8) 10778 size_code = BPF_DW; 10779 10780 insn->off = off & ~(size_default - 1); 10781 insn->code = BPF_LDX | BPF_MEM | size_code; 10782 } 10783 10784 target_size = 0; 10785 cnt = convert_ctx_access(type, insn, insn_buf, env->prog, 10786 &target_size); 10787 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) || 10788 (ctx_field_size && !target_size)) { 10789 verbose(env, "bpf verifier is misconfigured\n"); 10790 return -EINVAL; 10791 } 10792 10793 if (is_narrower_load && size < target_size) { 10794 u8 shift = bpf_ctx_narrow_access_offset( 10795 off, size, size_default) * 8; 10796 if (ctx_field_size <= 4) { 10797 if (shift) 10798 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH, 10799 insn->dst_reg, 10800 shift); 10801 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg, 10802 (1 << size * 8) - 1); 10803 } else { 10804 if (shift) 10805 insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH, 10806 insn->dst_reg, 10807 shift); 10808 insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg, 10809 (1ULL << size * 8) - 1); 10810 } 10811 } 10812 10813 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 10814 if (!new_prog) 10815 return -ENOMEM; 10816 10817 delta += cnt - 1; 10818 10819 /* keep walking new program and skip insns we just inserted */ 10820 env->prog = new_prog; 10821 insn = new_prog->insnsi + i + delta; 10822 } 10823 10824 return 0; 10825 } 10826 10827 static int jit_subprogs(struct bpf_verifier_env *env) 10828 { 10829 struct bpf_prog *prog = env->prog, **func, *tmp; 10830 int i, j, subprog_start, subprog_end = 0, len, subprog; 10831 struct bpf_map *map_ptr; 10832 struct bpf_insn *insn; 10833 void *old_bpf_func; 10834 int err, num_exentries; 10835 10836 if (env->subprog_cnt <= 1) 10837 return 0; 10838 10839 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { 10840 if (insn->code != (BPF_JMP | BPF_CALL) || 10841 insn->src_reg != BPF_PSEUDO_CALL) 10842 continue; 10843 /* Upon error here we cannot fall back to interpreter but 10844 * need a hard reject of the program. Thus -EFAULT is 10845 * propagated in any case. 10846 */ 10847 subprog = find_subprog(env, i + insn->imm + 1); 10848 if (subprog < 0) { 10849 WARN_ONCE(1, "verifier bug. 
No program starts at insn %d\n",
          i + insn->imm + 1);
return -EFAULT;
}
/* temporarily remember subprog id inside insn instead of
 * aux_data, since next loop will split up all insns into funcs
 */
insn->off = subprog;
/* remember original imm in case JIT fails and fallback
 * to interpreter will be needed
 */
env->insn_aux_data[i].call_imm = insn->imm;
/* point imm to __bpf_call_base+1 from JIT's point of view */
insn->imm = 1;
}

err = bpf_prog_alloc_jited_linfo(prog);
if (err)
    goto out_undo_insn;

err = -ENOMEM;
func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL);
if (!func)
    goto out_undo_insn;

for (i = 0; i < env->subprog_cnt; i++) {
    subprog_start = subprog_end;
    subprog_end = env->subprog_info[i + 1].start;

    len = subprog_end - subprog_start;
    /* BPF_PROG_RUN doesn't call subprogs directly,
     * hence main prog stats include the runtime of subprogs.
     * subprogs don't have IDs and are not reachable via prog_get_next_id,
     * so func[i]->aux->stats will never be accessed and stays NULL
     */
    func[i] = bpf_prog_alloc_no_stats(bpf_prog_size(len), GFP_USER);
    if (!func[i])
        goto out_free;
    memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
           len * sizeof(struct bpf_insn));
    func[i]->type = prog->type;
    func[i]->len = len;
    if (bpf_prog_calc_tag(func[i]))
        goto out_free;
    func[i]->is_func = 1;
    func[i]->aux->func_idx = i;
    /* the btf and func_info will be freed only at prog->aux */
    func[i]->aux->btf = prog->aux->btf;
    func[i]->aux->func_info = prog->aux->func_info;

    for (j = 0; j < prog->aux->size_poke_tab; j++) {
        u32 insn_idx = prog->aux->poke_tab[j].insn_idx;
        int ret;

        if (!(insn_idx >= subprog_start &&
              insn_idx <= subprog_end))
            continue;

        ret = bpf_jit_add_poke_descriptor(func[i],
                                          &prog->aux->poke_tab[j]);
        if (ret < 0) {
            verbose(env, "adding tail call poke descriptor failed\n");
            goto out_free;
        }

        func[i]->insnsi[insn_idx - subprog_start].imm = ret + 1;

        map_ptr = func[i]->aux->poke_tab[ret].tail_call.map;
        ret = map_ptr->ops->map_poke_track(map_ptr, func[i]->aux);
        if (ret < 0) {
            verbose(env, "tracking tail call prog failed\n");
            goto out_free;
        }
    }

    /* Use bpf_prog_F_tag to indicate functions in stack traces.
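     * E.g. (illustrative): a subprog then shows up in kallsyms and in
     * stack traces as bpf_prog_<tag>_F rather than under a real name.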
     * Long term would need debug info to populate names
     */
    func[i]->aux->name[0] = 'F';
    func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
    func[i]->jit_requested = 1;
    func[i]->aux->linfo = prog->aux->linfo;
    func[i]->aux->nr_linfo = prog->aux->nr_linfo;
    func[i]->aux->jited_linfo = prog->aux->jited_linfo;
    func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx;
    num_exentries = 0;
    insn = func[i]->insnsi;
    for (j = 0; j < func[i]->len; j++, insn++) {
        if (BPF_CLASS(insn->code) == BPF_LDX &&
            BPF_MODE(insn->code) == BPF_PROBE_MEM)
            num_exentries++;
    }
    func[i]->aux->num_exentries = num_exentries;
    func[i]->aux->tail_call_reachable = env->subprog_info[i].tail_call_reachable;
    func[i] = bpf_int_jit_compile(func[i]);
    if (!func[i]->jited) {
        err = -ENOTSUPP;
        goto out_free;
    }
    cond_resched();
}

/* Untrack main program's aux structs so that during map_poke_run()
 * we will not stumble upon the unfilled poke descriptors; each
 * of the main program's poke descs got distributed across subprogs
 * and got tracked onto map, so we are sure that none of them will
 * be missed after the operation below
 */
for (i = 0; i < prog->aux->size_poke_tab; i++) {
    map_ptr = prog->aux->poke_tab[i].tail_call.map;

    map_ptr->ops->map_poke_untrack(map_ptr, prog->aux);
}

/* at this point all bpf functions were successfully JITed;
 * now populate all bpf_calls with correct addresses and
 * run last pass of JIT
 */
for (i = 0; i < env->subprog_cnt; i++) {
    insn = func[i]->insnsi;
    for (j = 0; j < func[i]->len; j++, insn++) {
        if (insn->code != (BPF_JMP | BPF_CALL) ||
            insn->src_reg != BPF_PSEUDO_CALL)
            continue;
        subprog = insn->off;
        insn->imm = BPF_CAST_CALL(func[subprog]->bpf_func) -
                    __bpf_call_base;
    }

    /* we use the aux data to keep a list of the start addresses
     * of the JITed images for each function in the program
     *
     * for some architectures, such as powerpc64, the imm field
     * might not be large enough to hold the offset of the start
     * address of the callee's JITed image from __bpf_call_base
     *
     * in such cases, we can lookup the start address of a callee
     * by using its subprog id, available from the off field of
     * the call instruction, as an index for this list
     */
    func[i]->aux->func = func;
    func[i]->aux->func_cnt = env->subprog_cnt;
}
for (i = 0; i < env->subprog_cnt; i++) {
    old_bpf_func = func[i]->bpf_func;
    tmp = bpf_int_jit_compile(func[i]);
    if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
        verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
        err = -ENOTSUPP;
        goto out_free;
    }
    cond_resched();
}

/* finally lock prog and jit images for all functions and
 * populate kallsyms
 */
for (i = 0; i < env->subprog_cnt; i++) {
    bpf_prog_lock_ro(func[i]);
    bpf_prog_kallsyms_add(func[i]);
}

/* Last step: make now unused interpreter insns from main
 * prog consistent for later dump requests, so they can
 * later look the same as if they were interpreted only.
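 *
 * Concretely (a sketch of the loop below): each bpf-to-bpf call insn
 * is restored so that insn->off again holds the relative offset saved
 * in call_imm, while insn->imm is set to the callee's subprog number.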
 */
for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
    if (insn->code != (BPF_JMP | BPF_CALL) ||
        insn->src_reg != BPF_PSEUDO_CALL)
        continue;
    insn->off = env->insn_aux_data[i].call_imm;
    subprog = find_subprog(env, i + insn->off + 1);
    insn->imm = subprog;
}

prog->jited = 1;
prog->bpf_func = func[0]->bpf_func;
prog->aux->func = func;
prog->aux->func_cnt = env->subprog_cnt;
bpf_prog_free_unused_jited_linfo(prog);
return 0;
out_free:
for (i = 0; i < env->subprog_cnt; i++) {
    if (!func[i])
        continue;

    for (j = 0; j < func[i]->aux->size_poke_tab; j++) {
        map_ptr = func[i]->aux->poke_tab[j].tail_call.map;
        map_ptr->ops->map_poke_untrack(map_ptr, func[i]->aux);
    }
    bpf_jit_free(func[i]);
}
kfree(func);
out_undo_insn:
/* cleanup main prog to be interpreted */
prog->jit_requested = 0;
for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
    if (insn->code != (BPF_JMP | BPF_CALL) ||
        insn->src_reg != BPF_PSEUDO_CALL)
        continue;
    insn->off = 0;
    insn->imm = env->insn_aux_data[i].call_imm;
}
bpf_prog_free_jited_linfo(prog);
return err;
}

static int fixup_call_args(struct bpf_verifier_env *env)
{
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
    struct bpf_prog *prog = env->prog;
    struct bpf_insn *insn = prog->insnsi;
    int i, depth;
#endif
    int err = 0;

    if (env->prog->jit_requested &&
        !bpf_prog_is_dev_bound(env->prog->aux)) {
        err = jit_subprogs(env);
        if (err == 0)
            return 0;
        if (err == -EFAULT)
            return err;
    }
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
    if (env->subprog_cnt > 1 && env->prog->aux->tail_call_reachable) {
        /* When the JIT fails, programs with bpf2bpf calls and tail_calls
         * have to be rejected, since the interpreter doesn't support them yet.
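         *
         * For the programs that are accepted, the loop below patches each
         * pseudo call for the interpreter; roughly (a sketch, see
         * bpf_patch_call_args() in kernel/bpf/core.c for the real thing):
         *   insn->off  = (s16) insn->imm;          // callee offset
         *   insn->imm  = interpreter variant picked by the callee's
         *                round_up(stack_depth, 32);
         *   insn->code = BPF_JMP | BPF_CALL_ARGS;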
11077 */ 11078 verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n"); 11079 return -EINVAL; 11080 } 11081 for (i = 0; i < prog->len; i++, insn++) { 11082 if (insn->code != (BPF_JMP | BPF_CALL) || 11083 insn->src_reg != BPF_PSEUDO_CALL) 11084 continue; 11085 depth = get_callee_stack_depth(env, insn, i); 11086 if (depth < 0) 11087 return depth; 11088 bpf_patch_call_args(insn, depth); 11089 } 11090 err = 0; 11091 #endif 11092 return err; 11093 } 11094 11095 /* fixup insn->imm field of bpf_call instructions 11096 * and inline eligible helpers as explicit sequence of BPF instructions 11097 * 11098 * this function is called after eBPF program passed verification 11099 */ 11100 static int fixup_bpf_calls(struct bpf_verifier_env *env) 11101 { 11102 struct bpf_prog *prog = env->prog; 11103 bool expect_blinding = bpf_jit_blinding_enabled(prog); 11104 struct bpf_insn *insn = prog->insnsi; 11105 const struct bpf_func_proto *fn; 11106 const int insn_cnt = prog->len; 11107 const struct bpf_map_ops *ops; 11108 struct bpf_insn_aux_data *aux; 11109 struct bpf_insn insn_buf[16]; 11110 struct bpf_prog *new_prog; 11111 struct bpf_map *map_ptr; 11112 int i, ret, cnt, delta = 0; 11113 11114 for (i = 0; i < insn_cnt; i++, insn++) { 11115 if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) || 11116 insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) || 11117 insn->code == (BPF_ALU | BPF_MOD | BPF_X) || 11118 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) { 11119 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64; 11120 struct bpf_insn mask_and_div[] = { 11121 BPF_MOV32_REG(insn->src_reg, insn->src_reg), 11122 /* Rx div 0 -> 0 */ 11123 BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2), 11124 BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg), 11125 BPF_JMP_IMM(BPF_JA, 0, 0, 1), 11126 *insn, 11127 }; 11128 struct bpf_insn mask_and_mod[] = { 11129 BPF_MOV32_REG(insn->src_reg, insn->src_reg), 11130 /* Rx mod 0 -> Rx */ 11131 BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1), 11132 *insn, 11133 }; 11134 struct bpf_insn *patchlet; 11135 11136 if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) || 11137 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) { 11138 patchlet = mask_and_div + (is64 ? 1 : 0); 11139 cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0); 11140 } else { 11141 patchlet = mask_and_mod + (is64 ? 1 : 0); 11142 cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 
1 : 0); 11143 } 11144 11145 new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt); 11146 if (!new_prog) 11147 return -ENOMEM; 11148 11149 delta += cnt - 1; 11150 env->prog = prog = new_prog; 11151 insn = new_prog->insnsi + i + delta; 11152 continue; 11153 } 11154 11155 if (BPF_CLASS(insn->code) == BPF_LD && 11156 (BPF_MODE(insn->code) == BPF_ABS || 11157 BPF_MODE(insn->code) == BPF_IND)) { 11158 cnt = env->ops->gen_ld_abs(insn, insn_buf); 11159 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) { 11160 verbose(env, "bpf verifier is misconfigured\n"); 11161 return -EINVAL; 11162 } 11163 11164 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 11165 if (!new_prog) 11166 return -ENOMEM; 11167 11168 delta += cnt - 1; 11169 env->prog = prog = new_prog; 11170 insn = new_prog->insnsi + i + delta; 11171 continue; 11172 } 11173 11174 if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) || 11175 insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) { 11176 const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X; 11177 const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X; 11178 struct bpf_insn insn_buf[16]; 11179 struct bpf_insn *patch = &insn_buf[0]; 11180 bool issrc, isneg; 11181 u32 off_reg; 11182 11183 aux = &env->insn_aux_data[i + delta]; 11184 if (!aux->alu_state || 11185 aux->alu_state == BPF_ALU_NON_POINTER) 11186 continue; 11187 11188 isneg = aux->alu_state & BPF_ALU_NEG_VALUE; 11189 issrc = (aux->alu_state & BPF_ALU_SANITIZE) == 11190 BPF_ALU_SANITIZE_SRC; 11191 11192 off_reg = issrc ? insn->src_reg : insn->dst_reg; 11193 if (isneg) 11194 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); 11195 *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit - 1); 11196 *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg); 11197 *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg); 11198 *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0); 11199 *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63); 11200 if (issrc) { 11201 *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, 11202 off_reg); 11203 insn->src_reg = BPF_REG_AX; 11204 } else { 11205 *patch++ = BPF_ALU64_REG(BPF_AND, off_reg, 11206 BPF_REG_AX); 11207 } 11208 if (isneg) 11209 insn->code = insn->code == code_add ? 11210 code_sub : code_add; 11211 *patch++ = *insn; 11212 if (issrc && isneg) 11213 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); 11214 cnt = patch - insn_buf; 11215 11216 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 11217 if (!new_prog) 11218 return -ENOMEM; 11219 11220 delta += cnt - 1; 11221 env->prog = prog = new_prog; 11222 insn = new_prog->insnsi + i + delta; 11223 continue; 11224 } 11225 11226 if (insn->code != (BPF_JMP | BPF_CALL)) 11227 continue; 11228 if (insn->src_reg == BPF_PSEUDO_CALL) 11229 continue; 11230 11231 if (insn->imm == BPF_FUNC_get_route_realm) 11232 prog->dst_needed = 1; 11233 if (insn->imm == BPF_FUNC_get_prandom_u32) 11234 bpf_user_rnd_init_once(); 11235 if (insn->imm == BPF_FUNC_override_return) 11236 prog->kprobe_override = 1; 11237 if (insn->imm == BPF_FUNC_tail_call) { 11238 /* If we tail call into other programs, we 11239 * cannot make any assumptions since they can 11240 * be replaced dynamically during runtime in 11241 * the program array. 
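			 *
			 * E.g. (illustrative, names made up): after
			 *   bpf_tail_call(ctx, &prog_array, idx);
			 * whatever program user space has installed in
			 * prog_array at slot idx runs in our place, so
			 * callback and packet access assumptions must stay
			 * conservative, as done below.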
 */
prog->cb_access = 1;
if (!allow_tail_call_in_subprogs(env))
    prog->aux->stack_depth = MAX_BPF_STACK;
prog->aux->max_pkt_offset = MAX_PACKET_OFF;

/* mark bpf_tail_call as different opcode to avoid
 * conditional branch in the interpreter for every normal
 * call and to prevent accidental JITing by JIT compiler
 * that doesn't support bpf_tail_call yet
 */
insn->imm = 0;
insn->code = BPF_JMP | BPF_TAIL_CALL;

aux = &env->insn_aux_data[i + delta];
if (env->bpf_capable && !expect_blinding &&
    prog->jit_requested &&
    !bpf_map_key_poisoned(aux) &&
    !bpf_map_ptr_poisoned(aux) &&
    !bpf_map_ptr_unpriv(aux)) {
    struct bpf_jit_poke_descriptor desc = {
        .reason = BPF_POKE_REASON_TAIL_CALL,
        .tail_call.map = BPF_MAP_PTR(aux->map_ptr_state),
        .tail_call.key = bpf_map_key_immediate(aux),
        .insn_idx = i + delta,
    };

    ret = bpf_jit_add_poke_descriptor(prog, &desc);
    if (ret < 0) {
        verbose(env, "adding tail call poke descriptor failed\n");
        return ret;
    }

    insn->imm = ret + 1;
    continue;
}

if (!bpf_map_ptr_unpriv(aux))
    continue;

/* instead of changing every JIT dealing with tail_call
 * emit two extra insns:
 * if (index >= max_entries) goto out;
 * index &= array->index_mask;
 * to avoid out-of-bounds cpu speculation
 */
if (bpf_map_ptr_poisoned(aux)) {
    verbose(env, "tail_call abusing map_ptr\n");
    return -EINVAL;
}

map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
                          map_ptr->max_entries, 2);
insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
                            container_of(map_ptr,
                                         struct bpf_array,
                                         map)->index_mask);
insn_buf[2] = *insn;
cnt = 3;
new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
if (!new_prog)
    return -ENOMEM;

delta += cnt - 1;
env->prog = prog = new_prog;
insn = new_prog->insnsi + i + delta;
continue;
}

/* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
 * and other inlining handlers are currently limited to 64 bit
 * only.
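 *
 * For reference, an illustrative sketch: for an array map, the
 * ops->map_gen_lookup() used below replaces the helper call with an
 * inline sequence along the lines of
 *   r0 = *(u32 *)(r2 + 0)            // load the key (the index)
 *   if r0 >= max_entries goto miss   // bounds check
 *   r0 *= round_up(value_size, 8)    // element offset
 *   r0 += &array->value[0]           // direct pointer to the value
 *   ...
 *   miss: r0 = 0
 * avoiding the helper call on the hot path.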
11315 */ 11316 if (prog->jit_requested && BITS_PER_LONG == 64 && 11317 (insn->imm == BPF_FUNC_map_lookup_elem || 11318 insn->imm == BPF_FUNC_map_update_elem || 11319 insn->imm == BPF_FUNC_map_delete_elem || 11320 insn->imm == BPF_FUNC_map_push_elem || 11321 insn->imm == BPF_FUNC_map_pop_elem || 11322 insn->imm == BPF_FUNC_map_peek_elem)) { 11323 aux = &env->insn_aux_data[i + delta]; 11324 if (bpf_map_ptr_poisoned(aux)) 11325 goto patch_call_imm; 11326 11327 map_ptr = BPF_MAP_PTR(aux->map_ptr_state); 11328 ops = map_ptr->ops; 11329 if (insn->imm == BPF_FUNC_map_lookup_elem && 11330 ops->map_gen_lookup) { 11331 cnt = ops->map_gen_lookup(map_ptr, insn_buf); 11332 if (cnt == -EOPNOTSUPP) 11333 goto patch_map_ops_generic; 11334 if (cnt <= 0 || cnt >= ARRAY_SIZE(insn_buf)) { 11335 verbose(env, "bpf verifier is misconfigured\n"); 11336 return -EINVAL; 11337 } 11338 11339 new_prog = bpf_patch_insn_data(env, i + delta, 11340 insn_buf, cnt); 11341 if (!new_prog) 11342 return -ENOMEM; 11343 11344 delta += cnt - 1; 11345 env->prog = prog = new_prog; 11346 insn = new_prog->insnsi + i + delta; 11347 continue; 11348 } 11349 11350 BUILD_BUG_ON(!__same_type(ops->map_lookup_elem, 11351 (void *(*)(struct bpf_map *map, void *key))NULL)); 11352 BUILD_BUG_ON(!__same_type(ops->map_delete_elem, 11353 (int (*)(struct bpf_map *map, void *key))NULL)); 11354 BUILD_BUG_ON(!__same_type(ops->map_update_elem, 11355 (int (*)(struct bpf_map *map, void *key, void *value, 11356 u64 flags))NULL)); 11357 BUILD_BUG_ON(!__same_type(ops->map_push_elem, 11358 (int (*)(struct bpf_map *map, void *value, 11359 u64 flags))NULL)); 11360 BUILD_BUG_ON(!__same_type(ops->map_pop_elem, 11361 (int (*)(struct bpf_map *map, void *value))NULL)); 11362 BUILD_BUG_ON(!__same_type(ops->map_peek_elem, 11363 (int (*)(struct bpf_map *map, void *value))NULL)); 11364 patch_map_ops_generic: 11365 switch (insn->imm) { 11366 case BPF_FUNC_map_lookup_elem: 11367 insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) - 11368 __bpf_call_base; 11369 continue; 11370 case BPF_FUNC_map_update_elem: 11371 insn->imm = BPF_CAST_CALL(ops->map_update_elem) - 11372 __bpf_call_base; 11373 continue; 11374 case BPF_FUNC_map_delete_elem: 11375 insn->imm = BPF_CAST_CALL(ops->map_delete_elem) - 11376 __bpf_call_base; 11377 continue; 11378 case BPF_FUNC_map_push_elem: 11379 insn->imm = BPF_CAST_CALL(ops->map_push_elem) - 11380 __bpf_call_base; 11381 continue; 11382 case BPF_FUNC_map_pop_elem: 11383 insn->imm = BPF_CAST_CALL(ops->map_pop_elem) - 11384 __bpf_call_base; 11385 continue; 11386 case BPF_FUNC_map_peek_elem: 11387 insn->imm = BPF_CAST_CALL(ops->map_peek_elem) - 11388 __bpf_call_base; 11389 continue; 11390 } 11391 11392 goto patch_call_imm; 11393 } 11394 11395 if (prog->jit_requested && BITS_PER_LONG == 64 && 11396 insn->imm == BPF_FUNC_jiffies64) { 11397 struct bpf_insn ld_jiffies_addr[2] = { 11398 BPF_LD_IMM64(BPF_REG_0, 11399 (unsigned long)&jiffies), 11400 }; 11401 11402 insn_buf[0] = ld_jiffies_addr[0]; 11403 insn_buf[1] = ld_jiffies_addr[1]; 11404 insn_buf[2] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, 11405 BPF_REG_0, 0); 11406 cnt = 3; 11407 11408 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 11409 cnt); 11410 if (!new_prog) 11411 return -ENOMEM; 11412 11413 delta += cnt - 1; 11414 env->prog = prog = new_prog; 11415 insn = new_prog->insnsi + i + delta; 11416 continue; 11417 } 11418 11419 patch_call_imm: 11420 fn = env->ops->get_func_proto(insn->imm, env->prog); 11421 /* all functions that have prototype and verifier allowed 11422 * programs to call them, must be real 
in-kernel functions 11423 */ 11424 if (!fn->func) { 11425 verbose(env, 11426 "kernel subsystem misconfigured func %s#%d\n", 11427 func_id_name(insn->imm), insn->imm); 11428 return -EFAULT; 11429 } 11430 insn->imm = fn->func - __bpf_call_base; 11431 } 11432 11433 /* Since poke tab is now finalized, publish aux to tracker. */ 11434 for (i = 0; i < prog->aux->size_poke_tab; i++) { 11435 map_ptr = prog->aux->poke_tab[i].tail_call.map; 11436 if (!map_ptr->ops->map_poke_track || 11437 !map_ptr->ops->map_poke_untrack || 11438 !map_ptr->ops->map_poke_run) { 11439 verbose(env, "bpf verifier is misconfigured\n"); 11440 return -EINVAL; 11441 } 11442 11443 ret = map_ptr->ops->map_poke_track(map_ptr, prog->aux); 11444 if (ret < 0) { 11445 verbose(env, "tracking tail call prog failed\n"); 11446 return ret; 11447 } 11448 } 11449 11450 return 0; 11451 } 11452 11453 static void free_states(struct bpf_verifier_env *env) 11454 { 11455 struct bpf_verifier_state_list *sl, *sln; 11456 int i; 11457 11458 sl = env->free_list; 11459 while (sl) { 11460 sln = sl->next; 11461 free_verifier_state(&sl->state, false); 11462 kfree(sl); 11463 sl = sln; 11464 } 11465 env->free_list = NULL; 11466 11467 if (!env->explored_states) 11468 return; 11469 11470 for (i = 0; i < state_htab_size(env); i++) { 11471 sl = env->explored_states[i]; 11472 11473 while (sl) { 11474 sln = sl->next; 11475 free_verifier_state(&sl->state, false); 11476 kfree(sl); 11477 sl = sln; 11478 } 11479 env->explored_states[i] = NULL; 11480 } 11481 } 11482 11483 /* The verifier is using insn_aux_data[] to store temporary data during 11484 * verification and to store information for passes that run after the 11485 * verification like dead code sanitization. do_check_common() for subprogram N 11486 * may analyze many other subprograms. sanitize_insn_aux_data() clears all 11487 * temporary data after do_check_common() finds that subprogram N cannot be 11488 * verified independently. pass_cnt counts the number of times 11489 * do_check_common() was run and insn->aux->seen tells the pass number 11490 * insn_aux_data was touched. These variables are compared to clear temporary 11491 * data from failed pass. For testing and experiments do_check_common() can be 11492 * run multiple times even when prior attempt to verify is unsuccessful. 
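 *
 * E.g. (a sketch of the function below): if the pass with
 * pass_cnt == 3 failed, only LDX/STX aux entries with aux->seen == 3
 * are zeroed (up to the orig_idx member); data recorded by earlier,
 * successful passes is left intact.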
11493 */ 11494 static void sanitize_insn_aux_data(struct bpf_verifier_env *env) 11495 { 11496 struct bpf_insn *insn = env->prog->insnsi; 11497 struct bpf_insn_aux_data *aux; 11498 int i, class; 11499 11500 for (i = 0; i < env->prog->len; i++) { 11501 class = BPF_CLASS(insn[i].code); 11502 if (class != BPF_LDX && class != BPF_STX) 11503 continue; 11504 aux = &env->insn_aux_data[i]; 11505 if (aux->seen != env->pass_cnt) 11506 continue; 11507 memset(aux, 0, offsetof(typeof(*aux), orig_idx)); 11508 } 11509 } 11510 11511 static int do_check_common(struct bpf_verifier_env *env, int subprog) 11512 { 11513 bool pop_log = !(env->log.level & BPF_LOG_LEVEL2); 11514 struct bpf_verifier_state *state; 11515 struct bpf_reg_state *regs; 11516 int ret, i; 11517 11518 env->prev_linfo = NULL; 11519 env->pass_cnt++; 11520 11521 state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL); 11522 if (!state) 11523 return -ENOMEM; 11524 state->curframe = 0; 11525 state->speculative = false; 11526 state->branches = 1; 11527 state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL); 11528 if (!state->frame[0]) { 11529 kfree(state); 11530 return -ENOMEM; 11531 } 11532 env->cur_state = state; 11533 init_func_state(env, state->frame[0], 11534 BPF_MAIN_FUNC /* callsite */, 11535 0 /* frameno */, 11536 subprog); 11537 11538 regs = state->frame[state->curframe]->regs; 11539 if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) { 11540 ret = btf_prepare_func_args(env, subprog, regs); 11541 if (ret) 11542 goto out; 11543 for (i = BPF_REG_1; i <= BPF_REG_5; i++) { 11544 if (regs[i].type == PTR_TO_CTX) 11545 mark_reg_known_zero(env, regs, i); 11546 else if (regs[i].type == SCALAR_VALUE) 11547 mark_reg_unknown(env, regs, i); 11548 } 11549 } else { 11550 /* 1st arg to a function */ 11551 regs[BPF_REG_1].type = PTR_TO_CTX; 11552 mark_reg_known_zero(env, regs, BPF_REG_1); 11553 ret = btf_check_func_arg_match(env, subprog, regs); 11554 if (ret == -EFAULT) 11555 /* unlikely verifier bug. abort. 11556 * ret == 0 and ret < 0 are sadly acceptable for 11557 * main() function due to backward compatibility. 11558 * Like socket filter program may be written as: 11559 * int bpf_prog(struct pt_regs *ctx) 11560 * and never dereference that ctx in the program. 11561 * 'struct pt_regs' is a type mismatch for socket 11562 * filter that should be using 'struct __sk_buff'. 11563 */ 11564 goto out; 11565 } 11566 11567 ret = do_check(env); 11568 out: 11569 /* check for NULL is necessary, since cur_state can be freed inside 11570 * do_check() under memory pressure. 11571 */ 11572 if (env->cur_state) { 11573 free_verifier_state(env->cur_state, true); 11574 env->cur_state = NULL; 11575 } 11576 while (!pop_stack(env, NULL, NULL, false)); 11577 if (!ret && pop_log) 11578 bpf_vlog_reset(&env->log, 0); 11579 free_states(env); 11580 if (ret) 11581 /* clean aux data in case subprog was rejected */ 11582 sanitize_insn_aux_data(env); 11583 return ret; 11584 } 11585 11586 /* Verify all global functions in a BPF program one by one based on their BTF. 11587 * All global functions must pass verification. Otherwise the whole program is rejected. 11588 * Consider: 11589 * int bar(int); 11590 * int foo(int f) 11591 * { 11592 * return bar(f); 11593 * } 11594 * int bar(int b) 11595 * { 11596 * ... 11597 * } 11598 * foo() will be verified first for R1=any_scalar_value. During verification it 11599 * will be assumed that bar() already verified successfully and call to bar() 11600 * from foo() will be checked for type match only. 
Later bar() will be verified 11601 * independently to check that it's safe for R1=any_scalar_value. 11602 */ 11603 static int do_check_subprogs(struct bpf_verifier_env *env) 11604 { 11605 struct bpf_prog_aux *aux = env->prog->aux; 11606 int i, ret; 11607 11608 if (!aux->func_info) 11609 return 0; 11610 11611 for (i = 1; i < env->subprog_cnt; i++) { 11612 if (aux->func_info_aux[i].linkage != BTF_FUNC_GLOBAL) 11613 continue; 11614 env->insn_idx = env->subprog_info[i].start; 11615 WARN_ON_ONCE(env->insn_idx == 0); 11616 ret = do_check_common(env, i); 11617 if (ret) { 11618 return ret; 11619 } else if (env->log.level & BPF_LOG_LEVEL) { 11620 verbose(env, 11621 "Func#%d is safe for any args that match its prototype\n", 11622 i); 11623 } 11624 } 11625 return 0; 11626 } 11627 11628 static int do_check_main(struct bpf_verifier_env *env) 11629 { 11630 int ret; 11631 11632 env->insn_idx = 0; 11633 ret = do_check_common(env, 0); 11634 if (!ret) 11635 env->prog->aux->stack_depth = env->subprog_info[0].stack_depth; 11636 return ret; 11637 } 11638 11639 11640 static void print_verification_stats(struct bpf_verifier_env *env) 11641 { 11642 int i; 11643 11644 if (env->log.level & BPF_LOG_STATS) { 11645 verbose(env, "verification time %lld usec\n", 11646 div_u64(env->verification_time, 1000)); 11647 verbose(env, "stack depth "); 11648 for (i = 0; i < env->subprog_cnt; i++) { 11649 u32 depth = env->subprog_info[i].stack_depth; 11650 11651 verbose(env, "%d", depth); 11652 if (i + 1 < env->subprog_cnt) 11653 verbose(env, "+"); 11654 } 11655 verbose(env, "\n"); 11656 } 11657 verbose(env, "processed %d insns (limit %d) max_states_per_insn %d " 11658 "total_states %d peak_states %d mark_read %d\n", 11659 env->insn_processed, BPF_COMPLEXITY_LIMIT_INSNS, 11660 env->max_states_per_insn, env->total_states, 11661 env->peak_states, env->longest_mark_read_walk); 11662 } 11663 11664 static int check_struct_ops_btf_id(struct bpf_verifier_env *env) 11665 { 11666 const struct btf_type *t, *func_proto; 11667 const struct bpf_struct_ops *st_ops; 11668 const struct btf_member *member; 11669 struct bpf_prog *prog = env->prog; 11670 u32 btf_id, member_idx; 11671 const char *mname; 11672 11673 btf_id = prog->aux->attach_btf_id; 11674 st_ops = bpf_struct_ops_find(btf_id); 11675 if (!st_ops) { 11676 verbose(env, "attach_btf_id %u is not a supported struct\n", 11677 btf_id); 11678 return -ENOTSUPP; 11679 } 11680 11681 t = st_ops->type; 11682 member_idx = prog->expected_attach_type; 11683 if (member_idx >= btf_type_vlen(t)) { 11684 verbose(env, "attach to invalid member idx %u of struct %s\n", 11685 member_idx, st_ops->name); 11686 return -EINVAL; 11687 } 11688 11689 member = &btf_type_member(t)[member_idx]; 11690 mname = btf_name_by_offset(btf_vmlinux, member->name_off); 11691 func_proto = btf_type_resolve_func_ptr(btf_vmlinux, member->type, 11692 NULL); 11693 if (!func_proto) { 11694 verbose(env, "attach to invalid member %s(@idx %u) of struct %s\n", 11695 mname, member_idx, st_ops->name); 11696 return -EINVAL; 11697 } 11698 11699 if (st_ops->check_member) { 11700 int err = st_ops->check_member(t, member); 11701 11702 if (err) { 11703 verbose(env, "attach to unsupported member %s of struct %s\n", 11704 mname, st_ops->name); 11705 return err; 11706 } 11707 } 11708 11709 prog->aux->attach_func_proto = func_proto; 11710 prog->aux->attach_func_name = mname; 11711 env->ops = st_ops->verifier_ops; 11712 11713 return 0; 11714 } 11715 #define SECURITY_PREFIX "security_" 11716 11717 static int check_attach_modify_return(unsigned long addr, 
const char *func_name) 11718 { 11719 if (within_error_injection_list(addr) || 11720 !strncmp(SECURITY_PREFIX, func_name, sizeof(SECURITY_PREFIX) - 1)) 11721 return 0; 11722 11723 return -EINVAL; 11724 } 11725 11726 /* list of non-sleepable functions that are otherwise on 11727 * ALLOW_ERROR_INJECTION list 11728 */ 11729 BTF_SET_START(btf_non_sleepable_error_inject) 11730 /* Three functions below can be called from sleepable and non-sleepable context. 11731 * Assume non-sleepable from bpf safety point of view. 11732 */ 11733 BTF_ID(func, __add_to_page_cache_locked) 11734 BTF_ID(func, should_fail_alloc_page) 11735 BTF_ID(func, should_failslab) 11736 BTF_SET_END(btf_non_sleepable_error_inject) 11737 11738 static int check_non_sleepable_error_inject(u32 btf_id) 11739 { 11740 return btf_id_set_contains(&btf_non_sleepable_error_inject, btf_id); 11741 } 11742 11743 int bpf_check_attach_target(struct bpf_verifier_log *log, 11744 const struct bpf_prog *prog, 11745 const struct bpf_prog *tgt_prog, 11746 u32 btf_id, 11747 struct bpf_attach_target_info *tgt_info) 11748 { 11749 bool prog_extension = prog->type == BPF_PROG_TYPE_EXT; 11750 const char prefix[] = "btf_trace_"; 11751 int ret = 0, subprog = -1, i; 11752 const struct btf_type *t; 11753 bool conservative = true; 11754 const char *tname; 11755 struct btf *btf; 11756 long addr = 0; 11757 11758 if (!btf_id) { 11759 bpf_log(log, "Tracing programs must provide btf_id\n"); 11760 return -EINVAL; 11761 } 11762 btf = tgt_prog ? tgt_prog->aux->btf : prog->aux->attach_btf; 11763 if (!btf) { 11764 bpf_log(log, 11765 "FENTRY/FEXIT program can only be attached to another program annotated with BTF\n"); 11766 return -EINVAL; 11767 } 11768 t = btf_type_by_id(btf, btf_id); 11769 if (!t) { 11770 bpf_log(log, "attach_btf_id %u is invalid\n", btf_id); 11771 return -EINVAL; 11772 } 11773 tname = btf_name_by_offset(btf, t->name_off); 11774 if (!tname) { 11775 bpf_log(log, "attach_btf_id %u doesn't have a name\n", btf_id); 11776 return -EINVAL; 11777 } 11778 if (tgt_prog) { 11779 struct bpf_prog_aux *aux = tgt_prog->aux; 11780 11781 for (i = 0; i < aux->func_info_cnt; i++) 11782 if (aux->func_info[i].type_id == btf_id) { 11783 subprog = i; 11784 break; 11785 } 11786 if (subprog == -1) { 11787 bpf_log(log, "Subprog %s doesn't exist\n", tname); 11788 return -EINVAL; 11789 } 11790 conservative = aux->func_info_aux[subprog].unreliable; 11791 if (prog_extension) { 11792 if (conservative) { 11793 bpf_log(log, 11794 "Cannot replace static functions\n"); 11795 return -EINVAL; 11796 } 11797 if (!prog->jit_requested) { 11798 bpf_log(log, 11799 "Extension programs should be JITed\n"); 11800 return -EINVAL; 11801 } 11802 } 11803 if (!tgt_prog->jited) { 11804 bpf_log(log, "Can attach to only JITed progs\n"); 11805 return -EINVAL; 11806 } 11807 if (tgt_prog->type == prog->type) { 11808 /* Cannot fentry/fexit another fentry/fexit program. 11809 * Cannot attach program extension to another extension. 11810 * It's ok to attach fentry/fexit to extension program. 11811 */ 11812 bpf_log(log, "Cannot recursively attach\n"); 11813 return -EINVAL; 11814 } 11815 if (tgt_prog->type == BPF_PROG_TYPE_TRACING && 11816 prog_extension && 11817 (tgt_prog->expected_attach_type == BPF_TRACE_FENTRY || 11818 tgt_prog->expected_attach_type == BPF_TRACE_FEXIT)) { 11819 /* Program extensions can extend all program types 11820 * except fentry/fexit. The reason is the following. 
11821 * The fentry/fexit programs are used for performance 11822 * analysis, stats and can be attached to any program 11823 * type except themselves. When extension program is 11824 * replacing XDP function it is necessary to allow 11825 * performance analysis of all functions. Both original 11826 * XDP program and its program extension. Hence 11827 * attaching fentry/fexit to BPF_PROG_TYPE_EXT is 11828 * allowed. If extending of fentry/fexit was allowed it 11829 * would be possible to create long call chain 11830 * fentry->extension->fentry->extension beyond 11831 * reasonable stack size. Hence extending fentry is not 11832 * allowed. 11833 */ 11834 bpf_log(log, "Cannot extend fentry/fexit\n"); 11835 return -EINVAL; 11836 } 11837 } else { 11838 if (prog_extension) { 11839 bpf_log(log, "Cannot replace kernel functions\n"); 11840 return -EINVAL; 11841 } 11842 } 11843 11844 switch (prog->expected_attach_type) { 11845 case BPF_TRACE_RAW_TP: 11846 if (tgt_prog) { 11847 bpf_log(log, 11848 "Only FENTRY/FEXIT progs are attachable to another BPF prog\n"); 11849 return -EINVAL; 11850 } 11851 if (!btf_type_is_typedef(t)) { 11852 bpf_log(log, "attach_btf_id %u is not a typedef\n", 11853 btf_id); 11854 return -EINVAL; 11855 } 11856 if (strncmp(prefix, tname, sizeof(prefix) - 1)) { 11857 bpf_log(log, "attach_btf_id %u points to wrong type name %s\n", 11858 btf_id, tname); 11859 return -EINVAL; 11860 } 11861 tname += sizeof(prefix) - 1; 11862 t = btf_type_by_id(btf, t->type); 11863 if (!btf_type_is_ptr(t)) 11864 /* should never happen in valid vmlinux build */ 11865 return -EINVAL; 11866 t = btf_type_by_id(btf, t->type); 11867 if (!btf_type_is_func_proto(t)) 11868 /* should never happen in valid vmlinux build */ 11869 return -EINVAL; 11870 11871 break; 11872 case BPF_TRACE_ITER: 11873 if (!btf_type_is_func(t)) { 11874 bpf_log(log, "attach_btf_id %u is not a function\n", 11875 btf_id); 11876 return -EINVAL; 11877 } 11878 t = btf_type_by_id(btf, t->type); 11879 if (!btf_type_is_func_proto(t)) 11880 return -EINVAL; 11881 ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel); 11882 if (ret) 11883 return ret; 11884 break; 11885 default: 11886 if (!prog_extension) 11887 return -EINVAL; 11888 fallthrough; 11889 case BPF_MODIFY_RETURN: 11890 case BPF_LSM_MAC: 11891 case BPF_TRACE_FENTRY: 11892 case BPF_TRACE_FEXIT: 11893 if (!btf_type_is_func(t)) { 11894 bpf_log(log, "attach_btf_id %u is not a function\n", 11895 btf_id); 11896 return -EINVAL; 11897 } 11898 if (prog_extension && 11899 btf_check_type_match(log, prog, btf, t)) 11900 return -EINVAL; 11901 t = btf_type_by_id(btf, t->type); 11902 if (!btf_type_is_func_proto(t)) 11903 return -EINVAL; 11904 11905 if ((prog->aux->saved_dst_prog_type || prog->aux->saved_dst_attach_type) && 11906 (!tgt_prog || prog->aux->saved_dst_prog_type != tgt_prog->type || 11907 prog->aux->saved_dst_attach_type != tgt_prog->expected_attach_type)) 11908 return -EINVAL; 11909 11910 if (tgt_prog && conservative) 11911 t = NULL; 11912 11913 ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel); 11914 if (ret < 0) 11915 return ret; 11916 11917 if (tgt_prog) { 11918 if (subprog == 0) 11919 addr = (long) tgt_prog->bpf_func; 11920 else 11921 addr = (long) tgt_prog->aux->func[subprog]->bpf_func; 11922 } else { 11923 addr = kallsyms_lookup_name(tname); 11924 if (!addr) { 11925 bpf_log(log, 11926 "The address of function %s cannot be found\n", 11927 tname); 11928 return -ENOENT; 11929 } 11930 } 11931 11932 if (prog->aux->sleepable) { 11933 ret = -EINVAL; 11934 switch 
(prog->type) { 11935 case BPF_PROG_TYPE_TRACING: 11936 /* fentry/fexit/fmod_ret progs can be sleepable only if they are 11937 * attached to ALLOW_ERROR_INJECTION and are not in denylist. 11938 */ 11939 if (!check_non_sleepable_error_inject(btf_id) && 11940 within_error_injection_list(addr)) 11941 ret = 0; 11942 break; 11943 case BPF_PROG_TYPE_LSM: 11944 /* LSM progs check that they are attached to bpf_lsm_*() funcs. 11945 * Only some of them are sleepable. 11946 */ 11947 if (bpf_lsm_is_sleepable_hook(btf_id)) 11948 ret = 0; 11949 break; 11950 default: 11951 break; 11952 } 11953 if (ret) { 11954 bpf_log(log, "%s is not sleepable\n", tname); 11955 return ret; 11956 } 11957 } else if (prog->expected_attach_type == BPF_MODIFY_RETURN) { 11958 if (tgt_prog) { 11959 bpf_log(log, "can't modify return codes of BPF programs\n"); 11960 return -EINVAL; 11961 } 11962 ret = check_attach_modify_return(addr, tname); 11963 if (ret) { 11964 bpf_log(log, "%s() is not modifiable\n", tname); 11965 return ret; 11966 } 11967 } 11968 11969 break; 11970 } 11971 tgt_info->tgt_addr = addr; 11972 tgt_info->tgt_name = tname; 11973 tgt_info->tgt_type = t; 11974 return 0; 11975 } 11976 11977 static int check_attach_btf_id(struct bpf_verifier_env *env) 11978 { 11979 struct bpf_prog *prog = env->prog; 11980 struct bpf_prog *tgt_prog = prog->aux->dst_prog; 11981 struct bpf_attach_target_info tgt_info = {}; 11982 u32 btf_id = prog->aux->attach_btf_id; 11983 struct bpf_trampoline *tr; 11984 int ret; 11985 u64 key; 11986 11987 if (prog->aux->sleepable && prog->type != BPF_PROG_TYPE_TRACING && 11988 prog->type != BPF_PROG_TYPE_LSM) { 11989 verbose(env, "Only fentry/fexit/fmod_ret and lsm programs can be sleepable\n"); 11990 return -EINVAL; 11991 } 11992 11993 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS) 11994 return check_struct_ops_btf_id(env); 11995 11996 if (prog->type != BPF_PROG_TYPE_TRACING && 11997 prog->type != BPF_PROG_TYPE_LSM && 11998 prog->type != BPF_PROG_TYPE_EXT) 11999 return 0; 12000 12001 ret = bpf_check_attach_target(&env->log, prog, tgt_prog, btf_id, &tgt_info); 12002 if (ret) 12003 return ret; 12004 12005 if (tgt_prog && prog->type == BPF_PROG_TYPE_EXT) { 12006 /* to make freplace equivalent to their targets, they need to 12007 * inherit env->ops and expected_attach_type for the rest of the 12008 * verification 12009 */ 12010 env->ops = bpf_verifier_ops[tgt_prog->type]; 12011 prog->expected_attach_type = tgt_prog->expected_attach_type; 12012 } 12013 12014 /* store info about the attachment target that will be used later */ 12015 prog->aux->attach_func_proto = tgt_info.tgt_type; 12016 prog->aux->attach_func_name = tgt_info.tgt_name; 12017 12018 if (tgt_prog) { 12019 prog->aux->saved_dst_prog_type = tgt_prog->type; 12020 prog->aux->saved_dst_attach_type = tgt_prog->expected_attach_type; 12021 } 12022 12023 if (prog->expected_attach_type == BPF_TRACE_RAW_TP) { 12024 prog->aux->attach_btf_trace = true; 12025 return 0; 12026 } else if (prog->expected_attach_type == BPF_TRACE_ITER) { 12027 if (!bpf_iter_prog_supported(prog)) 12028 return -EINVAL; 12029 return 0; 12030 } 12031 12032 if (prog->type == BPF_PROG_TYPE_LSM) { 12033 ret = bpf_lsm_verify_prog(&env->log, prog); 12034 if (ret < 0) 12035 return ret; 12036 } 12037 12038 key = bpf_trampoline_compute_key(tgt_prog, prog->aux->attach_btf, btf_id); 12039 tr = bpf_trampoline_get(key, &tgt_info); 12040 if (!tr) 12041 return -ENOMEM; 12042 12043 prog->aux->dst_trampoline = tr; 12044 return 0; 12045 } 12046 12047 struct btf *bpf_get_btf_vmlinux(void) 12048 { 12049 if 
(!btf_vmlinux && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
    mutex_lock(&bpf_verifier_lock);
    if (!btf_vmlinux)
        btf_vmlinux = btf_parse_vmlinux();
    mutex_unlock(&bpf_verifier_lock);
}
return btf_vmlinux;
}

int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
              union bpf_attr __user *uattr)
{
    u64 start_time = ktime_get_ns();
    struct bpf_verifier_env *env;
    struct bpf_verifier_log *log;
    int i, len, ret = -EINVAL;
    bool is_priv;

    /* no program is valid */
    if (ARRAY_SIZE(bpf_verifier_ops) == 0)
        return -EINVAL;

    /* 'struct bpf_verifier_env' can be global, but since it's not small,
     * allocate/free it every time bpf_check() is called
     */
    env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
    if (!env)
        return -ENOMEM;
    log = &env->log;

    len = (*prog)->len;
    env->insn_aux_data =
        vzalloc(array_size(sizeof(struct bpf_insn_aux_data), len));
    ret = -ENOMEM;
    if (!env->insn_aux_data)
        goto err_free_env;
    for (i = 0; i < len; i++)
        env->insn_aux_data[i].orig_idx = i;
    env->prog = *prog;
    env->ops = bpf_verifier_ops[env->prog->type];
    is_priv = bpf_capable();

    bpf_get_btf_vmlinux();

    /* grab the mutex to protect a few globals used by verifier */
    if (!is_priv)
        mutex_lock(&bpf_verifier_lock);

    if (attr->log_level || attr->log_buf || attr->log_size) {
        /* user requested verbose verifier output
         * and supplied buffer to store the verification trace
         */
        log->level = attr->log_level;
        log->ubuf = (char __user *) (unsigned long) attr->log_buf;
        log->len_total = attr->log_size;

        ret = -EINVAL;
        /* log attributes have to be sane */
        if (log->len_total < 128 || log->len_total > UINT_MAX >> 2 ||
            !log->level || !log->ubuf || log->level & ~BPF_LOG_MASK)
            goto err_unlock;
    }

    if (IS_ERR(btf_vmlinux)) {
        /* Either gcc or pahole or the kernel is broken.
         */
        verbose(env, "in-kernel BTF is malformed\n");
        ret = PTR_ERR(btf_vmlinux);
        goto skip_full_check;
    }

    env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
    if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
        env->strict_alignment = true;
    if (attr->prog_flags & BPF_F_ANY_ALIGNMENT)
        env->strict_alignment = false;

    env->allow_ptr_leaks = bpf_allow_ptr_leaks();
    env->allow_ptr_to_map_access = bpf_allow_ptr_to_map_access();
    env->bypass_spec_v1 = bpf_bypass_spec_v1();
    env->bypass_spec_v4 = bpf_bypass_spec_v4();
    env->bpf_capable = bpf_capable();

    if (is_priv)
        env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ;

    if (bpf_prog_is_dev_bound(env->prog->aux)) {
        ret = bpf_prog_offload_verifier_prep(env->prog);
        if (ret)
            goto skip_full_check;
    }

    env->explored_states = kvcalloc(state_htab_size(env),
                                    sizeof(struct bpf_verifier_state_list *),
                                    GFP_USER);
    ret = -ENOMEM;
    if (!env->explored_states)
        goto skip_full_check;

    ret = check_subprogs(env);
    if (ret < 0)
        goto skip_full_check;

    ret = check_btf_info(env, attr, uattr);
    if (ret < 0)
        goto skip_full_check;

    ret = check_attach_btf_id(env);
    if (ret)
        goto skip_full_check;

    ret = resolve_pseudo_ldimm64(env);
    if (ret < 0)
        goto skip_full_check;

    ret = check_cfg(env);
    if (ret < 0)
        goto skip_full_check;

    ret = do_check_subprogs(env);
    ret = ret ?: do_check_main(env);

    if (ret == 0 && bpf_prog_is_dev_bound(env->prog->aux))
        ret = bpf_prog_offload_finalize(env);

skip_full_check:
    kvfree(env->explored_states);

    if (ret == 0)
        ret = check_max_stack_depth(env);

    /* instruction rewrites happen after this point */
    if (is_priv) {
        if (ret == 0)
            opt_hard_wire_dead_code_branches(env);
        if (ret == 0)
            ret = opt_remove_dead_code(env);
        if (ret == 0)
            ret = opt_remove_nops(env);
    } else {
        if (ret == 0)
            sanitize_dead_code(env);
    }

    if (ret == 0)
        /* program is valid, convert *(u32*)(ctx + off) accesses */
        ret = convert_ctx_accesses(env);

    if (ret == 0)
        ret = fixup_bpf_calls(env);

    /* do 32-bit optimization after insn patching is done, so that those
     * patched insns can be handled correctly.
     */
    if (ret == 0 && !bpf_prog_is_dev_bound(env->prog->aux)) {
        ret = opt_subreg_zext_lo32_rnd_hi32(env, attr);
        env->prog->aux->verifier_zext = bpf_jit_needs_zext() ?
!ret 12205 : false; 12206 } 12207 12208 if (ret == 0) 12209 ret = fixup_call_args(env); 12210 12211 env->verification_time = ktime_get_ns() - start_time; 12212 print_verification_stats(env); 12213 12214 if (log->level && bpf_verifier_log_full(log)) 12215 ret = -ENOSPC; 12216 if (log->level && !log->ubuf) { 12217 ret = -EFAULT; 12218 goto err_release_maps; 12219 } 12220 12221 if (ret) 12222 goto err_release_maps; 12223 12224 if (env->used_map_cnt) { 12225 /* if program passed verifier, update used_maps in bpf_prog_info */ 12226 env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt, 12227 sizeof(env->used_maps[0]), 12228 GFP_KERNEL); 12229 12230 if (!env->prog->aux->used_maps) { 12231 ret = -ENOMEM; 12232 goto err_release_maps; 12233 } 12234 12235 memcpy(env->prog->aux->used_maps, env->used_maps, 12236 sizeof(env->used_maps[0]) * env->used_map_cnt); 12237 env->prog->aux->used_map_cnt = env->used_map_cnt; 12238 } 12239 if (env->used_btf_cnt) { 12240 /* if program passed verifier, update used_btfs in bpf_prog_aux */ 12241 env->prog->aux->used_btfs = kmalloc_array(env->used_btf_cnt, 12242 sizeof(env->used_btfs[0]), 12243 GFP_KERNEL); 12244 if (!env->prog->aux->used_btfs) { 12245 ret = -ENOMEM; 12246 goto err_release_maps; 12247 } 12248 12249 memcpy(env->prog->aux->used_btfs, env->used_btfs, 12250 sizeof(env->used_btfs[0]) * env->used_btf_cnt); 12251 env->prog->aux->used_btf_cnt = env->used_btf_cnt; 12252 } 12253 if (env->used_map_cnt || env->used_btf_cnt) { 12254 /* program is valid. Convert pseudo bpf_ld_imm64 into generic 12255 * bpf_ld_imm64 instructions 12256 */ 12257 convert_pseudo_ld_imm64(env); 12258 } 12259 12260 adjust_btf_func(env); 12261 12262 err_release_maps: 12263 if (!env->prog->aux->used_maps) 12264 /* if we didn't copy map pointers into bpf_prog_info, release 12265 * them now. Otherwise free_used_maps() will release them. 12266 */ 12267 release_maps(env); 12268 if (!env->prog->aux->used_btfs) 12269 release_btfs(env); 12270 12271 /* extension progs temporarily inherit the attach_type of their targets 12272 for verification purposes, so set it back to zero before returning 12273 */ 12274 if (env->prog->type == BPF_PROG_TYPE_EXT) 12275 env->prog->expected_attach_type = 0; 12276 12277 *prog = env->prog; 12278 err_unlock: 12279 if (!is_priv) 12280 mutex_unlock(&bpf_verifier_lock); 12281 vfree(env->insn_aux_data); 12282 err_free_env: 12283 kfree(env); 12284 return ret; 12285 } 12286