
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
7 #include <linux/bpf-cgroup.h>
23 #include <linux/error-injection.h>
54 * The first pass is depth-first-search to check that the program is a DAG.
56 * - larger than BPF_MAXINSNS insns
57 * - if loop is present (detected via back-edge)
58 * - unreachable insns exist (shouldn't be a forest. program = one function)
59 * - out of bounds or malformed jumps
71 * All registers are 64-bit.
72 * R0 - return register
73 * R1-R5 argument passing registers
74 * R6-R9 callee saved registers
75 * R10 - frame pointer read-only
82 * BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
87 * (and -20 constant is saved for further stack bounds checking).
127 * [key, key + map->key_size) bytes are valid and were initialized on
133 * BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
137 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
138 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
140 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
145 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
153 * After the call R0 is set to return type of the function and registers R1-R5
159 * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
165 * passes through a NULL-check conditional. For the branch wherein the state is
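/* Example (sketch): after a helper such as bpf_sk_lookup_tcp() returns
 *   R0 = PTR_TO_SOCKET_OR_NULL
 * a NULL check like
 *   BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, <label>)
 * splits the state: on the fall-through path R0 becomes PTR_TO_SOCKET and
 * may be dereferenced, on the taken path R0 is a known scalar 0.  Any
 * acquired socket still has to be released on every non-NULL path.
 */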
212 return aux->map_ptr_state.poison; in bpf_map_ptr_poisoned()
217 return aux->map_ptr_state.unpriv; in bpf_map_ptr_unpriv()
225 aux->map_ptr_state.unpriv = unpriv; in bpf_map_ptr_store()
226 aux->map_ptr_state.poison = poison; in bpf_map_ptr_store()
227 aux->map_ptr_state.map_ptr = map; in bpf_map_ptr_store()
232 return aux->map_key_state & BPF_MAP_KEY_POISON; in bpf_map_key_poisoned()
237 return !(aux->map_key_state & BPF_MAP_KEY_SEEN); in bpf_map_key_unseen()
242 return aux->map_key_state & ~(BPF_MAP_KEY_SEEN | BPF_MAP_KEY_POISON); in bpf_map_key_immediate()
249 aux->map_key_state = state | BPF_MAP_KEY_SEEN | in bpf_map_key_store()
255 return insn->code == (BPF_JMP | BPF_CALL) && in bpf_helper_call()
256 insn->src_reg == 0; in bpf_helper_call()
261 return insn->code == (BPF_JMP | BPF_CALL) && in bpf_pseudo_call()
262 insn->src_reg == BPF_PSEUDO_CALL; in bpf_pseudo_call()
267 return insn->code == (BPF_JMP | BPF_CALL) && in bpf_pseudo_kfunc_call()
268 insn->src_reg == BPF_PSEUDO_KFUNC_CALL; in bpf_pseudo_kfunc_call()
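/* The three predicates above classify BPF_JMP|BPF_CALL instructions by
 * src_reg: 0 means a call to a BPF helper (imm is the helper id),
 * BPF_PSEUDO_CALL means a bpf-to-bpf call to another subprog (imm is the
 * instruction offset of the callee), and BPF_PSEUDO_KFUNC_CALL means a
 * call to a kernel function (imm is the kfunc's BTF id).
 */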
312 /* arg_{btf,btf_id,owning_ref} are used by kfunc-specific handling,
313 * generally to pass info about user-defined local kptr types to later
352 return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off); in btf_type_name()
363 if (!bpf_verifier_log_needed(&env->log)) in verbose()
367 bpf_verifier_vlog(&env->log, fmt, args); in verbose()
379 if (reg->smin_value > S64_MIN) { in verbose_invalid_scalar()
380 verbose(env, " smin=%lld", reg->smin_value); in verbose_invalid_scalar()
383 if (reg->smax_value < S64_MAX) { in verbose_invalid_scalar()
384 verbose(env, " smax=%lld", reg->smax_value); in verbose_invalid_scalar()
396 type = reg->type; in reg_not_null()
415 if (reg->type == PTR_TO_MAP_VALUE) { in reg_btf_record()
416 rec = reg->map_ptr->record; in reg_btf_record()
417 } else if (type_is_ptr_alloc_obj(reg->type)) { in reg_btf_record()
418 meta = btf_find_struct_meta(reg->btf, reg->btf_id); in reg_btf_record()
420 rec = meta->record; in reg_btf_record()
427 struct bpf_func_info_aux *aux = env->prog->aux->func_info_aux; in subprog_is_global()
436 if (!env->prog->aux->func_info) in subprog_name()
439 info = &env->prog->aux->func_info[subprog]; in subprog_name()
440 return btf_type_name(env->prog->aux->btf, info->type_id); in subprog_name()
447 info->is_cb = true; in mark_subprog_exc_cb()
448 info->is_async_cb = true; in mark_subprog_exc_cb()
449 info->is_exception_cb = true; in mark_subprog_exc_cb()
454 return subprog_info(env, subprog)->is_exception_cb; in subprog_is_exc_cb()
470 enum bpf_map_type map_type = map ? map->map_type : BPF_MAP_TYPE_UNSPEC; in is_acquire_function()
532 return (bpf_helper_call(insn) && is_sync_callback_calling_function(insn->imm)) || in is_sync_callback_calling_insn()
533 (bpf_pseudo_kfunc_call(insn) && is_sync_callback_calling_kfunc(insn->imm)); in is_sync_callback_calling_insn()
538 return (bpf_helper_call(insn) && is_async_callback_calling_function(insn->imm)) || in is_async_callback_calling_insn()
539 (bpf_pseudo_kfunc_call(insn) && is_async_callback_calling_kfunc(insn->imm)); in is_async_callback_calling_insn()
544 return insn->code == (BPF_JMP | BPF_JCOND) && insn->src_reg == BPF_MAY_GOTO; in is_may_goto_insn()
549 return is_may_goto_insn(&env->prog->insnsi[insn_idx]); in is_may_goto_insn_at()
577 return BPF_CLASS(insn->code) == BPF_STX && in is_cmpxchg_insn()
578 BPF_MODE(insn->code) == BPF_ATOMIC && in is_cmpxchg_insn()
579 insn->imm == BPF_CMPXCHG; in is_cmpxchg_insn()
584 return (-off - 1) / BPF_REG_SIZE; in __get_spi()
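/* Worked example: stack offsets are negative relative to the frame
 * pointer, so the 8-byte slot covering [fp-8, fp-1] is spi 0
 * ((-(-8) - 1) / 8 == 0) and the slot covering [fp-16, fp-9] is spi 1
 * ((-(-16) - 1) / 8 == 1).
 */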
590 struct bpf_verifier_state *cur = env->cur_state; in func()
592 return cur->frame[reg->frameno]; in func()
597 int allocated_slots = state->allocated_stack / BPF_REG_SIZE; in is_spi_bounds_valid()
599 /* We need to check that slots between [spi - nr_slots + 1, spi] are in is_spi_bounds_valid()
604 * spi and the second slot will be at spi - 1. in is_spi_bounds_valid()
606 return spi - nr_slots + 1 >= 0 && spi < allocated_slots; in is_spi_bounds_valid()
614 if (!tnum_is_const(reg->var_off)) { in stack_slot_obj_get_spi()
616 return -EINVAL; in stack_slot_obj_get_spi()
619 off = reg->off + reg->var_off.value; in stack_slot_obj_get_spi()
622 return -EINVAL; in stack_slot_obj_get_spi()
628 return -EINVAL; in stack_slot_obj_get_spi()
632 return -ERANGE; in stack_slot_obj_get_spi()
700 int id = ++env->id_gen; in mark_dynptr_stack_regs()
710 __mark_dynptr_reg(reg, type, true, ++env->id_gen); in mark_dynptr_cb_reg()
727 /* We cannot assume both spi and spi - 1 belong to the same dynptr, in mark_stack_slots_dynptr()
739 err = destroy_if_dynptr_stack_slot(env, state, spi - 1); in mark_stack_slots_dynptr()
744 state->stack[spi].slot_type[i] = STACK_DYNPTR; in mark_stack_slots_dynptr()
745 state->stack[spi - 1].slot_type[i] = STACK_DYNPTR; in mark_stack_slots_dynptr()
750 return -EINVAL; in mark_stack_slots_dynptr()
752 mark_dynptr_stack_regs(env, &state->stack[spi].spilled_ptr, in mark_stack_slots_dynptr()
753 &state->stack[spi - 1].spilled_ptr, type); in mark_stack_slots_dynptr()
767 state->stack[spi].spilled_ptr.ref_obj_id = id; in mark_stack_slots_dynptr()
768 state->stack[spi - 1].spilled_ptr.ref_obj_id = id; in mark_stack_slots_dynptr()
771 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; in mark_stack_slots_dynptr()
772 state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN; in mark_stack_slots_dynptr()
782 state->stack[spi].slot_type[i] = STACK_INVALID; in invalidate_dynptr()
783 state->stack[spi - 1].slot_type[i] = STACK_INVALID; in invalidate_dynptr()
786 __mark_reg_not_init(env, &state->stack[spi].spilled_ptr); in invalidate_dynptr()
787 __mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr); in invalidate_dynptr()
795 * check_stack_read_fixed_off will do mark_reg_read for all 8-bytes of in invalidate_dynptr()
800 * default (where the default reg state has its reg->parent as NULL), or in invalidate_dynptr()
802 * mark_reg_read won't walk reg->parent chain), but not randomly during in invalidate_dynptr()
804 * parentage chain will still be live (i.e. reg->parent may be in invalidate_dynptr()
805 * non-NULL), while earlier reg->parent was NULL, so we need in invalidate_dynptr()
810 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; in invalidate_dynptr()
811 state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN; in invalidate_dynptr()
823 if (!dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type)) { in unmark_stack_slots_dynptr()
828 ref_obj_id = state->stack[spi].spilled_ptr.ref_obj_id; in unmark_stack_slots_dynptr()
841 for (i = 1; i < state->allocated_stack / BPF_REG_SIZE; i++) { in unmark_stack_slots_dynptr()
842 if (state->stack[i].spilled_ptr.ref_obj_id != ref_obj_id) in unmark_stack_slots_dynptr()
849 if (state->stack[i].slot_type[0] != STACK_DYNPTR) { in unmark_stack_slots_dynptr()
851 return -EFAULT; in unmark_stack_slots_dynptr()
853 if (state->stack[i].spilled_ptr.dynptr.first_slot) in unmark_stack_slots_dynptr()
865 if (!env->allow_ptr_leaks) in mark_reg_invalid()
883 if (state->stack[spi].slot_type[0] != STACK_DYNPTR) in destroy_if_dynptr_stack_slot()
887 if (!state->stack[spi].spilled_ptr.dynptr.first_slot) in destroy_if_dynptr_stack_slot()
890 if (dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type)) { in destroy_if_dynptr_stack_slot()
892 return -EINVAL; in destroy_if_dynptr_stack_slot()
896 mark_stack_slot_scratched(env, spi - 1); in destroy_if_dynptr_stack_slot()
900 state->stack[spi].slot_type[i] = STACK_INVALID; in destroy_if_dynptr_stack_slot()
901 state->stack[spi - 1].slot_type[i] = STACK_INVALID; in destroy_if_dynptr_stack_slot()
904 dynptr_id = state->stack[spi].spilled_ptr.id; in destroy_if_dynptr_stack_slot()
906 bpf_for_each_reg_in_vstate(env->cur_state, fstate, dreg, ({ in destroy_if_dynptr_stack_slot()
908 if (dreg->type != (PTR_TO_MEM | PTR_MAYBE_NULL) && dreg->type != PTR_TO_MEM) in destroy_if_dynptr_stack_slot()
910 if (dreg->dynptr_id == dynptr_id) in destroy_if_dynptr_stack_slot()
917 __mark_reg_not_init(env, &state->stack[spi].spilled_ptr); in destroy_if_dynptr_stack_slot()
918 __mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr); in destroy_if_dynptr_stack_slot()
921 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; in destroy_if_dynptr_stack_slot()
922 state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN; in destroy_if_dynptr_stack_slot()
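/* destroy_if_dynptr_stack_slot() runs before stack slots holding a dynptr
 * are overwritten or reused: non-refcounted dynptrs are simply destroyed,
 * and any PTR_TO_MEM registers that were derived from the dynptr (matched
 * via dynptr_id above) are invalidated so stale slices cannot be used.
 * Refcounted dynptrs cannot be silently overwritten; they must be released
 * first, hence the -EINVAL above.
 */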
931 if (reg->type == CONST_PTR_TO_DYNPTR) in is_dynptr_reg_valid_uninit()
936 /* -ERANGE (i.e. spi not falling into allocated stack slots) isn't an in is_dynptr_reg_valid_uninit()
940 if (spi < 0 && spi != -ERANGE) in is_dynptr_reg_valid_uninit()
966 if (reg->type == CONST_PTR_TO_DYNPTR) in is_dynptr_reg_valid_init()
972 if (!state->stack[spi].spilled_ptr.dynptr.first_slot) in is_dynptr_reg_valid_init()
976 if (state->stack[spi].slot_type[i] != STACK_DYNPTR || in is_dynptr_reg_valid_init()
977 state->stack[spi - 1].slot_type[i] != STACK_DYNPTR) in is_dynptr_reg_valid_init()
996 if (reg->type == CONST_PTR_TO_DYNPTR) { in is_dynptr_type_expected()
997 return reg->dynptr.type == dynptr_type; in is_dynptr_type_expected()
1002 return state->stack[spi].spilled_ptr.dynptr.type == dynptr_type; in is_dynptr_type_expected()
1029 struct bpf_stack_state *slot = &state->stack[spi - i]; in mark_stack_slots_iter()
1030 struct bpf_reg_state *st = &slot->spilled_ptr; in mark_stack_slots_iter()
1033 st->type = PTR_TO_STACK; /* we don't have dedicated reg type */ in mark_stack_slots_iter()
1036 st->type |= MEM_RCU; in mark_stack_slots_iter()
1038 st->type |= PTR_UNTRUSTED; in mark_stack_slots_iter()
1040 st->live |= REG_LIVE_WRITTEN; in mark_stack_slots_iter()
1041 st->ref_obj_id = i == 0 ? id : 0; in mark_stack_slots_iter()
1042 st->iter.btf = btf; in mark_stack_slots_iter()
1043 st->iter.btf_id = btf_id; in mark_stack_slots_iter()
1044 st->iter.state = BPF_ITER_STATE_ACTIVE; in mark_stack_slots_iter()
1045 st->iter.depth = 0; in mark_stack_slots_iter()
1048 slot->slot_type[j] = STACK_ITER; in mark_stack_slots_iter()
1050 mark_stack_slot_scratched(env, spi - i); in mark_stack_slots_iter()
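/* An iterator occupies nr_slots consecutive stack slots marked STACK_ITER.
 * Only the first slot carries ref_obj_id (the acquired reference that pins
 * the iterator until its destroy kfunc runs); btf/btf_id identify the
 * iterator type and depth tracks loop convergence.  The MEM_RCU vs
 * PTR_UNTRUSTED tagging depends on whether the iterator is RCU protected
 * (the condition is not visible in this excerpt).
 */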
1067 struct bpf_stack_state *slot = &state->stack[spi - i]; in unmark_stack_slots_iter()
1068 struct bpf_reg_state *st = &slot->spilled_ptr; in unmark_stack_slots_iter()
1071 WARN_ON_ONCE(release_reference(env, st->ref_obj_id)); in unmark_stack_slots_iter()
1076 st->live |= REG_LIVE_WRITTEN; in unmark_stack_slots_iter()
1079 slot->slot_type[j] = STACK_INVALID; in unmark_stack_slots_iter()
1081 mark_stack_slot_scratched(env, spi - i); in unmark_stack_slots_iter()
1093 /* For -ERANGE (i.e. spi not falling into allocated stack slots), we in is_iter_reg_valid_uninit()
1098 if (spi == -ERANGE) in is_iter_reg_valid_uninit()
1104 struct bpf_stack_state *slot = &state->stack[spi - i]; in is_iter_reg_valid_uninit()
1107 if (slot->slot_type[j] == STACK_ITER) in is_iter_reg_valid_uninit()
1122 return -EINVAL; in is_iter_reg_valid_init()
1125 struct bpf_stack_state *slot = &state->stack[spi - i]; in is_iter_reg_valid_init()
1126 struct bpf_reg_state *st = &slot->spilled_ptr; in is_iter_reg_valid_init()
1128 if (st->type & PTR_UNTRUSTED) in is_iter_reg_valid_init()
1129 return -EPROTO; in is_iter_reg_valid_init()
1131 if (i == 0 && !st->ref_obj_id) in is_iter_reg_valid_init()
1132 return -EINVAL; in is_iter_reg_valid_init()
1133 if (i != 0 && st->ref_obj_id) in is_iter_reg_valid_init()
1134 return -EINVAL; in is_iter_reg_valid_init()
1135 if (st->iter.btf != btf || st->iter.btf_id != btf_id) in is_iter_reg_valid_init()
1136 return -EINVAL; in is_iter_reg_valid_init()
1139 if (slot->slot_type[j] != STACK_ITER) in is_iter_reg_valid_init()
1140 return -EINVAL; in is_iter_reg_valid_init()
1166 slot = &state->stack[spi]; in mark_stack_slot_irq_flag()
1167 st = &slot->spilled_ptr; in mark_stack_slot_irq_flag()
1170 st->type = PTR_TO_STACK; /* we don't have dedicated reg type */ in mark_stack_slot_irq_flag()
1171 st->live |= REG_LIVE_WRITTEN; in mark_stack_slot_irq_flag()
1172 st->ref_obj_id = id; in mark_stack_slot_irq_flag()
1175 slot->slot_type[i] = STACK_IRQ_FLAG; in mark_stack_slot_irq_flag()
1192 slot = &state->stack[spi]; in unmark_stack_slot_irq_flag()
1193 st = &slot->spilled_ptr; in unmark_stack_slot_irq_flag()
1195 err = release_irq_state(env->cur_state, st->ref_obj_id); in unmark_stack_slot_irq_flag()
1196 WARN_ON_ONCE(err && err != -EACCES); in unmark_stack_slot_irq_flag()
1200 for (int i = 0; i < env->cur_state->acquired_refs; i++) { in unmark_stack_slot_irq_flag()
1201 if (env->cur_state->refs[i].id == env->cur_state->active_irq_id) { in unmark_stack_slot_irq_flag()
1202 insn_idx = env->cur_state->refs[i].insn_idx; in unmark_stack_slot_irq_flag()
1208 env->cur_state->active_irq_id, insn_idx); in unmark_stack_slot_irq_flag()
1215 st->live |= REG_LIVE_WRITTEN; in unmark_stack_slot_irq_flag()
1218 slot->slot_type[i] = STACK_INVALID; in unmark_stack_slot_irq_flag()
1230 /* For -ERANGE (i.e. spi not falling into allocated stack slots), we in is_irq_flag_reg_valid_uninit()
1235 if (spi == -ERANGE) in is_irq_flag_reg_valid_uninit()
1240 slot = &state->stack[spi]; in is_irq_flag_reg_valid_uninit()
1243 if (slot->slot_type[i] == STACK_IRQ_FLAG) in is_irq_flag_reg_valid_uninit()
1257 return -EINVAL; in is_irq_flag_reg_valid_init()
1259 slot = &state->stack[spi]; in is_irq_flag_reg_valid_init()
1260 st = &slot->spilled_ptr; in is_irq_flag_reg_valid_init()
1262 if (!st->ref_obj_id) in is_irq_flag_reg_valid_init()
1263 return -EINVAL; in is_irq_flag_reg_valid_init()
1266 if (slot->slot_type[i] != STACK_IRQ_FLAG) in is_irq_flag_reg_valid_init()
1267 return -EINVAL; in is_irq_flag_reg_valid_init()
1272 * - spilled register state (STACK_SPILL);
1273 * - dynptr state (STACK_DYNPTR);
1274 * - iter state (STACK_ITER).
1275 * - irq flag state (STACK_IRQ_FLAG)
1279 enum bpf_stack_slot_type type = stack->slot_type[BPF_REG_SIZE - 1]; in is_stack_slot_special()
1302 return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL; in is_spilled_reg()
1307 return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL && in is_spilled_scalar_reg()
1308 stack->spilled_ptr.type == SCALAR_VALUE; in is_spilled_scalar_reg()
1313 return stack->slot_type[0] == STACK_SPILL && in is_spilled_scalar_reg64()
1314 stack->spilled_ptr.type == SCALAR_VALUE; in is_spilled_scalar_reg64()
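/* Spilled bytes are recorded from the highest slot_type index downwards,
 * so slot_type[BPF_REG_SIZE - 1] == STACK_SPILL holds for a spill of any
 * size, while slot_type[0] == STACK_SPILL additionally implies the full
 * 8-byte register was spilled, which is what is_spilled_scalar_reg64()
 * relies on.
 */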
1394 memset(arr + old_n * size, 0, (new_n - old_n) * size); in realloc_array()
1402 dst->refs = copy_array(dst->refs, src->refs, src->acquired_refs, in copy_reference_state()
1404 if (!dst->refs) in copy_reference_state()
1405 return -ENOMEM; in copy_reference_state()
1407 dst->acquired_refs = src->acquired_refs; in copy_reference_state()
1408 dst->active_locks = src->active_locks; in copy_reference_state()
1409 dst->active_preempt_locks = src->active_preempt_locks; in copy_reference_state()
1410 dst->active_rcu_lock = src->active_rcu_lock; in copy_reference_state()
1411 dst->active_irq_id = src->active_irq_id; in copy_reference_state()
1417 size_t n = src->allocated_stack / BPF_REG_SIZE; in copy_stack_state()
1419 dst->stack = copy_array(dst->stack, src->stack, n, sizeof(struct bpf_stack_state), in copy_stack_state()
1421 if (!dst->stack) in copy_stack_state()
1422 return -ENOMEM; in copy_stack_state()
1424 dst->allocated_stack = src->allocated_stack; in copy_stack_state()
1430 state->refs = realloc_array(state->refs, state->acquired_refs, n, in resize_reference_state()
1432 if (!state->refs) in resize_reference_state()
1433 return -ENOMEM; in resize_reference_state()
1435 state->acquired_refs = n; in resize_reference_state()
1439 /* Possibly update state->allocated_stack to be at least size bytes. Also
1440 * possibly update the function's high-water mark in its bpf_subprog_info.
1444 size_t old_n = state->allocated_stack / BPF_REG_SIZE, n; in grow_stack_state()
1453 state->stack = realloc_array(state->stack, old_n, n, sizeof(struct bpf_stack_state)); in grow_stack_state()
1454 if (!state->stack) in grow_stack_state()
1455 return -ENOMEM; in grow_stack_state()
1457 state->allocated_stack = size; in grow_stack_state()
1460 if (env->subprog_info[state->subprogno].stack_depth < size) in grow_stack_state()
1461 env->subprog_info[state->subprogno].stack_depth = size; in grow_stack_state()
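/* Example: a store at fp-20 needs at least 20 bytes of stack; rounded up
 * to whole 8-byte slots that is 24 bytes (3 slots), assuming size is
 * rounded to a multiple of BPF_REG_SIZE before the reallocation above.
 * The per-subprog stack_depth high-water mark recorded here feeds the
 * later total stack depth check across the call chain.
 */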
1466 /* Acquire a pointer id from the env and update the state->refs to include
1473 struct bpf_verifier_state *state = env->cur_state; in acquire_reference_state()
1474 int new_ofs = state->acquired_refs; in acquire_reference_state()
1477 err = resize_reference_state(state, state->acquired_refs + 1); in acquire_reference_state()
1480 state->refs[new_ofs].insn_idx = insn_idx; in acquire_reference_state()
1482 return &state->refs[new_ofs]; in acquire_reference_state()
1491 return -ENOMEM; in acquire_reference()
1492 s->type = REF_TYPE_PTR; in acquire_reference()
1493 s->id = ++env->id_gen; in acquire_reference()
1494 return s->id; in acquire_reference()
1500 struct bpf_verifier_state *state = env->cur_state; in acquire_lock_state()
1505 return -ENOMEM; in acquire_lock_state()
1506 s->type = type; in acquire_lock_state()
1507 s->id = id; in acquire_lock_state()
1508 s->ptr = ptr; in acquire_lock_state()
1510 state->active_locks++; in acquire_lock_state()
1516 struct bpf_verifier_state *state = env->cur_state; in acquire_irq_state()
1521 return -ENOMEM; in acquire_irq_state()
1522 s->type = REF_TYPE_IRQ; in acquire_irq_state()
1523 s->id = ++env->id_gen; in acquire_irq_state()
1525 state->active_irq_id = s->id; in acquire_irq_state()
1526 return s->id; in acquire_irq_state()
1536 * it can detect out-of-order IRQ restore. Hence use memmove to shift in release_reference_state()
1539 last_idx = state->acquired_refs - 1; in release_reference_state()
1540 rem = state->acquired_refs - idx - 1; in release_reference_state()
1542 memmove(&state->refs[idx], &state->refs[idx + 1], sizeof(*state->refs) * rem); in release_reference_state()
1543 memset(&state->refs[last_idx], 0, sizeof(*state->refs)); in release_reference_state()
1544 state->acquired_refs--; in release_reference_state()
1552 for (i = 0; i < state->acquired_refs; i++) { in release_lock_state()
1553 if (state->refs[i].type != type) in release_lock_state()
1555 if (state->refs[i].id == id && state->refs[i].ptr == ptr) { in release_lock_state()
1557 state->active_locks--; in release_lock_state()
1561 return -EINVAL; in release_lock_state()
1569 if (id != state->active_irq_id) in release_irq_state()
1570 return -EACCES; in release_irq_state()
1572 for (i = 0; i < state->acquired_refs; i++) { in release_irq_state()
1573 if (state->refs[i].type != REF_TYPE_IRQ) in release_irq_state()
1575 if (state->refs[i].id == id) { in release_irq_state()
1577 state->active_irq_id = prev_id; in release_irq_state()
1580 prev_id = state->refs[i].id; in release_irq_state()
1583 return -EINVAL; in release_irq_state()
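/* IRQ flag save/restore must be properly nested: only the most recently
 * acquired IRQ state (state->active_irq_id) may be released, otherwise
 * -EACCES is returned.  On success active_irq_id falls back to the id of
 * the previous REF_TYPE_IRQ entry (prev_id), restoring the outer level.
 */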
1591 for (i = 0; i < state->acquired_refs; i++) { in find_lock_state()
1592 struct bpf_reference_state *s = &state->refs[i]; in find_lock_state()
1594 if (s->type != type) in find_lock_state()
1597 if (s->id == id && s->ptr == ptr) in find_lock_state()
1607 kfree(state->stack); in free_func_state()
1616 for (i = 0; i <= state->curframe; i++) { in free_verifier_state()
1617 free_func_state(state->frame[i]); in free_verifier_state()
1618 state->frame[i] = NULL; in free_verifier_state()
1620 kfree(state->refs); in free_verifier_state()
1644 for (i = src->curframe + 1; i <= dst_state->curframe; i++) { in copy_verifier_state()
1645 free_func_state(dst_state->frame[i]); in copy_verifier_state()
1646 dst_state->frame[i] = NULL; in copy_verifier_state()
1651 dst_state->speculative = src->speculative; in copy_verifier_state()
1652 dst_state->in_sleepable = src->in_sleepable; in copy_verifier_state()
1653 dst_state->curframe = src->curframe; in copy_verifier_state()
1654 dst_state->branches = src->branches; in copy_verifier_state()
1655 dst_state->parent = src->parent; in copy_verifier_state()
1656 dst_state->first_insn_idx = src->first_insn_idx; in copy_verifier_state()
1657 dst_state->last_insn_idx = src->last_insn_idx; in copy_verifier_state()
1658 dst_state->insn_hist_start = src->insn_hist_start; in copy_verifier_state()
1659 dst_state->insn_hist_end = src->insn_hist_end; in copy_verifier_state()
1660 dst_state->dfs_depth = src->dfs_depth; in copy_verifier_state()
1661 dst_state->callback_unroll_depth = src->callback_unroll_depth; in copy_verifier_state()
1662 dst_state->used_as_loop_entry = src->used_as_loop_entry; in copy_verifier_state()
1663 dst_state->may_goto_depth = src->may_goto_depth; in copy_verifier_state()
1664 for (i = 0; i <= src->curframe; i++) { in copy_verifier_state()
1665 dst = dst_state->frame[i]; in copy_verifier_state()
1669 return -ENOMEM; in copy_verifier_state()
1670 dst_state->frame[i] = dst; in copy_verifier_state()
1672 err = copy_func_state(dst, src->frame[i]); in copy_verifier_state()
1681 return env->prog->len; in state_htab_size()
1686 struct bpf_verifier_state *cur = env->cur_state; in explored_state()
1687 struct bpf_func_state *state = cur->frame[cur->curframe]; in explored_state()
1689 return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)]; in explored_state()
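/* Explored states are hashed by (instruction index ^ callsite) of the
 * current frame, so states at the same instruction reached via different
 * call sites land in different buckets; same_callsites() below then
 * verifies the whole call chain matches before states are compared.
 */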
1696 if (a->curframe != b->curframe) in same_callsites()
1699 for (fr = a->curframe; fr >= 0; fr--) in same_callsites()
1700 if (a->frame[fr]->callsite != b->frame[fr]->callsite) in same_callsites()
1706 /* Open coded iterators allow back-edges in the state graph in order to
1710 * part of some loops in order to decide whether non-exact states
1712 * - non-exact states comparison establishes sub-state relation and uses
1715 * - exact states comparison just checks if current and explored states
1716 * are identical (and thus form a back-edge).
1740 * ... ... .---------> hdr
1743 * cur .-> succ | .------...
1746 * succ '-- cur | ... ...
1749 * | succ <- cur
1754 * '----'
1766 * .------... .------...
1769 * .-> hdr ... ... ...
1772 * | succ <- cur succ <- cur
1777 * '----' exit
1821 * - use st->branches == 0 as a signal that DFS of succ had been finished in get_loop_entry()
1824 * - use st->branches > 0 as a signal that st is in the current DFS path; in get_loop_entry()
1825 * - handle cases B and C in is_state_visited();
1826 * - update topmost loop entry for intermediate states in get_loop_entry().
1830 struct bpf_verifier_state *topmost = st->loop_entry, *old; in get_loop_entry()
1832 while (topmost && topmost->loop_entry && topmost != topmost->loop_entry) in get_loop_entry()
1833 topmost = topmost->loop_entry; in get_loop_entry()
1837 while (st && st->loop_entry != topmost) { in get_loop_entry()
1838 old = st->loop_entry; in get_loop_entry()
1839 st->loop_entry = topmost; in get_loop_entry()
1851 /* The hdr1->branches check decides between cases B and C in in update_loop_entry()
1852 * comment for get_loop_entry(). If hdr1->branches == 0 then in update_loop_entry()
1855 * no need to update cur->loop_entry. in update_loop_entry()
1857 if (hdr1->branches && hdr1->dfs_depth <= cur1->dfs_depth) { in update_loop_entry()
1858 cur->loop_entry = hdr; in update_loop_entry()
1859 hdr->used_as_loop_entry = true; in update_loop_entry()
1866 u32 br = --st->branches; in update_branch_counts()
1868 /* br == 0 signals that DFS exploration for 'st' is finished, in update_branch_counts()
1873 if (br == 0 && st->parent && st->loop_entry) in update_branch_counts()
1874 update_loop_entry(st->parent, st->loop_entry); in update_branch_counts()
1884 st = st->parent; in update_branch_counts()
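/* Example of branch accounting: push_stack() bumps parent->branches for
 * every branch pushed onto the stack.  When a verification path ends,
 * update_branch_counts() walks the parentage chain decrementing these
 * counters; a state whose count reaches zero has had all paths through it
 * explored, while states with branches > 0 are still part of the current
 * DFS path (see the loop-entry handling above).
 */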
1891 struct bpf_verifier_state *cur = env->cur_state; in pop_stack()
1892 struct bpf_verifier_stack_elem *elem, *head = env->head; in pop_stack()
1895 if (env->head == NULL) in pop_stack()
1896 return -ENOENT; in pop_stack()
1899 err = copy_verifier_state(cur, &head->st); in pop_stack()
1904 bpf_vlog_reset(&env->log, head->log_pos); in pop_stack()
1906 *insn_idx = head->insn_idx; in pop_stack()
1908 *prev_insn_idx = head->prev_insn_idx; in pop_stack()
1909 elem = head->next; in pop_stack()
1910 free_verifier_state(&head->st, false); in pop_stack()
1912 env->head = elem; in pop_stack()
1913 env->stack_size--; in pop_stack()
1921 struct bpf_verifier_state *cur = env->cur_state; in push_stack()
1929 elem->insn_idx = insn_idx; in push_stack()
1930 elem->prev_insn_idx = prev_insn_idx; in push_stack()
1931 elem->next = env->head; in push_stack()
1932 elem->log_pos = env->log.end_pos; in push_stack()
1933 env->head = elem; in push_stack()
1934 env->stack_size++; in push_stack()
1935 err = copy_verifier_state(&elem->st, cur); in push_stack()
1938 elem->st.speculative |= speculative; in push_stack()
1939 if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) { in push_stack()
1941 env->stack_size); in push_stack()
1944 if (elem->st.parent) { in push_stack()
1945 ++elem->st.parent->branches; in push_stack()
1948 * 1. speculative states will bump 'branches' for non-branch in push_stack()
1956 return &elem->st; in push_stack()
1958 free_verifier_state(env->cur_state, true); in push_stack()
1959 env->cur_state = NULL; in push_stack()
1970 /* This helper doesn't clear reg->id */
1973 reg->var_off = tnum_const(imm); in ___mark_reg_known()
1974 reg->smin_value = (s64)imm; in ___mark_reg_known()
1975 reg->smax_value = (s64)imm; in ___mark_reg_known()
1976 reg->umin_value = imm; in ___mark_reg_known()
1977 reg->umax_value = imm; in ___mark_reg_known()
1979 reg->s32_min_value = (s32)imm; in ___mark_reg_known()
1980 reg->s32_max_value = (s32)imm; in ___mark_reg_known()
1981 reg->u32_min_value = (u32)imm; in ___mark_reg_known()
1982 reg->u32_max_value = (u32)imm; in ___mark_reg_known()
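/* Example: with imm == 0xffffffffffffffff the register gets var_off set to
 * that constant, smin = smax = -1, umin = umax = U64_MAX, and the 32-bit
 * bounds become -1 / 0xffffffff accordingly, i.e. all five views of the
 * register agree on the single known value.
 */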
1991 memset(((u8 *)reg) + sizeof(reg->type), 0, in __mark_reg_known()
1992 offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type)); in __mark_reg_known()
1993 reg->id = 0; in __mark_reg_known()
1994 reg->ref_obj_id = 0; in __mark_reg_known()
2000 reg->var_off = tnum_const_subreg(reg->var_off, imm); in __mark_reg32_known()
2001 reg->s32_min_value = (s32)imm; in __mark_reg32_known()
2002 reg->s32_max_value = (s32)imm; in __mark_reg32_known()
2003 reg->u32_min_value = (u32)imm; in __mark_reg32_known()
2004 reg->u32_max_value = (u32)imm; in __mark_reg32_known()
2018 reg->type = SCALAR_VALUE; in __mark_reg_const_zero()
2022 reg->precise = !env->bpf_capable; in __mark_reg_const_zero()
2041 /* reg->type has no meaning for STACK_DYNPTR, but when we set reg for in __mark_dynptr_reg()
2046 reg->type = CONST_PTR_TO_DYNPTR; in __mark_dynptr_reg()
2048 reg->id = dynptr_id; in __mark_dynptr_reg()
2049 reg->dynptr.type = type; in __mark_dynptr_reg()
2050 reg->dynptr.first_slot = first_slot; in __mark_dynptr_reg()
2055 if (base_type(reg->type) == PTR_TO_MAP_VALUE) { in mark_ptr_not_null_reg()
2056 const struct bpf_map *map = reg->map_ptr; in mark_ptr_not_null_reg()
2058 if (map->inner_map_meta) { in mark_ptr_not_null_reg()
2059 reg->type = CONST_PTR_TO_MAP; in mark_ptr_not_null_reg()
2060 reg->map_ptr = map->inner_map_meta; in mark_ptr_not_null_reg()
2064 if (btf_record_has_field(map->inner_map_meta->record, BPF_TIMER)) in mark_ptr_not_null_reg()
2065 reg->map_uid = reg->id; in mark_ptr_not_null_reg()
2066 if (btf_record_has_field(map->inner_map_meta->record, BPF_WORKQUEUE)) in mark_ptr_not_null_reg()
2067 reg->map_uid = reg->id; in mark_ptr_not_null_reg()
2068 } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) { in mark_ptr_not_null_reg()
2069 reg->type = PTR_TO_XDP_SOCK; in mark_ptr_not_null_reg()
2070 } else if (map->map_type == BPF_MAP_TYPE_SOCKMAP || in mark_ptr_not_null_reg()
2071 map->map_type == BPF_MAP_TYPE_SOCKHASH) { in mark_ptr_not_null_reg()
2072 reg->type = PTR_TO_SOCKET; in mark_ptr_not_null_reg()
2074 reg->type = PTR_TO_MAP_VALUE; in mark_ptr_not_null_reg()
2079 reg->type &= ~PTR_MAYBE_NULL; in mark_ptr_not_null_reg()
2087 regs[regno].btf = ds_head->btf; in mark_reg_graph_node()
2088 regs[regno].btf_id = ds_head->value_btf_id; in mark_reg_graph_node()
2089 regs[regno].off = ds_head->node_offset; in mark_reg_graph_node()
2094 return type_is_pkt_pointer(reg->type); in reg_is_pkt_pointer()
2100 reg->type == PTR_TO_PACKET_END; in reg_is_pkt_pointer_any()
2105 return base_type(reg->type) == PTR_TO_MEM && in reg_is_dynptr_slice_pkt()
2106 (reg->type & DYNPTR_TYPE_SKB || reg->type & DYNPTR_TYPE_XDP); in reg_is_dynptr_slice_pkt()
2117 return reg->type == which && in reg_is_init_pkt_pointer()
2118 reg->id == 0 && in reg_is_init_pkt_pointer()
2119 reg->off == 0 && in reg_is_init_pkt_pointer()
2120 tnum_equals_const(reg->var_off, 0); in reg_is_init_pkt_pointer()
2126 reg->smin_value = S64_MIN; in __mark_reg_unbounded()
2127 reg->smax_value = S64_MAX; in __mark_reg_unbounded()
2128 reg->umin_value = 0; in __mark_reg_unbounded()
2129 reg->umax_value = U64_MAX; in __mark_reg_unbounded()
2131 reg->s32_min_value = S32_MIN; in __mark_reg_unbounded()
2132 reg->s32_max_value = S32_MAX; in __mark_reg_unbounded()
2133 reg->u32_min_value = 0; in __mark_reg_unbounded()
2134 reg->u32_max_value = U32_MAX; in __mark_reg_unbounded()
2139 reg->smin_value = S64_MIN; in __mark_reg64_unbounded()
2140 reg->smax_value = S64_MAX; in __mark_reg64_unbounded()
2141 reg->umin_value = 0; in __mark_reg64_unbounded()
2142 reg->umax_value = U64_MAX; in __mark_reg64_unbounded()
2147 reg->s32_min_value = S32_MIN; in __mark_reg32_unbounded()
2148 reg->s32_max_value = S32_MAX; in __mark_reg32_unbounded()
2149 reg->u32_min_value = 0; in __mark_reg32_unbounded()
2150 reg->u32_max_value = U32_MAX; in __mark_reg32_unbounded()
2155 struct tnum var32_off = tnum_subreg(reg->var_off); in __update_reg32_bounds()
2158 reg->s32_min_value = max_t(s32, reg->s32_min_value, in __update_reg32_bounds()
2161 reg->s32_max_value = min_t(s32, reg->s32_max_value, in __update_reg32_bounds()
2163 reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)var32_off.value); in __update_reg32_bounds()
2164 reg->u32_max_value = min(reg->u32_max_value, in __update_reg32_bounds()
2171 reg->smin_value = max_t(s64, reg->smin_value, in __update_reg64_bounds()
2172 reg->var_off.value | (reg->var_off.mask & S64_MIN)); in __update_reg64_bounds()
2174 reg->smax_value = min_t(s64, reg->smax_value, in __update_reg64_bounds()
2175 reg->var_off.value | (reg->var_off.mask & S64_MAX)); in __update_reg64_bounds()
2176 reg->umin_value = max(reg->umin_value, reg->var_off.value); in __update_reg64_bounds()
2177 reg->umax_value = min(reg->umax_value, in __update_reg64_bounds()
2178 reg->var_off.value | reg->var_off.mask); in __update_reg64_bounds()
2187 /* Uses signed min/max values to inform unsigned, and vice-versa */
2195 * [10, 20] range. But this property holds for any 64-bit range as in __reg32_deduce_bounds()
2206 * depends on actual hexadecimal values of 32-bit range. They can form in __reg32_deduce_bounds()
2211 if ((reg->umin_value >> 32) == (reg->umax_value >> 32)) { in __reg32_deduce_bounds()
2215 reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)reg->umin_value); in __reg32_deduce_bounds()
2216 reg->u32_max_value = min_t(u32, reg->u32_max_value, (u32)reg->umax_value); in __reg32_deduce_bounds()
2218 if ((s32)reg->umin_value <= (s32)reg->umax_value) { in __reg32_deduce_bounds()
2219 reg->s32_min_value = max_t(s32, reg->s32_min_value, (s32)reg->umin_value); in __reg32_deduce_bounds()
2220 reg->s32_max_value = min_t(s32, reg->s32_max_value, (s32)reg->umax_value); in __reg32_deduce_bounds()
2223 if ((reg->smin_value >> 32) == (reg->smax_value >> 32)) { in __reg32_deduce_bounds()
2225 if ((u32)reg->smin_value <= (u32)reg->smax_value) { in __reg32_deduce_bounds()
2226 reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)reg->smin_value); in __reg32_deduce_bounds()
2227 reg->u32_max_value = min_t(u32, reg->u32_max_value, (u32)reg->smax_value); in __reg32_deduce_bounds()
2230 if ((s32)reg->smin_value <= (s32)reg->smax_value) { in __reg32_deduce_bounds()
2231 reg->s32_min_value = max_t(s32, reg->s32_min_value, (s32)reg->smin_value); in __reg32_deduce_bounds()
2232 reg->s32_max_value = min_t(s32, reg->s32_max_value, (s32)reg->smax_value); in __reg32_deduce_bounds()
2236 * sequential numbers (in 32-bit unsigned space, so 0xffffffff to in __reg32_deduce_bounds()
2239 * have s64 range [-1, 1] ([0xffffffffffffffff, 0x0000000000000001]). in __reg32_deduce_bounds()
2240 * Possible s64 values are {-1, 0, 1} ({0xffffffffffffffff, in __reg32_deduce_bounds()
2242 * we still get a valid s32 range [-1, 1] ([0xffffffff, 0x00000001]). in __reg32_deduce_bounds()
2246 * [-16, 16] ([0xfffffff0; 0x00000010]) in its 32 bit subregister. in __reg32_deduce_bounds()
2248 if ((u32)(reg->umin_value >> 32) + 1 == (u32)(reg->umax_value >> 32) && in __reg32_deduce_bounds()
2249 (s32)reg->umin_value < 0 && (s32)reg->umax_value >= 0) { in __reg32_deduce_bounds()
2250 reg->s32_min_value = max_t(s32, reg->s32_min_value, (s32)reg->umin_value); in __reg32_deduce_bounds()
2251 reg->s32_max_value = min_t(s32, reg->s32_max_value, (s32)reg->umax_value); in __reg32_deduce_bounds()
2253 if ((u32)(reg->smin_value >> 32) + 1 == (u32)(reg->smax_value >> 32) && in __reg32_deduce_bounds()
2254 (s32)reg->smin_value < 0 && (s32)reg->smax_value >= 0) { in __reg32_deduce_bounds()
2255 reg->s32_min_value = max_t(s32, reg->s32_min_value, (s32)reg->smin_value); in __reg32_deduce_bounds()
2256 reg->s32_max_value = min_t(s32, reg->s32_max_value, (s32)reg->smax_value); in __reg32_deduce_bounds()
2261 if ((s32)reg->u32_min_value <= (s32)reg->u32_max_value) { in __reg32_deduce_bounds()
2262 reg->s32_min_value = max_t(s32, reg->s32_min_value, reg->u32_min_value); in __reg32_deduce_bounds()
2263 reg->s32_max_value = min_t(s32, reg->s32_max_value, reg->u32_max_value); in __reg32_deduce_bounds()
2267 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff. in __reg32_deduce_bounds()
2269 if ((u32)reg->s32_min_value <= (u32)reg->s32_max_value) { in __reg32_deduce_bounds()
2270 reg->u32_min_value = max_t(u32, reg->s32_min_value, reg->u32_min_value); in __reg32_deduce_bounds()
2271 reg->u32_max_value = min_t(u32, reg->s32_max_value, reg->u32_max_value); in __reg32_deduce_bounds()
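/* Example: if the 64-bit unsigned range is [0x100000005, 0x100000009],
 * both bounds share the upper 32 bits, so the u32 bounds tighten to
 * [5, 9]; and since (s32)5 <= (s32)9, the s32 bounds tighten to [5, 9] as
 * well, per the first case above.
 */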
2282 * |-------------------------------|--------------------------------| in __reg64_deduce_bounds()
2290 * |-------------------------------|--------------------------------| in __reg64_deduce_bounds()
2291 * 0 S64_MAX S64_MIN -1 in __reg64_deduce_bounds()
2294 * contiguous to the right of it, wrapping around from -1 to 0, and in __reg64_deduce_bounds()
2297 * more visually as mapped to sign-agnostic range of hex values. in __reg64_deduce_bounds()
2303 * |-------------------------------|--------------------------------| in __reg64_deduce_bounds()
2304 * 0 S64_MAX S64_MIN -1 in __reg64_deduce_bounds()
2306 * >------------------------------ -------------------------------> in __reg64_deduce_bounds()
2316 * |-------------------------------|--------------------------------| in __reg64_deduce_bounds()
2322 * will be non-negative both as u64 and s64 (and in fact it will be in __reg64_deduce_bounds()
2325 * non-negative range of values larger than 0x8000000000000000. in __reg64_deduce_bounds()
2344 if ((s64)reg->umin_value <= (s64)reg->umax_value) { in __reg64_deduce_bounds()
2345 reg->smin_value = max_t(s64, reg->smin_value, reg->umin_value); in __reg64_deduce_bounds()
2346 reg->smax_value = min_t(s64, reg->smax_value, reg->umax_value); in __reg64_deduce_bounds()
2350 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff. in __reg64_deduce_bounds()
2352 if ((u64)reg->smin_value <= (u64)reg->smax_value) { in __reg64_deduce_bounds()
2353 reg->umin_value = max_t(u64, reg->smin_value, reg->umin_value); in __reg64_deduce_bounds()
2354 reg->umax_value = min_t(u64, reg->smax_value, reg->umax_value); in __reg64_deduce_bounds()
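/* Example: a u64 range [5, 9] lies entirely in the non-negative half, so
 * smin/smax can be tightened to [5, 9]; conversely an s64 range [-3, -1]
 * maps to the u64 range [0xfffffffffffffffd, 0xffffffffffffffff], as the
 * comment above describes.
 */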
2360 /* Try to tighten 64-bit bounds from 32-bit knowledge, using 32-bit in __reg_deduce_mixed_bounds()
2361 * values on both sides of 64-bit range in hope to have tighter range. in __reg_deduce_mixed_bounds()
2363 * 32-bit signed > 0 operation that s32 bounds are now [1; 0x7fffffff]. in __reg_deduce_mixed_bounds()
2364 * With this, we can substitute 1 as low 32-bits of _low_ 64-bit bound in __reg_deduce_mixed_bounds()
2365 * (0x100000000 -> 0x100000001) and 0x7fffffff as low 32-bits of in __reg_deduce_mixed_bounds()
2366 * _high_ 64-bit bound (0x380000000 -> 0x37fffffff) and arrive at a in __reg_deduce_mixed_bounds()
2369 * with are well-formed ranges in respective s64 or u64 domain, just in __reg_deduce_mixed_bounds()
2370 * like we do with similar kinds of 32-to-64 or 64-to-32 adjustments. in __reg_deduce_mixed_bounds()
2375 /* u32 -> u64 tightening, it's always well-formed */ in __reg_deduce_mixed_bounds()
2376 new_umin = (reg->umin_value & ~0xffffffffULL) | reg->u32_min_value; in __reg_deduce_mixed_bounds()
2377 new_umax = (reg->umax_value & ~0xffffffffULL) | reg->u32_max_value; in __reg_deduce_mixed_bounds()
2378 reg->umin_value = max_t(u64, reg->umin_value, new_umin); in __reg_deduce_mixed_bounds()
2379 reg->umax_value = min_t(u64, reg->umax_value, new_umax); in __reg_deduce_mixed_bounds()
2380 /* u32 -> s64 tightening, u32 range embedded into s64 preserves range validity */ in __reg_deduce_mixed_bounds()
2381 new_smin = (reg->smin_value & ~0xffffffffULL) | reg->u32_min_value; in __reg_deduce_mixed_bounds()
2382 new_smax = (reg->smax_value & ~0xffffffffULL) | reg->u32_max_value; in __reg_deduce_mixed_bounds()
2383 reg->smin_value = max_t(s64, reg->smin_value, new_smin); in __reg_deduce_mixed_bounds()
2384 reg->smax_value = min_t(s64, reg->smax_value, new_smax); in __reg_deduce_mixed_bounds()
2387 if ((u32)reg->s32_min_value <= (u32)reg->s32_max_value) { in __reg_deduce_mixed_bounds()
2388 /* s32 -> u64 tightening */ in __reg_deduce_mixed_bounds()
2389 new_umin = (reg->umin_value & ~0xffffffffULL) | (u32)reg->s32_min_value; in __reg_deduce_mixed_bounds()
2390 new_umax = (reg->umax_value & ~0xffffffffULL) | (u32)reg->s32_max_value; in __reg_deduce_mixed_bounds()
2391 reg->umin_value = max_t(u64, reg->umin_value, new_umin); in __reg_deduce_mixed_bounds()
2392 reg->umax_value = min_t(u64, reg->umax_value, new_umax); in __reg_deduce_mixed_bounds()
2393 /* s32 -> s64 tightening */ in __reg_deduce_mixed_bounds()
2394 new_smin = (reg->smin_value & ~0xffffffffULL) | (u32)reg->s32_min_value; in __reg_deduce_mixed_bounds()
2395 new_smax = (reg->smax_value & ~0xffffffffULL) | (u32)reg->s32_max_value; in __reg_deduce_mixed_bounds()
2396 reg->smin_value = max_t(s64, reg->smin_value, new_smin); in __reg_deduce_mixed_bounds()
2397 reg->smax_value = min_t(s64, reg->smax_value, new_smax); in __reg_deduce_mixed_bounds()
2401 * when upper bits for a 64-bit range are all 1s or all 0s. in __reg_deduce_mixed_bounds()
2412 * Also suppose that its 32-bit range is positive, in __reg_deduce_mixed_bounds()
2413 * meaning that lower 32-bits of the full 64-bit register in __reg_deduce_mixed_bounds()
2421 * which means that upper bits of the full 64-bit register in __reg_deduce_mixed_bounds()
2425 * - 0xffff_ffff_8000_0000 == (s64)S32_MIN in __reg_deduce_mixed_bounds()
2426 * - 0x0000_0000_7fff_ffff == (s64)S32_MAX in __reg_deduce_mixed_bounds()
2429 if (reg->s32_min_value >= 0 && reg->smin_value >= S32_MIN && reg->smax_value <= S32_MAX) { in __reg_deduce_mixed_bounds()
2430 reg->smin_value = reg->s32_min_value; in __reg_deduce_mixed_bounds()
2431 reg->smax_value = reg->s32_max_value; in __reg_deduce_mixed_bounds()
2432 reg->umin_value = reg->s32_min_value; in __reg_deduce_mixed_bounds()
2433 reg->umax_value = reg->s32_max_value; in __reg_deduce_mixed_bounds()
2434 reg->var_off = tnum_intersect(reg->var_off, in __reg_deduce_mixed_bounds()
2435 tnum_range(reg->smin_value, reg->smax_value)); in __reg_deduce_mixed_bounds()
2449 struct tnum var64_off = tnum_intersect(reg->var_off, in __reg_bound_offset()
2450 tnum_range(reg->umin_value, in __reg_bound_offset()
2451 reg->umax_value)); in __reg_bound_offset()
2453 tnum_range(reg->u32_min_value, in __reg_bound_offset()
2454 reg->u32_max_value)); in __reg_bound_offset()
2456 reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off); in __reg_bound_offset()
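/* tnum_range(min, max) builds the tightest tnum covering the interval,
 * e.g. tnum_range(4, 7) has value 0b100 and mask 0b011 (bit 2 known one,
 * bits 0-1 unknown).  Intersecting it with the existing var_off can only
 * add known bits, never lose them.
 */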
2480 if (reg->umin_value > reg->umax_value || in reg_bounds_sanity_check()
2481 reg->smin_value > reg->smax_value || in reg_bounds_sanity_check()
2482 reg->u32_min_value > reg->u32_max_value || in reg_bounds_sanity_check()
2483 reg->s32_min_value > reg->s32_max_value) { in reg_bounds_sanity_check()
2488 if (tnum_is_const(reg->var_off)) { in reg_bounds_sanity_check()
2489 u64 uval = reg->var_off.value; in reg_bounds_sanity_check()
2492 if (reg->umin_value != uval || reg->umax_value != uval || in reg_bounds_sanity_check()
2493 reg->smin_value != sval || reg->smax_value != sval) { in reg_bounds_sanity_check()
2499 if (tnum_subreg_is_const(reg->var_off)) { in reg_bounds_sanity_check()
2500 u32 uval32 = tnum_subreg(reg->var_off).value; in reg_bounds_sanity_check()
2503 if (reg->u32_min_value != uval32 || reg->u32_max_value != uval32 || in reg_bounds_sanity_check()
2504 reg->s32_min_value != sval32 || reg->s32_max_value != sval32) { in reg_bounds_sanity_check()
2514 ctx, msg, reg->umin_value, reg->umax_value, in reg_bounds_sanity_check()
2515 reg->smin_value, reg->smax_value, in reg_bounds_sanity_check()
2516 reg->u32_min_value, reg->u32_max_value, in reg_bounds_sanity_check()
2517 reg->s32_min_value, reg->s32_max_value, in reg_bounds_sanity_check()
2518 reg->var_off.value, reg->var_off.mask); in reg_bounds_sanity_check()
2519 if (env->test_reg_invariants) in reg_bounds_sanity_check()
2520 return -EFAULT; in reg_bounds_sanity_check()
2532 reg->umin_value = reg->u32_min_value; in __reg_assign_32_into_64()
2533 reg->umax_value = reg->u32_max_value; in __reg_assign_32_into_64()
2535 /* Attempt to pull 32-bit signed bounds into 64-bit bounds but must in __reg_assign_32_into_64()
2539 if (__reg32_bound_s64(reg->s32_min_value) && in __reg_assign_32_into_64()
2540 __reg32_bound_s64(reg->s32_max_value)) { in __reg_assign_32_into_64()
2541 reg->smin_value = reg->s32_min_value; in __reg_assign_32_into_64()
2542 reg->smax_value = reg->s32_max_value; in __reg_assign_32_into_64()
2544 reg->smin_value = 0; in __reg_assign_32_into_64()
2545 reg->smax_value = U32_MAX; in __reg_assign_32_into_64()
2557 reg->type = SCALAR_VALUE; in __mark_reg_unknown_imprecise()
2558 reg->id = 0; in __mark_reg_unknown_imprecise()
2559 reg->ref_obj_id = 0; in __mark_reg_unknown_imprecise()
2560 reg->var_off = tnum_unknown; in __mark_reg_unknown_imprecise()
2561 reg->frameno = 0; in __mark_reg_unknown_imprecise()
2562 reg->precise = false; in __mark_reg_unknown_imprecise()
2573 reg->precise = !env->bpf_capable; in __mark_reg_unknown()
2597 reg->s32_min_value = max_t(s32, reg->s32_min_value, s32_min); in __mark_reg_s32_range()
2598 reg->s32_max_value = min_t(s32, reg->s32_max_value, s32_max); in __mark_reg_s32_range()
2600 reg->smin_value = max_t(s64, reg->smin_value, s32_min); in __mark_reg_s32_range()
2601 reg->smax_value = min_t(s64, reg->smax_value, s32_max); in __mark_reg_s32_range()
2612 reg->type = NOT_INIT; in __mark_reg_not_init()
2643 regs[regno].id = ++env->id_gen; in mark_btf_ld_reg()
2650 struct bpf_reg_state *regs = state->regs; in init_reg_state()
2663 regs[BPF_REG_FP].frameno = state->frameno; in init_reg_state()
2671 #define BPF_MAIN_FUNC (-1)
2676 state->callsite = callsite; in init_func_state()
2677 state->frameno = frameno; in init_func_state()
2678 state->subprogno = subprogno; in init_func_state()
2679 state->callback_ret_range = retval_range(0, 0); in init_func_state()
2696 elem->insn_idx = insn_idx; in push_async_cb()
2697 elem->prev_insn_idx = prev_insn_idx; in push_async_cb()
2698 elem->next = env->head; in push_async_cb()
2699 elem->log_pos = env->log.end_pos; in push_async_cb()
2700 env->head = elem; in push_async_cb()
2701 env->stack_size++; in push_async_cb()
2702 if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) { in push_async_cb()
2705 env->stack_size); in push_async_cb()
2716 elem->st.branches = 1; in push_async_cb()
2717 elem->st.in_sleepable = is_sleepable; in push_async_cb()
2718 elem->st.insn_hist_start = env->cur_state->insn_hist_end; in push_async_cb()
2719 elem->st.insn_hist_end = elem->st.insn_hist_start; in push_async_cb()
2727 elem->st.frame[0] = frame; in push_async_cb()
2728 return &elem->st; in push_async_cb()
2730 free_verifier_state(env->cur_state, true); in push_async_cb()
2731 env->cur_state = NULL; in push_async_cb()
2746 return ((struct bpf_subprog_info *)a)->start - in cmp_subprogs()
2747 ((struct bpf_subprog_info *)b)->start; in cmp_subprogs()
2753 struct bpf_subprog_info *vals = env->subprog_info; in find_containing_subprog()
2756 if (off >= env->prog->len || off < 0 || env->subprog_cnt == 0) in find_containing_subprog()
2760 r = env->subprog_cnt - 1; in find_containing_subprog()
2762 m = l + (r - l + 1) / 2; in find_containing_subprog()
2766 r = m - 1; in find_containing_subprog()
2777 if (!p || p->start != off) in find_subprog()
2778 return -ENOENT; in find_subprog()
2779 return p - env->subprog_info; in find_subprog()
2784 int insn_cnt = env->prog->len; in add_subprog()
2789 return -EINVAL; in add_subprog()
2794 if (env->subprog_cnt >= BPF_MAX_SUBPROGS) { in add_subprog()
2796 return -E2BIG; in add_subprog()
2799 env->subprog_info[env->subprog_cnt++].start = off; in add_subprog()
2800 sort(env->subprog_info, env->subprog_cnt, in add_subprog()
2801 sizeof(env->subprog_info[0]), cmp_subprogs, NULL); in add_subprog()
2802 return env->subprog_cnt - 1; in add_subprog()
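/* subprog_info[] is kept sorted by start instruction after every
 * insertion, which is what allows find_subprog() and
 * find_containing_subprog() above to use binary search.
 */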
2807 struct bpf_prog_aux *aux = env->prog->aux; in bpf_find_exception_callback_insn_off()
2808 struct btf *btf = aux->btf; in bpf_find_exception_callback_insn_off()
2814 /* Non-zero func_info_cnt implies valid btf */ in bpf_find_exception_callback_insn_off()
2815 if (!aux->func_info_cnt) in bpf_find_exception_callback_insn_off()
2817 main_btf_id = aux->func_info[0].type_id; in bpf_find_exception_callback_insn_off()
2822 return -EINVAL; in bpf_find_exception_callback_insn_off()
2825 name = btf_find_decl_tag_value(btf, t, -1, "exception_callback:"); in bpf_find_exception_callback_insn_off()
2829 if (ret == -ENOENT) in bpf_find_exception_callback_insn_off()
2831 else if (ret == -EEXIST) in bpf_find_exception_callback_insn_off()
2845 return -EINVAL; in bpf_find_exception_callback_insn_off()
2848 for (i = 0; i < aux->func_info_cnt; i++) { in bpf_find_exception_callback_insn_off()
2849 if (aux->func_info[i].type_id != id) in bpf_find_exception_callback_insn_off()
2851 ret = aux->func_info[i].insn_off; in bpf_find_exception_callback_insn_off()
2857 ret = -EINVAL; in bpf_find_exception_callback_insn_off()
2862 ret = -EINVAL; in bpf_find_exception_callback_insn_off()
2905 return d0->func_id - d1->func_id ?: d0->offset - d1->offset; in kfunc_desc_cmp_by_id_off()
2913 return d0->offset - d1->offset; in kfunc_btf_cmp_by_off()
2925 tab = prog->aux->kfunc_tab; in find_kfunc_desc()
2926 return bsearch(&desc, tab->descs, tab->nr_descs, in find_kfunc_desc()
2927 sizeof(tab->descs[0]), kfunc_desc_cmp_by_id_off); in find_kfunc_desc()
2937 return -EFAULT; in bpf_get_kfunc_addr()
2939 *func_addr = (u8 *)desc->addr; in bpf_get_kfunc_addr()
2953 tab = env->prog->aux->kfunc_btf_tab; in __find_kfunc_desc_btf()
2954 b = bsearch(&kf_btf, tab->descs, tab->nr_descs, in __find_kfunc_desc_btf()
2955 sizeof(tab->descs[0]), kfunc_btf_cmp_by_off); in __find_kfunc_desc_btf()
2957 if (tab->nr_descs == MAX_KFUNC_BTFS) { in __find_kfunc_desc_btf()
2959 return ERR_PTR(-E2BIG); in __find_kfunc_desc_btf()
2962 if (bpfptr_is_null(env->fd_array)) { in __find_kfunc_desc_btf()
2964 return ERR_PTR(-EPROTO); in __find_kfunc_desc_btf()
2967 if (copy_from_bpfptr_offset(&btf_fd, env->fd_array, in __find_kfunc_desc_btf()
2970 return ERR_PTR(-EFAULT); in __find_kfunc_desc_btf()
2981 return ERR_PTR(-EINVAL); in __find_kfunc_desc_btf()
2987 return ERR_PTR(-ENXIO); in __find_kfunc_desc_btf()
2990 b = &tab->descs[tab->nr_descs++]; in __find_kfunc_desc_btf()
2991 b->btf = btf; in __find_kfunc_desc_btf()
2992 b->module = mod; in __find_kfunc_desc_btf()
2993 b->offset = offset; in __find_kfunc_desc_btf()
2998 sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]), in __find_kfunc_desc_btf()
3001 btf = b->btf; in __find_kfunc_desc_btf()
3012 while (tab->nr_descs--) { in bpf_free_kfunc_btf_tab()
3013 module_put(tab->descs[tab->nr_descs].module); in bpf_free_kfunc_btf_tab()
3014 btf_put(tab->descs[tab->nr_descs].btf); in bpf_free_kfunc_btf_tab()
3027 return ERR_PTR(-EINVAL); in find_kfunc_desc_btf()
3032 return btf_vmlinux ?: ERR_PTR(-ENOENT); in find_kfunc_desc_btf()
3048 prog_aux = env->prog->aux; in add_kfunc_call()
3049 tab = prog_aux->kfunc_tab; in add_kfunc_call()
3050 btf_tab = prog_aux->kfunc_btf_tab; in add_kfunc_call()
3054 return -ENOTSUPP; in add_kfunc_call()
3057 if (!env->prog->jit_requested) { in add_kfunc_call()
3059 return -ENOTSUPP; in add_kfunc_call()
3064 return -ENOTSUPP; in add_kfunc_call()
3067 if (!env->prog->gpl_compatible) { in add_kfunc_call()
3068 verbose(env, "cannot call kernel function from non-GPL compatible program\n"); in add_kfunc_call()
3069 return -EINVAL; in add_kfunc_call()
3074 return -ENOMEM; in add_kfunc_call()
3075 prog_aux->kfunc_tab = tab; in add_kfunc_call()
3090 return -ENOMEM; in add_kfunc_call()
3091 prog_aux->kfunc_btf_tab = btf_tab; in add_kfunc_call()
3100 if (find_kfunc_desc(env->prog, func_id, offset)) in add_kfunc_call()
3103 if (tab->nr_descs == MAX_KFUNC_DESCS) { in add_kfunc_call()
3105 return -E2BIG; in add_kfunc_call()
3112 return -EINVAL; in add_kfunc_call()
3114 func_proto = btf_type_by_id(desc_btf, func->type); in add_kfunc_call()
3118 return -EINVAL; in add_kfunc_call()
3121 func_name = btf_name_by_offset(desc_btf, func->name_off); in add_kfunc_call()
3126 return -EINVAL; in add_kfunc_call()
3134 /* Check whether the relative offset overflows desc->imm */ in add_kfunc_call()
3138 return -EINVAL; in add_kfunc_call()
3143 err = bpf_dev_bound_kfunc_check(&env->log, prog_aux); in add_kfunc_call()
3148 desc = &tab->descs[tab->nr_descs++]; in add_kfunc_call()
3149 desc->func_id = func_id; in add_kfunc_call()
3150 desc->imm = call_imm; in add_kfunc_call()
3151 desc->offset = offset; in add_kfunc_call()
3152 desc->addr = addr; in add_kfunc_call()
3153 err = btf_distill_func_proto(&env->log, desc_btf, in add_kfunc_call()
3155 &desc->func_model); in add_kfunc_call()
3157 sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]), in add_kfunc_call()
3167 if (d0->imm != d1->imm) in kfunc_desc_cmp_by_imm_off()
3168 return d0->imm < d1->imm ? -1 : 1; in kfunc_desc_cmp_by_imm_off()
3169 if (d0->offset != d1->offset) in kfunc_desc_cmp_by_imm_off()
3170 return d0->offset < d1->offset ? -1 : 1; in kfunc_desc_cmp_by_imm_off()
3178 tab = prog->aux->kfunc_tab; in sort_kfunc_descs_by_imm_off()
3182 sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]), in sort_kfunc_descs_by_imm_off()
3188 return !!prog->aux->kfunc_tab; in bpf_prog_has_kfunc_call()
3196 .imm = insn->imm, in bpf_jit_find_kfunc_model()
3197 .offset = insn->off, in bpf_jit_find_kfunc_model()
3202 tab = prog->aux->kfunc_tab; in bpf_jit_find_kfunc_model()
3203 res = bsearch(&desc, tab->descs, tab->nr_descs, in bpf_jit_find_kfunc_model()
3204 sizeof(tab->descs[0]), kfunc_desc_cmp_by_imm_off); in bpf_jit_find_kfunc_model()
3206 return res ? &res->func_model : NULL; in bpf_jit_find_kfunc_model()
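/* During verification kfunc descriptors are kept sorted by
 * (func_id, offset) for find_kfunc_desc().  Once fixups are done,
 * sort_kfunc_descs_by_imm_off() above re-sorts the table by (imm, offset)
 * so the JIT can recover the function model directly from a call
 * instruction's imm/off fields, as bpf_jit_find_kfunc_model() does.
 */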
3211 struct bpf_subprog_info *subprog = env->subprog_info; in add_subprog_and_kfunc()
3212 int i, ret, insn_cnt = env->prog->len, ex_cb_insn; in add_subprog_and_kfunc()
3213 struct bpf_insn *insn = env->prog->insnsi; in add_subprog_and_kfunc()
3225 if (!env->bpf_capable) { in add_subprog_and_kfunc()
3227 return -EPERM; in add_subprog_and_kfunc()
3231 ret = add_subprog(env, i + insn->imm + 1); in add_subprog_and_kfunc()
3233 ret = add_kfunc_call(env, insn->imm, insn->off); in add_subprog_and_kfunc()
3251 for (i = 1; i < env->subprog_cnt; i++) { in add_subprog_and_kfunc()
3252 if (env->subprog_info[i].start != ex_cb_insn) in add_subprog_and_kfunc()
3254 env->exception_callback_subprog = i; in add_subprog_and_kfunc()
3263 subprog[env->subprog_cnt].start = insn_cnt; in add_subprog_and_kfunc()
3265 if (env->log.level & BPF_LOG_LEVEL2) in add_subprog_and_kfunc()
3266 for (i = 0; i < env->subprog_cnt; i++) in add_subprog_and_kfunc()
3275 struct bpf_subprog_info *subprog = env->subprog_info; in check_subprogs()
3276 struct bpf_insn *insn = env->prog->insnsi; in check_subprogs()
3277 int insn_cnt = env->prog->len; in check_subprogs()
3304 return -EINVAL; in check_subprogs()
3307 if (i == subprog_end - 1) { in check_subprogs()
3308 /* to avoid fall-through from one subprog into another in check_subprogs()
3316 return -EINVAL; in check_subprogs()
3320 if (cur_subprog < env->subprog_cnt) in check_subprogs()
3328 * issues like callee-saved registers, stack slot allocation time, etc.
3334 bool writes = parent == state->parent; /* Observe write marks */ in mark_reg_read()
3339 if (writes && state->live & REG_LIVE_WRITTEN) in mark_reg_read()
3341 if (parent->live & REG_LIVE_DONE) { in mark_reg_read()
3343 reg_type_str(env, parent->type), in mark_reg_read()
3344 parent->var_off.value, parent->off); in mark_reg_read()
3345 return -EFAULT; in mark_reg_read()
3350 if ((parent->live & REG_LIVE_READ) == flag || in mark_reg_read()
3351 parent->live & REG_LIVE_READ64) in mark_reg_read()
3355 * keep re-marking all parents as LIVE_READ. in mark_reg_read()
3357 * multiple times without writes into it in-between. in mark_reg_read()
3363 parent->live |= flag; in mark_reg_read()
3366 parent->live &= ~REG_LIVE_READ32; in mark_reg_read()
3368 parent = state->parent; in mark_reg_read()
3373 if (env->longest_mark_read_walk < cnt) in mark_reg_read()
3374 env->longest_mark_read_walk = cnt; in mark_reg_read()
3385 struct bpf_reg_state *st = &state->stack[spi - i].spilled_ptr; in mark_stack_slot_obj_read()
3387 err = mark_reg_read(env, st, st->parent, REG_LIVE_READ64); in mark_stack_slot_obj_read()
3391 mark_stack_slot_scratched(env, spi - i); in mark_stack_slot_obj_read()
3404 if (reg->type == CONST_PTR_TO_DYNPTR) in mark_dynptr_read()
3432 /* This function is supposed to be used by the following 32-bit optimization
3434 * on 64-bit, otherwise return FALSE.
3441 code = insn->code; in is_reg64()
3456 if (insn->src_reg == BPF_PSEUDO_CALL) in is_reg64()
3468 if (class == BPF_ALU64 && op == BPF_END && (insn->imm == 16 || insn->imm == 32)) in is_reg64()
3472 (class == BPF_ALU && op == BPF_END && insn->imm == 64)) in is_reg64()
3490 if (t == SRC_OP && reg->type != SCALAR_VALUE) in is_reg64()
3502 /* Both LD_IND and LD_ABS return 32-bit data. */ in is_reg64()
3522 /* Return the regno defined by the insn, or -1. */
3525 switch (BPF_CLASS(insn->code)) { in insn_def_regno()
3529 return -1; in insn_def_regno()
3531 if ((BPF_MODE(insn->code) == BPF_ATOMIC || in insn_def_regno()
3532 BPF_MODE(insn->code) == BPF_PROBE_ATOMIC) && in insn_def_regno()
3533 (insn->imm & BPF_FETCH)) { in insn_def_regno()
3534 if (insn->imm == BPF_CMPXCHG) in insn_def_regno()
3537 return insn->src_reg; in insn_def_regno()
3539 return -1; in insn_def_regno()
3542 return insn->dst_reg; in insn_def_regno()
3546 /* Return TRUE if INSN has defined any 32-bit value explicitly. */
3551 if (dst_reg == -1) in insn_has_def32()
3560 s32 def_idx = reg->subreg_def; in mark_insn_zext()
3565 env->insn_aux_data[def_idx - 1].zext_dst = true; in mark_insn_zext()
3566 /* The dst will be zero extended, so won't be sub-register anymore. */ in mark_insn_zext()
3567 reg->subreg_def = DEF_NOT_SUBREG; in mark_insn_zext()
3573 struct bpf_insn *insn = env->prog->insnsi + env->insn_idx; in __check_reg_arg()
3579 return -EINVAL; in __check_reg_arg()
3588 if (reg->type == NOT_INIT) { in __check_reg_arg()
3590 return -EACCES; in __check_reg_arg()
3592 /* We don't need to worry about FP liveness because it's read-only */ in __check_reg_arg()
3599 return mark_reg_read(env, reg, reg->parent, in __check_reg_arg()
3605 return -EACCES; in __check_reg_arg()
3607 reg->live |= REG_LIVE_WRITTEN; in __check_reg_arg()
3608 reg->subreg_def = rw64 ? DEF_NOT_SUBREG : env->insn_idx + 1; in __check_reg_arg()
3618 struct bpf_verifier_state *vstate = env->cur_state; in check_reg_arg()
3619 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in check_reg_arg()
3621 return __check_reg_arg(env, state->regs, regno, t); in check_reg_arg()
3641 env->insn_aux_data[idx].jmp_point = true; in mark_jmp_point()
3646 return env->insn_aux_data[insn_idx].jmp_point; in is_jmp_point()
3653 #define LR_FRAMENO_MASK ((1ull << LR_FRAMENO_BITS) - 1)
3654 #define LR_SPI_MASK ((1ull << LR_SPI_BITS) - 1)
3655 #define LR_SIZE_MASK ((1ull << LR_SIZE_BITS) - 1)
3676 if (s->cnt < LINKED_REGS_MAX) in linked_regs_push()
3677 return &s->entries[s->cnt++]; in linked_regs_push()
3682 /* Use u64 as a vector of 6 10-bit values, use the first 4 bits to track in linked_regs_pack()
3685 * - 3-bits frameno
3686 * - 6-bits spi_or_reg
3687 * - 1-bit is_reg
3694 for (i = 0; i < s->cnt; ++i) { in linked_regs_pack()
3695 struct linked_reg *e = &s->entries[i]; in linked_regs_pack()
3698 tmp |= e->frameno; in linked_regs_pack()
3699 tmp |= e->spi << LR_SPI_OFF; in linked_regs_pack()
3700 tmp |= (e->is_reg ? 1 : 0) << LR_IS_REG_OFF; in linked_regs_pack()
3706 val |= s->cnt; in linked_regs_pack()
3714 s->cnt = val & LR_SIZE_MASK; in linked_regs_unpack()
3717 for (i = 0; i < s->cnt; ++i) { in linked_regs_unpack()
3718 struct linked_reg *e = &s->entries[i]; in linked_regs_unpack()
3720 e->frameno = val & LR_FRAMENO_MASK; in linked_regs_unpack()
3721 e->spi = (val >> LR_SPI_OFF) & LR_SPI_MASK; in linked_regs_unpack()
3722 e->is_reg = (val >> LR_IS_REG_OFF) & 0x1; in linked_regs_unpack()
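Following the layout described above (per entry: 3 bits of frameno, 6 bits of spi/regno, 1 is_reg bit; the entry count kept in the low 4 bits of the packed u64), here is a small stand-alone sketch that packs and unpacks a single entry. The helper name is invented and the constants simply follow the 3/6/1-bit layout from the comment; it only mirrors the arithmetic of linked_regs_pack()/linked_regs_unpack():

#include <stdint.h>
#include <stdio.h>

#define LR_FRAMENO_BITS 3
#define LR_SPI_BITS     6
#define LR_SIZE_BITS    4
#define LR_SPI_OFF      LR_FRAMENO_BITS
#define LR_IS_REG_OFF   (LR_SPI_OFF + LR_SPI_BITS)

/* Pack a single (frameno, spi_or_regno, is_reg) triple the same way
 * each vector element is packed above. */
static uint64_t pack_one(uint64_t frameno, uint64_t spi, int is_reg)
{
	uint64_t tmp = 0;

	tmp |= frameno;					/* bits 0..2 */
	tmp |= spi << LR_SPI_OFF;			/* bits 3..8 */
	tmp |= (uint64_t)(is_reg ? 1 : 0) << LR_IS_REG_OFF; /* bit 9 */
	return tmp;
}

int main(void)
{
	/* one register entry: frame 2, register r6 */
	uint64_t val = pack_one(2, 6, 1);

	/* shift the entry above the 4-bit element count; count = 1 */
	val = (val << LR_SIZE_BITS) | 1;

	printf("packed       = %#llx\n", (unsigned long long)val);
	printf("count        = %llu\n", (unsigned long long)(val & 0xf));
	val >>= LR_SIZE_BITS;
	printf("frameno      = %llu\n", (unsigned long long)(val & 0x7));
	printf("spi_or_regno = %llu\n", (unsigned long long)((val >> LR_SPI_OFF) & 0x3f));
	printf("is_reg       = %llu\n", (unsigned long long)((val >> LR_IS_REG_OFF) & 1));
	return 0;
}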
3735 if (env->cur_hist_ent) { in push_insn_history()
3739 WARN_ONCE((env->cur_hist_ent->flags & insn_flags) && in push_insn_history()
3740 (env->cur_hist_ent->flags & insn_flags) != insn_flags, in push_insn_history()
3742 env->insn_idx, env->cur_hist_ent->flags, insn_flags); in push_insn_history()
3743 env->cur_hist_ent->flags |= insn_flags; in push_insn_history()
3744 WARN_ONCE(env->cur_hist_ent->linked_regs != 0, in push_insn_history()
3746 env->insn_idx, env->cur_hist_ent->linked_regs); in push_insn_history()
3747 env->cur_hist_ent->linked_regs = linked_regs; in push_insn_history()
3751 if (cur->insn_hist_end + 1 > env->insn_hist_cap) { in push_insn_history()
3752 alloc_size = size_mul(cur->insn_hist_end + 1, sizeof(*p)); in push_insn_history()
3753 p = kvrealloc(env->insn_hist, alloc_size, GFP_USER); in push_insn_history()
3755 return -ENOMEM; in push_insn_history()
3756 env->insn_hist = p; in push_insn_history()
3757 env->insn_hist_cap = alloc_size / sizeof(*p); in push_insn_history()
3760 p = &env->insn_hist[cur->insn_hist_end]; in push_insn_history()
3761 p->idx = env->insn_idx; in push_insn_history()
3762 p->prev_idx = env->prev_insn_idx; in push_insn_history()
3763 p->flags = insn_flags; in push_insn_history()
3764 p->linked_regs = linked_regs; in push_insn_history()
3766 cur->insn_hist_end++; in push_insn_history()
3767 env->cur_hist_ent = p; in push_insn_history()
3775 if (hist_end > hist_start && env->insn_hist[hist_end - 1].idx == insn_idx) in get_insn_hist_entry()
3776 return &env->insn_hist[hist_end - 1]; in get_insn_hist_entry()
3782 * Return -ENOENT if we exhausted all instructions within given state.
3785 * insn index within the same state, e.g.: 3->4->5->3, so just because current
3798 u32 cnt = hist_end - hist_start; in get_prev_insn_idx()
3800 if (insn_idx == st->first_insn_idx) { in get_prev_insn_idx()
3802 return -ENOENT; in get_prev_insn_idx()
3803 if (cnt == 1 && env->insn_hist[hist_start].idx == insn_idx) in get_prev_insn_idx()
3804 return -ENOENT; in get_prev_insn_idx()
3807 if (cnt && env->insn_hist[hist_end - 1].idx == insn_idx) { in get_prev_insn_idx()
3808 (*hist_endp)--; in get_prev_insn_idx()
3809 return env->insn_hist[hist_end - 1].prev_idx; in get_prev_insn_idx()
3811 return insn_idx - 1; in get_prev_insn_idx()
3820 if (insn->src_reg != BPF_PSEUDO_KFUNC_CALL) in disasm_kfunc_name()
3823 desc_btf = find_kfunc_desc_btf(data, insn->off); in disasm_kfunc_name()
3827 func = btf_type_by_id(desc_btf, insn->imm); in disasm_kfunc_name()
3828 return btf_name_by_offset(desc_btf, func->name_off); in disasm_kfunc_name()
3833 bt->frame = frame; in bt_init()
3838 struct bpf_verifier_env *env = bt->env; in bt_reset()
3841 bt->env = env; in bt_reset()
3849 for (i = 0; i <= bt->frame; i++) in bt_empty()
3850 mask |= bt->reg_masks[i] | bt->stack_masks[i]; in bt_empty()
3857 if (bt->frame == MAX_CALL_FRAMES - 1) { in bt_subprog_enter()
3858 verbose(bt->env, "BUG subprog enter from frame %d\n", bt->frame); in bt_subprog_enter()
3860 return -EFAULT; in bt_subprog_enter()
3862 bt->frame++; in bt_subprog_enter()
3868 if (bt->frame == 0) { in bt_subprog_exit()
3869 verbose(bt->env, "BUG subprog exit from frame 0\n"); in bt_subprog_exit()
3871 return -EFAULT; in bt_subprog_exit()
3873 bt->frame--; in bt_subprog_exit()
3879 bt->reg_masks[frame] |= 1 << reg; in bt_set_frame_reg()
3884 bt->reg_masks[frame] &= ~(1 << reg); in bt_clear_frame_reg()
3889 bt_set_frame_reg(bt, bt->frame, reg); in bt_set_reg()
3894 bt_clear_frame_reg(bt, bt->frame, reg); in bt_clear_reg()
3899 bt->stack_masks[frame] |= 1ull << slot; in bt_set_frame_slot()
3904 bt->stack_masks[frame] &= ~(1ull << slot); in bt_clear_frame_slot()
3909 return bt->reg_masks[frame]; in bt_frame_reg_mask()
3914 return bt->reg_masks[bt->frame]; in bt_reg_mask()
3919 return bt->stack_masks[frame]; in bt_frame_stack_mask()
3924 return bt->stack_masks[bt->frame]; in bt_stack_mask()
3929 return bt->reg_masks[bt->frame] & (1 << reg); in bt_is_reg_set()
3934 return bt->reg_masks[frame] & (1 << reg); in bt_is_frame_reg_set()
3939 return bt->stack_masks[frame] & (1ull << slot); in bt_is_frame_slot_set()
3956 buf_sz -= n; in fmt_reg_mask()
3961 /* format stack slots bitmask, e.g., "-8,-24,-40" for 0x15 mask */
3972 n = snprintf(buf, buf_sz, "%s%d", first ? "" : ",", -(i + 1) * 8); in fmt_stack_mask()
3975 buf_sz -= n; in fmt_stack_mask()
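As the comment above notes, bit i of the stack mask stands for frame-pointer offset -(i + 1) * 8, so mask 0x15 (bits 0, 2 and 4) prints as "-8,-24,-40". A tiny stand-alone check of that mapping (helper name invented):

#include <stdio.h>
#include <stdint.h>

/* Print the fp-relative offsets encoded by a stack slot bitmask,
 * using the same bit -> -(i + 1) * 8 mapping as fmt_stack_mask(). */
static void print_stack_mask(uint64_t mask)
{
	int first = 1;

	for (int i = 0; i < 64; i++) {
		if (!(mask & (1ULL << i)))
			continue;
		printf("%s%d", first ? "" : ",", -(i + 1) * 8);
		first = 0;
	}
	printf("\n");
}

int main(void)
{
	print_stack_mask(0x15);	/* expected output: -8,-24,-40 */
	return 0;
}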
3981 /* If any register R in hist->linked_regs is marked as precise in bt,
3982 * do bt_set_frame_{reg,slot}(bt, R) for all registers in hist->linked_regs.
3990 if (!hist || hist->linked_regs == 0) in bt_sync_linked_regs()
3993 linked_regs_unpack(hist->linked_regs, &linked_regs); in bt_sync_linked_regs()
3997 if ((e->is_reg && bt_is_frame_reg_set(bt, e->frameno, e->regno)) || in bt_sync_linked_regs()
3998 (!e->is_reg && bt_is_frame_slot_set(bt, e->frameno, e->spi))) { in bt_sync_linked_regs()
4010 if (e->is_reg) in bt_sync_linked_regs()
4011 bt_set_frame_reg(bt, e->frameno, e->regno); in bt_sync_linked_regs()
4013 bt_set_frame_slot(bt, e->frameno, e->spi); in bt_sync_linked_regs()
4025 * - *would be* executed next, if jump history is viewed in forward order;
4026 * - *was* processed previously during backtracking.
4036 struct bpf_insn *insn = env->prog->insnsi + idx; in backtrack_insn()
4037 u8 class = BPF_CLASS(insn->code); in backtrack_insn()
4038 u8 opcode = BPF_OP(insn->code); in backtrack_insn()
4039 u8 mode = BPF_MODE(insn->code); in backtrack_insn()
4040 u32 dreg = insn->dst_reg; in backtrack_insn()
4041 u32 sreg = insn->src_reg; in backtrack_insn()
4044 if (insn->code == 0) in backtrack_insn()
4046 if (env->log.level & BPF_LOG_LEVEL2) { in backtrack_insn()
4047 fmt_reg_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, bt_reg_mask(bt)); in backtrack_insn()
4049 bt->frame, env->tmp_str_buf); in backtrack_insn()
4050 fmt_stack_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, bt_stack_mask(bt)); in backtrack_insn()
4051 verbose(env, "stack=%s before ", env->tmp_str_buf); in backtrack_insn()
4053 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks); in backtrack_insn()
4071 if (BPF_SRC(insn->code) == BPF_X) { in backtrack_insn()
4089 if (BPF_SRC(insn->code) == BPF_X) { in backtrack_insn()
4111 if (!hist || !(hist->flags & INSN_F_STACK_ACCESS)) in backtrack_insn()
4113 /* dreg = *(u64 *)[fp - off] was a fill from the stack. in backtrack_insn()
4114 * that [fp - off] slot contains scalar that needs to be in backtrack_insn()
4117 spi = insn_stack_access_spi(hist->flags); in backtrack_insn()
4118 fr = insn_stack_access_frameno(hist->flags); in backtrack_insn()
4126 return -ENOTSUPP; in backtrack_insn()
4128 if (!hist || !(hist->flags & INSN_F_STACK_ACCESS)) in backtrack_insn()
4130 spi = insn_stack_access_spi(hist->flags); in backtrack_insn()
4131 fr = insn_stack_access_frameno(hist->flags); in backtrack_insn()
4141 subprog_insn_idx = idx + insn->imm + 1; in backtrack_insn()
4144 return -EFAULT; in backtrack_insn()
4154 /* r1-r5 are invalidated after subprog call, in backtrack_insn()
4161 return -EFAULT; in backtrack_insn()
4169 * so only r1-r5 could be still requested as in backtrack_insn()
4170 * precise, r0 and r6-r10 or any stack slot in in backtrack_insn()
4176 return -EFAULT; in backtrack_insn()
4184 return -EFAULT; in backtrack_insn()
4186 /* propagate r1-r5 to the caller */ in backtrack_insn()
4190 bt_set_frame_reg(bt, bt->frame - 1, i); in backtrack_insn()
4194 return -EFAULT; in backtrack_insn()
4197 } else if (is_sync_callback_calling_insn(insn) && idx != subseq_idx - 1) { in backtrack_insn()
4198 /* exit from callback subprog to callback-calling helper or in backtrack_insn()
4202 * propagate precision of r1-r5 (if any requested), as they are in backtrack_insn()
4208 return -EFAULT; in backtrack_insn()
4213 return -EFAULT; in backtrack_insn()
4215 /* clear r1-r5 in callback subprog's mask */ in backtrack_insn()
4219 return -EFAULT; in backtrack_insn()
4226 if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL && insn->imm == 0) in backtrack_insn()
4227 return -ENOTSUPP; in backtrack_insn()
4231 /* if backtracing was looking for registers R1-R5 in backtrack_insn()
4236 return -EFAULT; in backtrack_insn()
4244 * precision to registers R1-R5 should have been found already. in backtrack_insn()
4245 * In case of a callback, it is ok to have R1-R5 marked for in backtrack_insn()
4255 return -EFAULT; in backtrack_insn()
4260 * whether the instruction at subseq_idx-1 is subprog in backtrack_insn()
4266 r0_precise = subseq_idx - 1 >= 0 && in backtrack_insn()
4267 bpf_pseudo_call(&env->prog->insnsi[subseq_idx - 1]) && in backtrack_insn()
4272 return -EFAULT; in backtrack_insn()
4276 /* r6-r9 and stack slots will stay set in caller frame in backtrack_insn()
4280 } else if (BPF_SRC(insn->code) == BPF_X) { in backtrack_insn()
4291 } else if (BPF_SRC(insn->code) == BPF_K) { in backtrack_insn()
4294 * this insn, so for the K-based conditional in backtrack_insn()
4308 return -ENOTSUPP; in backtrack_insn()
4345 * r9 -= r8
4376 if (env->log.level & BPF_LOG_LEVEL2) { in mark_all_scalars_precise()
4378 st->curframe); in mark_all_scalars_precise()
4384 * because precision markings in current non-checkpointed state are in mark_all_scalars_precise()
4387 for (st = st->parent; st; st = st->parent) { in mark_all_scalars_precise()
4388 for (i = 0; i <= st->curframe; i++) { in mark_all_scalars_precise()
4389 func = st->frame[i]; in mark_all_scalars_precise()
4391 reg = &func->regs[j]; in mark_all_scalars_precise()
4392 if (reg->type != SCALAR_VALUE || reg->precise) in mark_all_scalars_precise()
4394 reg->precise = true; in mark_all_scalars_precise()
4395 if (env->log.level & BPF_LOG_LEVEL2) { in mark_all_scalars_precise()
4400 for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { in mark_all_scalars_precise()
4401 if (!is_spilled_reg(&func->stack[j])) in mark_all_scalars_precise()
4403 reg = &func->stack[j].spilled_ptr; in mark_all_scalars_precise()
4404 if (reg->type != SCALAR_VALUE || reg->precise) in mark_all_scalars_precise()
4406 reg->precise = true; in mark_all_scalars_precise()
4407 if (env->log.level & BPF_LOG_LEVEL2) { in mark_all_scalars_precise()
4409 i, -(j + 1) * 8); in mark_all_scalars_precise()
4422 for (i = 0; i <= st->curframe; i++) { in mark_all_scalars_imprecise()
4423 func = st->frame[i]; in mark_all_scalars_imprecise()
4425 reg = &func->regs[j]; in mark_all_scalars_imprecise()
4426 if (reg->type != SCALAR_VALUE) in mark_all_scalars_imprecise()
4428 reg->precise = false; in mark_all_scalars_imprecise()
4430 for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { in mark_all_scalars_imprecise()
4431 if (!is_spilled_reg(&func->stack[j])) in mark_all_scalars_imprecise()
4433 reg = &func->stack[j].spilled_ptr; in mark_all_scalars_imprecise()
4434 if (reg->type != SCALAR_VALUE) in mark_all_scalars_imprecise()
4436 reg->precise = false; in mark_all_scalars_imprecise()
4458 * i.e., it is not yet put into env->explored_states, and it has no children
4461 * reached or b) checkpointed and put into env->explored_states, branching out
4530 struct backtrack_state *bt = &env->bt; in __mark_chain_precision()
4531 struct bpf_verifier_state *st = env->cur_state; in __mark_chain_precision()
4532 int first_idx = st->first_insn_idx; in __mark_chain_precision()
4533 int last_idx = env->insn_idx; in __mark_chain_precision()
4534 int subseq_idx = -1; in __mark_chain_precision()
4540 if (!env->bpf_capable) in __mark_chain_precision()
4544 bt_init(bt, env->cur_state->curframe); in __mark_chain_precision()
4550 func = st->frame[bt->frame]; in __mark_chain_precision()
4552 reg = &func->regs[regno]; in __mark_chain_precision()
4553 if (reg->type != SCALAR_VALUE) { in __mark_chain_precision()
4555 return -EFAULT; in __mark_chain_precision()
4565 u32 hist_start = st->insn_hist_start; in __mark_chain_precision()
4566 u32 hist_end = st->insn_hist_end; in __mark_chain_precision()
4569 if (env->log.level & BPF_LOG_LEVEL2) { in __mark_chain_precision()
4571 bt->frame, last_idx, first_idx, subseq_idx); in __mark_chain_precision()
4577 * requested precise registers are R1-R5 in __mark_chain_precision()
4580 if (st->curframe == 0 && in __mark_chain_precision()
4581 st->frame[0]->subprogno > 0 && in __mark_chain_precision()
4582 st->frame[0]->callsite == BPF_MAIN_FUNC && in __mark_chain_precision()
4587 reg = &st->frame[0]->regs[i]; in __mark_chain_precision()
4589 if (reg->type == SCALAR_VALUE) in __mark_chain_precision()
4590 reg->precise = true; in __mark_chain_precision()
4596 st->frame[0]->subprogno, bt_reg_mask(bt), bt_stack_mask(bt)); in __mark_chain_precision()
4598 return -EFAULT; in __mark_chain_precision()
4609 if (err == -ENOTSUPP) { in __mark_chain_precision()
4610 mark_all_scalars_precise(env, env->cur_state); in __mark_chain_precision()
4624 if (i == -ENOENT) in __mark_chain_precision()
4626 if (i >= env->prog->len) { in __mark_chain_precision()
4635 return -EFAULT; in __mark_chain_precision()
4638 st = st->parent; in __mark_chain_precision()
4642 for (fr = bt->frame; fr >= 0; fr--) { in __mark_chain_precision()
4643 func = st->frame[fr]; in __mark_chain_precision()
4646 reg = &func->regs[i]; in __mark_chain_precision()
4647 if (reg->type != SCALAR_VALUE) { in __mark_chain_precision()
4651 if (reg->precise) in __mark_chain_precision()
4654 reg->precise = true; in __mark_chain_precision()
4659 if (i >= func->allocated_stack / BPF_REG_SIZE) { in __mark_chain_precision()
4661 i, func->allocated_stack / BPF_REG_SIZE); in __mark_chain_precision()
4663 return -EFAULT; in __mark_chain_precision()
4666 if (!is_spilled_scalar_reg(&func->stack[i])) { in __mark_chain_precision()
4670 reg = &func->stack[i].spilled_ptr; in __mark_chain_precision()
4671 if (reg->precise) in __mark_chain_precision()
4674 reg->precise = true; in __mark_chain_precision()
4676 if (env->log.level & BPF_LOG_LEVEL2) { in __mark_chain_precision()
4677 fmt_reg_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, in __mark_chain_precision()
4680 fr, env->tmp_str_buf); in __mark_chain_precision()
4681 fmt_stack_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, in __mark_chain_precision()
4683 verbose(env, "stack=%s: ", env->tmp_str_buf); in __mark_chain_precision()
4692 last_idx = st->last_insn_idx; in __mark_chain_precision()
4693 first_idx = st->first_insn_idx; in __mark_chain_precision()
4697 * something (e.g., stack access through non-r10 register), so in __mark_chain_precision()
4701 mark_all_scalars_precise(env, env->cur_state); in __mark_chain_precision()
4713 /* mark_chain_precision_batch() assumes that env->bt is set in the caller to
4718 return __mark_chain_precision(env, -1); in mark_chain_precision_batch()
4751 return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0); in register_is_null()
4757 return reg->type == SCALAR_VALUE && in is_reg_const()
4758 tnum_is_const(subreg32 ? tnum_subreg(reg->var_off) : reg->var_off); in is_reg_const()
4764 return subreg32 ? tnum_subreg(reg->var_off).value : reg->var_off.value; in reg_const_value()
4773 return reg->type != SCALAR_VALUE; in __is_pointer_value()
4779 if (src_reg->type != SCALAR_VALUE) in assign_scalar_id_before_mov()
4782 if (src_reg->id & BPF_ADD_CONST) { in assign_scalar_id_before_mov()
4785 * rY->id has special linked register already. in assign_scalar_id_before_mov()
4788 src_reg->id = 0; in assign_scalar_id_before_mov()
4789 src_reg->off = 0; in assign_scalar_id_before_mov()
4792 if (!src_reg->id && !tnum_is_const(src_reg->var_off)) in assign_scalar_id_before_mov()
4797 src_reg->id = ++env->id_gen; in assign_scalar_id_before_mov()
4800 /* Copy src state preserving dst->parent and dst->live fields */
4803 struct bpf_reg_state *parent = dst->parent; in copy_register_state()
4804 enum bpf_reg_liveness live = dst->live; in copy_register_state()
4807 dst->parent = parent; in copy_register_state()
4808 dst->live = live; in copy_register_state()
4818 copy_register_state(&state->stack[spi].spilled_ptr, reg); in save_register_state()
4820 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; in save_register_state()
4822 for (i = BPF_REG_SIZE; i > BPF_REG_SIZE - size; i--) in save_register_state()
4823 state->stack[spi].slot_type[i - 1] = STACK_SPILL; in save_register_state()
4826 for (; i; i--) in save_register_state()
4827 mark_stack_slot_misc(env, &state->stack[spi].slot_type[i - 1]); in save_register_state()
4832 return BPF_CLASS(insn->code) == BPF_ST && BPF_MODE(insn->code) == BPF_MEM; in is_bpf_st_mem()
4837 return fls64(reg->umax_value); in get_reg_width()
4844 struct bpf_subprog_info *subprog = &env->subprog_info[state->subprogno]; in check_fastcall_stack_contract()
4845 struct bpf_insn_aux_data *aux = env->insn_aux_data; in check_fastcall_stack_contract()
4848 if (subprog->fastcall_stack_off <= off || aux[insn_idx].fastcall_pattern) in check_fastcall_stack_contract()
4855 subprog->fastcall_stack_off = S16_MIN; in check_fastcall_stack_contract()
4859 for (i = subprog->start; i < (subprog + 1)->start; ++i) { in check_fastcall_stack_contract()
4875 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err; in check_stack_write_fixed_off()
4876 struct bpf_insn *insn = &env->prog->insnsi[insn_idx]; in check_stack_write_fixed_off()
4878 int insn_flags = insn_stack_access_flags(state->frameno, spi); in check_stack_write_fixed_off()
4880 /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0, in check_stack_write_fixed_off()
4883 if (!env->allow_ptr_leaks && in check_stack_write_fixed_off()
4884 is_spilled_reg(&state->stack[spi]) && in check_stack_write_fixed_off()
4885 !is_spilled_scalar_reg(&state->stack[spi]) && in check_stack_write_fixed_off()
4888 return -EACCES; in check_stack_write_fixed_off()
4891 cur = env->cur_state->frame[env->cur_state->curframe]; in check_stack_write_fixed_off()
4893 reg = &cur->regs[value_regno]; in check_stack_write_fixed_off()
4894 if (!env->bypass_spec_v4) { in check_stack_write_fixed_off()
4895 bool sanitize = reg && is_spillable_regtype(reg->type); in check_stack_write_fixed_off()
4898 u8 type = state->stack[spi].slot_type[i]; in check_stack_write_fixed_off()
4907 env->insn_aux_data[insn_idx].sanitize_stack_spill = true; in check_stack_write_fixed_off()
4916 if (reg && !(off % BPF_REG_SIZE) && reg->type == SCALAR_VALUE && env->bpf_capable) { in check_stack_write_fixed_off()
4926 state->stack[spi].spilled_ptr.id = 0; in check_stack_write_fixed_off()
4928 env->bpf_capable) { in check_stack_write_fixed_off()
4929 struct bpf_reg_state *tmp_reg = &env->fake_reg[0]; in check_stack_write_fixed_off()
4932 __mark_reg_known(tmp_reg, insn->imm); in check_stack_write_fixed_off()
4933 tmp_reg->type = SCALAR_VALUE; in check_stack_write_fixed_off()
4935 } else if (reg && is_spillable_regtype(reg->type)) { in check_stack_write_fixed_off()
4940 return -EACCES; in check_stack_write_fixed_off()
4942 if (state != cur && reg->type == PTR_TO_STACK) { in check_stack_write_fixed_off()
4944 return -EINVAL; in check_stack_write_fixed_off()
4951 state->stack[spi].spilled_ptr.type = NOT_INIT; in check_stack_write_fixed_off()
4953 if (is_stack_slot_special(&state->stack[spi])) in check_stack_write_fixed_off()
4955 scrub_spilled_slot(&state->stack[spi].slot_type[i]); in check_stack_write_fixed_off()
4966 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; in check_stack_write_fixed_off()
4970 (!reg && is_bpf_st_mem(insn) && insn->imm == 0)) { in check_stack_write_fixed_off()
4985 state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] = type; in check_stack_write_fixed_off()
4990 return push_insn_history(env, env->cur_state, insn_flags, 0); in check_stack_write_fixed_off()
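For a fixed stack offset, the code above derives a byte slot index slot = -off - 1 and a spill-slot index spi = slot / BPF_REG_SIZE. A small stand-alone illustration of that mapping, assuming the usual 8-byte BPF_REG_SIZE:

#include <stdio.h>

#define BPF_REG_SIZE 8

int main(void)
{
	/* offsets are negative and relative to the frame pointer r10 */
	int offs[] = { -1, -8, -9, -16, -24 };

	for (unsigned i = 0; i < sizeof(offs) / sizeof(offs[0]); i++) {
		int off = offs[i];
		int slot = -off - 1;		/* 0-based byte index below fp */
		int spi = slot / BPF_REG_SIZE;	/* 8-byte spill slot index */

		printf("off=%4d -> slot=%2d spi=%d\n", off, slot, spi);
	}
	return 0;
}

So bytes [-8, -1] land in spill slot 0, [-16, -9] in slot 1, and so on.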
5000 * 'off' includes 'regno->off'.
5001 * 'value_regno' can be -1, meaning that an unknown value is being written to
5023 struct bpf_insn *insn = &env->prog->insnsi[insn_idx]; in check_stack_write_var_off()
5030 cur = env->cur_state->frame[env->cur_state->curframe]; in check_stack_write_var_off()
5031 ptr_reg = &cur->regs[ptr_regno]; in check_stack_write_var_off()
5032 min_off = ptr_reg->smin_value + off; in check_stack_write_var_off()
5033 max_off = ptr_reg->smax_value + off + size; in check_stack_write_var_off()
5035 value_reg = &cur->regs[value_regno]; in check_stack_write_var_off()
5037 (!value_reg && is_bpf_st_mem(insn) && insn->imm == 0)) in check_stack_write_var_off()
5055 slot = -i - 1; in check_stack_write_var_off()
5057 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; in check_stack_write_var_off()
5060 if (!env->allow_ptr_leaks && *stype != STACK_MISC && *stype != STACK_ZERO) { in check_stack_write_var_off()
5072 verbose(env, "spilled ptr in range of var-offset stack write; insn %d, ptr off: %d", in check_stack_write_var_off()
5074 return -EINVAL; in check_stack_write_var_off()
5081 is_spilled_scalar_reg(&state->stack[spi])) { in check_stack_write_var_off()
5082 struct bpf_reg_state *spill_reg = &state->stack[spi].spilled_ptr; in check_stack_write_var_off()
5084 if (tnum_is_const(spill_reg->var_off) && spill_reg->var_off.value == 0) { in check_stack_write_var_off()
5091 state->stack[spi].spilled_ptr.type = NOT_INIT; in check_stack_write_var_off()
5107 if (*stype == STACK_INVALID && !env->allow_uninit_stack) { in check_stack_write_var_off()
5108 verbose(env, "uninit stack in range of var-offset write prohibited for !root; insn %d, off: %d", in check_stack_write_var_off()
5110 return -EINVAL; in check_stack_write_var_off()
5136 struct bpf_verifier_state *vstate = env->cur_state; in mark_reg_stack_read()
5137 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in mark_reg_stack_read()
5143 slot = -i - 1; in mark_reg_stack_read()
5146 stype = ptr_state->stack[spi].slot_type; in mark_reg_stack_read()
5151 if (zeros == max_off - min_off) { in mark_reg_stack_read()
5155 __mark_reg_const_zero(env, &state->regs[dst_regno]); in mark_reg_stack_read()
5158 mark_reg_unknown(env, state->regs, dst_regno); in mark_reg_stack_read()
5160 state->regs[dst_regno].live |= REG_LIVE_WRITTEN; in mark_reg_stack_read()
5167 * 'dst_regno' can be -1, meaning that the read value is not going to a
5177 struct bpf_verifier_state *vstate = env->cur_state; in check_stack_read_fixed_off()
5178 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in check_stack_read_fixed_off()
5179 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE; in check_stack_read_fixed_off()
5182 int insn_flags = insn_stack_access_flags(reg_state->frameno, spi); in check_stack_read_fixed_off()
5184 stype = reg_state->stack[spi].slot_type; in check_stack_read_fixed_off()
5185 reg = &reg_state->stack[spi].spilled_ptr; in check_stack_read_fixed_off()
5188 check_fastcall_stack_contract(env, state, env->insn_idx, off); in check_stack_read_fixed_off()
5190 if (is_spilled_reg(&reg_state->stack[spi])) { in check_stack_read_fixed_off()
5193 for (i = BPF_REG_SIZE - 1; i > 0 && stype[i - 1] == STACK_SPILL; i--) in check_stack_read_fixed_off()
5197 if (reg->type != SCALAR_VALUE) { in check_stack_read_fixed_off()
5198 verbose_linfo(env, env->insn_idx, "; "); in check_stack_read_fixed_off()
5200 return -EACCES; in check_stack_read_fixed_off()
5203 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); in check_stack_read_fixed_off()
5212 s32 subreg_def = state->regs[dst_regno].subreg_def; in check_stack_read_fixed_off()
5214 copy_register_state(&state->regs[dst_regno], reg); in check_stack_read_fixed_off()
5215 state->regs[dst_regno].subreg_def = subreg_def; in check_stack_read_fixed_off()
5221 state->regs[dst_regno].id = 0; in check_stack_read_fixed_off()
5226 type = stype[(slot - i) % BPF_REG_SIZE]; in check_stack_read_fixed_off()
5237 if (type == STACK_INVALID && env->allow_uninit_stack) in check_stack_read_fixed_off()
5241 return -EACCES; in check_stack_read_fixed_off()
5245 tnum_is_const(reg->var_off) && reg->var_off.value == 0) { in check_stack_read_fixed_off()
5246 __mark_reg_const_zero(env, &state->regs[dst_regno]); in check_stack_read_fixed_off()
5250 __mark_reg_const_zero(env, &state->regs[dst_regno]); in check_stack_read_fixed_off()
5253 mark_reg_unknown(env, state->regs, dst_regno); in check_stack_read_fixed_off()
5257 state->regs[dst_regno].live |= REG_LIVE_WRITTEN; in check_stack_read_fixed_off()
5260 copy_register_state(&state->regs[dst_regno], reg); in check_stack_read_fixed_off()
5265 state->regs[dst_regno].live |= REG_LIVE_WRITTEN; in check_stack_read_fixed_off()
5266 } else if (__is_pointer_value(env->allow_ptr_leaks, reg)) { in check_stack_read_fixed_off()
5267 /* If dst_regno==-1, the caller is asking us whether in check_stack_read_fixed_off()
5275 return -EACCES; in check_stack_read_fixed_off()
5277 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); in check_stack_read_fixed_off()
5280 type = stype[(slot - i) % BPF_REG_SIZE]; in check_stack_read_fixed_off()
5285 if (type == STACK_INVALID && env->allow_uninit_stack) in check_stack_read_fixed_off()
5289 return -EACCES; in check_stack_read_fixed_off()
5291 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); in check_stack_read_fixed_off()
5297 return push_insn_history(env, env->cur_state, insn_flags, 0); in check_stack_read_fixed_off()
5346 min_off = reg->smin_value + off; in check_stack_read_var_off()
5347 max_off = reg->smax_value + off; in check_stack_read_var_off()
5349 check_fastcall_stack_contract(env, ptr_state, env->insn_idx, min_off); in check_stack_read_var_off()
5360 * can be -1, meaning that the read value is not going to a register.
5370 bool var_off = !tnum_is_const(reg->var_off); in check_stack_read()
5379 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in check_stack_read()
5382 return -EACCES; in check_stack_read()
5394 off += reg->var_off.value; in check_stack_read()
5413 * 'off' includes 'ptr_regno->off', but not its variable offset (if any).
5415 * be -1, meaning that we're not writing from a register.
5427 if (tnum_is_const(reg->var_off)) { in check_stack_write()
5428 off += reg->var_off.value; in check_stack_write()
5451 map->value_size, off, size); in check_map_access_type()
5452 return -EACCES; in check_map_access_type()
5457 map->value_size, off, size); in check_map_access_type()
5458 return -EACCES; in check_map_access_type()
5476 switch (reg->type) { in __check_mem_access()
5489 off, size, regno, reg->id, off, mem_size); in __check_mem_access()
5497 return -EACCES; in __check_mem_access()
5505 struct bpf_verifier_state *vstate = env->cur_state; in check_mem_region_access()
5506 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in check_mem_region_access()
5507 struct bpf_reg_state *reg = &state->regs[regno]; in check_mem_region_access()
5520 if (reg->smin_value < 0 && in check_mem_region_access()
5521 (reg->smin_value == S64_MIN || in check_mem_region_access()
5522 (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) || in check_mem_region_access()
5523 reg->smin_value + off < 0)) { in check_mem_region_access()
5526 return -EACCES; in check_mem_region_access()
5528 err = __check_mem_access(env, regno, reg->smin_value + off, size, in check_mem_region_access()
5538 * If reg->umax_value + off could overflow, treat that as unbounded too. in check_mem_region_access()
5540 if (reg->umax_value >= BPF_MAX_VAR_OFF) { in check_mem_region_access()
5543 return -EACCES; in check_mem_region_access()
5545 err = __check_mem_access(env, regno, reg->umax_value + off, size, in check_mem_region_access()
5560 /* Access to this pointer-typed register or passing it to a helper in __check_ptr_off_reg()
5564 if (reg->off < 0) { in __check_ptr_off_reg()
5566 reg_type_str(env, reg->type), regno, reg->off); in __check_ptr_off_reg()
5567 return -EACCES; in __check_ptr_off_reg()
5570 if (!fixed_off_ok && reg->off) { in __check_ptr_off_reg()
5572 reg_type_str(env, reg->type), regno, reg->off); in __check_ptr_off_reg()
5573 return -EACCES; in __check_ptr_off_reg()
5576 if (!tnum_is_const(reg->var_off) || reg->var_off.value) { in __check_ptr_off_reg()
5579 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in __check_ptr_off_reg()
5581 reg_type_str(env, reg->type), tn_buf); in __check_ptr_off_reg()
5582 return -EACCES; in __check_ptr_off_reg()
5598 const char *targ_name = btf_type_name(kptr_field->kptr.btf, kptr_field->kptr.btf_id); in map_kptr_match_type()
5602 if (btf_is_kernel(reg->btf)) { in map_kptr_match_type()
5606 if (kptr_field->type == BPF_KPTR_UNREF) in map_kptr_match_type()
5610 if (kptr_field->type == BPF_KPTR_PERCPU) in map_kptr_match_type()
5614 if (base_type(reg->type) != PTR_TO_BTF_ID || (type_flag(reg->type) & ~perm_flags)) in map_kptr_match_type()
5617 /* We need to verify reg->type and reg->btf, before accessing reg->btf */ in map_kptr_match_type()
5618 reg_name = btf_type_name(reg->btf, reg->btf_id); in map_kptr_match_type()
5624 * reg->off and reg->ref_obj_id are not needed here. in map_kptr_match_type()
5627 return -EACCES; in map_kptr_match_type()
5630 * we also need to take into account the reg->off. in map_kptr_match_type()
5641 * val->foo = v; // reg->off is zero, btf and btf_id match type in map_kptr_match_type()
5642 * val->bar = &v->br; // reg->off is still zero, but we need to retry with in map_kptr_match_type()
5644 * val->baz = &v->bz; // reg->off is non-zero, so struct needs to be walked in map_kptr_match_type()
5647 * In the kptr_ref case, check_func_arg_reg_off already ensures reg->off in map_kptr_match_type()
5653 if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off, in map_kptr_match_type()
5654 kptr_field->kptr.btf, kptr_field->kptr.btf_id, in map_kptr_match_type()
5655 kptr_field->type != BPF_KPTR_UNREF)) in map_kptr_match_type()
5660 reg_type_str(env, reg->type), reg_name); in map_kptr_match_type()
5662 if (kptr_field->type == BPF_KPTR_UNREF) in map_kptr_match_type()
5667 return -EINVAL; in map_kptr_match_type()
5672 return env->prog->sleepable || in in_sleepable()
5673 (env->cur_state && env->cur_state->in_sleepable); in in_sleepable()
5676 /* The non-sleepable programs and sleepable programs with explicit bpf_rcu_read_lock()
5681 return env->cur_state->active_rcu_lock || in in_rcu_cs()
5682 env->cur_state->active_locks || in in_rcu_cs()
5714 if (btf_is_kernel(kptr_field->kptr.btf)) in kptr_pointee_btf_record()
5717 meta = btf_find_struct_meta(kptr_field->kptr.btf, in kptr_pointee_btf_record()
5718 kptr_field->kptr.btf_id); in kptr_pointee_btf_record()
5720 return meta ? meta->record : NULL; in kptr_pointee_btf_record()
5725 const struct btf_field_kptr *kptr = &field->kptr; in rcu_safe_kptr()
5727 return field->type == BPF_KPTR_PERCPU || in rcu_safe_kptr()
5728 (field->type == BPF_KPTR_REF && rcu_protected_object(kptr->btf, kptr->btf_id)); in rcu_safe_kptr()
5739 if (kptr_field->type == BPF_KPTR_PERCPU) in btf_ld_kptr_type()
5741 else if (!btf_is_kernel(kptr_field->kptr.btf)) in btf_ld_kptr_type()
5760 t = btf_type_by_id(field->kptr.btf, field->kptr.btf_id); in mark_uptr_ld_reg()
5763 reg->type = PTR_TO_MEM | PTR_MAYBE_NULL; in mark_uptr_ld_reg()
5764 reg->mem_size = t->size; in mark_uptr_ld_reg()
5765 reg->id = ++env->id_gen; in mark_uptr_ld_reg()
5774 struct bpf_insn *insn = &env->prog->insnsi[insn_idx]; in check_map_kptr_access()
5775 int class = BPF_CLASS(insn->code); in check_map_kptr_access()
5779 * - Reject cases where variable offset may touch kptr in check_map_kptr_access()
5780 * - size of access (must be BPF_DW) in check_map_kptr_access()
5781 * - tnum_is_const(reg->var_off) in check_map_kptr_access()
5782 * - kptr_field->offset == off + reg->var_off.value in check_map_kptr_access()
5785 if (BPF_MODE(insn->code) != BPF_MEM) { in check_map_kptr_access()
5787 return -EACCES; in check_map_kptr_access()
5794 (kptr_field->type == BPF_KPTR_REF || kptr_field->type == BPF_KPTR_PERCPU)) { in check_map_kptr_access()
5796 return -EACCES; in check_map_kptr_access()
5798 if (class != BPF_LDX && kptr_field->type == BPF_UPTR) { in check_map_kptr_access()
5800 return -EACCES; in check_map_kptr_access()
5804 if (kptr_field->type == BPF_UPTR) in check_map_kptr_access()
5810 mark_btf_ld_reg(env, cur_regs(env), value_regno, PTR_TO_BTF_ID, kptr_field->kptr.btf, in check_map_kptr_access()
5811 kptr_field->kptr.btf_id, btf_ld_kptr_type(env, kptr_field)); in check_map_kptr_access()
5816 return -EACCES; in check_map_kptr_access()
5818 if (insn->imm) { in check_map_kptr_access()
5820 kptr_field->offset); in check_map_kptr_access()
5821 return -EACCES; in check_map_kptr_access()
5825 return -EACCES; in check_map_kptr_access()
5835 struct bpf_verifier_state *vstate = env->cur_state; in check_map_access()
5836 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in check_map_access()
5837 struct bpf_reg_state *reg = &state->regs[regno]; in check_map_access()
5838 struct bpf_map *map = reg->map_ptr; in check_map_access()
5842 err = check_mem_region_access(env, regno, off, size, map->value_size, in check_map_access()
5847 if (IS_ERR_OR_NULL(map->record)) in check_map_access()
5849 rec = map->record; in check_map_access()
5850 for (i = 0; i < rec->cnt; i++) { in check_map_access()
5851 struct btf_field *field = &rec->fields[i]; in check_map_access()
5852 u32 p = field->offset; in check_map_access()
5858 if (reg->smin_value + off < p + field->size && in check_map_access()
5859 p < reg->umax_value + off + size) { in check_map_access()
5860 switch (field->type) { in check_map_access()
5867 btf_field_type_name(field->type)); in check_map_access()
5868 return -EACCES; in check_map_access()
5870 if (!tnum_is_const(reg->var_off)) { in check_map_access()
5872 btf_field_type_name(field->type)); in check_map_access()
5873 return -EACCES; in check_map_access()
5875 if (p != off + reg->var_off.value) { in check_map_access()
5877 btf_field_type_name(field->type), in check_map_access()
5878 p, off + reg->var_off.value); in check_map_access()
5879 return -EACCES; in check_map_access()
5883 btf_field_type_name(field->type)); in check_map_access()
5884 return -EACCES; in check_map_access()
5889 btf_field_type_name(field->type)); in check_map_access()
5890 return -EACCES; in check_map_access()
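check_map_access() above rejects any access whose possible byte range [smin + off, umax + off + size) overlaps a special field (spin lock, timer, kptr, ...) starting at offset p, and for kptrs additionally requires a constant offset landing exactly on the field. A stand-alone sketch of just the interval-overlap test, with invented names:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* Does the access range [smin + off, umax + off + size) overlap the
 * special field occupying [p, p + field_size)?  Mirrors the overlap
 * condition used in check_map_access(). */
static bool touches_field(int64_t smin, uint64_t umax, int off, int size,
			  uint32_t p, uint32_t field_size)
{
	return smin + off < (int64_t)(p + field_size) &&
	       (int64_t)p < (int64_t)(umax + off + size);
}

int main(void)
{
	/* field (e.g. a kptr) at offset 16, 8 bytes wide */
	printf("%d\n", touches_field(0, 0, 8, 8, 16, 8));	/* 0: [8,16) misses the field */
	printf("%d\n", touches_field(0, 0, 12, 8, 16, 8));	/* 1: [12,20) overlaps it */
	printf("%d\n", touches_field(0, 8, 8, 8, 16, 8));	/* 1: variable offset may reach into it */
	return 0;
}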
5903 enum bpf_prog_type prog_type = resolve_prog_type(env->prog); in may_access_direct_pkt_data()
5925 return meta->pkt_access; in may_access_direct_pkt_data()
5927 env->seen_direct_write = true; in may_access_direct_pkt_data()
5932 env->seen_direct_write = true; in may_access_direct_pkt_data()
5949 * reg->range we have comes after that. We are only checking the fixed in check_packet_access()
5956 if (reg->smin_value < 0) { in check_packet_access()
5959 return -EACCES; in check_packet_access()
5962 err = reg->range < 0 ? -EINVAL : in check_packet_access()
5963 __check_mem_access(env, regno, off, size, reg->range, in check_packet_access()
5970 /* __check_mem_access has made sure "off + size - 1" is within u16. in check_packet_access()
5971 * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff, in check_packet_access()
5974 * Therefore, "off + reg->umax_value + size - 1" won't overflow u32. in check_packet_access()
5976 env->prog->aux->max_pkt_offset = in check_packet_access()
5977 max_t(u32, env->prog->aux->max_pkt_offset, in check_packet_access()
5978 off + reg->umax_value + size - 1); in check_packet_access()
5990 .log = &env->log, in check_ctx_access()
5995 if (env->ops->is_valid_access && in check_ctx_access()
5996 env->ops->is_valid_access(off, size, t, env->prog, &info)) { in check_ctx_access()
6011 env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size; in check_ctx_access()
6014 if (env->prog->aux->max_ctx_offset < off + size) in check_ctx_access()
6015 env->prog->aux->max_ctx_offset = off + size; in check_ctx_access()
6020 return -EACCES; in check_ctx_access()
6030 return -EACCES; in check_flow_keys_access()
6044 if (reg->smin_value < 0) { in check_sock_access()
6047 return -EACCES; in check_sock_access()
6050 switch (reg->type) { in check_sock_access()
6069 env->insn_aux_data[insn_idx].ctx_field_size = in check_sock_access()
6075 regno, reg_type_str(env, reg->type), off, size); in check_sock_access()
6077 return -EACCES; in check_sock_access()
6082 return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno)); in is_pointer_value()
6089 return reg->type == PTR_TO_CTX; in is_ctx_reg()
6096 return type_is_sk_pointer(reg->type); in is_sk_reg()
6103 return type_is_pkt_pointer(reg->type); in is_pkt_reg()
6111 return reg->type == PTR_TO_FLOW_KEYS; in is_flow_key_reg()
6118 return reg->type == PTR_TO_ARENA; in is_arena_reg()
6133 if (reg->ref_obj_id) in is_trusted_reg()
6137 if (reg2btf_ids[base_type(reg->type)] && in is_trusted_reg()
6138 !bpf_type_has_unsafe_modifiers(reg->type)) in is_trusted_reg()
6143 * other type modifiers may be safe, but we elect to take an opt-in in is_trusted_reg()
6150 return type_flag(reg->type) & BPF_REG_TRUSTED_MODIFIERS && in is_trusted_reg()
6151 !bpf_type_has_unsafe_modifiers(reg->type); in is_trusted_reg()
6156 return reg->type & MEM_RCU; in is_rcu_reg()
6185 reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off)); in check_pkt_ptr_alignment()
6189 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in check_pkt_ptr_alignment()
6192 ip_align, tn_buf, reg->off, off, size); in check_pkt_ptr_alignment()
6193 return -EACCES; in check_pkt_ptr_alignment()
6210 reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off)); in check_generic_ptr_alignment()
6214 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in check_generic_ptr_alignment()
6216 pointer_desc, tn_buf, reg->off, off, size); in check_generic_ptr_alignment()
6217 return -EACCES; in check_generic_ptr_alignment()
6227 bool strict = env->strict_alignment || strict_alignment_once; in check_ptr_alignment()
6230 switch (reg->type) { in check_ptr_alignment()
6287 switch (prog->type) { in bpf_enable_priv_stack()
6296 if (prog->aux->priv_stack_requested || bpf_prog_check_recur(prog)) in bpf_enable_priv_stack()
6308 if (env->prog->jit_requested) in round_up_stack_depth()
6311 /* round up to 32 bytes, since this is the granularity in round_up_stack_depth()
6326 struct bpf_subprog_info *subprog = env->subprog_info; in check_max_stack_depth_subprog()
6327 struct bpf_insn *insn = env->prog->insnsi; in check_max_stack_depth_subprog()
6345 * func1 -> sub rsp, 128 in check_max_stack_depth_subprog()
6346 * subfunc1 -> sub rsp, 256 in check_max_stack_depth_subprog()
6347 * tailcall1 -> add rsp, 256 in check_max_stack_depth_subprog()
6348 * func2 -> sub rsp, 192 (total stack size = 128 + 192 = 320) in check_max_stack_depth_subprog()
6349 * subfunc2 -> sub rsp, 64 in check_max_stack_depth_subprog()
6350 * subfunc22 -> sub rsp, 128 in check_max_stack_depth_subprog()
6351 * tailcall2 -> add rsp, 128 in check_max_stack_depth_subprog()
6352 * func3 -> sub rsp, 32 (total stack size 128 + 192 + 64 + 32 = 416) in check_max_stack_depth_subprog()
6361 return -EACCES; in check_max_stack_depth_subprog()
6379 return -EACCES; in check_max_stack_depth_subprog()
6386 return -EACCES; in check_max_stack_depth_subprog()
6412 return -EINVAL; in check_max_stack_depth_subprog()
6427 return -EFAULT; in check_max_stack_depth_subprog()
6432 return -EFAULT; in check_max_stack_depth_subprog()
6439 return -EINVAL; in check_max_stack_depth_subprog()
6454 return -E2BIG; in check_max_stack_depth_subprog()
6467 return -EINVAL; in check_max_stack_depth_subprog()
6472 env->prog->aux->tail_call_reachable = true; in check_max_stack_depth_subprog()
6480 depth -= round_up_stack_depth(env, subprog[idx].stack_depth); in check_max_stack_depth_subprog()
6481 frame--; in check_max_stack_depth_subprog()
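check_max_stack_depth_subprog() accumulates the rounded-up stack depth of every frame along a call chain and fails once the total exceeds the 512-byte MAX_BPF_STACK limit. A simplified stand-alone model of that accounting, using the 32-byte interpreter rounding mentioned above (names invented; tail-call and private-stack handling omitted):

#include <stdio.h>

#define MAX_BPF_STACK 512

static int round_up_32(int depth)
{
	return (depth + 31) & ~31;
}

/* Sum rounded-up frame sizes along one call chain and report whether
 * the chain fits in the BPF stack. */
static int chain_fits(const int *frames, int n)
{
	int depth = 0;

	for (int i = 0; i < n; i++) {
		depth += round_up_32(frames[i]);
		if (depth > MAX_BPF_STACK)
			return 0;
	}
	return 1;
}

int main(void)
{
	int ok[]  = { 128, 256, 64 };	/* 448 bytes total: fits */
	int bad[] = { 200, 200, 200 };	/* 3 * 224 = 672 bytes after rounding: too deep */

	printf("ok chain fits:  %d\n", chain_fits(ok, 3));
	printf("bad chain fits: %d\n", chain_fits(bad, 3));
	return 0;
}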
6490 struct bpf_subprog_info *si = env->subprog_info; in check_max_stack_depth()
6494 for (int i = 0; i < env->subprog_cnt; i++) { in check_max_stack_depth()
6502 priv_stack_mode = bpf_enable_priv_stack(env->prog); in check_max_stack_depth()
6512 for (int i = env->subprog_cnt - 1; i >= 0; i--) { in check_max_stack_depth()
6521 for (int i = 0; i < env->subprog_cnt; i++) { in check_max_stack_depth()
6523 env->prog->aux->jits_use_priv_stack = true; in check_max_stack_depth()
6535 int start = idx + insn->imm + 1, subprog; in get_callee_stack_depth()
6541 return -EFAULT; in get_callee_stack_depth()
6543 return env->subprog_info[subprog].stack_depth; in get_callee_stack_depth()
6556 return -EACCES; in __check_buffer_access()
6558 if (!tnum_is_const(reg->var_off) || reg->var_off.value) { in __check_buffer_access()
6561 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in __check_buffer_access()
6565 return -EACCES; in __check_buffer_access()
6581 if (off + size > env->prog->aux->max_tp_access) in check_tp_buffer_access()
6582 env->prog->aux->max_tp_access = off + size; in check_tp_buffer_access()
6593 const char *buf_info = type_is_rdonly_mem(reg->type) ? "rdonly" : "rdwr"; in check_buffer_access()
6606 /* BPF architecture zero-extends alu32 ops into 64-bit registers */ in zext_32_to_64()
6609 reg->var_off = tnum_subreg(reg->var_off); in zext_32_to_64()
6621 reg->var_off = tnum_cast(reg->var_off, size); in coerce_reg_to_size()
6624 mask = ((u64)1 << (size * 8)) - 1; in coerce_reg_to_size()
6625 if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) { in coerce_reg_to_size()
6626 reg->umin_value &= mask; in coerce_reg_to_size()
6627 reg->umax_value &= mask; in coerce_reg_to_size()
6629 reg->umin_value = 0; in coerce_reg_to_size()
6630 reg->umax_value = mask; in coerce_reg_to_size()
6632 reg->smin_value = reg->umin_value; in coerce_reg_to_size()
6633 reg->smax_value = reg->umax_value; in coerce_reg_to_size()
6636 * values are also truncated so we push 64-bit bounds into in coerce_reg_to_size()
6637 * 32-bit bounds. Above were truncated < 32-bits already. in coerce_reg_to_size()
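coerce_reg_to_size() narrows a scalar after a 1/2/4-byte access: var_off is truncated to the load size, and the unsigned bounds are kept only when umin and umax agree in all bits above the narrow width; otherwise they widen to the full [0, mask] range. A stand-alone sketch of just that masking step (plain C arithmetic, nothing kernel-specific assumed):

#include <stdio.h>
#include <stdint.h>

struct bounds { uint64_t umin, umax; };

/* Truncate unsigned bounds to `size` bytes (size in {1, 2, 4}): keep
 * the bounds if both endpoints share the same upper bits, otherwise
 * fall back to the full range of the narrow type. */
static struct bounds coerce(struct bounds b, int size)
{
	uint64_t mask = ((uint64_t)1 << (size * 8)) - 1;

	if ((b.umin & ~mask) == (b.umax & ~mask)) {
		b.umin &= mask;
		b.umax &= mask;
	} else {
		b.umin = 0;
		b.umax = mask;
	}
	return b;
}

int main(void)
{
	struct bounds a = coerce((struct bounds){ 0x1005, 0x10ff }, 1);
	struct bounds c = coerce((struct bounds){ 0x00f0, 0x0110 }, 1);

	printf("same upper bits:      [%#llx, %#llx]\n",
	       (unsigned long long)a.umin, (unsigned long long)a.umax);
	printf("different upper bits: [%#llx, %#llx]\n",
	       (unsigned long long)c.umin, (unsigned long long)c.umax);
	return 0;
}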
6648 reg->smin_value = reg->s32_min_value = S8_MIN; in set_sext64_default_val()
6649 reg->smax_value = reg->s32_max_value = S8_MAX; in set_sext64_default_val()
6651 reg->smin_value = reg->s32_min_value = S16_MIN; in set_sext64_default_val()
6652 reg->smax_value = reg->s32_max_value = S16_MAX; in set_sext64_default_val()
6655 reg->smin_value = reg->s32_min_value = S32_MIN; in set_sext64_default_val()
6656 reg->smax_value = reg->s32_max_value = S32_MAX; in set_sext64_default_val()
6658 reg->umin_value = reg->u32_min_value = 0; in set_sext64_default_val()
6659 reg->umax_value = U64_MAX; in set_sext64_default_val()
6660 reg->u32_max_value = U32_MAX; in set_sext64_default_val()
6661 reg->var_off = tnum_unknown; in set_sext64_default_val()
6670 if (tnum_is_const(reg->var_off)) { in coerce_reg_to_size_sx()
6671 u64_cval = reg->var_off.value; in coerce_reg_to_size_sx()
6673 reg->var_off = tnum_const((s8)u64_cval); in coerce_reg_to_size_sx()
6675 reg->var_off = tnum_const((s16)u64_cval); in coerce_reg_to_size_sx()
6678 reg->var_off = tnum_const((s32)u64_cval); in coerce_reg_to_size_sx()
6680 u64_cval = reg->var_off.value; in coerce_reg_to_size_sx()
6681 reg->smax_value = reg->smin_value = u64_cval; in coerce_reg_to_size_sx()
6682 reg->umax_value = reg->umin_value = u64_cval; in coerce_reg_to_size_sx()
6683 reg->s32_max_value = reg->s32_min_value = u64_cval; in coerce_reg_to_size_sx()
6684 reg->u32_max_value = reg->u32_min_value = u64_cval; in coerce_reg_to_size_sx()
6688 top_smax_value = ((u64)reg->smax_value >> num_bits) << num_bits; in coerce_reg_to_size_sx()
6689 top_smin_value = ((u64)reg->smin_value >> num_bits) << num_bits; in coerce_reg_to_size_sx()
6696 init_s64_max = (s8)reg->smax_value; in coerce_reg_to_size_sx()
6697 init_s64_min = (s8)reg->smin_value; in coerce_reg_to_size_sx()
6699 init_s64_max = (s16)reg->smax_value; in coerce_reg_to_size_sx()
6700 init_s64_min = (s16)reg->smin_value; in coerce_reg_to_size_sx()
6702 init_s64_max = (s32)reg->smax_value; in coerce_reg_to_size_sx()
6703 init_s64_min = (s32)reg->smin_value; in coerce_reg_to_size_sx()
6711 reg->s32_min_value = reg->smin_value = s64_min; in coerce_reg_to_size_sx()
6712 reg->s32_max_value = reg->smax_value = s64_max; in coerce_reg_to_size_sx()
6713 reg->u32_min_value = reg->umin_value = s64_min; in coerce_reg_to_size_sx()
6714 reg->u32_max_value = reg->umax_value = s64_max; in coerce_reg_to_size_sx()
6715 reg->var_off = tnum_range(s64_min, s64_max); in coerce_reg_to_size_sx()
6726 reg->s32_min_value = S8_MIN; in set_sext32_default_val()
6727 reg->s32_max_value = S8_MAX; in set_sext32_default_val()
6730 reg->s32_min_value = S16_MIN; in set_sext32_default_val()
6731 reg->s32_max_value = S16_MAX; in set_sext32_default_val()
6733 reg->u32_min_value = 0; in set_sext32_default_val()
6734 reg->u32_max_value = U32_MAX; in set_sext32_default_val()
6735 reg->var_off = tnum_subreg(tnum_unknown); in set_sext32_default_val()
6744 if (tnum_is_const(reg->var_off)) { in coerce_subreg_to_size_sx()
6745 u32_val = reg->var_off.value; in coerce_subreg_to_size_sx()
6747 reg->var_off = tnum_const((s8)u32_val); in coerce_subreg_to_size_sx()
6749 reg->var_off = tnum_const((s16)u32_val); in coerce_subreg_to_size_sx()
6751 u32_val = reg->var_off.value; in coerce_subreg_to_size_sx()
6752 reg->s32_min_value = reg->s32_max_value = u32_val; in coerce_subreg_to_size_sx()
6753 reg->u32_min_value = reg->u32_max_value = u32_val; in coerce_subreg_to_size_sx()
6757 top_smax_value = ((u32)reg->s32_max_value >> num_bits) << num_bits; in coerce_subreg_to_size_sx()
6758 top_smin_value = ((u32)reg->s32_min_value >> num_bits) << num_bits; in coerce_subreg_to_size_sx()
6765 init_s32_max = (s8)reg->s32_max_value; in coerce_subreg_to_size_sx()
6766 init_s32_min = (s8)reg->s32_min_value; in coerce_subreg_to_size_sx()
6769 init_s32_max = (s16)reg->s32_max_value; in coerce_subreg_to_size_sx()
6770 init_s32_min = (s16)reg->s32_min_value; in coerce_subreg_to_size_sx()
6776 reg->s32_min_value = s32_min; in coerce_subreg_to_size_sx()
6777 reg->s32_max_value = s32_max; in coerce_subreg_to_size_sx()
6778 reg->u32_min_value = (u32)s32_min; in coerce_subreg_to_size_sx()
6779 reg->u32_max_value = (u32)s32_max; in coerce_subreg_to_size_sx()
6780 reg->var_off = tnum_subreg(tnum_range(s32_min, s32_max)); in coerce_subreg_to_size_sx()
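The sign-extending variants first check that every value in the signed range keeps the same bits above the target width (top_smax_value == top_smin_value) before trusting the sign-extended endpoints; otherwise the range falls back to the full s8/s16/s32 defaults. A stand-alone sketch that loosely mirrors that idea for an 8-bit sign extension (names invented; the 32-bit subregister variant and tnum handling are omitted):

#include <stdio.h>
#include <stdint.h>

/* Sign-extend the low 8 bits of the signed range [smin, smax].  Keep
 * the result only when it is safe in this sketch's terms: all values
 * in the range agree in the bits above bit 7, and the sign-extended
 * endpoints do not straddle zero (otherwise min/max could swap).
 * Fall back to the full s8 range, as set_sext64_default_val() does
 * for size 1. */
static void sext8_range(int64_t smin, int64_t smax)
{
	uint64_t top_max = ((uint64_t)smax >> 8) << 8;
	uint64_t top_min = ((uint64_t)smin >> 8) << 8;
	int64_t a = (int8_t)smax;
	int64_t b = (int8_t)smin;
	int64_t new_min = b < a ? b : a;
	int64_t new_max = b < a ? a : b;

	if (top_max != top_min || (new_min >= 0) != (new_max >= 0)) {
		new_min = INT8_MIN;
		new_max = INT8_MAX;
	}
	printf("[%lld, %lld] -> [%lld, %lld]\n",
	       (long long)smin, (long long)smax,
	       (long long)new_min, (long long)new_max);
}

int main(void)
{
	sext8_range(0x70, 0x7f);	/* stays [112, 127] */
	sext8_range(0x7f, 0x81);	/* endpoints straddle zero: [-128, 127] */
	sext8_range(0x180, 0x1f0);	/* both negative after sext: [-128, -16] */
	return 0;
}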
6790 /* A map is considered read-only if the following conditions are true: in bpf_map_is_rdonly()
6803 return (map->map_flags & BPF_F_RDONLY_PROG) && in bpf_map_is_rdonly()
6804 READ_ONCE(map->frozen) && in bpf_map_is_rdonly()
6815 err = map->ops->map_direct_value_addr(map, &addr, off); in bpf_map_direct_read()
6834 return -EINVAL; in bpf_map_direct_read()
6859 /* cgrp->kn is always accessible as documented in kernel/cgroup/cgroup.c */ in BTF_TYPE_SAFE_RCU()
6872 /* skb->sk, req->sk are not RCU protected, but we mark them as such
6902 /* no negative dentries in places where bpf can see them */ in BTF_TYPE_SAFE_TRUSTED()
6918 return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id, "__safe_rcu"); in type_is_rcu()
6929 return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id, "__safe_rcu_or_null"); in type_is_rcu_or_null()
6942 return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id, "__safe_trusted"); in type_is_trusted()
6951 return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id, in type_is_trusted_or_null()
6962 const struct btf_type *t = btf_type_by_id(reg->btf, reg->btf_id); in check_ptr_to_btf_access()
6963 const char *tname = btf_name_by_offset(reg->btf, t->name_off); in check_ptr_to_btf_access()
6969 if (!env->allow_ptr_leaks) { in check_ptr_to_btf_access()
6973 return -EPERM; in check_ptr_to_btf_access()
6975 if (!env->prog->gpl_compatible && btf_is_kernel(reg->btf)) { in check_ptr_to_btf_access()
6977 "Cannot access kernel 'struct %s' from non-GPL compatible program\n", in check_ptr_to_btf_access()
6979 return -EINVAL; in check_ptr_to_btf_access()
6985 return -EACCES; in check_ptr_to_btf_access()
6987 if (!tnum_is_const(reg->var_off) || reg->var_off.value) { in check_ptr_to_btf_access()
6990 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in check_ptr_to_btf_access()
6994 return -EACCES; in check_ptr_to_btf_access()
6997 if (reg->type & MEM_USER) { in check_ptr_to_btf_access()
7001 return -EACCES; in check_ptr_to_btf_access()
7004 if (reg->type & MEM_PERCPU) { in check_ptr_to_btf_access()
7008 return -EACCES; in check_ptr_to_btf_access()
7011 if (env->ops->btf_struct_access && !type_is_alloc(reg->type) && atype == BPF_WRITE) { in check_ptr_to_btf_access()
7012 if (!btf_is_kernel(reg->btf)) { in check_ptr_to_btf_access()
7013 verbose(env, "verifier internal error: reg->btf must be kernel btf\n"); in check_ptr_to_btf_access()
7014 return -EFAULT; in check_ptr_to_btf_access()
7016 ret = env->ops->btf_struct_access(&env->log, reg, off, size); in check_ptr_to_btf_access()
7022 if (atype != BPF_READ && !type_is_ptr_alloc_obj(reg->type)) { in check_ptr_to_btf_access()
7024 return -EACCES; in check_ptr_to_btf_access()
7027 if (type_is_alloc(reg->type) && !type_is_non_owning_ref(reg->type) && in check_ptr_to_btf_access()
7028 !(reg->type & MEM_RCU) && !reg->ref_obj_id) { in check_ptr_to_btf_access()
7029 verbose(env, "verifier internal error: ref_obj_id for allocated object must be non-zero\n"); in check_ptr_to_btf_access()
7030 return -EFAULT; in check_ptr_to_btf_access()
7033 ret = btf_struct_access(&env->log, reg, off, size, atype, &btf_id, &flag, &field_name); in check_ptr_to_btf_access()
7042 } else if (type_flag(reg->type) & PTR_UNTRUSTED) { in check_ptr_to_btf_access()
7053 * 'cgroups' pointer is untrusted if task->cgroups dereference in check_ptr_to_btf_access()
7055 * section. In a non-sleepable program it's trusted while in RCU CS (aka MEM_RCU). in check_ptr_to_btf_access()
7058 * A regular RCU-protected pointer with __rcu tag can also be deemed in check_ptr_to_btf_access()
7065 } else if (in_rcu_cs(env) && !type_may_be_null(reg->type)) { in check_ptr_to_btf_access()
7079 /* keep as-is */ in check_ptr_to_btf_access()
7100 mark_btf_ld_reg(env, regs, value_regno, ret, reg->btf, btf_id, flag); in check_ptr_to_btf_access()
7112 struct bpf_map *map = reg->map_ptr; in check_ptr_to_map_access()
7122 return -ENOTSUPP; in check_ptr_to_map_access()
7125 if (!map->ops->map_btf_id || !*map->ops->map_btf_id) { in check_ptr_to_map_access()
7127 map->map_type); in check_ptr_to_map_access()
7128 return -ENOTSUPP; in check_ptr_to_map_access()
7131 t = btf_type_by_id(btf_vmlinux, *map->ops->map_btf_id); in check_ptr_to_map_access()
7132 tname = btf_name_by_offset(btf_vmlinux, t->name_off); in check_ptr_to_map_access()
7134 if (!env->allow_ptr_leaks) { in check_ptr_to_map_access()
7138 return -EPERM; in check_ptr_to_map_access()
7144 return -EACCES; in check_ptr_to_map_access()
7149 return -EACCES; in check_ptr_to_map_access()
7154 mark_btf_ld_reg(env, &map_reg, 0, PTR_TO_BTF_ID, btf_vmlinux, *map->ops->map_btf_id, 0); in check_ptr_to_map_access()
7155 ret = btf_struct_access(&env->log, &map_reg, off, size, atype, &btf_id, &flag, NULL); in check_ptr_to_map_access()
7166 * maximum valid offset is -1.
7168 * The minimum valid offset is -MAX_BPF_STACK for writes, and
7169 * -state->allocated_stack for reads.
7178 if (t == BPF_WRITE || env->allow_uninit_stack) in check_stack_slot_within_bounds()
7179 min_valid_off = -MAX_BPF_STACK; in check_stack_slot_within_bounds()
7181 min_valid_off = -state->allocated_stack; in check_stack_slot_within_bounds()
7183 if (off < min_valid_off || off > -1) in check_stack_slot_within_bounds()
7184 return -EACCES; in check_stack_slot_within_bounds()
7191 * 'off' includes `regno->offset`, but not its dynamic part (if any).
7210 if (tnum_is_const(reg->var_off)) { in check_stack_access_within_bounds()
7211 min_off = (s64)reg->var_off.value + off; in check_stack_access_within_bounds()
7214 if (reg->smax_value >= BPF_MAX_VAR_OFF || in check_stack_access_within_bounds()
7215 reg->smin_value <= -BPF_MAX_VAR_OFF) { in check_stack_access_within_bounds()
7216 verbose(env, "invalid unbounded variable-offset%s stack R%d\n", in check_stack_access_within_bounds()
7218 return -EACCES; in check_stack_access_within_bounds()
7220 min_off = reg->smin_value + off; in check_stack_access_within_bounds()
7221 max_off = reg->smax_value + off + access_size; in check_stack_access_within_bounds()
7226 err = -EINVAL; /* out of stack access into non-negative offsets */ in check_stack_access_within_bounds()
7231 err = -EFAULT; /* invalid negative access size; integer overflow? */ in check_stack_access_within_bounds()
7234 if (tnum_is_const(reg->var_off)) { in check_stack_access_within_bounds()
7240 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in check_stack_access_within_bounds()
7241 verbose(env, "invalid variable-offset%s stack R%d var_off=%s off=%d size=%d\n", in check_stack_access_within_bounds()
7248 * size is -min_off, not -min_off+1. in check_stack_access_within_bounds()
7250 return grow_stack_state(env, state, -min_off /* size */); in check_stack_access_within_bounds()
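For a variable-offset stack access, check_stack_access_within_bounds() computes the lowest and one-past-highest byte the access may touch (min_off = smin + off, max_off = smax + off + access_size) and, for writes, requires the whole range to stay inside [-MAX_BPF_STACK, 0). A stand-alone sketch of that bounds test (MAX_BPF_STACK is 512; helper name invented):

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define MAX_BPF_STACK 512

/* Compute the byte range a variable-offset stack access may touch and
 * check that it lies entirely within the stack: [-MAX_BPF_STACK, 0).
 * Follows the write-side check in check_stack_access_within_bounds(). */
static bool stack_access_ok(int64_t smin, int64_t smax, int off, int size)
{
	int64_t min_off = smin + off;
	int64_t max_off = smax + off + size;

	return min_off >= -MAX_BPF_STACK && max_off <= 0;
}

int main(void)
{
	/* accesses through r10 + off, variable part in [smin, smax] */
	printf("%d\n", stack_access_ok(0, 24, -64, 8));	/* 1: stays within the frame */
	printf("%d\n", stack_access_ok(0, 24, -24, 8));	/* 0: may reach non-negative offsets */
	printf("%d\n", stack_access_ok(0, 0, -520, 8));	/* 0: below the 512-byte stack */
	return 0;
}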
7256 if (prog->type == BPF_PROG_TYPE_LSM && in get_func_retval_range()
7257 prog->expected_attach_type == BPF_LSM_MAC && in get_func_retval_range()
7267 * if t==write && value_regno==-1, some unknown value is stored into memory
7268 * if t==read && value_regno==-1, don't care what we read from memory
7282 /* alignment checks will add in reg->off themselves */ in check_mem_access()
7287 /* for access checks, reg->off is just part of off */ in check_mem_access()
7288 off += reg->off; in check_mem_access()
7290 if (reg->type == PTR_TO_MAP_KEY) { in check_mem_access()
7293 return -EACCES; in check_mem_access()
7297 reg->map_ptr->key_size, false); in check_mem_access()
7302 } else if (reg->type == PTR_TO_MAP_VALUE) { in check_mem_access()
7308 return -EACCES; in check_mem_access()
7316 if (tnum_is_const(reg->var_off)) in check_mem_access()
7317 kptr_field = btf_record_find(reg->map_ptr->record, in check_mem_access()
7318 off + reg->var_off.value, BPF_KPTR | BPF_UPTR); in check_mem_access()
7322 struct bpf_map *map = reg->map_ptr; in check_mem_access()
7324 /* if map is read-only, track its contents as scalars */ in check_mem_access()
7325 if (tnum_is_const(reg->var_off) && in check_mem_access()
7327 map->ops->map_direct_value_addr) { in check_mem_access()
7328 int map_off = off + reg->var_off.value; in check_mem_access()
7342 } else if (base_type(reg->type) == PTR_TO_MEM) { in check_mem_access()
7343 bool rdonly_mem = type_is_rdonly_mem(reg->type); in check_mem_access()
7345 if (type_may_be_null(reg->type)) { in check_mem_access()
7347 reg_type_str(env, reg->type)); in check_mem_access()
7348 return -EACCES; in check_mem_access()
7353 regno, reg_type_str(env, reg->type)); in check_mem_access()
7354 return -EACCES; in check_mem_access()
7360 return -EACCES; in check_mem_access()
7364 reg->mem_size, false); in check_mem_access()
7367 } else if (reg->type == PTR_TO_CTX) { in check_mem_access()
7377 return -EACCES; in check_mem_access()
7394 if (is_retval && get_func_retval_range(env->prog, &range)) { in check_mem_access()
7406 regs[value_regno].id = ++env->id_gen; in check_mem_access()
7410 * a sub-register. in check_mem_access()
7421 } else if (reg->type == PTR_TO_STACK) { in check_mem_access()
7436 return -EACCES; in check_mem_access()
7442 return -EACCES; in check_mem_access()
7447 } else if (reg->type == PTR_TO_FLOW_KEYS) { in check_mem_access()
7452 return -EACCES; in check_mem_access()
7458 } else if (type_is_sk_pointer(reg->type)) { in check_mem_access()
7461 regno, reg_type_str(env, reg->type)); in check_mem_access()
7462 return -EACCES; in check_mem_access()
7467 } else if (reg->type == PTR_TO_TP_BUFFER) { in check_mem_access()
7471 } else if (base_type(reg->type) == PTR_TO_BTF_ID && in check_mem_access()
7472 !type_may_be_null(reg->type)) { in check_mem_access()
7475 } else if (reg->type == CONST_PTR_TO_MAP) { in check_mem_access()
7478 } else if (base_type(reg->type) == PTR_TO_BUF) { in check_mem_access()
7479 bool rdonly_mem = type_is_rdonly_mem(reg->type); in check_mem_access()
7485 regno, reg_type_str(env, reg->type)); in check_mem_access()
7486 return -EACCES; in check_mem_access()
7488 max_access = &env->prog->aux->max_rdonly_access; in check_mem_access()
7490 max_access = &env->prog->aux->max_rdwr_access; in check_mem_access()
7498 } else if (reg->type == PTR_TO_ARENA) { in check_mem_access()
7503 reg_type_str(env, reg->type)); in check_mem_access()
7504 return -EACCES; in check_mem_access()
7510 /* b/h/w load zero-extends, mark upper bits as known 0 */ in check_mem_access()
7526 switch (insn->imm) { in check_atomic()
7539 verbose(env, "BPF_ATOMIC uses invalid atomic opcode %02x\n", insn->imm); in check_atomic()
7540 return -EINVAL; in check_atomic()
7543 if (BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) { in check_atomic()
7545 return -EINVAL; in check_atomic()
7549 err = check_reg_arg(env, insn->src_reg, SRC_OP); in check_atomic()
7554 err = check_reg_arg(env, insn->dst_reg, SRC_OP); in check_atomic()
7558 if (insn->imm == BPF_CMPXCHG) { in check_atomic()
7568 return -EACCES; in check_atomic()
7572 if (is_pointer_value(env, insn->src_reg)) { in check_atomic()
7573 verbose(env, "R%d leaks addr into mem\n", insn->src_reg); in check_atomic()
7574 return -EACCES; in check_atomic()
7577 if (is_ctx_reg(env, insn->dst_reg) || in check_atomic()
7578 is_pkt_reg(env, insn->dst_reg) || in check_atomic()
7579 is_flow_key_reg(env, insn->dst_reg) || in check_atomic()
7580 is_sk_reg(env, insn->dst_reg) || in check_atomic()
7581 (is_arena_reg(env, insn->dst_reg) && !bpf_jit_supports_insn(insn, true))) { in check_atomic()
7583 insn->dst_reg, in check_atomic()
7584 reg_type_str(env, reg_state(env, insn->dst_reg)->type)); in check_atomic()
7585 return -EACCES; in check_atomic()
7588 if (insn->imm & BPF_FETCH) { in check_atomic()
7589 if (insn->imm == BPF_CMPXCHG) in check_atomic()
7592 load_reg = insn->src_reg; in check_atomic()
7602 load_reg = -1; in check_atomic()
7608 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, in check_atomic()
7609 BPF_SIZE(insn->code), BPF_READ, -1, true, false); in check_atomic()
7611 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, in check_atomic()
7612 BPF_SIZE(insn->code), BPF_READ, load_reg, in check_atomic()
7617 if (is_arena_reg(env, insn->dst_reg)) { in check_atomic()
7623 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, in check_atomic()
7624 BPF_SIZE(insn->code), BPF_WRITE, -1, true, false); in check_atomic()
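/* Illustrative sketch (not part of verifier.c): the kind of BPF program
 * source that compiles down to the BPF_ATOMIC instructions check_atomic()
 * accepts. The destination is a PTR_TO_MAP_VALUE; the same operation on a
 * ctx, packet, flow-keys or socket pointer is rejected above. Map and
 * section names are made up for the example.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
} counters SEC(".maps");

SEC("tracepoint/syscalls/sys_enter_write")
int count_writes(void *ctx)
{
	__u32 key = 0;
	__u64 *val = bpf_map_lookup_elem(&counters, &key);

	if (val)
		__sync_fetch_and_add(val, 1);	/* emitted as a BPF_ATOMIC | BPF_ADD (BPF_DW) insn */
	return 0;
}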
7635 * 'off' includes 'regno->off', but not its dynamic part (if any).
7649 * read-only. in check_stack_range_initialized()
7654 verbose(env, "invalid zero-sized read\n"); in check_stack_range_initialized()
7655 return -EACCES; in check_stack_range_initialized()
7666 if (tnum_is_const(reg->var_off)) { in check_stack_range_initialized()
7667 min_off = max_off = reg->var_off.value + off; in check_stack_range_initialized()
7674 if (!env->bypass_spec_v1) { in check_stack_range_initialized()
7677 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in check_stack_range_initialized()
7680 return -EACCES; in check_stack_range_initialized()
7688 if (meta && meta->raw_mode) in check_stack_range_initialized()
7691 min_off = reg->smin_value + off; in check_stack_range_initialized()
7692 max_off = reg->smax_value + off; in check_stack_range_initialized()
7695 if (meta && meta->raw_mode) { in check_stack_range_initialized()
7710 int stack_off = -i - 1; in check_stack_range_initialized()
7714 if (state->allocated_stack <= stack_off) in check_stack_range_initialized()
7716 if (state->stack[spi].slot_type[stack_off % BPF_REG_SIZE] == STACK_DYNPTR) { in check_stack_range_initialized()
7718 return -EACCES; in check_stack_range_initialized()
7721 meta->access_size = access_size; in check_stack_range_initialized()
7722 meta->regno = regno; in check_stack_range_initialized()
7729 slot = -i - 1; in check_stack_range_initialized()
7731 if (state->allocated_stack <= slot) { in check_stack_range_initialized()
7733 return -EFAULT; in check_stack_range_initialized()
7736 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; in check_stack_range_initialized()
7740 (*stype == STACK_INVALID && env->allow_uninit_stack)) { in check_stack_range_initialized()
7748 if (is_spilled_reg(&state->stack[spi]) && in check_stack_range_initialized()
7749 (state->stack[spi].spilled_ptr.type == SCALAR_VALUE || in check_stack_range_initialized()
7750 env->allow_ptr_leaks)) { in check_stack_range_initialized()
7752 __mark_reg_unknown(env, &state->stack[spi].spilled_ptr); in check_stack_range_initialized()
7754 scrub_spilled_slot(&state->stack[spi].slot_type[j]); in check_stack_range_initialized()
7759 if (tnum_is_const(reg->var_off)) { in check_stack_range_initialized()
7761 regno, min_off, i - min_off, access_size); in check_stack_range_initialized()
7765 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in check_stack_range_initialized()
7767 regno, tn_buf, i - min_off, access_size); in check_stack_range_initialized()
7769 return -EACCES; in check_stack_range_initialized()
7771 /* reading any byte out of 8-byte 'spill_slot' will cause in check_stack_range_initialized()
7774 mark_reg_read(env, &state->stack[spi].spilled_ptr, in check_stack_range_initialized()
7775 state->stack[spi].spilled_ptr.parent, in check_stack_range_initialized()
7794 switch (base_type(reg->type)) { in check_helper_mem_access()
7797 return check_packet_access(env, regno, reg->off, access_size, in check_helper_mem_access()
7802 reg_type_str(env, reg->type)); in check_helper_mem_access()
7803 return -EACCES; in check_helper_mem_access()
7805 return check_mem_region_access(env, regno, reg->off, access_size, in check_helper_mem_access()
7806 reg->map_ptr->key_size, false); in check_helper_mem_access()
7808 if (check_map_access_type(env, regno, reg->off, access_size, access_type)) in check_helper_mem_access()
7809 return -EACCES; in check_helper_mem_access()
7810 return check_map_access(env, regno, reg->off, access_size, in check_helper_mem_access()
7813 if (type_is_rdonly_mem(reg->type)) { in check_helper_mem_access()
7816 reg_type_str(env, reg->type)); in check_helper_mem_access()
7817 return -EACCES; in check_helper_mem_access()
7820 return check_mem_region_access(env, regno, reg->off, in check_helper_mem_access()
7821 access_size, reg->mem_size, in check_helper_mem_access()
7824 if (type_is_rdonly_mem(reg->type)) { in check_helper_mem_access()
7827 reg_type_str(env, reg->type)); in check_helper_mem_access()
7828 return -EACCES; in check_helper_mem_access()
7831 max_access = &env->prog->aux->max_rdonly_access; in check_helper_mem_access()
7833 max_access = &env->prog->aux->max_rdwr_access; in check_helper_mem_access()
7835 return check_buffer_access(env, reg, regno, reg->off, in check_helper_mem_access()
7841 regno, reg->off, access_size, in check_helper_mem_access()
7844 return check_ptr_to_btf_access(env, regs, regno, reg->off, in check_helper_mem_access()
7845 access_size, BPF_READ, -1); in check_helper_mem_access()
7852 if (!env->ops->convert_ctx_access) { in check_helper_mem_access()
7853 int offset = access_size - 1; in check_helper_mem_access()
7855 /* Allow zero-byte read from PTR_TO_CTX */ in check_helper_mem_access()
7857 return zero_size_allowed ? 0 : -EACCES; in check_helper_mem_access()
7859 return check_mem_access(env, env->insn_idx, regno, offset, BPF_B, in check_helper_mem_access()
7860 access_type, -1, false, false); in check_helper_mem_access()
7865 /* Allow zero-byte read from NULL, regardless of pointer type */ in check_helper_mem_access()
7871 reg_type_str(env, reg->type)); in check_helper_mem_access()
7873 return -EACCES; in check_helper_mem_access()
7880 * @regno is the register containing the access size. regno-1 is the register
7899 meta->msize_max_value = reg->umax_value; in check_mem_size_reg()
7906 if (!tnum_is_const(reg->var_off)) in check_mem_size_reg()
7909 if (reg->smin_value < 0) { in check_mem_size_reg()
7912 return -EACCES; in check_mem_size_reg()
7915 if (reg->umin_value == 0 && !zero_size_allowed) { in check_mem_size_reg()
7916 verbose(env, "R%d invalid zero-sized read: u64=[%lld,%lld]\n", in check_mem_size_reg()
7917 regno, reg->umin_value, reg->umax_value); in check_mem_size_reg()
7918 return -EACCES; in check_mem_size_reg()
7921 if (reg->umax_value >= BPF_MAX_VAR_SIZ) { in check_mem_size_reg()
7924 return -EACCES; in check_mem_size_reg()
7926 err = check_helper_mem_access(env, regno - 1, reg->umax_value, in check_mem_size_reg()
7936 bool may_be_null = type_may_be_null(reg->type); in check_mem_reg()
7964 struct bpf_reg_state *mem_reg = &cur_regs(env)[regno - 1]; in check_kfunc_mem_size_reg()
7965 bool may_be_null = type_may_be_null(mem_reg->type); in check_kfunc_mem_size_reg()
7991 * Two bpf_map_lookups (even with the same key) will have different reg->id.
7992 * Two separate bpf_obj_new will also have different reg->id.
7994 * clears reg->id after value_or_null->value transition, since the verifier only
7998 * reg->id > 0 after value_or_null->value transition. By doing so
8003 * deadlocks. in process_spin_lock()
8007 * env->cur_state->active_locks remembers which map value element or allocated
8014 struct bpf_verifier_state *cur = env->cur_state; in process_spin_lock()
8015 bool is_const = tnum_is_const(reg->var_off); in process_spin_lock()
8016 u64 val = reg->var_off.value; in process_spin_lock()
8026 return -EINVAL; in process_spin_lock()
8028 if (reg->type == PTR_TO_MAP_VALUE) { in process_spin_lock()
8029 map = reg->map_ptr; in process_spin_lock()
8030 if (!map->btf) { in process_spin_lock()
8033 map->name); in process_spin_lock()
8034 return -EINVAL; in process_spin_lock()
8037 btf = reg->btf; in process_spin_lock()
8043 map ? map->name : "kptr"); in process_spin_lock()
8044 return -EINVAL; in process_spin_lock()
8046 if (rec->spin_lock_off != val + reg->off) { in process_spin_lock()
8048 val + reg->off, rec->spin_lock_off); in process_spin_lock()
8049 return -EINVAL; in process_spin_lock()
8059 if (cur->active_locks) { in process_spin_lock()
8062 return -EINVAL; in process_spin_lock()
8064 err = acquire_lock_state(env, env->insn_idx, REF_TYPE_LOCK, reg->id, ptr); in process_spin_lock()
8077 if (!cur->active_locks) { in process_spin_lock()
8079 return -EINVAL; in process_spin_lock()
8082 if (release_lock_state(env->cur_state, REF_TYPE_LOCK, reg->id, ptr)) { in process_spin_lock()
8084 return -EINVAL; in process_spin_lock()
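/* Illustrative sketch (not part of verifier.c): the locking pattern that
 * process_spin_lock() validates. The struct bpf_spin_lock must be a member
 * of the map value (its offset is compared against rec->spin_lock_off
 * above), the lock is taken on the pointer returned by a map lookup, and
 * only one lock may be held at a time. Names below are made up.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

struct locked_val {
	struct bpf_spin_lock lock;
	__u64 counter;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, struct locked_val);
} locked_map SEC(".maps");

SEC("tc")
int bump_counter(struct __sk_buff *skb)
{
	__u32 key = 0;
	struct locked_val *v = bpf_map_lookup_elem(&locked_map, &key);

	if (!v)
		return 0;
	bpf_spin_lock(&v->lock);	/* acquire_lock_state() records reg->id here */
	v->counter++;
	bpf_spin_unlock(&v->lock);	/* must unlock the same element before exit */
	return 0;
}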
8096 bool is_const = tnum_is_const(reg->var_off); in process_timer_func()
8097 struct bpf_map *map = reg->map_ptr; in process_timer_func()
8098 u64 val = reg->var_off.value; in process_timer_func()
8104 return -EINVAL; in process_timer_func()
8106 if (!map->btf) { in process_timer_func()
8108 map->name); in process_timer_func()
8109 return -EINVAL; in process_timer_func()
8111 if (!btf_record_has_field(map->record, BPF_TIMER)) { in process_timer_func()
8112 verbose(env, "map '%s' has no valid bpf_timer\n", map->name); in process_timer_func()
8113 return -EINVAL; in process_timer_func()
8115 if (map->record->timer_off != val + reg->off) { in process_timer_func()
8117 val + reg->off, map->record->timer_off); in process_timer_func()
8118 return -EINVAL; in process_timer_func()
8120 if (meta->map_ptr) { in process_timer_func()
8122 return -EFAULT; in process_timer_func()
8124 meta->map_uid = reg->map_uid; in process_timer_func()
8125 meta->map_ptr = map; in process_timer_func()
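/* Illustrative sketch (not part of verifier.c): the map-value layout that
 * process_timer_func() expects. The struct bpf_timer field must sit at the
 * offset recorded in map->record->timer_off, i.e. it is declared directly
 * in the value type. The callback receives (map, key, value), matching
 * set_timer_callback_state() further below. Program type and names are
 * chosen only for the example.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

struct timer_elem {
	struct bpf_timer t;
	__u64 fired;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct timer_elem);
} timer_map SEC(".maps");

static int timer_cb(void *map, int *key, struct timer_elem *val)
{
	val->fired++;
	return 0;	/* async callback: return range checked as [0, 1] */
}

SEC("tp/syscalls/sys_enter_getpid")
int arm_timer(void *ctx)
{
	int key = 0;
	struct timer_elem *e = bpf_map_lookup_elem(&timer_map, &key);

	if (!e)
		return 0;
	bpf_timer_init(&e->t, &timer_map, 0 /* CLOCK_REALTIME */);
	bpf_timer_set_callback(&e->t, timer_cb);
	bpf_timer_start(&e->t, 1000000 /* ns */, 0);
	return 0;
}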
8133 struct bpf_map *map = reg->map_ptr; in process_wq_func()
8134 u64 val = reg->var_off.value; in process_wq_func()
8136 if (map->record->wq_off != val + reg->off) { in process_wq_func()
8138 val + reg->off, map->record->wq_off); in process_wq_func()
8139 return -EINVAL; in process_wq_func()
8141 meta->map.uid = reg->map_uid; in process_wq_func()
8142 meta->map.ptr = map; in process_wq_func()
8155 if (type_is_ptr_alloc_obj(reg->type)) { in process_kptr_func()
8158 map_ptr = reg->map_ptr; in process_kptr_func()
8159 if (!map_ptr->btf) { in process_kptr_func()
8161 map_ptr->name); in process_kptr_func()
8162 return -EINVAL; in process_kptr_func()
8164 rec = map_ptr->record; in process_kptr_func()
8165 meta->map_ptr = map_ptr; in process_kptr_func()
8168 if (!tnum_is_const(reg->var_off)) { in process_kptr_func()
8172 return -EINVAL; in process_kptr_func()
8177 return -EINVAL; in process_kptr_func()
8180 kptr_off = reg->off + reg->var_off.value; in process_kptr_func()
8184 return -EACCES; in process_kptr_func()
8186 if (kptr_field->type != BPF_KPTR_REF && kptr_field->type != BPF_KPTR_PERCPU) { in process_kptr_func()
8188 return -EACCES; in process_kptr_func()
8190 meta->kptr_field = kptr_field; in process_kptr_func()
8209 * reg->type and the memory's in reg->dynptr.type), but there is no support for
8225 if (reg->type != PTR_TO_STACK && reg->type != CONST_PTR_TO_DYNPTR) { in process_dynptr_func()
8228 regno - 1); in process_dynptr_func()
8229 return -EINVAL; in process_dynptr_func()
8237 return -EFAULT; in process_dynptr_func()
8240 /* MEM_UNINIT - Points to memory that is an appropriate candidate for in process_dynptr_func()
8247 * MEM_RDONLY - Points to an initialized bpf_dynptr that will not be in process_dynptr_func()
8251 * None - Points to an initialized dynptr that can be mutated and in process_dynptr_func()
8260 return -EINVAL; in process_dynptr_func()
8266 i, BPF_DW, BPF_WRITE, -1, false, false); in process_dynptr_func()
8273 /* For the reg->type == PTR_TO_STACK case, bpf_dynptr is never const */ in process_dynptr_func()
8274 if (reg->type == CONST_PTR_TO_DYNPTR && !(arg_type & MEM_RDONLY)) { in process_dynptr_func()
8276 return -EINVAL; in process_dynptr_func()
8282 regno - 1); in process_dynptr_func()
8283 return -EINVAL; in process_dynptr_func()
8290 dynptr_type_str(arg_to_dynptr_type(arg_type)), regno - 1); in process_dynptr_func()
8291 return -EINVAL; in process_dynptr_func()
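/* Illustrative sketch (not part of verifier.c): the dynptr argument
 * flavours described in the comment above. bpf_ringbuf_reserve_dynptr()
 * takes an uninitialized stack dynptr (MEM_UNINIT), bpf_dynptr_write()
 * mutates an initialized one, and read-only consumers such as
 * bpf_dynptr_read() correspond to the MEM_RDONLY case. Names are made up.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 4096);
} rb SEC(".maps");

SEC("tp/syscalls/sys_enter_getpid")
int use_dynptr(void *ctx)
{
	struct bpf_dynptr ptr;	/* PTR_TO_STACK dynptr slots, initialized below */
	__u64 val = 42;

	/* on failure the dynptr is set to a null dynptr but must still be
	 * submitted or discarded, so the submit below is unconditional
	 */
	if (!bpf_ringbuf_reserve_dynptr(&rb, sizeof(val), 0, &ptr))
		bpf_dynptr_write(&ptr, 0, &val, sizeof(val), 0);
	bpf_ringbuf_submit_dynptr(&ptr, 0);
	return 0;
}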
8303 return state->stack[spi].spilled_ptr.ref_obj_id; in iter_ref_obj_id()
8308 return meta->kfunc_flags & (KF_ITER_NEW | KF_ITER_NEXT | KF_ITER_DESTROY); in is_iter_kfunc()
8313 return meta->kfunc_flags & KF_ITER_NEW; in is_iter_new_kfunc()
8318 return meta->kfunc_flags & KF_ITER_NEXT; in is_iter_next_kfunc()
8323 return meta->kfunc_flags & KF_ITER_DESTROY; in is_iter_destroy_kfunc()
8336 return btf_param_match_suffix(meta->btf, arg, "__iter"); in is_kfunc_arg_iter()
8346 if (reg->type != PTR_TO_STACK) { in process_iter_arg()
8347 verbose(env, "arg#%d expected pointer to an iterator on stack\n", regno - 1); in process_iter_arg()
8348 return -EINVAL; in process_iter_arg()
8357 btf_id = btf_check_iter_arg(meta->btf, meta->func_proto, regno - 1); in process_iter_arg()
8359 verbose(env, "expected valid iter pointer as arg #%d\n", regno - 1); in process_iter_arg()
8360 return -EINVAL; in process_iter_arg()
8362 t = btf_type_by_id(meta->btf, btf_id); in process_iter_arg()
8363 nr_slots = t->size / BPF_REG_SIZE; in process_iter_arg()
8369 iter_type_str(meta->btf, btf_id), regno - 1); in process_iter_arg()
8370 return -EINVAL; in process_iter_arg()
8375 i, BPF_DW, BPF_WRITE, -1, false, false); in process_iter_arg()
8380 err = mark_stack_slots_iter(env, meta, reg, insn_idx, meta->btf, btf_id, nr_slots); in process_iter_arg()
8387 err = is_iter_reg_valid_init(env, reg, meta->btf, btf_id, nr_slots); in process_iter_arg()
8391 case -EINVAL: in process_iter_arg()
8393 iter_type_str(meta->btf, btf_id), regno - 1); in process_iter_arg()
8395 case -EPROTO: in process_iter_arg()
8396 verbose(env, "expected an RCU CS when using %s\n", meta->func_name); in process_iter_arg()
8410 /* remember meta->iter info for process_iter_next_call() */ in process_iter_arg()
8411 meta->iter.spi = spi; in process_iter_arg()
8412 meta->iter.frameno = reg->frameno; in process_iter_arg()
8413 meta->ref_obj_id = iter_ref_obj_id(env, reg, spi); in process_iter_arg()
8426 * stopped at insn_idx with callsites matching those in cur->frame.
8437 for (; sl; sl = sl->next) { in find_prev_entry()
8438 /* If st->branches != 0 state is a part of current DFS verification path, in find_prev_entry()
8441 st = &sl->state; in find_prev_entry()
8442 if (st->insn_idx == insn_idx && st->branches && same_callsites(st, cur) && in find_prev_entry()
8443 st->dfs_depth < cur->dfs_depth) in find_prev_entry()
8459 if (rold->type != SCALAR_VALUE) in maybe_widen_reg()
8461 if (rold->type != rcur->type) in maybe_widen_reg()
8463 if (rold->precise || rcur->precise || regs_exact(rold, rcur, idmap)) in maybe_widen_reg()
8476 for (fr = old->curframe; fr >= 0; fr--) { in widen_imprecise_scalars()
8477 fold = old->frame[fr]; in widen_imprecise_scalars()
8478 fcur = cur->frame[fr]; in widen_imprecise_scalars()
8482 &fold->regs[i], in widen_imprecise_scalars()
8483 &fcur->regs[i], in widen_imprecise_scalars()
8484 &env->idmap_scratch); in widen_imprecise_scalars()
8486 for (i = 0; i < fold->allocated_stack / BPF_REG_SIZE; i++) { in widen_imprecise_scalars()
8487 if (!is_spilled_reg(&fold->stack[i]) || in widen_imprecise_scalars()
8488 !is_spilled_reg(&fcur->stack[i])) in widen_imprecise_scalars()
8492 &fold->stack[i].spilled_ptr, in widen_imprecise_scalars()
8493 &fcur->stack[i].spilled_ptr, in widen_imprecise_scalars()
8494 &env->idmap_scratch); in widen_imprecise_scalars()
8503 int iter_frameno = meta->iter.frameno; in get_iter_from_state()
8504 int iter_spi = meta->iter.spi; in get_iter_from_state()
8506 return &cur_st->frame[iter_frameno]->stack[iter_spi].spilled_ptr; in get_iter_from_state()
8529 * (BPF_ITER_STATE_ACTIVE) and assume non-NULL return from iter_next(). We
8590 struct bpf_verifier_state *cur_st = env->cur_state, *queued_st, *prev_st; in process_iter_next_call()
8591 struct bpf_func_state *cur_fr = cur_st->frame[cur_st->curframe], *queued_fr; in process_iter_next_call()
8598 if (cur_iter->iter.state != BPF_ITER_STATE_ACTIVE && in process_iter_next_call()
8599 cur_iter->iter.state != BPF_ITER_STATE_DRAINED) { in process_iter_next_call()
8601 cur_iter->iter.state, iter_state_str(cur_iter->iter.state)); in process_iter_next_call()
8602 return -EFAULT; in process_iter_next_call()
8605 if (cur_iter->iter.state == BPF_ITER_STATE_ACTIVE) { in process_iter_next_call()
8609 if (!cur_st->parent || cur_st->parent->insn_idx != insn_idx || in process_iter_next_call()
8610 !same_callsites(cur_st->parent, cur_st)) { in process_iter_next_call()
8612 return -EFAULT; in process_iter_next_call()
8614 /* Note cur_st->parent in the call below, it is necessary to skip in process_iter_next_call()
8618 prev_st = find_prev_entry(env, cur_st->parent, insn_idx); in process_iter_next_call()
8622 return -ENOMEM; in process_iter_next_call()
8625 queued_iter->iter.state = BPF_ITER_STATE_ACTIVE; in process_iter_next_call()
8626 queued_iter->iter.depth++; in process_iter_next_call()
8630 queued_fr = queued_st->frame[queued_st->curframe]; in process_iter_next_call()
8631 mark_ptr_not_null_reg(&queued_fr->regs[BPF_REG_0]); in process_iter_next_call()
8636 cur_iter->iter.state = BPF_ITER_STATE_DRAINED; in process_iter_next_call()
8637 __mark_reg_const_zero(env, &cur_fr->regs[BPF_REG_0]); in process_iter_next_call()
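/* Illustrative sketch (not part of verifier.c): the open-coded iterator
 * pattern that process_iter_arg() and process_iter_next_call() reason
 * about. The iterator state lives on the stack, *_new() initializes it,
 * every *_next() call is explored both for the "ACTIVE, returned
 * non-NULL" and the "DRAINED, returned NULL" outcome, and *_destroy()
 * releases it. The bpf_iter_num kfunc declarations below are written out
 * by hand to keep the example self-contained.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

extern int bpf_iter_num_new(struct bpf_iter_num *it, int start, int end) __ksym;
extern int *bpf_iter_num_next(struct bpf_iter_num *it) __ksym;
extern void bpf_iter_num_destroy(struct bpf_iter_num *it) __ksym;

SEC("raw_tp/sched_switch")
int sum_first_ten(void *ctx)
{
	struct bpf_iter_num it;
	int *v, sum = 0;

	bpf_iter_num_new(&it, 0, 10);
	while ((v = bpf_iter_num_next(&it)))	/* NULL once the iterator drains */
		sum += *v;
	bpf_iter_num_destroy(&it);
	bpf_printk("sum=%d", sum);
	return 0;
}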
8668 if (!meta->map_ptr) { in resolve_map_arg_type()
8670 verbose(env, "invalid map_ptr to access map->type\n"); in resolve_map_arg_type()
8671 return -EACCES; in resolve_map_arg_type()
8674 switch (meta->map_ptr->map_type) { in resolve_map_arg_type()
8681 return -EINVAL; in resolve_map_arg_type()
8685 if (meta->func_id == BPF_FUNC_map_peek_elem) in resolve_map_arg_type()
8811 enum bpf_reg_type expected, type = reg->type; in check_reg_type()
8818 return -EFAULT; in check_reg_type()
8841 if (meta->func_id == BPF_FUNC_kptr_xchg && type_is_alloc(type) && regno == BPF_REG_2) { in check_reg_type()
8846 for (i = 0; i < ARRAY_SIZE(compatible->types); i++) { in check_reg_type()
8847 expected = compatible->types[i]; in check_reg_type()
8855 verbose(env, "R%d type=%s expected=", regno, reg_type_str(env, reg->type)); in check_reg_type()
8857 verbose(env, "%s, ", reg_type_str(env, compatible->types[j])); in check_reg_type()
8858 verbose(env, "%s\n", reg_type_str(env, compatible->types[j])); in check_reg_type()
8859 return -EACCES; in check_reg_type()
8862 if (base_type(reg->type) != PTR_TO_BTF_ID) in check_reg_type()
8869 func_id_name(meta->func_id), in check_reg_type()
8870 regno, reg_type_str(env, reg->type)); in check_reg_type()
8871 return -EACCES; in check_reg_type()
8876 switch ((int)reg->type) { in check_reg_type()
8889 meta->func_id != BPF_FUNC_sk_release; in check_reg_type()
8891 if (type_may_be_null(reg->type) && in check_reg_type()
8894 return -EACCES; in check_reg_type()
8898 if (!compatible->btf_id) { in check_reg_type()
8900 return -EFAULT; in check_reg_type()
8902 arg_btf_id = compatible->btf_id; in check_reg_type()
8905 if (meta->func_id == BPF_FUNC_kptr_xchg) { in check_reg_type()
8906 if (map_kptr_match_type(env, meta->kptr_field, reg, regno)) in check_reg_type()
8907 return -EACCES; in check_reg_type()
8911 verbose(env, "R%d has non-overwritten BPF_PTR_POISON type\n", in check_reg_type()
8913 return -EACCES; in check_reg_type()
8916 if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off, in check_reg_type()
8920 regno, btf_type_name(reg->btf, reg->btf_id), in check_reg_type()
8922 return -EACCES; in check_reg_type()
8929 if (meta->func_id != BPF_FUNC_spin_lock && meta->func_id != BPF_FUNC_spin_unlock && in check_reg_type()
8930 meta->func_id != BPF_FUNC_kptr_xchg) { in check_reg_type()
8932 return -EFAULT; in check_reg_type()
8935 if (meta->func_id == BPF_FUNC_kptr_xchg && regno == BPF_REG_2) { in check_reg_type()
8936 if (map_kptr_match_type(env, meta->kptr_field, reg, regno)) in check_reg_type()
8937 return -EACCES; in check_reg_type()
8947 return -EFAULT; in check_reg_type()
8973 u32 type = reg->type; in check_func_arg_reg_off()
8979 * meta->release_regno. in check_func_arg_reg_off()
8995 if (reg->off) { in check_func_arg_reg_off()
8998 return -EINVAL; in check_func_arg_reg_off()
9029 * can be non-zero. This was already checked above. So pass in check_func_arg_reg_off()
9048 if (arg_type_is_dynptr(fn->arg_type[i])) { in get_dynptr_arg_reg()
9067 if (reg->type == CONST_PTR_TO_DYNPTR) in dynptr_id()
9068 return reg->id; in dynptr_id()
9072 return state->stack[spi].spilled_ptr.id; in dynptr_id()
9080 if (reg->type == CONST_PTR_TO_DYNPTR) in dynptr_ref_obj_id()
9081 return reg->ref_obj_id; in dynptr_ref_obj_id()
9085 return state->stack[spi].spilled_ptr.ref_obj_id; in dynptr_ref_obj_id()
9094 if (reg->type == CONST_PTR_TO_DYNPTR) in dynptr_get_type()
9095 return reg->dynptr.type; in dynptr_get_type()
9097 spi = __get_spi(reg->off); in dynptr_get_type()
9103 return state->stack[spi].spilled_ptr.dynptr.type; in dynptr_get_type()
9109 struct bpf_map *map = reg->map_ptr; in check_reg_const_str()
9115 if (reg->type != PTR_TO_MAP_VALUE) in check_reg_const_str()
9116 return -EINVAL; in check_reg_const_str()
9120 return -EACCES; in check_reg_const_str()
9123 if (!tnum_is_const(reg->var_off)) { in check_reg_const_str()
9125 return -EACCES; in check_reg_const_str()
9128 if (!map->ops->map_direct_value_addr) { in check_reg_const_str()
9130 return -EACCES; in check_reg_const_str()
9133 err = check_map_access(env, regno, reg->off, in check_reg_const_str()
9134 map->value_size - reg->off, false, in check_reg_const_str()
9139 map_off = reg->off + reg->var_off.value; in check_reg_const_str()
9140 err = map->ops->map_direct_value_addr(map, &map_addr, map_off); in check_reg_const_str()
9147 if (!strnchr(str_ptr + map_off, map->value_size - map_off, 0)) { in check_reg_const_str()
9148 verbose(env, "string is not zero-terminated\n"); in check_reg_const_str()
9149 return -EINVAL; in check_reg_const_str()
9169 if (!env->bpf_capable) in get_constant_map_key()
9170 return -EOPNOTSUPP; in get_constant_map_key()
9171 if (key->type != PTR_TO_STACK) in get_constant_map_key()
9172 return -EOPNOTSUPP; in get_constant_map_key()
9173 if (!tnum_is_const(key->var_off)) in get_constant_map_key()
9174 return -EOPNOTSUPP; in get_constant_map_key()
9176 stack_off = key->off + key->var_off.value; in get_constant_map_key()
9177 slot = -stack_off - 1; in get_constant_map_key()
9180 stype = state->stack[spi].slot_type; in get_constant_map_key()
9183 for (i = off; i >= 0 && stype[i] == STACK_ZERO; i--) in get_constant_map_key()
9191 if (!is_spilled_scalar_reg(&state->stack[spi])) in get_constant_map_key()
9192 return -EOPNOTSUPP; in get_constant_map_key()
9193 for (i = off; i >= 0 && stype[i] == STACK_SPILL; i--) in get_constant_map_key()
9196 return -EOPNOTSUPP; in get_constant_map_key()
9198 reg = &state->stack[spi].spilled_ptr; in get_constant_map_key()
9199 if (!tnum_is_const(reg->var_off)) in get_constant_map_key()
9201 return -EOPNOTSUPP; in get_constant_map_key()
9206 bt_set_frame_slot(&env->bt, key->frameno, spi); in get_constant_map_key()
9211 *value = reg->var_off.value; in get_constant_map_key()
9224 enum bpf_arg_type arg_type = fn->arg_type[arg]; in check_func_arg()
9225 enum bpf_reg_type type = reg->type; in check_func_arg()
9241 return -EACCES; in check_func_arg()
9249 return -EACCES; in check_func_arg()
9267 arg_btf_id = fn->arg_btf_id[arg]; in check_func_arg()
9288 if (reg->type == PTR_TO_STACK) { in check_func_arg()
9290 if (spi < 0 || !state->stack[spi].spilled_ptr.ref_obj_id) { in check_func_arg()
9292 return -EINVAL; in check_func_arg()
9296 return -EINVAL; in check_func_arg()
9298 } else if (!reg->ref_obj_id && !register_is_null(reg)) { in check_func_arg()
9301 return -EINVAL; in check_func_arg()
9303 if (meta->release_regno) { in check_func_arg()
9305 return -EFAULT; in check_func_arg()
9307 meta->release_regno = regno; in check_func_arg()
9310 if (reg->ref_obj_id && base_type(arg_type) != ARG_KPTR_XCHG_DEST) { in check_func_arg()
9311 if (meta->ref_obj_id) { in check_func_arg()
9313 regno, reg->ref_obj_id, in check_func_arg()
9314 meta->ref_obj_id); in check_func_arg()
9315 return -EFAULT; in check_func_arg()
9317 meta->ref_obj_id = reg->ref_obj_id; in check_func_arg()
9323 if (meta->map_ptr) { in check_func_arg()
9336 if (meta->map_ptr != reg->map_ptr || in check_func_arg()
9337 meta->map_uid != reg->map_uid) { in check_func_arg()
9340 meta->map_uid, reg->map_uid); in check_func_arg()
9341 return -EINVAL; in check_func_arg()
9344 meta->map_ptr = reg->map_ptr; in check_func_arg()
9345 meta->map_uid = reg->map_uid; in check_func_arg()
9349 * check that [key, key + map->key_size) are within in check_func_arg()
9352 if (!meta->map_ptr) { in check_func_arg()
9358 verbose(env, "invalid map_ptr to access map->key\n"); in check_func_arg()
9359 return -EACCES; in check_func_arg()
9361 key_size = meta->map_ptr->key_size; in check_func_arg()
9365 if (can_elide_value_nullness(meta->map_ptr->map_type)) { in check_func_arg()
9366 err = get_constant_map_key(env, reg, key_size, &meta->const_map_key); in check_func_arg()
9368 meta->const_map_key = -1; in check_func_arg()
9369 if (err == -EOPNOTSUPP) in check_func_arg()
9381 * check [value, value + map->value_size) validity in check_func_arg()
9383 if (!meta->map_ptr) { in check_func_arg()
9385 verbose(env, "invalid map_ptr to access map->value\n"); in check_func_arg()
9386 return -EACCES; in check_func_arg()
9388 meta->raw_mode = arg_type & MEM_UNINIT; in check_func_arg()
9389 err = check_helper_mem_access(env, regno, meta->map_ptr->value_size, in check_func_arg()
9394 if (!reg->btf_id) { in check_func_arg()
9396 return -EACCES; in check_func_arg()
9398 meta->ret_btf = reg->btf; in check_func_arg()
9399 meta->ret_btf_id = reg->btf_id; in check_func_arg()
9404 return -EACCES; in check_func_arg()
9406 if (meta->func_id == BPF_FUNC_spin_lock) { in check_func_arg()
9410 } else if (meta->func_id == BPF_FUNC_spin_unlock) { in check_func_arg()
9416 return -EFAULT; in check_func_arg()
9425 meta->subprogno = reg->subprogno; in check_func_arg()
9431 meta->raw_mode = arg_type & MEM_UNINIT; in check_func_arg()
9433 err = check_helper_mem_access(env, regno, fn->arg_size[arg], in check_func_arg()
9439 err = check_ptr_alignment(env, reg, 0, fn->arg_size[arg], true); in check_func_arg()
9444 fn->arg_type[arg - 1] & MEM_WRITE ? in check_func_arg()
9450 fn->arg_type[arg - 1] & MEM_WRITE ? in check_func_arg()
9460 if (!tnum_is_const(reg->var_off)) { in check_func_arg()
9463 return -EACCES; in check_func_arg()
9465 meta->mem_size = reg->var_off.value; in check_func_arg()
9489 enum bpf_attach_type eatype = env->prog->expected_attach_type; in may_update_sockmap()
9490 enum bpf_prog_type type = resolve_prog_type(env->prog); in may_update_sockmap()
9527 return env->prog->jit_requested && in allow_tail_call_in_subprogs()
9538 switch (map->map_type) { in check_map_func_compatibility()
9584 /* Restrict bpf side of cpumap and xskmap, open when use-cases in check_map_func_compatibility()
9666 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY) in check_map_func_compatibility()
9668 if (env->subprog_cnt > 1 && !allow_tail_call_in_subprogs(env)) { in check_map_func_compatibility()
9669 verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n"); in check_map_func_compatibility()
9670 return -EINVAL; in check_map_func_compatibility()
9678 if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) in check_map_func_compatibility()
9687 if (map->map_type != BPF_MAP_TYPE_RINGBUF) in check_map_func_compatibility()
9691 if (map->map_type != BPF_MAP_TYPE_USER_RINGBUF) in check_map_func_compatibility()
9695 if (map->map_type != BPF_MAP_TYPE_STACK_TRACE) in check_map_func_compatibility()
9700 if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY) in check_map_func_compatibility()
9704 if (map->map_type != BPF_MAP_TYPE_DEVMAP && in check_map_func_compatibility()
9705 map->map_type != BPF_MAP_TYPE_DEVMAP_HASH && in check_map_func_compatibility()
9706 map->map_type != BPF_MAP_TYPE_CPUMAP && in check_map_func_compatibility()
9707 map->map_type != BPF_MAP_TYPE_XSKMAP) in check_map_func_compatibility()
9713 if (map->map_type != BPF_MAP_TYPE_SOCKMAP) in check_map_func_compatibility()
9719 if (map->map_type != BPF_MAP_TYPE_SOCKHASH) in check_map_func_compatibility()
9723 if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE && in check_map_func_compatibility()
9724 map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) in check_map_func_compatibility()
9728 if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY && in check_map_func_compatibility()
9729 map->map_type != BPF_MAP_TYPE_SOCKMAP && in check_map_func_compatibility()
9730 map->map_type != BPF_MAP_TYPE_SOCKHASH) in check_map_func_compatibility()
9734 if (map->map_type != BPF_MAP_TYPE_QUEUE && in check_map_func_compatibility()
9735 map->map_type != BPF_MAP_TYPE_STACK) in check_map_func_compatibility()
9740 if (map->map_type != BPF_MAP_TYPE_QUEUE && in check_map_func_compatibility()
9741 map->map_type != BPF_MAP_TYPE_STACK && in check_map_func_compatibility()
9742 map->map_type != BPF_MAP_TYPE_BLOOM_FILTER) in check_map_func_compatibility()
9746 if (map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY && in check_map_func_compatibility()
9747 map->map_type != BPF_MAP_TYPE_PERCPU_HASH && in check_map_func_compatibility()
9748 map->map_type != BPF_MAP_TYPE_LRU_PERCPU_HASH) in check_map_func_compatibility()
9753 if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) in check_map_func_compatibility()
9758 if (map->map_type != BPF_MAP_TYPE_INODE_STORAGE) in check_map_func_compatibility()
9763 if (map->map_type != BPF_MAP_TYPE_TASK_STORAGE) in check_map_func_compatibility()
9768 if (map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) in check_map_func_compatibility()
9778 map->map_type, func_id_name(func_id), func_id); in check_map_func_compatibility()
9779 return -EINVAL; in check_map_func_compatibility()
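/* Illustrative sketch (not part of verifier.c): one of the pairings
 * check_map_func_compatibility() enforces. bpf_tail_call() is only
 * accepted together with a BPF_MAP_TYPE_PROG_ARRAY, and (per the check
 * above) not from subprograms of a non-JITed program. Names are made up.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 4);
	__type(key, __u32);
	__type(value, __u32);
} jmp_table SEC(".maps");

SEC("xdp")
int dispatcher(struct xdp_md *ctx)
{
	bpf_tail_call(ctx, &jmp_table, 0);
	return XDP_PASS;	/* only reached if the tail call fails */
}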
9786 if (arg_type_is_raw_mem(fn->arg1_type)) in check_raw_mode_ok()
9788 if (arg_type_is_raw_mem(fn->arg2_type)) in check_raw_mode_ok()
9790 if (arg_type_is_raw_mem(fn->arg3_type)) in check_raw_mode_ok()
9792 if (arg_type_is_raw_mem(fn->arg4_type)) in check_raw_mode_ok()
9794 if (arg_type_is_raw_mem(fn->arg5_type)) in check_raw_mode_ok()
9806 bool is_fixed = fn->arg_type[arg] & MEM_FIXED_SIZE; in check_args_pair_invalid()
9807 bool has_size = fn->arg_size[arg] != 0; in check_args_pair_invalid()
9810 if (arg + 1 < ARRAY_SIZE(fn->arg_type)) in check_args_pair_invalid()
9811 is_next_size = arg_type_is_mem_size(fn->arg_type[arg + 1]); in check_args_pair_invalid()
9813 if (base_type(fn->arg_type[arg]) != ARG_PTR_TO_MEM) in check_args_pair_invalid()
9826 if (arg_type_is_mem_size(fn->arg1_type) || in check_arg_pair_ok()
9841 for (i = 0; i < ARRAY_SIZE(fn->arg_type); i++) { in check_btf_id_ok()
9842 if (base_type(fn->arg_type[i]) == ARG_PTR_TO_BTF_ID) in check_btf_id_ok()
9843 return !!fn->arg_btf_id[i]; in check_btf_id_ok()
9844 if (base_type(fn->arg_type[i]) == ARG_PTR_TO_SPIN_LOCK) in check_btf_id_ok()
9845 return fn->arg_btf_id[i] == BPF_PTR_POISON; in check_btf_id_ok()
9846 if (base_type(fn->arg_type[i]) != ARG_PTR_TO_BTF_ID && fn->arg_btf_id[i] && in check_btf_id_ok()
9848 (base_type(fn->arg_type[i]) != ARG_PTR_TO_MEM || in check_btf_id_ok()
9849 !(fn->arg_type[i] & MEM_FIXED_SIZE))) in check_btf_id_ok()
9860 check_btf_id_ok(fn) ? 0 : -EINVAL; in check_func_proto()
9874 bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({ in clear_all_pkt_pointers()
9881 AT_PKT_END = -1,
9882 BEYOND_PKT_END = -2,
9887 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in mark_pkt_end()
9888 struct bpf_reg_state *reg = &state->regs[regn]; in mark_pkt_end()
9890 if (reg->type != PTR_TO_PACKET) in mark_pkt_end()
9901 reg->range = BEYOND_PKT_END; in mark_pkt_end()
9903 reg->range = AT_PKT_END; in mark_pkt_end()
9910 for (i = 0; i < state->acquired_refs; i++) { in release_reference_nomark()
9911 if (state->refs[i].type != REF_TYPE_PTR) in release_reference_nomark()
9913 if (state->refs[i].id == ref_obj_id) { in release_reference_nomark()
9918 return -EINVAL; in release_reference_nomark()
9928 struct bpf_verifier_state *vstate = env->cur_state; in release_reference()
9938 if (reg->ref_obj_id == ref_obj_id) in release_reference()
9950 bpf_for_each_reg_in_vstate(env->cur_state, unused, reg, ({ in invalidate_non_owning_refs()
9951 if (type_is_non_owning_ref(reg->type)) in invalidate_non_owning_refs()
9961 /* after the call registers r0 - r5 were scratched */ in clear_caller_saved_regs()
9984 if (state->curframe + 1 >= MAX_CALL_FRAMES) { in setup_func_entry()
9986 state->curframe + 2); in setup_func_entry()
9987 return -E2BIG; in setup_func_entry()
9990 if (state->frame[state->curframe + 1]) { in setup_func_entry()
9992 state->curframe + 1); in setup_func_entry()
9993 return -EFAULT; in setup_func_entry()
9996 caller = state->frame[state->curframe]; in setup_func_entry()
9999 return -ENOMEM; in setup_func_entry()
10000 state->frame[state->curframe + 1] = callee; in setup_func_entry()
10002 /* callee cannot access r0, r6 - r9 for reading and has to write in setup_func_entry()
10009 state->curframe + 1 /* frameno within this callchain */, in setup_func_entry()
10016 state->curframe++; in setup_func_entry()
10022 state->frame[state->curframe + 1] = NULL; in setup_func_entry()
10031 struct bpf_verifier_log *log = &env->log; in btf_check_func_arg_match()
10042 for (i = 0; i < sub->arg_cnt; i++) { in btf_check_func_arg_match()
10045 struct bpf_subprog_arg_info *arg = &sub->args[i]; in btf_check_func_arg_match()
10047 if (arg->arg_type == ARG_ANYTHING) { in btf_check_func_arg_match()
10048 if (reg->type != SCALAR_VALUE) { in btf_check_func_arg_match()
10050 return -EINVAL; in btf_check_func_arg_match()
10052 } else if (arg->arg_type == ARG_PTR_TO_CTX) { in btf_check_func_arg_match()
10059 if (reg->type != PTR_TO_CTX) { in btf_check_func_arg_match()
10061 return -EINVAL; in btf_check_func_arg_match()
10063 } else if (base_type(arg->arg_type) == ARG_PTR_TO_MEM) { in btf_check_func_arg_match()
10067 if (check_mem_reg(env, reg, regno, arg->mem_size)) in btf_check_func_arg_match()
10068 return -EINVAL; in btf_check_func_arg_match()
10069 if (!(arg->arg_type & PTR_MAYBE_NULL) && (reg->type & PTR_MAYBE_NULL)) { in btf_check_func_arg_match()
10070 bpf_log(log, "arg#%d is expected to be non-NULL\n", i); in btf_check_func_arg_match()
10071 return -EINVAL; in btf_check_func_arg_match()
10073 } else if (base_type(arg->arg_type) == ARG_PTR_TO_ARENA) { in btf_check_func_arg_match()
10079 * run-time debug nightmare. in btf_check_func_arg_match()
10081 if (reg->type != PTR_TO_ARENA && reg->type != SCALAR_VALUE) { in btf_check_func_arg_match()
10083 return -EINVAL; in btf_check_func_arg_match()
10085 } else if (arg->arg_type == (ARG_PTR_TO_DYNPTR | MEM_RDONLY)) { in btf_check_func_arg_match()
10090 ret = process_dynptr_func(env, regno, -1, arg->arg_type, 0); in btf_check_func_arg_match()
10093 } else if (base_type(arg->arg_type) == ARG_PTR_TO_BTF_ID) { in btf_check_func_arg_match()
10097 if (register_is_null(reg) && type_may_be_null(arg->arg_type)) in btf_check_func_arg_match()
10101 err = check_reg_type(env, regno, arg->arg_type, &arg->btf_id, &meta); in btf_check_func_arg_match()
10102 err = err ?: check_func_arg_reg_off(env, reg, regno, arg->arg_type); in btf_check_func_arg_match()
10107 i, arg->arg_type); in btf_check_func_arg_match()
10108 return -EFAULT; in btf_check_func_arg_match()
10117 * EFAULT - there is a verifier bug. Abort verification.
10118 * EINVAL - there is a type mismatch or BTF is not available.
10119 * 0 - BTF matches with what bpf_reg_state expects.
10125 struct bpf_prog *prog = env->prog; in btf_check_subprog_call()
10126 struct btf *btf = prog->aux->btf; in btf_check_subprog_call()
10130 if (!prog->aux->func_info) in btf_check_subprog_call()
10131 return -EINVAL; in btf_check_subprog_call()
10133 btf_id = prog->aux->func_info[subprog].type_id; in btf_check_subprog_call()
10135 return -EFAULT; in btf_check_subprog_call()
10137 if (prog->aux->func_info_aux[subprog].unreliable) in btf_check_subprog_call()
10138 return -EINVAL; in btf_check_subprog_call()
10146 prog->aux->func_info_aux[subprog].unreliable = true; in btf_check_subprog_call()
10154 struct bpf_verifier_state *state = env->cur_state, *callback_state; in push_callback_call()
10158 caller = state->frame[state->curframe]; in push_callback_call()
10159 err = btf_check_subprog_call(env, subprog, caller->regs); in push_callback_call()
10160 if (err == -EFAULT) in push_callback_call()
10167 env->subprog_info[subprog].is_cb = true; in push_callback_call()
10169 !is_callback_calling_kfunc(insn->imm)) { in push_callback_call()
10170 verbose(env, "verifier bug: kfunc %s#%d not marked as callback-calling\n", in push_callback_call()
10171 func_id_name(insn->imm), insn->imm); in push_callback_call()
10172 return -EFAULT; in push_callback_call()
10174 !is_callback_calling_function(insn->imm)) { /* helper */ in push_callback_call()
10175 verbose(env, "verifier bug: helper %s#%d not marked as callback-calling\n", in push_callback_call()
10176 func_id_name(insn->imm), insn->imm); in push_callback_call()
10177 return -EFAULT; in push_callback_call()
10184 env->subprog_info[subprog].is_async_cb = true; in push_callback_call()
10185 async_cb = push_async_cb(env, env->subprog_info[subprog].start, in push_callback_call()
10187 is_bpf_wq_set_callback_impl_kfunc(insn->imm)); in push_callback_call()
10189 return -EFAULT; in push_callback_call()
10190 callee = async_cb->frame[0]; in push_callback_call()
10191 callee->async_entry_cnt = caller->async_entry_cnt + 1; in push_callback_call()
10204 callback_state = push_stack(env, env->subprog_info[subprog].start, insn_idx, false); in push_callback_call()
10206 return -ENOMEM; in push_callback_call()
10213 callback_state->callback_unroll_depth++; in push_callback_call()
10214 callback_state->frame[callback_state->curframe - 1]->callback_depth++; in push_callback_call()
10215 caller->callback_depth = 0; in push_callback_call()
10222 struct bpf_verifier_state *state = env->cur_state; in check_func_call()
10226 target_insn = *insn_idx + insn->imm + 1; in check_func_call()
10230 return -EFAULT; in check_func_call()
10233 caller = state->frame[state->curframe]; in check_func_call()
10234 err = btf_check_subprog_call(env, subprog, caller->regs); in check_func_call()
10235 if (err == -EFAULT) in check_func_call()
10241 if (env->cur_state->active_locks) { in check_func_call()
10244 return -EINVAL; in check_func_call()
10248 if (env->cur_state->active_preempt_locks) { in check_func_call()
10251 return -EINVAL; in check_func_call()
10254 if (env->cur_state->active_irq_id) { in check_func_call()
10257 return -EINVAL; in check_func_call()
10268 if (env->subprog_info[subprog].changes_pkt_data) in check_func_call()
10271 subprog_aux(env, subprog)->called = true; in check_func_call()
10272 clear_caller_saved_regs(env, caller->regs); in check_func_call()
10274 /* All global functions return a 64-bit SCALAR_VALUE */ in check_func_call()
10275 mark_reg_unknown(env, caller->regs, BPF_REG_0); in check_func_call()
10276 caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG; in check_func_call()
10289 clear_caller_saved_regs(env, caller->regs); in check_func_call()
10292 *insn_idx = env->subprog_info[subprog].start - 1; in check_func_call()
10294 if (env->log.level & BPF_LOG_LEVEL) { in check_func_call()
10296 print_verifier_state(env, state, caller->frameno, true); in check_func_call()
10298 print_verifier_state(env, state, state->curframe, true); in check_func_call()
10313 callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1]; in map_set_for_each_callback_args()
10315 callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY; in map_set_for_each_callback_args()
10316 __mark_reg_known_zero(&callee->regs[BPF_REG_2]); in map_set_for_each_callback_args()
10317 callee->regs[BPF_REG_2].map_ptr = caller->regs[BPF_REG_1].map_ptr; in map_set_for_each_callback_args()
10319 callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE; in map_set_for_each_callback_args()
10320 __mark_reg_known_zero(&callee->regs[BPF_REG_3]); in map_set_for_each_callback_args()
10321 callee->regs[BPF_REG_3].map_ptr = caller->regs[BPF_REG_1].map_ptr; in map_set_for_each_callback_args()
10324 callee->regs[BPF_REG_4] = caller->regs[BPF_REG_3]; in map_set_for_each_callback_args()
10327 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in map_set_for_each_callback_args()
10337 /* copy r1 - r5 args that callee can access. The copy includes parent in set_callee_state()
10341 callee->regs[i] = caller->regs[i]; in set_callee_state()
10350 struct bpf_insn_aux_data *insn_aux = &env->insn_aux_data[insn_idx]; in set_map_elem_callback_state()
10355 map = insn_aux->map_ptr_state.map_ptr; in set_map_elem_callback_state()
10356 if (!map->ops->map_set_for_each_callback_args || in set_map_elem_callback_state()
10357 !map->ops->map_for_each_callback) { in set_map_elem_callback_state()
10359 return -ENOTSUPP; in set_map_elem_callback_state()
10362 err = map->ops->map_set_for_each_callback_args(env, caller, callee); in set_map_elem_callback_state()
10366 callee->in_callback_fn = true; in set_map_elem_callback_state()
10367 callee->callback_ret_range = retval_range(0, 1); in set_map_elem_callback_state()
10380 callee->regs[BPF_REG_1].type = SCALAR_VALUE; in set_loop_callback_state()
10381 callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3]; in set_loop_callback_state()
10384 __mark_reg_not_init(env, &callee->regs[BPF_REG_3]); in set_loop_callback_state()
10385 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); in set_loop_callback_state()
10386 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_loop_callback_state()
10388 callee->in_callback_fn = true; in set_loop_callback_state()
10389 callee->callback_ret_range = retval_range(0, 1); in set_loop_callback_state()
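/* Illustrative sketch (not part of verifier.c): the callback shape that
 * set_loop_callback_state() sets up. R1 is the loop index (SCALAR_VALUE),
 * R2 is the opaque ctx copied from R3 of the bpf_loop() call, and the
 * return value must stay within the [0, 1] range recorded above
 * (0 = continue, 1 = stop). Names are made up for the example.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

struct loop_ctx {
	__u64 sum;
};

static long loop_cb(__u64 index, void *data)
{
	struct loop_ctx *lc = data;

	lc->sum += index;
	return index < 100 ? 0 : 1;
}

SEC("tp/syscalls/sys_enter_getpid")
int run_loop(void *ctx)
{
	struct loop_ctx lc = {};

	bpf_loop(1000, loop_cb, &lc, 0);
	return 0;
}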
10398 struct bpf_map *map_ptr = caller->regs[BPF_REG_1].map_ptr; in set_timer_callback_state()
10403 callee->regs[BPF_REG_1].type = CONST_PTR_TO_MAP; in set_timer_callback_state()
10404 __mark_reg_known_zero(&callee->regs[BPF_REG_1]); in set_timer_callback_state()
10405 callee->regs[BPF_REG_1].map_ptr = map_ptr; in set_timer_callback_state()
10407 callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY; in set_timer_callback_state()
10408 __mark_reg_known_zero(&callee->regs[BPF_REG_2]); in set_timer_callback_state()
10409 callee->regs[BPF_REG_2].map_ptr = map_ptr; in set_timer_callback_state()
10411 callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE; in set_timer_callback_state()
10412 __mark_reg_known_zero(&callee->regs[BPF_REG_3]); in set_timer_callback_state()
10413 callee->regs[BPF_REG_3].map_ptr = map_ptr; in set_timer_callback_state()
10416 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); in set_timer_callback_state()
10417 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_timer_callback_state()
10418 callee->in_async_callback_fn = true; in set_timer_callback_state()
10419 callee->callback_ret_range = retval_range(0, 1); in set_timer_callback_state()
10433 callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1]; in set_find_vma_callback_state()
10435 callee->regs[BPF_REG_2].type = PTR_TO_BTF_ID; in set_find_vma_callback_state()
10436 __mark_reg_known_zero(&callee->regs[BPF_REG_2]); in set_find_vma_callback_state()
10437 callee->regs[BPF_REG_2].btf = btf_vmlinux; in set_find_vma_callback_state()
10438 callee->regs[BPF_REG_2].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_VMA]; in set_find_vma_callback_state()
10441 callee->regs[BPF_REG_3] = caller->regs[BPF_REG_4]; in set_find_vma_callback_state()
10444 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); in set_find_vma_callback_state()
10445 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_find_vma_callback_state()
10446 callee->in_callback_fn = true; in set_find_vma_callback_state()
10447 callee->callback_ret_range = retval_range(0, 1); in set_find_vma_callback_state()
10460 __mark_reg_not_init(env, &callee->regs[BPF_REG_0]); in set_user_ringbuf_callback_state()
10461 mark_dynptr_cb_reg(env, &callee->regs[BPF_REG_1], BPF_DYNPTR_TYPE_LOCAL); in set_user_ringbuf_callback_state()
10462 callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3]; in set_user_ringbuf_callback_state()
10465 __mark_reg_not_init(env, &callee->regs[BPF_REG_3]); in set_user_ringbuf_callback_state()
10466 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); in set_user_ringbuf_callback_state()
10467 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_user_ringbuf_callback_state()
10469 callee->in_callback_fn = true; in set_user_ringbuf_callback_state()
10470 callee->callback_ret_range = retval_range(0, 1); in set_user_ringbuf_callback_state()
10488 field = reg_find_field_offset(&caller->regs[BPF_REG_1], caller->regs[BPF_REG_1].off, in set_rbtree_add_callback_state()
10490 if (!field || !field->graph_root.value_btf_id) in set_rbtree_add_callback_state()
10491 return -EFAULT; in set_rbtree_add_callback_state()
10493 mark_reg_graph_node(callee->regs, BPF_REG_1, &field->graph_root); in set_rbtree_add_callback_state()
10494 ref_set_non_owning(env, &callee->regs[BPF_REG_1]); in set_rbtree_add_callback_state()
10495 mark_reg_graph_node(callee->regs, BPF_REG_2, &field->graph_root); in set_rbtree_add_callback_state()
10496 ref_set_non_owning(env, &callee->regs[BPF_REG_2]); in set_rbtree_add_callback_state()
10498 __mark_reg_not_init(env, &callee->regs[BPF_REG_3]); in set_rbtree_add_callback_state()
10499 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); in set_rbtree_add_callback_state()
10500 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_rbtree_add_callback_state()
10501 callee->in_callback_fn = true; in set_rbtree_add_callback_state()
10502 callee->callback_ret_range = retval_range(0, 1); in set_rbtree_add_callback_state()
10514 struct bpf_verifier_state *state = env->cur_state; in in_rbtree_lock_required_cb()
10515 struct bpf_insn *insn = env->prog->insnsi; in in_rbtree_lock_required_cb()
10519 if (!state->curframe) in in_rbtree_lock_required_cb()
10522 callee = state->frame[state->curframe]; in in_rbtree_lock_required_cb()
10524 if (!callee->in_callback_fn) in in_rbtree_lock_required_cb()
10527 kfunc_btf_id = insn[callee->callsite].imm; in in_rbtree_lock_required_cb()
10535 return range.minval <= reg->s32_min_value && reg->s32_max_value <= range.maxval; in retval_range_within()
10537 return range.minval <= reg->smin_value && reg->smax_value <= range.maxval; in retval_range_within()
10542 struct bpf_verifier_state *state = env->cur_state, *prev_st; in prepare_func_exit()
10548 callee = state->frame[state->curframe]; in prepare_func_exit()
10549 r0 = &callee->regs[BPF_REG_0]; in prepare_func_exit()
10550 if (r0->type == PTR_TO_STACK) { in prepare_func_exit()
10558 return -EINVAL; in prepare_func_exit()
10561 caller = state->frame[state->curframe - 1]; in prepare_func_exit()
10562 if (callee->in_callback_fn) { in prepare_func_exit()
10563 if (r0->type != SCALAR_VALUE) { in prepare_func_exit()
10565 return -EACCES; in prepare_func_exit()
10569 err = mark_reg_read(env, r0, r0->parent, REG_LIVE_READ64); in prepare_func_exit()
10575 if (!retval_range_within(callee->callback_ret_range, r0, false)) { in prepare_func_exit()
10576 verbose_invalid_scalar(env, r0, callee->callback_ret_range, in prepare_func_exit()
10578 return -EINVAL; in prepare_func_exit()
10580 if (!calls_callback(env, callee->callsite)) { in prepare_func_exit()
10582 *insn_idx, callee->callsite); in prepare_func_exit()
10583 return -EFAULT; in prepare_func_exit()
10587 caller->regs[BPF_REG_0] = *r0; in prepare_func_exit()
10594 in_callback_fn = callee->in_callback_fn; in prepare_func_exit()
10596 *insn_idx = callee->callsite; in prepare_func_exit()
10598 *insn_idx = callee->callsite + 1; in prepare_func_exit()
10600 if (env->log.level & BPF_LOG_LEVEL) { in prepare_func_exit()
10602 print_verifier_state(env, state, callee->frameno, true); in prepare_func_exit()
10604 print_verifier_state(env, state, caller->frameno, true); in prepare_func_exit()
10609 state->frame[state->curframe--] = NULL; in prepare_func_exit()
10614 * void cb(int idx, struct ctx *ctx) { ctx->i++; ... } in prepare_func_exit()
10647 ret_reg->smax_value = meta->msize_max_value; in do_refine_retval_range()
10648 ret_reg->s32_max_value = meta->msize_max_value; in do_refine_retval_range()
10649 ret_reg->smin_value = -MAX_ERRNO; in do_refine_retval_range()
10650 ret_reg->s32_min_value = -MAX_ERRNO; in do_refine_retval_range()
10654 ret_reg->umax_value = nr_cpu_ids - 1; in do_refine_retval_range()
10655 ret_reg->u32_max_value = nr_cpu_ids - 1; in do_refine_retval_range()
10656 ret_reg->smax_value = nr_cpu_ids - 1; in do_refine_retval_range()
10657 ret_reg->s32_max_value = nr_cpu_ids - 1; in do_refine_retval_range()
10658 ret_reg->umin_value = 0; in do_refine_retval_range()
10659 ret_reg->u32_min_value = 0; in do_refine_retval_range()
10660 ret_reg->smin_value = 0; in do_refine_retval_range()
10661 ret_reg->s32_min_value = 0; in do_refine_retval_range()
10673 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx]; in record_func_map()
10674 struct bpf_map *map = meta->map_ptr; in record_func_map()
10690 return -EINVAL; in record_func_map()
10693 /* In case of read-only, some additional restrictions in record_func_map()
10697 if ((map->map_flags & BPF_F_RDONLY_PROG) && in record_func_map()
10703 return -EACCES; in record_func_map()
10706 if (!aux->map_ptr_state.map_ptr) in record_func_map()
10707 bpf_map_ptr_store(aux, meta->map_ptr, in record_func_map()
10708 !meta->map_ptr->bypass_spec_v1, false); in record_func_map()
10709 else if (aux->map_ptr_state.map_ptr != meta->map_ptr) in record_func_map()
10710 bpf_map_ptr_store(aux, meta->map_ptr, in record_func_map()
10711 !meta->map_ptr->bypass_spec_v1, true); in record_func_map()
10719 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx]; in record_func_key()
10721 struct bpf_map *map = meta->map_ptr; in record_func_key()
10727 if (!map || map->map_type != BPF_MAP_TYPE_PROG_ARRAY) { in record_func_key()
10729 return -EINVAL; in record_func_key()
10733 val = reg->var_off.value; in record_func_key()
10734 max = map->max_entries; in record_func_key()
10754 struct bpf_verifier_state *state = env->cur_state; in check_reference_leak()
10758 if (!exception_exit && cur_func(env)->frameno) in check_reference_leak()
10761 for (i = 0; i < state->acquired_refs; i++) { in check_reference_leak()
10762 if (state->refs[i].type != REF_TYPE_PTR) in check_reference_leak()
10765 state->refs[i].id, state->refs[i].insn_idx); in check_reference_leak()
10768 return refs_lingering ? -EINVAL : 0; in check_reference_leak()
10775 if (check_lock && env->cur_state->active_locks) { in check_resource_leak()
10776 verbose(env, "%s cannot be used inside bpf_spin_lock-ed region\n", prefix); in check_resource_leak()
10777 return -EINVAL; in check_resource_leak()
10786 if (check_lock && env->cur_state->active_irq_id) { in check_resource_leak()
10787 verbose(env, "%s cannot be used inside bpf_local_irq_save-ed region\n", prefix); in check_resource_leak()
10788 return -EINVAL; in check_resource_leak()
10791 if (check_lock && env->cur_state->active_rcu_lock) { in check_resource_leak()
10792 verbose(env, "%s cannot be used inside bpf_rcu_read_lock-ed region\n", prefix); in check_resource_leak()
10793 return -EINVAL; in check_resource_leak()
10796 if (check_lock && env->cur_state->active_preempt_locks) { in check_resource_leak()
10797 verbose(env, "%s cannot be used inside bpf_preempt_disable-ed region\n", prefix); in check_resource_leak()
10798 return -EINVAL; in check_resource_leak()
10809 struct bpf_map *fmt_map = fmt_reg->map_ptr; in check_bpf_snprintf_call()
10816 if (data_len_reg->var_off.value % 8) in check_bpf_snprintf_call()
10817 return -EINVAL; in check_bpf_snprintf_call()
10818 num_args = data_len_reg->var_off.value / 8; in check_bpf_snprintf_call()
10823 fmt_map_off = fmt_reg->off + fmt_reg->var_off.value; in check_bpf_snprintf_call()
10824 err = fmt_map->ops->map_direct_value_addr(fmt_map, &fmt_addr, in check_bpf_snprintf_call()
10828 return -EFAULT; in check_bpf_snprintf_call()
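/* Illustrative sketch (not part of verifier.c): what check_bpf_snprintf_call()
 * requires of the caller. The format string must be a constant NUL-terminated
 * string in a read-only map (a static const literal ends up in .rodata), and
 * the data_len argument must be a multiple of 8: one u64 slot per conversion
 * specifier. Names are made up for the example.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

SEC("tp/syscalls/sys_enter_getpid")
int fmt_example(void *ctx)
{
	static const char fmt[] = "pid=%d";
	char out[32];
	__u64 args[1];

	args[0] = bpf_get_current_pid_tgid() >> 32;
	bpf_snprintf(out, sizeof(out), fmt, args, sizeof(args));	/* data_len == 8 */
	return 0;
}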
10844 enum bpf_prog_type type = resolve_prog_type(env->prog); in check_get_func_ip()
10848 if (!bpf_prog_has_trampoline(env->prog)) { in check_get_func_ip()
10851 return -ENOTSUPP; in check_get_func_ip()
10860 return -ENOTSUPP; in check_get_func_ip()
10865 return &env->insn_aux_data[env->insn_idx]; in cur_aux()
10882 struct bpf_loop_inline_state *state = &cur_aux(env)->loop_inline_state; in update_loop_inline_state()
10884 if (!state->initialized) { in update_loop_inline_state()
10885 state->initialized = 1; in update_loop_inline_state()
10886 state->fit_for_inline = loop_flag_is_zero(env); in update_loop_inline_state()
10887 state->callback_subprogno = subprogno; in update_loop_inline_state()
10891 if (!state->fit_for_inline) in update_loop_inline_state()
10894 state->fit_for_inline = (loop_flag_is_zero(env) && in update_loop_inline_state()
10895 state->callback_subprogno == subprogno); in update_loop_inline_state()
10917 return -ERANGE; in get_helper_proto()
10919 if (!env->ops->get_func_proto) in get_helper_proto()
10920 return -EINVAL; in get_helper_proto()
10922 *ptr = env->ops->get_func_proto(func_id, env->prog); in get_helper_proto()
10923 return *ptr ? 0 : -EINVAL; in get_helper_proto()
10929 enum bpf_prog_type prog_type = resolve_prog_type(env->prog); in check_helper_call()
10941 func_id = insn->imm; in check_helper_call()
10942 err = get_helper_proto(env, insn->imm, &fn); in check_helper_call()
10943 if (err == -ERANGE) { in check_helper_call()
10945 return -EINVAL; in check_helper_call()
10954 /* eBPF programs must be GPL compatible to use GPL-ed functions */ in check_helper_call()
10955 if (!env->prog->gpl_compatible && fn->gpl_only) { in check_helper_call()
10956 verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n"); in check_helper_call()
10957 return -EINVAL; in check_helper_call()
10960 if (fn->allowed && !fn->allowed(env->prog)) { in check_helper_call()
10962 return -EINVAL; in check_helper_call()
10965 if (!in_sleepable(env) && fn->might_sleep) { in check_helper_call()
10966 verbose(env, "helper call might sleep in a non-sleepable prog\n"); in check_helper_call()
10967 return -EINVAL; in check_helper_call()
10972 if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) { in check_helper_call()
10975 return -EINVAL; in check_helper_call()
10979 meta.pkt_access = fn->pkt_access; in check_helper_call()
10988 if (env->cur_state->active_rcu_lock) { in check_helper_call()
10989 if (fn->might_sleep) { in check_helper_call()
10992 return -EINVAL; in check_helper_call()
10996 env->insn_aux_data[insn_idx].storage_get_func_atomic = true; in check_helper_call()
10999 if (env->cur_state->active_preempt_locks) { in check_helper_call()
11000 if (fn->might_sleep) { in check_helper_call()
11001 verbose(env, "sleepable helper %s#%d in non-preemptible region\n", in check_helper_call()
11003 return -EINVAL; in check_helper_call()
11007 env->insn_aux_data[insn_idx].storage_get_func_atomic = true; in check_helper_call()
11010 if (env->cur_state->active_irq_id) { in check_helper_call()
11011 if (fn->might_sleep) { in check_helper_call()
11012 verbose(env, "sleepable helper %s#%d in IRQ-disabled region\n", in check_helper_call()
11014 return -EINVAL; in check_helper_call()
11018 env->insn_aux_data[insn_idx].storage_get_func_atomic = true; in check_helper_call()
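/* Hedged illustration of the three checks above: once the program is inside
 * an RCU, preempt-disabled or IRQ-disabled region, helpers with ->might_sleep
 * set are refused. bpf_copy_from_user() is such a sleepable helper; the
 * preempt kfunc declarations follow the selftests' conventions.
 */
extern void bpf_preempt_disable(void) __ksym;
extern void bpf_preempt_enable(void) __ksym;

static __always_inline int copy_cmd(void *dst, const void *user_ptr)
{
	int err;

	bpf_preempt_disable();
	err = bpf_copy_from_user(dst, 16, user_ptr);	/* rejected: sleepable helper
							 * in non-preemptible region */
	bpf_preempt_enable();
	return err;
}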
11042 BPF_WRITE, -1, false, false); in check_helper_call()
11050 err = -EINVAL; in check_helper_call()
11055 if (arg_type_is_dynptr(fn->arg_type[meta.release_regno - BPF_REG_1])) { in check_helper_call()
11058 return -EFAULT; in check_helper_call()
11067 err = release_reference_nomark(env->cur_state, ref_obj_id); in check_helper_call()
11069 bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({ in check_helper_call()
11070 if (reg->ref_obj_id == ref_obj_id) { in check_helper_call()
11071 if (in_rcu && (reg->type & MEM_ALLOC) && (reg->type & MEM_PERCPU)) { in check_helper_call()
11072 reg->ref_obj_id = 0; in check_helper_call()
11073 reg->type &= ~MEM_ALLOC; in check_helper_call()
11074 reg->type |= MEM_RCU; in check_helper_call()
11107 verbose(env, "get_local_storage() doesn't support non-zero flags\n"); in check_helper_call()
11108 return -EINVAL; in check_helper_call()
11134 if (cur_func(env)->callback_depth < regs[BPF_REG_1].umax_value) { in check_helper_call()
11138 cur_func(env)->callback_depth = 0; in check_helper_call()
11139 if (env->log.level & BPF_LOG_LEVEL2) in check_helper_call()
11141 env->cur_state->curframe); in check_helper_call()
11148 return -EACCES; in check_helper_call()
11153 env->prog->expected_attach_type == BPF_LSM_CGROUP) { in check_helper_call()
11154 if (!env->prog->aux->attach_func_proto->type) { in check_helper_call()
11159 return -EINVAL; in check_helper_call()
11170 return -EFAULT; in check_helper_call()
11175 return -EFAULT; in check_helper_call()
11179 return -EFAULT; in check_helper_call()
11206 return -EFAULT; in check_helper_call()
11210 return -EFAULT; in check_helper_call()
11226 if (reg->type & MEM_RCU) { in check_helper_call()
11227 type = btf_type_by_id(reg->btf, reg->btf_id); in check_helper_call()
11230 return -EFAULT; in check_helper_call()
11233 env->insn_aux_data[insn_idx].call_with_percpu_alloc_ptr = true; in check_helper_call()
11252 /* helper call returns 64-bit value. */ in check_helper_call()
11256 ret_type = fn->ret_type; in check_helper_call()
11277 return -EINVAL; in check_helper_call()
11281 can_elide_value_nullness(meta.map_ptr->map_type) && in check_helper_call()
11283 meta.const_map_key < meta.map_ptr->max_entries) in check_helper_call()
11290 btf_record_has_field(meta.map_ptr->record, BPF_SPIN_LOCK)) { in check_helper_call()
11291 regs[BPF_REG_0].id = ++env->id_gen; in check_helper_call()
11325 tname = btf_name_by_offset(meta.ret_btf, t->name_off); in check_helper_call()
11328 return -EINVAL; in check_helper_call()
11358 ret_btf = meta.kptr_field->kptr.btf; in check_helper_call()
11359 ret_btf_id = meta.kptr_field->kptr.btf_id; in check_helper_call()
11362 if (meta.kptr_field->type == BPF_KPTR_PERCPU) in check_helper_call()
11366 if (fn->ret_btf_id == BPF_PTR_POISON) { in check_helper_call()
11368 verbose(env, "func %s has non-overwritten BPF_PTR_POISON return type\n", in check_helper_call()
11370 return -EINVAL; in check_helper_call()
11373 ret_btf_id = *fn->ret_btf_id; in check_helper_call()
11379 return -EINVAL; in check_helper_call()
11388 return -EINVAL; in check_helper_call()
11392 regs[BPF_REG_0].id = ++env->id_gen; in check_helper_call()
11397 return -EFAULT; in check_helper_call()
11417 err = do_refine_retval_range(env, regs, fn->ret_type, func_id, &meta); in check_helper_call()
11427 !env->prog->has_callchain_buf) { in check_helper_call()
11434 err = -ENOTSUPP; in check_helper_call()
11442 env->prog->has_callchain_buf = true; in check_helper_call()
11446 env->prog->call_get_stack = true; in check_helper_call()
11450 return -ENOTSUPP; in check_helper_call()
11451 env->prog->call_get_func_ip = true; in check_helper_call()
11469 reg->live |= REG_LIVE_WRITTEN; in mark_btf_func_reg_size()
11470 reg->subreg_def = reg_size == sizeof(u64) ? in mark_btf_func_reg_size()
11471 DEF_NOT_SUBREG : env->insn_idx + 1; in mark_btf_func_reg_size()
11476 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); in mark_btf_func_reg_size()
11478 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ32); in mark_btf_func_reg_size()
11485 return meta->kfunc_flags & KF_ACQUIRE; in is_kfunc_acquire()
11490 return meta->kfunc_flags & KF_RELEASE; in is_kfunc_release()
11495 return (meta->kfunc_flags & KF_TRUSTED_ARGS) || is_kfunc_release(meta); in is_kfunc_trusted_args()
11500 return meta->kfunc_flags & KF_SLEEPABLE; in is_kfunc_sleepable()
11505 return meta->kfunc_flags & KF_DESTRUCTIVE; in is_kfunc_destructive()
11510 return meta->kfunc_flags & KF_RCU; in is_kfunc_rcu()
11515 return meta->kfunc_flags & KF_RCU_PROTECTED; in is_kfunc_rcu_protected()
11524 t = btf_type_skip_modifiers(btf, arg->type, NULL); in is_kfunc_arg_mem_size()
11525 if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE) in is_kfunc_arg_mem_size()
11537 t = btf_type_skip_modifiers(btf, arg->type, NULL); in is_kfunc_arg_const_mem_size()
11538 if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE) in is_kfunc_arg_const_mem_size()
11601 param_name = btf_name_by_offset(btf, arg->name_off); in is_kfunc_arg_scalar_with_name()
11636 t = btf_type_skip_modifiers(btf, arg->type, NULL); in BTF_ID()
11641 t = btf_type_skip_modifiers(btf, t->type, &res_id); in BTF_ID()
11682 t = btf_type_resolve_func_ptr(btf, arg->type, NULL); in is_kfunc_arg_callback()
11704 member_type = btf_type_skip_modifiers(btf, member->type, NULL); in __btf_type_is_scalar_struct()
11716 if (!array->nelems) in __btf_type_is_scalar_struct()
11718 member_type = btf_type_skip_modifiers(btf, array->type, NULL); in __btf_type_is_scalar_struct()
11865 if (meta->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl] && in BTF_ID()
11866 meta->arg_owning_ref) { in BTF_ID()
11870 return meta->kfunc_flags & KF_RET_NULL; in BTF_ID()
11875 return meta->func_id == special_kfunc_list[KF_bpf_rcu_read_lock]; in is_kfunc_bpf_rcu_read_lock()
11880 return meta->func_id == special_kfunc_list[KF_bpf_rcu_read_unlock]; in is_kfunc_bpf_rcu_read_unlock()
11885 return meta->func_id == special_kfunc_list[KF_bpf_preempt_disable]; in is_kfunc_bpf_preempt_disable()
11890 return meta->func_id == special_kfunc_list[KF_bpf_preempt_enable]; in is_kfunc_bpf_preempt_enable()
11905 if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx]) in get_kfunc_ptr_arg_type()
11913 if (btf_is_prog_ctx_type(&env->log, meta->btf, t, resolve_prog_type(env->prog), argno)) in get_kfunc_ptr_arg_type()
11916 if (is_kfunc_arg_nullable(meta->btf, &args[argno]) && register_is_null(reg)) in get_kfunc_ptr_arg_type()
11919 if (is_kfunc_arg_alloc_obj(meta->btf, &args[argno])) in get_kfunc_ptr_arg_type()
11922 if (is_kfunc_arg_refcounted_kptr(meta->btf, &args[argno])) in get_kfunc_ptr_arg_type()
11925 if (is_kfunc_arg_dynptr(meta->btf, &args[argno])) in get_kfunc_ptr_arg_type()
11931 if (is_kfunc_arg_list_head(meta->btf, &args[argno])) in get_kfunc_ptr_arg_type()
11934 if (is_kfunc_arg_list_node(meta->btf, &args[argno])) in get_kfunc_ptr_arg_type()
11937 if (is_kfunc_arg_rbtree_root(meta->btf, &args[argno])) in get_kfunc_ptr_arg_type()
11940 if (is_kfunc_arg_rbtree_node(meta->btf, &args[argno])) in get_kfunc_ptr_arg_type()
11943 if (is_kfunc_arg_const_str(meta->btf, &args[argno])) in get_kfunc_ptr_arg_type()
11946 if (is_kfunc_arg_map(meta->btf, &args[argno])) in get_kfunc_ptr_arg_type()
11949 if (is_kfunc_arg_wq(meta->btf, &args[argno])) in get_kfunc_ptr_arg_type()
11952 if (is_kfunc_arg_irq_flag(meta->btf, &args[argno])) in get_kfunc_ptr_arg_type()
11955 if ((base_type(reg->type) == PTR_TO_BTF_ID || reg2btf_ids[base_type(reg->type)])) { in get_kfunc_ptr_arg_type()
11958 meta->func_name, argno, btf_type_str(ref_t), ref_tname); in get_kfunc_ptr_arg_type()
11959 return -EINVAL; in get_kfunc_ptr_arg_type()
11964 if (is_kfunc_arg_callback(env, meta->btf, &args[argno])) in get_kfunc_ptr_arg_type()
11968 (is_kfunc_arg_mem_size(meta->btf, &args[argno + 1], &regs[regno + 1]) || in get_kfunc_ptr_arg_type()
11969 is_kfunc_arg_const_mem_size(meta->btf, &args[argno + 1], &regs[regno + 1]))) in get_kfunc_ptr_arg_type()
11977 if (!btf_type_is_scalar(ref_t) && !__btf_type_is_scalar_struct(env, meta->btf, ref_t, 0) && in get_kfunc_ptr_arg_type()
11981 return -EINVAL; in get_kfunc_ptr_arg_type()
12001 if (base_type(reg->type) == PTR_TO_BTF_ID) { in process_kf_arg_ptr_to_btf_id()
12002 reg_btf = reg->btf; in process_kf_arg_ptr_to_btf_id()
12003 reg_ref_id = reg->btf_id; in process_kf_arg_ptr_to_btf_id()
12006 reg_ref_id = *reg2btf_ids[base_type(reg->type)]; in process_kf_arg_ptr_to_btf_id()
12010 * or releasing a reference, or are no-cast aliases. We do _not_ in process_kf_arg_ptr_to_btf_id()
12033 if ((is_kfunc_release(meta) && reg->ref_obj_id) || in process_kf_arg_ptr_to_btf_id()
12034 btf_type_ids_nocast_alias(&env->log, reg_btf, reg_ref_id, meta->btf, ref_id)) in process_kf_arg_ptr_to_btf_id()
12038 (reg->off || !tnum_is_const(reg->var_off) || in process_kf_arg_ptr_to_btf_id()
12039 reg->var_off.value)); in process_kf_arg_ptr_to_btf_id()
12042 reg_ref_tname = btf_name_by_offset(reg_btf, reg_ref_t->name_off); in process_kf_arg_ptr_to_btf_id()
12043 …struct_same = btf_struct_ids_match(&env->log, reg_btf, reg_ref_id, reg->off, meta->btf, ref_id, st… in process_kf_arg_ptr_to_btf_id()
12045 * actually use it -- it must cast to the underlying type. So we allow in process_kf_arg_ptr_to_btf_id()
12051 meta->func_name, argno, btf_type_str(ref_t), ref_tname, argno + 1, in process_kf_arg_ptr_to_btf_id()
12053 return -EINVAL; in process_kf_arg_ptr_to_btf_id()
12065 if (meta->func_id == special_kfunc_list[KF_bpf_local_irq_save]) { in process_irq_flag()
12067 } else if (meta->func_id == special_kfunc_list[KF_bpf_local_irq_restore]) { in process_irq_flag()
12071 return -EFAULT; in process_irq_flag()
12076 verbose(env, "expected uninitialized irq flag as arg#%d\n", regno - 1); in process_irq_flag()
12077 return -EINVAL; in process_irq_flag()
12080 err = check_mem_access(env, env->insn_idx, regno, 0, BPF_DW, BPF_WRITE, -1, false, false); in process_irq_flag()
12084 err = mark_stack_slot_irq_flag(env, meta, reg, env->insn_idx); in process_irq_flag()
12090 verbose(env, "expected an initialized irq flag as arg#%d\n", regno - 1); in process_irq_flag()
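/* Hedged sketch of the calling convention enforced above: the flag argument
 * of the bpf_local_irq_save()/bpf_local_irq_restore() kfuncs points at a
 * stack slot that must be uninitialized before _save (the save marks it as an
 * irq-flag slot) and initialized before the matching _restore. The kfunc
 * declarations are assumed to follow the selftests.
 */
static __always_inline void irq_protected_update(long *counter)
{
	unsigned long flags;

	bpf_local_irq_save(&flags);	/* slot becomes an active irq flag */
	(*counter)++;			/* non-sleepable work with IRQs off */
	bpf_local_irq_restore(&flags);	/* same, now-initialized slot */
}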
12110 if (!env->cur_state->active_locks) { in ref_set_non_owning()
12112 return -EFAULT; in ref_set_non_owning()
12115 if (type_flag(reg->type) & NON_OWN_REF) { in ref_set_non_owning()
12117 return -EFAULT; in ref_set_non_owning()
12120 reg->type |= NON_OWN_REF; in ref_set_non_owning()
12121 if (rec->refcount_off >= 0) in ref_set_non_owning()
12122 reg->type |= MEM_RCU; in ref_set_non_owning()
12129 struct bpf_verifier_state *state = env->cur_state; in ref_convert_owning_non_owning()
12136 "owning -> non-owning conversion\n"); in ref_convert_owning_non_owning()
12137 return -EFAULT; in ref_convert_owning_non_owning()
12140 for (i = 0; i < state->acquired_refs; i++) { in ref_convert_owning_non_owning()
12141 if (state->refs[i].id != ref_obj_id) in ref_convert_owning_non_owning()
12147 bpf_for_each_reg_in_vstate(env->cur_state, unused, reg, ({ in ref_convert_owning_non_owning()
12148 if (reg->ref_obj_id == ref_obj_id) { in ref_convert_owning_non_owning()
12149 reg->ref_obj_id = 0; in ref_convert_owning_non_owning()
12157 return -EFAULT; in ref_convert_owning_non_owning()
12169 * allocation, the verifier preserves a unique reg->id for it.
12181 * The active_lock.ptr in case of map values is the reg->map_ptr, and in case of
12182 * allocated objects is the reg->btf pointer.
12184 * The active_lock.id is non-unique for maps supporting direct_value_addr, as we
12195 * assigns a fresh reg->id to the lookup, so while lookups into distinct inner
12197 * will get different reg->id assigned to each lookup, hence different
12200 * In case of allocated objects, active_lock.ptr is the reg->btf, and the
12201 * reg->id is a unique ID preserved after the NULL pointer check on the pointer
12202 * returned from bpf_obj_new. Each allocation receives a new reg->id.
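/* Hedged sketch of the (active_lock.ptr, id) pairing described above, using a
 * lock embedded in a map value: the unlock must go through the same lookup
 * (same map_ptr and same reg->id) that the lock was taken through. Includes
 * and SEC() are the usual libbpf conventions.
 */
struct val {
	struct bpf_spin_lock lock;
	long data;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 2);
	__type(key, int);
	__type(value, struct val);
} vals SEC(".maps");

static __always_inline void bump(int key)
{
	struct val *v = bpf_map_lookup_elem(&vals, &key);

	if (!v)
		return;
	bpf_spin_lock(&v->lock);	/* records (map_ptr, v's reg->id) */
	v->data++;
	bpf_spin_unlock(&v->lock);	/* must match that same pair */
}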
12210 switch ((int)reg->type) { in check_reg_allocation_locked()
12212 ptr = reg->map_ptr; in check_reg_allocation_locked()
12215 ptr = reg->btf; in check_reg_allocation_locked()
12219 return -EFAULT; in check_reg_allocation_locked()
12221 id = reg->id; in check_reg_allocation_locked()
12223 if (!env->cur_state->active_locks) in check_reg_allocation_locked()
12224 return -EINVAL; in check_reg_allocation_locked()
12225 s = find_lock_state(env->cur_state, REF_TYPE_LOCK, id, ptr); in check_reg_allocation_locked()
12228 return -EINVAL; in check_reg_allocation_locked()
12278 return bpf_pseudo_kfunc_call(insn) && insn->off == 0 && in is_bpf_throw_kfunc()
12279 insn->imm == special_kfunc_list[KF_bpf_throw]; in is_bpf_throw_kfunc()
12362 if (meta->btf != btf_vmlinux) { in __process_kf_arg_ptr_to_graph_root()
12364 return -EFAULT; in __process_kf_arg_ptr_to_graph_root()
12367 if (!check_kfunc_is_graph_root_api(env, head_field_type, meta->func_id)) in __process_kf_arg_ptr_to_graph_root()
12368 return -EFAULT; in __process_kf_arg_ptr_to_graph_root()
12371 if (!tnum_is_const(reg->var_off)) { in __process_kf_arg_ptr_to_graph_root()
12375 return -EINVAL; in __process_kf_arg_ptr_to_graph_root()
12379 head_off = reg->off + reg->var_off.value; in __process_kf_arg_ptr_to_graph_root()
12383 return -EINVAL; in __process_kf_arg_ptr_to_graph_root()
12389 rec->spin_lock_off, head_type_name); in __process_kf_arg_ptr_to_graph_root()
12390 return -EINVAL; in __process_kf_arg_ptr_to_graph_root()
12395 return -EFAULT; in __process_kf_arg_ptr_to_graph_root()
12406 &meta->arg_list_head.field); in process_kf_arg_ptr_to_list_head()
12414 &meta->arg_rbtree_root.field); in process_kf_arg_ptr_to_rbtree_root()
12430 if (meta->btf != btf_vmlinux) { in __process_kf_arg_ptr_to_graph_node()
12432 return -EFAULT; in __process_kf_arg_ptr_to_graph_node()
12435 if (!check_kfunc_is_graph_node_api(env, node_field_type, meta->func_id)) in __process_kf_arg_ptr_to_graph_node()
12436 return -EFAULT; in __process_kf_arg_ptr_to_graph_node()
12439 if (!tnum_is_const(reg->var_off)) { in __process_kf_arg_ptr_to_graph_node()
12443 return -EINVAL; in __process_kf_arg_ptr_to_graph_node()
12446 node_off = reg->off + reg->var_off.value; in __process_kf_arg_ptr_to_graph_node()
12450 return -EINVAL; in __process_kf_arg_ptr_to_graph_node()
12455 et = btf_type_by_id(field->graph_root.btf, field->graph_root.value_btf_id); in __process_kf_arg_ptr_to_graph_node()
12456 t = btf_type_by_id(reg->btf, reg->btf_id); in __process_kf_arg_ptr_to_graph_node()
12457 if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, 0, field->graph_root.btf, in __process_kf_arg_ptr_to_graph_node()
12458 field->graph_root.value_btf_id, true)) { in __process_kf_arg_ptr_to_graph_node()
12463 field->graph_root.node_offset, in __process_kf_arg_ptr_to_graph_node()
12464 btf_name_by_offset(field->graph_root.btf, et->name_off), in __process_kf_arg_ptr_to_graph_node()
12465 node_off, btf_name_by_offset(reg->btf, t->name_off)); in __process_kf_arg_ptr_to_graph_node()
12466 return -EINVAL; in __process_kf_arg_ptr_to_graph_node()
12468 meta->arg_btf = reg->btf; in __process_kf_arg_ptr_to_graph_node()
12469 meta->arg_btf_id = reg->btf_id; in __process_kf_arg_ptr_to_graph_node()
12471 if (node_off != field->graph_root.node_offset) { in __process_kf_arg_ptr_to_graph_node()
12474 field->graph_root.node_offset, in __process_kf_arg_ptr_to_graph_node()
12475 btf_name_by_offset(field->graph_root.btf, et->name_off)); in __process_kf_arg_ptr_to_graph_node()
12476 return -EINVAL; in __process_kf_arg_ptr_to_graph_node()
12488 &meta->arg_list_head.field); in process_kf_arg_ptr_to_list_node()
12497 &meta->arg_rbtree_root.field); in process_kf_arg_ptr_to_rbtree_node()
12502 * LSM hooks and iters (both sleepable and non-sleepable) are safe.
12508 enum bpf_prog_type prog_type = resolve_prog_type(env->prog); in check_css_task_iter_allowlist()
12514 if (env->prog->expected_attach_type == BPF_TRACE_ITER) in check_css_task_iter_allowlist()
12525 const char *func_name = meta->func_name, *ref_tname; in check_kfunc_args()
12526 const struct btf *btf = meta->btf; in check_kfunc_args()
12532 args = (const struct btf_param *)(meta->func_proto + 1); in check_kfunc_args()
12533 nargs = btf_type_vlen(meta->func_proto); in check_kfunc_args()
12537 return -EINVAL; in check_kfunc_args()
12557 if (reg->type != SCALAR_VALUE) { in check_kfunc_args()
12559 return -EINVAL; in check_kfunc_args()
12562 if (is_kfunc_arg_constant(meta->btf, &args[i])) { in check_kfunc_args()
12563 if (meta->arg_constant.found) { in check_kfunc_args()
12565 return -EFAULT; in check_kfunc_args()
12567 if (!tnum_is_const(reg->var_off)) { in check_kfunc_args()
12569 return -EINVAL; in check_kfunc_args()
12574 meta->arg_constant.found = true; in check_kfunc_args()
12575 meta->arg_constant.value = reg->var_off.value; in check_kfunc_args()
12577 meta->r0_rdonly = true; in check_kfunc_args()
12584 if (meta->r0_size) { in check_kfunc_args()
12586 return -EINVAL; in check_kfunc_args()
12589 if (!tnum_is_const(reg->var_off)) { in check_kfunc_args()
12591 return -EINVAL; in check_kfunc_args()
12594 meta->r0_size = reg->var_off.value; in check_kfunc_args()
12604 return -EINVAL; in check_kfunc_args()
12608 (register_is_null(reg) || type_may_be_null(reg->type)) && in check_kfunc_args()
12609 !is_kfunc_arg_nullable(meta->btf, &args[i])) { in check_kfunc_args()
12611 return -EACCES; in check_kfunc_args()
12614 if (reg->ref_obj_id) { in check_kfunc_args()
12615 if (is_kfunc_release(meta) && meta->ref_obj_id) { in check_kfunc_args()
12617 regno, reg->ref_obj_id, in check_kfunc_args()
12618 meta->ref_obj_id); in check_kfunc_args()
12619 return -EFAULT; in check_kfunc_args()
12621 meta->ref_obj_id = reg->ref_obj_id; in check_kfunc_args()
12623 meta->release_regno = regno; in check_kfunc_args()
12626 ref_t = btf_type_skip_modifiers(btf, t->type, &ref_id); in check_kfunc_args()
12627 ref_tname = btf_name_by_offset(btf, ref_t->name_off); in check_kfunc_args()
12637 if (!reg->map_ptr) { in check_kfunc_args()
12639 return -EINVAL; in check_kfunc_args()
12641 if (meta->map.ptr && reg->map_ptr->record->wq_off >= 0) { in check_kfunc_args()
12654 if (meta->map.ptr != reg->map_ptr || in check_kfunc_args()
12655 meta->map.uid != reg->map_uid) { in check_kfunc_args()
12658 meta->map.uid, reg->map_uid); in check_kfunc_args()
12659 return -EINVAL; in check_kfunc_args()
12662 meta->map.ptr = reg->map_ptr; in check_kfunc_args()
12663 meta->map.uid = reg->map_uid; in check_kfunc_args()
12673 return -EINVAL; in check_kfunc_args()
12677 return -EINVAL; in check_kfunc_args()
12698 return -EFAULT; in check_kfunc_args()
12701 if (is_kfunc_release(meta) && reg->ref_obj_id) in check_kfunc_args()
12709 if (reg->type != PTR_TO_CTX) { in check_kfunc_args()
12711 i, reg_type_str(env, reg->type)); in check_kfunc_args()
12712 return -EINVAL; in check_kfunc_args()
12715 if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx]) { in check_kfunc_args()
12716 ret = get_kern_ctx_btf_id(&env->log, resolve_prog_type(env->prog)); in check_kfunc_args()
12718 return -EINVAL; in check_kfunc_args()
12719 meta->ret_btf_id = ret; in check_kfunc_args()
12723 if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC)) { in check_kfunc_args()
12724 if (meta->func_id != special_kfunc_list[KF_bpf_obj_drop_impl]) { in check_kfunc_args()
12726 return -EINVAL; in check_kfunc_args()
12728 } else if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC | MEM_PERCPU)) { in check_kfunc_args()
12729 if (meta->func_id != special_kfunc_list[KF_bpf_percpu_obj_drop_impl]) { in check_kfunc_args()
12731 return -EINVAL; in check_kfunc_args()
12735 return -EINVAL; in check_kfunc_args()
12737 if (!reg->ref_obj_id) { in check_kfunc_args()
12739 return -EINVAL; in check_kfunc_args()
12741 if (meta->btf == btf_vmlinux) { in check_kfunc_args()
12742 meta->arg_btf = reg->btf; in check_kfunc_args()
12743 meta->arg_btf_id = reg->btf_id; in check_kfunc_args()
12751 if (reg->type == CONST_PTR_TO_DYNPTR) in check_kfunc_args()
12757 if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_from_skb]) { in check_kfunc_args()
12759 } else if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_from_xdp]) { in check_kfunc_args()
12761 } else if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_clone] && in check_kfunc_args()
12763 enum bpf_dynptr_type parent_type = meta->initialized_dynptr.type; in check_kfunc_args()
12767 return -EFAULT; in check_kfunc_args()
12771 clone_ref_obj_id = meta->initialized_dynptr.ref_obj_id; in check_kfunc_args()
12774 return -EFAULT; in check_kfunc_args()
12789 meta->initialized_dynptr.id = id; in check_kfunc_args()
12790 meta->initialized_dynptr.type = dynptr_get_type(env, reg); in check_kfunc_args()
12791 meta->initialized_dynptr.ref_obj_id = dynptr_ref_obj_id(env, reg); in check_kfunc_args()
12797 if (meta->func_id == special_kfunc_list[KF_bpf_iter_css_task_new]) { in check_kfunc_args()
12800 return -EINVAL; in check_kfunc_args()
12808 if (reg->type != PTR_TO_MAP_VALUE && in check_kfunc_args()
12809 reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) { in check_kfunc_args()
12811 return -EINVAL; in check_kfunc_args()
12813 if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC) && !reg->ref_obj_id) { in check_kfunc_args()
12815 return -EINVAL; in check_kfunc_args()
12822 if (reg->type != PTR_TO_MAP_VALUE && in check_kfunc_args()
12823 reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) { in check_kfunc_args()
12825 return -EINVAL; in check_kfunc_args()
12827 if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC) && !reg->ref_obj_id) { in check_kfunc_args()
12829 return -EINVAL; in check_kfunc_args()
12836 if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) { in check_kfunc_args()
12838 return -EINVAL; in check_kfunc_args()
12840 if (!reg->ref_obj_id) { in check_kfunc_args()
12842 return -EINVAL; in check_kfunc_args()
12849 if (meta->func_id == special_kfunc_list[KF_bpf_rbtree_remove]) { in check_kfunc_args()
12850 if (!type_is_non_owning_ref(reg->type) || reg->ref_obj_id) { in check_kfunc_args()
12851 verbose(env, "rbtree_remove node input must be non-owning ref\n"); in check_kfunc_args()
12852 return -EINVAL; in check_kfunc_args()
12856 return -EINVAL; in check_kfunc_args()
12859 if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) { in check_kfunc_args()
12861 return -EINVAL; in check_kfunc_args()
12863 if (!reg->ref_obj_id) { in check_kfunc_args()
12865 return -EINVAL; in check_kfunc_args()
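/* Hedged sketch, modeled on the bpf_experimental.h selftest helpers, of the
 * ownership rules checked above: bpf_rbtree_add() consumes the owning
 * reference returned by bpf_obj_new() (it becomes non-owning afterwards),
 * while bpf_rbtree_remove() wants a non-owning ref and hands back an owning
 * one. private()/__contains() are selftest macros, not kernel API.
 */
struct node_data {
	struct bpf_rb_node node;
	long key;
};

private(A) struct bpf_spin_lock glock;
private(A) struct bpf_rb_root groot __contains(node_data, node);

static bool node_less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
{
	return container_of(a, struct node_data, node)->key <
	       container_of(b, struct node_data, node)->key;
}

static __always_inline int insert_key(long key)
{
	struct node_data *n = bpf_obj_new(typeof(*n));

	if (!n)
		return 0;
	n->key = key;			/* n is an owning reference here */
	bpf_spin_lock(&glock);
	bpf_rbtree_add(&groot, &n->node, node_less);	/* n becomes non-owning */
	bpf_spin_unlock(&glock);
	return 0;
}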
12877 ref_tname = btf_name_by_offset(btf, ref_t->name_off); in check_kfunc_args()
12881 if ((base_type(reg->type) != PTR_TO_BTF_ID || in check_kfunc_args()
12882 (bpf_type_has_unsafe_modifiers(reg->type) && !is_rcu_reg(reg))) && in check_kfunc_args()
12883 !reg2btf_ids[base_type(reg->type)]) { in check_kfunc_args()
12884 verbose(env, "arg#%d is %s ", i, reg_type_str(env, reg->type)); in check_kfunc_args()
12886 reg_type_str(env, base_type(reg->type) | in check_kfunc_args()
12887 (type_flag(reg->type) & BPF_REG_TRUSTED_MODIFIERS))); in check_kfunc_args()
12888 return -EINVAL; in check_kfunc_args()
12899 return -EINVAL; in check_kfunc_args()
12912 if (!register_is_null(buff_reg) || !is_kfunc_arg_optional(meta->btf, buff_arg)) { in check_kfunc_args()
12920 if (is_kfunc_arg_const_mem_size(meta->btf, size_arg, size_reg)) { in check_kfunc_args()
12921 if (meta->arg_constant.found) { in check_kfunc_args()
12923 return -EFAULT; in check_kfunc_args()
12925 if (!tnum_is_const(size_reg->var_off)) { in check_kfunc_args()
12927 return -EINVAL; in check_kfunc_args()
12929 meta->arg_constant.found = true; in check_kfunc_args()
12930 meta->arg_constant.value = size_reg->var_off.value; in check_kfunc_args()
12938 if (reg->type != PTR_TO_FUNC) { in check_kfunc_args()
12940 return -EINVAL; in check_kfunc_args()
12942 meta->subprogno = reg->subprogno; in check_kfunc_args()
12945 if (!type_is_ptr_alloc_obj(reg->type)) { in check_kfunc_args()
12946 verbose(env, "arg#%d is neither owning nor non-owning ref\n", i); in check_kfunc_args()
12947 return -EINVAL; in check_kfunc_args()
12949 if (!type_is_non_owning_ref(reg->type)) in check_kfunc_args()
12950 meta->arg_owning_ref = true; in check_kfunc_args()
12955 return -EFAULT; in check_kfunc_args()
12958 if (rec->refcount_off < 0) { in check_kfunc_args()
12960 return -EINVAL; in check_kfunc_args()
12963 meta->arg_btf = reg->btf; in check_kfunc_args()
12964 meta->arg_btf_id = reg->btf_id; in check_kfunc_args()
12967 if (reg->type != PTR_TO_MAP_VALUE) { in check_kfunc_args()
12969 return -EINVAL; in check_kfunc_args()
12976 if (reg->type != PTR_TO_MAP_VALUE) { in check_kfunc_args()
12978 return -EINVAL; in check_kfunc_args()
12985 if (reg->type != PTR_TO_STACK) { in check_kfunc_args()
12987 return -EINVAL; in check_kfunc_args()
12996 if (is_kfunc_release(meta) && !meta->release_regno) { in check_kfunc_args()
12999 return -EINVAL; in check_kfunc_args()
13018 if (!insn->imm) in fetch_kfunc_meta()
13019 return -EINVAL; in fetch_kfunc_meta()
13021 desc_btf = find_kfunc_desc_btf(env, insn->off); in fetch_kfunc_meta()
13025 func_id = insn->imm; in fetch_kfunc_meta()
13027 func_name = btf_name_by_offset(desc_btf, func->name_off); in fetch_kfunc_meta()
13030 func_proto = btf_type_by_id(desc_btf, func->type); in fetch_kfunc_meta()
13032 kfunc_flags = btf_kfunc_id_set_contains(desc_btf, func_id, env->prog); in fetch_kfunc_meta()
13034 return -EACCES; in fetch_kfunc_meta()
13038 meta->btf = desc_btf; in fetch_kfunc_meta()
13039 meta->func_id = func_id; in fetch_kfunc_meta()
13040 meta->kfunc_flags = *kfunc_flags; in fetch_kfunc_meta()
13041 meta->func_proto = func_proto; in fetch_kfunc_meta()
13042 meta->func_name = func_name; in fetch_kfunc_meta()
13065 if (!insn->imm) in check_kfunc_call()
13069 if (err == -EACCES && func_name) in check_kfunc_call()
13074 insn_aux = &env->insn_aux_data[insn_idx]; in check_kfunc_call()
13076 insn_aux->is_iter_next = is_iter_next_kfunc(&meta); in check_kfunc_call()
13080 return -EACCES; in check_kfunc_call()
13086 return -EACCES; in check_kfunc_call()
13125 if (env->cur_state->active_rcu_lock) { in check_kfunc_call()
13132 return -EACCES; in check_kfunc_call()
13137 return -EINVAL; in check_kfunc_call()
13139 bpf_for_each_reg_in_vstate_mask(env->cur_state, state, reg, clear_mask, ({ in check_kfunc_call()
13140 if (reg->type & MEM_RCU) { in check_kfunc_call()
13141 reg->type &= ~(MEM_RCU | PTR_MAYBE_NULL); in check_kfunc_call()
13142 reg->type |= PTR_UNTRUSTED; in check_kfunc_call()
13145 env->cur_state->active_rcu_lock = false; in check_kfunc_call()
13148 return -EACCES; in check_kfunc_call()
13151 env->cur_state->active_rcu_lock = true; in check_kfunc_call()
13154 return -EINVAL; in check_kfunc_call()
13157 if (env->cur_state->active_preempt_locks) { in check_kfunc_call()
13159 env->cur_state->active_preempt_locks++; in check_kfunc_call()
13161 env->cur_state->active_preempt_locks--; in check_kfunc_call()
13163 verbose(env, "kernel func %s is sleepable within non-preemptible region\n", func_name); in check_kfunc_call()
13164 return -EACCES; in check_kfunc_call()
13167 env->cur_state->active_preempt_locks++; in check_kfunc_call()
13170 return -EINVAL; in check_kfunc_call()
13173 if (env->cur_state->active_irq_id && sleepable) { in check_kfunc_call()
13174 verbose(env, "kernel func %s is sleepable within IRQ-disabled region\n", func_name); in check_kfunc_call()
13175 return -EACCES; in check_kfunc_call()
13194 insn_aux->insert_off = regs[BPF_REG_2].off; in check_kfunc_call()
13195 insn_aux->kptr_struct_meta = btf_find_struct_meta(meta.arg_btf, meta.arg_btf_id); in check_kfunc_call()
13198 verbose(env, "kfunc %s#%d conversion of owning ref to non-owning failed\n", in check_kfunc_call()
13215 return -ENOTSUPP; in check_kfunc_call()
13217 env->seen_exception = true; in check_kfunc_call()
13222 if (!env->exception_callback_subprog) { in check_kfunc_call()
13233 t = btf_type_skip_modifiers(desc_btf, meta.func_proto->type, NULL); in check_kfunc_call()
13242 return -EINVAL; in check_kfunc_call()
13248 mark_btf_func_reg_size(env, BPF_REG_0, t->size); in check_kfunc_call()
13250 ptr_type = btf_type_skip_modifiers(desc_btf, t->type, &ptr_type_id); in check_kfunc_call()
13260 return -ENOMEM; in check_kfunc_call()
13264 return -EINVAL; in check_kfunc_call()
13267 ret_btf = env->prog->aux->btf; in check_kfunc_call()
13273 return -EINVAL; in check_kfunc_call()
13279 return -EINVAL; in check_kfunc_call()
13283 if (ret_t->size > BPF_GLOBAL_PERCPU_MA_MAX_SIZE) { in check_kfunc_call()
13285 ret_t->size, BPF_GLOBAL_PERCPU_MA_MAX_SIZE); in check_kfunc_call()
13286 return -EINVAL; in check_kfunc_call()
13305 err = bpf_mem_alloc_percpu_unit_init(&bpf_global_percpu_ma, ret_t->size); in check_kfunc_call()
13315 return -EINVAL; in check_kfunc_call()
13320 return -EINVAL; in check_kfunc_call()
13331 insn_aux->obj_new_size = ret_t->size; in check_kfunc_call()
13332 insn_aux->kptr_struct_meta = struct_meta; in check_kfunc_call()
13339 insn_aux->kptr_struct_meta = in check_kfunc_call()
13346 mark_reg_graph_node(regs, BPF_REG_0, &field->graph_root); in check_kfunc_call()
13351 mark_reg_graph_node(regs, BPF_REG_0, &field->graph_root); in check_kfunc_call()
13362 return -EINVAL; in check_kfunc_call()
13377 return -EFAULT; in check_kfunc_call()
13388 /* this will set env->seen_direct_write to true */ in check_kfunc_call()
13391 return -EINVAL; in check_kfunc_call()
13397 return -EFAULT; in check_kfunc_call()
13408 return -EFAULT; in check_kfunc_call()
13424 ptr_type->name_off); in check_kfunc_call()
13430 return -EINVAL; in check_kfunc_call()
13455 cur_iter = get_iter_from_state(env->cur_state, &meta); in check_kfunc_call()
13457 if (cur_iter->type & MEM_RCU) /* KF_RCU_PROTECTED */ in check_kfunc_call()
13467 regs[BPF_REG_0].id = ++env->id_gen; in check_kfunc_call()
13483 regs[BPF_REG_0].id = ++env->id_gen; in check_kfunc_call()
13488 insn_aux->kptr_struct_meta = in check_kfunc_call()
13505 mark_btf_func_reg_size(env, regno, t->size); in check_kfunc_call()
13521 bool known = tnum_is_const(reg->var_off); in check_reg_sane_offset()
13522 s64 val = reg->var_off.value; in check_reg_sane_offset()
13523 s64 smin = reg->smin_value; in check_reg_sane_offset()
13525 if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) { in check_reg_sane_offset()
13531 if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) { in check_reg_sane_offset()
13533 reg_type_str(env, type), reg->off); in check_reg_sane_offset()
13543 if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) { in check_reg_sane_offset()
13553 REASON_BOUNDS = -1,
13554 REASON_TYPE = -2,
13555 REASON_PATHS = -3,
13556 REASON_LIMIT = -4,
13557 REASON_STACK = -5,
13565 switch (ptr_reg->type) { in retrieve_ptr_limit()
13567 /* Offset 0 is out-of-bounds, but acceptable start for the in retrieve_ptr_limit()
13573 ptr_limit = -(ptr_reg->var_off.value + ptr_reg->off); in retrieve_ptr_limit()
13576 max = ptr_reg->map_ptr->value_size; in retrieve_ptr_limit()
13578 ptr_reg->smin_value : in retrieve_ptr_limit()
13579 ptr_reg->umax_value) + ptr_reg->off; in retrieve_ptr_limit()
13594 return env->bypass_spec_v1 || BPF_SRC(insn->code) == BPF_K; in can_skip_alu_sanitation()
13603 if (aux->alu_state && in update_alu_sanitation_state()
13604 (aux->alu_state != alu_state || in update_alu_sanitation_state()
13605 aux->alu_limit != alu_limit)) in update_alu_sanitation_state()
13609 aux->alu_state = alu_state; in update_alu_sanitation_state()
13610 aux->alu_limit = alu_limit; in update_alu_sanitation_state()
13645 regs = branch->frame[branch->curframe]->regs; in sanitize_speculative_path()
13646 if (BPF_SRC(insn->code) == BPF_K) { in sanitize_speculative_path()
13647 mark_reg_unknown(env, regs, insn->dst_reg); in sanitize_speculative_path()
13648 } else if (BPF_SRC(insn->code) == BPF_X) { in sanitize_speculative_path()
13649 mark_reg_unknown(env, regs, insn->dst_reg); in sanitize_speculative_path()
13650 mark_reg_unknown(env, regs, insn->src_reg); in sanitize_speculative_path()
13664 struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : &info->aux; in sanitize_ptr_alu()
13665 struct bpf_verifier_state *vstate = env->cur_state; in sanitize_ptr_alu()
13666 bool off_is_imm = tnum_is_const(off_reg->var_off); in sanitize_ptr_alu()
13667 bool off_is_neg = off_reg->smin_value < 0; in sanitize_ptr_alu()
13669 u8 opcode = BPF_OP(insn->code); in sanitize_ptr_alu()
13678 /* We already marked aux for masking from non-speculative in sanitize_ptr_alu()
13682 if (vstate->speculative) in sanitize_ptr_alu()
13686 if (!tnum_is_const(off_reg->var_off) && in sanitize_ptr_alu()
13687 (off_reg->smin_value < 0) != (off_reg->smax_value < 0)) in sanitize_ptr_alu()
13690 info->mask_to_left = (opcode == BPF_ADD && off_is_neg) || in sanitize_ptr_alu()
13694 err = retrieve_ptr_limit(ptr_reg, &alu_limit, info->mask_to_left); in sanitize_ptr_alu()
13702 alu_state = info->aux.alu_state; in sanitize_ptr_alu()
13703 alu_limit = abs(info->aux.alu_limit - alu_limit); in sanitize_ptr_alu()
13714 env->explore_alu_limits = true; in sanitize_ptr_alu()
13725 * Also, when register is a known constant, we rewrite register-based in sanitize_ptr_alu()
13726 * operation to immediate-based, and thus do not need masking (and as in sanitize_ptr_alu()
13727 * a consequence, do not need to simulate the zero-truncation either). in sanitize_ptr_alu()
13732 /* Simulate and find potential out-of-bounds access under in sanitize_ptr_alu()
13736 * to simulate dst (== 0) +/-= ptr. Needed, for example, in sanitize_ptr_alu()
13737 * for cases where we use K-based arithmetic in one direction in sanitize_ptr_alu()
13738 * and truncated reg-based in the other in order to explore in sanitize_ptr_alu()
13745 ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1, in sanitize_ptr_alu()
13746 env->insn_idx); in sanitize_ptr_alu()
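/* Roughly, the effect of the masking that the verifier later patches in for a
 * sanitized pointer ALU op (a plain-C sketch of the idea, not the exact BPF
 * instruction sequence): with the verifier-chosen alu_limit, an offset that
 * could leave the object is clamped to zero even on a mispredicted path.
 */
static inline u64 speculation_safe_off(u64 off, u64 alu_limit)
{
	u64 mask = alu_limit - off;	/* MSB set iff off > alu_limit */

	mask |= off;			/* MSB also set iff off itself wraps negative */
	mask = -mask;
	mask = (u64)((s64)mask >> 63);	/* all-ones if off was in range, else 0 */
	return off & mask;		/* out-of-range offsets become 0 */
}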
13754 struct bpf_verifier_state *vstate = env->cur_state; in sanitize_mark_insn_seen()
13758 * the non-speculative domain, sanitize_dead_code() can still in sanitize_mark_insn_seen()
13761 if (!vstate->speculative) in sanitize_mark_insn_seen()
13762 env->insn_aux_data[env->insn_idx].seen = env->pass_cnt; in sanitize_mark_insn_seen()
13771 const char *op = BPF_OP(insn->code) == BPF_ADD ? "add" : "sub"; in sanitize_err()
13772 u32 dst = insn->dst_reg, src = insn->src_reg; in sanitize_err()
13776 verbose(env, "R%d has unknown scalar with mixed signed bounds, %s\n", in sanitize_err()
13801 return -EACCES; in sanitize_err()
13812 * 'off' includes 'reg->off'.
13820 if (!tnum_is_const(reg->var_off)) { in check_stack_access_for_ptr_arithmetic()
13823 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in check_stack_access_for_ptr_arithmetic()
13826 return -EACCES; in check_stack_access_for_ptr_arithmetic()
13829 if (off >= 0 || off < -MAX_BPF_STACK) { in check_stack_access_for_ptr_arithmetic()
13832 return -EACCES; in check_stack_access_for_ptr_arithmetic()
13842 u32 dst = insn->dst_reg; in sanitize_check_bounds()
13847 if (env->bypass_spec_v1) in sanitize_check_bounds()
13850 switch (dst_reg->type) { in sanitize_check_bounds()
13853 dst_reg->off + dst_reg->var_off.value)) in sanitize_check_bounds()
13854 return -EACCES; in sanitize_check_bounds()
13857 if (check_map_access(env, dst, dst_reg->off, 1, false, ACCESS_HELPER)) { in sanitize_check_bounds()
13860 return -EACCES; in sanitize_check_bounds()
13872 * If we return -EACCES, caller may want to try again treating pointer as a
13873 * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks.
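/* Hedged illustration of the note above: subtracting two pointers can never
 * produce a valid pointer, so for privileged programs (allow_ptr_leaks) the
 * caller retries the operation treating the result as an unknown scalar,
 * while unprivileged programs get the error.
 */
static __always_inline long stack_ptr_delta(void)
{
	char buf[16];
	char *p = buf;
	char *q = buf + 8;

	return q - p;	/* PTR_TO_STACK - PTR_TO_STACK: unknown scalar when
			 * allow_ptr_leaks, otherwise rejected with -EACCES */
}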
13880 struct bpf_verifier_state *vstate = env->cur_state; in adjust_ptr_min_max_vals()
13881 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in adjust_ptr_min_max_vals()
13882 struct bpf_reg_state *regs = state->regs, *dst_reg; in adjust_ptr_min_max_vals()
13883 bool known = tnum_is_const(off_reg->var_off); in adjust_ptr_min_max_vals()
13884 s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value, in adjust_ptr_min_max_vals()
13885 smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value; in adjust_ptr_min_max_vals()
13886 u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value, in adjust_ptr_min_max_vals()
13887 umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value; in adjust_ptr_min_max_vals()
13889 u8 opcode = BPF_OP(insn->code); in adjust_ptr_min_max_vals()
13890 u32 dst = insn->dst_reg; in adjust_ptr_min_max_vals()
13904 if (BPF_CLASS(insn->code) != BPF_ALU64) { in adjust_ptr_min_max_vals()
13905 /* 32-bit ALU ops on pointers produce (meaningless) scalars */ in adjust_ptr_min_max_vals()
13906 if (opcode == BPF_SUB && env->allow_ptr_leaks) { in adjust_ptr_min_max_vals()
13912 "R%d 32-bit pointer arithmetic prohibited\n", in adjust_ptr_min_max_vals()
13914 return -EACCES; in adjust_ptr_min_max_vals()
13917 if (ptr_reg->type & PTR_MAYBE_NULL) { in adjust_ptr_min_max_vals()
13918 verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n", in adjust_ptr_min_max_vals()
13919 dst, reg_type_str(env, ptr_reg->type)); in adjust_ptr_min_max_vals()
13920 return -EACCES; in adjust_ptr_min_max_vals()
13923 switch (base_type(ptr_reg->type)) { in adjust_ptr_min_max_vals()
13948 dst, reg_type_str(env, ptr_reg->type)); in adjust_ptr_min_max_vals()
13949 return -EACCES; in adjust_ptr_min_max_vals()
13955 dst_reg->type = ptr_reg->type; in adjust_ptr_min_max_vals()
13956 dst_reg->id = ptr_reg->id; in adjust_ptr_min_max_vals()
13958 if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) || in adjust_ptr_min_max_vals()
13959 !check_reg_sane_offset(env, ptr_reg, ptr_reg->type)) in adjust_ptr_min_max_vals()
13960 return -EINVAL; in adjust_ptr_min_max_vals()
13962 /* pointer types do not carry 32-bit bounds at the moment. */ in adjust_ptr_min_max_vals()
13977 if (known && (ptr_reg->off + smin_val == in adjust_ptr_min_max_vals()
13978 (s64)(s32)(ptr_reg->off + smin_val))) { in adjust_ptr_min_max_vals()
13980 dst_reg->smin_value = smin_ptr; in adjust_ptr_min_max_vals()
13981 dst_reg->smax_value = smax_ptr; in adjust_ptr_min_max_vals()
13982 dst_reg->umin_value = umin_ptr; in adjust_ptr_min_max_vals()
13983 dst_reg->umax_value = umax_ptr; in adjust_ptr_min_max_vals()
13984 dst_reg->var_off = ptr_reg->var_off; in adjust_ptr_min_max_vals()
13985 dst_reg->off = ptr_reg->off + smin_val; in adjust_ptr_min_max_vals()
13986 dst_reg->raw = ptr_reg->raw; in adjust_ptr_min_max_vals()
13989 /* A new variable offset is created. Note that off_reg->off in adjust_ptr_min_max_vals()
13998 if (check_add_overflow(smin_ptr, smin_val, &dst_reg->smin_value) || in adjust_ptr_min_max_vals()
13999 check_add_overflow(smax_ptr, smax_val, &dst_reg->smax_value)) { in adjust_ptr_min_max_vals()
14000 dst_reg->smin_value = S64_MIN; in adjust_ptr_min_max_vals()
14001 dst_reg->smax_value = S64_MAX; in adjust_ptr_min_max_vals()
14003 if (check_add_overflow(umin_ptr, umin_val, &dst_reg->umin_value) || in adjust_ptr_min_max_vals()
14004 check_add_overflow(umax_ptr, umax_val, &dst_reg->umax_value)) { in adjust_ptr_min_max_vals()
14005 dst_reg->umin_value = 0; in adjust_ptr_min_max_vals()
14006 dst_reg->umax_value = U64_MAX; in adjust_ptr_min_max_vals()
14008 dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off); in adjust_ptr_min_max_vals()
14009 dst_reg->off = ptr_reg->off; in adjust_ptr_min_max_vals()
14010 dst_reg->raw = ptr_reg->raw; in adjust_ptr_min_max_vals()
14012 dst_reg->id = ++env->id_gen; in adjust_ptr_min_max_vals()
14014 memset(&dst_reg->raw, 0, sizeof(dst_reg->raw)); in adjust_ptr_min_max_vals()
14019 /* scalar -= pointer. Creates an unknown scalar */ in adjust_ptr_min_max_vals()
14022 return -EACCES; in adjust_ptr_min_max_vals()
14028 if (ptr_reg->type == PTR_TO_STACK) { in adjust_ptr_min_max_vals()
14031 return -EACCES; in adjust_ptr_min_max_vals()
14033 if (known && (ptr_reg->off - smin_val == in adjust_ptr_min_max_vals()
14034 (s64)(s32)(ptr_reg->off - smin_val))) { in adjust_ptr_min_max_vals()
14035 /* pointer -= K. Subtract it from fixed offset */ in adjust_ptr_min_max_vals()
14036 dst_reg->smin_value = smin_ptr; in adjust_ptr_min_max_vals()
14037 dst_reg->smax_value = smax_ptr; in adjust_ptr_min_max_vals()
14038 dst_reg->umin_value = umin_ptr; in adjust_ptr_min_max_vals()
14039 dst_reg->umax_value = umax_ptr; in adjust_ptr_min_max_vals()
14040 dst_reg->var_off = ptr_reg->var_off; in adjust_ptr_min_max_vals()
14041 dst_reg->id = ptr_reg->id; in adjust_ptr_min_max_vals()
14042 dst_reg->off = ptr_reg->off - smin_val; in adjust_ptr_min_max_vals()
14043 dst_reg->raw = ptr_reg->raw; in adjust_ptr_min_max_vals()
14047 * nonnegative, then any reg->range we had before is still good. in adjust_ptr_min_max_vals()
14049 if (check_sub_overflow(smin_ptr, smax_val, &dst_reg->smin_value) || in adjust_ptr_min_max_vals()
14050 check_sub_overflow(smax_ptr, smin_val, &dst_reg->smax_value)) { in adjust_ptr_min_max_vals()
14052 dst_reg->smin_value = S64_MIN; in adjust_ptr_min_max_vals()
14053 dst_reg->smax_value = S64_MAX; in adjust_ptr_min_max_vals()
14057 dst_reg->umin_value = 0; in adjust_ptr_min_max_vals()
14058 dst_reg->umax_value = U64_MAX; in adjust_ptr_min_max_vals()
14061 dst_reg->umin_value = umin_ptr - umax_val; in adjust_ptr_min_max_vals()
14062 dst_reg->umax_value = umax_ptr - umin_val; in adjust_ptr_min_max_vals()
14064 dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off); in adjust_ptr_min_max_vals()
14065 dst_reg->off = ptr_reg->off; in adjust_ptr_min_max_vals()
14066 dst_reg->raw = ptr_reg->raw; in adjust_ptr_min_max_vals()
14068 dst_reg->id = ++env->id_gen; in adjust_ptr_min_max_vals()
14071 memset(&dst_reg->raw, 0, sizeof(dst_reg->raw)); in adjust_ptr_min_max_vals()
14080 return -EACCES; in adjust_ptr_min_max_vals()
14082 /* other operators (e.g. MUL,LSH) produce non-pointer results */ in adjust_ptr_min_max_vals()
14085 return -EACCES; in adjust_ptr_min_max_vals()
14088 if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type)) in adjust_ptr_min_max_vals()
14089 return -EINVAL; in adjust_ptr_min_max_vals()
14092 return -EACCES; in adjust_ptr_min_max_vals()
14106 s32 *dst_smin = &dst_reg->s32_min_value; in scalar32_min_max_add()
14107 s32 *dst_smax = &dst_reg->s32_max_value; in scalar32_min_max_add()
14108 u32 *dst_umin = &dst_reg->u32_min_value; in scalar32_min_max_add()
14109 u32 *dst_umax = &dst_reg->u32_max_value; in scalar32_min_max_add()
14111 if (check_add_overflow(*dst_smin, src_reg->s32_min_value, dst_smin) || in scalar32_min_max_add()
14112 check_add_overflow(*dst_smax, src_reg->s32_max_value, dst_smax)) { in scalar32_min_max_add()
14116 if (check_add_overflow(*dst_umin, src_reg->u32_min_value, dst_umin) || in scalar32_min_max_add()
14117 check_add_overflow(*dst_umax, src_reg->u32_max_value, dst_umax)) { in scalar32_min_max_add()
14126 s64 *dst_smin = &dst_reg->smin_value; in scalar_min_max_add()
14127 s64 *dst_smax = &dst_reg->smax_value; in scalar_min_max_add()
14128 u64 *dst_umin = &dst_reg->umin_value; in scalar_min_max_add()
14129 u64 *dst_umax = &dst_reg->umax_value; in scalar_min_max_add()
14131 if (check_add_overflow(*dst_smin, src_reg->smin_value, dst_smin) || in scalar_min_max_add()
14132 check_add_overflow(*dst_smax, src_reg->smax_value, dst_smax)) { in scalar_min_max_add()
14136 if (check_add_overflow(*dst_umin, src_reg->umin_value, dst_umin) || in scalar_min_max_add()
14137 check_add_overflow(*dst_umax, src_reg->umax_value, dst_umax)) { in scalar_min_max_add()
14146 s32 *dst_smin = &dst_reg->s32_min_value; in scalar32_min_max_sub()
14147 s32 *dst_smax = &dst_reg->s32_max_value; in scalar32_min_max_sub()
14148 u32 umin_val = src_reg->u32_min_value; in scalar32_min_max_sub()
14149 u32 umax_val = src_reg->u32_max_value; in scalar32_min_max_sub()
14151 if (check_sub_overflow(*dst_smin, src_reg->s32_max_value, dst_smin) || in scalar32_min_max_sub()
14152 check_sub_overflow(*dst_smax, src_reg->s32_min_value, dst_smax)) { in scalar32_min_max_sub()
14157 if (dst_reg->u32_min_value < umax_val) { in scalar32_min_max_sub()
14159 dst_reg->u32_min_value = 0; in scalar32_min_max_sub()
14160 dst_reg->u32_max_value = U32_MAX; in scalar32_min_max_sub()
14163 dst_reg->u32_min_value -= umax_val; in scalar32_min_max_sub()
14164 dst_reg->u32_max_value -= umin_val; in scalar32_min_max_sub()
14171 s64 *dst_smin = &dst_reg->smin_value; in scalar_min_max_sub()
14172 s64 *dst_smax = &dst_reg->smax_value; in scalar_min_max_sub()
14173 u64 umin_val = src_reg->umin_value; in scalar_min_max_sub()
14174 u64 umax_val = src_reg->umax_value; in scalar_min_max_sub()
14176 if (check_sub_overflow(*dst_smin, src_reg->smax_value, dst_smin) || in scalar_min_max_sub()
14177 check_sub_overflow(*dst_smax, src_reg->smin_value, dst_smax)) { in scalar_min_max_sub()
14182 if (dst_reg->umin_value < umax_val) { in scalar_min_max_sub()
14184 dst_reg->umin_value = 0; in scalar_min_max_sub()
14185 dst_reg->umax_value = U64_MAX; in scalar_min_max_sub()
14188 dst_reg->umin_value -= umax_val; in scalar_min_max_sub()
14189 dst_reg->umax_value -= umin_val; in scalar_min_max_sub()
14196 s32 *dst_smin = &dst_reg->s32_min_value; in scalar32_min_max_mul()
14197 s32 *dst_smax = &dst_reg->s32_max_value; in scalar32_min_max_mul()
14198 u32 *dst_umin = &dst_reg->u32_min_value; in scalar32_min_max_mul()
14199 u32 *dst_umax = &dst_reg->u32_max_value; in scalar32_min_max_mul()
14202 if (check_mul_overflow(*dst_umax, src_reg->u32_max_value, dst_umax) || in scalar32_min_max_mul()
14203 check_mul_overflow(*dst_umin, src_reg->u32_min_value, dst_umin)) { in scalar32_min_max_mul()
14208 if (check_mul_overflow(*dst_smin, src_reg->s32_min_value, &tmp_prod[0]) || in scalar32_min_max_mul()
14209 check_mul_overflow(*dst_smin, src_reg->s32_max_value, &tmp_prod[1]) || in scalar32_min_max_mul()
14210 check_mul_overflow(*dst_smax, src_reg->s32_min_value, &tmp_prod[2]) || in scalar32_min_max_mul()
14211 check_mul_overflow(*dst_smax, src_reg->s32_max_value, &tmp_prod[3])) { in scalar32_min_max_mul()
14224 s64 *dst_smin = &dst_reg->smin_value; in scalar_min_max_mul()
14225 s64 *dst_smax = &dst_reg->smax_value; in scalar_min_max_mul()
14226 u64 *dst_umin = &dst_reg->umin_value; in scalar_min_max_mul()
14227 u64 *dst_umax = &dst_reg->umax_value; in scalar_min_max_mul()
14230 if (check_mul_overflow(*dst_umax, src_reg->umax_value, dst_umax) || in scalar_min_max_mul()
14231 check_mul_overflow(*dst_umin, src_reg->umin_value, dst_umin)) { in scalar_min_max_mul()
14236 if (check_mul_overflow(*dst_smin, src_reg->smin_value, &tmp_prod[0]) || in scalar_min_max_mul()
14237 check_mul_overflow(*dst_smin, src_reg->smax_value, &tmp_prod[1]) || in scalar_min_max_mul()
14238 check_mul_overflow(*dst_smax, src_reg->smin_value, &tmp_prod[2]) || in scalar_min_max_mul()
14239 check_mul_overflow(*dst_smax, src_reg->smax_value, &tmp_prod[3])) { in scalar_min_max_mul()
14252 bool src_known = tnum_subreg_is_const(src_reg->var_off); in scalar32_min_max_and()
14253 bool dst_known = tnum_subreg_is_const(dst_reg->var_off); in scalar32_min_max_and()
14254 struct tnum var32_off = tnum_subreg(dst_reg->var_off); in scalar32_min_max_and()
14255 u32 umax_val = src_reg->u32_max_value; in scalar32_min_max_and()
14265 dst_reg->u32_min_value = var32_off.value; in scalar32_min_max_and()
14266 dst_reg->u32_max_value = min(dst_reg->u32_max_value, umax_val); in scalar32_min_max_and()
14271 if ((s32)dst_reg->u32_min_value <= (s32)dst_reg->u32_max_value) { in scalar32_min_max_and()
14272 dst_reg->s32_min_value = dst_reg->u32_min_value; in scalar32_min_max_and()
14273 dst_reg->s32_max_value = dst_reg->u32_max_value; in scalar32_min_max_and()
14275 dst_reg->s32_min_value = S32_MIN; in scalar32_min_max_and()
14276 dst_reg->s32_max_value = S32_MAX; in scalar32_min_max_and()
14283 bool src_known = tnum_is_const(src_reg->var_off); in scalar_min_max_and()
14284 bool dst_known = tnum_is_const(dst_reg->var_off); in scalar_min_max_and()
14285 u64 umax_val = src_reg->umax_value; in scalar_min_max_and()
14288 __mark_reg_known(dst_reg, dst_reg->var_off.value); in scalar_min_max_and()
14295 dst_reg->umin_value = dst_reg->var_off.value; in scalar_min_max_and()
14296 dst_reg->umax_value = min(dst_reg->umax_value, umax_val); in scalar_min_max_and()
14301 if ((s64)dst_reg->umin_value <= (s64)dst_reg->umax_value) { in scalar_min_max_and()
14302 dst_reg->smin_value = dst_reg->umin_value; in scalar_min_max_and()
14303 dst_reg->smax_value = dst_reg->umax_value; in scalar_min_max_and()
14305 dst_reg->smin_value = S64_MIN; in scalar_min_max_and()
14306 dst_reg->smax_value = S64_MAX; in scalar_min_max_and()
14315 bool src_known = tnum_subreg_is_const(src_reg->var_off); in scalar32_min_max_or()
14316 bool dst_known = tnum_subreg_is_const(dst_reg->var_off); in scalar32_min_max_or()
14317 struct tnum var32_off = tnum_subreg(dst_reg->var_off); in scalar32_min_max_or()
14318 u32 umin_val = src_reg->u32_min_value; in scalar32_min_max_or()
14328 dst_reg->u32_min_value = max(dst_reg->u32_min_value, umin_val); in scalar32_min_max_or()
14329 dst_reg->u32_max_value = var32_off.value | var32_off.mask; in scalar32_min_max_or()
14334 if ((s32)dst_reg->u32_min_value <= (s32)dst_reg->u32_max_value) { in scalar32_min_max_or()
14335 dst_reg->s32_min_value = dst_reg->u32_min_value; in scalar32_min_max_or()
14336 dst_reg->s32_max_value = dst_reg->u32_max_value; in scalar32_min_max_or()
14338 dst_reg->s32_min_value = S32_MIN; in scalar32_min_max_or()
14339 dst_reg->s32_max_value = S32_MAX; in scalar32_min_max_or()
14346 bool src_known = tnum_is_const(src_reg->var_off); in scalar_min_max_or()
14347 bool dst_known = tnum_is_const(dst_reg->var_off); in scalar_min_max_or()
14348 u64 umin_val = src_reg->umin_value; in scalar_min_max_or()
14351 __mark_reg_known(dst_reg, dst_reg->var_off.value); in scalar_min_max_or()
14358 dst_reg->umin_value = max(dst_reg->umin_value, umin_val); in scalar_min_max_or()
14359 dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask; in scalar_min_max_or()
14364 if ((s64)dst_reg->umin_value <= (s64)dst_reg->umax_value) { in scalar_min_max_or()
14365 dst_reg->smin_value = dst_reg->umin_value; in scalar_min_max_or()
14366 dst_reg->smax_value = dst_reg->umax_value; in scalar_min_max_or()
14368 dst_reg->smin_value = S64_MIN; in scalar_min_max_or()
14369 dst_reg->smax_value = S64_MAX; in scalar_min_max_or()
14378 bool src_known = tnum_subreg_is_const(src_reg->var_off); in scalar32_min_max_xor()
14379 bool dst_known = tnum_subreg_is_const(dst_reg->var_off); in scalar32_min_max_xor()
14380 struct tnum var32_off = tnum_subreg(dst_reg->var_off); in scalar32_min_max_xor()
14388 dst_reg->u32_min_value = var32_off.value; in scalar32_min_max_xor()
14389 dst_reg->u32_max_value = var32_off.value | var32_off.mask; in scalar32_min_max_xor()
14394 if ((s32)dst_reg->u32_min_value <= (s32)dst_reg->u32_max_value) { in scalar32_min_max_xor()
14395 dst_reg->s32_min_value = dst_reg->u32_min_value; in scalar32_min_max_xor()
14396 dst_reg->s32_max_value = dst_reg->u32_max_value; in scalar32_min_max_xor()
14398 dst_reg->s32_min_value = S32_MIN; in scalar32_min_max_xor()
14399 dst_reg->s32_max_value = S32_MAX; in scalar32_min_max_xor()
14406 bool src_known = tnum_is_const(src_reg->var_off); in scalar_min_max_xor()
14407 bool dst_known = tnum_is_const(dst_reg->var_off); in scalar_min_max_xor()
14410 /* dst_reg->var_off.value has been updated earlier */ in scalar_min_max_xor()
14411 __mark_reg_known(dst_reg, dst_reg->var_off.value); in scalar_min_max_xor()
14416 dst_reg->umin_value = dst_reg->var_off.value; in scalar_min_max_xor()
14417 dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask; in scalar_min_max_xor()
14422 if ((s64)dst_reg->umin_value <= (s64)dst_reg->umax_value) { in scalar_min_max_xor()
14423 dst_reg->smin_value = dst_reg->umin_value; in scalar_min_max_xor()
14424 dst_reg->smax_value = dst_reg->umax_value; in scalar_min_max_xor()
14426 dst_reg->smin_value = S64_MIN; in scalar_min_max_xor()
14427 dst_reg->smax_value = S64_MAX; in scalar_min_max_xor()
14439 dst_reg->s32_min_value = S32_MIN; in __scalar32_min_max_lsh()
14440 dst_reg->s32_max_value = S32_MAX; in __scalar32_min_max_lsh()
14442 if (umax_val > 31 || dst_reg->u32_max_value > 1ULL << (31 - umax_val)) { in __scalar32_min_max_lsh()
14443 dst_reg->u32_min_value = 0; in __scalar32_min_max_lsh()
14444 dst_reg->u32_max_value = U32_MAX; in __scalar32_min_max_lsh()
14446 dst_reg->u32_min_value <<= umin_val; in __scalar32_min_max_lsh()
14447 dst_reg->u32_max_value <<= umax_val; in __scalar32_min_max_lsh()
14454 u32 umax_val = src_reg->u32_max_value; in scalar32_min_max_lsh()
14455 u32 umin_val = src_reg->u32_min_value; in scalar32_min_max_lsh()
14457 struct tnum subreg = tnum_subreg(dst_reg->var_off); in scalar32_min_max_lsh()
14460 dst_reg->var_off = tnum_subreg(tnum_lshift(subreg, umin_val)); in scalar32_min_max_lsh()
14479 if (umin_val == 32 && umax_val == 32 && dst_reg->s32_max_value >= 0) in __scalar64_min_max_lsh()
14480 dst_reg->smax_value = (s64)dst_reg->s32_max_value << 32; in __scalar64_min_max_lsh()
14482 dst_reg->smax_value = S64_MAX; in __scalar64_min_max_lsh()
14484 if (umin_val == 32 && umax_val == 32 && dst_reg->s32_min_value >= 0) in __scalar64_min_max_lsh()
14485 dst_reg->smin_value = (s64)dst_reg->s32_min_value << 32; in __scalar64_min_max_lsh()
14487 dst_reg->smin_value = S64_MIN; in __scalar64_min_max_lsh()
14490 if (dst_reg->umax_value > 1ULL << (63 - umax_val)) { in __scalar64_min_max_lsh()
14491 dst_reg->umin_value = 0; in __scalar64_min_max_lsh()
14492 dst_reg->umax_value = U64_MAX; in __scalar64_min_max_lsh()
14494 dst_reg->umin_value <<= umin_val; in __scalar64_min_max_lsh()
14495 dst_reg->umax_value <<= umax_val; in __scalar64_min_max_lsh()
14502 u64 umax_val = src_reg->umax_value; in scalar_min_max_lsh()
14503 u64 umin_val = src_reg->umin_value; in scalar_min_max_lsh()
14509 dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val); in scalar_min_max_lsh()
14517 struct tnum subreg = tnum_subreg(dst_reg->var_off); in scalar32_min_max_rsh()
14518 u32 umax_val = src_reg->u32_max_value; in scalar32_min_max_rsh()
14519 u32 umin_val = src_reg->u32_min_value; in scalar32_min_max_rsh()
14535 dst_reg->s32_min_value = S32_MIN; in scalar32_min_max_rsh()
14536 dst_reg->s32_max_value = S32_MAX; in scalar32_min_max_rsh()
14538 dst_reg->var_off = tnum_rshift(subreg, umin_val); in scalar32_min_max_rsh()
14539 dst_reg->u32_min_value >>= umax_val; in scalar32_min_max_rsh()
14540 dst_reg->u32_max_value >>= umin_val; in scalar32_min_max_rsh()
14549 u64 umax_val = src_reg->umax_value; in scalar_min_max_rsh()
14550 u64 umin_val = src_reg->umin_value; in scalar_min_max_rsh()
14566 dst_reg->smin_value = S64_MIN; in scalar_min_max_rsh()
14567 dst_reg->smax_value = S64_MAX; in scalar_min_max_rsh()
14568 dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val); in scalar_min_max_rsh()
14569 dst_reg->umin_value >>= umax_val; in scalar_min_max_rsh()
14570 dst_reg->umax_value >>= umin_val; in scalar_min_max_rsh()
14583 u64 umin_val = src_reg->u32_min_value; in scalar32_min_max_arsh()
14588 dst_reg->s32_min_value = (u32)(((s32)dst_reg->s32_min_value) >> umin_val); in scalar32_min_max_arsh()
14589 dst_reg->s32_max_value = (u32)(((s32)dst_reg->s32_max_value) >> umin_val); in scalar32_min_max_arsh()
14591 dst_reg->var_off = tnum_arshift(tnum_subreg(dst_reg->var_off), umin_val, 32); in scalar32_min_max_arsh()
14596 dst_reg->u32_min_value = 0; in scalar32_min_max_arsh()
14597 dst_reg->u32_max_value = U32_MAX; in scalar32_min_max_arsh()
14606 u64 umin_val = src_reg->umin_value; in scalar_min_max_arsh()
14611 dst_reg->smin_value >>= umin_val; in scalar_min_max_arsh()
14612 dst_reg->smax_value >>= umin_val; in scalar_min_max_arsh()
14614 dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val, 64); in scalar_min_max_arsh()
14619 dst_reg->umin_value = 0; in scalar_min_max_arsh()
14620 dst_reg->umax_value = U64_MAX; in scalar_min_max_arsh()
14623 * on bits being shifted in from upper 32-bits. Take easy way out in scalar_min_max_arsh()
14634 u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32; in is_safe_to_compute_dst_reg_range()
14637 if (tnum_subreg_is_const(src_reg->var_off) in is_safe_to_compute_dst_reg_range()
14638 && src_reg->s32_min_value == src_reg->s32_max_value in is_safe_to_compute_dst_reg_range()
14639 && src_reg->u32_min_value == src_reg->u32_max_value) in is_safe_to_compute_dst_reg_range()
14642 if (tnum_is_const(src_reg->var_off) in is_safe_to_compute_dst_reg_range()
14643 && src_reg->smin_value == src_reg->smax_value in is_safe_to_compute_dst_reg_range()
14644 && src_reg->umin_value == src_reg->umax_value) in is_safe_to_compute_dst_reg_range()
14648 switch (BPF_OP(insn->code)) { in is_safe_to_compute_dst_reg_range()
14664 return (src_is_const && src_reg->umax_value < insn_bitness); in is_safe_to_compute_dst_reg_range()
14670 /* WARNING: This function does calculations on 64-bit values, but the actual
14671 * execution may occur on 32-bit values. Therefore, things like bitshifts
14672 * need extra checks in the 32-bit case.
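A minimal illustration of the warning above (editorial sketch, not verifier code): a 32-bit (BPF_ALU) shift can wrap in the subregister even when the 64-bit tracked value does not, which is why the scalar32_* and scalar_* bound helpers above are maintained separately.

	u64 v = 2;
	u64 r64 = v << 31;         /* 0x100000000, still representable in 64 bits */
	u32 r32 = (u32)v << 31;    /* 0x0, wrapped in the 32-bit subregister      */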
14679 u8 opcode = BPF_OP(insn->code); in adjust_scalar_min_max_vals()
14680 bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64); in adjust_scalar_min_max_vals()
14697 * greatest amount of precision when alu operations are mixed with jmp32 in adjust_scalar_min_max_vals()
14700 * understand and calculate behavior in both 32-bit and 64-bit alu ops. in adjust_scalar_min_max_vals()
14712 dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off); in adjust_scalar_min_max_vals()
14717 dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off); in adjust_scalar_min_max_vals()
14720 dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off); in adjust_scalar_min_max_vals()
14725 dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off); in adjust_scalar_min_max_vals()
14730 dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off); in adjust_scalar_min_max_vals()
14735 dst_reg->var_off = tnum_xor(dst_reg->var_off, src_reg.var_off); in adjust_scalar_min_max_vals()
14774 struct bpf_verifier_state *vstate = env->cur_state; in adjust_reg_min_max_vals()
14775 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in adjust_reg_min_max_vals()
14776 struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg; in adjust_reg_min_max_vals()
14778 bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64); in adjust_reg_min_max_vals()
14779 u8 opcode = BPF_OP(insn->code); in adjust_reg_min_max_vals()
14782 dst_reg = &regs[insn->dst_reg]; in adjust_reg_min_max_vals()
14785 if (dst_reg->type == PTR_TO_ARENA) { in adjust_reg_min_max_vals()
14788 if (BPF_CLASS(insn->code) == BPF_ALU64) in adjust_reg_min_max_vals()
14790 * 32-bit operations zero upper bits automatically. in adjust_reg_min_max_vals()
14791 * 64-bit operations need to be converted to 32. in adjust_reg_min_max_vals()
14793 aux->needs_zext = true; in adjust_reg_min_max_vals()
14799 if (dst_reg->type != SCALAR_VALUE) in adjust_reg_min_max_vals()
14802 if (BPF_SRC(insn->code) == BPF_X) { in adjust_reg_min_max_vals()
14803 src_reg = &regs[insn->src_reg]; in adjust_reg_min_max_vals()
14804 if (src_reg->type != SCALAR_VALUE) { in adjust_reg_min_max_vals()
14805 if (dst_reg->type != SCALAR_VALUE) { in adjust_reg_min_max_vals()
14810 if (opcode == BPF_SUB && env->allow_ptr_leaks) { in adjust_reg_min_max_vals()
14811 mark_reg_unknown(env, regs, insn->dst_reg); in adjust_reg_min_max_vals()
14815 insn->dst_reg, in adjust_reg_min_max_vals()
14817 return -EACCES; in adjust_reg_min_max_vals()
14823 err = mark_chain_precision(env, insn->dst_reg); in adjust_reg_min_max_vals()
14831 err = mark_chain_precision(env, insn->src_reg); in adjust_reg_min_max_vals()
14836 } else if (dst_reg->precise) { in adjust_reg_min_max_vals()
14838 err = mark_chain_precision(env, insn->src_reg); in adjust_reg_min_max_vals()
14847 __mark_reg_known(&off_reg, insn->imm); in adjust_reg_min_max_vals()
14856 print_verifier_state(env, vstate, vstate->curframe, true); in adjust_reg_min_max_vals()
14858 return -EINVAL; in adjust_reg_min_max_vals()
14861 print_verifier_state(env, vstate, vstate->curframe, true); in adjust_reg_min_max_vals()
14863 return -EINVAL; in adjust_reg_min_max_vals()
14874 * So for 64-bit alu remember constant delta between r2 and r1 and in adjust_reg_min_max_vals()
14877 if (env->bpf_capable && in adjust_reg_min_max_vals()
14878 BPF_OP(insn->code) == BPF_ADD && !alu32 && in adjust_reg_min_max_vals()
14879 dst_reg->id && is_reg_const(src_reg, false)) { in adjust_reg_min_max_vals()
14882 if ((dst_reg->id & BPF_ADD_CONST) || in adjust_reg_min_max_vals()
14887 * we cannot accumulate another val into rx->off. in adjust_reg_min_max_vals()
14889 dst_reg->off = 0; in adjust_reg_min_max_vals()
14890 dst_reg->id = 0; in adjust_reg_min_max_vals()
14892 dst_reg->id |= BPF_ADD_CONST; in adjust_reg_min_max_vals()
14893 dst_reg->off = val; in adjust_reg_min_max_vals()
14900 dst_reg->id = 0; in adjust_reg_min_max_vals()
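An illustrative program fragment (register numbers are only an example) for the constant-delta bookkeeping above: after the add, both registers share an id and the delta is kept in dst_reg->off with BPF_ADD_CONST set, so a bound later learned on one register can be transferred to the other by sync_linked_regs() further below.

	r1 = r0              /* r0 and r1 now share reg->id             */
	r1 += 16             /* delta 16 recorded: id |= BPF_ADD_CONST  */
	if r0 < 64 goto L
	/* at L: r0 is in [0, 63], so sync_linked_regs() can also
	 * conclude r1 is in [16, 79]
	 */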
14905 /* check validity of 32-bit and 64-bit arithmetic operations */
14909 u8 opcode = BPF_OP(insn->code); in check_alu_op()
14914 if (BPF_SRC(insn->code) != BPF_K || in check_alu_op()
14915 insn->src_reg != BPF_REG_0 || in check_alu_op()
14916 insn->off != 0 || insn->imm != 0) { in check_alu_op()
14918 return -EINVAL; in check_alu_op()
14921 if (insn->src_reg != BPF_REG_0 || insn->off != 0 || in check_alu_op()
14922 (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) || in check_alu_op()
14923 (BPF_CLASS(insn->code) == BPF_ALU64 && in check_alu_op()
14924 BPF_SRC(insn->code) != BPF_TO_LE)) { in check_alu_op()
14926 return -EINVAL; in check_alu_op()
14931 err = check_reg_arg(env, insn->dst_reg, SRC_OP); in check_alu_op()
14935 if (is_pointer_value(env, insn->dst_reg)) { in check_alu_op()
14937 insn->dst_reg); in check_alu_op()
14938 return -EACCES; in check_alu_op()
14942 err = check_reg_arg(env, insn->dst_reg, DST_OP); in check_alu_op()
14948 if (BPF_SRC(insn->code) == BPF_X) { in check_alu_op()
14949 if (BPF_CLASS(insn->code) == BPF_ALU) { in check_alu_op()
14950 if ((insn->off != 0 && insn->off != 8 && insn->off != 16) || in check_alu_op()
14951 insn->imm) { in check_alu_op()
14953 return -EINVAL; in check_alu_op()
14955 } else if (insn->off == BPF_ADDR_SPACE_CAST) { in check_alu_op()
14956 if (insn->imm != 1 && insn->imm != 1u << 16) { in check_alu_op()
14958 return -EINVAL; in check_alu_op()
14960 if (!env->prog->aux->arena) { in check_alu_op()
14962 return -EINVAL; in check_alu_op()
14965 if ((insn->off != 0 && insn->off != 8 && insn->off != 16 && in check_alu_op()
14966 insn->off != 32) || insn->imm) { in check_alu_op()
14968 return -EINVAL; in check_alu_op()
14973 err = check_reg_arg(env, insn->src_reg, SRC_OP); in check_alu_op()
14977 if (insn->src_reg != BPF_REG_0 || insn->off != 0) { in check_alu_op()
14979 return -EINVAL; in check_alu_op()
14984 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); in check_alu_op()
14988 if (BPF_SRC(insn->code) == BPF_X) { in check_alu_op()
14989 struct bpf_reg_state *src_reg = regs + insn->src_reg; in check_alu_op()
14990 struct bpf_reg_state *dst_reg = regs + insn->dst_reg; in check_alu_op()
14992 if (BPF_CLASS(insn->code) == BPF_ALU64) { in check_alu_op()
14993 if (insn->imm) { in check_alu_op()
14995 mark_reg_unknown(env, regs, insn->dst_reg); in check_alu_op()
14996 if (insn->imm == 1) { /* cast from as(1) to as(0) */ in check_alu_op()
14997 dst_reg->type = PTR_TO_ARENA; in check_alu_op()
14998 /* PTR_TO_ARENA is 32-bit */ in check_alu_op()
14999 dst_reg->subreg_def = env->insn_idx + 1; in check_alu_op()
15001 } else if (insn->off == 0) { in check_alu_op()
15007 dst_reg->live |= REG_LIVE_WRITTEN; in check_alu_op()
15008 dst_reg->subreg_def = DEF_NOT_SUBREG; in check_alu_op()
15011 if (is_pointer_value(env, insn->src_reg)) { in check_alu_op()
15013 "R%d sign-extension part of pointer\n", in check_alu_op()
15014 insn->src_reg); in check_alu_op()
15015 return -EACCES; in check_alu_op()
15016 } else if (src_reg->type == SCALAR_VALUE) { in check_alu_op()
15019 no_sext = src_reg->umax_value < (1ULL << (insn->off - 1)); in check_alu_op()
15024 dst_reg->id = 0; in check_alu_op()
15025 coerce_reg_to_size_sx(dst_reg, insn->off >> 3); in check_alu_op()
15026 dst_reg->live |= REG_LIVE_WRITTEN; in check_alu_op()
15027 dst_reg->subreg_def = DEF_NOT_SUBREG; in check_alu_op()
15029 mark_reg_unknown(env, regs, insn->dst_reg); in check_alu_op()
15034 if (is_pointer_value(env, insn->src_reg)) { in check_alu_op()
15037 insn->src_reg); in check_alu_op()
15038 return -EACCES; in check_alu_op()
15039 } else if (src_reg->type == SCALAR_VALUE) { in check_alu_op()
15040 if (insn->off == 0) { in check_alu_op()
15051 dst_reg->id = 0; in check_alu_op()
15052 dst_reg->live |= REG_LIVE_WRITTEN; in check_alu_op()
15053 dst_reg->subreg_def = env->insn_idx + 1; in check_alu_op()
15056 bool no_sext = src_reg->umax_value < (1ULL << (insn->off - 1)); in check_alu_op()
15062 dst_reg->id = 0; in check_alu_op()
15063 dst_reg->live |= REG_LIVE_WRITTEN; in check_alu_op()
15064 dst_reg->subreg_def = env->insn_idx + 1; in check_alu_op()
15065 coerce_subreg_to_size_sx(dst_reg, insn->off >> 3); in check_alu_op()
15069 insn->dst_reg); in check_alu_op()
15079 mark_reg_unknown(env, regs, insn->dst_reg); in check_alu_op()
15080 regs[insn->dst_reg].type = SCALAR_VALUE; in check_alu_op()
15081 if (BPF_CLASS(insn->code) == BPF_ALU64) { in check_alu_op()
15082 __mark_reg_known(regs + insn->dst_reg, in check_alu_op()
15083 insn->imm); in check_alu_op()
15085 __mark_reg_known(regs + insn->dst_reg, in check_alu_op()
15086 (u32)insn->imm); in check_alu_op()
15092 return -EINVAL; in check_alu_op()
15096 if (BPF_SRC(insn->code) == BPF_X) { in check_alu_op()
15097 if (insn->imm != 0 || insn->off > 1 || in check_alu_op()
15098 (insn->off == 1 && opcode != BPF_MOD && opcode != BPF_DIV)) { in check_alu_op()
15100 return -EINVAL; in check_alu_op()
15103 err = check_reg_arg(env, insn->src_reg, SRC_OP); in check_alu_op()
15107 if (insn->src_reg != BPF_REG_0 || insn->off > 1 || in check_alu_op()
15108 (insn->off == 1 && opcode != BPF_MOD && opcode != BPF_DIV)) { in check_alu_op()
15110 return -EINVAL; in check_alu_op()
15115 err = check_reg_arg(env, insn->dst_reg, SRC_OP); in check_alu_op()
15120 BPF_SRC(insn->code) == BPF_K && insn->imm == 0) { in check_alu_op()
15122 return -EINVAL; in check_alu_op()
15126 opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { in check_alu_op()
15127 int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32; in check_alu_op()
15129 if (insn->imm < 0 || insn->imm >= size) { in check_alu_op()
15130 verbose(env, "invalid shift %d\n", insn->imm); in check_alu_op()
15131 return -EINVAL; in check_alu_op()
15136 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); in check_alu_op()
15142 return reg_bounds_sanity_check(env, &regs[insn->dst_reg], "alu"); in check_alu_op()
15154 if (dst_reg->off < 0 || in find_good_pkt_pointers()
15155 (dst_reg->off == 0 && range_right_open)) in find_good_pkt_pointers()
15159 if (dst_reg->umax_value > MAX_PACKET_OFF || in find_good_pkt_pointers()
15160 dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF) in find_good_pkt_pointers()
15166 new_range = dst_reg->off; in find_good_pkt_pointers()
15207 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8) in find_good_pkt_pointers()
15208 * and [r3, r3 + 8-1) respectively is safe to access depending on in find_good_pkt_pointers()
15215 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16. in find_good_pkt_pointers()
15218 if (reg->type == type && reg->id == dst_reg->id) in find_good_pkt_pointers()
15220 reg->range = max(reg->range, new_range); in find_good_pkt_pointers()
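A typical guard that exercises this propagation (illustrative XDP-style C; ctx and the field names follow struct xdp_md): once the comparison against the packet-end pointer succeeds, every register carrying the same packet-pointer id receives range = 8, so loads up to that offset need no further checks.

	void *data     = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;

	if (data + 8 > data_end)        /* PTR_TO_PACKET vs PTR_TO_PACKET_END */
		return XDP_DROP;
	/* here find_good_pkt_pointers() has set range = 8 on 'data' and all
	 * registers sharing its id, so this load is accepted:
	 */
	u64 first8 = *(u64 *)data;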
15230 struct tnum t1 = is_jmp32 ? tnum_subreg(reg1->var_off) : reg1->var_off; in is_scalar_branch_taken()
15231 struct tnum t2 = is_jmp32 ? tnum_subreg(reg2->var_off) : reg2->var_off; in is_scalar_branch_taken()
15232 u64 umin1 = is_jmp32 ? (u64)reg1->u32_min_value : reg1->umin_value; in is_scalar_branch_taken()
15233 u64 umax1 = is_jmp32 ? (u64)reg1->u32_max_value : reg1->umax_value; in is_scalar_branch_taken()
15234 s64 smin1 = is_jmp32 ? (s64)reg1->s32_min_value : reg1->smin_value; in is_scalar_branch_taken()
15235 s64 smax1 = is_jmp32 ? (s64)reg1->s32_max_value : reg1->smax_value; in is_scalar_branch_taken()
15236 u64 umin2 = is_jmp32 ? (u64)reg2->u32_min_value : reg2->umin_value; in is_scalar_branch_taken()
15237 u64 umax2 = is_jmp32 ? (u64)reg2->u32_max_value : reg2->umax_value; in is_scalar_branch_taken()
15238 s64 smin2 = is_jmp32 ? (s64)reg2->s32_min_value : reg2->smin_value; in is_scalar_branch_taken()
15239 s64 smax2 = is_jmp32 ? (s64)reg2->s32_max_value : reg2->smax_value; in is_scalar_branch_taken()
15248 /* non-overlapping ranges */ in is_scalar_branch_taken()
15254 /* if 64-bit ranges are inconclusive, see if we can in is_scalar_branch_taken()
15255 * utilize 32-bit subrange knowledge to eliminate in is_scalar_branch_taken()
15258 if (reg1->u32_min_value > reg2->u32_max_value || in is_scalar_branch_taken()
15259 reg1->u32_max_value < reg2->u32_min_value) in is_scalar_branch_taken()
15261 if (reg1->s32_min_value > reg2->s32_max_value || in is_scalar_branch_taken()
15262 reg1->s32_max_value < reg2->s32_min_value) in is_scalar_branch_taken()
15272 /* non-overlapping ranges */ in is_scalar_branch_taken()
15278 /* if 64-bit ranges are inconclusive, see if we can in is_scalar_branch_taken()
15279 * utilize 32-bit subrange knowledge to eliminate in is_scalar_branch_taken()
15282 if (reg1->u32_min_value > reg2->u32_max_value || in is_scalar_branch_taken()
15283 reg1->u32_max_value < reg2->u32_min_value) in is_scalar_branch_taken()
15285 if (reg1->s32_min_value > reg2->s32_max_value || in is_scalar_branch_taken()
15286 reg1->s32_max_value < reg2->s32_min_value) in is_scalar_branch_taken()
15296 return -1; in is_scalar_branch_taken()
15352 return -1; in is_scalar_branch_taken()
15382 if (src_reg->type == PTR_TO_PACKET_END) { in is_pkt_ptr_branch_taken()
15384 } else if (dst_reg->type == PTR_TO_PACKET_END) { in is_pkt_ptr_branch_taken()
15388 return -1; in is_pkt_ptr_branch_taken()
15391 if (pkt->range >= 0) in is_pkt_ptr_branch_taken()
15392 return -1; in is_pkt_ptr_branch_taken()
15400 if (pkt->range == BEYOND_PKT_END) in is_pkt_ptr_branch_taken()
15409 if (pkt->range == BEYOND_PKT_END || pkt->range == AT_PKT_END) in is_pkt_ptr_branch_taken()
15413 return -1; in is_pkt_ptr_branch_taken()
15418 * 1 - branch will be taken and "goto target" will be executed
15419 * 0 - branch will not be taken and fall-through to next insn
15420 * -1 - unknown. Example: "if (reg1 < 5)" is unknown when register value
15439 return -1; in is_branch_taken()
15442 return -1; in is_branch_taken()
15449 return -1; in is_branch_taken()
15457 return -1; in is_branch_taken()
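A condensed sketch of how this tri-state result is consumed by check_cond_jmp_op() further below in this listing: only the feasible branch(es) are explored.

	pred = is_branch_taken(dst_reg, src_reg, opcode, is_jmp32);
	if (pred == 1)
		*insn_idx += insn->off;     /* always taken: follow only the goto        */
	else if (pred == 0)
		;                           /* never taken: follow only the fall-through */
	else                                /* pred == -1: unknown, explore both         */
		other_branch = push_stack(env, *insn_idx + insn->off + 1,
					  *insn_idx, false);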
15513 reg1->u32_min_value = max(reg1->u32_min_value, reg2->u32_min_value); in regs_refine_cond_op()
15514 reg1->u32_max_value = min(reg1->u32_max_value, reg2->u32_max_value); in regs_refine_cond_op()
15515 reg1->s32_min_value = max(reg1->s32_min_value, reg2->s32_min_value); in regs_refine_cond_op()
15516 reg1->s32_max_value = min(reg1->s32_max_value, reg2->s32_max_value); in regs_refine_cond_op()
15517 reg2->u32_min_value = reg1->u32_min_value; in regs_refine_cond_op()
15518 reg2->u32_max_value = reg1->u32_max_value; in regs_refine_cond_op()
15519 reg2->s32_min_value = reg1->s32_min_value; in regs_refine_cond_op()
15520 reg2->s32_max_value = reg1->s32_max_value; in regs_refine_cond_op()
15522 t = tnum_intersect(tnum_subreg(reg1->var_off), tnum_subreg(reg2->var_off)); in regs_refine_cond_op()
15523 reg1->var_off = tnum_with_subreg(reg1->var_off, t); in regs_refine_cond_op()
15524 reg2->var_off = tnum_with_subreg(reg2->var_off, t); in regs_refine_cond_op()
15526 reg1->umin_value = max(reg1->umin_value, reg2->umin_value); in regs_refine_cond_op()
15527 reg1->umax_value = min(reg1->umax_value, reg2->umax_value); in regs_refine_cond_op()
15528 reg1->smin_value = max(reg1->smin_value, reg2->smin_value); in regs_refine_cond_op()
15529 reg1->smax_value = min(reg1->smax_value, reg2->smax_value); in regs_refine_cond_op()
15530 reg2->umin_value = reg1->umin_value; in regs_refine_cond_op()
15531 reg2->umax_value = reg1->umax_value; in regs_refine_cond_op()
15532 reg2->smin_value = reg1->smin_value; in regs_refine_cond_op()
15533 reg2->smax_value = reg1->smax_value; in regs_refine_cond_op()
15535 reg1->var_off = tnum_intersect(reg1->var_off, reg2->var_off); in regs_refine_cond_op()
15536 reg2->var_off = reg1->var_off; in regs_refine_cond_op()
15559 if (reg1->u32_min_value == (u32)val) in regs_refine_cond_op()
15560 reg1->u32_min_value++; in regs_refine_cond_op()
15561 if (reg1->u32_max_value == (u32)val) in regs_refine_cond_op()
15562 reg1->u32_max_value--; in regs_refine_cond_op()
15563 if (reg1->s32_min_value == (s32)val) in regs_refine_cond_op()
15564 reg1->s32_min_value++; in regs_refine_cond_op()
15565 if (reg1->s32_max_value == (s32)val) in regs_refine_cond_op()
15566 reg1->s32_max_value--; in regs_refine_cond_op()
15568 if (reg1->umin_value == (u64)val) in regs_refine_cond_op()
15569 reg1->umin_value++; in regs_refine_cond_op()
15570 if (reg1->umax_value == (u64)val) in regs_refine_cond_op()
15571 reg1->umax_value--; in regs_refine_cond_op()
15572 if (reg1->smin_value == (s64)val) in regs_refine_cond_op()
15573 reg1->smin_value++; in regs_refine_cond_op()
15574 if (reg1->smax_value == (s64)val) in regs_refine_cond_op()
15575 reg1->smax_value--; in regs_refine_cond_op()
15588 * it's a single-bit value to begin with. in regs_refine_cond_op()
15597 t = tnum_or(tnum_subreg(reg1->var_off), tnum_const(val)); in regs_refine_cond_op()
15598 reg1->var_off = tnum_with_subreg(reg1->var_off, t); in regs_refine_cond_op()
15600 reg1->var_off = tnum_or(reg1->var_off, tnum_const(val)); in regs_refine_cond_op()
15610 t = tnum_and(tnum_subreg(reg1->var_off), tnum_const(~val)); in regs_refine_cond_op()
15611 reg1->var_off = tnum_with_subreg(reg1->var_off, t); in regs_refine_cond_op()
15613 reg1->var_off = tnum_and(reg1->var_off, tnum_const(~val)); in regs_refine_cond_op()
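	/* Illustrative example for the bit-test refinement above: given
	 * "if r1 & 0x4 goto L", the branch where the jump is NOT taken
	 * learns that bit 2 of r1 is zero (var_off &= ~0x4); the taken
	 * branch can only learn that the bit is one because 0x4 is a
	 * single-bit mask, as the comment above points out.
	 */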
15618 reg1->u32_max_value = min(reg1->u32_max_value, reg2->u32_max_value); in regs_refine_cond_op()
15619 reg2->u32_min_value = max(reg1->u32_min_value, reg2->u32_min_value); in regs_refine_cond_op()
15621 reg1->umax_value = min(reg1->umax_value, reg2->umax_value); in regs_refine_cond_op()
15622 reg2->umin_value = max(reg1->umin_value, reg2->umin_value); in regs_refine_cond_op()
15627 reg1->u32_max_value = min(reg1->u32_max_value, reg2->u32_max_value - 1); in regs_refine_cond_op()
15628 reg2->u32_min_value = max(reg1->u32_min_value + 1, reg2->u32_min_value); in regs_refine_cond_op()
15630 reg1->umax_value = min(reg1->umax_value, reg2->umax_value - 1); in regs_refine_cond_op()
15631 reg2->umin_value = max(reg1->umin_value + 1, reg2->umin_value); in regs_refine_cond_op()
15636 reg1->s32_max_value = min(reg1->s32_max_value, reg2->s32_max_value); in regs_refine_cond_op()
15637 reg2->s32_min_value = max(reg1->s32_min_value, reg2->s32_min_value); in regs_refine_cond_op()
15639 reg1->smax_value = min(reg1->smax_value, reg2->smax_value); in regs_refine_cond_op()
15640 reg2->smin_value = max(reg1->smin_value, reg2->smin_value); in regs_refine_cond_op()
15645 reg1->s32_max_value = min(reg1->s32_max_value, reg2->s32_max_value - 1); in regs_refine_cond_op()
15646 reg2->s32_min_value = max(reg1->s32_min_value + 1, reg2->s32_min_value); in regs_refine_cond_op()
15648 reg1->smax_value = min(reg1->smax_value, reg2->smax_value - 1); in regs_refine_cond_op()
15649 reg2->smin_value = max(reg1->smin_value + 1, reg2->smin_value); in regs_refine_cond_op()
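	/* Worked example (illustrative) for the strict unsigned "<" case
	 * above: with r1 in [0, 100] and r2 in [0, 10], the branch where
	 * "if r1 < r2 goto L" is taken refines r1 to [0, 9]
	 * (umax = min(100, 10 - 1)) and r2 to [1, 10] (umin = max(0 + 1, 0)).
	 */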
15659 * check, in which case we have a fake SCALAR_VALUE representing insn->imm).
15676 if (false_reg1->type != SCALAR_VALUE || false_reg2->type != SCALAR_VALUE) in reg_set_min_max()
15700 if (type_may_be_null(reg->type) && reg->id == id && in mark_ptr_or_null_reg()
15701 (is_rcu_reg(reg) || !WARN_ON_ONCE(!reg->id))) { in mark_ptr_or_null_reg()
15703 * known-zero, because we don't allow pointer arithmetic on in mark_ptr_or_null_reg()
15709 * is fine to expect to see reg->off. in mark_ptr_or_null_reg()
15711 if (WARN_ON_ONCE(reg->smin_value || reg->smax_value || !tnum_equals_const(reg->var_off, 0))) in mark_ptr_or_null_reg()
15713 if (!(type_is_ptr_alloc_obj(reg->type) || type_is_non_owning_ref(reg->type)) && in mark_ptr_or_null_reg()
15714 WARN_ON_ONCE(reg->off)) in mark_ptr_or_null_reg()
15718 reg->type = SCALAR_VALUE; in mark_ptr_or_null_reg()
15723 reg->id = 0; in mark_ptr_or_null_reg()
15724 reg->ref_obj_id = 0; in mark_ptr_or_null_reg()
15732 /* For not-NULL ptr, reg->ref_obj_id will be reset in mark_ptr_or_null_reg()
15735 * reg->id is still used by spin_lock ptr. Other in mark_ptr_or_null_reg()
15736 * than spin_lock ptr type, reg->id can be reset. in mark_ptr_or_null_reg()
15738 reg->id = 0; in mark_ptr_or_null_reg()
15749 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in mark_ptr_or_null_regs()
15750 struct bpf_reg_state *regs = state->regs, *reg; in mark_ptr_or_null_regs()
15772 if (BPF_SRC(insn->code) != BPF_X) in try_match_pkt_pointers()
15775 /* Pointers are always 64-bit. */ in try_match_pkt_pointers()
15776 if (BPF_CLASS(insn->code) == BPF_JMP32) in try_match_pkt_pointers()
15779 switch (BPF_OP(insn->code)) { in try_match_pkt_pointers()
15781 if ((dst_reg->type == PTR_TO_PACKET && in try_match_pkt_pointers()
15782 src_reg->type == PTR_TO_PACKET_END) || in try_match_pkt_pointers()
15783 (dst_reg->type == PTR_TO_PACKET_META && in try_match_pkt_pointers()
15787 dst_reg->type, false); in try_match_pkt_pointers()
15788 mark_pkt_end(other_branch, insn->dst_reg, true); in try_match_pkt_pointers()
15789 } else if ((dst_reg->type == PTR_TO_PACKET_END && in try_match_pkt_pointers()
15790 src_reg->type == PTR_TO_PACKET) || in try_match_pkt_pointers()
15792 src_reg->type == PTR_TO_PACKET_META)) { in try_match_pkt_pointers()
15795 src_reg->type, true); in try_match_pkt_pointers()
15796 mark_pkt_end(this_branch, insn->src_reg, false); in try_match_pkt_pointers()
15802 if ((dst_reg->type == PTR_TO_PACKET && in try_match_pkt_pointers()
15803 src_reg->type == PTR_TO_PACKET_END) || in try_match_pkt_pointers()
15804 (dst_reg->type == PTR_TO_PACKET_META && in try_match_pkt_pointers()
15808 dst_reg->type, true); in try_match_pkt_pointers()
15809 mark_pkt_end(this_branch, insn->dst_reg, false); in try_match_pkt_pointers()
15810 } else if ((dst_reg->type == PTR_TO_PACKET_END && in try_match_pkt_pointers()
15811 src_reg->type == PTR_TO_PACKET) || in try_match_pkt_pointers()
15813 src_reg->type == PTR_TO_PACKET_META)) { in try_match_pkt_pointers()
15816 src_reg->type, false); in try_match_pkt_pointers()
15817 mark_pkt_end(other_branch, insn->src_reg, true); in try_match_pkt_pointers()
15823 if ((dst_reg->type == PTR_TO_PACKET && in try_match_pkt_pointers()
15824 src_reg->type == PTR_TO_PACKET_END) || in try_match_pkt_pointers()
15825 (dst_reg->type == PTR_TO_PACKET_META && in try_match_pkt_pointers()
15829 dst_reg->type, true); in try_match_pkt_pointers()
15830 mark_pkt_end(other_branch, insn->dst_reg, false); in try_match_pkt_pointers()
15831 } else if ((dst_reg->type == PTR_TO_PACKET_END && in try_match_pkt_pointers()
15832 src_reg->type == PTR_TO_PACKET) || in try_match_pkt_pointers()
15834 src_reg->type == PTR_TO_PACKET_META)) { in try_match_pkt_pointers()
15837 src_reg->type, false); in try_match_pkt_pointers()
15838 mark_pkt_end(this_branch, insn->src_reg, true); in try_match_pkt_pointers()
15844 if ((dst_reg->type == PTR_TO_PACKET && in try_match_pkt_pointers()
15845 src_reg->type == PTR_TO_PACKET_END) || in try_match_pkt_pointers()
15846 (dst_reg->type == PTR_TO_PACKET_META && in try_match_pkt_pointers()
15850 dst_reg->type, false); in try_match_pkt_pointers()
15851 mark_pkt_end(this_branch, insn->dst_reg, true); in try_match_pkt_pointers()
15852 } else if ((dst_reg->type == PTR_TO_PACKET_END && in try_match_pkt_pointers()
15853 src_reg->type == PTR_TO_PACKET) || in try_match_pkt_pointers()
15855 src_reg->type == PTR_TO_PACKET_META)) { in try_match_pkt_pointers()
15858 src_reg->type, true); in try_match_pkt_pointers()
15859 mark_pkt_end(other_branch, insn->src_reg, false); in try_match_pkt_pointers()
15876 if (reg->type != SCALAR_VALUE || (reg->id & ~BPF_ADD_CONST) != id) in __collect_linked_regs()
15881 e->frameno = frameno; in __collect_linked_regs()
15882 e->is_reg = is_reg; in __collect_linked_regs()
15883 e->regno = spi_or_reg; in __collect_linked_regs()
15885 reg->id = 0; in __collect_linked_regs()
15890 * in verifier state, save R in linked_regs if R->id == id.
15901 for (i = vstate->curframe; i >= 0; i--) { in collect_linked_regs()
15902 func = vstate->frame[i]; in collect_linked_regs()
15904 reg = &func->regs[j]; in collect_linked_regs()
15907 for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { in collect_linked_regs()
15908 if (!is_spilled_reg(&func->stack[j])) in collect_linked_regs()
15910 reg = &func->stack[j].spilled_ptr; in collect_linked_regs()
15917 * if R->id == known_reg->id.
15927 for (i = 0; i < linked_regs->cnt; ++i) { in sync_linked_regs()
15928 e = &linked_regs->entries[i]; in sync_linked_regs()
15929 reg = e->is_reg ? &vstate->frame[e->frameno]->regs[e->regno] in sync_linked_regs()
15930 : &vstate->frame[e->frameno]->stack[e->spi].spilled_ptr; in sync_linked_regs()
15931 if (reg->type != SCALAR_VALUE || reg == known_reg) in sync_linked_regs()
15933 if ((reg->id & ~BPF_ADD_CONST) != (known_reg->id & ~BPF_ADD_CONST)) in sync_linked_regs()
15935 if ((!(reg->id & BPF_ADD_CONST) && !(known_reg->id & BPF_ADD_CONST)) || in sync_linked_regs()
15936 reg->off == known_reg->off) { in sync_linked_regs()
15937 s32 saved_subreg_def = reg->subreg_def; in sync_linked_regs()
15940 reg->subreg_def = saved_subreg_def; in sync_linked_regs()
15942 s32 saved_subreg_def = reg->subreg_def; in sync_linked_regs()
15943 s32 saved_off = reg->off; in sync_linked_regs()
15946 __mark_reg_known(&fake_reg, (s32)reg->off - (s32)known_reg->off); in sync_linked_regs()
15954 reg->off = saved_off; in sync_linked_regs()
15955 reg->subreg_def = saved_subreg_def; in sync_linked_regs()
15959 reg->var_off = tnum_add(reg->var_off, fake_reg.var_off); in sync_linked_regs()
15967 struct bpf_verifier_state *this_branch = env->cur_state; in check_cond_jmp_op()
15969 struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs; in check_cond_jmp_op()
15973 u8 opcode = BPF_OP(insn->code); in check_cond_jmp_op()
15975 int pred = -1; in check_cond_jmp_op()
15981 return -EINVAL; in check_cond_jmp_op()
15985 struct bpf_verifier_state *cur_st = env->cur_state, *queued_st, *prev_st; in check_cond_jmp_op()
15988 if (insn->code != (BPF_JMP | BPF_JCOND) || in check_cond_jmp_op()
15989 insn->src_reg != BPF_MAY_GOTO || in check_cond_jmp_op()
15990 insn->dst_reg || insn->imm) { in check_cond_jmp_op()
15991 verbose(env, "invalid may_goto imm %d\n", insn->imm); in check_cond_jmp_op()
15992 return -EINVAL; in check_cond_jmp_op()
15994 prev_st = find_prev_entry(env, cur_st->parent, idx); in check_cond_jmp_op()
15999 return -ENOMEM; in check_cond_jmp_op()
16001 queued_st->may_goto_depth++; in check_cond_jmp_op()
16004 *insn_idx += insn->off; in check_cond_jmp_op()
16009 err = check_reg_arg(env, insn->dst_reg, SRC_OP); in check_cond_jmp_op()
16013 dst_reg = &regs[insn->dst_reg]; in check_cond_jmp_op()
16014 if (BPF_SRC(insn->code) == BPF_X) { in check_cond_jmp_op()
16015 if (insn->imm != 0) { in check_cond_jmp_op()
16017 return -EINVAL; in check_cond_jmp_op()
16021 err = check_reg_arg(env, insn->src_reg, SRC_OP); in check_cond_jmp_op()
16025 src_reg = &regs[insn->src_reg]; in check_cond_jmp_op()
16027 is_pointer_value(env, insn->src_reg)) { in check_cond_jmp_op()
16029 insn->src_reg); in check_cond_jmp_op()
16030 return -EACCES; in check_cond_jmp_op()
16033 if (insn->src_reg != BPF_REG_0) { in check_cond_jmp_op()
16035 return -EINVAL; in check_cond_jmp_op()
16037 src_reg = &env->fake_reg[0]; in check_cond_jmp_op()
16039 src_reg->type = SCALAR_VALUE; in check_cond_jmp_op()
16040 __mark_reg_known(src_reg, insn->imm); in check_cond_jmp_op()
16043 is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; in check_cond_jmp_op()
16050 err = mark_chain_precision(env, insn->dst_reg); in check_cond_jmp_op()
16051 if (BPF_SRC(insn->code) == BPF_X && !err && in check_cond_jmp_op()
16053 err = mark_chain_precision(env, insn->src_reg); in check_cond_jmp_op()
16059 /* Only follow the goto, ignore fall-through. If needed, push in check_cond_jmp_op()
16060 * the fall-through branch for simulation under speculative in check_cond_jmp_op()
16063 if (!env->bypass_spec_v1 && in check_cond_jmp_op()
16066 return -EFAULT; in check_cond_jmp_op()
16067 if (env->log.level & BPF_LOG_LEVEL) in check_cond_jmp_op()
16068 print_insn_state(env, this_branch, this_branch->curframe); in check_cond_jmp_op()
16069 *insn_idx += insn->off; in check_cond_jmp_op()
16072 /* Only follow the fall-through branch, since that's where the in check_cond_jmp_op()
16076 if (!env->bypass_spec_v1 && in check_cond_jmp_op()
16078 *insn_idx + insn->off + 1, in check_cond_jmp_op()
16080 return -EFAULT; in check_cond_jmp_op()
16081 if (env->log.level & BPF_LOG_LEVEL) in check_cond_jmp_op()
16082 print_insn_state(env, this_branch, this_branch->curframe); in check_cond_jmp_op()
16091 if (BPF_SRC(insn->code) == BPF_X && src_reg->type == SCALAR_VALUE && src_reg->id) in check_cond_jmp_op()
16092 collect_linked_regs(this_branch, src_reg->id, &linked_regs); in check_cond_jmp_op()
16093 if (dst_reg->type == SCALAR_VALUE && dst_reg->id) in check_cond_jmp_op()
16094 collect_linked_regs(this_branch, dst_reg->id, &linked_regs); in check_cond_jmp_op()
16101 other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx, in check_cond_jmp_op()
16104 return -EFAULT; in check_cond_jmp_op()
16105 other_branch_regs = other_branch->frame[other_branch->curframe]->regs; in check_cond_jmp_op()
16107 if (BPF_SRC(insn->code) == BPF_X) { in check_cond_jmp_op()
16109 &other_branch_regs[insn->dst_reg], in check_cond_jmp_op()
16110 &other_branch_regs[insn->src_reg], in check_cond_jmp_op()
16112 } else /* BPF_SRC(insn->code) == BPF_K */ { in check_cond_jmp_op()
16117 memcpy(&env->fake_reg[1], &env->fake_reg[0], in check_cond_jmp_op()
16118 sizeof(env->fake_reg[0])); in check_cond_jmp_op()
16120 &other_branch_regs[insn->dst_reg], in check_cond_jmp_op()
16121 &env->fake_reg[0], in check_cond_jmp_op()
16122 dst_reg, &env->fake_reg[1], in check_cond_jmp_op()
16128 if (BPF_SRC(insn->code) == BPF_X && in check_cond_jmp_op()
16129 src_reg->type == SCALAR_VALUE && src_reg->id && in check_cond_jmp_op()
16130 !WARN_ON_ONCE(src_reg->id != other_branch_regs[insn->src_reg].id)) { in check_cond_jmp_op()
16132 sync_linked_regs(other_branch, &other_branch_regs[insn->src_reg], &linked_regs); in check_cond_jmp_op()
16134 if (dst_reg->type == SCALAR_VALUE && dst_reg->id && in check_cond_jmp_op()
16135 !WARN_ON_ONCE(dst_reg->id != other_branch_regs[insn->dst_reg].id)) { in check_cond_jmp_op()
16137 sync_linked_regs(other_branch, &other_branch_regs[insn->dst_reg], &linked_regs); in check_cond_jmp_op()
16142 * E.g. register A - maybe null in check_cond_jmp_op()
16143 * register B - not null in check_cond_jmp_op()
16144 * for JNE A, B, ... - A is not null in the false branch; in check_cond_jmp_op()
16145 * for JEQ A, B, ... - A is not null in the true branch. in check_cond_jmp_op()
16152 if (!is_jmp32 && BPF_SRC(insn->code) == BPF_X && in check_cond_jmp_op()
16154 type_may_be_null(src_reg->type) != type_may_be_null(dst_reg->type) && in check_cond_jmp_op()
16155 base_type(src_reg->type) != PTR_TO_BTF_ID && in check_cond_jmp_op()
16156 base_type(dst_reg->type) != PTR_TO_BTF_ID) { in check_cond_jmp_op()
16170 if (type_may_be_null(src_reg->type)) in check_cond_jmp_op()
16171 mark_ptr_not_null_reg(&eq_branch_regs[insn->src_reg]); in check_cond_jmp_op()
16173 mark_ptr_not_null_reg(&eq_branch_regs[insn->dst_reg]); in check_cond_jmp_op()
16181 if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K && in check_cond_jmp_op()
16182 insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) && in check_cond_jmp_op()
16183 type_may_be_null(dst_reg->type)) { in check_cond_jmp_op()
16187 mark_ptr_or_null_regs(this_branch, insn->dst_reg, in check_cond_jmp_op()
16189 mark_ptr_or_null_regs(other_branch, insn->dst_reg, in check_cond_jmp_op()
16191 } else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg], in check_cond_jmp_op()
16193 is_pointer_value(env, insn->dst_reg)) { in check_cond_jmp_op()
16195 insn->dst_reg); in check_cond_jmp_op()
16196 return -EACCES; in check_cond_jmp_op()
16198 if (env->log.level & BPF_LOG_LEVEL) in check_cond_jmp_op()
16199 print_insn_state(env, this_branch, this_branch->curframe); in check_cond_jmp_op()
16212 if (BPF_SIZE(insn->code) != BPF_DW) { in check_ld_imm()
16214 return -EINVAL; in check_ld_imm()
16216 if (insn->off != 0) { in check_ld_imm()
16218 return -EINVAL; in check_ld_imm()
16221 err = check_reg_arg(env, insn->dst_reg, DST_OP); in check_ld_imm()
16225 dst_reg = &regs[insn->dst_reg]; in check_ld_imm()
16226 if (insn->src_reg == 0) { in check_ld_imm()
16227 u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm; in check_ld_imm()
16229 dst_reg->type = SCALAR_VALUE; in check_ld_imm()
16230 __mark_reg_known(&regs[insn->dst_reg], imm); in check_ld_imm()
16235 * we either succeed and assign a corresponding dst_reg->type after in check_ld_imm()
16238 mark_reg_known_zero(env, regs, insn->dst_reg); in check_ld_imm()
16240 if (insn->src_reg == BPF_PSEUDO_BTF_ID) { in check_ld_imm()
16241 dst_reg->type = aux->btf_var.reg_type; in check_ld_imm()
16242 switch (base_type(dst_reg->type)) { in check_ld_imm()
16244 dst_reg->mem_size = aux->btf_var.mem_size; in check_ld_imm()
16247 dst_reg->btf = aux->btf_var.btf; in check_ld_imm()
16248 dst_reg->btf_id = aux->btf_var.btf_id; in check_ld_imm()
16252 return -EFAULT; in check_ld_imm()
16257 if (insn->src_reg == BPF_PSEUDO_FUNC) { in check_ld_imm()
16258 struct bpf_prog_aux *aux = env->prog->aux; in check_ld_imm()
16260 env->insn_idx + insn->imm + 1); in check_ld_imm()
16262 if (!aux->func_info) { in check_ld_imm()
16264 return -EINVAL; in check_ld_imm()
16266 if (aux->func_info_aux[subprogno].linkage != BTF_FUNC_STATIC) { in check_ld_imm()
16268 return -EINVAL; in check_ld_imm()
16271 dst_reg->type = PTR_TO_FUNC; in check_ld_imm()
16272 dst_reg->subprogno = subprogno; in check_ld_imm()
16276 map = env->used_maps[aux->map_index]; in check_ld_imm()
16277 dst_reg->map_ptr = map; in check_ld_imm()
16279 if (insn->src_reg == BPF_PSEUDO_MAP_VALUE || in check_ld_imm()
16280 insn->src_reg == BPF_PSEUDO_MAP_IDX_VALUE) { in check_ld_imm()
16281 if (map->map_type == BPF_MAP_TYPE_ARENA) { in check_ld_imm()
16285 dst_reg->type = PTR_TO_MAP_VALUE; in check_ld_imm()
16286 dst_reg->off = aux->map_off; in check_ld_imm()
16287 WARN_ON_ONCE(map->max_entries != 1); in check_ld_imm()
16288 /* We want reg->id to be same (0) as map_value is not distinct */ in check_ld_imm()
16289 } else if (insn->src_reg == BPF_PSEUDO_MAP_FD || in check_ld_imm()
16290 insn->src_reg == BPF_PSEUDO_MAP_IDX) { in check_ld_imm()
16291 dst_reg->type = CONST_PTR_TO_MAP; in check_ld_imm()
16294 return -EINVAL; in check_ld_imm()
16313 * - they can only appear in the programs where ctx == skb
16314 * - since they are wrappers of function calls, they scratch R1-R5 registers,
16315 * preserve R6-R9, and store return value into R0
16322 * IMM == 32-bit immediate
16325 * R0 - 8/16/32-bit skb data converted to cpu endianness
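A classic use of these wrappers in a socket filter, shown as illustrative eBPF assembly (offset 12 and EtherType 0x0800 are just example values):

	r6 = r1                     /* LD_ABS/LD_IND read the skb implicitly from R6 */
	r0 = *(u16 *)skb[12]        /* BPF_LD | BPF_ABS | BPF_H: load EtherType      */
	if r0 != 0x0800 goto drop   /* keep IPv4 only                                */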
16331 u8 mode = BPF_MODE(insn->code); in check_ld_abs()
16334 if (!may_access_skb(resolve_prog_type(env->prog))) { in check_ld_abs()
16336 return -EINVAL; in check_ld_abs()
16339 if (!env->ops->gen_ld_abs) { in check_ld_abs()
16341 return -EINVAL; in check_ld_abs()
16344 if (insn->dst_reg != BPF_REG_0 || insn->off != 0 || in check_ld_abs()
16345 BPF_SIZE(insn->code) == BPF_DW || in check_ld_abs()
16346 (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) { in check_ld_abs()
16348 return -EINVAL; in check_ld_abs()
16367 return -EINVAL; in check_ld_abs()
16372 err = check_reg_arg(env, insn->src_reg, SRC_OP); in check_ld_abs()
16392 /* ld_abs load up to 32-bit skb data. */ in check_ld_abs()
16393 regs[BPF_REG_0].subreg_def = env->insn_idx + 1; in check_ld_abs()
16401 const struct bpf_prog *prog = env->prog; in check_return_code()
16404 enum bpf_prog_type prog_type = resolve_prog_type(env->prog); in check_return_code()
16406 struct bpf_func_state *frame = env->cur_state->frame[0]; in check_return_code()
16407 const bool is_subprog = frame->subprogno; in check_return_code()
16410 /* LSM and struct_ops func-ptr's return type could be "void" */ in check_return_code()
16411 if (!is_subprog || frame->in_exception_callback_fn) { in check_return_code()
16414 if (prog->expected_attach_type == BPF_LSM_CGROUP) in check_return_code()
16415 /* See below, can be 0 or 0-1 depending on hook. */ in check_return_code()
16419 if (!prog->aux->attach_func_proto->type) in check_return_code()
16439 return -EACCES; in check_return_code()
16444 if (frame->in_async_callback_fn) { in check_return_code()
16451 if (is_subprog && !frame->in_exception_callback_fn) { in check_return_code()
16452 if (reg->type != SCALAR_VALUE) { in check_return_code()
16454 regno, reg_type_str(env, reg->type)); in check_return_code()
16455 return -EINVAL; in check_return_code()
16462 if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG || in check_return_code()
16463 env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG || in check_return_code()
16464 env->prog->expected_attach_type == BPF_CGROUP_UNIX_RECVMSG || in check_return_code()
16465 env->prog->expected_attach_type == BPF_CGROUP_INET4_GETPEERNAME || in check_return_code()
16466 env->prog->expected_attach_type == BPF_CGROUP_INET6_GETPEERNAME || in check_return_code()
16467 env->prog->expected_attach_type == BPF_CGROUP_UNIX_GETPEERNAME || in check_return_code()
16468 env->prog->expected_attach_type == BPF_CGROUP_INET4_GETSOCKNAME || in check_return_code()
16469 env->prog->expected_attach_type == BPF_CGROUP_INET6_GETSOCKNAME || in check_return_code()
16470 env->prog->expected_attach_type == BPF_CGROUP_UNIX_GETSOCKNAME) in check_return_code()
16472 if (env->prog->expected_attach_type == BPF_CGROUP_INET4_BIND || in check_return_code()
16473 env->prog->expected_attach_type == BPF_CGROUP_INET6_BIND) in check_return_code()
16477 if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) { in check_return_code()
16489 if (!env->prog->aux->attach_btf_id) in check_return_code()
16494 switch (env->prog->expected_attach_type) { in check_return_code()
16505 return -ENOTSUPP; in check_return_code()
16509 switch (env->prog->expected_attach_type) { in check_return_code()
16523 if (env->prog->expected_attach_type != BPF_LSM_CGROUP) { in check_return_code()
16525 if (!get_func_retval_range(env->prog, &range)) in check_return_code()
16531 } else if (!env->prog->aux->attach_func_proto->type) { in check_return_code()
16544 * depends on the to-be-replaced kernel func or bpf program. in check_return_code()
16551 if (reg->type != SCALAR_VALUE) { in check_return_code()
16553 exit_ctx, regno, reg_type_str(env, reg->type)); in check_return_code()
16554 return -EINVAL; in check_return_code()
16564 prog->expected_attach_type == BPF_LSM_CGROUP && in check_return_code()
16566 !prog->aux->attach_func_proto->type) in check_return_code()
16568 return -EINVAL; in check_return_code()
16572 tnum_in(enforce_attach_type_range, reg->var_off)) in check_return_code()
16573 env->prog->enforce_expected_attach_type = 1; in check_return_code()
16582 subprog->changes_pkt_data = true; in mark_subprog_changes_pkt_data()
16585 /* 't' is an index of a call-site.
16587 * Eventually this function would be called when env->cfg.insn_state[w] == EXPLORED.
16597 caller->changes_pkt_data |= callee->changes_pkt_data; in merge_callee_effects()
16600 /* non-recursive DFS pseudo code
16601 * 1 procedure DFS-iterative(G,v):
16606 * 6 t <- S.peek()
16612 * 12 w <- G.adjacentVertex(t,e)
16614 * 14 label e as tree-edge
16619 * 19 label e as back-edge
16622 * 22 label e as forward- or cross-edge
16627 * 0x10 - discovered
16628 * 0x11 - discovered and fall-through edge labelled
16629 * 0x12 - discovered and fall-through and branch edges labelled
16630 * 0x20 - explored
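For reference, these hexadecimal states combine a node flag with the edge labels (enum values assumed from their use elsewhere in this file, shown only for illustration):

	/* DISCOVERED (0x10) | FALLTHROUGH (1) == 0x11
	 * DISCOVERED (0x10) | BRANCH      (2) == 0x12
	 * EXPLORED   (0x20): instruction popped from insn_stack
	 */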
16642 env->insn_aux_data[idx].prune_point = true; in mark_prune_point()
16647 return env->insn_aux_data[insn_idx].prune_point; in is_prune_point()
16652 env->insn_aux_data[idx].force_checkpoint = true; in mark_force_checkpoint()
16657 return env->insn_aux_data[insn_idx].force_checkpoint; in is_force_checkpoint()
16662 env->insn_aux_data[idx].calls_callback = true; in mark_calls_callback()
16667 return env->insn_aux_data[insn_idx].calls_callback; in calls_callback()
16675 /* t, w, e - match pseudo-code above:
16676 * t - index of current instruction
16677 * w - next instruction
16678 * e - edge
16682 int *insn_stack = env->cfg.insn_stack; in push_insn()
16683 int *insn_state = env->cfg.insn_state; in push_insn()
16691 if (w < 0 || w >= env->prog->len) { in push_insn()
16694 return -EINVAL; in push_insn()
16704 /* tree-edge */ in push_insn()
16707 if (env->cfg.cur_stack >= env->prog->len) in push_insn()
16708 return -E2BIG; in push_insn()
16709 insn_stack[env->cfg.cur_stack++] = w; in push_insn()
16712 if (env->bpf_capable) in push_insn()
16716 verbose(env, "back-edge from insn %d to %d\n", t, w); in push_insn()
16717 return -EINVAL; in push_insn()
16719 /* forward- or cross-edge */ in push_insn()
16723 return -EFAULT; in push_insn()
16741 /* when we exit from subprog, we need to record non-linear history */ in visit_func_call_insn()
16754 #define ALL_CALLER_SAVED_REGS ((1u << CALLER_SAVED_REGS) - 1)
16759 * - includes R0 if function is non-void;
16760 * - includes R1-R5 if corresponding parameter has is described
16769 if (fn->ret_type != RET_VOID) in helper_fastcall_clobber_mask()
16771 for (i = 0; i < ARRAY_SIZE(fn->arg_type); ++i) in helper_fastcall_clobber_mask()
16772 if (fn->arg_type[i] != ARG_DONTCARE) in helper_fastcall_clobber_mask()
16786 return env->prog->jit_requested && bpf_jit_supports_percpu_insn(); in verifier_inlines_helper_call()
16798 vlen = btf_type_vlen(meta->func_proto); in kfunc_fastcall_clobber_mask()
16800 if (!btf_type_is_void(btf_type_by_id(meta->btf, meta->func_proto->type))) in kfunc_fastcall_clobber_mask()
16810 return meta->kfunc_flags & KF_FASTCALL; in is_fastcall_kfunc_call()
16817 * - R0 is scratched only if function is non-void;
16818 * - R1-R5 are scratched only if corresponding parameter type is defined
16825 * - for bpf_fastcall calls clang allocates registers as-if relevant r0-r5
16828 * - as a post-processing step, clang visits each bpf_fastcall call and adds
16829 * spill/fill for every live r0-r5;
16831 * - stack offsets used for the spill/fill are allocated as lowest
16835 * - when kernel loads a program, it looks for such patterns
16839 * - if so, and if verifier or current JIT inlines the call to the
16843 * - when old kernel loads a program, presence of spill/fill pairs
16850 * *(u64 *)(r10 - 8) = r1; r1 = 1;
16851 * *(u64 *)(r10 - 16) = r2; r2 = 2;
16852 * call %[to_be_inlined] --> call %[to_be_inlined]
16853 * r2 = *(u64 *)(r10 - 16); r0 = r1;
16854 * r1 = *(u64 *)(r10 - 8); r0 += r2;
16860 * - look for such patterns;
16861 * - mark spill and fill instructions in env->insn_aux_data[*].fastcall_pattern;
16862 * - mark set env->insn_aux_data[*].fastcall_spills_num for call instruction;
16863 * - update env->subprog_info[*]->fastcall_stack_off to find an offset
16865 * - update env->subprog_info[*]->keep_fastcall_stack.
16880 * *(u64 *)(r10 - 8) = r1; r1 = 1;
16881 * call %[to_be_inlined] --> call %[to_be_inlined]
16882 * r1 = *(u64 *)(r10 - 8); r0 = *(u64 *)(r10 - 8); <---- wrong !!!
16883 * r0 = *(u64 *)(r10 - 8); r0 += r1;
16891 struct bpf_insn *insns = env->prog->insnsi, *stx, *ldx; in mark_fastcall_pattern_for_call()
16892 struct bpf_insn *call = &env->prog->insnsi[insn_idx]; in mark_fastcall_pattern_for_call()
16901 if (get_helper_proto(env, call->imm, &fn) < 0) in mark_fastcall_pattern_for_call()
16905 can_be_inlined = fn->allow_fastcall && in mark_fastcall_pattern_for_call()
16906 (verifier_inlines_helper_call(env, call->imm) || in mark_fastcall_pattern_for_call()
16907 bpf_jit_inlines_helper_call(call->imm)); in mark_fastcall_pattern_for_call()
16931 * *(u64 *)(r10 - Y) = rX (where Y % 8 == 0) in mark_fastcall_pattern_for_call()
16935 * rX = *(u64 *)(r10 - Y) in mark_fastcall_pattern_for_call()
16938 if (insn_idx - i < 0 || insn_idx + i >= env->prog->len) in mark_fastcall_pattern_for_call()
16940 stx = &insns[insn_idx - i]; in mark_fastcall_pattern_for_call()
16943 if (stx->code != (BPF_STX | BPF_MEM | BPF_DW) || in mark_fastcall_pattern_for_call()
16944 ldx->code != (BPF_LDX | BPF_MEM | BPF_DW) || in mark_fastcall_pattern_for_call()
16945 stx->dst_reg != BPF_REG_10 || in mark_fastcall_pattern_for_call()
16946 ldx->src_reg != BPF_REG_10) in mark_fastcall_pattern_for_call()
16949 if (stx->src_reg != ldx->dst_reg) in mark_fastcall_pattern_for_call()
16952 if ((BIT(stx->src_reg) & expected_regs_mask) == 0) in mark_fastcall_pattern_for_call()
16956 * is always 8-byte aligned. in mark_fastcall_pattern_for_call()
16958 if (stx->off != off || ldx->off != off) in mark_fastcall_pattern_for_call()
16960 expected_regs_mask &= ~BIT(stx->src_reg); in mark_fastcall_pattern_for_call()
16961 env->insn_aux_data[insn_idx - i].fastcall_pattern = 1; in mark_fastcall_pattern_for_call()
16962 env->insn_aux_data[insn_idx + i].fastcall_pattern = 1; in mark_fastcall_pattern_for_call()
16971 * 1: *(u64 *)(r10 - 8) = r1 in mark_fastcall_pattern_for_call()
16973 * 3: r1 = *(u64 *)(r10 - 8) in mark_fastcall_pattern_for_call()
16974 * 4: *(u64 *)(r10 - 8) = r1 in mark_fastcall_pattern_for_call()
16976 * 6: r1 = *(u64 *)(r10 - 8) in mark_fastcall_pattern_for_call()
16984 env->insn_aux_data[insn_idx].fastcall_spills_num = i - 1; in mark_fastcall_pattern_for_call()
16986 subprog->keep_fastcall_stack = 1; in mark_fastcall_pattern_for_call()
16987 subprog->fastcall_stack_off = min(subprog->fastcall_stack_off, off); in mark_fastcall_pattern_for_call()
16992 struct bpf_subprog_info *subprog = env->subprog_info; in mark_fastcall_patterns()
16997 for (s = 0; s < env->subprog_cnt; ++s, ++subprog) { in mark_fastcall_patterns()
17000 for (i = subprog->start; i < (subprog + 1)->start; ++i) { in mark_fastcall_patterns()
17001 insn = env->prog->insnsi + i; in mark_fastcall_patterns()
17002 if (insn->code != (BPF_STX | BPF_MEM | BPF_DW) || in mark_fastcall_patterns()
17003 insn->dst_reg != BPF_REG_10) in mark_fastcall_patterns()
17005 lowest_off = min(lowest_off, insn->off); in mark_fastcall_patterns()
17008 for (i = subprog->start; i < (subprog + 1)->start; ++i) { in mark_fastcall_patterns()
17009 insn = env->prog->insnsi + i; in mark_fastcall_patterns()
17010 if (insn->code != (BPF_JMP | BPF_CALL)) in mark_fastcall_patterns()
17019 * < 0 - an error occurred
17020 * DONE_EXPLORING - the instruction was fully explored
17021 * KEEP_EXPLORING - there is still work to be done before it is fully explored
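A condensed sketch of how these return values drive the stack-based walk in check_cfg() below (simplified from the loop in this file):

	ret = visit_insn(t, env);
	if (ret == DONE_EXPLORING) {
		insn_state[t] = EXPLORED;
		env->cfg.cur_stack--;      /* pop t: all successors handled           */
	} else if (ret == KEEP_EXPLORING) {
		continue;                  /* a successor was pushed; revisit t later */
	} else {
		goto err_free;             /* ret < 0: propagate the error            */
	}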
17025 struct bpf_insn *insns = env->prog->insnsi, *insn = &insns[t]; in visit_insn()
17031 /* All non-branch instructions have a single fall-through edge. */ in visit_insn()
17032 if (BPF_CLASS(insn->code) != BPF_JMP && in visit_insn()
17033 BPF_CLASS(insn->code) != BPF_JMP32) { in visit_insn()
17038 switch (BPF_OP(insn->code)) { in visit_insn()
17065 if (bpf_helper_call(insn) && bpf_helper_changes_pkt_data(insn->imm)) in visit_insn()
17067 if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) { in visit_insn()
17074 * is crucial for fast convergence of open-coded iterator loop in visit_insn()
17080 * It is expected that with correct open-coded iterators in visit_insn()
17087 return visit_func_call_insn(t, insns, env, insn->src_reg == BPF_PSEUDO_CALL); in visit_insn()
17090 if (BPF_SRC(insn->code) != BPF_K) in visit_insn()
17091 return -EINVAL; in visit_insn()
17093 if (BPF_CLASS(insn->code) == BPF_JMP) in visit_insn()
17094 off = insn->off; in visit_insn()
17096 off = insn->imm; in visit_insn()
17118 return push_insn(t, t + insn->off + 1, BRANCH, env); in visit_insn()
17122 /* non-recursive depth-first-search to detect loops in BPF program
17123 * loop == back-edge in directed graph
17127 int insn_cnt = env->prog->len; in check_cfg()
17132 insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL); in check_cfg()
17134 return -ENOMEM; in check_cfg()
17136 insn_stack = env->cfg.insn_stack = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL); in check_cfg()
17139 return -ENOMEM; in check_cfg()
17144 env->cfg.cur_stack = 1; in check_cfg()
17147 while (env->cfg.cur_stack > 0) { in check_cfg()
17148 int t = insn_stack[env->cfg.cur_stack - 1]; in check_cfg()
17154 env->cfg.cur_stack--; in check_cfg()
17161 ret = -EFAULT; in check_cfg()
17167 if (env->cfg.cur_stack < 0) { in check_cfg()
17169 ret = -EFAULT; in check_cfg()
17173 if (env->exception_callback_subprog && !ex_done) { in check_cfg()
17174 ex_insn_beg = env->subprog_info[env->exception_callback_subprog].start; in check_cfg()
17178 env->cfg.cur_stack = 1; in check_cfg()
17184 struct bpf_insn *insn = &env->prog->insnsi[i]; in check_cfg()
17188 ret = -EINVAL; in check_cfg()
17194 ret = -EINVAL; in check_cfg()
17201 env->prog->aux->changes_pkt_data = env->subprog_info[0].changes_pkt_data; in check_cfg()
17206 env->cfg.insn_state = env->cfg.insn_stack = NULL; in check_cfg()
17214 for (i = 1; i < env->subprog_cnt; i++) { in check_abnormal_return()
17215 if (env->subprog_info[i].has_ld_abs) { in check_abnormal_return()
17217 return -EINVAL; in check_abnormal_return()
17219 if (env->subprog_info[i].has_tail_call) { in check_abnormal_return()
17221 return -EINVAL; in check_abnormal_return()
17243 int ret = -ENOMEM; in check_btf_func_early()
17245 nfuncs = attr->func_info_cnt; in check_btf_func_early()
17248 return -EINVAL; in check_btf_func_early()
17252 urec_size = attr->func_info_rec_size; in check_btf_func_early()
17257 return -EINVAL; in check_btf_func_early()
17260 prog = env->prog; in check_btf_func_early()
17261 btf = prog->aux->btf; in check_btf_func_early()
17263 urecord = make_bpfptr(attr->func_info, uattr.is_kernel); in check_btf_func_early()
17268 return -ENOMEM; in check_btf_func_early()
17273 if (ret == -E2BIG) { in check_btf_func_early()
17281 ret = -EFAULT; in check_btf_func_early()
17287 ret = -EFAULT; in check_btf_func_early()
17292 ret = -EINVAL; in check_btf_func_early()
17315 func_proto = btf_type_by_id(btf, type->type); in check_btf_func_early()
17324 prog->aux->func_info = krecord; in check_btf_func_early()
17325 prog->aux->func_info_cnt = nfuncs; in check_btf_func_early()
17345 int ret = -ENOMEM; in check_btf_func()
17347 nfuncs = attr->func_info_cnt; in check_btf_func()
17350 return -EINVAL; in check_btf_func()
17353 if (nfuncs != env->subprog_cnt) { in check_btf_func()
17355 return -EINVAL; in check_btf_func()
17358 urec_size = attr->func_info_rec_size; in check_btf_func()
17360 prog = env->prog; in check_btf_func()
17361 btf = prog->aux->btf; in check_btf_func()
17363 urecord = make_bpfptr(attr->func_info, uattr.is_kernel); in check_btf_func()
17365 krecord = prog->aux->func_info; in check_btf_func()
17368 return -ENOMEM; in check_btf_func()
17372 ret = -EINVAL; in check_btf_func()
17374 if (env->subprog_info[i].start != krecord[i].insn_off) { in check_btf_func()
17381 info_aux[i].linkage = BTF_INFO_VLEN(type->info); in check_btf_func()
17383 func_proto = btf_type_by_id(btf, type->type); in check_btf_func()
17385 ret_type = btf_type_skip_modifiers(btf, func_proto->type, NULL); in check_btf_func()
17388 if (i && !scalar_return && env->subprog_info[i].has_ld_abs) { in check_btf_func()
17392 if (i && !scalar_return && env->subprog_info[i].has_tail_call) { in check_btf_func()
17400 prog->aux->func_info_aux = info_aux; in check_btf_func()
17410 struct bpf_prog_aux *aux = env->prog->aux; in adjust_btf_func()
17413 if (!aux->func_info) in adjust_btf_func()
17417 for (i = 0; i < env->subprog_cnt - env->hidden_subprog_cnt; i++) in adjust_btf_func()
17418 aux->func_info[i].insn_off = env->subprog_info[i].start; in adjust_btf_func()
17436 nr_linfo = attr->line_info_cnt; in check_btf_line()
17440 return -EINVAL; in check_btf_line()
17442 rec_size = attr->line_info_rec_size; in check_btf_line()
17445 rec_size & (sizeof(u32) - 1)) in check_btf_line()
17446 return -EINVAL; in check_btf_line()
17454 return -ENOMEM; in check_btf_line()
17456 prog = env->prog; in check_btf_line()
17457 btf = prog->aux->btf; in check_btf_line()
17460 sub = env->subprog_info; in check_btf_line()
17461 ulinfo = make_bpfptr(attr->line_info, uattr.is_kernel); in check_btf_line()
17467 if (err == -E2BIG) { in check_btf_line()
17472 err = -EFAULT; in check_btf_line()
17478 err = -EFAULT; in check_btf_line()
17485 * 2) bounded by prog->len in check_btf_line()
17494 linfo[i].insn_off >= prog->len) { in check_btf_line()
17495 verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n", in check_btf_line()
17497 prog->len); in check_btf_line()
17498 err = -EINVAL; in check_btf_line()
17502 if (!prog->insnsi[linfo[i].insn_off].code) { in check_btf_line()
17506 err = -EINVAL; in check_btf_line()
17513 err = -EINVAL; in check_btf_line()
17517 if (s != env->subprog_cnt) { in check_btf_line()
17523 err = -EINVAL; in check_btf_line()
17532 if (s != env->subprog_cnt) { in check_btf_line()
17534 env->subprog_cnt - s, s); in check_btf_line()
17535 err = -EINVAL; in check_btf_line()
17539 prog->aux->linfo = linfo; in check_btf_line()
17540 prog->aux->nr_linfo = nr_linfo; in check_btf_line()
17558 struct bpf_prog *prog = env->prog; in check_core_relo()
17559 const struct btf *btf = prog->aux->btf; in check_core_relo()
17561 .log = &env->log, in check_core_relo()
17567 nr_core_relo = attr->core_relo_cnt; in check_core_relo()
17571 return -EINVAL; in check_core_relo()
17573 rec_size = attr->core_relo_rec_size; in check_core_relo()
17577 return -EINVAL; in check_core_relo()
17579 u_core_relo = make_bpfptr(attr->core_relos, uattr.is_kernel); in check_core_relo()
17583 /* Unlike func_info and line_info, copy and apply each CO-RE in check_core_relo()
17590 if (err == -E2BIG) { in check_core_relo()
17595 err = -EFAULT; in check_core_relo()
17601 err = -EFAULT; in check_core_relo()
17605 if (core_relo.insn_off % 8 || core_relo.insn_off / 8 >= prog->len) { in check_core_relo()
17606 verbose(env, "Invalid core_relo[%u].insn_off:%u prog->len:%u\n", in check_core_relo()
17607 i, core_relo.insn_off, prog->len); in check_core_relo()
17608 err = -EINVAL; in check_core_relo()
17613 &prog->insnsi[core_relo.insn_off / 8]); in check_core_relo()
17628 if (!attr->func_info_cnt && !attr->line_info_cnt) { in check_btf_info_early()
17630 return -EINVAL; in check_btf_info_early()
17634 btf = btf_get_by_fd(attr->prog_btf_fd); in check_btf_info_early()
17639 return -EACCES; in check_btf_info_early()
17641 env->prog->aux->btf = btf; in check_btf_info_early()
17655 if (!attr->func_info_cnt && !attr->line_info_cnt) { in check_btf_info()
17657 return -EINVAL; in check_btf_info()
17680 return old->umin_value <= cur->umin_value && in range_within()
17681 old->umax_value >= cur->umax_value && in range_within()
17682 old->smin_value <= cur->smin_value && in range_within()
17683 old->smax_value >= cur->smax_value && in range_within()
17684 old->u32_min_value <= cur->u32_min_value && in range_within()
17685 old->u32_max_value >= cur->u32_max_value && in range_within()
17686 old->s32_min_value <= cur->s32_min_value && in range_within()
17687 old->s32_max_value >= cur->s32_max_value; in range_within()
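A small numeric illustration (values are only examples) of when an already-explored range covers the current one:

	/* old: [0, 100]   cur: [10, 50]   -> range_within() == true (prunable)
	 * old: [0, 100]   cur: [10, 200]  -> false: cur may exceed what was verified
	 */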
17702 struct bpf_id_pair *map = idmap->map; in check_ids()
17735 old_id = old_id ? old_id : ++idmap->tmp_id_gen; in check_scalar_ids()
17736 cur_id = cur_id ? cur_id : ++idmap->tmp_id_gen; in check_scalar_ids()
17748 live = st->regs[i].live; in clean_func_state()
17750 st->regs[i].live |= REG_LIVE_DONE; in clean_func_state()
17755 __mark_reg_not_init(env, &st->regs[i]); in clean_func_state()
17758 for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) { in clean_func_state()
17759 live = st->stack[i].spilled_ptr.live; in clean_func_state()
17761 st->stack[i].spilled_ptr.live |= REG_LIVE_DONE; in clean_func_state()
17763 __mark_reg_not_init(env, &st->stack[i].spilled_ptr); in clean_func_state()
17765 st->stack[i].slot_type[j] = STACK_INVALID; in clean_func_state()
17775 if (st->frame[0]->regs[0].live & REG_LIVE_DONE) in clean_verifier_state()
17779 for (i = 0; i <= st->curframe; i++) in clean_verifier_state()
17780 clean_func_state(env, st->frame[i]); in clean_verifier_state()
17822 if (sl->state.branches) in clean_live_states()
17824 if (sl->state.insn_idx != insn || in clean_live_states()
17825 !same_callsites(&sl->state, cur)) in clean_live_states()
17827 clean_verifier_state(env, &sl->state); in clean_live_states()
17829 sl = sl->next; in clean_live_states()
17838 check_ids(rold->id, rcur->id, idmap) && in regs_exact()
17839 check_ids(rold->ref_obj_id, rcur->ref_obj_id, idmap); in regs_exact()
17856 if (!(rold->live & REG_LIVE_READ) && exact == NOT_EXACT) in regsafe()
17859 if (rold->type == NOT_INIT) { in regsafe()
17860 if (exact == NOT_EXACT || rcur->type == NOT_INIT) in regsafe()
17882 * a non-MAYBE_NULL variant. in regsafe()
17884 * non-MAYBE_NULL registers as well. in regsafe()
17886 if (rold->type != rcur->type) in regsafe()
17889 switch (base_type(rold->type)) { in regsafe()
17891 if (env->explore_alu_limits) { in regsafe()
17896 check_scalar_ids(rold->id, rcur->id, idmap); in regsafe()
17898 if (!rold->precise && exact == NOT_EXACT) in regsafe()
17900 if ((rold->id & BPF_ADD_CONST) != (rcur->id & BPF_ADD_CONST)) in regsafe()
17902 if ((rold->id & BPF_ADD_CONST) && (rold->off != rcur->off)) in regsafe()
17914 * First verification path is [1-6]: in regsafe()
17915 * - at (4) same bpf_reg_state::id (b) would be assigned to r6 and r7; in regsafe()
17916 * - at (5) r6 would be marked <= X, sync_linked_regs() would also mark in regsafe()
17918 * Next verification path is [1-4, 6]. in regsafe()
17921 * I. r6{.id=b}, r7{.id=b} via path 1-6; in regsafe()
17922 * II. r6{.id=a}, r7{.id=b} via path 1-4, 6. in regsafe()
17925 * --- in regsafe()
17929 tnum_in(rold->var_off, rcur->var_off) && in regsafe()
17930 check_scalar_ids(rold->id, rcur->id, idmap); in regsafe()
17941 tnum_in(rold->var_off, rcur->var_off) && in regsafe()
17942 check_ids(rold->id, rcur->id, idmap) && in regsafe()
17943 check_ids(rold->ref_obj_id, rcur->ref_obj_id, idmap); in regsafe()
17949 * since someone could have accessed through (ptr - k), or in regsafe()
17950 * even done ptr -= k in a register, to get a safe access. in regsafe()
17952 if (rold->range > rcur->range) in regsafe()
17957 if (rold->off != rcur->off) in regsafe()
17960 if (!check_ids(rold->id, rcur->id, idmap)) in regsafe()
17964 tnum_in(rold->var_off, rcur->var_off); in regsafe()
17967 * the same stack frame, since fp-8 in foo != fp-8 in bar in regsafe()
17969 return regs_exact(rold, rcur, idmap) && rold->frameno == rcur->frameno; in regsafe()
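/* Editor's note: a worked example (hypothetical values) for the scalar case
 * above. In addition to range_within(), the current register's known-bits
 * tnum must fit inside the old one via tnum_in(), printed as (value; mask):
 *
 *   old: var_off=(0x0; 0xff)   low 8 bits unknown,  range [0, 255]
 *   cur: var_off=(0x10; 0x0f)  pattern 0b0001xxxx,  range [16, 31]
 *        -> tnum_in() and range_within() both hold, old covers cur
 *
 *   cur: var_off=(0x100; 0xff) range [256, 511]
 *        -> bit 8 is known set in cur but known zero in old, tnum_in()
 *           fails, the states are not equivalent
 */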
17992 for (i = 0; i < ARRAY_SIZE(stack->slot_type); ++i) { in is_stack_all_misc()
17993 if ((stack->slot_type[i] == STACK_MISC) || in is_stack_all_misc()
17994 (stack->slot_type[i] == STACK_INVALID && env->allow_uninit_stack)) in is_stack_all_misc()
18006 return &stack->spilled_ptr; in scalar_reg_for_stack()
18024 for (i = 0; i < old->allocated_stack; i++) { in stacksafe()
18030 (i >= cur->allocated_stack || in stacksafe()
18031 old->stack[spi].slot_type[i % BPF_REG_SIZE] != in stacksafe()
18032 cur->stack[spi].slot_type[i % BPF_REG_SIZE])) in stacksafe()
18035 if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ) in stacksafe()
18037 i += BPF_REG_SIZE - 1; in stacksafe()
18042 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID) in stacksafe()
18045 if (env->allow_uninit_stack && in stacksafe()
18046 old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC) in stacksafe()
18052 if (i >= cur->allocated_stack) in stacksafe()
18055 /* 64-bit scalar spill vs all slots MISC and vice versa. in stacksafe()
18060 old_reg = scalar_reg_for_stack(env, &old->stack[spi]); in stacksafe()
18061 cur_reg = scalar_reg_for_stack(env, &cur->stack[spi]); in stacksafe()
18065 i += BPF_REG_SIZE - 1; in stacksafe()
18070 * it will be safe with zero-initialized stack. in stacksafe()
18073 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC && in stacksafe()
18074 cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO) in stacksafe()
18076 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] != in stacksafe()
18077 cur->stack[spi].slot_type[i % BPF_REG_SIZE]) in stacksafe()
18079 * this stack slot, but current has STACK_MISC -> in stacksafe()
18084 if (i % BPF_REG_SIZE != BPF_REG_SIZE - 1) in stacksafe()
18087 switch (old->stack[spi].slot_type[BPF_REG_SIZE - 1]) { in stacksafe()
18093 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8} in stacksafe()
18095 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16} in stacksafe()
18099 if (!regsafe(env, &old->stack[spi].spilled_ptr, in stacksafe()
18100 &cur->stack[spi].spilled_ptr, idmap, exact)) in stacksafe()
18104 old_reg = &old->stack[spi].spilled_ptr; in stacksafe()
18105 cur_reg = &cur->stack[spi].spilled_ptr; in stacksafe()
18106 if (old_reg->dynptr.type != cur_reg->dynptr.type || in stacksafe()
18107 old_reg->dynptr.first_slot != cur_reg->dynptr.first_slot || in stacksafe()
18108 !check_ids(old_reg->ref_obj_id, cur_reg->ref_obj_id, idmap)) in stacksafe()
18112 old_reg = &old->stack[spi].spilled_ptr; in stacksafe()
18113 cur_reg = &cur->stack[spi].spilled_ptr; in stacksafe()
18120 if (old_reg->iter.btf != cur_reg->iter.btf || in stacksafe()
18121 old_reg->iter.btf_id != cur_reg->iter.btf_id || in stacksafe()
18122 old_reg->iter.state != cur_reg->iter.state || in stacksafe()
18123 /* ignore {old_reg,cur_reg}->iter.depth, see above */ in stacksafe()
18124 !check_ids(old_reg->ref_obj_id, cur_reg->ref_obj_id, idmap)) in stacksafe()
18128 old_reg = &old->stack[spi].spilled_ptr; in stacksafe()
18129 cur_reg = &cur->stack[spi].spilled_ptr; in stacksafe()
18130 if (!check_ids(old_reg->ref_obj_id, cur_reg->ref_obj_id, idmap)) in stacksafe()
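/* Editor's note: illustrative byte-level slot_type comparisons for the
 * checks above (old = verified state, cur = current state):
 *
 *   old: MISC MISC MISC MISC MISC MISC MISC MISC
 *   cur: ZERO ZERO ZERO ZERO ZERO ZERO ZERO ZERO  -> safe; whatever was
 *        proven with unknown bytes also holds for known-zero bytes
 *
 *   old: ZERO ...   cur: MISC ...                 -> not safe; the old state
 *        relied on the byte being zero, cur cannot guarantee that
 *
 *   old: 64-bit scalar spill   cur: all MISC      -> both sides are turned
 *        into scalar registers and compared via regsafe() instead
 */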
18150 if (old->acquired_refs != cur->acquired_refs) in refsafe()
18153 if (old->active_locks != cur->active_locks) in refsafe()
18156 if (old->active_preempt_locks != cur->active_preempt_locks) in refsafe()
18159 if (old->active_rcu_lock != cur->active_rcu_lock) in refsafe()
18162 if (!check_ids(old->active_irq_id, cur->active_irq_id, idmap)) in refsafe()
18165 for (i = 0; i < old->acquired_refs; i++) { in refsafe()
18166 if (!check_ids(old->refs[i].id, cur->refs[i].id, idmap) || in refsafe()
18167 old->refs[i].type != cur->refs[i].type) in refsafe()
18169 switch (old->refs[i].type) { in refsafe()
18174 if (old->refs[i].ptr != cur->refs[i].ptr) in refsafe()
18178 WARN_ONCE(1, "Unhandled enum type for reference state: %d\n", old->refs[i].type); in refsafe()
18217 if (old->callback_depth > cur->callback_depth) in func_states_equal()
18221 if (!regsafe(env, &old->regs[i], &cur->regs[i], in func_states_equal()
18222 &env->idmap_scratch, exact)) in func_states_equal()
18225 if (!stacksafe(env, old, cur, &env->idmap_scratch, exact)) in func_states_equal()
18233 env->idmap_scratch.tmp_id_gen = env->id_gen; in reset_idmap_scratch()
18234 memset(&env->idmap_scratch.map, 0, sizeof(env->idmap_scratch.map)); in reset_idmap_scratch()
18244 if (old->curframe != cur->curframe) in states_equal()
18250 * must never prune a non-speculative execution one. in states_equal()
18252 if (old->speculative && !cur->speculative) in states_equal()
18255 if (old->in_sleepable != cur->in_sleepable) in states_equal()
18258 if (!refsafe(old, cur, &env->idmap_scratch)) in states_equal()
18264 for (i = 0; i <= old->curframe; i++) { in states_equal()
18265 if (old->frame[i]->callsite != cur->frame[i]->callsite) in states_equal()
18267 if (!func_states_equal(env, old->frame[i], cur->frame[i], exact)) in states_equal()
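/* Editor's note: condensed sketch of the overall equivalence test assembled
 * above (exactness modes, callback depth and error handling elided):
 *
 *   states_equal(old, cur):
 *       same curframe, speculation flag and sleepable flag
 *       refsafe():  same acquired refs, locks, RCU and IRQ state under idmap
 *       for each frame f in 0..curframe:
 *           same callsite
 *           func_states_equal(): regsafe() on all registers + stacksafe()
 */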
18280 u8 parent_flag = parent_reg->live & REG_LIVE_READ; in propagate_liveness_reg()
18281 u8 flag = reg->live & REG_LIVE_READ; in propagate_liveness_reg()
18303 * straight-line code between a state and its parent. When we arrive at an
18304 * equivalent state (jump target or such) we didn't arrive by the straight-line
18306 * of the state's write marks. That's what 'parent == state->parent' comparison
18317 if (vparent->curframe != vstate->curframe) { in propagate_liveness()
18319 vparent->curframe, vstate->curframe); in propagate_liveness()
18320 return -EFAULT; in propagate_liveness()
18324 for (frame = 0; frame <= vstate->curframe; frame++) { in propagate_liveness()
18325 parent = vparent->frame[frame]; in propagate_liveness()
18326 state = vstate->frame[frame]; in propagate_liveness()
18327 parent_reg = parent->regs; in propagate_liveness()
18328 state_reg = state->regs; in propagate_liveness()
18329 /* We don't need to worry about FP liveness, it's read-only */ in propagate_liveness()
18330 for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) { in propagate_liveness()
18340 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE && in propagate_liveness()
18341 i < parent->allocated_stack / BPF_REG_SIZE; i++) { in propagate_liveness()
18342 parent_reg = &parent->stack[i].spilled_ptr; in propagate_liveness()
18343 state_reg = &state->stack[i].spilled_ptr; in propagate_liveness()
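/* Editor's note: minimal sketch of what a single propagate_liveness_reg()
 * step amounts to (the real helper also distinguishes REG_LIVE_READ32 from
 * REG_LIVE_READ64 and skips redundant marks):
 *
 *   if ((reg->live & REG_LIVE_READ) && !(parent_reg->live & REG_LIVE_READ))
 *           mark_reg_read(env, reg, parent_reg, ...);
 *
 * i.e. anything read in the continuation of the matched (old) state is also
 * marked as read in the current state, so later pruning against the current
 * state's own parents stays conservative.
 */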
18364 for (fr = old->curframe; fr >= 0; fr--) { in propagate_precision()
18365 state = old->frame[fr]; in propagate_precision()
18366 state_reg = state->regs; in propagate_precision()
18369 if (state_reg->type != SCALAR_VALUE || in propagate_precision()
18370 !state_reg->precise || in propagate_precision()
18371 !(state_reg->live & REG_LIVE_READ)) in propagate_precision()
18373 if (env->log.level & BPF_LOG_LEVEL2) { in propagate_precision()
18379 bt_set_frame_reg(&env->bt, fr, i); in propagate_precision()
18383 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { in propagate_precision()
18384 if (!is_spilled_reg(&state->stack[i])) in propagate_precision()
18386 state_reg = &state->stack[i].spilled_ptr; in propagate_precision()
18387 if (state_reg->type != SCALAR_VALUE || in propagate_precision()
18388 !state_reg->precise || in propagate_precision()
18389 !(state_reg->live & REG_LIVE_READ)) in propagate_precision()
18391 if (env->log.level & BPF_LOG_LEVEL2) { in propagate_precision()
18394 fr, (-i - 1) * BPF_REG_SIZE); in propagate_precision()
18396 verbose(env, ",fp%d", (-i - 1) * BPF_REG_SIZE); in propagate_precision()
18398 bt_set_frame_slot(&env->bt, fr, i); in propagate_precision()
18416 int i, fr = cur->curframe; in states_maybe_looping()
18418 if (old->curframe != fr) in states_maybe_looping()
18421 fold = old->frame[fr]; in states_maybe_looping()
18422 fcur = cur->frame[fr]; in states_maybe_looping()
18424 if (memcmp(&fold->regs[i], &fcur->regs[i], in states_maybe_looping()
18432 return env->insn_aux_data[insn_idx].is_iter_next; in is_iter_next_insn()
18442 * Here's a situation in pseudo-BPF assembly form:
18462 * 3-5, come to goto, jump to 1:. Let's assume our state didn't change, so we
18469 * another ACTIVE iteration, we bump slot->iter.depth, to mark that it's
18489 * while (x--) {} // <<-- infinite loop here
18499 for (fr = old->curframe; fr >= 0; fr--) { in iter_active_depths_differ()
18500 state = old->frame[fr]; in iter_active_depths_differ()
18501 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { in iter_active_depths_differ()
18502 if (state->stack[i].slot_type[0] != STACK_ITER) in iter_active_depths_differ()
18505 slot = &state->stack[i].spilled_ptr; in iter_active_depths_differ()
18506 if (slot->iter.state != BPF_ITER_STATE_ACTIVE) in iter_active_depths_differ()
18509 cur_slot = &cur->frame[fr]->stack[i].spilled_ptr; in iter_active_depths_differ()
18510 if (cur_slot->iter.depth != slot->iter.depth) in iter_active_depths_differ()
18521 struct bpf_verifier_state *cur = env->cur_state, *new, *loop_entry; in is_state_visited()
18525 force_new_state = env->test_state_freq || is_force_checkpoint(env, insn_idx) || in is_state_visited()
18527 cur->insn_hist_end - cur->insn_hist_start > 40; in is_state_visited()
18530 * http://vger.kernel.org/bpfconf2019.html#session-1 in is_state_visited()
18538 if (env->jmps_processed - env->prev_jmps_processed >= 2 && in is_state_visited()
18539 env->insn_processed - env->prev_insn_processed >= 8) in is_state_visited()
18549 if (sl->state.insn_idx != insn_idx) in is_state_visited()
18552 if (sl->state.branches) { in is_state_visited()
18553 struct bpf_func_state *frame = sl->state.frame[sl->state.curframe]; in is_state_visited()
18555 if (frame->in_async_callback_fn && in is_state_visited()
18556 frame->async_entry_cnt != cur->frame[cur->curframe]->async_entry_cnt) { in is_state_visited()
18570 /* BPF open-coded iterators loop detection is special. in is_state_visited()
18586 * 1. r7 = -16 in is_state_visited()
18588 * 3. while (bpf_iter_num_next(&fp[-8])) { in is_state_visited()
18590 * 5. r7 = -32 in is_state_visited()
18600 * Here verifier would first visit path 1-3, create a checkpoint at 3 in is_state_visited()
18601 * with r7=-16, continue to 4-7,3. Existing checkpoint at 3 does in is_state_visited()
18603 * comparison would discard current state with r7=-32 in is_state_visited()
18607 if (states_equal(env, &sl->state, cur, RANGE_WITHIN)) { in is_state_visited()
18612 cur_frame = cur->frame[cur->curframe]; in is_state_visited()
18616 iter_reg = &cur_frame->regs[BPF_REG_1]; in is_state_visited()
18619 * no need for extra (re-)validations in is_state_visited()
18621 spi = __get_spi(iter_reg->off + iter_reg->var_off.value); in is_state_visited()
18622 iter_state = &func(env, iter_reg)->stack[spi].spilled_ptr; in is_state_visited()
18623 if (iter_state->iter.state == BPF_ITER_STATE_ACTIVE) { in is_state_visited()
18624 update_loop_entry(cur, &sl->state); in is_state_visited()
18631 if (sl->state.may_goto_depth != cur->may_goto_depth && in is_state_visited()
18632 states_equal(env, &sl->state, cur, RANGE_WITHIN)) { in is_state_visited()
18633 update_loop_entry(cur, &sl->state); in is_state_visited()
18638 if (states_equal(env, &sl->state, cur, RANGE_WITHIN)) in is_state_visited()
18643 if (states_maybe_looping(&sl->state, cur) && in is_state_visited()
18644 states_equal(env, &sl->state, cur, EXACT) && in is_state_visited()
18645 !iter_active_depths_differ(&sl->state, cur) && in is_state_visited()
18646 sl->state.may_goto_depth == cur->may_goto_depth && in is_state_visited()
18647 sl->state.callback_unroll_depth == cur->callback_unroll_depth) { in is_state_visited()
18651 print_verifier_state(env, cur, cur->curframe, true); in is_state_visited()
18653 print_verifier_state(env, &sl->state, cur->curframe, true); in is_state_visited()
18654 return -EINVAL; in is_state_visited()
18663 * if r1 < 1000000 goto pc-2 in is_state_visited()
18670 env->jmps_processed - env->prev_jmps_processed < 20 && in is_state_visited()
18671 env->insn_processed - env->prev_insn_processed < 100) in is_state_visited()
18675 /* If sl->state is a part of a loop and this loop's entry is a part of in is_state_visited()
18682 * .---------> hdr All branches from 'succ' had been explored in is_state_visited()
18685 * | .------... Suppose states 'cur' and 'succ' correspond in is_state_visited()
18691 * | succ <- cur To check if that is the case, verify in is_state_visited()
18696 * '----' in is_state_visited()
18700 loop_entry = get_loop_entry(&sl->state); in is_state_visited()
18701 force_exact = loop_entry && loop_entry->branches > 0; in is_state_visited()
18702 if (states_equal(env, &sl->state, cur, force_exact ? RANGE_WITHIN : NOT_EXACT)) { in is_state_visited()
18706 sl->hit_cnt++; in is_state_visited()
18710 * If we have any write marks in env->cur_state, they in is_state_visited()
18717 err = propagate_liveness(env, &sl->state, cur); in is_state_visited()
18724 if (is_jmp_point(env, env->insn_idx)) in is_state_visited()
18726 err = err ? : propagate_precision(env, &sl->state); in is_state_visited()
18739 sl->miss_cnt++; in is_state_visited()
18748 n = is_force_checkpoint(env, insn_idx) && sl->state.branches > 0 ? 64 : 3; in is_state_visited()
18749 if (sl->miss_cnt > sl->hit_cnt * n + n) { in is_state_visited()
18753 *pprev = sl->next; in is_state_visited()
18754 if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE && in is_state_visited()
18755 !sl->state.used_as_loop_entry) { in is_state_visited()
18756 u32 br = sl->state.branches; in is_state_visited()
18761 free_verifier_state(&sl->state, false); in is_state_visited()
18763 env->peak_states--; in is_state_visited()
18769 sl->next = env->free_list; in is_state_visited()
18770 env->free_list = sl; in is_state_visited()
18776 pprev = &sl->next; in is_state_visited()
18780 if (env->max_states_per_insn < states_cnt) in is_state_visited()
18781 env->max_states_per_insn = states_cnt; in is_state_visited()
18783 if (!env->bpf_capable && states_cnt > BPF_COMPLEXITY_LIMIT_STATES) in is_state_visited()
18795 * When looping the sl->state.branches will be > 0 and this state in is_state_visited()
18800 return -ENOMEM; in is_state_visited()
18801 env->total_states++; in is_state_visited()
18802 env->peak_states++; in is_state_visited()
18803 env->prev_jmps_processed = env->jmps_processed; in is_state_visited()
18804 env->prev_insn_processed = env->insn_processed; in is_state_visited()
18807 if (env->bpf_capable) in is_state_visited()
18811 new = &new_sl->state; in is_state_visited()
18818 new->insn_idx = insn_idx; in is_state_visited()
18819 WARN_ONCE(new->branches != 1, in is_state_visited()
18820 "BUG is_state_visited:branches_to_explore=%d insn %d\n", new->branches, insn_idx); in is_state_visited()
18822 cur->parent = new; in is_state_visited()
18823 cur->first_insn_idx = insn_idx; in is_state_visited()
18824 cur->insn_hist_start = cur->insn_hist_end; in is_state_visited()
18825 cur->dfs_depth = new->dfs_depth + 1; in is_state_visited()
18826 new_sl->next = *explored_state(env, insn_idx); in is_state_visited()
18829 * registers connected. Only r6 - r9 of the callers are alive (pushed in is_state_visited()
18831 * r6 - r9 as an optimization. Callers will have r1 - r5 connected to in is_state_visited()
18841 for (j = 0; j <= cur->curframe; j++) { in is_state_visited()
18842 for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) in is_state_visited()
18843 cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i]; in is_state_visited()
18845 cur->frame[j]->regs[i].live = REG_LIVE_NONE; in is_state_visited()
18849 for (j = 0; j <= cur->curframe; j++) { in is_state_visited()
18850 struct bpf_func_state *frame = cur->frame[j]; in is_state_visited()
18851 struct bpf_func_state *newframe = new->frame[j]; in is_state_visited()
18853 for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) { in is_state_visited()
18854 frame->stack[i].spilled_ptr.live = REG_LIVE_NONE; in is_state_visited()
18855 frame->stack[i].spilled_ptr.parent = in is_state_visited()
18856 &newframe->stack[i].spilled_ptr; in is_state_visited()
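/* Editor's note: rough decision flow of the pruning logic above (loop-entry
 * bookkeeping, hit/miss eviction heuristics and error paths elided):
 *
 *   for each explored state sl recorded at this insn_idx:
 *       if sl->state.branches:         still being explored
 *           handle async-callback, open-coded iterator and may_goto cases,
 *           or reject the program when an exactly repeating state signals
 *           an infinite loop
 *       else if states_equal(sl->state, cur, exactness chosen by loop ctx):
 *           propagate read (liveness) and precision marks from the matched
 *           old state, then prune this path
 *       else:
 *           sl->miss_cnt++, possibly evicting a rarely matching checkpoint
 *   no match: allocate new_sl, copy cur into it, and point cur's registers
 *   and stack slots at the copy as liveness parents
 */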
18900 enum bpf_reg_type *prev_type = &env->insn_aux_data[env->insn_idx].ptr_type; in save_aux_ptr_type()
18928 return -EINVAL; in save_aux_ptr_type()
18937 bool pop_log = !(env->log.level & BPF_LOG_LEVEL2); in do_check()
18938 struct bpf_verifier_state *state = env->cur_state; in do_check()
18939 struct bpf_insn *insns = env->prog->insnsi; in do_check()
18941 int insn_cnt = env->prog->len; in do_check()
18943 int prev_insn_idx = -1; in do_check()
18952 env->cur_hist_ent = NULL; in do_check()
18954 env->prev_insn_idx = prev_insn_idx; in do_check()
18955 if (env->insn_idx >= insn_cnt) { in do_check()
18957 env->insn_idx, insn_cnt); in do_check()
18958 return -EFAULT; in do_check()
18961 insn = &insns[env->insn_idx]; in do_check()
18962 class = BPF_CLASS(insn->code); in do_check()
18964 if (++env->insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) { in do_check()
18967 env->insn_processed); in do_check()
18968 return -E2BIG; in do_check()
18971 state->last_insn_idx = env->prev_insn_idx; in do_check()
18973 if (is_prune_point(env, env->insn_idx)) { in do_check()
18974 err = is_state_visited(env, env->insn_idx); in do_check()
18979 if (env->log.level & BPF_LOG_LEVEL) { in do_check()
18982 env->prev_insn_idx, env->insn_idx, in do_check()
18983 env->cur_state->speculative ? in do_check()
18986 verbose(env, "%d: safe\n", env->insn_idx); in do_check()
18992 if (is_jmp_point(env, env->insn_idx)) { in do_check()
18999 return -EAGAIN; in do_check()
19004 if (env->log.level & BPF_LOG_LEVEL2 && do_print_state) { in do_check()
19006 env->prev_insn_idx, env->insn_idx, in do_check()
19007 env->cur_state->speculative ? in do_check()
19009 print_verifier_state(env, state, state->curframe, true); in do_check()
19013 if (env->log.level & BPF_LOG_LEVEL) { in do_check()
19021 print_insn_state(env, state, state->curframe); in do_check()
19023 verbose_linfo(env, env->insn_idx, "; "); in do_check()
19024 env->prev_log_pos = env->log.end_pos; in do_check()
19025 verbose(env, "%d: ", env->insn_idx); in do_check()
19026 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks); in do_check()
19027 env->prev_insn_print_pos = env->log.end_pos - env->prev_log_pos; in do_check()
19028 env->prev_log_pos = env->log.end_pos; in do_check()
19031 if (bpf_prog_is_offloaded(env->prog->aux)) { in do_check()
19032 err = bpf_prog_offload_verify_insn(env, env->insn_idx, in do_check()
19033 env->prev_insn_idx); in do_check()
19040 prev_insn_idx = env->insn_idx; in do_check()
19053 err = check_reg_arg(env, insn->src_reg, SRC_OP); in do_check()
19057 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); in do_check()
19061 src_reg_type = regs[insn->src_reg].type; in do_check()
19066 err = check_mem_access(env, env->insn_idx, insn->src_reg, in do_check()
19067 insn->off, BPF_SIZE(insn->code), in do_check()
19068 BPF_READ, insn->dst_reg, false, in do_check()
19069 BPF_MODE(insn->code) == BPF_MEMSX); in do_check()
19071 err = err ?: reg_bounds_sanity_check(env, &regs[insn->dst_reg], "ldx"); in do_check()
19077 if (BPF_MODE(insn->code) == BPF_ATOMIC) { in do_check()
19078 err = check_atomic(env, env->insn_idx, insn); in do_check()
19081 env->insn_idx++; in do_check()
19085 if (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0) { in do_check()
19087 return -EINVAL; in do_check()
19091 err = check_reg_arg(env, insn->src_reg, SRC_OP); in do_check()
19095 err = check_reg_arg(env, insn->dst_reg, SRC_OP); in do_check()
19099 dst_reg_type = regs[insn->dst_reg].type; in do_check()
19102 err = check_mem_access(env, env->insn_idx, insn->dst_reg, in do_check()
19103 insn->off, BPF_SIZE(insn->code), in do_check()
19104 BPF_WRITE, insn->src_reg, false, false); in do_check()
19114 if (BPF_MODE(insn->code) != BPF_MEM || in do_check()
19115 insn->src_reg != BPF_REG_0) { in do_check()
19117 return -EINVAL; in do_check()
19120 err = check_reg_arg(env, insn->dst_reg, SRC_OP); in do_check()
19124 dst_reg_type = regs[insn->dst_reg].type; in do_check()
19127 err = check_mem_access(env, env->insn_idx, insn->dst_reg, in do_check()
19128 insn->off, BPF_SIZE(insn->code), in do_check()
19129 BPF_WRITE, -1, false, false); in do_check()
19137 u8 opcode = BPF_OP(insn->code); in do_check()
19139 env->jmps_processed++; in do_check()
19141 if (BPF_SRC(insn->code) != BPF_K || in do_check()
19142 (insn->src_reg != BPF_PSEUDO_KFUNC_CALL in do_check()
19143 && insn->off != 0) || in do_check()
19144 (insn->src_reg != BPF_REG_0 && in do_check()
19145 insn->src_reg != BPF_PSEUDO_CALL && in do_check()
19146 insn->src_reg != BPF_PSEUDO_KFUNC_CALL) || in do_check()
19147 insn->dst_reg != BPF_REG_0 || in do_check()
19150 return -EINVAL; in do_check()
19153 if (env->cur_state->active_locks) { in do_check()
19154 if ((insn->src_reg == BPF_REG_0 && insn->imm != BPF_FUNC_spin_unlock) || in do_check()
19155 (insn->src_reg == BPF_PSEUDO_KFUNC_CALL && in do_check()
19156 (insn->off != 0 || !kfunc_spin_allowed(insn->imm)))) { in do_check()
19158 return -EINVAL; in do_check()
19161 if (insn->src_reg == BPF_PSEUDO_CALL) { in do_check()
19162 err = check_func_call(env, insn, &env->insn_idx); in do_check()
19163 } else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) { in do_check()
19164 err = check_kfunc_call(env, insn, &env->insn_idx); in do_check()
19170 err = check_helper_call(env, insn, &env->insn_idx); in do_check()
19177 if (BPF_SRC(insn->code) != BPF_K || in do_check()
19178 insn->src_reg != BPF_REG_0 || in do_check()
19179 insn->dst_reg != BPF_REG_0 || in do_check()
19180 (class == BPF_JMP && insn->imm != 0) || in do_check()
19181 (class == BPF_JMP32 && insn->off != 0)) { in do_check()
19183 return -EINVAL; in do_check()
19187 env->insn_idx += insn->off + 1; in do_check()
19189 env->insn_idx += insn->imm + 1; in do_check()
19193 if (BPF_SRC(insn->code) != BPF_K || in do_check()
19194 insn->imm != 0 || in do_check()
19195 insn->src_reg != BPF_REG_0 || in do_check()
19196 insn->dst_reg != BPF_REG_0 || in do_check()
19199 return -EINVAL; in do_check()
19204 * state->curframe > 0, it may be a callback in do_check()
19208 err = check_resource_leak(env, exception_exit, !env->cur_state->curframe, in do_check()
19226 if (state->curframe) { in do_check()
19228 err = prepare_func_exit(env, &env->insn_idx); in do_check()
19240 update_branch_counts(env, env->cur_state); in do_check()
19242 &env->insn_idx, pop_log); in do_check()
19244 if (err != -ENOENT) in do_check()
19252 err = check_cond_jmp_op(env, insn, &env->insn_idx); in do_check()
19257 u8 mode = BPF_MODE(insn->code); in do_check()
19269 env->insn_idx++; in do_check()
19273 return -EINVAL; in do_check()
19277 return -EINVAL; in do_check()
19280 env->insn_idx++; in do_check()
19305 if (BTF_INFO_KIND(t->info) != BTF_KIND_DATASEC) in find_btf_percpu_datasec()
19308 tname = btf_name_by_offset(btf, t->name_off); in find_btf_percpu_datasec()
19313 return -ENOENT; in find_btf_percpu_datasec()
19328 for (i = 0; i < env->used_btf_cnt; i++) in __add_used_btf()
19329 if (env->used_btfs[i].btf == btf) in __add_used_btf()
19332 if (env->used_btf_cnt >= MAX_USED_BTFS) in __add_used_btf()
19333 return -E2BIG; in __add_used_btf()
19337 btf_mod = &env->used_btfs[env->used_btf_cnt]; in __add_used_btf()
19338 btf_mod->btf = btf; in __add_used_btf()
19339 btf_mod->module = NULL; in __add_used_btf()
19343 btf_mod->module = btf_try_get_module(btf); in __add_used_btf()
19344 if (!btf_mod->module) { in __add_used_btf()
19346 return -ENXIO; in __add_used_btf()
19350 return env->used_btf_cnt++; in __add_used_btf()
19364 u32 type, id = insn->imm; in __check_pseudo_btf_id()
19372 return -ENOENT; in __check_pseudo_btf_id()
19377 return -EINVAL; in __check_pseudo_btf_id()
19380 sym_name = btf_name_by_offset(btf, t->name_off); in __check_pseudo_btf_id()
19385 return -ENOENT; in __check_pseudo_btf_id()
19391 aux->btf_var.reg_type = PTR_TO_MEM | MEM_RDONLY; in __check_pseudo_btf_id()
19392 aux->btf_var.mem_size = 0; in __check_pseudo_btf_id()
19400 if (vsi->type == id) { in __check_pseudo_btf_id()
19407 type = t->type; in __check_pseudo_btf_id()
19410 aux->btf_var.reg_type = PTR_TO_BTF_ID | MEM_PERCPU; in __check_pseudo_btf_id()
19411 aux->btf_var.btf = btf; in __check_pseudo_btf_id()
19412 aux->btf_var.btf_id = type; in __check_pseudo_btf_id()
19421 tname = btf_name_by_offset(btf, t->name_off); in __check_pseudo_btf_id()
19424 return -EINVAL; in __check_pseudo_btf_id()
19426 aux->btf_var.reg_type = PTR_TO_MEM | MEM_RDONLY; in __check_pseudo_btf_id()
19427 aux->btf_var.mem_size = tsize; in __check_pseudo_btf_id()
19429 aux->btf_var.reg_type = PTR_TO_BTF_ID; in __check_pseudo_btf_id()
19430 aux->btf_var.btf = btf; in __check_pseudo_btf_id()
19431 aux->btf_var.btf_id = type; in __check_pseudo_btf_id()
19452 return -EINVAL; in check_pseudo_btf_id()
19457 return -EINVAL; in check_pseudo_btf_id()
19488 return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE || in bpf_map_is_cgroup_storage()
19489 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE); in bpf_map_is_cgroup_storage()
19499 if (btf_record_has_field(map->record, BPF_LIST_HEAD) || in check_map_prog_compatibility()
19500 btf_record_has_field(map->record, BPF_RB_ROOT)) { in check_map_prog_compatibility()
19503 return -EINVAL; in check_map_prog_compatibility()
19507 if (btf_record_has_field(map->record, BPF_SPIN_LOCK)) { in check_map_prog_compatibility()
19510 return -EINVAL; in check_map_prog_compatibility()
19515 return -EINVAL; in check_map_prog_compatibility()
19519 if (btf_record_has_field(map->record, BPF_TIMER)) { in check_map_prog_compatibility()
19522 return -EINVAL; in check_map_prog_compatibility()
19526 if (btf_record_has_field(map->record, BPF_WORKQUEUE)) { in check_map_prog_compatibility()
19529 return -EINVAL; in check_map_prog_compatibility()
19533 if ((bpf_prog_is_offloaded(prog->aux) || bpf_map_is_offloaded(map)) && in check_map_prog_compatibility()
19536 return -EINVAL; in check_map_prog_compatibility()
19539 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { in check_map_prog_compatibility()
19541 return -EINVAL; in check_map_prog_compatibility()
19544 if (prog->sleepable) in check_map_prog_compatibility()
19545 switch (map->map_type) { in check_map_prog_compatibility()
19567 return -EINVAL; in check_map_prog_compatibility()
19571 bpf_cgroup_storage_assign(env->prog->aux, map)) { in check_map_prog_compatibility()
19573 return -EBUSY; in check_map_prog_compatibility()
19576 if (map->map_type == BPF_MAP_TYPE_ARENA) { in check_map_prog_compatibility()
19577 if (env->prog->aux->arena) { in check_map_prog_compatibility()
19579 return -EBUSY; in check_map_prog_compatibility()
19581 if (!env->allow_ptr_leaks || !env->bpf_capable) { in check_map_prog_compatibility()
19583 return -EPERM; in check_map_prog_compatibility()
19585 if (!env->prog->jit_requested) { in check_map_prog_compatibility()
19587 return -EOPNOTSUPP; in check_map_prog_compatibility()
19591 return -EOPNOTSUPP; in check_map_prog_compatibility()
19593 env->prog->aux->arena = (void *)map; in check_map_prog_compatibility()
19594 if (!bpf_arena_get_user_vm_start(env->prog->aux->arena)) { in check_map_prog_compatibility()
19596 return -EINVAL; in check_map_prog_compatibility()
19608 for (i = 0; i < env->used_map_cnt; i++) in __add_used_map()
19609 if (env->used_maps[i] == map) in __add_used_map()
19612 if (env->used_map_cnt >= MAX_USED_MAPS) { in __add_used_map()
19615 return -E2BIG; in __add_used_map()
19618 err = check_map_prog_compatibility(env, map, env->prog); in __add_used_map()
19622 if (env->prog->sleepable) in __add_used_map()
19623 atomic64_inc(&map->sleepable_refcnt); in __add_used_map()
19632 env->used_maps[env->used_map_cnt++] = map; in __add_used_map()
19634 return env->used_map_cnt - 1; in __add_used_map()
19664 struct bpf_insn *insn = env->prog->insnsi; in resolve_pseudo_ldimm64()
19665 int insn_cnt = env->prog->len; in resolve_pseudo_ldimm64()
19668 err = bpf_prog_calc_tag(env->prog); in resolve_pseudo_ldimm64()
19673 if (BPF_CLASS(insn->code) == BPF_LDX && in resolve_pseudo_ldimm64()
19674 ((BPF_MODE(insn->code) != BPF_MEM && BPF_MODE(insn->code) != BPF_MEMSX) || in resolve_pseudo_ldimm64()
19675 insn->imm != 0)) { in resolve_pseudo_ldimm64()
19677 return -EINVAL; in resolve_pseudo_ldimm64()
19687 if (i == insn_cnt - 1 || insn[1].code != 0 || in resolve_pseudo_ldimm64()
19691 return -EINVAL; in resolve_pseudo_ldimm64()
19695 /* valid generic load 64-bit imm */ in resolve_pseudo_ldimm64()
19699 aux = &env->insn_aux_data[i]; in resolve_pseudo_ldimm64()
19707 aux = &env->insn_aux_data[i]; in resolve_pseudo_ldimm64()
19708 aux->ptr_type = PTR_TO_FUNC; in resolve_pseudo_ldimm64()
19713 * converted into regular 64-bit imm load insn. in resolve_pseudo_ldimm64()
19726 return -EINVAL; in resolve_pseudo_ldimm64()
19732 if (bpfptr_is_null(env->fd_array)) { in resolve_pseudo_ldimm64()
19734 return -EPROTO; in resolve_pseudo_ldimm64()
19736 if (copy_from_bpfptr_offset(&fd, env->fd_array, in resolve_pseudo_ldimm64()
19739 return -EFAULT; in resolve_pseudo_ldimm64()
19749 map = env->used_maps[map_idx]; in resolve_pseudo_ldimm64()
19751 aux = &env->insn_aux_data[i]; in resolve_pseudo_ldimm64()
19752 aux->map_index = map_idx; in resolve_pseudo_ldimm64()
19762 return -EINVAL; in resolve_pseudo_ldimm64()
19765 if (!map->ops->map_direct_value_addr) { in resolve_pseudo_ldimm64()
19767 return -EINVAL; in resolve_pseudo_ldimm64()
19770 err = map->ops->map_direct_value_addr(map, &addr, off); in resolve_pseudo_ldimm64()
19773 map->value_size, off); in resolve_pseudo_ldimm64()
19777 aux->map_off = off; in resolve_pseudo_ldimm64()
19791 if (!bpf_opcode_in_insntable(insn->code)) { in resolve_pseudo_ldimm64()
19792 verbose(env, "unknown opcode %02x\n", insn->code); in resolve_pseudo_ldimm64()
19793 return -EINVAL; in resolve_pseudo_ldimm64()
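/* Editor's note: quick recap of the ld_imm64 forms resolved above. A
 * BPF_LD | BPF_IMM | BPF_DW instruction occupies two insn slots; depending
 * on src_reg it carries a map fd (BPF_PSEUDO_MAP_FD), a map fd plus an
 * offset into the map value (BPF_PSEUDO_MAP_VALUE), an index into the
 * supplied fd_array (the *_IDX variants), or a subprog reference
 * (BPF_PSEUDO_FUNC). The map forms are rewritten here into a plain 64-bit
 * immediate load of the resolved map (or map value) address, while
 * BPF_PSEUDO_FUNC is only tagged as PTR_TO_FUNC for later fixup.
 */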
19807 __bpf_free_used_maps(env->prog->aux, env->used_maps, in release_maps()
19808 env->used_map_cnt); in release_maps()
19814 __bpf_free_used_btfs(env->used_btfs, env->used_btf_cnt); in release_btfs()
19820 struct bpf_insn *insn = env->prog->insnsi; in convert_pseudo_ld_imm64()
19821 int insn_cnt = env->prog->len; in convert_pseudo_ld_imm64()
19825 if (insn->code != (BPF_LD | BPF_IMM | BPF_DW)) in convert_pseudo_ld_imm64()
19827 if (insn->src_reg == BPF_PSEUDO_FUNC) in convert_pseudo_ld_imm64()
19829 insn->src_reg = 0; in convert_pseudo_ld_imm64()
19833 /* single env->prog->insnsi[off] instruction was replaced with the range
19841 struct bpf_insn_aux_data *old_data = env->insn_aux_data; in adjust_insn_aux_data()
19842 struct bpf_insn *insn = new_prog->insnsi; in adjust_insn_aux_data()
19851 old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1); in adjust_insn_aux_data()
19855 prog_len = new_prog->len; in adjust_insn_aux_data()
19858 memcpy(new_data + off + cnt - 1, old_data + off, in adjust_insn_aux_data()
19859 sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1)); in adjust_insn_aux_data()
19860 for (i = off; i < off + cnt - 1; i++) { in adjust_insn_aux_data()
19865 env->insn_aux_data = new_data; in adjust_insn_aux_data()
19876 for (i = 0; i <= env->subprog_cnt; i++) { in adjust_subprog_starts()
19877 if (env->subprog_info[i].start <= off) in adjust_subprog_starts()
19879 env->subprog_info[i].start += len - 1; in adjust_subprog_starts()
19885 struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab; in adjust_poke_descs()
19886 int i, sz = prog->aux->size_poke_tab; in adjust_poke_descs()
19891 if (desc->insn_idx <= off) in adjust_poke_descs()
19893 desc->insn_idx += len - 1; in adjust_poke_descs()
19904 new_data = vzalloc(array_size(env->prog->len + len - 1, in bpf_patch_insn_data()
19910 new_prog = bpf_patch_insn_single(env->prog, off, patch, len); in bpf_patch_insn_data()
19912 if (PTR_ERR(new_prog) == -ERANGE) in bpf_patch_insn_data()
19914 "insn %d cannot be patched due to 16-bit range\n", in bpf_patch_insn_data()
19915 env->insn_aux_data[off].orig_idx); in bpf_patch_insn_data()
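/* Editor's note: worked example with hypothetical numbers for the patching
 * helpers above. Replacing the single instruction at off = 10 with a
 * 3-instruction patch (cnt = len = 3) grows the program by len - 1 = 2, so:
 *
 *   - aux data for insns 0..9 stays where it is;
 *   - the first cnt - 1 = 2 patch slots (new indices 10 and 11) get fresh
 *     aux entries that inherit insn 10's seen flag and recompute zext_dst;
 *   - the old aux entry of insn 10 now describes new index 12 (the last
 *     patch instruction), old insn 11's entry describes index 13, and so on;
 *   - every subprog_info[].start and poke descriptor insn_idx greater than
 *     10 is bumped by 2.
 */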
19931 struct bpf_insn *insn = prog->insnsi; in adjust_jmp_off()
19932 u32 insn_cnt = prog->len, i; in adjust_jmp_off()
19937 u8 code = insn->code; in adjust_jmp_off()
19946 if (insn->code == (BPF_JMP32 | BPF_JA)) { in adjust_jmp_off()
19947 if (i + 1 + insn->imm != tgt_idx) in adjust_jmp_off()
19949 if (check_add_overflow(insn->imm, delta, &imm)) in adjust_jmp_off()
19950 return -ERANGE; in adjust_jmp_off()
19951 insn->imm = imm; in adjust_jmp_off()
19953 if (i + 1 + insn->off != tgt_idx) in adjust_jmp_off()
19955 if (check_add_overflow(insn->off, delta, &off)) in adjust_jmp_off()
19956 return -ERANGE; in adjust_jmp_off()
19957 insn->off = off; in adjust_jmp_off()
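/* Editor's note: mechanics of adjust_jmp_off() restated on a hypothetical
 * program. If instruction 5 is "gotol +10" (target 5 + 1 + 10 = 16), then
 * adjust_jmp_off(prog, 16, 3) rewrites it to "gotol +13"; jumps aimed at
 * any other target are untouched, and check_add_overflow() returns -ERANGE
 * instead of silently wrapping the 16-bit off or 32-bit imm field.
 */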
19969 for (i = 0; i < env->subprog_cnt; i++) in adjust_subprog_starts_after_remove()
19970 if (env->subprog_info[i].start >= off) in adjust_subprog_starts_after_remove()
19973 for (j = i; j < env->subprog_cnt; j++) in adjust_subprog_starts_after_remove()
19974 if (env->subprog_info[j].start >= off + cnt) in adjust_subprog_starts_after_remove()
19979 if (env->subprog_info[j].start != off + cnt) in adjust_subprog_starts_after_remove()
19980 j--; in adjust_subprog_starts_after_remove()
19983 struct bpf_prog_aux *aux = env->prog->aux; in adjust_subprog_starts_after_remove()
19987 move = env->subprog_cnt + 1 - j; in adjust_subprog_starts_after_remove()
19989 memmove(env->subprog_info + i, in adjust_subprog_starts_after_remove()
19990 env->subprog_info + j, in adjust_subprog_starts_after_remove()
19991 sizeof(*env->subprog_info) * move); in adjust_subprog_starts_after_remove()
19992 env->subprog_cnt -= j - i; in adjust_subprog_starts_after_remove()
19995 if (aux->func_info) { in adjust_subprog_starts_after_remove()
19996 move = aux->func_info_cnt - j; in adjust_subprog_starts_after_remove()
19998 memmove(aux->func_info + i, in adjust_subprog_starts_after_remove()
19999 aux->func_info + j, in adjust_subprog_starts_after_remove()
20000 sizeof(*aux->func_info) * move); in adjust_subprog_starts_after_remove()
20001 aux->func_info_cnt -= j - i; in adjust_subprog_starts_after_remove()
20002 /* func_info->insn_off is set after all code rewrites, in adjust_subprog_starts_after_remove()
20003 * in adjust_btf_func() - no need to adjust in adjust_subprog_starts_after_remove()
20008 if (env->subprog_info[i].start == off) in adjust_subprog_starts_after_remove()
20013 for (; i <= env->subprog_cnt; i++) in adjust_subprog_starts_after_remove()
20014 env->subprog_info[i].start -= cnt; in adjust_subprog_starts_after_remove()
20022 struct bpf_prog *prog = env->prog; in bpf_adj_linfo_after_remove()
20026 nr_linfo = prog->aux->nr_linfo; in bpf_adj_linfo_after_remove()
20030 linfo = prog->aux->linfo; in bpf_adj_linfo_after_remove()
20046 * last removed linfo. prog is already modified, so prog->len == off in bpf_adj_linfo_after_remove()
20049 if (prog->len != off && l_cnt && in bpf_adj_linfo_after_remove()
20051 l_cnt--; in bpf_adj_linfo_after_remove()
20052 linfo[--i].insn_off = off + cnt; in bpf_adj_linfo_after_remove()
20058 sizeof(*linfo) * (nr_linfo - i)); in bpf_adj_linfo_after_remove()
20060 prog->aux->nr_linfo -= l_cnt; in bpf_adj_linfo_after_remove()
20061 nr_linfo = prog->aux->nr_linfo; in bpf_adj_linfo_after_remove()
20066 linfo[i].insn_off -= cnt; in bpf_adj_linfo_after_remove()
20069 for (i = 0; i <= env->subprog_cnt; i++) in bpf_adj_linfo_after_remove()
20070 if (env->subprog_info[i].linfo_idx > l_off) { in bpf_adj_linfo_after_remove()
20074 if (env->subprog_info[i].linfo_idx >= l_off + l_cnt) in bpf_adj_linfo_after_remove()
20075 env->subprog_info[i].linfo_idx -= l_cnt; in bpf_adj_linfo_after_remove()
20077 env->subprog_info[i].linfo_idx = l_off; in bpf_adj_linfo_after_remove()
20085 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; in verifier_remove_insns()
20086 unsigned int orig_prog_len = env->prog->len; in verifier_remove_insns()
20089 if (bpf_prog_is_offloaded(env->prog->aux)) in verifier_remove_insns()
20092 err = bpf_remove_insns(env->prog, off, cnt); in verifier_remove_insns()
20105 sizeof(*aux_data) * (orig_prog_len - off - cnt)); in verifier_remove_insns()
20112 * have dead code too. Therefore replace all dead at-run-time code
20113 * with 'ja -1'.
20123 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; in sanitize_dead_code()
20124 struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1); in sanitize_dead_code()
20125 struct bpf_insn *insn = env->prog->insnsi; in sanitize_dead_code()
20126 const int insn_cnt = env->prog->len; in sanitize_dead_code()
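/* Editor's note: the 'ja -1' used above encodes BPF_JMP_IMM(BPF_JA, 0, 0, -1),
 * a jump whose target is the instruction itself (pc = pc + 1 + (-1)).
 * Instructions proven dead at run time are therefore overwritten with
 * harmless self-jumps, so the bytes that used to sit there can have no
 * effect even if that location were ever reached.
 */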
20153 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; in opt_hard_wire_dead_code_branches()
20155 struct bpf_insn *insn = env->prog->insnsi; in opt_hard_wire_dead_code_branches()
20156 const int insn_cnt = env->prog->len; in opt_hard_wire_dead_code_branches()
20160 if (!insn_is_cond_jump(insn->code)) in opt_hard_wire_dead_code_branches()
20164 ja.off = insn->off; in opt_hard_wire_dead_code_branches()
20165 else if (!aux_data[i + 1 + insn->off].seen) in opt_hard_wire_dead_code_branches()
20170 if (bpf_prog_is_offloaded(env->prog->aux)) in opt_hard_wire_dead_code_branches()
20179 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; in opt_remove_dead_code()
20180 int insn_cnt = env->prog->len; in opt_remove_dead_code()
20195 insn_cnt = env->prog->len; in opt_remove_dead_code()
20206 struct bpf_insn *insn = env->prog->insnsi; in opt_remove_nops()
20207 int insn_cnt = env->prog->len; in opt_remove_nops()
20221 insn_cnt--; in opt_remove_nops()
20223 i -= (is_may_goto_0 && i > 0) ? 2 : 1; in opt_remove_nops()
20233 struct bpf_insn_aux_data *aux = env->insn_aux_data; in opt_subreg_zext_lo32_rnd_hi32()
20234 int i, patch_len, delta = 0, len = env->prog->len; in opt_subreg_zext_lo32_rnd_hi32()
20235 struct bpf_insn *insns = env->prog->insnsi; in opt_subreg_zext_lo32_rnd_hi32()
20239 rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32; in opt_subreg_zext_lo32_rnd_hi32()
20260 if (load_reg == -1) in opt_subreg_zext_lo32_rnd_hi32()
20288 /* Add in a zero-extend instruction if a) the JIT has requested in opt_subreg_zext_lo32_rnd_hi32()
20292 * R0, therefore always zero-extends. However some archs' in opt_subreg_zext_lo32_rnd_hi32()
20295 * orthogonal to the general zero-extension behaviour of the in opt_subreg_zext_lo32_rnd_hi32()
20301 /* Zero-extension is done by the caller. */ in opt_subreg_zext_lo32_rnd_hi32()
20305 if (WARN_ON(load_reg == -1)) { in opt_subreg_zext_lo32_rnd_hi32()
20307 return -EFAULT; in opt_subreg_zext_lo32_rnd_hi32()
20318 return -ENOMEM; in opt_subreg_zext_lo32_rnd_hi32()
20319 env->prog = new_prog; in opt_subreg_zext_lo32_rnd_hi32()
20320 insns = new_prog->insnsi; in opt_subreg_zext_lo32_rnd_hi32()
20321 aux = env->insn_aux_data; in opt_subreg_zext_lo32_rnd_hi32()
20322 delta += patch_len - 1; in opt_subreg_zext_lo32_rnd_hi32()
20330 * struct __sk_buff -> struct sk_buff
20331 * struct bpf_sock_ops -> struct sock
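/* Editor's note: conceptual example of the rewrite this pass performs. The
 * real offsets and instruction sequences come from the prog type's
 * convert_ctx_access() callback and a single access may expand into several
 * instructions:
 *
 *   program sees:      r0 = *(u32 *)(r1 + offsetof(struct __sk_buff, len));
 *   after conversion:  r0 = *(u32 *)(r1 + offsetof(struct sk_buff, len));
 */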
20335 struct bpf_subprog_info *subprogs = env->subprog_info; in convert_ctx_accesses()
20336 const struct bpf_verifier_ops *ops = env->ops; in convert_ctx_accesses()
20338 const int insn_cnt = env->prog->len; in convert_ctx_accesses()
20339 struct bpf_insn *epilogue_buf = env->epilogue_buf; in convert_ctx_accesses()
20340 struct bpf_insn *insn_buf = env->insn_buf; in convert_ctx_accesses()
20348 if (ops->gen_epilogue) { in convert_ctx_accesses()
20349 epilogue_cnt = ops->gen_epilogue(epilogue_buf, env->prog, in convert_ctx_accesses()
20350 -(subprogs[0].stack_depth + 8)); in convert_ctx_accesses()
20353 return -EINVAL; in convert_ctx_accesses()
20359 -subprogs[0].stack_depth); in convert_ctx_accesses()
20360 insn_buf[cnt++] = env->prog->insnsi[0]; in convert_ctx_accesses()
20363 return -ENOMEM; in convert_ctx_accesses()
20364 env->prog = new_prog; in convert_ctx_accesses()
20365 delta += cnt - 1; in convert_ctx_accesses()
20369 if (ops->gen_prologue || env->seen_direct_write) { in convert_ctx_accesses()
20370 if (!ops->gen_prologue) { in convert_ctx_accesses()
20372 return -EINVAL; in convert_ctx_accesses()
20374 cnt = ops->gen_prologue(insn_buf, env->seen_direct_write, in convert_ctx_accesses()
20375 env->prog); in convert_ctx_accesses()
20378 return -EINVAL; in convert_ctx_accesses()
20382 return -ENOMEM; in convert_ctx_accesses()
20384 env->prog = new_prog; in convert_ctx_accesses()
20385 delta += cnt - 1; in convert_ctx_accesses()
20390 WARN_ON(adjust_jmp_off(env->prog, 0, delta)); in convert_ctx_accesses()
20392 if (bpf_prog_is_offloaded(env->prog->aux)) in convert_ctx_accesses()
20395 insn = env->prog->insnsi + delta; in convert_ctx_accesses()
20401 if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) || in convert_ctx_accesses()
20402 insn->code == (BPF_LDX | BPF_MEM | BPF_H) || in convert_ctx_accesses()
20403 insn->code == (BPF_LDX | BPF_MEM | BPF_W) || in convert_ctx_accesses()
20404 insn->code == (BPF_LDX | BPF_MEM | BPF_DW) || in convert_ctx_accesses()
20405 insn->code == (BPF_LDX | BPF_MEMSX | BPF_B) || in convert_ctx_accesses()
20406 insn->code == (BPF_LDX | BPF_MEMSX | BPF_H) || in convert_ctx_accesses()
20407 insn->code == (BPF_LDX | BPF_MEMSX | BPF_W)) { in convert_ctx_accesses()
20409 } else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) || in convert_ctx_accesses()
20410 insn->code == (BPF_STX | BPF_MEM | BPF_H) || in convert_ctx_accesses()
20411 insn->code == (BPF_STX | BPF_MEM | BPF_W) || in convert_ctx_accesses()
20412 insn->code == (BPF_STX | BPF_MEM | BPF_DW) || in convert_ctx_accesses()
20413 insn->code == (BPF_ST | BPF_MEM | BPF_B) || in convert_ctx_accesses()
20414 insn->code == (BPF_ST | BPF_MEM | BPF_H) || in convert_ctx_accesses()
20415 insn->code == (BPF_ST | BPF_MEM | BPF_W) || in convert_ctx_accesses()
20416 insn->code == (BPF_ST | BPF_MEM | BPF_DW)) { in convert_ctx_accesses()
20418 } else if ((insn->code == (BPF_STX | BPF_ATOMIC | BPF_W) || in convert_ctx_accesses()
20419 insn->code == (BPF_STX | BPF_ATOMIC | BPF_DW)) && in convert_ctx_accesses()
20420 env->insn_aux_data[i + delta].ptr_type == PTR_TO_ARENA) { in convert_ctx_accesses()
20421 insn->code = BPF_STX | BPF_PROBE_ATOMIC | BPF_SIZE(insn->code); in convert_ctx_accesses()
20422 env->prog->aux->num_exentries++; in convert_ctx_accesses()
20424 } else if (insn->code == (BPF_JMP | BPF_EXIT) && in convert_ctx_accesses()
20430 insn_buf[0] = BPF_JMP32_A(epilogue_idx - i - delta - 1); in convert_ctx_accesses()
20448 env->insn_aux_data[i + delta].sanitize_stack_spill) { in convert_ctx_accesses()
20457 return -ENOMEM; in convert_ctx_accesses()
20459 delta += cnt - 1; in convert_ctx_accesses()
20460 env->prog = new_prog; in convert_ctx_accesses()
20461 insn = new_prog->insnsi + i + delta; in convert_ctx_accesses()
20465 switch ((int)env->insn_aux_data[i + delta].ptr_type) { in convert_ctx_accesses()
20467 if (!ops->convert_ctx_access) in convert_ctx_accesses()
20469 convert_ctx_access = ops->convert_ctx_access; in convert_ctx_accesses()
20491 if (BPF_MODE(insn->code) == BPF_MEM) in convert_ctx_accesses()
20492 insn->code = BPF_LDX | BPF_PROBE_MEM | in convert_ctx_accesses()
20493 BPF_SIZE((insn)->code); in convert_ctx_accesses()
20495 insn->code = BPF_LDX | BPF_PROBE_MEMSX | in convert_ctx_accesses()
20496 BPF_SIZE((insn)->code); in convert_ctx_accesses()
20497 env->prog->aux->num_exentries++; in convert_ctx_accesses()
20501 if (BPF_MODE(insn->code) == BPF_MEMSX) { in convert_ctx_accesses()
20503 return -EOPNOTSUPP; in convert_ctx_accesses()
20505 insn->code = BPF_CLASS(insn->code) | BPF_PROBE_MEM32 | BPF_SIZE(insn->code); in convert_ctx_accesses()
20506 env->prog->aux->num_exentries++; in convert_ctx_accesses()
20512 ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size; in convert_ctx_accesses()
20514 mode = BPF_MODE(insn->code); in convert_ctx_accesses()
20517 * convert to a 4/8-byte load, to minimize program type specific in convert_ctx_accesses()
20523 off = insn->off; in convert_ctx_accesses()
20529 return -EINVAL; in convert_ctx_accesses()
20538 insn->off = off & ~(size_default - 1); in convert_ctx_accesses()
20539 insn->code = BPF_LDX | BPF_MEM | size_code; in convert_ctx_accesses()
20543 cnt = convert_ctx_access(type, insn, insn_buf, env->prog, in convert_ctx_accesses()
20548 return -EINVAL; in convert_ctx_accesses()
20556 return -EINVAL; in convert_ctx_accesses()
20561 insn->dst_reg, in convert_ctx_accesses()
20563 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg, in convert_ctx_accesses()
20564 (1 << size * 8) - 1); in convert_ctx_accesses()
20568 insn->dst_reg, in convert_ctx_accesses()
20570 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg, in convert_ctx_accesses()
20571 (1ULL << size * 8) - 1); in convert_ctx_accesses()
20576 insn->dst_reg, insn->dst_reg, in convert_ctx_accesses()
20582 return -ENOMEM; in convert_ctx_accesses()
20584 delta += cnt - 1; in convert_ctx_accesses()
20587 env->prog = new_prog; in convert_ctx_accesses()
20588 insn = new_prog->insnsi + i + delta; in convert_ctx_accesses()
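/* Editor's note: worked example (hypothetical offsets, little-endian shift)
 * of the narrow-load handling above. A 1-byte context read at offset 14 of
 * a 4-byte field starting at offset 12 becomes roughly:
 *
 *   r_dst = *(u32 *)(ctx + 12);   // offset rounded down to the field size,
 *                                 // then translated by convert_ctx_access()
 *   r_dst >>= 16;                 // 8 * (14 - 12) to move the wanted byte down
 *   r_dst &= 0xff;                // mask to the original 1-byte access size
 */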
20596 struct bpf_prog *prog = env->prog, **func, *tmp; in jit_subprogs()
20603 if (env->subprog_cnt <= 1) in jit_subprogs()
20606 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { in jit_subprogs()
20611 * need a hard reject of the program. Thus -EFAULT is in jit_subprogs()
20614 subprog = find_subprog(env, i + insn->imm + 1); in jit_subprogs()
20617 i + insn->imm + 1); in jit_subprogs()
20618 return -EFAULT; in jit_subprogs()
20623 insn->off = subprog; in jit_subprogs()
20627 env->insn_aux_data[i].call_imm = insn->imm; in jit_subprogs()
20629 insn->imm = 1; in jit_subprogs()
20649 err = -ENOMEM; in jit_subprogs()
20650 func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL); in jit_subprogs()
20654 for (i = 0; i < env->subprog_cnt; i++) { in jit_subprogs()
20656 subprog_end = env->subprog_info[i + 1].start; in jit_subprogs()
20658 len = subprog_end - subprog_start; in jit_subprogs()
20662 * func[i]->stats will never be accessed and stays NULL in jit_subprogs()
20667 memcpy(func[i]->insnsi, &prog->insnsi[subprog_start], in jit_subprogs()
20669 func[i]->type = prog->type; in jit_subprogs()
20670 func[i]->len = len; in jit_subprogs()
20673 func[i]->is_func = 1; in jit_subprogs()
20674 func[i]->sleepable = prog->sleepable; in jit_subprogs()
20675 func[i]->aux->func_idx = i; in jit_subprogs()
20676 /* Below members will be freed only at prog->aux */ in jit_subprogs()
20677 func[i]->aux->btf = prog->aux->btf; in jit_subprogs()
20678 func[i]->aux->func_info = prog->aux->func_info; in jit_subprogs()
20679 func[i]->aux->func_info_cnt = prog->aux->func_info_cnt; in jit_subprogs()
20680 func[i]->aux->poke_tab = prog->aux->poke_tab; in jit_subprogs()
20681 func[i]->aux->size_poke_tab = prog->aux->size_poke_tab; in jit_subprogs()
20683 for (j = 0; j < prog->aux->size_poke_tab; j++) { in jit_subprogs()
20686 poke = &prog->aux->poke_tab[j]; in jit_subprogs()
20687 if (poke->insn_idx < subprog_end && in jit_subprogs()
20688 poke->insn_idx >= subprog_start) in jit_subprogs()
20689 poke->aux = func[i]->aux; in jit_subprogs()
20692 func[i]->aux->name[0] = 'F'; in jit_subprogs()
20693 func[i]->aux->stack_depth = env->subprog_info[i].stack_depth; in jit_subprogs()
20694 if (env->subprog_info[i].priv_stack_mode == PRIV_STACK_ADAPTIVE) in jit_subprogs()
20695 func[i]->aux->jits_use_priv_stack = true; in jit_subprogs()
20697 func[i]->jit_requested = 1; in jit_subprogs()
20698 func[i]->blinding_requested = prog->blinding_requested; in jit_subprogs()
20699 func[i]->aux->kfunc_tab = prog->aux->kfunc_tab; in jit_subprogs()
20700 func[i]->aux->kfunc_btf_tab = prog->aux->kfunc_btf_tab; in jit_subprogs()
20701 func[i]->aux->linfo = prog->aux->linfo; in jit_subprogs()
20702 func[i]->aux->nr_linfo = prog->aux->nr_linfo; in jit_subprogs()
20703 func[i]->aux->jited_linfo = prog->aux->jited_linfo; in jit_subprogs()
20704 func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx; in jit_subprogs()
20705 func[i]->aux->arena = prog->aux->arena; in jit_subprogs()
20707 insn = func[i]->insnsi; in jit_subprogs()
20708 for (j = 0; j < func[i]->len; j++, insn++) { in jit_subprogs()
20709 if (BPF_CLASS(insn->code) == BPF_LDX && in jit_subprogs()
20710 (BPF_MODE(insn->code) == BPF_PROBE_MEM || in jit_subprogs()
20711 BPF_MODE(insn->code) == BPF_PROBE_MEM32 || in jit_subprogs()
20712 BPF_MODE(insn->code) == BPF_PROBE_MEMSX)) in jit_subprogs()
20714 if ((BPF_CLASS(insn->code) == BPF_STX || in jit_subprogs()
20715 BPF_CLASS(insn->code) == BPF_ST) && in jit_subprogs()
20716 BPF_MODE(insn->code) == BPF_PROBE_MEM32) in jit_subprogs()
20718 if (BPF_CLASS(insn->code) == BPF_STX && in jit_subprogs()
20719 BPF_MODE(insn->code) == BPF_PROBE_ATOMIC) in jit_subprogs()
20722 func[i]->aux->num_exentries = num_exentries; in jit_subprogs()
20723 func[i]->aux->tail_call_reachable = env->subprog_info[i].tail_call_reachable; in jit_subprogs()
20724 func[i]->aux->exception_cb = env->subprog_info[i].is_exception_cb; in jit_subprogs()
20725 func[i]->aux->changes_pkt_data = env->subprog_info[i].changes_pkt_data; in jit_subprogs()
20727 func[i]->aux->exception_boundary = env->seen_exception; in jit_subprogs()
20729 if (!func[i]->jited) { in jit_subprogs()
20730 err = -ENOTSUPP; in jit_subprogs()
20740 for (i = 0; i < env->subprog_cnt; i++) { in jit_subprogs()
20741 insn = func[i]->insnsi; in jit_subprogs()
20742 for (j = 0; j < func[i]->len; j++, insn++) { in jit_subprogs()
20744 subprog = insn->off; in jit_subprogs()
20745 insn[0].imm = (u32)(long)func[subprog]->bpf_func; in jit_subprogs()
20746 insn[1].imm = ((u64)(long)func[subprog]->bpf_func) >> 32; in jit_subprogs()
20751 subprog = insn->off; in jit_subprogs()
20752 insn->imm = BPF_CALL_IMM(func[subprog]->bpf_func); in jit_subprogs()
20766 func[i]->aux->func = func; in jit_subprogs()
20767 func[i]->aux->func_cnt = env->subprog_cnt - env->hidden_subprog_cnt; in jit_subprogs()
20768 func[i]->aux->real_func_cnt = env->subprog_cnt; in jit_subprogs()
20770 for (i = 0; i < env->subprog_cnt; i++) { in jit_subprogs()
20771 old_bpf_func = func[i]->bpf_func; in jit_subprogs()
20773 if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) { in jit_subprogs()
20774 verbose(env, "JIT doesn't support bpf-to-bpf calls\n"); in jit_subprogs()
20775 err = -ENOTSUPP; in jit_subprogs()
20785 for (i = 1; i < env->subprog_cnt; i++) { in jit_subprogs()
20791 for (i = 1; i < env->subprog_cnt; i++) in jit_subprogs()
20798 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { in jit_subprogs()
20800 insn[0].imm = env->insn_aux_data[i].call_imm; in jit_subprogs()
20801 insn[1].imm = insn->off; in jit_subprogs()
20802 insn->off = 0; in jit_subprogs()
20807 insn->off = env->insn_aux_data[i].call_imm; in jit_subprogs()
20808 subprog = find_subprog(env, i + insn->off + 1); in jit_subprogs()
20809 insn->imm = subprog; in jit_subprogs()
20812 prog->jited = 1; in jit_subprogs()
20813 prog->bpf_func = func[0]->bpf_func; in jit_subprogs()
20814 prog->jited_len = func[0]->jited_len; in jit_subprogs()
20815 prog->aux->extable = func[0]->aux->extable; in jit_subprogs()
20816 prog->aux->num_exentries = func[0]->aux->num_exentries; in jit_subprogs()
20817 prog->aux->func = func; in jit_subprogs()
20818 prog->aux->func_cnt = env->subprog_cnt - env->hidden_subprog_cnt; in jit_subprogs()
20819 prog->aux->real_func_cnt = env->subprog_cnt; in jit_subprogs()
20820 prog->aux->bpf_exception_cb = (void *)func[env->exception_callback_subprog]->bpf_func; in jit_subprogs()
20821 prog->aux->exception_boundary = func[0]->aux->exception_boundary; in jit_subprogs()
20829 for (i = 0; i < prog->aux->size_poke_tab; i++) { in jit_subprogs()
20830 map_ptr = prog->aux->poke_tab[i].tail_call.map; in jit_subprogs()
20831 map_ptr->ops->map_poke_untrack(map_ptr, prog->aux); in jit_subprogs()
20837 for (i = 0; i < env->subprog_cnt; i++) { in jit_subprogs()
20840 func[i]->aux->poke_tab = NULL; in jit_subprogs()
20846 prog->jit_requested = 0; in jit_subprogs()
20847 prog->blinding_requested = 0; in jit_subprogs()
20848 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { in jit_subprogs()
20851 insn->off = 0; in jit_subprogs()
20852 insn->imm = env->insn_aux_data[i].call_imm; in jit_subprogs()
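/* Editor's note: condensed flow of jit_subprogs() above, error and cleanup
 * paths omitted: split prog->insnsi at subprog boundaries into one bpf_prog
 * per subprogram, JIT each of them, patch the BPF_PSEUDO_FUNC ld_imm64
 * pairs and pseudo-call immediates with the real function addresses,
 * finalize every image, then hand func[0]'s image to the main prog and
 * restore the call imm/off encoding in the original instruction stream so
 * later dumps look as if the program had been interpreted.
 */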
20861 struct bpf_prog *prog = env->prog; in fixup_call_args()
20862 struct bpf_insn *insn = prog->insnsi; in fixup_call_args()
20868 if (env->prog->jit_requested && in fixup_call_args()
20869 !bpf_prog_is_offloaded(env->prog->aux)) { in fixup_call_args()
20873 if (err == -EFAULT) in fixup_call_args()
20878 verbose(env, "calling kernel functions are not allowed in non-JITed programs\n"); in fixup_call_args()
20879 return -EINVAL; in fixup_call_args()
20881 if (env->subprog_cnt > 1 && env->prog->aux->tail_call_reachable) { in fixup_call_args()
20885 verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n"); in fixup_call_args()
20886 return -EINVAL; in fixup_call_args()
20888 for (i = 0; i < prog->len; i++, insn++) { in fixup_call_args()
20893 verbose(env, "callbacks are not allowed in non-JITed programs\n"); in fixup_call_args()
20894 return -EINVAL; in fixup_call_args()
20913 struct bpf_prog *prog = env->prog; in specialize_kfunc()
20931 seen_direct_write = env->seen_direct_write; in specialize_kfunc()
20937 /* restore env->seen_direct_write to its original value, since in specialize_kfunc()
20940 env->seen_direct_write = seen_direct_write; in specialize_kfunc()
20951 struct btf_struct_meta *kptr_struct_meta = insn_aux->kptr_struct_meta; in __fixup_collection_insert_kfunc()
20956 insn_buf[2] = BPF_MOV64_IMM(node_offset_reg, insn_aux->insert_off); in __fixup_collection_insert_kfunc()
20966 if (!insn->imm) { in fixup_kfunc_call()
20968 return -EINVAL; in fixup_kfunc_call()
20973 /* insn->imm has the btf func_id. Replace it with an offset relative to in fixup_kfunc_call()
20977 desc = find_kfunc_desc(env->prog, insn->imm, insn->off); in fixup_kfunc_call()
20980 insn->imm); in fixup_kfunc_call()
20981 return -EFAULT; in fixup_kfunc_call()
20985 insn->imm = BPF_CALL_IMM(desc->addr); in fixup_kfunc_call()
20986 if (insn->off) in fixup_kfunc_call()
20988 if (desc->func_id == special_kfunc_list[KF_bpf_obj_new_impl] || in fixup_kfunc_call()
20989 desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) { in fixup_kfunc_call()
20990 struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta; in fixup_kfunc_call()
20992 u64 obj_new_size = env->insn_aux_data[insn_idx].obj_new_size; in fixup_kfunc_call()
20994 if (desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl] && kptr_struct_meta) { in fixup_kfunc_call()
20997 return -EFAULT; in fixup_kfunc_call()
21005 } else if (desc->func_id == special_kfunc_list[KF_bpf_obj_drop_impl] || in fixup_kfunc_call()
21006 desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_drop_impl] || in fixup_kfunc_call()
21007 desc->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl]) { in fixup_kfunc_call()
21008 struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta; in fixup_kfunc_call()
21011 if (desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_drop_impl] && kptr_struct_meta) { in fixup_kfunc_call()
21014 return -EFAULT; in fixup_kfunc_call()
21017 if (desc->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl] && in fixup_kfunc_call()
21021 return -EFAULT; in fixup_kfunc_call()
21028 } else if (desc->func_id == special_kfunc_list[KF_bpf_list_push_back_impl] || in fixup_kfunc_call()
21029 desc->func_id == special_kfunc_list[KF_bpf_list_push_front_impl] || in fixup_kfunc_call()
21030 desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) { in fixup_kfunc_call()
21031 struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta; in fixup_kfunc_call()
21035 /* rbtree_add has extra 'less' arg, so args-to-fixup are in diff regs */ in fixup_kfunc_call()
21036 if (desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) { in fixup_kfunc_call()
21044 return -EFAULT; in fixup_kfunc_call()
21047 __fixup_collection_insert_kfunc(&env->insn_aux_data[insn_idx], struct_meta_reg, in fixup_kfunc_call()
21049 } else if (desc->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx] || in fixup_kfunc_call()
21050 desc->func_id == special_kfunc_list[KF_bpf_rdonly_cast]) { in fixup_kfunc_call()
21053 } else if (is_bpf_wq_set_callback_impl_kfunc(desc->func_id)) { in fixup_kfunc_call()
21054 struct bpf_insn ld_addrs[2] = { BPF_LD_IMM64(BPF_REG_4, (long)env->prog->aux) }; in fixup_kfunc_call()
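/* Editorial note: after fixup_kfunc_call() the call insn no longer carries a
 * BTF func_id in insn->imm; it carries the kfunc's address as a 32-bit
 * offset from __bpf_call_base, which is what BPF_CALL_IMM() computes.
 * A sketch, assuming the usual definition of that macro:
 *
 *	insn->imm = (s32)((void *)desc->addr - (void *)__bpf_call_base);
 *
 * The JIT (or interpreter) later adds __bpf_call_base back to recover the
 * real function address.
 */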
21064 /* The function requires that the first instruction in 'patch' is insnsi[prog->len - 1] */
21067 struct bpf_subprog_info *info = env->subprog_info; in add_hidden_subprog()
21068 int cnt = env->subprog_cnt; in add_hidden_subprog()
21072 if (env->hidden_subprog_cnt) { in add_hidden_subprog()
21074 return -EFAULT; in add_hidden_subprog()
21078 * in bpf_patch_insn_data are no-ops. in add_hidden_subprog()
21080 prog = bpf_patch_insn_data(env, env->prog->len - 1, patch, len); in add_hidden_subprog()
21082 return -ENOMEM; in add_hidden_subprog()
21083 env->prog = prog; in add_hidden_subprog()
21085 info[cnt].start = prog->len - len + 1; in add_hidden_subprog()
21086 env->subprog_cnt++; in add_hidden_subprog()
21087 env->hidden_subprog_cnt++; in add_hidden_subprog()
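/* Editorial note: the hidden subprog used for the exception callback is
 * appended by patching at insnsi[prog->len - 1], i.e. past every existing
 * instruction, so the jump/call offset adjustments normally done by
 * bpf_patch_insn_data() have nothing to rewrite, as the comment above says.
 * Its subprog_info entry therefore starts right after the original program
 * end, and both subprog_cnt and hidden_subprog_cnt grow by one.
 */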
21091 /* Do various post-verification rewrites in a single program pass.
21096 struct bpf_prog *prog = env->prog; in do_misc_fixups()
21097 enum bpf_attach_type eatype = prog->expected_attach_type; in do_misc_fixups()
21099 struct bpf_insn *insn = prog->insnsi; in do_misc_fixups()
21101 const int insn_cnt = prog->len; in do_misc_fixups()
21104 struct bpf_insn *insn_buf = env->insn_buf; in do_misc_fixups()
21108 struct bpf_subprog_info *subprogs = env->subprog_info; in do_misc_fixups()
21112 if (env->seen_exception && !env->exception_callback_subprog) { in do_misc_fixups()
21114 env->prog->insnsi[insn_cnt - 1], in do_misc_fixups()
21122 prog = env->prog; in do_misc_fixups()
21123 insn = prog->insnsi; in do_misc_fixups()
21125 env->exception_callback_subprog = env->subprog_cnt - 1; in do_misc_fixups()
21127 mark_subprog_exc_cb(env, env->exception_callback_subprog); in do_misc_fixups()
21131 if (insn->code == (BPF_ALU64 | BPF_MOV | BPF_X) && insn->imm) { in do_misc_fixups()
21132 if ((insn->off == BPF_ADDR_SPACE_CAST && insn->imm == 1) || in do_misc_fixups()
21133 (((struct bpf_map *)env->prog->aux->arena)->map_flags & BPF_F_NO_USER_CONV)) { in do_misc_fixups()
21134 /* convert to 32-bit mov that clears upper 32-bit */ in do_misc_fixups()
21135 insn->code = BPF_ALU | BPF_MOV | BPF_X; in do_misc_fixups()
21137 insn->off = 0; in do_misc_fixups()
21138 insn->imm = 0; in do_misc_fixups()
21143 if (env->insn_aux_data[i + delta].needs_zext) in do_misc_fixups()
21144 /* Convert BPF_CLASS(insn->code) == BPF_ALU64 to 32-bit ALU */ in do_misc_fixups()
21145 insn->code = BPF_ALU | BPF_OP(insn->code) | BPF_SRC(insn->code); in do_misc_fixups()
21147 /* Make sdiv/smod divide-by-minus-one exceptions impossible. */ in do_misc_fixups()
21148 if ((insn->code == (BPF_ALU64 | BPF_MOD | BPF_K) || in do_misc_fixups()
21149 insn->code == (BPF_ALU64 | BPF_DIV | BPF_K) || in do_misc_fixups()
21150 insn->code == (BPF_ALU | BPF_MOD | BPF_K) || in do_misc_fixups()
21151 insn->code == (BPF_ALU | BPF_DIV | BPF_K)) && in do_misc_fixups()
21152 insn->off == 1 && insn->imm == -1) { in do_misc_fixups()
21153 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64; in do_misc_fixups()
21154 bool isdiv = BPF_OP(insn->code) == BPF_DIV; in do_misc_fixups()
21158 BPF_NEG | BPF_K, insn->dst_reg, in do_misc_fixups()
21162 BPF_MOV32_IMM(insn->dst_reg, 0), in do_misc_fixups()
21170 return -ENOMEM; in do_misc_fixups()
21172 delta += cnt - 1; in do_misc_fixups()
21173 env->prog = prog = new_prog; in do_misc_fixups()
21174 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
21178 /* Make divide-by-zero and divide-by-minus-one exceptions impossible. */ in do_misc_fixups()
21179 if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) || in do_misc_fixups()
21180 insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) || in do_misc_fixups()
21181 insn->code == (BPF_ALU | BPF_MOD | BPF_X) || in do_misc_fixups()
21182 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) { in do_misc_fixups()
21183 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64; in do_misc_fixups()
21184 bool isdiv = BPF_OP(insn->code) == BPF_DIV; in do_misc_fixups()
21185 bool is_sdiv = isdiv && insn->off == 1; in do_misc_fixups()
21186 bool is_smod = !isdiv && insn->off == 1; in do_misc_fixups()
21189 /* [R,W]x div 0 -> 0 */ in do_misc_fixups()
21191 BPF_JNE | BPF_K, insn->src_reg, in do_misc_fixups()
21193 BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg), in do_misc_fixups()
21198 /* [R,W]x mod 0 -> [R,W]x */ in do_misc_fixups()
21200 BPF_JEQ | BPF_K, insn->src_reg, in do_misc_fixups()
21204 BPF_MOV32_REG(insn->dst_reg, insn->dst_reg), in do_misc_fixups()
21207 /* [R,W]x sdiv 0 -> 0 in do_misc_fixups()
21208 * LLONG_MIN sdiv -1 -> LLONG_MIN in do_misc_fixups()
21209 * INT_MIN sdiv -1 -> INT_MIN in do_misc_fixups()
21211 BPF_MOV64_REG(BPF_REG_AX, insn->src_reg), in do_misc_fixups()
21222 BPF_MOV | BPF_K, insn->dst_reg, in do_misc_fixups()
21224 /* BPF_NEG(LLONG_MIN) == -LLONG_MIN == LLONG_MIN */ in do_misc_fixups()
21226 BPF_NEG | BPF_K, insn->dst_reg, in do_misc_fixups()
21232 /* [R,W]x mod 0 -> [R,W]x */ in do_misc_fixups()
21233 /* [R,W]x mod -1 -> 0 */ in do_misc_fixups()
21234 BPF_MOV64_REG(BPF_REG_AX, insn->src_reg), in do_misc_fixups()
21244 BPF_MOV32_IMM(insn->dst_reg, 0), in do_misc_fixups()
21248 BPF_MOV32_REG(insn->dst_reg, insn->dst_reg), in do_misc_fixups()
21256 cnt = ARRAY_SIZE(chk_and_smod) - (is64 ? 2 : 0); in do_misc_fixups()
21260 ARRAY_SIZE(chk_and_mod) - (is64 ? 2 : 0); in do_misc_fixups()
21265 return -ENOMEM; in do_misc_fixups()
21267 delta += cnt - 1; in do_misc_fixups()
21268 env->prog = prog = new_prog; in do_misc_fixups()
21269 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
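/* Editorial sketch, reconstructed from the visible fragments above (exact
 * jump offsets depend on is64/is_sdiv/is_smod), of the guards emitted for a
 * runtime divisor:
 *
 *	unsigned DIV by register:
 *		if (src != 0) goto do_div;	// BPF_JNE src, 0
 *		dst = 0;			// x / 0 is defined as 0
 *		goto after;
 *	do_div:	<original div insn>
 *
 *	unsigned MOD by register:
 *		if (src == 0) goto keep;	// x % 0 is defined as x
 *		<original mod insn>
 *	keep:	(the 32-bit case re-moves dst onto itself to zero the
 *		 upper half)
 *
 * The signed variants additionally special-case a divisor of -1 so that
 * LLONG_MIN / -1 cannot trap: sdiv by -1 becomes BPF_NEG of dst and smod
 * by -1 yields 0, matching the constant-immediate case handled earlier.
 */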
21273 /* Make it impossible to de-reference a userspace address */ in do_misc_fixups()
21274 if (BPF_CLASS(insn->code) == BPF_LDX && in do_misc_fixups()
21275 (BPF_MODE(insn->code) == BPF_PROBE_MEM || in do_misc_fixups()
21276 BPF_MODE(insn->code) == BPF_PROBE_MEMSX)) { in do_misc_fixups()
21283 *patch++ = BPF_MOV64_REG(BPF_REG_AX, insn->src_reg); in do_misc_fixups()
21284 if (insn->off) in do_misc_fixups()
21285 *patch++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_AX, insn->off); in do_misc_fixups()
21290 *patch++ = BPF_MOV64_IMM(insn->dst_reg, 0); in do_misc_fixups()
21292 cnt = patch - insn_buf; in do_misc_fixups()
21295 return -ENOMEM; in do_misc_fixups()
21297 delta += cnt - 1; in do_misc_fixups()
21298 env->prog = prog = new_prog; in do_misc_fixups()
21299 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
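/* Editorial note: for BPF_PROBE_MEM/BPF_PROBE_MEMSX loads the patch above
 * builds the effective address in BPF_REG_AX (src plus insn->off) and, in
 * the lines elided from this listing, is expected to compare it against the
 * kernel/user address boundary; when the address lies in user space the
 * load is skipped and insn->dst_reg is simply zeroed via BPF_MOV64_IMM, so
 * a probing load can never dereference a user pointer.
 */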
21304 if (BPF_CLASS(insn->code) == BPF_LD && in do_misc_fixups()
21305 (BPF_MODE(insn->code) == BPF_ABS || in do_misc_fixups()
21306 BPF_MODE(insn->code) == BPF_IND)) { in do_misc_fixups()
21307 cnt = env->ops->gen_ld_abs(insn, insn_buf); in do_misc_fixups()
21310 return -EINVAL; in do_misc_fixups()
21315 return -ENOMEM; in do_misc_fixups()
21317 delta += cnt - 1; in do_misc_fixups()
21318 env->prog = prog = new_prog; in do_misc_fixups()
21319 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
21324 if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) || in do_misc_fixups()
21325 insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) { in do_misc_fixups()
21332 aux = &env->insn_aux_data[i + delta]; in do_misc_fixups()
21333 if (!aux->alu_state || in do_misc_fixups()
21334 aux->alu_state == BPF_ALU_NON_POINTER) in do_misc_fixups()
21337 isneg = aux->alu_state & BPF_ALU_NEG_VALUE; in do_misc_fixups()
21338 issrc = (aux->alu_state & BPF_ALU_SANITIZE) == in do_misc_fixups()
21340 isimm = aux->alu_state & BPF_ALU_IMMEDIATE; in do_misc_fixups()
21342 off_reg = issrc ? insn->src_reg : insn->dst_reg; in do_misc_fixups()
21344 *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit); in do_misc_fixups()
21347 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); in do_misc_fixups()
21348 *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit); in do_misc_fixups()
21356 *patch++ = BPF_MOV64_REG(insn->dst_reg, insn->src_reg); in do_misc_fixups()
21357 insn->src_reg = BPF_REG_AX; in do_misc_fixups()
21359 insn->code = insn->code == code_add ? in do_misc_fixups()
21363 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); in do_misc_fixups()
21364 cnt = patch - insn_buf; in do_misc_fixups()
21368 return -ENOMEM; in do_misc_fixups()
21370 delta += cnt - 1; in do_misc_fixups()
21371 env->prog = prog = new_prog; in do_misc_fixups()
21372 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
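/* Editorial note: this block is the pointer-arithmetic sanitation against
 * speculative out-of-bounds (Spectre v1).  aux->alu_limit is the largest
 * offset the verifier proved safe for this ADD/SUB on a pointer; the patch
 * uses the scratch register BPF_REG_AX to clamp the runtime offset to that
 * limit before the arithmetic runs, and re-negates afterwards when the
 * value was negated (isneg).  Roughly, from memory of the scheme and
 * therefore only a sketch:
 *
 *	ax = alu_limit - off;
 *	ax |= off;		// sign bit set iff off is out of range
 *	ax = -ax;
 *	ax = (s64)ax >> 63;	// all-ones for in-range, 0 otherwise
 *	off &= ax;		// out-of-range offsets collapse to 0
 */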
21377 int stack_off = -stack_depth - 8; in do_misc_fixups()
21381 if (insn->off >= 0) in do_misc_fixups()
21382 insn_buf[1] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_AX, 0, insn->off + 2); in do_misc_fixups()
21384 insn_buf[1] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_AX, 0, insn->off - 1); in do_misc_fixups()
21391 return -ENOMEM; in do_misc_fixups()
21393 delta += cnt - 1; in do_misc_fixups()
21394 env->prog = prog = new_prog; in do_misc_fixups()
21395 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
21399 if (insn->code != (BPF_JMP | BPF_CALL)) in do_misc_fixups()
21401 if (insn->src_reg == BPF_PSEUDO_CALL) in do_misc_fixups()
21403 if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) { in do_misc_fixups()
21412 return -ENOMEM; in do_misc_fixups()
21414 delta += cnt - 1; in do_misc_fixups()
21415 env->prog = prog = new_prog; in do_misc_fixups()
21416 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
21421 if (bpf_jit_inlines_helper_call(insn->imm)) in do_misc_fixups()
21424 if (insn->imm == BPF_FUNC_get_route_realm) in do_misc_fixups()
21425 prog->dst_needed = 1; in do_misc_fixups()
21426 if (insn->imm == BPF_FUNC_get_prandom_u32) in do_misc_fixups()
21428 if (insn->imm == BPF_FUNC_override_return) in do_misc_fixups()
21429 prog->kprobe_override = 1; in do_misc_fixups()
21430 if (insn->imm == BPF_FUNC_tail_call) { in do_misc_fixups()
21436 prog->cb_access = 1; in do_misc_fixups()
21438 prog->aux->stack_depth = MAX_BPF_STACK; in do_misc_fixups()
21439 prog->aux->max_pkt_offset = MAX_PACKET_OFF; in do_misc_fixups()
21446 insn->imm = 0; in do_misc_fixups()
21447 insn->code = BPF_JMP | BPF_TAIL_CALL; in do_misc_fixups()
21449 aux = &env->insn_aux_data[i + delta]; in do_misc_fixups()
21450 if (env->bpf_capable && !prog->blinding_requested && in do_misc_fixups()
21451 prog->jit_requested && in do_misc_fixups()
21457 .tail_call.map = aux->map_ptr_state.map_ptr, in do_misc_fixups()
21468 insn->imm = ret + 1; in do_misc_fixups()
21478 * index &= array->index_mask; in do_misc_fixups()
21479 * to avoid out-of-bounds cpu speculation in do_misc_fixups()
21483 return -EINVAL; in do_misc_fixups()
21486 map_ptr = aux->map_ptr_state.map_ptr; in do_misc_fixups()
21488 map_ptr->max_entries, 2); in do_misc_fixups()
21492 map)->index_mask); in do_misc_fixups()
21497 return -ENOMEM; in do_misc_fixups()
21499 delta += cnt - 1; in do_misc_fixups()
21500 env->prog = prog = new_prog; in do_misc_fixups()
21501 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
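/* Editorial sketch of the tail_call rewrite above (registers follow the
 * tail_call convention: R1 = ctx, R2 = prog array, R3 = index):
 *
 *	if (R3 >= map->max_entries) goto out;	// BPF_JGE, skip 2
 *	R3 &= array->index_mask;		// kill speculative OOB index
 *	<BPF_TAIL_CALL insn>
 * out:
 *
 * index_mask is presumably max_entries rounded up to a power of two minus
 * one, so even a mispredicted bounds check cannot index outside the array.
 * Separately, when the map and key are constant and the program is being
 * JITed, a poke descriptor is recorded (insn->imm = ret + 1 above) so the
 * JIT can turn the tail call into a direct jump.
 */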
21505 if (insn->imm == BPF_FUNC_timer_set_callback) { in do_misc_fixups()
21514 * Those that were not bpf_timer_init-ed will return -EINVAL. in do_misc_fixups()
21516 * Those that were not both bpf_timer_init-ed and in do_misc_fixups()
21517 * bpf_timer_set_callback-ed will return -EINVAL. in do_misc_fixups()
21520 BPF_LD_IMM64(BPF_REG_3, (long)prog->aux), in do_misc_fixups()
21530 return -ENOMEM; in do_misc_fixups()
21532 delta += cnt - 1; in do_misc_fixups()
21533 env->prog = prog = new_prog; in do_misc_fixups()
21534 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
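/* Editorial note: bpf_timer_set_callback() takes only (timer, callback) from
 * the program's point of view; the patch above materializes a third
 * argument, the owning program's aux pointer (BPF_LD_IMM64 into R3), so the
 * helper can associate and pin the program that supplies the callback.  The
 * bpf_wq set_callback kfunc handled in fixup_kfunc_call() does the same via
 * R4.
 */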
21538 if (is_storage_get_function(insn->imm)) { in do_misc_fixups()
21540 env->insn_aux_data[i + delta].storage_get_func_atomic) in do_misc_fixups()
21549 return -ENOMEM; in do_misc_fixups()
21551 delta += cnt - 1; in do_misc_fixups()
21552 env->prog = prog = new_prog; in do_misc_fixups()
21553 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
21558 if (env->insn_aux_data[i + delta].call_with_percpu_alloc_ptr) { in do_misc_fixups()
21568 return -ENOMEM; in do_misc_fixups()
21570 delta += cnt - 1; in do_misc_fixups()
21571 env->prog = prog = new_prog; in do_misc_fixups()
21572 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
21580 if (prog->jit_requested && BITS_PER_LONG == 64 && in do_misc_fixups()
21581 (insn->imm == BPF_FUNC_map_lookup_elem || in do_misc_fixups()
21582 insn->imm == BPF_FUNC_map_update_elem || in do_misc_fixups()
21583 insn->imm == BPF_FUNC_map_delete_elem || in do_misc_fixups()
21584 insn->imm == BPF_FUNC_map_push_elem || in do_misc_fixups()
21585 insn->imm == BPF_FUNC_map_pop_elem || in do_misc_fixups()
21586 insn->imm == BPF_FUNC_map_peek_elem || in do_misc_fixups()
21587 insn->imm == BPF_FUNC_redirect_map || in do_misc_fixups()
21588 insn->imm == BPF_FUNC_for_each_map_elem || in do_misc_fixups()
21589 insn->imm == BPF_FUNC_map_lookup_percpu_elem)) { in do_misc_fixups()
21590 aux = &env->insn_aux_data[i + delta]; in do_misc_fixups()
21594 map_ptr = aux->map_ptr_state.map_ptr; in do_misc_fixups()
21595 ops = map_ptr->ops; in do_misc_fixups()
21596 if (insn->imm == BPF_FUNC_map_lookup_elem && in do_misc_fixups()
21597 ops->map_gen_lookup) { in do_misc_fixups()
21598 cnt = ops->map_gen_lookup(map_ptr, insn_buf); in do_misc_fixups()
21599 if (cnt == -EOPNOTSUPP) in do_misc_fixups()
21603 return -EINVAL; in do_misc_fixups()
21609 return -ENOMEM; in do_misc_fixups()
21611 delta += cnt - 1; in do_misc_fixups()
21612 env->prog = prog = new_prog; in do_misc_fixups()
21613 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
21617 BUILD_BUG_ON(!__same_type(ops->map_lookup_elem, in do_misc_fixups()
21619 BUILD_BUG_ON(!__same_type(ops->map_delete_elem, in do_misc_fixups()
21621 BUILD_BUG_ON(!__same_type(ops->map_update_elem, in do_misc_fixups()
21624 BUILD_BUG_ON(!__same_type(ops->map_push_elem, in do_misc_fixups()
21627 BUILD_BUG_ON(!__same_type(ops->map_pop_elem, in do_misc_fixups()
21629 BUILD_BUG_ON(!__same_type(ops->map_peek_elem, in do_misc_fixups()
21631 BUILD_BUG_ON(!__same_type(ops->map_redirect, in do_misc_fixups()
21633 BUILD_BUG_ON(!__same_type(ops->map_for_each_callback, in do_misc_fixups()
21638 BUILD_BUG_ON(!__same_type(ops->map_lookup_percpu_elem, in do_misc_fixups()
21642 switch (insn->imm) { in do_misc_fixups()
21644 insn->imm = BPF_CALL_IMM(ops->map_lookup_elem); in do_misc_fixups()
21647 insn->imm = BPF_CALL_IMM(ops->map_update_elem); in do_misc_fixups()
21650 insn->imm = BPF_CALL_IMM(ops->map_delete_elem); in do_misc_fixups()
21653 insn->imm = BPF_CALL_IMM(ops->map_push_elem); in do_misc_fixups()
21656 insn->imm = BPF_CALL_IMM(ops->map_pop_elem); in do_misc_fixups()
21659 insn->imm = BPF_CALL_IMM(ops->map_peek_elem); in do_misc_fixups()
21662 insn->imm = BPF_CALL_IMM(ops->map_redirect); in do_misc_fixups()
21665 insn->imm = BPF_CALL_IMM(ops->map_for_each_callback); in do_misc_fixups()
21668 insn->imm = BPF_CALL_IMM(ops->map_lookup_percpu_elem); in do_misc_fixups()
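/* Editorial note: two levels of map-call inlining happen above.  For
 * map_lookup_elem on maps providing ->map_gen_lookup(), the helper call is
 * replaced with map-specific inline BPF instructions (an array map, for
 * instance, can open-code the bounds check and pointer arithmetic).  For
 * the other helpers in the list the generic helper is bypassed by pointing
 * insn->imm straight at the map's own ops function, roughly:
 *
 *	insn->imm = BPF_CALL_IMM(ops->map_update_elem);
 *
 * which is why the BUILD_BUG_ON()s pin the ops signatures to the helper
 * prototypes.
 */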
21676 if (prog->jit_requested && BITS_PER_LONG == 64 && in do_misc_fixups()
21677 insn->imm == BPF_FUNC_jiffies64) { in do_misc_fixups()
21692 return -ENOMEM; in do_misc_fixups()
21694 delta += cnt - 1; in do_misc_fixups()
21695 env->prog = prog = new_prog; in do_misc_fixups()
21696 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
21702 if (insn->imm == BPF_FUNC_get_smp_processor_id && in do_misc_fixups()
21703 verifier_inlines_helper_call(env, insn->imm)) { in do_misc_fixups()
21720 return -ENOMEM; in do_misc_fixups()
21722 delta += cnt - 1; in do_misc_fixups()
21723 env->prog = prog = new_prog; in do_misc_fixups()
21724 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
21730 insn->imm == BPF_FUNC_get_func_arg) { in do_misc_fixups()
21731 /* Load nr_args from ctx - 8 */ in do_misc_fixups()
21732 insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8); in do_misc_fixups()
21740 insn_buf[8] = BPF_MOV64_IMM(BPF_REG_0, -EINVAL); in do_misc_fixups()
21745 return -ENOMEM; in do_misc_fixups()
21747 delta += cnt - 1; in do_misc_fixups()
21748 env->prog = prog = new_prog; in do_misc_fixups()
21749 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
21755 insn->imm == BPF_FUNC_get_func_ret) { in do_misc_fixups()
21758 /* Load nr_args from ctx - 8 */ in do_misc_fixups()
21759 insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8); in do_misc_fixups()
21767 insn_buf[0] = BPF_MOV64_IMM(BPF_REG_0, -EOPNOTSUPP); in do_misc_fixups()
21773 return -ENOMEM; in do_misc_fixups()
21775 delta += cnt - 1; in do_misc_fixups()
21776 env->prog = prog = new_prog; in do_misc_fixups()
21777 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
21783 insn->imm == BPF_FUNC_get_func_arg_cnt) { in do_misc_fixups()
21784 /* Load nr_args from ctx - 8 */ in do_misc_fixups()
21785 insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8); in do_misc_fixups()
21789 return -ENOMEM; in do_misc_fixups()
21791 env->prog = prog = new_prog; in do_misc_fixups()
21792 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
21798 insn->imm == BPF_FUNC_get_func_ip) { in do_misc_fixups()
21799 /* Load IP address from ctx - 16 */ in do_misc_fixups()
21800 insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -16); in do_misc_fixups()
21804 return -ENOMEM; in do_misc_fixups()
21806 env->prog = prog = new_prog; in do_misc_fixups()
21807 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
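/* Editorial note on the inlinings above: for tracing programs running under
 * a BPF trampoline the context is an array of u64 argument slots, with the
 * argument count stored at ctx - 8 and (when requested) the traced
 * function's IP at ctx - 16.  bpf_get_func_arg_cnt() therefore collapses to
 * a single load, sketched as:
 *
 *	r0 = *(u64 *)(r1 - 8);	// BPF_LDX_MEM(BPF_DW, R0, R1, -8)
 *
 * while bpf_get_func_arg()/bpf_get_func_ret() become a bounds check against
 * that count followed by an indexed load, returning -EINVAL for an
 * out-of-range index (and -EOPNOTSUPP when the return value is unavailable).
 */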
21813 prog->jit_requested && BITS_PER_LONG == 64 && in do_misc_fixups()
21814 insn->imm == BPF_FUNC_get_branch_snapshot) { in do_misc_fixups()
21827 /* if (unlikely(flags)) return -EINVAL */ in do_misc_fixups()
21832 * divide-by-3 through multiplication, followed by further in do_misc_fixups()
21833 * division by 8 through 3-bit right shift. in do_misc_fixups()
21845 /* if (entry_cnt == 0) return -ENOENT */ in do_misc_fixups()
21850 /* return -EINVAL; */ in do_misc_fixups()
21851 insn_buf[8] = BPF_MOV64_IMM(BPF_REG_0, -EINVAL); in do_misc_fixups()
21853 /* return -ENOENT; */ in do_misc_fixups()
21854 insn_buf[10] = BPF_MOV64_IMM(BPF_REG_0, -ENOENT); in do_misc_fixups()
21859 return -ENOMEM; in do_misc_fixups()
21861 delta += cnt - 1; in do_misc_fixups()
21862 env->prog = prog = new_prog; in do_misc_fixups()
21863 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
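/* Editorial note: the inline expansion of bpf_get_branch_snapshot() converts
 * the buffer size supplied by the program into an entry count by dividing by
 * sizeof(struct perf_branch_entry), i.e. 24 bytes, hence the multiply-based
 * divide-by-3 followed by a 3-bit right shift mentioned in the comment, so
 * no real division is emitted.  Non-zero flags yield -EINVAL and an empty
 * snapshot yields -ENOENT, mirroring the out-of-line helper.
 */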
21868 if (prog->jit_requested && BITS_PER_LONG == 64 && in do_misc_fixups()
21869 insn->imm == BPF_FUNC_kptr_xchg && in do_misc_fixups()
21877 return -ENOMEM; in do_misc_fixups()
21879 delta += cnt - 1; in do_misc_fixups()
21880 env->prog = prog = new_prog; in do_misc_fixups()
21881 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
21885 fn = env->ops->get_func_proto(insn->imm, env->prog); in do_misc_fixups()
21887 * programs to call them, must be real in-kernel functions in do_misc_fixups()
21889 if (!fn->func) { in do_misc_fixups()
21892 func_id_name(insn->imm), insn->imm); in do_misc_fixups()
21893 return -EFAULT; in do_misc_fixups()
21895 insn->imm = fn->func - __bpf_call_base; in do_misc_fixups()
21908 env->prog->aux->stack_depth = subprogs[0].stack_depth; in do_misc_fixups()
21909 for (i = 0; i < env->subprog_cnt; i++) { in do_misc_fixups()
21917 return -EFAULT; in do_misc_fixups()
21922 -subprogs[i].stack_depth, BPF_MAX_LOOPS); in do_misc_fixups()
21924 insn_buf[1] = env->prog->insnsi[subprog_start]; in do_misc_fixups()
21928 return -ENOMEM; in do_misc_fixups()
21929 env->prog = prog = new_prog; in do_misc_fixups()
21936 WARN_ON(adjust_jmp_off(env->prog, subprog_start, 1)); in do_misc_fixups()
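/* Editorial note, lightly inferred: the loop above prepends an extra store
 * to the prologue of each subprog that reserved extra stack, writing
 * BPF_MAX_LOOPS into a slot just below the subprog's stack (presumably the
 * may_goto iteration counter), and re-emits the subprog's original first
 * instruction after it; adjust_jmp_off() then keeps jump targets consistent
 * with the one-instruction shift.
 */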
21940 for (i = 0; i < prog->aux->size_poke_tab; i++) { in do_misc_fixups()
21941 map_ptr = prog->aux->poke_tab[i].tail_call.map; in do_misc_fixups()
21942 if (!map_ptr->ops->map_poke_track || in do_misc_fixups()
21943 !map_ptr->ops->map_poke_untrack || in do_misc_fixups()
21944 !map_ptr->ops->map_poke_run) { in do_misc_fixups()
21946 return -EINVAL; in do_misc_fixups()
21949 ret = map_ptr->ops->map_poke_track(map_ptr, prog->aux); in do_misc_fixups()
21956 sort_kfunc_descs_by_imm_off(env->prog); in do_misc_fixups()
21974 struct bpf_insn *insn_buf = env->insn_buf; in inline_bpf_loop()
21989 insn_buf[cnt++] = BPF_MOV32_IMM(BPF_REG_0, -E2BIG); in inline_bpf_loop()
22012 insn_buf[cnt++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -6); in inline_bpf_loop()
22028 callback_start = env->subprog_info[callback_subprogno].start; in inline_bpf_loop()
22031 callback_offset = callback_start - call_insn_offset - 1; in inline_bpf_loop()
22032 new_prog->insnsi[call_insn_offset].imm = callback_offset; in inline_bpf_loop()
22039 return insn->code == (BPF_JMP | BPF_CALL) && in is_bpf_loop_call()
22040 insn->src_reg == 0 && in is_bpf_loop_call()
22041 insn->imm == BPF_FUNC_loop; in is_bpf_loop_call()
22044 /* For all sub-programs in the program (including main) check
22055 struct bpf_subprog_info *subprogs = env->subprog_info; in optimize_bpf_loop()
22057 struct bpf_insn *insn = env->prog->insnsi; in optimize_bpf_loop()
22058 int insn_cnt = env->prog->len; in optimize_bpf_loop()
22060 u16 stack_depth_roundup = round_up(stack_depth, 8) - stack_depth; in optimize_bpf_loop()
22065 &env->insn_aux_data[i + delta].loop_inline_state; in optimize_bpf_loop()
22067 if (is_bpf_loop_call(insn) && inline_state->fit_for_inline) { in optimize_bpf_loop()
22073 -(stack_depth + stack_depth_extra), in optimize_bpf_loop()
22074 inline_state->callback_subprogno, in optimize_bpf_loop()
22077 return -ENOMEM; in optimize_bpf_loop()
22079 delta += cnt - 1; in optimize_bpf_loop()
22080 env->prog = new_prog; in optimize_bpf_loop()
22081 insn = new_prog->insnsi + i + delta; in optimize_bpf_loop()
22088 stack_depth_roundup = round_up(stack_depth, 8) - stack_depth; in optimize_bpf_loop()
22093 env->prog->aux->stack_depth = env->subprog_info[0].stack_depth; in optimize_bpf_loop()
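/* Editorial note: when the verifier proved a bpf_loop() call site has a
 * constant callback and compatible flags (fit_for_inline), inline_bpf_loop()
 * replaces the helper call with an open-coded loop whose induction variable
 * lives in scratch stack slots carved out below the subprog stack
 * (stack_depth_extra).  Conceptually, as an editorial sketch rather than the
 * emitted instructions:
 *
 *	if (nr_loops > BPF_MAX_LOOPS)
 *		return -E2BIG;
 *	for (i = 0; i < nr_loops; i++)
 *		if (callback(i, callback_ctx) != 0)
 *			break;
 *	R0 = i;		// bpf_loop() returns iterations performed
 *
 * The direct-call offset to the callback subprog is fixed up afterwards
 * (callback_start - call_insn_offset - 1, as computed above).
 */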
22103 struct bpf_subprog_info *subprog = env->subprog_info; in remove_fastcall_spills_fills()
22104 struct bpf_insn_aux_data *aux = env->insn_aux_data; in remove_fastcall_spills_fills()
22105 struct bpf_insn *insn = env->prog->insnsi; in remove_fastcall_spills_fills()
22106 int insn_cnt = env->prog->len; in remove_fastcall_spills_fills()
22116 *(insn - j) = NOP; in remove_fastcall_spills_fills()
22121 if ((subprog + 1)->start == i + 1) { in remove_fastcall_spills_fills()
22122 if (modified && !subprog->keep_fastcall_stack) in remove_fastcall_spills_fills()
22123 subprog->stack_depth = -subprog->fastcall_stack_off; in remove_fastcall_spills_fills()
22137 sl = env->free_list; in free_states()
22139 sln = sl->next; in free_states()
22140 free_verifier_state(&sl->state, false); in free_states()
22144 env->free_list = NULL; in free_states()
22146 if (!env->explored_states) in free_states()
22150 sl = env->explored_states[i]; in free_states()
22153 sln = sl->next; in free_states()
22154 free_verifier_state(&sl->state, false); in free_states()
22158 env->explored_states[i] = NULL; in free_states()
22164 bool pop_log = !(env->log.level & BPF_LOG_LEVEL2); in do_check_common()
22170 env->prev_linfo = NULL; in do_check_common()
22171 env->pass_cnt++; in do_check_common()
22175 return -ENOMEM; in do_check_common()
22176 state->curframe = 0; in do_check_common()
22177 state->speculative = false; in do_check_common()
22178 state->branches = 1; in do_check_common()
22179 state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL); in do_check_common()
22180 if (!state->frame[0]) { in do_check_common()
22182 return -ENOMEM; in do_check_common()
22184 env->cur_state = state; in do_check_common()
22185 init_func_state(env, state->frame[0], in do_check_common()
22189 state->first_insn_idx = env->subprog_info[subprog].start; in do_check_common()
22190 state->last_insn_idx = -1; in do_check_common()
22192 regs = state->frame[state->curframe]->regs; in do_check_common()
22193 if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) { in do_check_common()
22204 state->frame[0]->in_exception_callback_fn = true; in do_check_common()
22209 if (sub->arg_cnt != 1 || sub->args[0].arg_type != ARG_ANYTHING) { in do_check_common()
22211 ret = -EINVAL; in do_check_common()
22215 for (i = BPF_REG_1; i <= sub->arg_cnt; i++) { in do_check_common()
22216 arg = &sub->args[i - BPF_REG_1]; in do_check_common()
22219 if (arg->arg_type == ARG_PTR_TO_CTX) { in do_check_common()
22220 reg->type = PTR_TO_CTX; in do_check_common()
22222 } else if (arg->arg_type == ARG_ANYTHING) { in do_check_common()
22223 reg->type = SCALAR_VALUE; in do_check_common()
22225 } else if (arg->arg_type == (ARG_PTR_TO_DYNPTR | MEM_RDONLY)) { in do_check_common()
22227 __mark_dynptr_reg(reg, BPF_DYNPTR_TYPE_LOCAL, true, ++env->id_gen); in do_check_common()
22228 } else if (base_type(arg->arg_type) == ARG_PTR_TO_MEM) { in do_check_common()
22229 reg->type = PTR_TO_MEM; in do_check_common()
22230 if (arg->arg_type & PTR_MAYBE_NULL) in do_check_common()
22231 reg->type |= PTR_MAYBE_NULL; in do_check_common()
22233 reg->mem_size = arg->mem_size; in do_check_common()
22234 reg->id = ++env->id_gen; in do_check_common()
22235 } else if (base_type(arg->arg_type) == ARG_PTR_TO_BTF_ID) { in do_check_common()
22236 reg->type = PTR_TO_BTF_ID; in do_check_common()
22237 if (arg->arg_type & PTR_MAYBE_NULL) in do_check_common()
22238 reg->type |= PTR_MAYBE_NULL; in do_check_common()
22239 if (arg->arg_type & PTR_UNTRUSTED) in do_check_common()
22240 reg->type |= PTR_UNTRUSTED; in do_check_common()
22241 if (arg->arg_type & PTR_TRUSTED) in do_check_common()
22242 reg->type |= PTR_TRUSTED; in do_check_common()
22244 reg->btf = bpf_get_btf_vmlinux(); /* can't fail at this point */ in do_check_common()
22245 reg->btf_id = arg->btf_id; in do_check_common()
22246 reg->id = ++env->id_gen; in do_check_common()
22247 } else if (base_type(arg->arg_type) == ARG_PTR_TO_ARENA) { in do_check_common()
22252 i - BPF_REG_1, arg->arg_type); in do_check_common()
22253 ret = -EFAULT; in do_check_common()
22262 if (env->prog->aux->func_info_aux) { in do_check_common()
22264 if (ret || sub->arg_cnt != 1 || sub->args[0].arg_type != ARG_PTR_TO_CTX) in do_check_common()
22265 env->prog->aux->func_info_aux[0].unreliable = true; in do_check_common()
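/* Editorial example (hypothetical program, not from this file): for a global
 * subprogram declared as
 *
 *	__noinline int sub(struct __sk_buff *skb, int n);
 *
 * the argument loop above seeds its verification with R1 = PTR_TO_CTX and
 * R2 = SCALAR_VALUE, purely from the BTF-derived arg_type of each parameter.
 * Pointer-to-memory arguments additionally receive mem_size and a fresh id,
 * and PTR_TO_BTF_ID arguments carry the vmlinux BTF plus the MAYBE_NULL/
 * TRUSTED/UNTRUSTED flags exactly as declared.
 */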
22278 if (env->cur_state) { in do_check_common()
22279 free_verifier_state(env->cur_state, true); in do_check_common()
22280 env->cur_state = NULL; in do_check_common()
22284 bpf_vlog_reset(&env->log, 0); in do_check_common()
22311 struct bpf_prog_aux *aux = env->prog->aux; in do_check_subprogs()
22315 if (!aux->func_info) in do_check_subprogs()
22319 if (env->exception_callback_subprog) in do_check_subprogs()
22320 subprog_aux(env, env->exception_callback_subprog)->called = true; in do_check_subprogs()
22324 for (i = 1; i < env->subprog_cnt; i++) { in do_check_subprogs()
22329 if (!sub_aux->called || sub_aux->verified) in do_check_subprogs()
22332 env->insn_idx = env->subprog_info[i].start; in do_check_subprogs()
22333 WARN_ON_ONCE(env->insn_idx == 0); in do_check_subprogs()
22337 } else if (env->log.level & BPF_LOG_LEVEL) { in do_check_subprogs()
22346 sub_aux->verified = true; in do_check_subprogs()
22363 env->insn_idx = 0; in do_check_main()
22366 env->prog->aux->stack_depth = env->subprog_info[0].stack_depth; in do_check_main()
22375 if (env->log.level & BPF_LOG_STATS) { in print_verification_stats()
22377 div_u64(env->verification_time, 1000)); in print_verification_stats()
22379 for (i = 0; i < env->subprog_cnt; i++) { in print_verification_stats()
22380 u32 depth = env->subprog_info[i].stack_depth; in print_verification_stats()
22383 if (i + 1 < env->subprog_cnt) in print_verification_stats()
22390 env->insn_processed, BPF_COMPLEXITY_LIMIT_INSNS, in print_verification_stats()
22391 env->max_states_per_insn, env->total_states, in print_verification_stats()
22392 env->peak_states, env->longest_mark_read_walk); in print_verification_stats()
22401 struct bpf_prog *prog = env->prog; in check_struct_ops_btf_id()
22407 if (!prog->gpl_compatible) { in check_struct_ops_btf_id()
22409 return -EINVAL; in check_struct_ops_btf_id()
22412 if (!prog->aux->attach_btf_id) in check_struct_ops_btf_id()
22413 return -ENOTSUPP; in check_struct_ops_btf_id()
22415 btf = prog->aux->attach_btf; in check_struct_ops_btf_id()
22418 env->attach_btf_mod = btf_try_get_module(btf); in check_struct_ops_btf_id()
22419 if (!env->attach_btf_mod) { in check_struct_ops_btf_id()
22422 return -ENOTSUPP; in check_struct_ops_btf_id()
22426 btf_id = prog->aux->attach_btf_id; in check_struct_ops_btf_id()
22431 return -ENOTSUPP; in check_struct_ops_btf_id()
22433 st_ops = st_ops_desc->st_ops; in check_struct_ops_btf_id()
22435 t = st_ops_desc->type; in check_struct_ops_btf_id()
22436 member_idx = prog->expected_attach_type; in check_struct_ops_btf_id()
22439 member_idx, st_ops->name); in check_struct_ops_btf_id()
22440 return -EINVAL; in check_struct_ops_btf_id()
22444 mname = btf_name_by_offset(btf, member->name_off); in check_struct_ops_btf_id()
22445 func_proto = btf_type_resolve_func_ptr(btf, member->type, in check_struct_ops_btf_id()
22449 mname, member_idx, st_ops->name); in check_struct_ops_btf_id()
22450 return -EINVAL; in check_struct_ops_btf_id()
22456 mname, st_ops->name); in check_struct_ops_btf_id()
22460 if (st_ops->check_member) { in check_struct_ops_btf_id()
22461 err = st_ops->check_member(t, member, prog); in check_struct_ops_btf_id()
22465 mname, st_ops->name); in check_struct_ops_btf_id()
22470 if (prog->aux->priv_stack_requested && !bpf_jit_supports_private_stack()) { in check_struct_ops_btf_id()
22472 return -EACCES; in check_struct_ops_btf_id()
22476 prog->aux->ctx_arg_info = in check_struct_ops_btf_id()
22477 st_ops_desc->arg_info[member_idx].info; in check_struct_ops_btf_id()
22478 prog->aux->ctx_arg_info_size = in check_struct_ops_btf_id()
22479 st_ops_desc->arg_info[member_idx].cnt; in check_struct_ops_btf_id()
22481 prog->aux->attach_func_proto = func_proto; in check_struct_ops_btf_id()
22482 prog->aux->attach_func_name = mname; in check_struct_ops_btf_id()
22483 env->ops = st_ops->verifier_ops; in check_struct_ops_btf_id()
22492 !strncmp(SECURITY_PREFIX, func_name, sizeof(SECURITY_PREFIX) - 1)) in check_attach_modify_return()
22495 return -EINVAL; in check_attach_modify_return()
22498 /* list of non-sleepable functions that are otherwise on
22502 /* Three functions below can be called from sleepable and non-sleepable context.
22503 * Assume non-sleepable from bpf safety point of view.
22525 bool prog_extension = prog->type == BPF_PROG_TYPE_EXT; in bpf_check_attach_target()
22526 bool prog_tracing = prog->type == BPF_PROG_TYPE_TRACING; in bpf_check_attach_target()
22530 int ret = 0, subprog = -1, i; in bpf_check_attach_target()
22540 return -EINVAL; in bpf_check_attach_target()
22542 btf = tgt_prog ? tgt_prog->aux->btf : prog->aux->attach_btf; in bpf_check_attach_target()
22546 return -EINVAL; in bpf_check_attach_target()
22551 return -EINVAL; in bpf_check_attach_target()
22553 tname = btf_name_by_offset(btf, t->name_off); in bpf_check_attach_target()
22556 return -EINVAL; in bpf_check_attach_target()
22559 struct bpf_prog_aux *aux = tgt_prog->aux; in bpf_check_attach_target()
22562 if (bpf_prog_is_dev_bound(prog->aux) && in bpf_check_attach_target()
22565 return -EINVAL; in bpf_check_attach_target()
22568 for (i = 0; i < aux->func_info_cnt; i++) in bpf_check_attach_target()
22569 if (aux->func_info[i].type_id == btf_id) { in bpf_check_attach_target()
22573 if (subprog == -1) { in bpf_check_attach_target()
22575 return -EINVAL; in bpf_check_attach_target()
22577 if (aux->func && aux->func[subprog]->aux->exception_cb) { in bpf_check_attach_target()
22581 return -EINVAL; in bpf_check_attach_target()
22583 conservative = aux->func_info_aux[subprog].unreliable; in bpf_check_attach_target()
22588 return -EINVAL; in bpf_check_attach_target()
22590 if (!prog->jit_requested) { in bpf_check_attach_target()
22593 return -EINVAL; in bpf_check_attach_target()
22595 tgt_changes_pkt_data = aux->func in bpf_check_attach_target()
22596 ? aux->func[subprog]->aux->changes_pkt_data in bpf_check_attach_target()
22597 : aux->changes_pkt_data; in bpf_check_attach_target()
22598 if (prog->aux->changes_pkt_data && !tgt_changes_pkt_data) { in bpf_check_attach_target()
22601 return -EINVAL; in bpf_check_attach_target()
22604 if (!tgt_prog->jited) { in bpf_check_attach_target()
22606 return -EINVAL; in bpf_check_attach_target()
22609 if (aux->attach_tracing_prog) { in bpf_check_attach_target()
22616 return -EINVAL; in bpf_check_attach_target()
22618 } else if (tgt_prog->type == prog->type) { in bpf_check_attach_target()
22625 return -EINVAL; in bpf_check_attach_target()
22627 if (tgt_prog->type == BPF_PROG_TYPE_TRACING && in bpf_check_attach_target()
22629 (tgt_prog->expected_attach_type == BPF_TRACE_FENTRY || in bpf_check_attach_target()
22630 tgt_prog->expected_attach_type == BPF_TRACE_FEXIT)) { in bpf_check_attach_target()
22641 * long call chain fentry->extension->fentry->extension in bpf_check_attach_target()
22646 return -EINVAL; in bpf_check_attach_target()
22651 return -EINVAL; in bpf_check_attach_target()
22655 switch (prog->expected_attach_type) { in bpf_check_attach_target()
22660 return -EINVAL; in bpf_check_attach_target()
22665 return -EINVAL; in bpf_check_attach_target()
22667 if (strncmp(prefix, tname, sizeof(prefix) - 1)) { in bpf_check_attach_target()
22670 return -EINVAL; in bpf_check_attach_target()
22672 tname += sizeof(prefix) - 1; in bpf_check_attach_target()
22679 return -EINVAL; in bpf_check_attach_target()
22680 fname = kallsyms_lookup((unsigned long)btp->bpf_func, NULL, NULL, NULL, in bpf_check_attach_target()
22690 t = btf_type_by_id(btf, t->type); in bpf_check_attach_target()
22693 return -EINVAL; in bpf_check_attach_target()
22698 return -EINVAL; in bpf_check_attach_target()
22701 t = btf_type_by_id(btf, t->type); in bpf_check_attach_target()
22704 return -EINVAL; in bpf_check_attach_target()
22711 return -EINVAL; in bpf_check_attach_target()
22713 t = btf_type_by_id(btf, t->type); in bpf_check_attach_target()
22715 return -EINVAL; in bpf_check_attach_target()
22716 ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel); in bpf_check_attach_target()
22722 return -EINVAL; in bpf_check_attach_target()
22732 return -EINVAL; in bpf_check_attach_target()
22736 return -EINVAL; in bpf_check_attach_target()
22737 t = btf_type_by_id(btf, t->type); in bpf_check_attach_target()
22739 return -EINVAL; in bpf_check_attach_target()
22741 if ((prog->aux->saved_dst_prog_type || prog->aux->saved_dst_attach_type) && in bpf_check_attach_target()
22742 (!tgt_prog || prog->aux->saved_dst_prog_type != tgt_prog->type || in bpf_check_attach_target()
22743 prog->aux->saved_dst_attach_type != tgt_prog->expected_attach_type)) in bpf_check_attach_target()
22744 return -EINVAL; in bpf_check_attach_target()
22749 ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel); in bpf_check_attach_target()
22755 addr = (long) tgt_prog->bpf_func; in bpf_check_attach_target()
22757 addr = (long) tgt_prog->aux->func[subprog]->bpf_func; in bpf_check_attach_target()
22773 return -ENOENT; in bpf_check_attach_target()
22777 if (prog->sleepable) { in bpf_check_attach_target()
22778 ret = -EINVAL; in bpf_check_attach_target()
22779 switch (prog->type) { in bpf_check_attach_target()
22814 } else if (prog->expected_attach_type == BPF_MODIFY_RETURN) { in bpf_check_attach_target()
22818 return -EINVAL; in bpf_check_attach_target()
22820 ret = -EINVAL; in bpf_check_attach_target()
22833 tgt_info->tgt_addr = addr; in bpf_check_attach_target()
22834 tgt_info->tgt_name = tname; in bpf_check_attach_target()
22835 tgt_info->tgt_type = t; in bpf_check_attach_target()
22836 tgt_info->tgt_mod = mod; in bpf_check_attach_target()
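/* Editorial note: attach-target resolution above depends on the attach type.
 * For BPF_TRACE_RAW_TP the attach_btf_id names a typedef of the form
 * btf_trace_<tracepoint>, so the prefix is stripped before the tracepoint is
 * looked up.  For fentry/fexit/fmod_ret-style attachment the target is a
 * function: its prototype is distilled into tgt_info->fmodel and the
 * trampoline address comes either from kallsyms for kernel functions or,
 * when attaching to another BPF program, from that program's (sub)prog
 * bpf_func as seen in the tgt_prog->bpf_func assignments above.
 */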
22861 if (prog->type == BPF_PROG_TYPE_TRACING) { in BTF_SET_START()
22862 switch (prog->expected_attach_type) { in BTF_SET_START()
22872 return prog->type == BPF_PROG_TYPE_LSM || in BTF_SET_START()
22873 prog->type == BPF_PROG_TYPE_KPROBE /* only for uprobes */ || in BTF_SET_START()
22874 prog->type == BPF_PROG_TYPE_STRUCT_OPS; in BTF_SET_START()
22879 struct bpf_prog *prog = env->prog; in check_attach_btf_id()
22880 struct bpf_prog *tgt_prog = prog->aux->dst_prog; in check_attach_btf_id()
22882 u32 btf_id = prog->aux->attach_btf_id; in check_attach_btf_id()
22887 if (prog->type == BPF_PROG_TYPE_SYSCALL) { in check_attach_btf_id()
22888 if (prog->sleepable) in check_attach_btf_id()
22892 return -EINVAL; in check_attach_btf_id()
22895 if (prog->sleepable && !can_be_sleepable(prog)) { in check_attach_btf_id()
22897 return -EINVAL; in check_attach_btf_id()
22900 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS) in check_attach_btf_id()
22903 if (prog->type != BPF_PROG_TYPE_TRACING && in check_attach_btf_id()
22904 prog->type != BPF_PROG_TYPE_LSM && in check_attach_btf_id()
22905 prog->type != BPF_PROG_TYPE_EXT) in check_attach_btf_id()
22908 ret = bpf_check_attach_target(&env->log, prog, tgt_prog, btf_id, &tgt_info); in check_attach_btf_id()
22912 if (tgt_prog && prog->type == BPF_PROG_TYPE_EXT) { in check_attach_btf_id()
22914 * inherit env->ops and expected_attach_type for the rest of the in check_attach_btf_id()
22917 env->ops = bpf_verifier_ops[tgt_prog->type]; in check_attach_btf_id()
22918 prog->expected_attach_type = tgt_prog->expected_attach_type; in check_attach_btf_id()
22922 prog->aux->attach_func_proto = tgt_info.tgt_type; in check_attach_btf_id()
22923 prog->aux->attach_func_name = tgt_info.tgt_name; in check_attach_btf_id()
22924 prog->aux->mod = tgt_info.tgt_mod; in check_attach_btf_id()
22927 prog->aux->saved_dst_prog_type = tgt_prog->type; in check_attach_btf_id()
22928 prog->aux->saved_dst_attach_type = tgt_prog->expected_attach_type; in check_attach_btf_id()
22931 if (prog->expected_attach_type == BPF_TRACE_RAW_TP) { in check_attach_btf_id()
22932 prog->aux->attach_btf_trace = true; in check_attach_btf_id()
22934 } else if (prog->expected_attach_type == BPF_TRACE_ITER) { in check_attach_btf_id()
22936 return -EINVAL; in check_attach_btf_id()
22940 if (prog->type == BPF_PROG_TYPE_LSM) { in check_attach_btf_id()
22941 ret = bpf_lsm_verify_prog(&env->log, prog); in check_attach_btf_id()
22944 } else if (prog->type == BPF_PROG_TYPE_TRACING && in check_attach_btf_id()
22946 return -EINVAL; in check_attach_btf_id()
22949 key = bpf_trampoline_compute_key(tgt_prog, prog->aux->attach_btf, btf_id); in check_attach_btf_id()
22952 return -ENOMEM; in check_attach_btf_id()
22954 if (tgt_prog && tgt_prog->aux->tail_call_reachable) in check_attach_btf_id()
22955 tr->flags = BPF_TRAMP_F_TAIL_CALL_CTX; in check_attach_btf_id()
22957 prog->aux->dst_trampoline = tr; in check_attach_btf_id()
22973 * The add_fd_from_fd_array() is executed only if fd_array_cnt is non-zero. In
23011 env->fd_array = make_bpfptr(attr->fd_array, uattr.is_kernel); in process_fd_array()
23018 if (!attr->fd_array_cnt) in process_fd_array()
23022 if (attr->fd_array_cnt >= (U32_MAX / size)) { in process_fd_array()
23023 verbose(env, "fd_array_cnt is too big (%u)\n", attr->fd_array_cnt); in process_fd_array()
23024 return -EINVAL; in process_fd_array()
23027 for (i = 0; i < attr->fd_array_cnt; i++) { in process_fd_array()
23028 if (copy_from_bpfptr_offset(&fd, env->fd_array, i * size, size)) in process_fd_array()
23029 return -EFAULT; in process_fd_array()
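/* Editorial note: the fd_array_cnt >= U32_MAX / size check above is an
 * overflow guard; it ensures the total byte size of the fd array cannot
 * wrap before the descriptors are copied in one at a time with
 * copy_from_bpfptr_offset(..., i * size, size).
 */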
23043 int i, len, ret = -EINVAL, err; in bpf_check()
23049 return -EINVAL; in bpf_check()
23056 return -ENOMEM; in bpf_check()
23058 env->bt.env = env; in bpf_check()
23060 len = (*prog)->len; in bpf_check()
23061 env->insn_aux_data = in bpf_check()
23063 ret = -ENOMEM; in bpf_check()
23064 if (!env->insn_aux_data) in bpf_check()
23067 env->insn_aux_data[i].orig_idx = i; in bpf_check()
23068 env->prog = *prog; in bpf_check()
23069 env->ops = bpf_verifier_ops[env->prog->type]; in bpf_check()
23071 env->allow_ptr_leaks = bpf_allow_ptr_leaks(env->prog->aux->token); in bpf_check()
23072 env->allow_uninit_stack = bpf_allow_uninit_stack(env->prog->aux->token); in bpf_check()
23073 env->bypass_spec_v1 = bpf_bypass_spec_v1(env->prog->aux->token); in bpf_check()
23074 env->bypass_spec_v4 = bpf_bypass_spec_v4(env->prog->aux->token); in bpf_check()
23075 env->bpf_capable = is_priv = bpf_token_capable(env->prog->aux->token, CAP_BPF); in bpf_check()
23086 ret = bpf_vlog_init(&env->log, attr->log_level, in bpf_check()
23087 (char __user *) (unsigned long) attr->log_buf, in bpf_check()
23088 attr->log_size); in bpf_check()
23100 verbose(env, "in-kernel BTF is malformed\n"); in bpf_check()
23105 env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT); in bpf_check()
23107 env->strict_alignment = true; in bpf_check()
23108 if (attr->prog_flags & BPF_F_ANY_ALIGNMENT) in bpf_check()
23109 env->strict_alignment = false; in bpf_check()
23112 env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ; in bpf_check()
23113 env->test_reg_invariants = attr->prog_flags & BPF_F_TEST_REG_INVARIANTS; in bpf_check()
23115 env->explored_states = kvcalloc(state_htab_size(env), in bpf_check()
23118 ret = -ENOMEM; in bpf_check()
23119 if (!env->explored_states) in bpf_check()
23142 if (bpf_prog_is_offloaded(env->prog->aux)) { in bpf_check()
23143 ret = bpf_prog_offload_verifier_prep(env->prog); in bpf_check()
23163 if (ret == 0 && bpf_prog_is_offloaded(env->prog->aux)) in bpf_check()
23167 kvfree(env->explored_states); in bpf_check()
23201 /* do 32-bit optimization after insn patching has done so those patched in bpf_check()
23204 if (ret == 0 && !bpf_prog_is_offloaded(env->prog->aux)) { in bpf_check()
23206 env->prog->aux->verifier_zext = bpf_jit_needs_zext() ? !ret in bpf_check()
23213 env->verification_time = ktime_get_ns() - start_time; in bpf_check()
23215 env->prog->aux->verified_insns = env->insn_processed; in bpf_check()
23218 err = bpf_vlog_finalize(&env->log, &log_true_size); in bpf_check()
23225 ret = -EFAULT; in bpf_check()
23232 if (env->used_map_cnt) { in bpf_check()
23234 env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt, in bpf_check()
23235 sizeof(env->used_maps[0]), in bpf_check()
23238 if (!env->prog->aux->used_maps) { in bpf_check()
23239 ret = -ENOMEM; in bpf_check()
23243 memcpy(env->prog->aux->used_maps, env->used_maps, in bpf_check()
23244 sizeof(env->used_maps[0]) * env->used_map_cnt); in bpf_check()
23245 env->prog->aux->used_map_cnt = env->used_map_cnt; in bpf_check()
23247 if (env->used_btf_cnt) { in bpf_check()
23249 env->prog->aux->used_btfs = kmalloc_array(env->used_btf_cnt, in bpf_check()
23250 sizeof(env->used_btfs[0]), in bpf_check()
23252 if (!env->prog->aux->used_btfs) { in bpf_check()
23253 ret = -ENOMEM; in bpf_check()
23257 memcpy(env->prog->aux->used_btfs, env->used_btfs, in bpf_check()
23258 sizeof(env->used_btfs[0]) * env->used_btf_cnt); in bpf_check()
23259 env->prog->aux->used_btf_cnt = env->used_btf_cnt; in bpf_check()
23261 if (env->used_map_cnt || env->used_btf_cnt) { in bpf_check()
23271 if (!env->prog->aux->used_maps) in bpf_check()
23276 if (!env->prog->aux->used_btfs) in bpf_check()
23282 if (env->prog->type == BPF_PROG_TYPE_EXT) in bpf_check()
23283 env->prog->expected_attach_type = 0; in bpf_check()
23285 *prog = env->prog; in bpf_check()
23287 module_put(env->attach_btf_mod); in bpf_check()
23291 vfree(env->insn_aux_data); in bpf_check()
23292 kvfree(env->insn_hist); in bpf_check()