
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
7 #include <linux/bpf-cgroup.h>
23 #include <linux/error-injection.h>
60 * The first pass is depth-first-search to check that the program is a DAG.
62 * - larger than BPF_MAXINSNS insns
63 * - if loop is present (detected via back-edge)
64 * - unreachable insns exist (shouldn't be a forest. program = one function)
65 * - out of bounds or malformed jumps
77 * All registers are 64-bit.
78 * R0 - return register
79 * R1-R5 argument passing registers
80 * R6-R9 callee saved registers
81 * R10 - frame pointer read-only
88 * BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
93 * (and -20 constant is saved for further stack bounds checking).
133 * [key, key + map->key_size) bytes are valid and were initialized on
139 * BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
143 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
144 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
146 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
151 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
159 * After the call R0 is set to return type of the function and registers R1-R5
165 * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
171 * passes through a NULL-check conditional. For the branch wherein the state is
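
The excerpt above walks through the canonical map-lookup sequence the verifier tracks. As a hedged illustration, here is that sequence written out with the instruction macros the comment itself quotes, assuming the BPF_* macros from include/linux/filter.h (or the copy in tools/include/linux/filter.h) and the UAPI helper id; the map fd value is only a placeholder:

/* Sketch of the map-lookup pattern described above (not taken from any real
 * program): set up a stack key, load the map pointer, call the helper, and
 * NULL-check R0 before dereferencing it.
 */
#define MAP_FD 3	/* placeholder file descriptor, for illustration only */

struct bpf_insn map_lookup_example[] = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),		/* R2 = r10 (frame pointer)       */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),		/* R2 = fp - 4 -> PTR_TO_STACK    */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),		/* initialize the 4-byte key      */
	BPF_LD_MAP_FD(BPF_REG_1, MAP_FD),		/* R1 -> CONST_PTR_TO_MAP         */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),		/* R0 is PTR_TO_MAP_VALUE_OR_NULL */
	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),	/* load is safe on non-NULL branch */
	BPF_MOV64_IMM(BPF_REG_0, 0),			/* both branches return 0         */
	BPF_EXIT_INSN(),
};
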
218 return aux->map_ptr_state.poison; in bpf_map_ptr_poisoned()
223 return aux->map_ptr_state.unpriv; in bpf_map_ptr_unpriv()
231 aux->map_ptr_state.unpriv = unpriv; in bpf_map_ptr_store()
232 aux->map_ptr_state.poison = poison; in bpf_map_ptr_store()
233 aux->map_ptr_state.map_ptr = map; in bpf_map_ptr_store()
238 return aux->map_key_state & BPF_MAP_KEY_POISON; in bpf_map_key_poisoned()
243 return !(aux->map_key_state & BPF_MAP_KEY_SEEN); in bpf_map_key_unseen()
248 return aux->map_key_state & ~(BPF_MAP_KEY_SEEN | BPF_MAP_KEY_POISON); in bpf_map_key_immediate()
255 aux->map_key_state = state | BPF_MAP_KEY_SEEN | in bpf_map_key_store()
261 return insn->code == (BPF_JMP | BPF_CALL) && in bpf_helper_call()
262 insn->src_reg == 0; in bpf_helper_call()
267 return insn->code == (BPF_JMP | BPF_CALL) && in bpf_pseudo_call()
268 insn->src_reg == BPF_PSEUDO_CALL; in bpf_pseudo_call()
273 return insn->code == (BPF_JMP | BPF_CALL) && in bpf_pseudo_kfunc_call()
274 insn->src_reg == BPF_PSEUDO_KFUNC_CALL; in bpf_pseudo_kfunc_call()
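
The three predicates above tell call flavors apart purely by src_reg. A hedged userspace sketch of the same decoding, using only UAPI definitions (the imm meanings in the strings follow the kernel's convention for these call kinds):

/* Standalone sketch: classify a BPF_JMP|BPF_CALL instruction by src_reg,
 * mirroring bpf_helper_call(), bpf_pseudo_call() and bpf_pseudo_kfunc_call().
 */
#include <stdio.h>
#include <linux/bpf.h>	/* struct bpf_insn, BPF_JMP, BPF_CALL, BPF_PSEUDO_* */

static const char *classify_call(const struct bpf_insn *insn)
{
	if (insn->code != (BPF_JMP | BPF_CALL))
		return "not a call";
	switch (insn->src_reg) {
	case 0:
		return "helper call (imm = helper id)";
	case BPF_PSEUDO_CALL:
		return "bpf-to-bpf call (imm = relative insn offset)";
	case BPF_PSEUDO_KFUNC_CALL:
		return "kfunc call (imm = BTF id of the kernel function)";
	default:
		return "unknown call flavor";
	}
}

int main(void)
{
	struct bpf_insn helper = { .code = BPF_JMP | BPF_CALL, .imm = 1 };
	struct bpf_insn pseudo = { .code = BPF_JMP | BPF_CALL,
				   .src_reg = BPF_PSEUDO_CALL, .imm = 5 };

	printf("%s\n%s\n", classify_call(&helper), classify_call(&pseudo));
	return 0;
}
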
318 /* arg_{btf,btf_id,owning_ref} are used by kfunc-specific handling,
319 * generally to pass info about user-defined local kptr types to later
359 return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off); in btf_type_name()
370 if (!bpf_verifier_log_needed(&env->log)) in verbose()
374 bpf_verifier_vlog(&env->log, fmt, args); in verbose()
386 if (reg->smin_value > S64_MIN) { in verbose_invalid_scalar()
387 verbose(env, " smin=%lld", reg->smin_value); in verbose_invalid_scalar()
390 if (reg->smax_value < S64_MAX) { in verbose_invalid_scalar()
391 verbose(env, " smax=%lld", reg->smax_value); in verbose_invalid_scalar()
403 type = reg->type; in reg_not_null()
414 (type == PTR_TO_MEM && !(reg->type & PTR_UNTRUSTED)) || in reg_not_null()
423 if (reg->type == PTR_TO_MAP_VALUE) { in reg_btf_record()
424 rec = reg->map_ptr->record; in reg_btf_record()
425 } else if (type_is_ptr_alloc_obj(reg->type)) { in reg_btf_record()
426 meta = btf_find_struct_meta(reg->btf, reg->btf_id); in reg_btf_record()
428 rec = meta->record; in reg_btf_record()
435 struct bpf_func_info_aux *aux = env->prog->aux->func_info_aux; in subprog_is_global()
444 if (!env->prog->aux->func_info) in subprog_name()
447 info = &env->prog->aux->func_info[subprog]; in subprog_name()
448 return btf_type_name(env->prog->aux->btf, info->type_id); in subprog_name()
455 info->is_cb = true; in mark_subprog_exc_cb()
456 info->is_async_cb = true; in mark_subprog_exc_cb()
457 info->is_exception_cb = true; in mark_subprog_exc_cb()
462 return subprog_info(env, subprog)->is_exception_cb; in subprog_is_exc_cb()
478 enum bpf_map_type map_type = map ? map->map_type : BPF_MAP_TYPE_UNSPEC; in is_acquire_function()
540 return (bpf_helper_call(insn) && is_sync_callback_calling_function(insn->imm)) || in is_sync_callback_calling_insn()
541 (bpf_pseudo_kfunc_call(insn) && is_sync_callback_calling_kfunc(insn->imm)); in is_sync_callback_calling_insn()
546 return (bpf_helper_call(insn) && is_async_callback_calling_function(insn->imm)) || in is_async_callback_calling_insn()
547 (bpf_pseudo_kfunc_call(insn) && is_async_callback_calling_kfunc(insn->imm)); in is_async_callback_calling_insn()
552 return insn->code == (BPF_JMP | BPF_JCOND) && insn->src_reg == BPF_MAY_GOTO; in is_may_goto_insn()
557 return is_may_goto_insn(&env->prog->insnsi[insn_idx]); in is_may_goto_insn_at()
585 return BPF_CLASS(insn->code) == BPF_STX && in is_cmpxchg_insn()
586 BPF_MODE(insn->code) == BPF_ATOMIC && in is_cmpxchg_insn()
587 insn->imm == BPF_CMPXCHG; in is_cmpxchg_insn()
592 return BPF_CLASS(insn->code) == BPF_STX && in is_atomic_load_insn()
593 BPF_MODE(insn->code) == BPF_ATOMIC && in is_atomic_load_insn()
594 insn->imm == BPF_LOAD_ACQ; in is_atomic_load_insn()
599 return (-off - 1) / BPF_REG_SIZE; in __get_spi()
605 struct bpf_verifier_state *cur = env->cur_state; in func()
607 return cur->frame[reg->frameno]; in func()
612 int allocated_slots = state->allocated_stack / BPF_REG_SIZE; in is_spi_bounds_valid()
614 /* We need to check that slots between [spi - nr_slots + 1, spi] are in is_spi_bounds_valid()
619 * spi and the second slot will be at spi - 1. in is_spi_bounds_valid()
621 return spi - nr_slots + 1 >= 0 && spi < allocated_slots; in is_spi_bounds_valid()
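
The slot-index formula in __get_spi() and the bounds test above are easy to sanity-check in isolation; a small standalone sketch with example offsets (BPF_REG_SIZE is 8 in the kernel, the offsets below are made up):

/* Sketch: mapping a negative frame-pointer offset to a stack slot index (spi)
 * and checking that slots [spi - nr_slots + 1, spi] fall inside the frame.
 */
#include <stdio.h>
#include <stdbool.h>

#define BPF_REG_SIZE 8

static int get_spi(int off)
{
	return (-off - 1) / BPF_REG_SIZE;
}

static bool spi_bounds_valid(int spi, int nr_slots, int allocated_stack)
{
	int allocated_slots = allocated_stack / BPF_REG_SIZE;

	return spi - nr_slots + 1 >= 0 && spi < allocated_slots;
}

int main(void)
{
	printf("off=-1  -> spi %d\n", get_spi(-1));	/* 0: fp-1..fp-8 is slot 0  */
	printf("off=-16 -> spi %d\n", get_spi(-16));	/* 1: fp-9..fp-16 is slot 1 */

	/* an object spanning two slots fits at spi=1 with 16 bytes of stack ... */
	printf("%d\n", spi_bounds_valid(1, 2, 16));	/* 1 */
	/* ... but at spi=0 its second slot would sit below slot 0, so invalid   */
	printf("%d\n", spi_bounds_valid(0, 2, 16));	/* 0 */
	return 0;
}
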
629 if (!tnum_is_const(reg->var_off)) { in stack_slot_obj_get_spi()
631 return -EINVAL; in stack_slot_obj_get_spi()
634 off = reg->off + reg->var_off.value; in stack_slot_obj_get_spi()
637 return -EINVAL; in stack_slot_obj_get_spi()
643 return -EINVAL; in stack_slot_obj_get_spi()
647 return -ERANGE; in stack_slot_obj_get_spi()
719 int id = ++env->id_gen; in mark_dynptr_stack_regs()
729 __mark_dynptr_reg(reg, type, true, ++env->id_gen); in mark_dynptr_cb_reg()
746 /* We cannot assume both spi and spi - 1 belong to the same dynptr, in mark_stack_slots_dynptr()
758 err = destroy_if_dynptr_stack_slot(env, state, spi - 1); in mark_stack_slots_dynptr()
763 state->stack[spi].slot_type[i] = STACK_DYNPTR; in mark_stack_slots_dynptr()
764 state->stack[spi - 1].slot_type[i] = STACK_DYNPTR; in mark_stack_slots_dynptr()
769 return -EINVAL; in mark_stack_slots_dynptr()
771 mark_dynptr_stack_regs(env, &state->stack[spi].spilled_ptr, in mark_stack_slots_dynptr()
772 &state->stack[spi - 1].spilled_ptr, type); in mark_stack_slots_dynptr()
786 state->stack[spi].spilled_ptr.ref_obj_id = id; in mark_stack_slots_dynptr()
787 state->stack[spi - 1].spilled_ptr.ref_obj_id = id; in mark_stack_slots_dynptr()
790 bpf_mark_stack_write(env, state->frameno, BIT(spi - 1) | BIT(spi)); in mark_stack_slots_dynptr()
800 state->stack[spi].slot_type[i] = STACK_INVALID; in invalidate_dynptr()
801 state->stack[spi - 1].slot_type[i] = STACK_INVALID; in invalidate_dynptr()
804 __mark_reg_not_init(env, &state->stack[spi].spilled_ptr); in invalidate_dynptr()
805 __mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr); in invalidate_dynptr()
807 bpf_mark_stack_write(env, state->frameno, BIT(spi - 1) | BIT(spi)); in invalidate_dynptr()
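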
819 if (!dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type)) { in unmark_stack_slots_dynptr()
824 ref_obj_id = state->stack[spi].spilled_ptr.ref_obj_id; in unmark_stack_slots_dynptr()
837 for (i = 1; i < state->allocated_stack / BPF_REG_SIZE; i++) { in unmark_stack_slots_dynptr()
838 if (state->stack[i].spilled_ptr.ref_obj_id != ref_obj_id) in unmark_stack_slots_dynptr()
845 if (state->stack[i].slot_type[0] != STACK_DYNPTR) { in unmark_stack_slots_dynptr()
847 return -EFAULT; in unmark_stack_slots_dynptr()
849 if (state->stack[i].spilled_ptr.dynptr.first_slot) in unmark_stack_slots_dynptr()
861 if (!env->allow_ptr_leaks) in mark_reg_invalid()
879 if (state->stack[spi].slot_type[0] != STACK_DYNPTR) in destroy_if_dynptr_stack_slot()
883 if (!state->stack[spi].spilled_ptr.dynptr.first_slot) in destroy_if_dynptr_stack_slot()
886 if (dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type)) { in destroy_if_dynptr_stack_slot()
888 return -EINVAL; in destroy_if_dynptr_stack_slot()
892 mark_stack_slot_scratched(env, spi - 1); in destroy_if_dynptr_stack_slot()
896 state->stack[spi].slot_type[i] = STACK_INVALID; in destroy_if_dynptr_stack_slot()
897 state->stack[spi - 1].slot_type[i] = STACK_INVALID; in destroy_if_dynptr_stack_slot()
900 dynptr_id = state->stack[spi].spilled_ptr.id; in destroy_if_dynptr_stack_slot()
902 bpf_for_each_reg_in_vstate(env->cur_state, fstate, dreg, ({ in destroy_if_dynptr_stack_slot()
904 if (dreg->type != (PTR_TO_MEM | PTR_MAYBE_NULL) && dreg->type != PTR_TO_MEM) in destroy_if_dynptr_stack_slot()
906 if (dreg->dynptr_id == dynptr_id) in destroy_if_dynptr_stack_slot()
913 __mark_reg_not_init(env, &state->stack[spi].spilled_ptr); in destroy_if_dynptr_stack_slot()
914 __mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr); in destroy_if_dynptr_stack_slot()
916 bpf_mark_stack_write(env, state->frameno, BIT(spi - 1) | BIT(spi)); in destroy_if_dynptr_stack_slot()
925 if (reg->type == CONST_PTR_TO_DYNPTR) in is_dynptr_reg_valid_uninit()
930 /* -ERANGE (i.e. spi not falling into allocated stack slots) isn't an in is_dynptr_reg_valid_uninit()
934 if (spi < 0 && spi != -ERANGE) in is_dynptr_reg_valid_uninit()
960 if (reg->type == CONST_PTR_TO_DYNPTR) in is_dynptr_reg_valid_init()
966 if (!state->stack[spi].spilled_ptr.dynptr.first_slot) in is_dynptr_reg_valid_init()
970 if (state->stack[spi].slot_type[i] != STACK_DYNPTR || in is_dynptr_reg_valid_init()
971 state->stack[spi - 1].slot_type[i] != STACK_DYNPTR) in is_dynptr_reg_valid_init()
990 if (reg->type == CONST_PTR_TO_DYNPTR) { in is_dynptr_type_expected()
991 return reg->dynptr.type == dynptr_type; in is_dynptr_type_expected()
996 return state->stack[spi].spilled_ptr.dynptr.type == dynptr_type; in is_dynptr_type_expected()
1023 struct bpf_stack_state *slot = &state->stack[spi - i]; in mark_stack_slots_iter()
1024 struct bpf_reg_state *st = &slot->spilled_ptr; in mark_stack_slots_iter()
1027 st->type = PTR_TO_STACK; /* we don't have dedicated reg type */ in mark_stack_slots_iter()
1030 st->type |= MEM_RCU; in mark_stack_slots_iter()
1032 st->type |= PTR_UNTRUSTED; in mark_stack_slots_iter()
1034 st->ref_obj_id = i == 0 ? id : 0; in mark_stack_slots_iter()
1035 st->iter.btf = btf; in mark_stack_slots_iter()
1036 st->iter.btf_id = btf_id; in mark_stack_slots_iter()
1037 st->iter.state = BPF_ITER_STATE_ACTIVE; in mark_stack_slots_iter()
1038 st->iter.depth = 0; in mark_stack_slots_iter()
1041 slot->slot_type[j] = STACK_ITER; in mark_stack_slots_iter()
1043 bpf_mark_stack_write(env, state->frameno, BIT(spi - i)); in mark_stack_slots_iter()
1044 mark_stack_slot_scratched(env, spi - i); in mark_stack_slots_iter()
1061 struct bpf_stack_state *slot = &state->stack[spi - i]; in unmark_stack_slots_iter()
1062 struct bpf_reg_state *st = &slot->spilled_ptr; in unmark_stack_slots_iter()
1065 WARN_ON_ONCE(release_reference(env, st->ref_obj_id)); in unmark_stack_slots_iter()
1070 slot->slot_type[j] = STACK_INVALID; in unmark_stack_slots_iter()
1072 bpf_mark_stack_write(env, state->frameno, BIT(spi - i)); in unmark_stack_slots_iter()
1073 mark_stack_slot_scratched(env, spi - i); in unmark_stack_slots_iter()
1085 /* For -ERANGE (i.e. spi not falling into allocated stack slots), we in is_iter_reg_valid_uninit()
1090 if (spi == -ERANGE) in is_iter_reg_valid_uninit()
1096 struct bpf_stack_state *slot = &state->stack[spi - i]; in is_iter_reg_valid_uninit()
1099 if (slot->slot_type[j] == STACK_ITER) in is_iter_reg_valid_uninit()
1114 return -EINVAL; in is_iter_reg_valid_init()
1117 struct bpf_stack_state *slot = &state->stack[spi - i]; in is_iter_reg_valid_init()
1118 struct bpf_reg_state *st = &slot->spilled_ptr; in is_iter_reg_valid_init()
1120 if (st->type & PTR_UNTRUSTED) in is_iter_reg_valid_init()
1121 return -EPROTO; in is_iter_reg_valid_init()
1123 if (i == 0 && !st->ref_obj_id) in is_iter_reg_valid_init()
1124 return -EINVAL; in is_iter_reg_valid_init()
1125 if (i != 0 && st->ref_obj_id) in is_iter_reg_valid_init()
1126 return -EINVAL; in is_iter_reg_valid_init()
1127 if (st->iter.btf != btf || st->iter.btf_id != btf_id) in is_iter_reg_valid_init()
1128 return -EINVAL; in is_iter_reg_valid_init()
1131 if (slot->slot_type[j] != STACK_ITER) in is_iter_reg_valid_init()
1132 return -EINVAL; in is_iter_reg_valid_init()
1159 slot = &state->stack[spi]; in mark_stack_slot_irq_flag()
1160 st = &slot->spilled_ptr; in mark_stack_slot_irq_flag()
1162 bpf_mark_stack_write(env, reg->frameno, BIT(spi)); in mark_stack_slot_irq_flag()
1164 st->type = PTR_TO_STACK; /* we don't have dedicated reg type */ in mark_stack_slot_irq_flag()
1165 st->ref_obj_id = id; in mark_stack_slot_irq_flag()
1166 st->irq.kfunc_class = kfunc_class; in mark_stack_slot_irq_flag()
1169 slot->slot_type[i] = STACK_IRQ_FLAG; in mark_stack_slot_irq_flag()
1187 slot = &state->stack[spi]; in unmark_stack_slot_irq_flag()
1188 st = &slot->spilled_ptr; in unmark_stack_slot_irq_flag()
1190 if (st->irq.kfunc_class != kfunc_class) { in unmark_stack_slot_irq_flag()
1191 const char *flag_kfunc = st->irq.kfunc_class == IRQ_NATIVE_KFUNC ? "native" : "lock"; in unmark_stack_slot_irq_flag()
1196 return -EINVAL; in unmark_stack_slot_irq_flag()
1199 err = release_irq_state(env->cur_state, st->ref_obj_id); in unmark_stack_slot_irq_flag()
1200 WARN_ON_ONCE(err && err != -EACCES); in unmark_stack_slot_irq_flag()
1204 for (int i = 0; i < env->cur_state->acquired_refs; i++) { in unmark_stack_slot_irq_flag()
1205 if (env->cur_state->refs[i].id == env->cur_state->active_irq_id) { in unmark_stack_slot_irq_flag()
1206 insn_idx = env->cur_state->refs[i].insn_idx; in unmark_stack_slot_irq_flag()
1212 env->cur_state->active_irq_id, insn_idx); in unmark_stack_slot_irq_flag()
1218 bpf_mark_stack_write(env, reg->frameno, BIT(spi)); in unmark_stack_slot_irq_flag()
1221 slot->slot_type[i] = STACK_INVALID; in unmark_stack_slot_irq_flag()
1233 /* For -ERANGE (i.e. spi not falling into allocated stack slots), we in is_irq_flag_reg_valid_uninit()
1238 if (spi == -ERANGE) in is_irq_flag_reg_valid_uninit()
1243 slot = &state->stack[spi]; in is_irq_flag_reg_valid_uninit()
1246 if (slot->slot_type[i] == STACK_IRQ_FLAG) in is_irq_flag_reg_valid_uninit()
1260 return -EINVAL; in is_irq_flag_reg_valid_init()
1262 slot = &state->stack[spi]; in is_irq_flag_reg_valid_init()
1263 st = &slot->spilled_ptr; in is_irq_flag_reg_valid_init()
1265 if (!st->ref_obj_id) in is_irq_flag_reg_valid_init()
1266 return -EINVAL; in is_irq_flag_reg_valid_init()
1269 if (slot->slot_type[i] != STACK_IRQ_FLAG) in is_irq_flag_reg_valid_init()
1270 return -EINVAL; in is_irq_flag_reg_valid_init()
1275 * - spilled register state (STACK_SPILL);
1276 * - dynptr state (STACK_DYNPTR);
1277  * - iter state (STACK_ITER);
1278  * - irq flag state (STACK_IRQ_FLAG).
1282 enum bpf_stack_slot_type type = stack->slot_type[BPF_REG_SIZE - 1]; in is_stack_slot_special()
1305 return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL; in is_spilled_reg()
1310 return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL && in is_spilled_scalar_reg()
1311 stack->spilled_ptr.type == SCALAR_VALUE; in is_spilled_scalar_reg()
1316 return stack->slot_type[0] == STACK_SPILL && in is_spilled_scalar_reg64()
1317 stack->spilled_ptr.type == SCALAR_VALUE; in is_spilled_scalar_reg64()
1397 memset(arr + old_n * size, 0, (new_n - old_n) * size); in realloc_array()
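
realloc_array() grows an array and clears only the newly added tail, so existing entries survive the resize. A simplified userspace rendition of that pattern (allocator choice and error handling of the kernel version are not reproduced):

/* Sketch: reallocate to the new element count and zero only the added tail. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void *grow_array(void *arr, size_t old_n, size_t new_n, size_t size)
{
	void *p = realloc(arr, new_n * size);

	if (!p)
		return NULL;
	if (new_n > old_n)
		memset((char *)p + old_n * size, 0, (new_n - old_n) * size);
	return p;
}

int main(void)
{
	int *a = grow_array(NULL, 0, 4, sizeof(*a));

	if (!a)
		return 1;
	a[0] = 7;
	a = grow_array(a, 4, 8, sizeof(*a));
	if (!a)
		return 1;
	printf("%d %d\n", a[0], a[7]);	/* 7 0 -- old data kept, tail zeroed */
	free(a);
	return 0;
}
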
1405 dst->refs = copy_array(dst->refs, src->refs, src->acquired_refs, in copy_reference_state()
1407 if (!dst->refs) in copy_reference_state()
1408 return -ENOMEM; in copy_reference_state()
1410 dst->acquired_refs = src->acquired_refs; in copy_reference_state()
1411 dst->active_locks = src->active_locks; in copy_reference_state()
1412 dst->active_preempt_locks = src->active_preempt_locks; in copy_reference_state()
1413 dst->active_rcu_lock = src->active_rcu_lock; in copy_reference_state()
1414 dst->active_irq_id = src->active_irq_id; in copy_reference_state()
1415 dst->active_lock_id = src->active_lock_id; in copy_reference_state()
1416 dst->active_lock_ptr = src->active_lock_ptr; in copy_reference_state()
1422 size_t n = src->allocated_stack / BPF_REG_SIZE; in copy_stack_state()
1424 dst->stack = copy_array(dst->stack, src->stack, n, sizeof(struct bpf_stack_state), in copy_stack_state()
1426 if (!dst->stack) in copy_stack_state()
1427 return -ENOMEM; in copy_stack_state()
1429 dst->allocated_stack = src->allocated_stack; in copy_stack_state()
1435 state->refs = realloc_array(state->refs, state->acquired_refs, n, in resize_reference_state()
1437 if (!state->refs) in resize_reference_state()
1438 return -ENOMEM; in resize_reference_state()
1440 state->acquired_refs = n; in resize_reference_state()
1444 /* Possibly update state->allocated_stack to be at least size bytes. Also
1445 * possibly update the function's high-water mark in its bpf_subprog_info.
1449 size_t old_n = state->allocated_stack / BPF_REG_SIZE, n; in grow_stack_state()
1458 state->stack = realloc_array(state->stack, old_n, n, sizeof(struct bpf_stack_state)); in grow_stack_state()
1459 if (!state->stack) in grow_stack_state()
1460 return -ENOMEM; in grow_stack_state()
1462 state->allocated_stack = size; in grow_stack_state()
1465 if (env->subprog_info[state->subprogno].stack_depth < size) in grow_stack_state()
1466 env->subprog_info[state->subprogno].stack_depth = size; in grow_stack_state()
1471 /* Acquire a pointer id from the env and update the state->refs to include
1478 struct bpf_verifier_state *state = env->cur_state; in acquire_reference_state()
1479 int new_ofs = state->acquired_refs; in acquire_reference_state()
1482 err = resize_reference_state(state, state->acquired_refs + 1); in acquire_reference_state()
1485 state->refs[new_ofs].insn_idx = insn_idx; in acquire_reference_state()
1487 return &state->refs[new_ofs]; in acquire_reference_state()
1496 return -ENOMEM; in acquire_reference()
1497 s->type = REF_TYPE_PTR; in acquire_reference()
1498 s->id = ++env->id_gen; in acquire_reference()
1499 return s->id; in acquire_reference()
1505 struct bpf_verifier_state *state = env->cur_state; in acquire_lock_state()
1510 return -ENOMEM; in acquire_lock_state()
1511 s->type = type; in acquire_lock_state()
1512 s->id = id; in acquire_lock_state()
1513 s->ptr = ptr; in acquire_lock_state()
1515 state->active_locks++; in acquire_lock_state()
1516 state->active_lock_id = id; in acquire_lock_state()
1517 state->active_lock_ptr = ptr; in acquire_lock_state()
1523 struct bpf_verifier_state *state = env->cur_state; in acquire_irq_state()
1528 return -ENOMEM; in acquire_irq_state()
1529 s->type = REF_TYPE_IRQ; in acquire_irq_state()
1530 s->id = ++env->id_gen; in acquire_irq_state()
1532 state->active_irq_id = s->id; in acquire_irq_state()
1533 return s->id; in acquire_irq_state()
1543 * it can detect out-of-order IRQ restore. Hence use memmove to shift in release_reference_state()
1546 last_idx = state->acquired_refs - 1; in release_reference_state()
1547 rem = state->acquired_refs - idx - 1; in release_reference_state()
1549 memmove(&state->refs[idx], &state->refs[idx + 1], sizeof(*state->refs) * rem); in release_reference_state()
1550 memset(&state->refs[last_idx], 0, sizeof(*state->refs)); in release_reference_state()
1551 state->acquired_refs--; in release_reference_state()
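
The comment above notes that reference entries must keep their acquisition order so an out-of-order IRQ restore can be detected, which is why removal shifts the tail down with memmove instead of swapping with the last entry. A standalone rendition of that removal:

/* Sketch: drop entry idx from a reference array while preserving the order
 * of the remaining entries, as release_reference_state() does above.
 */
#include <stdio.h>
#include <string.h>

struct ref { int id; };

static void release_at(struct ref *refs, int *cnt, int idx)
{
	int last = *cnt - 1;
	int rem = *cnt - idx - 1;

	if (rem > 0)
		memmove(&refs[idx], &refs[idx + 1], sizeof(*refs) * rem);
	memset(&refs[last], 0, sizeof(*refs));
	(*cnt)--;
}

int main(void)
{
	struct ref refs[] = { {1}, {2}, {3}, {4} };
	int cnt = 4;

	release_at(refs, &cnt, 1);		/* drop id 2 */
	for (int i = 0; i < cnt; i++)
		printf("%d ", refs[i].id);	/* 1 3 4 */
	printf("\n");
	return 0;
}
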
1559 for (i = 0; i < state->acquired_refs; i++) in find_reference_state()
1560 if (state->refs[i].id == ptr_id) in find_reference_state()
1572 for (i = 0; i < state->acquired_refs; i++) { in release_lock_state()
1573 if (state->refs[i].type == type && state->refs[i].id == id && in release_lock_state()
1574 state->refs[i].ptr == ptr) { in release_lock_state()
1576 state->active_locks--; in release_lock_state()
1578 state->active_lock_id = prev_id; in release_lock_state()
1579 state->active_lock_ptr = prev_ptr; in release_lock_state()
1582 if (state->refs[i].type & REF_TYPE_LOCK_MASK) { in release_lock_state()
1583 prev_id = state->refs[i].id; in release_lock_state()
1584 prev_ptr = state->refs[i].ptr; in release_lock_state()
1587 return -EINVAL; in release_lock_state()
1595 if (id != state->active_irq_id) in release_irq_state()
1596 return -EACCES; in release_irq_state()
1598 for (i = 0; i < state->acquired_refs; i++) { in release_irq_state()
1599 if (state->refs[i].type != REF_TYPE_IRQ) in release_irq_state()
1601 if (state->refs[i].id == id) { in release_irq_state()
1603 state->active_irq_id = prev_id; in release_irq_state()
1606 prev_id = state->refs[i].id; in release_irq_state()
1609 return -EINVAL; in release_irq_state()
1617 for (i = 0; i < state->acquired_refs; i++) { in find_lock_state()
1618 struct bpf_reference_state *s = &state->refs[i]; in find_lock_state()
1620 if (!(s->type & type)) in find_lock_state()
1623 if (s->id == id && s->ptr == ptr) in find_lock_state()
1633 cur_states = env->explored_states_size + env->free_list_size + env->num_backedges; in update_peak_states()
1634 env->peak_states = max(env->peak_states, cur_states); in update_peak_states()
1641 kfree(state->stack); in free_func_state()
1647 kfree(state->jmp_history); in clear_jmp_history()
1648 state->jmp_history = NULL; in clear_jmp_history()
1649 state->jmp_history_cnt = 0; in clear_jmp_history()
1657 for (i = 0; i <= state->curframe; i++) { in free_verifier_state()
1658 free_func_state(state->frame[i]); in free_verifier_state()
1659 state->frame[i] = NULL; in free_verifier_state()
1661 kfree(state->refs); in free_verifier_state()
1667 /* struct bpf_verifier_state->parent refers to states
1668 * that are in either of env->{explored_states,free_list}.
1673 if (st->parent) in state_parent_as_list()
1674 return container_of(st->parent, struct bpf_verifier_state_list, state); in state_parent_as_list()
1682 * - is in the env->free_list;
1683 * - has no children states;
1688 if (!sl->in_free_list in maybe_free_verifier_state()
1689 || sl->state.branches != 0 in maybe_free_verifier_state()
1690 || incomplete_read_marks(env, &sl->state)) in maybe_free_verifier_state()
1692 list_del(&sl->node); in maybe_free_verifier_state()
1693 free_verifier_state(&sl->state, false); in maybe_free_verifier_state()
1695 env->free_list_size--; in maybe_free_verifier_state()
1714 dst_state->jmp_history = copy_array(dst_state->jmp_history, src->jmp_history, in copy_verifier_state()
1715 src->jmp_history_cnt, sizeof(*dst_state->jmp_history), in copy_verifier_state()
1717 if (!dst_state->jmp_history) in copy_verifier_state()
1718 return -ENOMEM; in copy_verifier_state()
1719 dst_state->jmp_history_cnt = src->jmp_history_cnt; in copy_verifier_state()
1724 for (i = src->curframe + 1; i <= dst_state->curframe; i++) { in copy_verifier_state()
1725 free_func_state(dst_state->frame[i]); in copy_verifier_state()
1726 dst_state->frame[i] = NULL; in copy_verifier_state()
1731 dst_state->speculative = src->speculative; in copy_verifier_state()
1732 dst_state->in_sleepable = src->in_sleepable; in copy_verifier_state()
1733 dst_state->cleaned = src->cleaned; in copy_verifier_state()
1734 dst_state->curframe = src->curframe; in copy_verifier_state()
1735 dst_state->branches = src->branches; in copy_verifier_state()
1736 dst_state->parent = src->parent; in copy_verifier_state()
1737 dst_state->first_insn_idx = src->first_insn_idx; in copy_verifier_state()
1738 dst_state->last_insn_idx = src->last_insn_idx; in copy_verifier_state()
1739 dst_state->dfs_depth = src->dfs_depth; in copy_verifier_state()
1740 dst_state->callback_unroll_depth = src->callback_unroll_depth; in copy_verifier_state()
1741 dst_state->may_goto_depth = src->may_goto_depth; in copy_verifier_state()
1742 dst_state->equal_state = src->equal_state; in copy_verifier_state()
1743 for (i = 0; i <= src->curframe; i++) { in copy_verifier_state()
1744 dst = dst_state->frame[i]; in copy_verifier_state()
1748 return -ENOMEM; in copy_verifier_state()
1749 dst_state->frame[i] = dst; in copy_verifier_state()
1751 err = copy_func_state(dst, src->frame[i]); in copy_verifier_state()
1760 return env->prog->len; in state_htab_size()
1765 struct bpf_verifier_state *cur = env->cur_state; in explored_state()
1766 struct bpf_func_state *state = cur->frame[cur->curframe]; in explored_state()
1768 return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)]; in explored_state()
1775 if (a->curframe != b->curframe) in same_callsites()
1778 for (fr = a->curframe; fr >= 0; fr--) in same_callsites()
1779 if (a->frame[fr]->callsite != b->frame[fr]->callsite) in same_callsites()
1788 return frame == st->curframe in frame_insn_idx()
1789 ? st->insn_idx in frame_insn_idx()
1790 : st->frame[frame + 1]->callsite; in frame_insn_idx()
1813 for (i = 0; i <= st->curframe; i++) { in compute_scc_callchain()
1815 scc = env->insn_aux_data[insn_idx].scc; in compute_scc_callchain()
1817 callchain->scc = scc; in compute_scc_callchain()
1819 } else if (i < st->curframe) { in compute_scc_callchain()
1820 callchain->callsites[i] = insn_idx; in compute_scc_callchain()
1832 struct bpf_scc_info *info = env->scc_info[callchain->scc]; in scc_visit_lookup()
1833 struct bpf_scc_visit *visits = info->visits; in scc_visit_lookup()
1838 for (i = 0; i < info->num_visits; i++) in scc_visit_lookup()
1856 scc = callchain->scc; in scc_visit_alloc()
1857 info = env->scc_info[scc]; in scc_visit_alloc()
1858 num_visits = info ? info->num_visits : 0; in scc_visit_alloc()
1860 info = kvrealloc(env->scc_info[scc], new_sz, GFP_KERNEL_ACCOUNT); in scc_visit_alloc()
1863 env->scc_info[scc] = info; in scc_visit_alloc()
1864 info->num_visits = num_visits + 1; in scc_visit_alloc()
1865 visit = &info->visits[num_visits]; in scc_visit_alloc()
1867 memcpy(&visit->callchain, callchain, sizeof(*callchain)); in scc_visit_alloc()
1871 /* Form a string '(callsite#1,callsite#2,...,scc)' in env->tmp_str_buf */
1874 char *buf = env->tmp_str_buf; in format_callchain()
1877 delta += snprintf(buf + delta, TMP_STR_BUF_LEN - delta, "("); in format_callchain()
1878 for (i = 0; i < ARRAY_SIZE(callchain->callsites); i++) { in format_callchain()
1879 if (!callchain->callsites[i]) in format_callchain()
1881 delta += snprintf(buf + delta, TMP_STR_BUF_LEN - delta, "%u,", in format_callchain()
1882 callchain->callsites[i]); in format_callchain()
1884 delta += snprintf(buf + delta, TMP_STR_BUF_LEN - delta, "%u)", callchain->scc); in format_callchain()
1885 return env->tmp_str_buf; in format_callchain()
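
format_callchain() builds its "(callsite,...,scc)" string with an accumulating snprintf offset into a fixed temporary buffer. A standalone sketch of the same pattern, with invented callsite and scc numbers (the buffer size here is just illustrative):

/* Sketch: accumulate pieces into one buffer by advancing a running offset. */
#include <stdio.h>

#define BUF_LEN 320	/* illustrative size, not the kernel's constant per se */

int main(void)
{
	unsigned int callsites[] = { 8, 42, 0 };	/* 0 ends the list, like the kernel loop */
	unsigned int scc = 3;
	char buf[BUF_LEN];
	int delta = 0;

	delta += snprintf(buf + delta, BUF_LEN - delta, "(");
	for (int i = 0; i < 3 && callsites[i]; i++)
		delta += snprintf(buf + delta, BUF_LEN - delta, "%u,", callsites[i]);
	delta += snprintf(buf + delta, BUF_LEN - delta, "%u)", scc);
	printf("%s\n", buf);	/* (8,42,3) */
	return 0;
}
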
1890 * If instance does not exist or is empty, assign visit->entry_state to @st.
1894 struct bpf_scc_callchain *callchain = &env->callchain_buf; in maybe_enter_scc()
1902 return -ENOMEM; in maybe_enter_scc()
1903 if (!visit->entry_state) { in maybe_enter_scc()
1904 visit->entry_state = st; in maybe_enter_scc()
1905 if (env->log.level & BPF_LOG_LEVEL2) in maybe_enter_scc()
1914 * - set visit->entry_state to NULL;
1915 * - flush accumulated backedges.
1919 struct bpf_scc_callchain *callchain = &env->callchain_buf; in maybe_exit_scc()
1928 * must exist for non-speculative paths. For non-speculative paths in maybe_exit_scc()
1938 if (!st->speculative) { in maybe_exit_scc()
1941 return -EFAULT; in maybe_exit_scc()
1945 if (visit->entry_state != st) in maybe_exit_scc()
1947 if (env->log.level & BPF_LOG_LEVEL2) in maybe_exit_scc()
1949 visit->entry_state = NULL; in maybe_exit_scc()
1950 env->num_backedges -= visit->num_backedges; in maybe_exit_scc()
1951 visit->num_backedges = 0; in maybe_exit_scc()
1957 * and add @backedge to visit->backedges. @st callchain must exist.
1963 struct bpf_scc_callchain *callchain = &env->callchain_buf; in add_scc_backedge()
1968 st->insn_idx); in add_scc_backedge()
1969 return -EFAULT; in add_scc_backedge()
1975 return -EFAULT; in add_scc_backedge()
1977 if (env->log.level & BPF_LOG_LEVEL2) in add_scc_backedge()
1979 backedge->next = visit->backedges; in add_scc_backedge()
1980 visit->backedges = backedge; in add_scc_backedge()
1981 visit->num_backedges++; in add_scc_backedge()
1982 env->num_backedges++; in add_scc_backedge()
1987 /* bpf_reg_state->live marks for registers in a state @st are incomplete,
1994 struct bpf_scc_callchain *callchain = &env->callchain_buf; in incomplete_read_marks()
2002 return !!visit->backedges; in incomplete_read_marks()
2009 for (backedge = visit->backedges; backedge; backedge = next) { in free_backedges()
2010 free_verifier_state(&backedge->state, false); in free_backedges()
2011 next = backedge->next; in free_backedges()
2014 visit->backedges = NULL; in free_backedges()
2024 u32 br = --st->branches; in update_branch_counts()
2035 parent = st->parent; in update_branch_counts()
2048 struct bpf_verifier_state *cur = env->cur_state; in pop_stack()
2049 struct bpf_verifier_stack_elem *elem, *head = env->head; in pop_stack()
2052 if (env->head == NULL) in pop_stack()
2053 return -ENOENT; in pop_stack()
2056 err = copy_verifier_state(cur, &head->st); in pop_stack()
2061 bpf_vlog_reset(&env->log, head->log_pos); in pop_stack()
2063 *insn_idx = head->insn_idx; in pop_stack()
2065 *prev_insn_idx = head->prev_insn_idx; in pop_stack()
2066 elem = head->next; in pop_stack()
2067 free_verifier_state(&head->st, false); in pop_stack()
2069 env->head = elem; in pop_stack()
2070 env->stack_size--; in pop_stack()
2076 /* Should only return true for non-fatal errors that are allowed to in error_recoverable_with_nospec()
2079 * something like ENOMEM because it is likely to re-occur for the next in error_recoverable_with_nospec()
2080 * architectural path once it has been recovered-from in all speculative in error_recoverable_with_nospec()
2083 return err == -EPERM || err == -EACCES || err == -EINVAL; in error_recoverable_with_nospec()
2090 struct bpf_verifier_state *cur = env->cur_state; in push_stack()
2098 elem->insn_idx = insn_idx; in push_stack()
2099 elem->prev_insn_idx = prev_insn_idx; in push_stack()
2100 elem->next = env->head; in push_stack()
2101 elem->log_pos = env->log.end_pos; in push_stack()
2102 env->head = elem; in push_stack()
2103 env->stack_size++; in push_stack()
2104 err = copy_verifier_state(&elem->st, cur); in push_stack()
2107 elem->st.speculative |= speculative; in push_stack()
2108 if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) { in push_stack()
2110 env->stack_size); in push_stack()
2113 if (elem->st.parent) { in push_stack()
2114 ++elem->st.parent->branches; in push_stack()
2117 * 1. speculative states will bump 'branches' for non-branch in push_stack()
2125 return &elem->st; in push_stack()
2133 /* This helper doesn't clear reg->id */
2136 reg->var_off = tnum_const(imm); in ___mark_reg_known()
2137 reg->smin_value = (s64)imm; in ___mark_reg_known()
2138 reg->smax_value = (s64)imm; in ___mark_reg_known()
2139 reg->umin_value = imm; in ___mark_reg_known()
2140 reg->umax_value = imm; in ___mark_reg_known()
2142 reg->s32_min_value = (s32)imm; in ___mark_reg_known()
2143 reg->s32_max_value = (s32)imm; in ___mark_reg_known()
2144 reg->u32_min_value = (u32)imm; in ___mark_reg_known()
2145 reg->u32_max_value = (u32)imm; in ___mark_reg_known()
2154 memset(((u8 *)reg) + sizeof(reg->type), 0, in __mark_reg_known()
2155 offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type)); in __mark_reg_known()
2156 reg->id = 0; in __mark_reg_known()
2157 reg->ref_obj_id = 0; in __mark_reg_known()
2163 reg->var_off = tnum_const_subreg(reg->var_off, imm); in __mark_reg32_known()
2164 reg->s32_min_value = (s32)imm; in __mark_reg32_known()
2165 reg->s32_max_value = (s32)imm; in __mark_reg32_known()
2166 reg->u32_min_value = (u32)imm; in __mark_reg32_known()
2167 reg->u32_max_value = (u32)imm; in __mark_reg32_known()
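
___mark_reg_known() and __mark_reg32_known() collapse every bound to the immediate, with casts giving the 32-bit sub-bounds their own view of the value. A reduced standalone sketch on a stripped-down bounds struct (field names are simplified):

/* Sketch: marking a value as a known constant pins all min/max pairs. */
#include <stdio.h>
#include <stdint.h>

struct bounds {
	int64_t smin, smax; uint64_t umin, umax;
	int32_t s32_min, s32_max; uint32_t u32_min, u32_max;
};

static void mark_known(struct bounds *b, uint64_t imm)
{
	b->smin = (int64_t)imm;      b->smax = (int64_t)imm;
	b->umin = imm;               b->umax = imm;
	b->s32_min = (int32_t)imm;   b->s32_max = (int32_t)imm;
	b->u32_min = (uint32_t)imm;  b->u32_max = (uint32_t)imm;
}

int main(void)
{
	struct bounds b;

	mark_known(&b, (uint64_t)-20);	/* e.g. the -20 stack adjustment above */
	printf("smin=%lld u32_max=%u\n",
	       (long long)b.smin, b.u32_max);	/* smin=-20 u32_max=4294967276 */
	return 0;
}
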
2181 reg->type = SCALAR_VALUE; in __mark_reg_const_zero()
2185 reg->precise = !env->bpf_capable; in __mark_reg_const_zero()
2204 /* reg->type has no meaning for STACK_DYNPTR, but when we set reg for in __mark_dynptr_reg()
2209 reg->type = CONST_PTR_TO_DYNPTR; in __mark_dynptr_reg()
2211 reg->id = dynptr_id; in __mark_dynptr_reg()
2212 reg->dynptr.type = type; in __mark_dynptr_reg()
2213 reg->dynptr.first_slot = first_slot; in __mark_dynptr_reg()
2218 if (base_type(reg->type) == PTR_TO_MAP_VALUE) { in mark_ptr_not_null_reg()
2219 const struct bpf_map *map = reg->map_ptr; in mark_ptr_not_null_reg()
2221 if (map->inner_map_meta) { in mark_ptr_not_null_reg()
2222 reg->type = CONST_PTR_TO_MAP; in mark_ptr_not_null_reg()
2223 reg->map_ptr = map->inner_map_meta; in mark_ptr_not_null_reg()
2227 if (btf_record_has_field(map->inner_map_meta->record, in mark_ptr_not_null_reg()
2229 reg->map_uid = reg->id; in mark_ptr_not_null_reg()
2231 } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) { in mark_ptr_not_null_reg()
2232 reg->type = PTR_TO_XDP_SOCK; in mark_ptr_not_null_reg()
2233 } else if (map->map_type == BPF_MAP_TYPE_SOCKMAP || in mark_ptr_not_null_reg()
2234 map->map_type == BPF_MAP_TYPE_SOCKHASH) { in mark_ptr_not_null_reg()
2235 reg->type = PTR_TO_SOCKET; in mark_ptr_not_null_reg()
2237 reg->type = PTR_TO_MAP_VALUE; in mark_ptr_not_null_reg()
2242 reg->type &= ~PTR_MAYBE_NULL; in mark_ptr_not_null_reg()
2250 regs[regno].btf = ds_head->btf; in mark_reg_graph_node()
2251 regs[regno].btf_id = ds_head->value_btf_id; in mark_reg_graph_node()
2252 regs[regno].off = ds_head->node_offset; in mark_reg_graph_node()
2257 return type_is_pkt_pointer(reg->type); in reg_is_pkt_pointer()
2263 reg->type == PTR_TO_PACKET_END; in reg_is_pkt_pointer_any()
2268 return base_type(reg->type) == PTR_TO_MEM && in reg_is_dynptr_slice_pkt()
2269 (reg->type & in reg_is_dynptr_slice_pkt()
2281 return reg->type == which && in reg_is_init_pkt_pointer()
2282 reg->id == 0 && in reg_is_init_pkt_pointer()
2283 reg->off == 0 && in reg_is_init_pkt_pointer()
2284 tnum_equals_const(reg->var_off, 0); in reg_is_init_pkt_pointer()
2290 reg->smin_value = S64_MIN; in __mark_reg_unbounded()
2291 reg->smax_value = S64_MAX; in __mark_reg_unbounded()
2292 reg->umin_value = 0; in __mark_reg_unbounded()
2293 reg->umax_value = U64_MAX; in __mark_reg_unbounded()
2295 reg->s32_min_value = S32_MIN; in __mark_reg_unbounded()
2296 reg->s32_max_value = S32_MAX; in __mark_reg_unbounded()
2297 reg->u32_min_value = 0; in __mark_reg_unbounded()
2298 reg->u32_max_value = U32_MAX; in __mark_reg_unbounded()
2303 reg->smin_value = S64_MIN; in __mark_reg64_unbounded()
2304 reg->smax_value = S64_MAX; in __mark_reg64_unbounded()
2305 reg->umin_value = 0; in __mark_reg64_unbounded()
2306 reg->umax_value = U64_MAX; in __mark_reg64_unbounded()
2311 reg->s32_min_value = S32_MIN; in __mark_reg32_unbounded()
2312 reg->s32_max_value = S32_MAX; in __mark_reg32_unbounded()
2313 reg->u32_min_value = 0; in __mark_reg32_unbounded()
2314 reg->u32_max_value = U32_MAX; in __mark_reg32_unbounded()
2319 struct tnum var32_off = tnum_subreg(reg->var_off); in __update_reg32_bounds()
2322 reg->s32_min_value = max_t(s32, reg->s32_min_value, in __update_reg32_bounds()
2325 reg->s32_max_value = min_t(s32, reg->s32_max_value, in __update_reg32_bounds()
2327 reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)var32_off.value); in __update_reg32_bounds()
2328 reg->u32_max_value = min(reg->u32_max_value, in __update_reg32_bounds()
2335 reg->smin_value = max_t(s64, reg->smin_value, in __update_reg64_bounds()
2336 reg->var_off.value | (reg->var_off.mask & S64_MIN)); in __update_reg64_bounds()
2338 reg->smax_value = min_t(s64, reg->smax_value, in __update_reg64_bounds()
2339 reg->var_off.value | (reg->var_off.mask & S64_MAX)); in __update_reg64_bounds()
2340 reg->umin_value = max(reg->umin_value, reg->var_off.value); in __update_reg64_bounds()
2341 reg->umax_value = min(reg->umax_value, in __update_reg64_bounds()
2342 reg->var_off.value | reg->var_off.mask); in __update_reg64_bounds()
2351 /* Uses signed min/max values to inform unsigned, and vice-versa */
2359 * [10, 20] range. But this property holds for any 64-bit range as in __reg32_deduce_bounds()
2370 * depends on actual hexadecimal values of 32-bit range. They can form in __reg32_deduce_bounds()
2375 if ((reg->umin_value >> 32) == (reg->umax_value >> 32)) { in __reg32_deduce_bounds()
2379 reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)reg->umin_value); in __reg32_deduce_bounds()
2380 reg->u32_max_value = min_t(u32, reg->u32_max_value, (u32)reg->umax_value); in __reg32_deduce_bounds()
2382 if ((s32)reg->umin_value <= (s32)reg->umax_value) { in __reg32_deduce_bounds()
2383 reg->s32_min_value = max_t(s32, reg->s32_min_value, (s32)reg->umin_value); in __reg32_deduce_bounds()
2384 reg->s32_max_value = min_t(s32, reg->s32_max_value, (s32)reg->umax_value); in __reg32_deduce_bounds()
2387 if ((reg->smin_value >> 32) == (reg->smax_value >> 32)) { in __reg32_deduce_bounds()
2389 if ((u32)reg->smin_value <= (u32)reg->smax_value) { in __reg32_deduce_bounds()
2390 reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)reg->smin_value); in __reg32_deduce_bounds()
2391 reg->u32_max_value = min_t(u32, reg->u32_max_value, (u32)reg->smax_value); in __reg32_deduce_bounds()
2394 if ((s32)reg->smin_value <= (s32)reg->smax_value) { in __reg32_deduce_bounds()
2395 reg->s32_min_value = max_t(s32, reg->s32_min_value, (s32)reg->smin_value); in __reg32_deduce_bounds()
2396 reg->s32_max_value = min_t(s32, reg->s32_max_value, (s32)reg->smax_value); in __reg32_deduce_bounds()
2400 * sequential numbers (in 32-bit unsigned space, so 0xffffffff to in __reg32_deduce_bounds()
2403 * have s64 range [-1, 1] ([0xffffffffffffffff, 0x0000000000000001]). in __reg32_deduce_bounds()
2404 * Possible s64 values are {-1, 0, 1} ({0xffffffffffffffff, in __reg32_deduce_bounds()
2406 * we still get a valid s32 range [-1, 1] ([0xffffffff, 0x00000001]). in __reg32_deduce_bounds()
2410 * [-16, 16] ([0xfffffff0; 0x00000010]) in its 32 bit subregister. in __reg32_deduce_bounds()
2412 if ((u32)(reg->umin_value >> 32) + 1 == (u32)(reg->umax_value >> 32) && in __reg32_deduce_bounds()
2413 (s32)reg->umin_value < 0 && (s32)reg->umax_value >= 0) { in __reg32_deduce_bounds()
2414 reg->s32_min_value = max_t(s32, reg->s32_min_value, (s32)reg->umin_value); in __reg32_deduce_bounds()
2415 reg->s32_max_value = min_t(s32, reg->s32_max_value, (s32)reg->umax_value); in __reg32_deduce_bounds()
2417 if ((u32)(reg->smin_value >> 32) + 1 == (u32)(reg->smax_value >> 32) && in __reg32_deduce_bounds()
2418 (s32)reg->smin_value < 0 && (s32)reg->smax_value >= 0) { in __reg32_deduce_bounds()
2419 reg->s32_min_value = max_t(s32, reg->s32_min_value, (s32)reg->smin_value); in __reg32_deduce_bounds()
2420 reg->s32_max_value = min_t(s32, reg->s32_max_value, (s32)reg->smax_value); in __reg32_deduce_bounds()
2425 if ((s32)reg->u32_min_value <= (s32)reg->u32_max_value) { in __reg32_deduce_bounds()
2426 reg->s32_min_value = max_t(s32, reg->s32_min_value, reg->u32_min_value); in __reg32_deduce_bounds()
2427 reg->s32_max_value = min_t(s32, reg->s32_max_value, reg->u32_max_value); in __reg32_deduce_bounds()
2431 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff. in __reg32_deduce_bounds()
2433 if ((u32)reg->s32_min_value <= (u32)reg->s32_max_value) { in __reg32_deduce_bounds()
2434 reg->u32_min_value = max_t(u32, reg->s32_min_value, reg->u32_min_value); in __reg32_deduce_bounds()
2435 reg->u32_max_value = min_t(u32, reg->s32_max_value, reg->u32_max_value); in __reg32_deduce_bounds()
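
The deduction just above learns signed bounds from unsigned ones (and vice versa) whenever the range does not cross the sign boundary when reinterpreted. A standalone illustration of the 32-bit case, with an example range:

/* Sketch: if a u32 range stays ordered when cast to s32, the same pair also
 * bounds the value as s32, so the signed bounds can be tightened.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t umin = 0xfffffff0, umax = 0xfffffffe;	/* example u32 range     */
	int32_t smin = INT32_MIN, smax = INT32_MAX;	/* signed bounds unknown */

	if ((int32_t)umin <= (int32_t)umax) {		/* no sign-boundary wrap */
		if ((int32_t)umin > smin)
			smin = (int32_t)umin;
		if ((int32_t)umax < smax)
			smax = (int32_t)umax;
	}
	printf("s32 bounds: [%d, %d]\n", (int)smin, (int)smax);	/* [-16, -2] */
	return 0;
}
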
2446 * |-------------------------------|--------------------------------| in __reg64_deduce_bounds()
2454 * |-------------------------------|--------------------------------| in __reg64_deduce_bounds()
2455 * 0 S64_MAX S64_MIN -1 in __reg64_deduce_bounds()
2458 * contiguous to the right of it, wrapping around from -1 to 0, and in __reg64_deduce_bounds()
2461 * more visually as mapped to a sign-agnostic range of hex values. in __reg64_deduce_bounds()
2467 * |-------------------------------|--------------------------------| in __reg64_deduce_bounds()
2468 * 0 S64_MAX S64_MIN -1 in __reg64_deduce_bounds()
2470 * >------------------------------ -------------------------------> in __reg64_deduce_bounds()
2480 * |-------------------------------|--------------------------------| in __reg64_deduce_bounds()
2486 * will be non-negative both as u64 and s64 (and in fact it will be in __reg64_deduce_bounds()
2489 * non-negative range of values larger than 0x8000000000000000. in __reg64_deduce_bounds()
2508 if ((s64)reg->umin_value <= (s64)reg->umax_value) { in __reg64_deduce_bounds()
2509 reg->smin_value = max_t(s64, reg->smin_value, reg->umin_value); in __reg64_deduce_bounds()
2510 reg->smax_value = min_t(s64, reg->smax_value, reg->umax_value); in __reg64_deduce_bounds()
2514 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff. in __reg64_deduce_bounds()
2516 if ((u64)reg->smin_value <= (u64)reg->smax_value) { in __reg64_deduce_bounds()
2517 reg->umin_value = max_t(u64, reg->smin_value, reg->umin_value); in __reg64_deduce_bounds()
2518 reg->umax_value = min_t(u64, reg->smax_value, reg->umax_value); in __reg64_deduce_bounds()
2530 * |----------------------------|----------------------------| in __reg64_deduce_bounds()
2532 * 0 S64_MAX S64_MIN -1 in __reg64_deduce_bounds()
2538 * |----------------------------|----------------------------| in __reg64_deduce_bounds()
2540 * 0 S64_MAX S64_MIN -1 in __reg64_deduce_bounds()
2548 * |----------------------------|----------------------------| in __reg64_deduce_bounds()
2550 * 0 S64_MAX S64_MIN -1 in __reg64_deduce_bounds()
2555 if (reg->umax_value < (u64)reg->smin_value) { in __reg64_deduce_bounds()
2556 reg->smin_value = (s64)reg->umin_value; in __reg64_deduce_bounds()
2557 reg->umax_value = min_t(u64, reg->umax_value, reg->smax_value); in __reg64_deduce_bounds()
2558 } else if ((u64)reg->smax_value < reg->umin_value) { in __reg64_deduce_bounds()
2564 * |----------------------------|----------------------------| in __reg64_deduce_bounds()
2566 * 0 S64_MAX S64_MIN -1 in __reg64_deduce_bounds()
2568 reg->smax_value = (s64)reg->umax_value; in __reg64_deduce_bounds()
2569 reg->umin_value = max_t(u64, reg->umin_value, reg->smin_value); in __reg64_deduce_bounds()
2576 /* Try to tighten 64-bit bounds from 32-bit knowledge, using 32-bit in __reg_deduce_mixed_bounds()
2577 * values on both sides of 64-bit range in hope to have tighter range. in __reg_deduce_mixed_bounds()
2579 * 32-bit signed > 0 operation that s32 bounds are now [1; 0x7fffffff]. in __reg_deduce_mixed_bounds()
2580 * With this, we can substitute 1 as low 32-bits of _low_ 64-bit bound in __reg_deduce_mixed_bounds()
2581 * (0x100000000 -> 0x100000001) and 0x7fffffff as low 32-bits of in __reg_deduce_mixed_bounds()
2582 * _high_ 64-bit bound (0x380000000 -> 0x37fffffff) and arrive at a in __reg_deduce_mixed_bounds()
2585 * with are well-formed ranges in respective s64 or u64 domain, just in __reg_deduce_mixed_bounds()
2586 * like we do with similar kinds of 32-to-64 or 64-to-32 adjustments. in __reg_deduce_mixed_bounds()
2591 /* u32 -> u64 tightening, it's always well-formed */ in __reg_deduce_mixed_bounds()
2592 new_umin = (reg->umin_value & ~0xffffffffULL) | reg->u32_min_value; in __reg_deduce_mixed_bounds()
2593 new_umax = (reg->umax_value & ~0xffffffffULL) | reg->u32_max_value; in __reg_deduce_mixed_bounds()
2594 reg->umin_value = max_t(u64, reg->umin_value, new_umin); in __reg_deduce_mixed_bounds()
2595 reg->umax_value = min_t(u64, reg->umax_value, new_umax); in __reg_deduce_mixed_bounds()
2596 /* u32 -> s64 tightening, u32 range embedded into s64 preserves range validity */ in __reg_deduce_mixed_bounds()
2597 new_smin = (reg->smin_value & ~0xffffffffULL) | reg->u32_min_value; in __reg_deduce_mixed_bounds()
2598 new_smax = (reg->smax_value & ~0xffffffffULL) | reg->u32_max_value; in __reg_deduce_mixed_bounds()
2599 reg->smin_value = max_t(s64, reg->smin_value, new_smin); in __reg_deduce_mixed_bounds()
2600 reg->smax_value = min_t(s64, reg->smax_value, new_smax); in __reg_deduce_mixed_bounds()
2603 * when upper bits for a 64-bit range are all 1s or all 0s. in __reg_deduce_mixed_bounds()
2614 * Also suppose that its 32-bit range is positive, in __reg_deduce_mixed_bounds()
2615 * meaning that lower 32-bits of the full 64-bit register in __reg_deduce_mixed_bounds()
2623 * which means that upper bits of the full 64-bit register in __reg_deduce_mixed_bounds()
2627 * - 0xffff_ffff_8000_0000 == (s64)S32_MIN in __reg_deduce_mixed_bounds()
2628 * - 0x0000_0000_7fff_ffff == (s64)S32_MAX in __reg_deduce_mixed_bounds()
2631 if (reg->s32_min_value >= 0 && reg->smin_value >= S32_MIN && reg->smax_value <= S32_MAX) { in __reg_deduce_mixed_bounds()
2632 reg->smin_value = reg->s32_min_value; in __reg_deduce_mixed_bounds()
2633 reg->smax_value = reg->s32_max_value; in __reg_deduce_mixed_bounds()
2634 reg->umin_value = reg->s32_min_value; in __reg_deduce_mixed_bounds()
2635 reg->umax_value = reg->s32_max_value; in __reg_deduce_mixed_bounds()
2636 reg->var_off = tnum_intersect(reg->var_off, in __reg_deduce_mixed_bounds()
2637 tnum_range(reg->smin_value, reg->smax_value)); in __reg_deduce_mixed_bounds()
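
The u32-to-u64 tightening above splices the 32-bit bounds into the low halves of the 64-bit bounds. Reproducing the comment's own example numbers in a standalone check:

/* Sketch: splice u32 bounds [1, 0x7fffffff] into u64 bounds
 * [0x100000000, 0x380000000], as the comment above describes.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t umin = 0x100000000ULL, umax = 0x380000000ULL;
	uint32_t u32_min = 1, u32_max = 0x7fffffff;
	uint64_t new_umin = (umin & ~0xffffffffULL) | u32_min;
	uint64_t new_umax = (umax & ~0xffffffffULL) | u32_max;

	printf("[%#llx, %#llx]\n",
	       (unsigned long long)new_umin,
	       (unsigned long long)new_umax);	/* [0x100000001, 0x37fffffff] */
	return 0;
}
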
2651 struct tnum var64_off = tnum_intersect(reg->var_off, in __reg_bound_offset()
2652 tnum_range(reg->umin_value, in __reg_bound_offset()
2653 reg->umax_value)); in __reg_bound_offset()
2655 tnum_range(reg->u32_min_value, in __reg_bound_offset()
2656 reg->u32_max_value)); in __reg_bound_offset()
2658 reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off); in __reg_bound_offset()
2683 if (reg->umin_value > reg->umax_value || in reg_bounds_sanity_check()
2684 reg->smin_value > reg->smax_value || in reg_bounds_sanity_check()
2685 reg->u32_min_value > reg->u32_max_value || in reg_bounds_sanity_check()
2686 reg->s32_min_value > reg->s32_max_value) { in reg_bounds_sanity_check()
2691 if (tnum_is_const(reg->var_off)) { in reg_bounds_sanity_check()
2692 u64 uval = reg->var_off.value; in reg_bounds_sanity_check()
2695 if (reg->umin_value != uval || reg->umax_value != uval || in reg_bounds_sanity_check()
2696 reg->smin_value != sval || reg->smax_value != sval) { in reg_bounds_sanity_check()
2702 if (tnum_subreg_is_const(reg->var_off)) { in reg_bounds_sanity_check()
2703 u32 uval32 = tnum_subreg(reg->var_off).value; in reg_bounds_sanity_check()
2706 if (reg->u32_min_value != uval32 || reg->u32_max_value != uval32 || in reg_bounds_sanity_check()
2707 reg->s32_min_value != sval32 || reg->s32_max_value != sval32) { in reg_bounds_sanity_check()
2717 ctx, msg, reg->umin_value, reg->umax_value, in reg_bounds_sanity_check()
2718 reg->smin_value, reg->smax_value, in reg_bounds_sanity_check()
2719 reg->u32_min_value, reg->u32_max_value, in reg_bounds_sanity_check()
2720 reg->s32_min_value, reg->s32_max_value, in reg_bounds_sanity_check()
2721 reg->var_off.value, reg->var_off.mask); in reg_bounds_sanity_check()
2722 if (env->test_reg_invariants) in reg_bounds_sanity_check()
2723 return -EFAULT; in reg_bounds_sanity_check()
2735 reg->umin_value = reg->u32_min_value; in __reg_assign_32_into_64()
2736 reg->umax_value = reg->u32_max_value; in __reg_assign_32_into_64()
2738 /* Attempt to pull 32-bit signed bounds into 64-bit bounds but must in __reg_assign_32_into_64()
2742 if (__reg32_bound_s64(reg->s32_min_value) && in __reg_assign_32_into_64()
2743 __reg32_bound_s64(reg->s32_max_value)) { in __reg_assign_32_into_64()
2744 reg->smin_value = reg->s32_min_value; in __reg_assign_32_into_64()
2745 reg->smax_value = reg->s32_max_value; in __reg_assign_32_into_64()
2747 reg->smin_value = 0; in __reg_assign_32_into_64()
2748 reg->smax_value = U32_MAX; in __reg_assign_32_into_64()
2760 reg->type = SCALAR_VALUE; in __mark_reg_unknown_imprecise()
2761 reg->id = 0; in __mark_reg_unknown_imprecise()
2762 reg->ref_obj_id = 0; in __mark_reg_unknown_imprecise()
2763 reg->var_off = tnum_unknown; in __mark_reg_unknown_imprecise()
2764 reg->frameno = 0; in __mark_reg_unknown_imprecise()
2765 reg->precise = false; in __mark_reg_unknown_imprecise()
2776 reg->precise = !env->bpf_capable; in __mark_reg_unknown()
2800 reg->s32_min_value = max_t(s32, reg->s32_min_value, s32_min); in __mark_reg_s32_range()
2801 reg->s32_max_value = min_t(s32, reg->s32_max_value, s32_max); in __mark_reg_s32_range()
2803 reg->smin_value = max_t(s64, reg->smin_value, s32_min); in __mark_reg_s32_range()
2804 reg->smax_value = min_t(s64, reg->smax_value, s32_max); in __mark_reg_s32_range()
2815 reg->type = NOT_INIT; in __mark_reg_not_init()
2847 regs[regno].id = ++env->id_gen; in mark_btf_ld_reg()
2856 return -EFAULT; in mark_btf_ld_reg()
2864 struct bpf_reg_state *regs = state->regs; in init_reg_state()
2875 regs[BPF_REG_FP].frameno = state->frameno; in init_reg_state()
2883 #define BPF_MAIN_FUNC (-1)
2888 state->callsite = callsite; in init_func_state()
2889 state->frameno = frameno; in init_func_state()
2890 state->subprogno = subprogno; in init_func_state()
2891 state->callback_ret_range = retval_range(0, 0); in init_func_state()
2908 elem->insn_idx = insn_idx; in push_async_cb()
2909 elem->prev_insn_idx = prev_insn_idx; in push_async_cb()
2910 elem->next = env->head; in push_async_cb()
2911 elem->log_pos = env->log.end_pos; in push_async_cb()
2912 env->head = elem; in push_async_cb()
2913 env->stack_size++; in push_async_cb()
2914 if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) { in push_async_cb()
2917 env->stack_size); in push_async_cb()
2925 elem->st.branches = 1; in push_async_cb()
2926 elem->st.in_sleepable = is_sleepable; in push_async_cb()
2934 elem->st.frame[0] = frame; in push_async_cb()
2935 return &elem->st; in push_async_cb()
2947 return ((struct bpf_subprog_info *)a)->start - in cmp_subprogs()
2948 ((struct bpf_subprog_info *)b)->start; in cmp_subprogs()
2954 struct bpf_subprog_info *vals = env->subprog_info; in bpf_find_containing_subprog()
2957 if (off >= env->prog->len || off < 0 || env->subprog_cnt == 0) in bpf_find_containing_subprog()
2961 r = env->subprog_cnt - 1; in bpf_find_containing_subprog()
2963 m = l + (r - l + 1) / 2; in bpf_find_containing_subprog()
2967 r = m - 1; in bpf_find_containing_subprog()
2978 if (!p || p->start != off) in find_subprog()
2979 return -ENOENT; in find_subprog()
2980 return p - env->subprog_info; in find_subprog()
2985 int insn_cnt = env->prog->len; in add_subprog()
2990 return -EINVAL; in add_subprog()
2995 if (env->subprog_cnt >= BPF_MAX_SUBPROGS) { in add_subprog()
2997 return -E2BIG; in add_subprog()
3000 env->subprog_info[env->subprog_cnt++].start = off; in add_subprog()
3001 sort(env->subprog_info, env->subprog_cnt, in add_subprog()
3002 sizeof(env->subprog_info[0]), cmp_subprogs, NULL); in add_subprog()
3003 return env->subprog_cnt - 1; in add_subprog()
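
add_subprog() keeps subprogram start offsets in a sorted array: look for an existing entry, append, then re-sort. A plain userspace rendition of that pattern (the kernel function's return-value and error handling details are not reproduced):

/* Sketch: maintain a sorted list of subprogram start offsets by appending and
 * re-sorting, skipping duplicates.
 */
#include <stdio.h>
#include <stdlib.h>

static int cmp_start(const void *a, const void *b)
{
	return *(const int *)a - *(const int *)b;
}

static void add_start(int *starts, int *cnt, int off)
{
	for (int i = 0; i < *cnt; i++)
		if (starts[i] == off)
			return;			/* already known */
	starts[(*cnt)++] = off;
	qsort(starts, *cnt, sizeof(starts[0]), cmp_start);
}

int main(void)
{
	int starts[8] = { 0 }, cnt = 0;

	add_start(starts, &cnt, 0);		/* main program entry */
	add_start(starts, &cnt, 40);
	add_start(starts, &cnt, 12);
	add_start(starts, &cnt, 40);		/* duplicate, ignored */
	for (int i = 0; i < cnt; i++)
		printf("%d ", starts[i]);	/* 0 12 40 */
	printf("\n");
	return 0;
}
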
3008 struct bpf_prog_aux *aux = env->prog->aux; in bpf_find_exception_callback_insn_off()
3009 struct btf *btf = aux->btf; in bpf_find_exception_callback_insn_off()
3015 /* Non-zero func_info_cnt implies valid btf */ in bpf_find_exception_callback_insn_off()
3016 if (!aux->func_info_cnt) in bpf_find_exception_callback_insn_off()
3018 main_btf_id = aux->func_info[0].type_id; in bpf_find_exception_callback_insn_off()
3023 return -EINVAL; in bpf_find_exception_callback_insn_off()
3026 name = btf_find_decl_tag_value(btf, t, -1, "exception_callback:"); in bpf_find_exception_callback_insn_off()
3030 if (ret == -ENOENT) in bpf_find_exception_callback_insn_off()
3032 else if (ret == -EEXIST) in bpf_find_exception_callback_insn_off()
3046 return -EINVAL; in bpf_find_exception_callback_insn_off()
3049 for (i = 0; i < aux->func_info_cnt; i++) { in bpf_find_exception_callback_insn_off()
3050 if (aux->func_info[i].type_id != id) in bpf_find_exception_callback_insn_off()
3052 ret = aux->func_info[i].insn_off; in bpf_find_exception_callback_insn_off()
3058 ret = -EINVAL; in bpf_find_exception_callback_insn_off()
3063 ret = -EINVAL; in bpf_find_exception_callback_insn_off()
3106 return d0->func_id - d1->func_id ?: d0->offset - d1->offset; in kfunc_desc_cmp_by_id_off()
3114 return d0->offset - d1->offset; in kfunc_btf_cmp_by_off()
3126 tab = prog->aux->kfunc_tab; in find_kfunc_desc()
3127 return bsearch(&desc, tab->descs, tab->nr_descs, in find_kfunc_desc()
3128 sizeof(tab->descs[0]), kfunc_desc_cmp_by_id_off); in find_kfunc_desc()
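
find_kfunc_desc() resolves a (func_id, offset) pair with bsearch() over a table kept sorted by the same comparator that orders insertions. A simplified, self-contained version of that lookup (types reduced to plain ints):

/* Sketch: binary search over descriptors sorted by (func_id, offset). */
#include <stdio.h>
#include <stdlib.h>

struct kdesc { int func_id; int offset; };

static int cmp_id_off(const void *a, const void *b)
{
	const struct kdesc *d0 = a, *d1 = b;

	if (d0->func_id != d1->func_id)
		return d0->func_id < d1->func_id ? -1 : 1;
	return d0->offset - d1->offset;
}

int main(void)
{
	struct kdesc tab[] = { { 10, 0 }, { 10, 16 }, { 42, 0 } };	/* kept sorted */
	struct kdesc key = { .func_id = 10, .offset = 16 };
	struct kdesc *res = bsearch(&key, tab, 3, sizeof(tab[0]), cmp_id_off);

	if (res)
		printf("found at index %td\n", res - tab);	/* index 1 */
	else
		printf("not found\n");
	return 0;
}
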
3138 return -EFAULT; in bpf_get_kfunc_addr()
3140 *func_addr = (u8 *)desc->addr; in bpf_get_kfunc_addr()
3154 tab = env->prog->aux->kfunc_btf_tab; in __find_kfunc_desc_btf()
3155 b = bsearch(&kf_btf, tab->descs, tab->nr_descs, in __find_kfunc_desc_btf()
3156 sizeof(tab->descs[0]), kfunc_btf_cmp_by_off); in __find_kfunc_desc_btf()
3158 if (tab->nr_descs == MAX_KFUNC_BTFS) { in __find_kfunc_desc_btf()
3160 return ERR_PTR(-E2BIG); in __find_kfunc_desc_btf()
3163 if (bpfptr_is_null(env->fd_array)) { in __find_kfunc_desc_btf()
3165 return ERR_PTR(-EPROTO); in __find_kfunc_desc_btf()
3168 if (copy_from_bpfptr_offset(&btf_fd, env->fd_array, in __find_kfunc_desc_btf()
3171 return ERR_PTR(-EFAULT); in __find_kfunc_desc_btf()
3182 return ERR_PTR(-EINVAL); in __find_kfunc_desc_btf()
3188 return ERR_PTR(-ENXIO); in __find_kfunc_desc_btf()
3191 b = &tab->descs[tab->nr_descs++]; in __find_kfunc_desc_btf()
3192 b->btf = btf; in __find_kfunc_desc_btf()
3193 b->module = mod; in __find_kfunc_desc_btf()
3194 b->offset = offset; in __find_kfunc_desc_btf()
3199 sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]), in __find_kfunc_desc_btf()
3202 btf = b->btf; in __find_kfunc_desc_btf()
3213 while (tab->nr_descs--) { in bpf_free_kfunc_btf_tab()
3214 module_put(tab->descs[tab->nr_descs].module); in bpf_free_kfunc_btf_tab()
3215 btf_put(tab->descs[tab->nr_descs].btf); in bpf_free_kfunc_btf_tab()
3228 return ERR_PTR(-EINVAL); in find_kfunc_desc_btf()
3233 return btf_vmlinux ?: ERR_PTR(-ENOENT); in find_kfunc_desc_btf()
3249 prog_aux = env->prog->aux; in add_kfunc_call()
3250 tab = prog_aux->kfunc_tab; in add_kfunc_call()
3251 btf_tab = prog_aux->kfunc_btf_tab; in add_kfunc_call()
3255 return -ENOTSUPP; in add_kfunc_call()
3258 if (!env->prog->jit_requested) { in add_kfunc_call()
3260 return -ENOTSUPP; in add_kfunc_call()
3265 return -ENOTSUPP; in add_kfunc_call()
3268 if (!env->prog->gpl_compatible) { in add_kfunc_call()
3269 verbose(env, "cannot call kernel function from non-GPL compatible program\n"); in add_kfunc_call()
3270 return -EINVAL; in add_kfunc_call()
3275 return -ENOMEM; in add_kfunc_call()
3276 prog_aux->kfunc_tab = tab; in add_kfunc_call()
3291 return -ENOMEM; in add_kfunc_call()
3292 prog_aux->kfunc_btf_tab = btf_tab; in add_kfunc_call()
3301 if (find_kfunc_desc(env->prog, func_id, offset)) in add_kfunc_call()
3304 if (tab->nr_descs == MAX_KFUNC_DESCS) { in add_kfunc_call()
3306 return -E2BIG; in add_kfunc_call()
3313 return -EINVAL; in add_kfunc_call()
3315 func_proto = btf_type_by_id(desc_btf, func->type); in add_kfunc_call()
3319 return -EINVAL; in add_kfunc_call()
3322 func_name = btf_name_by_offset(desc_btf, func->name_off); in add_kfunc_call()
3327 return -EINVAL; in add_kfunc_call()
3335 /* Check whether the relative offset overflows desc->imm */ in add_kfunc_call()
3339 return -EINVAL; in add_kfunc_call()
3344 err = bpf_dev_bound_kfunc_check(&env->log, prog_aux); in add_kfunc_call()
3349 desc = &tab->descs[tab->nr_descs++]; in add_kfunc_call()
3350 desc->func_id = func_id; in add_kfunc_call()
3351 desc->imm = call_imm; in add_kfunc_call()
3352 desc->offset = offset; in add_kfunc_call()
3353 desc->addr = addr; in add_kfunc_call()
3354 err = btf_distill_func_proto(&env->log, desc_btf, in add_kfunc_call()
3356 &desc->func_model); in add_kfunc_call()
3358 sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]), in add_kfunc_call()
3368 if (d0->imm != d1->imm) in kfunc_desc_cmp_by_imm_off()
3369 return d0->imm < d1->imm ? -1 : 1; in kfunc_desc_cmp_by_imm_off()
3370 if (d0->offset != d1->offset) in kfunc_desc_cmp_by_imm_off()
3371 return d0->offset < d1->offset ? -1 : 1; in kfunc_desc_cmp_by_imm_off()
3379 tab = prog->aux->kfunc_tab; in sort_kfunc_descs_by_imm_off()
3383 sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]), in sort_kfunc_descs_by_imm_off()
3389 return !!prog->aux->kfunc_tab; in bpf_prog_has_kfunc_call()
3397 .imm = insn->imm, in bpf_jit_find_kfunc_model()
3398 .offset = insn->off, in bpf_jit_find_kfunc_model()
3403 tab = prog->aux->kfunc_tab; in bpf_jit_find_kfunc_model()
3404 res = bsearch(&desc, tab->descs, tab->nr_descs, in bpf_jit_find_kfunc_model()
3405 sizeof(tab->descs[0]), kfunc_desc_cmp_by_imm_off); in bpf_jit_find_kfunc_model()
3407 return res ? &res->func_model : NULL; in bpf_jit_find_kfunc_model()
3417 ret = add_kfunc_call(env, insn->imm, insn->off); in add_kfunc_in_insns()
3427 struct bpf_subprog_info *subprog = env->subprog_info; in add_subprog_and_kfunc()
3428 int i, ret, insn_cnt = env->prog->len, ex_cb_insn; in add_subprog_and_kfunc()
3429 struct bpf_insn *insn = env->prog->insnsi; in add_subprog_and_kfunc()
3441 if (!env->bpf_capable) { in add_subprog_and_kfunc()
3443 return -EPERM; in add_subprog_and_kfunc()
3447 ret = add_subprog(env, i + insn->imm + 1); in add_subprog_and_kfunc()
3449 ret = add_kfunc_call(env, insn->imm, insn->off); in add_subprog_and_kfunc()
3467 for (i = 1; i < env->subprog_cnt; i++) { in add_subprog_and_kfunc()
3468 if (env->subprog_info[i].start != ex_cb_insn) in add_subprog_and_kfunc()
3470 env->exception_callback_subprog = i; in add_subprog_and_kfunc()
3479 subprog[env->subprog_cnt].start = insn_cnt; in add_subprog_and_kfunc()
3481 if (env->log.level & BPF_LOG_LEVEL2) in add_subprog_and_kfunc()
3482 for (i = 0; i < env->subprog_cnt; i++) in add_subprog_and_kfunc()
3491 struct bpf_subprog_info *subprog = env->subprog_info; in check_subprogs()
3492 struct bpf_insn *insn = env->prog->insnsi; in check_subprogs()
3493 int insn_cnt = env->prog->len; in check_subprogs()
3517 return -EINVAL; in check_subprogs()
3520 if (i == subprog_end - 1) { in check_subprogs()
3521 /* to avoid fall-through from one subprog into another in check_subprogs()
3529 return -EINVAL; in check_subprogs()
3533 if (cur_subprog < env->subprog_cnt) in check_subprogs()
3546 err = bpf_mark_stack_read(env, reg->frameno, env->insn_idx, BIT(spi - i)); in mark_stack_slot_obj_read()
3549 mark_stack_slot_scratched(env, spi - i); in mark_stack_slot_obj_read()
3562 if (reg->type == CONST_PTR_TO_DYNPTR) in mark_dynptr_read()
3590 /* This function is supposed to be used by the following 32-bit optimization
3592 * on 64-bit, otherwise return FALSE.
3599 code = insn->code; in is_reg64()
3614 if (insn->src_reg == BPF_PSEUDO_CALL) in is_reg64()
3626 if (class == BPF_ALU64 && op == BPF_END && (insn->imm == 16 || insn->imm == 32)) in is_reg64()
3630 (class == BPF_ALU && op == BPF_END && insn->imm == 64)) in is_reg64()
3648 if (t == SRC_OP && reg->type != SCALAR_VALUE) in is_reg64()
3660 /* Both LD_IND and LD_ABS return 32-bit data. */ in is_reg64()
3680 /* Return the regno defined by the insn, or -1. */
3683 switch (BPF_CLASS(insn->code)) { in insn_def_regno()
3687 return -1; in insn_def_regno()
3689 if (BPF_MODE(insn->code) == BPF_ATOMIC || in insn_def_regno()
3690 BPF_MODE(insn->code) == BPF_PROBE_ATOMIC) { in insn_def_regno()
3691 if (insn->imm == BPF_CMPXCHG) in insn_def_regno()
3693 else if (insn->imm == BPF_LOAD_ACQ) in insn_def_regno()
3694 return insn->dst_reg; in insn_def_regno()
3695 else if (insn->imm & BPF_FETCH) in insn_def_regno()
3696 return insn->src_reg; in insn_def_regno()
3698 return -1; in insn_def_regno()
3700 return insn->dst_reg; in insn_def_regno()
3704 /* Return TRUE if INSN has defined any 32-bit value explicitly. */
3709 if (dst_reg == -1) in insn_has_def32()
3718 s32 def_idx = reg->subreg_def; in mark_insn_zext()
3723 env->insn_aux_data[def_idx - 1].zext_dst = true; in mark_insn_zext()
3724 /* The dst will be zero extended, so won't be sub-register anymore. */ in mark_insn_zext()
3725 reg->subreg_def = DEF_NOT_SUBREG; in mark_insn_zext()
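/*
 * Editorial sketch, not part of verifier.c: how subreg_def and zext_dst
 * interact.  The 32-bit move below defines only the low half of R1, so
 * __check_reg_arg() records R1.subreg_def = <defining insn idx> + 1.  The
 * 64-bit use of R1 that follows makes mark_insn_zext() set zext_dst on the
 * defining insn, so an explicit zero extension can later be emitted for
 * JITs that request it via bpf_jit_needs_zext():
 *
 *	BPF_MOV32_IMM(BPF_REG_1, 5),
 *	BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
 */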
3731 struct bpf_insn *insn = env->prog->insnsi + env->insn_idx; in __check_reg_arg()
3737 return -EINVAL; in __check_reg_arg()
3746 if (reg->type == NOT_INIT) { in __check_reg_arg()
3748 return -EACCES; in __check_reg_arg()
3750 /* We don't need to worry about FP liveness because it's read-only */ in __check_reg_arg()
3762 return -EACCES; in __check_reg_arg()
3764 reg->subreg_def = rw64 ? DEF_NOT_SUBREG : env->insn_idx + 1; in __check_reg_arg()
3774 struct bpf_verifier_state *vstate = env->cur_state; in check_reg_arg()
3775 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in check_reg_arg()
3777 return __check_reg_arg(env, state->regs, regno, t); in check_reg_arg()
3797 env->insn_aux_data[idx].jmp_point = true; in mark_jmp_point()
3802 return env->insn_aux_data[insn_idx].jmp_point; in is_jmp_point()
3809 #define LR_FRAMENO_MASK ((1ull << LR_FRAMENO_BITS) - 1)
3810 #define LR_SPI_MASK ((1ull << LR_SPI_BITS) - 1)
3811 #define LR_SIZE_MASK ((1ull << LR_SIZE_BITS) - 1)
3832 if (s->cnt < LINKED_REGS_MAX) in linked_regs_push()
3833 return &s->entries[s->cnt++]; in linked_regs_push()
3838 /* Use u64 as a vector of 6 10-bit values; use the first 4 bits to track in linked_regs_pack()
3841 * - 3-bits frameno
3842 * - 6-bits spi_or_reg
3843 * - 1-bit is_reg
3850 for (i = 0; i < s->cnt; ++i) { in linked_regs_pack()
3851 struct linked_reg *e = &s->entries[i]; in linked_regs_pack()
3854 tmp |= e->frameno; in linked_regs_pack()
3855 tmp |= e->spi << LR_SPI_OFF; in linked_regs_pack()
3856 tmp |= (e->is_reg ? 1 : 0) << LR_IS_REG_OFF; in linked_regs_pack()
3862 val |= s->cnt; in linked_regs_pack()
3870 s->cnt = val & LR_SIZE_MASK; in linked_regs_unpack()
3873 for (i = 0; i < s->cnt; ++i) { in linked_regs_unpack()
3874 struct linked_reg *e = &s->entries[i]; in linked_regs_unpack()
3876 e->frameno = val & LR_FRAMENO_MASK; in linked_regs_unpack()
3877 e->spi = (val >> LR_SPI_OFF) & LR_SPI_MASK; in linked_regs_unpack()
3878 e->is_reg = (val >> LR_IS_REG_OFF) & 0x1; in linked_regs_unpack()
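/*
 * Editorial worked example, assuming LR_ENTRY_BITS == 10, LR_SIZE_BITS == 4,
 * LR_SPI_OFF == 3 and LR_IS_REG_OFF == 9 as implied by the 3/6/1 bit layout
 * described above: packing a single entry naming register 6 in frame 0 gives
 *
 *	tmp = 0 | (6 << 3) | (1 << 9)     = 0x230
 *	val = (0x230 << LR_SIZE_BITS) | 1 = 0x2301
 *
 * and linked_regs_unpack() recovers cnt == 1, frameno == 0, spi/regno == 6
 * and is_reg == 1 from that value.
 */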
3887 u32 cnt = cur->jmp_history_cnt; in push_jmp_history()
3892 if (env->cur_hist_ent) { in push_jmp_history()
3896 verifier_bug_if((env->cur_hist_ent->flags & insn_flags) && in push_jmp_history()
3897 (env->cur_hist_ent->flags & insn_flags) != insn_flags, in push_jmp_history()
3899 env->insn_idx, env->cur_hist_ent->flags, insn_flags); in push_jmp_history()
3900 env->cur_hist_ent->flags |= insn_flags; in push_jmp_history()
3901 verifier_bug_if(env->cur_hist_ent->linked_regs != 0, env, in push_jmp_history()
3903 env->insn_idx, env->cur_hist_ent->linked_regs); in push_jmp_history()
3904 env->cur_hist_ent->linked_regs = linked_regs; in push_jmp_history()
3910 p = krealloc(cur->jmp_history, alloc_size, GFP_KERNEL_ACCOUNT); in push_jmp_history()
3912 return -ENOMEM; in push_jmp_history()
3913 cur->jmp_history = p; in push_jmp_history()
3915 p = &cur->jmp_history[cnt - 1]; in push_jmp_history()
3916 p->idx = env->insn_idx; in push_jmp_history()
3917 p->prev_idx = env->prev_insn_idx; in push_jmp_history()
3918 p->flags = insn_flags; in push_jmp_history()
3919 p->linked_regs = linked_regs; in push_jmp_history()
3920 cur->jmp_history_cnt = cnt; in push_jmp_history()
3921 env->cur_hist_ent = p; in push_jmp_history()
3929 if (hist_end > 0 && st->jmp_history[hist_end - 1].idx == insn_idx) in get_jmp_hist_entry()
3930 return &st->jmp_history[hist_end - 1]; in get_jmp_hist_entry()
3936 * Return -ENOENT if we exhausted all instructions within given state.
3939 * insn index within the same state, e.g.: 3->4->5->3, so just because current
3952 if (i == st->first_insn_idx) { in get_prev_insn_idx()
3954 return -ENOENT; in get_prev_insn_idx()
3955 if (cnt == 1 && st->jmp_history[0].idx == i) in get_prev_insn_idx()
3956 return -ENOENT; in get_prev_insn_idx()
3959 if (cnt && st->jmp_history[cnt - 1].idx == i) { in get_prev_insn_idx()
3960 i = st->jmp_history[cnt - 1].prev_idx; in get_prev_insn_idx()
3961 (*history)--; in get_prev_insn_idx()
3963 i--; in get_prev_insn_idx()
3973 if (insn->src_reg != BPF_PSEUDO_KFUNC_CALL) in disasm_kfunc_name()
3976 desc_btf = find_kfunc_desc_btf(data, insn->off); in disasm_kfunc_name()
3980 func = btf_type_by_id(desc_btf, insn->imm); in disasm_kfunc_name()
3981 return btf_name_by_offset(desc_btf, func->name_off); in disasm_kfunc_name()
3992 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks); in verbose_insn()
3997 bt->frame = frame; in bt_init()
4002 struct bpf_verifier_env *env = bt->env; in bt_reset()
4005 bt->env = env; in bt_reset()
4013 for (i = 0; i <= bt->frame; i++) in bt_empty()
4014 mask |= bt->reg_masks[i] | bt->stack_masks[i]; in bt_empty()
4021 if (bt->frame == MAX_CALL_FRAMES - 1) { in bt_subprog_enter()
4022 verifier_bug(bt->env, "subprog enter from frame %d", bt->frame); in bt_subprog_enter()
4023 return -EFAULT; in bt_subprog_enter()
4025 bt->frame++; in bt_subprog_enter()
4031 if (bt->frame == 0) { in bt_subprog_exit()
4032 verifier_bug(bt->env, "subprog exit from frame 0"); in bt_subprog_exit()
4033 return -EFAULT; in bt_subprog_exit()
4035 bt->frame--; in bt_subprog_exit()
4041 bt->reg_masks[frame] |= 1 << reg; in bt_set_frame_reg()
4046 bt->reg_masks[frame] &= ~(1 << reg); in bt_clear_frame_reg()
4051 bt_set_frame_reg(bt, bt->frame, reg); in bt_set_reg()
4056 bt_clear_frame_reg(bt, bt->frame, reg); in bt_clear_reg()
4061 bt->stack_masks[frame] |= 1ull << slot; in bt_set_frame_slot()
4066 bt->stack_masks[frame] &= ~(1ull << slot); in bt_clear_frame_slot()
4071 return bt->reg_masks[frame]; in bt_frame_reg_mask()
4076 return bt->reg_masks[bt->frame]; in bt_reg_mask()
4081 return bt->stack_masks[frame]; in bt_frame_stack_mask()
4086 return bt->stack_masks[bt->frame]; in bt_stack_mask()
4091 return bt->reg_masks[bt->frame] & (1 << reg); in bt_is_reg_set()
4096 return bt->reg_masks[frame] & (1 << reg); in bt_is_frame_reg_set()
4101 return bt->stack_masks[frame] & (1ull << slot); in bt_is_frame_slot_set()
4118 buf_sz -= n; in fmt_reg_mask()
4123 /* format stack slots bitmask, e.g., "-8,-24,-40" for 0x15 mask */
4134 n = snprintf(buf, buf_sz, "%s%d", first ? "" : ",", -(i + 1) * 8); in bpf_fmt_stack_mask()
4137 buf_sz -= n; in bpf_fmt_stack_mask()
4143 /* If any register R in hist->linked_regs is marked as precise in bt,
4144 * do bt_set_frame_{reg,slot}(bt, R) for all registers in hist->linked_regs.
4152 if (!hist || hist->linked_regs == 0) in bt_sync_linked_regs()
4155 linked_regs_unpack(hist->linked_regs, &linked_regs); in bt_sync_linked_regs()
4159 if ((e->is_reg && bt_is_frame_reg_set(bt, e->frameno, e->regno)) || in bt_sync_linked_regs()
4160 (!e->is_reg && bt_is_frame_slot_set(bt, e->frameno, e->spi))) { in bt_sync_linked_regs()
4172 if (e->is_reg) in bt_sync_linked_regs()
4173 bt_set_frame_reg(bt, e->frameno, e->regno); in bt_sync_linked_regs()
4175 bt_set_frame_slot(bt, e->frameno, e->spi); in bt_sync_linked_regs()
4185 * - *would be* executed next, if jump history is viewed in forward order;
4186 * - *was* processed previously during backtracking.
4191 struct bpf_insn *insn = env->prog->insnsi + idx; in backtrack_insn()
4192 u8 class = BPF_CLASS(insn->code); in backtrack_insn()
4193 u8 opcode = BPF_OP(insn->code); in backtrack_insn()
4194 u8 mode = BPF_MODE(insn->code); in backtrack_insn()
4195 u32 dreg = insn->dst_reg; in backtrack_insn()
4196 u32 sreg = insn->src_reg; in backtrack_insn()
4199 if (insn->code == 0) in backtrack_insn()
4201 if (env->log.level & BPF_LOG_LEVEL2) { in backtrack_insn()
4202 fmt_reg_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, bt_reg_mask(bt)); in backtrack_insn()
4204 bt->frame, env->tmp_str_buf); in backtrack_insn()
4205 bpf_fmt_stack_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, bt_stack_mask(bt)); in backtrack_insn()
4206 verbose(env, "stack=%s before ", env->tmp_str_buf); in backtrack_insn()
4226 if (BPF_SRC(insn->code) == BPF_X) { in backtrack_insn()
4244 if (BPF_SRC(insn->code) == BPF_X) { in backtrack_insn()
4266 if (!hist || !(hist->flags & INSN_F_STACK_ACCESS)) in backtrack_insn()
4268 /* dreg = *(u64 *)[fp - off] was a fill from the stack. in backtrack_insn()
4269 * that [fp - off] slot contains scalar that needs to be in backtrack_insn()
4272 spi = insn_stack_access_spi(hist->flags); in backtrack_insn()
4273 fr = insn_stack_access_frameno(hist->flags); in backtrack_insn()
4281 return -ENOTSUPP; in backtrack_insn()
4283 if (!hist || !(hist->flags & INSN_F_STACK_ACCESS)) in backtrack_insn()
4285 spi = insn_stack_access_spi(hist->flags); in backtrack_insn()
4286 fr = insn_stack_access_frameno(hist->flags); in backtrack_insn()
4296 subprog_insn_idx = idx + insn->imm + 1; in backtrack_insn()
4299 return -EFAULT; in backtrack_insn()
4310 /* r1-r5 are invalidated after subprog call, in backtrack_insn()
4317 return -EFAULT; in backtrack_insn()
4325 * so only r1-r5 could be still requested as in backtrack_insn()
4326 * precise, r0 and r6-r10 or any stack slot in in backtrack_insn()
4332 return -EFAULT; in backtrack_insn()
4341 return -EFAULT; in backtrack_insn()
4343 /* propagate r1-r5 to the caller */ in backtrack_insn()
4347 bt_set_frame_reg(bt, bt->frame - 1, i); in backtrack_insn()
4351 return -EFAULT; in backtrack_insn()
4354 } else if (is_sync_callback_calling_insn(insn) && idx != subseq_idx - 1) { in backtrack_insn()
4355 /* exit from callback subprog to callback-calling helper or in backtrack_insn()
4359 * propagate precision of r1-r5 (if any requested), as they are in backtrack_insn()
4365 return -EFAULT; in backtrack_insn()
4370 return -EFAULT; in backtrack_insn()
4372 /* clear r1-r5 in callback subprog's mask */ in backtrack_insn()
4376 return -EFAULT; in backtrack_insn()
4383 if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL && insn->imm == 0) in backtrack_insn()
4384 return -ENOTSUPP; in backtrack_insn()
4388 /* if backtracking was looking for registers R1-R5 in backtrack_insn()
4393 return -EFAULT; in backtrack_insn()
4401 * precision to registers R1-R5 should have been found already. in backtrack_insn()
4402 * In case of a callback, it is ok to have R1-R5 marked for in backtrack_insn()
4412 return -EFAULT; in backtrack_insn()
4417 * whether the instruction at subseq_idx-1 is subprog in backtrack_insn()
4423 r0_precise = subseq_idx - 1 >= 0 && in backtrack_insn()
4424 bpf_pseudo_call(&env->prog->insnsi[subseq_idx - 1]) && in backtrack_insn()
4429 return -EFAULT; in backtrack_insn()
4433 /* r6-r9 and stack slots will stay set in caller frame in backtrack_insn()
4437 } else if (BPF_SRC(insn->code) == BPF_X) { in backtrack_insn()
4446 if (!hist || !(hist->flags & INSN_F_SRC_REG_STACK)) in backtrack_insn()
4448 if (!hist || !(hist->flags & INSN_F_DST_REG_STACK)) in backtrack_insn()
4450 } else if (BPF_SRC(insn->code) == BPF_K) { in backtrack_insn()
4453 * this insn, so for the K-based conditional in backtrack_insn()
4467 return -ENOTSUPP; in backtrack_insn()
4504 * r9 -= r8
4535 if (env->log.level & BPF_LOG_LEVEL2) { in mark_all_scalars_precise()
4537 st->curframe); in mark_all_scalars_precise()
4543 * because precision markings in current non-checkpointed state are in mark_all_scalars_precise()
4546 for (st = st->parent; st; st = st->parent) { in mark_all_scalars_precise()
4547 for (i = 0; i <= st->curframe; i++) { in mark_all_scalars_precise()
4548 func = st->frame[i]; in mark_all_scalars_precise()
4550 reg = &func->regs[j]; in mark_all_scalars_precise()
4551 if (reg->type != SCALAR_VALUE || reg->precise) in mark_all_scalars_precise()
4553 reg->precise = true; in mark_all_scalars_precise()
4554 if (env->log.level & BPF_LOG_LEVEL2) { in mark_all_scalars_precise()
4559 for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { in mark_all_scalars_precise()
4560 if (!is_spilled_reg(&func->stack[j])) in mark_all_scalars_precise()
4562 reg = &func->stack[j].spilled_ptr; in mark_all_scalars_precise()
4563 if (reg->type != SCALAR_VALUE || reg->precise) in mark_all_scalars_precise()
4565 reg->precise = true; in mark_all_scalars_precise()
4566 if (env->log.level & BPF_LOG_LEVEL2) { in mark_all_scalars_precise()
4568 i, -(j + 1) * 8); in mark_all_scalars_precise()
4581 for (i = 0; i <= st->curframe; i++) { in mark_all_scalars_imprecise()
4582 func = st->frame[i]; in mark_all_scalars_imprecise()
4584 reg = &func->regs[j]; in mark_all_scalars_imprecise()
4585 if (reg->type != SCALAR_VALUE) in mark_all_scalars_imprecise()
4587 reg->precise = false; in mark_all_scalars_imprecise()
4589 for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { in mark_all_scalars_imprecise()
4590 if (!is_spilled_reg(&func->stack[j])) in mark_all_scalars_imprecise()
4592 reg = &func->stack[j].spilled_ptr; in mark_all_scalars_imprecise()
4593 if (reg->type != SCALAR_VALUE) in mark_all_scalars_imprecise()
4595 reg->precise = false; in mark_all_scalars_imprecise()
4617 * i.e., it is not yet put into env->explored_states, and it has no children
4620 * reached or b) checkpointed and put into env->explored_states, branching out
4693 struct backtrack_state *bt = &env->bt; in __mark_chain_precision()
4694 int first_idx = st->first_insn_idx; in __mark_chain_precision()
4695 int last_idx = starting_state->insn_idx; in __mark_chain_precision()
4696 int subseq_idx = -1; in __mark_chain_precision()
4702 if (!env->bpf_capable) in __mark_chain_precision()
4707 bt_init(bt, starting_state->curframe); in __mark_chain_precision()
4713 func = st->frame[bt->frame]; in __mark_chain_precision()
4715 reg = &func->regs[regno]; in __mark_chain_precision()
4716 if (reg->type != SCALAR_VALUE) { in __mark_chain_precision()
4718 return -EFAULT; in __mark_chain_precision()
4728 u32 history = st->jmp_history_cnt; in __mark_chain_precision()
4731 if (env->log.level & BPF_LOG_LEVEL2) { in __mark_chain_precision()
4733 bt->frame, last_idx, first_idx, subseq_idx); in __mark_chain_precision()
4739 * requested precise registers are R1-R5 in __mark_chain_precision()
4742 if (st->curframe == 0 && in __mark_chain_precision()
4743 st->frame[0]->subprogno > 0 && in __mark_chain_precision()
4744 st->frame[0]->callsite == BPF_MAIN_FUNC && in __mark_chain_precision()
4749 reg = &st->frame[0]->regs[i]; in __mark_chain_precision()
4751 if (reg->type == SCALAR_VALUE) { in __mark_chain_precision()
4752 reg->precise = true; in __mark_chain_precision()
4760 st->frame[0]->subprogno, bt_reg_mask(bt), bt_stack_mask(bt)); in __mark_chain_precision()
4761 return -EFAULT; in __mark_chain_precision()
4772 if (err == -ENOTSUPP) { in __mark_chain_precision()
4787 if (i == -ENOENT) in __mark_chain_precision()
4789 if (i >= env->prog->len) { in __mark_chain_precision()
4797 return -EFAULT; in __mark_chain_precision()
4800 st = st->parent; in __mark_chain_precision()
4804 for (fr = bt->frame; fr >= 0; fr--) { in __mark_chain_precision()
4805 func = st->frame[fr]; in __mark_chain_precision()
4808 reg = &func->regs[i]; in __mark_chain_precision()
4809 if (reg->type != SCALAR_VALUE) { in __mark_chain_precision()
4813 if (reg->precise) { in __mark_chain_precision()
4816 reg->precise = true; in __mark_chain_precision()
4823 if (verifier_bug_if(i >= func->allocated_stack / BPF_REG_SIZE, in __mark_chain_precision()
4825 i, func->allocated_stack / BPF_REG_SIZE)) in __mark_chain_precision()
4826 return -EFAULT; in __mark_chain_precision()
4828 if (!is_spilled_scalar_reg(&func->stack[i])) { in __mark_chain_precision()
4832 reg = &func->stack[i].spilled_ptr; in __mark_chain_precision()
4833 if (reg->precise) { in __mark_chain_precision()
4836 reg->precise = true; in __mark_chain_precision()
4840 if (env->log.level & BPF_LOG_LEVEL2) { in __mark_chain_precision()
4841 fmt_reg_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, in __mark_chain_precision()
4844 fr, env->tmp_str_buf); in __mark_chain_precision()
4845 bpf_fmt_stack_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, in __mark_chain_precision()
4847 verbose(env, "stack=%s: ", env->tmp_str_buf); in __mark_chain_precision()
4856 last_idx = st->last_insn_idx; in __mark_chain_precision()
4857 first_idx = st->first_insn_idx; in __mark_chain_precision()
4861 * something (e.g., stack access through non-r10 register), so in __mark_chain_precision()
4874 return __mark_chain_precision(env, env->cur_state, regno, NULL); in mark_chain_precision()
4877 /* mark_chain_precision_batch() assumes that env->bt is set in the caller to
4883 return __mark_chain_precision(env, starting_state, -1, NULL); in mark_chain_precision_batch()
4916 return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0); in register_is_null()
4922 return reg->type == SCALAR_VALUE && in is_reg_const()
4923 tnum_is_const(subreg32 ? tnum_subreg(reg->var_off) : reg->var_off); in is_reg_const()
4929 return subreg32 ? tnum_subreg(reg->var_off).value : reg->var_off.value; in reg_const_value()
4938 return reg->type != SCALAR_VALUE; in __is_pointer_value()
4944 if (src_reg->type != SCALAR_VALUE) in assign_scalar_id_before_mov()
4947 if (src_reg->id & BPF_ADD_CONST) { in assign_scalar_id_before_mov()
4950 * rY->id has special linked register already. in assign_scalar_id_before_mov()
4953 src_reg->id = 0; in assign_scalar_id_before_mov()
4954 src_reg->off = 0; in assign_scalar_id_before_mov()
4957 if (!src_reg->id && !tnum_is_const(src_reg->var_off)) in assign_scalar_id_before_mov()
4962 src_reg->id = ++env->id_gen; in assign_scalar_id_before_mov()
4965 /* Copy src state preserving dst->parent and dst->live fields */
4978 copy_register_state(&state->stack[spi].spilled_ptr, reg); in save_register_state()
4980 for (i = BPF_REG_SIZE; i > BPF_REG_SIZE - size; i--) in save_register_state()
4981 state->stack[spi].slot_type[i - 1] = STACK_SPILL; in save_register_state()
4984 for (; i; i--) in save_register_state()
4985 mark_stack_slot_misc(env, &state->stack[spi].slot_type[i - 1]); in save_register_state()
4990 return BPF_CLASS(insn->code) == BPF_ST && BPF_MODE(insn->code) == BPF_MEM; in is_bpf_st_mem()
4995 return fls64(reg->umax_value); in get_reg_width()
5002 struct bpf_subprog_info *subprog = &env->subprog_info[state->subprogno]; in check_fastcall_stack_contract()
5003 struct bpf_insn_aux_data *aux = env->insn_aux_data; in check_fastcall_stack_contract()
5006 if (subprog->fastcall_stack_off <= off || aux[insn_idx].fastcall_pattern) in check_fastcall_stack_contract()
5013 subprog->fastcall_stack_off = S16_MIN; in check_fastcall_stack_contract()
5017 for (i = subprog->start; i < (subprog + 1)->start; ++i) { in check_fastcall_stack_contract()
5033 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err; in check_stack_write_fixed_off()
5034 struct bpf_insn *insn = &env->prog->insnsi[insn_idx]; in check_stack_write_fixed_off()
5036 int insn_flags = insn_stack_access_flags(state->frameno, spi); in check_stack_write_fixed_off()
5038 /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0, in check_stack_write_fixed_off()
5041 if (!env->allow_ptr_leaks && in check_stack_write_fixed_off()
5042 is_spilled_reg(&state->stack[spi]) && in check_stack_write_fixed_off()
5043 !is_spilled_scalar_reg(&state->stack[spi]) && in check_stack_write_fixed_off()
5046 return -EACCES; in check_stack_write_fixed_off()
5049 cur = env->cur_state->frame[env->cur_state->curframe]; in check_stack_write_fixed_off()
5051 reg = &cur->regs[value_regno]; in check_stack_write_fixed_off()
5052 if (!env->bypass_spec_v4) { in check_stack_write_fixed_off()
5053 bool sanitize = reg && is_spillable_regtype(reg->type); in check_stack_write_fixed_off()
5056 u8 type = state->stack[spi].slot_type[i]; in check_stack_write_fixed_off()
5065 env->insn_aux_data[insn_idx].nospec_result = true; in check_stack_write_fixed_off()
5081 bpf_mark_stack_write(env, state->frameno, BIT(spi)); in check_stack_write_fixed_off()
5086 if (reg && !(off % BPF_REG_SIZE) && reg->type == SCALAR_VALUE && env->bpf_capable) { in check_stack_write_fixed_off()
5096 state->stack[spi].spilled_ptr.id = 0; in check_stack_write_fixed_off()
5098 env->bpf_capable) { in check_stack_write_fixed_off()
5099 struct bpf_reg_state *tmp_reg = &env->fake_reg[0]; in check_stack_write_fixed_off()
5102 __mark_reg_known(tmp_reg, insn->imm); in check_stack_write_fixed_off()
5103 tmp_reg->type = SCALAR_VALUE; in check_stack_write_fixed_off()
5105 } else if (reg && is_spillable_regtype(reg->type)) { in check_stack_write_fixed_off()
5110 return -EACCES; in check_stack_write_fixed_off()
5112 if (state != cur && reg->type == PTR_TO_STACK) { in check_stack_write_fixed_off()
5114 return -EINVAL; in check_stack_write_fixed_off()
5121 state->stack[spi].spilled_ptr.type = NOT_INIT; in check_stack_write_fixed_off()
5123 if (is_stack_slot_special(&state->stack[spi])) in check_stack_write_fixed_off()
5125 scrub_spilled_slot(&state->stack[spi].slot_type[i]); in check_stack_write_fixed_off()
5129 (!reg && is_bpf_st_mem(insn) && insn->imm == 0)) { in check_stack_write_fixed_off()
5144 state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] = type; in check_stack_write_fixed_off()
5149 return push_jmp_history(env, env->cur_state, insn_flags, 0); in check_stack_write_fixed_off()
5159 * 'off' includes 'regno->off'.
5160 * 'value_regno' can be -1, meaning that an unknown value is being written to
5182 struct bpf_insn *insn = &env->prog->insnsi[insn_idx]; in check_stack_write_var_off()
5189 cur = env->cur_state->frame[env->cur_state->curframe]; in check_stack_write_var_off()
5190 ptr_reg = &cur->regs[ptr_regno]; in check_stack_write_var_off()
5191 min_off = ptr_reg->smin_value + off; in check_stack_write_var_off()
5192 max_off = ptr_reg->smax_value + off + size; in check_stack_write_var_off()
5194 value_reg = &cur->regs[value_regno]; in check_stack_write_var_off()
5196 (!value_reg && is_bpf_st_mem(insn) && insn->imm == 0)) in check_stack_write_var_off()
5214 slot = -i - 1; in check_stack_write_var_off()
5216 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; in check_stack_write_var_off()
5219 if (!env->allow_ptr_leaks && *stype != STACK_MISC && *stype != STACK_ZERO) { in check_stack_write_var_off()
5231 verbose(env, "spilled ptr in range of var-offset stack write; insn %d, ptr off: %d", in check_stack_write_var_off()
5233 return -EINVAL; in check_stack_write_var_off()
5240 is_spilled_scalar_reg(&state->stack[spi])) { in check_stack_write_var_off()
5241 struct bpf_reg_state *spill_reg = &state->stack[spi].spilled_ptr; in check_stack_write_var_off()
5243 if (tnum_is_const(spill_reg->var_off) && spill_reg->var_off.value == 0) { in check_stack_write_var_off()
5250 state->stack[spi].spilled_ptr.type = NOT_INIT; in check_stack_write_var_off()
5266 if (*stype == STACK_INVALID && !env->allow_uninit_stack) { in check_stack_write_var_off()
5267 verbose(env, "uninit stack in range of var-offset write prohibited for !root; insn %d, off: %d", in check_stack_write_var_off()
5269 return -EINVAL; in check_stack_write_var_off()
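/*
 * Editorial sketch, restricted C as compiled for BPF (not part of
 * verifier.c): a store through a stack pointer with a bounded variable
 * offset is the case check_stack_write_var_off() validates - every slot in
 * [min_off, max_off) must be writable and must not overlap a spilled
 * pointer:
 *
 *	char buf[16] = {};
 *	int idx = bpf_get_prandom_u32() & 15;	// scalar bounded to [0, 15]
 *
 *	buf[idx] = 1;				// variable-offset stack write
 */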
5295 struct bpf_verifier_state *vstate = env->cur_state; in mark_reg_stack_read()
5296 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in mark_reg_stack_read()
5302 slot = -i - 1; in mark_reg_stack_read()
5305 stype = ptr_state->stack[spi].slot_type; in mark_reg_stack_read()
5310 if (zeros == max_off - min_off) { in mark_reg_stack_read()
5314 __mark_reg_const_zero(env, &state->regs[dst_regno]); in mark_reg_stack_read()
5317 mark_reg_unknown(env, state->regs, dst_regno); in mark_reg_stack_read()
5325 * 'dst_regno' can be -1, meaning that the read value is not going to a
5335 struct bpf_verifier_state *vstate = env->cur_state; in check_stack_read_fixed_off()
5336 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in check_stack_read_fixed_off()
5337 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE; in check_stack_read_fixed_off()
5340 int insn_flags = insn_stack_access_flags(reg_state->frameno, spi); in check_stack_read_fixed_off()
5343 stype = reg_state->stack[spi].slot_type; in check_stack_read_fixed_off()
5344 reg = &reg_state->stack[spi].spilled_ptr; in check_stack_read_fixed_off()
5347 check_fastcall_stack_contract(env, state, env->insn_idx, off); in check_stack_read_fixed_off()
5348 err = bpf_mark_stack_read(env, reg_state->frameno, env->insn_idx, BIT(spi)); in check_stack_read_fixed_off()
5352 if (is_spilled_reg(&reg_state->stack[spi])) { in check_stack_read_fixed_off()
5355 for (i = BPF_REG_SIZE - 1; i > 0 && stype[i - 1] == STACK_SPILL; i--) in check_stack_read_fixed_off()
5359 if (reg->type != SCALAR_VALUE) { in check_stack_read_fixed_off()
5360 verbose_linfo(env, env->insn_idx, "; "); in check_stack_read_fixed_off()
5362 return -EACCES; in check_stack_read_fixed_off()
5373 s32 subreg_def = state->regs[dst_regno].subreg_def; in check_stack_read_fixed_off()
5375 copy_register_state(&state->regs[dst_regno], reg); in check_stack_read_fixed_off()
5376 state->regs[dst_regno].subreg_def = subreg_def; in check_stack_read_fixed_off()
5382 state->regs[dst_regno].id = 0; in check_stack_read_fixed_off()
5387 type = stype[(slot - i) % BPF_REG_SIZE]; in check_stack_read_fixed_off()
5398 if (type == STACK_INVALID && env->allow_uninit_stack) in check_stack_read_fixed_off()
5402 return -EACCES; in check_stack_read_fixed_off()
5406 tnum_is_const(reg->var_off) && reg->var_off.value == 0) { in check_stack_read_fixed_off()
5407 __mark_reg_const_zero(env, &state->regs[dst_regno]); in check_stack_read_fixed_off()
5411 __mark_reg_const_zero(env, &state->regs[dst_regno]); in check_stack_read_fixed_off()
5414 mark_reg_unknown(env, state->regs, dst_regno); in check_stack_read_fixed_off()
5420 copy_register_state(&state->regs[dst_regno], reg); in check_stack_read_fixed_off()
5425 } else if (__is_pointer_value(env->allow_ptr_leaks, reg)) { in check_stack_read_fixed_off()
5426 /* If dst_regno==-1, the caller is asking us whether in check_stack_read_fixed_off()
5434 return -EACCES; in check_stack_read_fixed_off()
5438 type = stype[(slot - i) % BPF_REG_SIZE]; in check_stack_read_fixed_off()
5443 if (type == STACK_INVALID && env->allow_uninit_stack) in check_stack_read_fixed_off()
5447 return -EACCES; in check_stack_read_fixed_off()
5454 return push_jmp_history(env, env->cur_state, insn_flags, 0); in check_stack_read_fixed_off()
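/*
 * Editorial sketch, not part of verifier.c: the fixed-offset spill/fill pair
 * handled by check_stack_write_fixed_off() and check_stack_read_fixed_off().
 * The 64-bit store marks fp-8 as STACK_SPILL and saves R1's state; the later
 * 64-bit load copies that state back into R2:
 *
 *	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
 *	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
 */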
5503 min_off = reg->smin_value + off; in check_stack_read_var_off()
5504 max_off = reg->smax_value + off; in check_stack_read_var_off()
5506 check_fastcall_stack_contract(env, ptr_state, env->insn_idx, min_off); in check_stack_read_var_off()
5517 * can be -1, meaning that the read value is not going to a register.
5527 bool var_off = !tnum_is_const(reg->var_off); in check_stack_read()
5536 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in check_stack_read()
5539 return -EACCES; in check_stack_read()
5551 off += reg->var_off.value; in check_stack_read()
5570 * 'off' includes 'ptr_regno->off', but not its variable offset (if any).
5572 * be -1, meaning that we're not writing from a register.
5584 if (tnum_is_const(reg->var_off)) { in check_stack_write()
5585 off += reg->var_off.value; in check_stack_write()
5608 map->value_size, off, size); in check_map_access_type()
5609 return -EACCES; in check_map_access_type()
5614 map->value_size, off, size); in check_map_access_type()
5615 return -EACCES; in check_map_access_type()
5633 switch (reg->type) { in __check_mem_access()
5646 off, size, regno, reg->id, off, mem_size); in __check_mem_access()
5654 return -EACCES; in __check_mem_access()
5662 struct bpf_verifier_state *vstate = env->cur_state; in check_mem_region_access()
5663 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in check_mem_region_access()
5664 struct bpf_reg_state *reg = &state->regs[regno]; in check_mem_region_access()
5677 if (reg->smin_value < 0 && in check_mem_region_access()
5678 (reg->smin_value == S64_MIN || in check_mem_region_access()
5679 (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) || in check_mem_region_access()
5680 reg->smin_value + off < 0)) { in check_mem_region_access()
5683 return -EACCES; in check_mem_region_access()
5685 err = __check_mem_access(env, regno, reg->smin_value + off, size, in check_mem_region_access()
5695 * If reg->umax_value + off could overflow, treat that as unbounded too. in check_mem_region_access()
5697 if (reg->umax_value >= BPF_MAX_VAR_OFF) { in check_mem_region_access()
5700 return -EACCES; in check_mem_region_access()
5702 err = __check_mem_access(env, regno, reg->umax_value + off, size, in check_mem_region_access()
5717 /* Access to this pointer-typed register or passing it to a helper in __check_ptr_off_reg()
5721 if (reg->off < 0) { in __check_ptr_off_reg()
5723 reg_type_str(env, reg->type), regno, reg->off); in __check_ptr_off_reg()
5724 return -EACCES; in __check_ptr_off_reg()
5727 if (!fixed_off_ok && reg->off) { in __check_ptr_off_reg()
5729 reg_type_str(env, reg->type), regno, reg->off); in __check_ptr_off_reg()
5730 return -EACCES; in __check_ptr_off_reg()
5733 if (!tnum_is_const(reg->var_off) || reg->var_off.value) { in __check_ptr_off_reg()
5736 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in __check_ptr_off_reg()
5738 reg_type_str(env, reg->type), tn_buf); in __check_ptr_off_reg()
5739 return -EACCES; in __check_ptr_off_reg()
5755 const char *targ_name = btf_type_name(kptr_field->kptr.btf, kptr_field->kptr.btf_id); in map_kptr_match_type()
5759 if (btf_is_kernel(reg->btf)) { in map_kptr_match_type()
5763 if (kptr_field->type == BPF_KPTR_UNREF) in map_kptr_match_type()
5767 if (kptr_field->type == BPF_KPTR_PERCPU) in map_kptr_match_type()
5771 if (base_type(reg->type) != PTR_TO_BTF_ID || (type_flag(reg->type) & ~perm_flags)) in map_kptr_match_type()
5774 /* We need to verify reg->type and reg->btf, before accessing reg->btf */ in map_kptr_match_type()
5775 reg_name = btf_type_name(reg->btf, reg->btf_id); in map_kptr_match_type()
5781 * reg->off and reg->ref_obj_id are not needed here. in map_kptr_match_type()
5784 return -EACCES; in map_kptr_match_type()
5787 * we also need to take into account the reg->off. in map_kptr_match_type()
5798 * val->foo = v; // reg->off is zero, btf and btf_id match type in map_kptr_match_type()
5799 * val->bar = &v->br; // reg->off is still zero, but we need to retry with in map_kptr_match_type()
5801 * val->baz = &v->bz; // reg->off is non-zero, so struct needs to be walked in map_kptr_match_type()
5804 * In the kptr_ref case, check_func_arg_reg_off already ensures reg->off in map_kptr_match_type()
5810 if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off, in map_kptr_match_type()
5811 kptr_field->kptr.btf, kptr_field->kptr.btf_id, in map_kptr_match_type()
5812 kptr_field->type != BPF_KPTR_UNREF)) in map_kptr_match_type()
5817 reg_type_str(env, reg->type), reg_name); in map_kptr_match_type()
5819 if (kptr_field->type == BPF_KPTR_UNREF) in map_kptr_match_type()
5824 return -EINVAL; in map_kptr_match_type()
5829 return env->prog->sleepable || in in_sleepable()
5830 (env->cur_state && env->cur_state->in_sleepable); in in_sleepable()
5833 /* The non-sleepable programs and sleepable programs with explicit bpf_rcu_read_lock()
5838 return env->cur_state->active_rcu_lock || in in_rcu_cs()
5839 env->cur_state->active_locks || in in_rcu_cs()
5871 if (btf_is_kernel(kptr_field->kptr.btf)) in kptr_pointee_btf_record()
5874 meta = btf_find_struct_meta(kptr_field->kptr.btf, in kptr_pointee_btf_record()
5875 kptr_field->kptr.btf_id); in kptr_pointee_btf_record()
5877 return meta ? meta->record : NULL; in kptr_pointee_btf_record()
5882 const struct btf_field_kptr *kptr = &field->kptr; in rcu_safe_kptr()
5884 return field->type == BPF_KPTR_PERCPU || in rcu_safe_kptr()
5885 (field->type == BPF_KPTR_REF && rcu_protected_object(kptr->btf, kptr->btf_id)); in rcu_safe_kptr()
5896 if (kptr_field->type == BPF_KPTR_PERCPU) in btf_ld_kptr_type()
5898 else if (!btf_is_kernel(kptr_field->kptr.btf)) in btf_ld_kptr_type()
5917 t = btf_type_by_id(field->kptr.btf, field->kptr.btf_id); in mark_uptr_ld_reg()
5920 reg->type = PTR_TO_MEM | PTR_MAYBE_NULL; in mark_uptr_ld_reg()
5921 reg->mem_size = t->size; in mark_uptr_ld_reg()
5922 reg->id = ++env->id_gen; in mark_uptr_ld_reg()
5931 struct bpf_insn *insn = &env->prog->insnsi[insn_idx]; in check_map_kptr_access()
5932 int class = BPF_CLASS(insn->code); in check_map_kptr_access()
5937 * - Reject cases where variable offset may touch kptr in check_map_kptr_access()
5938 * - size of access (must be BPF_DW) in check_map_kptr_access()
5939 * - tnum_is_const(reg->var_off) in check_map_kptr_access()
5940 * - kptr_field->offset == off + reg->var_off.value in check_map_kptr_access()
5943 if (BPF_MODE(insn->code) != BPF_MEM) { in check_map_kptr_access()
5945 return -EACCES; in check_map_kptr_access()
5952 (kptr_field->type == BPF_KPTR_REF || kptr_field->type == BPF_KPTR_PERCPU)) { in check_map_kptr_access()
5954 return -EACCES; in check_map_kptr_access()
5956 if (class != BPF_LDX && kptr_field->type == BPF_UPTR) { in check_map_kptr_access()
5958 return -EACCES; in check_map_kptr_access()
5962 if (kptr_field->type == BPF_UPTR) in check_map_kptr_access()
5969 kptr_field->kptr.btf, kptr_field->kptr.btf_id, in check_map_kptr_access()
5977 return -EACCES; in check_map_kptr_access()
5979 if (insn->imm) { in check_map_kptr_access()
5981 kptr_field->offset); in check_map_kptr_access()
5982 return -EACCES; in check_map_kptr_access()
5986 return -EACCES; in check_map_kptr_access()
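/*
 * Editorial sketch, restricted C as compiled for BPF (not part of
 * verifier.c; map 'm', key 'key' and the usual bpf_helpers.h definitions are
 * assumed): check_map_kptr_access() only accepts BPF_DW accesses at a
 * constant offset that lands exactly on the kptr field.  Loads of a
 * referenced kptr are marked untrusted or MEM_RCU, and moving the reference
 * out of the map has to go through bpf_kptr_xchg():
 *
 *	struct map_val {
 *		struct task_struct __kptr *task;
 *	};
 *	...
 *	struct map_val *v = bpf_map_lookup_elem(&m, &key);
 *	struct task_struct *t;
 *
 *	if (v) {
 *		t = bpf_kptr_xchg(&v->task, NULL);	// reference leaves the map
 *		if (t)
 *			bpf_task_release(t);
 *	}
 */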
5996 struct bpf_verifier_state *vstate = env->cur_state; in check_map_access()
5997 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in check_map_access()
5998 struct bpf_reg_state *reg = &state->regs[regno]; in check_map_access()
5999 struct bpf_map *map = reg->map_ptr; in check_map_access()
6003 err = check_mem_region_access(env, regno, off, size, map->value_size, in check_map_access()
6008 if (IS_ERR_OR_NULL(map->record)) in check_map_access()
6010 rec = map->record; in check_map_access()
6011 for (i = 0; i < rec->cnt; i++) { in check_map_access()
6012 struct btf_field *field = &rec->fields[i]; in check_map_access()
6013 u32 p = field->offset; in check_map_access()
6019 if (reg->smin_value + off < p + field->size && in check_map_access()
6020 p < reg->umax_value + off + size) { in check_map_access()
6021 switch (field->type) { in check_map_access()
6028 btf_field_type_name(field->type)); in check_map_access()
6029 return -EACCES; in check_map_access()
6031 if (!tnum_is_const(reg->var_off)) { in check_map_access()
6033 btf_field_type_name(field->type)); in check_map_access()
6034 return -EACCES; in check_map_access()
6036 if (p != off + reg->var_off.value) { in check_map_access()
6038 btf_field_type_name(field->type), in check_map_access()
6039 p, off + reg->var_off.value); in check_map_access()
6040 return -EACCES; in check_map_access()
6044 btf_field_type_name(field->type)); in check_map_access()
6045 return -EACCES; in check_map_access()
6050 btf_field_type_name(field->type)); in check_map_access()
6051 return -EACCES; in check_map_access()
6064 enum bpf_prog_type prog_type = resolve_prog_type(env->prog); in may_access_direct_pkt_data()
6086 return meta->pkt_access; in may_access_direct_pkt_data()
6088 env->seen_direct_write = true; in may_access_direct_pkt_data()
6093 env->seen_direct_write = true; in may_access_direct_pkt_data()
6110 * reg->range we have comes after that. We are only checking the fixed in check_packet_access()
6117 if (reg->smin_value < 0) { in check_packet_access()
6120 return -EACCES; in check_packet_access()
6123 err = reg->range < 0 ? -EINVAL : in check_packet_access()
6124 __check_mem_access(env, regno, off, size, reg->range, in check_packet_access()
6131 /* __check_mem_access has made sure "off + size - 1" is within u16. in check_packet_access()
6132 * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff, in check_packet_access()
6135 * Therefore, "off + reg->umax_value + size - 1" won't overflow u32. in check_packet_access()
6137 env->prog->aux->max_pkt_offset = in check_packet_access()
6138 max_t(u32, env->prog->aux->max_pkt_offset, in check_packet_access()
6139 off + reg->umax_value + size - 1); in check_packet_access()
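/*
 * Editorial sketch, restricted C as compiled for BPF (not part of
 * verifier.c): the comparison against data_end is what gives the packet
 * pointer a non-zero reg->range, and check_packet_access() then checks the
 * fixed offset of every subsequent load against that range:
 *
 *	void *data     = (void *)(long)skb->data;
 *	void *data_end = (void *)(long)skb->data_end;
 *	struct ethhdr *eth = data;
 *
 *	if ((void *)(eth + 1) > data_end)	// establishes range = sizeof(*eth)
 *		return 0;
 *	return eth->h_proto;			// fixed-offset access within range
 */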
6148 if (env->ops->is_valid_access && in check_ctx_access()
6149 env->ops->is_valid_access(off, size, t, env->prog, info)) { in check_ctx_access()
6157 if (base_type(info->reg_type) == PTR_TO_BTF_ID) { in check_ctx_access()
6158 if (info->ref_obj_id && in check_ctx_access()
6159 !find_reference_state(env->cur_state, info->ref_obj_id)) { in check_ctx_access()
6162 return -EACCES; in check_ctx_access()
6165 env->insn_aux_data[insn_idx].ctx_field_size = info->ctx_field_size; in check_ctx_access()
6168 if (env->prog->aux->max_ctx_offset < off + size) in check_ctx_access()
6169 env->prog->aux->max_ctx_offset = off + size; in check_ctx_access()
6174 return -EACCES; in check_ctx_access()
6184 return -EACCES; in check_flow_keys_access()
6198 if (reg->smin_value < 0) { in check_sock_access()
6201 return -EACCES; in check_sock_access()
6204 switch (reg->type) { in check_sock_access()
6223 env->insn_aux_data[insn_idx].ctx_field_size = in check_sock_access()
6229 regno, reg_type_str(env, reg->type), off, size); in check_sock_access()
6231 return -EACCES; in check_sock_access()
6236 return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno)); in is_pointer_value()
6243 return reg->type == PTR_TO_CTX; in is_ctx_reg()
6250 return type_is_sk_pointer(reg->type); in is_sk_reg()
6257 return type_is_pkt_pointer(reg->type); in is_pkt_reg()
6265 return reg->type == PTR_TO_FLOW_KEYS; in is_flow_key_reg()
6272 return reg->type == PTR_TO_ARENA; in is_arena_reg()
6307 if (reg->ref_obj_id) in is_trusted_reg()
6311 if (reg2btf_ids[base_type(reg->type)] && in is_trusted_reg()
6312 !bpf_type_has_unsafe_modifiers(reg->type)) in is_trusted_reg()
6317 * other type modifiers may be safe, but we elect to take an opt-in in is_trusted_reg()
6324 return type_flag(reg->type) & BPF_REG_TRUSTED_MODIFIERS && in is_trusted_reg()
6325 !bpf_type_has_unsafe_modifiers(reg->type); in is_trusted_reg()
6330 return reg->type & MEM_RCU; in is_rcu_reg()
6359 reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off)); in check_pkt_ptr_alignment()
6363 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in check_pkt_ptr_alignment()
6366 ip_align, tn_buf, reg->off, off, size); in check_pkt_ptr_alignment()
6367 return -EACCES; in check_pkt_ptr_alignment()
6384 reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off)); in check_generic_ptr_alignment()
6388 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in check_generic_ptr_alignment()
6390 pointer_desc, tn_buf, reg->off, off, size); in check_generic_ptr_alignment()
6391 return -EACCES; in check_generic_ptr_alignment()
6401 bool strict = env->strict_alignment || strict_alignment_once; in check_ptr_alignment()
6404 switch (reg->type) { in check_ptr_alignment()
6461 switch (prog->type) { in bpf_enable_priv_stack()
6470 if (prog->aux->priv_stack_requested || bpf_prog_check_recur(prog)) in bpf_enable_priv_stack()
6482 if (env->prog->jit_requested) in round_up_stack_depth()
6485 /* round up to 32-bytes, since this is granularity in round_up_stack_depth()
6500 struct bpf_subprog_info *subprog = env->subprog_info; in check_max_stack_depth_subprog()
6501 struct bpf_insn *insn = env->prog->insnsi; in check_max_stack_depth_subprog()
6519 * func1 -> sub rsp, 128 in check_max_stack_depth_subprog()
6520 * subfunc1 -> sub rsp, 256 in check_max_stack_depth_subprog()
6521 * tailcall1 -> add rsp, 256 in check_max_stack_depth_subprog()
6522 * func2 -> sub rsp, 192 (total stack size = 128 + 192 = 320) in check_max_stack_depth_subprog()
6523 * subfunc2 -> sub rsp, 64 in check_max_stack_depth_subprog()
6524 * subfunc22 -> sub rsp, 128 in check_max_stack_depth_subprog()
6525 * tailcall2 -> add rsp, 128 in check_max_stack_depth_subprog()
6526 * func3 -> sub rsp, 32 (total stack size 128 + 192 + 64 + 32 = 416) in check_max_stack_depth_subprog()
6535 return -EACCES; in check_max_stack_depth_subprog()
6553 return -EACCES; in check_max_stack_depth_subprog()
6560 return -EACCES; in check_max_stack_depth_subprog()
6586 return -EINVAL; in check_max_stack_depth_subprog()
6599 return -EFAULT; in check_max_stack_depth_subprog()
6603 return -EFAULT; in check_max_stack_depth_subprog()
6610 return -EINVAL; in check_max_stack_depth_subprog()
6625 return -E2BIG; in check_max_stack_depth_subprog()
6638 return -EINVAL; in check_max_stack_depth_subprog()
6643 env->prog->aux->tail_call_reachable = true; in check_max_stack_depth_subprog()
6651 depth -= round_up_stack_depth(env, subprog[idx].stack_depth); in check_max_stack_depth_subprog()
6652 frame--; in check_max_stack_depth_subprog()
6661 struct bpf_subprog_info *si = env->subprog_info; in check_max_stack_depth()
6665 for (int i = 0; i < env->subprog_cnt; i++) { in check_max_stack_depth()
6673 priv_stack_mode = bpf_enable_priv_stack(env->prog); in check_max_stack_depth()
6683 for (int i = env->subprog_cnt - 1; i >= 0; i--) { in check_max_stack_depth()
6692 for (int i = 0; i < env->subprog_cnt; i++) { in check_max_stack_depth()
6694 env->prog->aux->jits_use_priv_stack = true; in check_max_stack_depth()
6706 int start = idx + insn->imm + 1, subprog; in get_callee_stack_depth()
6710 return -EFAULT; in get_callee_stack_depth()
6711 return env->subprog_info[subprog].stack_depth; in get_callee_stack_depth()
6724 return -EACCES; in __check_buffer_access()
6726 if (!tnum_is_const(reg->var_off) || reg->var_off.value) { in __check_buffer_access()
6729 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in __check_buffer_access()
6733 return -EACCES; in __check_buffer_access()
6749 if (off + size > env->prog->aux->max_tp_access) in check_tp_buffer_access()
6750 env->prog->aux->max_tp_access = off + size; in check_tp_buffer_access()
6761 const char *buf_info = type_is_rdonly_mem(reg->type) ? "rdonly" : "rdwr"; in check_buffer_access()
6774 /* BPF architecture zero extends alu32 ops into 64-bit registers */ in zext_32_to_64()
6777 reg->var_off = tnum_subreg(reg->var_off); in zext_32_to_64()
6789 reg->var_off = tnum_cast(reg->var_off, size); in coerce_reg_to_size()
6792 mask = ((u64)1 << (size * 8)) - 1; in coerce_reg_to_size()
6793 if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) { in coerce_reg_to_size()
6794 reg->umin_value &= mask; in coerce_reg_to_size()
6795 reg->umax_value &= mask; in coerce_reg_to_size()
6797 reg->umin_value = 0; in coerce_reg_to_size()
6798 reg->umax_value = mask; in coerce_reg_to_size()
6800 reg->smin_value = reg->umin_value; in coerce_reg_to_size()
6801 reg->smax_value = reg->umax_value; in coerce_reg_to_size()
6804 * values are also truncated so we push 64-bit bounds into in coerce_reg_to_size()
6805 * 32-bit bounds. Above were truncated < 32-bits already. in coerce_reg_to_size()
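/*
 * Editorial worked example: coercing to size == 1 uses mask == 0xff.  With
 * umin == 0x1f0 and umax == 0x1ff both bounds share the bits above the mask
 * (0x100), so they are simply masked down to [0xf0, 0xff].  With umin == 0xf0
 * and umax == 0x110 the upper bits differ, so the bounds reset to the full
 * [0, 0xff] range.
 */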
6816 reg->smin_value = reg->s32_min_value = S8_MIN; in set_sext64_default_val()
6817 reg->smax_value = reg->s32_max_value = S8_MAX; in set_sext64_default_val()
6819 reg->smin_value = reg->s32_min_value = S16_MIN; in set_sext64_default_val()
6820 reg->smax_value = reg->s32_max_value = S16_MAX; in set_sext64_default_val()
6823 reg->smin_value = reg->s32_min_value = S32_MIN; in set_sext64_default_val()
6824 reg->smax_value = reg->s32_max_value = S32_MAX; in set_sext64_default_val()
6826 reg->umin_value = reg->u32_min_value = 0; in set_sext64_default_val()
6827 reg->umax_value = U64_MAX; in set_sext64_default_val()
6828 reg->u32_max_value = U32_MAX; in set_sext64_default_val()
6829 reg->var_off = tnum_unknown; in set_sext64_default_val()
6838 if (tnum_is_const(reg->var_off)) { in coerce_reg_to_size_sx()
6839 u64_cval = reg->var_off.value; in coerce_reg_to_size_sx()
6841 reg->var_off = tnum_const((s8)u64_cval); in coerce_reg_to_size_sx()
6843 reg->var_off = tnum_const((s16)u64_cval); in coerce_reg_to_size_sx()
6846 reg->var_off = tnum_const((s32)u64_cval); in coerce_reg_to_size_sx()
6848 u64_cval = reg->var_off.value; in coerce_reg_to_size_sx()
6849 reg->smax_value = reg->smin_value = u64_cval; in coerce_reg_to_size_sx()
6850 reg->umax_value = reg->umin_value = u64_cval; in coerce_reg_to_size_sx()
6851 reg->s32_max_value = reg->s32_min_value = u64_cval; in coerce_reg_to_size_sx()
6852 reg->u32_max_value = reg->u32_min_value = u64_cval; in coerce_reg_to_size_sx()
6856 top_smax_value = ((u64)reg->smax_value >> num_bits) << num_bits; in coerce_reg_to_size_sx()
6857 top_smin_value = ((u64)reg->smin_value >> num_bits) << num_bits; in coerce_reg_to_size_sx()
6864 init_s64_max = (s8)reg->smax_value; in coerce_reg_to_size_sx()
6865 init_s64_min = (s8)reg->smin_value; in coerce_reg_to_size_sx()
6867 init_s64_max = (s16)reg->smax_value; in coerce_reg_to_size_sx()
6868 init_s64_min = (s16)reg->smin_value; in coerce_reg_to_size_sx()
6870 init_s64_max = (s32)reg->smax_value; in coerce_reg_to_size_sx()
6871 init_s64_min = (s32)reg->smin_value; in coerce_reg_to_size_sx()
6879 reg->s32_min_value = reg->smin_value = s64_min; in coerce_reg_to_size_sx()
6880 reg->s32_max_value = reg->smax_value = s64_max; in coerce_reg_to_size_sx()
6881 reg->u32_min_value = reg->umin_value = s64_min; in coerce_reg_to_size_sx()
6882 reg->u32_max_value = reg->umax_value = s64_max; in coerce_reg_to_size_sx()
6883 reg->var_off = tnum_range(s64_min, s64_max); in coerce_reg_to_size_sx()
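/*
 * Editorial worked examples for the sign-extending coercion above: a known
 * constant 0xfe coerced to one byte becomes the constant -2 in every bounds
 * domain, while a scalar already known to be in [1, 100] keeps those bounds,
 * since truncating it to one byte can change neither its value nor its sign.
 */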
6894 reg->s32_min_value = S8_MIN; in set_sext32_default_val()
6895 reg->s32_max_value = S8_MAX; in set_sext32_default_val()
6898 reg->s32_min_value = S16_MIN; in set_sext32_default_val()
6899 reg->s32_max_value = S16_MAX; in set_sext32_default_val()
6901 reg->u32_min_value = 0; in set_sext32_default_val()
6902 reg->u32_max_value = U32_MAX; in set_sext32_default_val()
6903 reg->var_off = tnum_subreg(tnum_unknown); in set_sext32_default_val()
6912 if (tnum_is_const(reg->var_off)) { in coerce_subreg_to_size_sx()
6913 u32_val = reg->var_off.value; in coerce_subreg_to_size_sx()
6915 reg->var_off = tnum_const((s8)u32_val); in coerce_subreg_to_size_sx()
6917 reg->var_off = tnum_const((s16)u32_val); in coerce_subreg_to_size_sx()
6919 u32_val = reg->var_off.value; in coerce_subreg_to_size_sx()
6920 reg->s32_min_value = reg->s32_max_value = u32_val; in coerce_subreg_to_size_sx()
6921 reg->u32_min_value = reg->u32_max_value = u32_val; in coerce_subreg_to_size_sx()
6925 top_smax_value = ((u32)reg->s32_max_value >> num_bits) << num_bits; in coerce_subreg_to_size_sx()
6926 top_smin_value = ((u32)reg->s32_min_value >> num_bits) << num_bits; in coerce_subreg_to_size_sx()
6933 init_s32_max = (s8)reg->s32_max_value; in coerce_subreg_to_size_sx()
6934 init_s32_min = (s8)reg->s32_min_value; in coerce_subreg_to_size_sx()
6937 init_s32_max = (s16)reg->s32_max_value; in coerce_subreg_to_size_sx()
6938 init_s32_min = (s16)reg->s32_min_value; in coerce_subreg_to_size_sx()
6944 reg->s32_min_value = s32_min; in coerce_subreg_to_size_sx()
6945 reg->s32_max_value = s32_max; in coerce_subreg_to_size_sx()
6946 reg->u32_min_value = (u32)s32_min; in coerce_subreg_to_size_sx()
6947 reg->u32_max_value = (u32)s32_max; in coerce_subreg_to_size_sx()
6948 reg->var_off = tnum_subreg(tnum_range(s32_min, s32_max)); in coerce_subreg_to_size_sx()
6958 /* A map is considered read-only if the following conditions are true: in bpf_map_is_rdonly()
6971 return (map->map_flags & BPF_F_RDONLY_PROG) && in bpf_map_is_rdonly()
6972 READ_ONCE(map->frozen) && in bpf_map_is_rdonly()
6983 err = map->ops->map_direct_value_addr(map, &addr, off); in bpf_map_direct_read()
7002 return -EINVAL; in bpf_map_direct_read()
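/*
 * Editorial sketch, not part of verifier.c: the usual way a program ends up
 * with such a map is libbpf's handling of global const data - the .rodata
 * section becomes an array map created with BPF_F_RDONLY_PROG and frozen
 * before load, so bpf_map_is_rdonly() holds and loads from it are resolved
 * to known scalars via bpf_map_direct_read():
 *
 *	const volatile int cfg_limit = 16;	// lives in the .rodata map
 *	...
 *	if (i < cfg_limit)			// verifier sees a constant bound
 */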
7027 /* cgrp->kn is always accessible as documented in kernel/cgroup/cgroup.c */ in BTF_TYPE_SAFE_RCU()
7044 /* skb->sk, req->sk are not RCU protected, but we mark them as such
7090 return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id, "__safe_rcu"); in type_is_rcu()
7101 return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id, "__safe_rcu_or_null"); in type_is_rcu_or_null()
7113 return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id, "__safe_trusted"); in type_is_trusted()
7123 return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id, in type_is_trusted_or_null()
7134 const struct btf_type *t = btf_type_by_id(reg->btf, reg->btf_id); in check_ptr_to_btf_access()
7135 const char *tname = btf_name_by_offset(reg->btf, t->name_off); in check_ptr_to_btf_access()
7141 if (!env->allow_ptr_leaks) { in check_ptr_to_btf_access()
7145 return -EPERM; in check_ptr_to_btf_access()
7147 if (!env->prog->gpl_compatible && btf_is_kernel(reg->btf)) { in check_ptr_to_btf_access()
7149 "Cannot access kernel 'struct %s' from non-GPL compatible program\n", in check_ptr_to_btf_access()
7151 return -EINVAL; in check_ptr_to_btf_access()
7157 return -EACCES; in check_ptr_to_btf_access()
7159 if (!tnum_is_const(reg->var_off) || reg->var_off.value) { in check_ptr_to_btf_access()
7162 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in check_ptr_to_btf_access()
7166 return -EACCES; in check_ptr_to_btf_access()
7169 if (reg->type & MEM_USER) { in check_ptr_to_btf_access()
7173 return -EACCES; in check_ptr_to_btf_access()
7176 if (reg->type & MEM_PERCPU) { in check_ptr_to_btf_access()
7180 return -EACCES; in check_ptr_to_btf_access()
7183 if (env->ops->btf_struct_access && !type_is_alloc(reg->type) && atype == BPF_WRITE) { in check_ptr_to_btf_access()
7184 if (!btf_is_kernel(reg->btf)) { in check_ptr_to_btf_access()
7185 verifier_bug(env, "reg->btf must be kernel btf"); in check_ptr_to_btf_access()
7186 return -EFAULT; in check_ptr_to_btf_access()
7188 ret = env->ops->btf_struct_access(&env->log, reg, off, size); in check_ptr_to_btf_access()
7194 if (atype != BPF_READ && !type_is_ptr_alloc_obj(reg->type)) { in check_ptr_to_btf_access()
7196 return -EACCES; in check_ptr_to_btf_access()
7199 if (type_is_alloc(reg->type) && !type_is_non_owning_ref(reg->type) && in check_ptr_to_btf_access()
7200 !(reg->type & MEM_RCU) && !reg->ref_obj_id) { in check_ptr_to_btf_access()
7201 verifier_bug(env, "ref_obj_id for allocated object must be non-zero"); in check_ptr_to_btf_access()
7202 return -EFAULT; in check_ptr_to_btf_access()
7205 ret = btf_struct_access(&env->log, reg, off, size, atype, &btf_id, &flag, &field_name); in check_ptr_to_btf_access()
7214 } else if (type_flag(reg->type) & PTR_UNTRUSTED) { in check_ptr_to_btf_access()
7225 * 'cgroups' pointer is untrusted if task->cgroups dereference in check_ptr_to_btf_access()
7227 * section. In a non-sleepable program it's trusted while in RCU CS (aka MEM_RCU). in check_ptr_to_btf_access()
7230 * A regular RCU-protected pointer with __rcu tag can also be deemed in check_ptr_to_btf_access()
7237 } else if (in_rcu_cs(env) && !type_may_be_null(reg->type)) { in check_ptr_to_btf_access()
7251 /* keep as-is */ in check_ptr_to_btf_access()
7272 ret = mark_btf_ld_reg(env, regs, value_regno, ret, reg->btf, btf_id, flag); in check_ptr_to_btf_access()
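/*
 * Editorial sketch, restricted C as compiled for BPF (not part of
 * verifier.c): the task->cgroups case discussed above.  Inside an explicit
 * RCU read-side section the dereference yields a MEM_RCU-tagged pointer
 * that is usable like a trusted pointer; in a sleepable program outside of
 * such a section the same load would produce a PTR_UNTRUSTED result:
 *
 *	struct task_struct *task = bpf_get_current_task_btf();
 *	struct css_set *cgroups;
 *
 *	bpf_rcu_read_lock();
 *	cgroups = task->cgroups;	// MEM_RCU while the lock is held
 *	if (cgroups)
 *		bpf_printk("dfl_cgrp %p", cgroups->dfl_cgrp);
 *	bpf_rcu_read_unlock();
 */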
7287 struct bpf_map *map = reg->map_ptr; in check_ptr_to_map_access()
7297 return -ENOTSUPP; in check_ptr_to_map_access()
7300 if (!map->ops->map_btf_id || !*map->ops->map_btf_id) { in check_ptr_to_map_access()
7302 map->map_type); in check_ptr_to_map_access()
7303 return -ENOTSUPP; in check_ptr_to_map_access()
7306 t = btf_type_by_id(btf_vmlinux, *map->ops->map_btf_id); in check_ptr_to_map_access()
7307 tname = btf_name_by_offset(btf_vmlinux, t->name_off); in check_ptr_to_map_access()
7309 if (!env->allow_ptr_leaks) { in check_ptr_to_map_access()
7313 return -EPERM; in check_ptr_to_map_access()
7319 return -EACCES; in check_ptr_to_map_access()
7324 return -EACCES; in check_ptr_to_map_access()
7330 btf_vmlinux, *map->ops->map_btf_id, 0); in check_ptr_to_map_access()
7333 ret = btf_struct_access(&env->log, &map_reg, off, size, atype, &btf_id, &flag, NULL); in check_ptr_to_map_access()
7347 * maximum valid offset is -1.
7349 * The minimum valid offset is -MAX_BPF_STACK for writes, and
7350 * -state->allocated_stack for reads.
7359 if (t == BPF_WRITE || env->allow_uninit_stack) in check_stack_slot_within_bounds()
7360 min_valid_off = -MAX_BPF_STACK; in check_stack_slot_within_bounds()
7362 min_valid_off = -state->allocated_stack; in check_stack_slot_within_bounds()
7364 if (off < min_valid_off || off > -1) in check_stack_slot_within_bounds()
7365 return -EACCES; in check_stack_slot_within_bounds()
7372 * 'off' includes `regno->off`, but not its dynamic part (if any). in check_stack_access_within_bounds()
7391 if (tnum_is_const(reg->var_off)) { in check_stack_access_within_bounds()
7392 min_off = (s64)reg->var_off.value + off; in check_stack_access_within_bounds()
7395 if (reg->smax_value >= BPF_MAX_VAR_OFF || in check_stack_access_within_bounds()
7396 reg->smin_value <= -BPF_MAX_VAR_OFF) { in check_stack_access_within_bounds()
7397 verbose(env, "invalid unbounded variable-offset%s stack R%d\n", in check_stack_access_within_bounds()
7399 return -EACCES; in check_stack_access_within_bounds()
7401 min_off = reg->smin_value + off; in check_stack_access_within_bounds()
7402 max_off = reg->smax_value + off + access_size; in check_stack_access_within_bounds()
7407 err = -EINVAL; /* out-of-stack access into non-negative offsets */ in check_stack_access_within_bounds()
7412 err = -EFAULT; /* invalid negative access size; integer overflow? */ in check_stack_access_within_bounds()
7415 if (tnum_is_const(reg->var_off)) { in check_stack_access_within_bounds()
7421 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in check_stack_access_within_bounds()
7422 verbose(env, "invalid variable-offset%s stack R%d var_off=%s off=%d size=%d\n", in check_stack_access_within_bounds()
7429 * size is -min_off, not -min_off+1. in check_stack_access_within_bounds()
7431 return grow_stack_state(env, state, -min_off /* size */); in check_stack_access_within_bounds()
7437 if (prog->type == BPF_PROG_TYPE_LSM && in get_func_retval_range()
7438 prog->expected_attach_type == BPF_LSM_MAC && in get_func_retval_range()
7448 * if t==write && value_regno==-1, some unknown value is stored into memory
7449 * if t==read && value_regno==-1, don't care what we read from memory
7463 /* alignment checks will add in reg->off themselves */ in check_mem_access()
7468 /* for access checks, reg->off is just part of off */ in check_mem_access()
7469 off += reg->off; in check_mem_access()
7471 if (reg->type == PTR_TO_MAP_KEY) { in check_mem_access()
7474 return -EACCES; in check_mem_access()
7478 reg->map_ptr->key_size, false); in check_mem_access()
7483 } else if (reg->type == PTR_TO_MAP_VALUE) { in check_mem_access()
7489 return -EACCES; in check_mem_access()
7497 if (tnum_is_const(reg->var_off)) in check_mem_access()
7498 kptr_field = btf_record_find(reg->map_ptr->record, in check_mem_access()
7499 off + reg->var_off.value, BPF_KPTR | BPF_UPTR); in check_mem_access()
7503 struct bpf_map *map = reg->map_ptr; in check_mem_access()
7505 /* if map is read-only, track its contents as scalars */ in check_mem_access()
7506 if (tnum_is_const(reg->var_off) && in check_mem_access()
7508 map->ops->map_direct_value_addr) { in check_mem_access()
7509 int map_off = off + reg->var_off.value; in check_mem_access()
7523 } else if (base_type(reg->type) == PTR_TO_MEM) { in check_mem_access()
7524 bool rdonly_mem = type_is_rdonly_mem(reg->type); in check_mem_access()
7525 bool rdonly_untrusted = rdonly_mem && (reg->type & PTR_UNTRUSTED); in check_mem_access()
7527 if (type_may_be_null(reg->type)) { in check_mem_access()
7529 reg_type_str(env, reg->type)); in check_mem_access()
7530 return -EACCES; in check_mem_access()
7535 regno, reg_type_str(env, reg->type)); in check_mem_access()
7536 return -EACCES; in check_mem_access()
7542 return -EACCES; in check_mem_access()
7551 reg->mem_size, false); in check_mem_access()
7554 } else if (reg->type == PTR_TO_CTX) { in check_mem_access()
7559 .log = &env->log, in check_mem_access()
7565 return -EACCES; in check_mem_access()
7581 if (info.is_retval && get_func_retval_range(env->prog, &range)) { in check_mem_access()
7593 regs[value_regno].id = ++env->id_gen; in check_mem_access()
7597 * a sub-register. in check_mem_access()
7609 } else if (reg->type == PTR_TO_STACK) { in check_mem_access()
7624 return -EACCES; in check_mem_access()
7630 return -EACCES; in check_mem_access()
7635 } else if (reg->type == PTR_TO_FLOW_KEYS) { in check_mem_access()
7640 return -EACCES; in check_mem_access()
7646 } else if (type_is_sk_pointer(reg->type)) { in check_mem_access()
7649 regno, reg_type_str(env, reg->type)); in check_mem_access()
7650 return -EACCES; in check_mem_access()
7655 } else if (reg->type == PTR_TO_TP_BUFFER) { in check_mem_access()
7659 } else if (base_type(reg->type) == PTR_TO_BTF_ID && in check_mem_access()
7660 !type_may_be_null(reg->type)) { in check_mem_access()
7663 } else if (reg->type == CONST_PTR_TO_MAP) { in check_mem_access()
7666 } else if (base_type(reg->type) == PTR_TO_BUF) { in check_mem_access()
7667 bool rdonly_mem = type_is_rdonly_mem(reg->type); in check_mem_access()
7673 regno, reg_type_str(env, reg->type)); in check_mem_access()
7674 return -EACCES; in check_mem_access()
7676 max_access = &env->prog->aux->max_rdonly_access; in check_mem_access()
7678 max_access = &env->prog->aux->max_rdwr_access; in check_mem_access()
7686 } else if (reg->type == PTR_TO_ARENA) { in check_mem_access()
7691 reg_type_str(env, reg->type)); in check_mem_access()
7692 return -EACCES; in check_mem_access()
7698 /* b/h/w load zero-extends, mark upper bits as known 0 */ in check_mem_access()
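/* For example, after
 *
 *   BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_10, -1),
 *
 * bits 8..63 of R0 are known to be zero, so bounds such as R0 <= 0xff
 * follow without an explicit mask.
 */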
7718 err = check_reg_arg(env, insn->src_reg, SRC_OP); in check_load_mem()
7723 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); in check_load_mem()
7727 src_reg_type = regs[insn->src_reg].type; in check_load_mem()
7732 err = check_mem_access(env, env->insn_idx, insn->src_reg, insn->off, in check_load_mem()
7733 BPF_SIZE(insn->code), BPF_READ, insn->dst_reg, in check_load_mem()
7737 err = err ?: reg_bounds_sanity_check(env, &regs[insn->dst_reg], ctx); in check_load_mem()
7750 err = check_reg_arg(env, insn->src_reg, SRC_OP); in check_store_reg()
7755 err = check_reg_arg(env, insn->dst_reg, SRC_OP); in check_store_reg()
7759 dst_reg_type = regs[insn->dst_reg].type; in check_store_reg()
7762 err = check_mem_access(env, env->insn_idx, insn->dst_reg, insn->off, in check_store_reg()
7763 BPF_SIZE(insn->code), BPF_WRITE, insn->src_reg, in check_store_reg()
7776 if (BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) { in check_atomic_rmw()
7778 return -EINVAL; in check_atomic_rmw()
7782 err = check_reg_arg(env, insn->src_reg, SRC_OP); in check_atomic_rmw()
7787 err = check_reg_arg(env, insn->dst_reg, SRC_OP); in check_atomic_rmw()
7791 if (insn->imm == BPF_CMPXCHG) { in check_atomic_rmw()
7801 return -EACCES; in check_atomic_rmw()
7805 if (is_pointer_value(env, insn->src_reg)) { in check_atomic_rmw()
7806 verbose(env, "R%d leaks addr into mem\n", insn->src_reg); in check_atomic_rmw()
7807 return -EACCES; in check_atomic_rmw()
7810 if (!atomic_ptr_type_ok(env, insn->dst_reg, insn)) { in check_atomic_rmw()
7812 insn->dst_reg, in check_atomic_rmw()
7813 reg_type_str(env, reg_state(env, insn->dst_reg)->type)); in check_atomic_rmw()
7814 return -EACCES; in check_atomic_rmw()
7817 if (insn->imm & BPF_FETCH) { in check_atomic_rmw()
7818 if (insn->imm == BPF_CMPXCHG) in check_atomic_rmw()
7821 load_reg = insn->src_reg; in check_atomic_rmw()
7831 load_reg = -1; in check_atomic_rmw()
7837 err = check_mem_access(env, env->insn_idx, insn->dst_reg, insn->off, in check_atomic_rmw()
7838 BPF_SIZE(insn->code), BPF_READ, -1, true, false); in check_atomic_rmw()
7840 err = check_mem_access(env, env->insn_idx, insn->dst_reg, in check_atomic_rmw()
7841 insn->off, BPF_SIZE(insn->code), in check_atomic_rmw()
7846 if (is_arena_reg(env, insn->dst_reg)) { in check_atomic_rmw()
7852 err = check_mem_access(env, env->insn_idx, insn->dst_reg, insn->off, in check_atomic_rmw()
7853 BPF_SIZE(insn->code), BPF_WRITE, -1, true, false); in check_atomic_rmw()
7868 if (!atomic_ptr_type_ok(env, insn->src_reg, insn)) { in check_atomic_load()
7870 insn->src_reg, in check_atomic_load()
7871 reg_type_str(env, reg_state(env, insn->src_reg)->type)); in check_atomic_load()
7872 return -EACCES; in check_atomic_load()
7887 if (!atomic_ptr_type_ok(env, insn->dst_reg, insn)) { in check_atomic_store()
7889 insn->dst_reg, in check_atomic_store()
7890 reg_type_str(env, reg_state(env, insn->dst_reg)->type)); in check_atomic_store()
7891 return -EACCES; in check_atomic_store()
7899 switch (insn->imm) { in check_atomic()
7912 if (BPF_SIZE(insn->code) == BPF_DW && BITS_PER_LONG != 64) { in check_atomic()
7914 "64-bit load-acquires are only supported on 64-bit arches\n"); in check_atomic()
7915 return -EOPNOTSUPP; in check_atomic()
7919 if (BPF_SIZE(insn->code) == BPF_DW && BITS_PER_LONG != 64) { in check_atomic()
7921 "64-bit store-releases are only supported on 64-bit arches\n"); in check_atomic()
7922 return -EOPNOTSUPP; in check_atomic()
7927 insn->imm); in check_atomic()
7928 return -EINVAL; in check_atomic()
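/* For illustration, the forms dispatched above include (sketch):
 *
 *   BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_1, -8)             // *(u64 *)(r10 - 8) += r1
 *   BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_10, BPF_REG_1, -8) // ...and r1 = old value
 *   BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -8)         // r0 = cmpxchg(r10 - 8, r0, r1)
 *
 * plus BPF_XCHG and the load-acquire/store-release forms handled by
 * check_atomic_load()/check_atomic_store().
 */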
7937 * 'off' includes 'regno->off', but not its dynamic part (if any).
7951 * read-only. in check_stack_range_initialized()
7956 verbose(env, "invalid zero-sized read\n"); in check_stack_range_initialized()
7957 return -EACCES; in check_stack_range_initialized()
7968 if (tnum_is_const(reg->var_off)) { in check_stack_range_initialized()
7969 min_off = max_off = reg->var_off.value + off; in check_stack_range_initialized()
7976 if (!env->bypass_spec_v1) { in check_stack_range_initialized()
7979 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in check_stack_range_initialized()
7982 return -EACCES; in check_stack_range_initialized()
7990 if (meta && meta->raw_mode) in check_stack_range_initialized()
7993 min_off = reg->smin_value + off; in check_stack_range_initialized()
7994 max_off = reg->smax_value + off; in check_stack_range_initialized()
7997 if (meta && meta->raw_mode) { in check_stack_range_initialized()
8012 int stack_off = -i - 1; in check_stack_range_initialized()
8016 if (state->allocated_stack <= stack_off) in check_stack_range_initialized()
8018 if (state->stack[spi].slot_type[stack_off % BPF_REG_SIZE] == STACK_DYNPTR) { in check_stack_range_initialized()
8020 return -EACCES; in check_stack_range_initialized()
8023 meta->access_size = access_size; in check_stack_range_initialized()
8024 meta->regno = regno; in check_stack_range_initialized()
8031 slot = -i - 1; in check_stack_range_initialized()
8033 if (state->allocated_stack <= slot) { in check_stack_range_initialized()
8035 return -EFAULT; in check_stack_range_initialized()
8038 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; in check_stack_range_initialized()
8042 (*stype == STACK_INVALID && env->allow_uninit_stack)) { in check_stack_range_initialized()
8050 if (is_spilled_reg(&state->stack[spi]) && in check_stack_range_initialized()
8051 (state->stack[spi].spilled_ptr.type == SCALAR_VALUE || in check_stack_range_initialized()
8052 env->allow_ptr_leaks)) { in check_stack_range_initialized()
8054 __mark_reg_unknown(env, &state->stack[spi].spilled_ptr); in check_stack_range_initialized()
8056 scrub_spilled_slot(&state->stack[spi].slot_type[j]); in check_stack_range_initialized()
8061 if (tnum_is_const(reg->var_off)) { in check_stack_range_initialized()
8063 regno, min_off, i - min_off, access_size); in check_stack_range_initialized()
8067 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in check_stack_range_initialized()
8069 regno, tn_buf, i - min_off, access_size); in check_stack_range_initialized()
8071 return -EACCES; in check_stack_range_initialized()
8073 /* reading any byte out of 8-byte 'spill_slot' will cause in check_stack_range_initialized()
8076 err = bpf_mark_stack_read(env, reg->frameno, env->insn_idx, BIT(spi)); in check_stack_range_initialized()
8096 switch (base_type(reg->type)) { in check_helper_mem_access()
8099 return check_packet_access(env, regno, reg->off, access_size, in check_helper_mem_access()
8104 reg_type_str(env, reg->type)); in check_helper_mem_access()
8105 return -EACCES; in check_helper_mem_access()
8107 return check_mem_region_access(env, regno, reg->off, access_size, in check_helper_mem_access()
8108 reg->map_ptr->key_size, false); in check_helper_mem_access()
8110 if (check_map_access_type(env, regno, reg->off, access_size, access_type)) in check_helper_mem_access()
8111 return -EACCES; in check_helper_mem_access()
8112 return check_map_access(env, regno, reg->off, access_size, in check_helper_mem_access()
8115 if (type_is_rdonly_mem(reg->type)) { in check_helper_mem_access()
8118 reg_type_str(env, reg->type)); in check_helper_mem_access()
8119 return -EACCES; in check_helper_mem_access()
8122 return check_mem_region_access(env, regno, reg->off, in check_helper_mem_access()
8123 access_size, reg->mem_size, in check_helper_mem_access()
8126 if (type_is_rdonly_mem(reg->type)) { in check_helper_mem_access()
8129 reg_type_str(env, reg->type)); in check_helper_mem_access()
8130 return -EACCES; in check_helper_mem_access()
8133 max_access = &env->prog->aux->max_rdonly_access; in check_helper_mem_access()
8135 max_access = &env->prog->aux->max_rdwr_access; in check_helper_mem_access()
8137 return check_buffer_access(env, reg, regno, reg->off, in check_helper_mem_access()
8143 regno, reg->off, access_size, in check_helper_mem_access()
8146 return check_ptr_to_btf_access(env, regs, regno, reg->off, in check_helper_mem_access()
8147 access_size, BPF_READ, -1); in check_helper_mem_access()
8154 if (!env->ops->convert_ctx_access) { in check_helper_mem_access()
8155 int offset = access_size - 1; in check_helper_mem_access()
8157 /* Allow zero-byte read from PTR_TO_CTX */ in check_helper_mem_access()
8159 return zero_size_allowed ? 0 : -EACCES; in check_helper_mem_access()
8161 return check_mem_access(env, env->insn_idx, regno, offset, BPF_B, in check_helper_mem_access()
8162 access_type, -1, false, false); in check_helper_mem_access()
8167 /* Allow zero-byte read from NULL, regardless of pointer type */ in check_helper_mem_access()
8173 reg_type_str(env, reg->type)); in check_helper_mem_access()
8175 return -EACCES; in check_helper_mem_access()
8182 * @regno is the register containing the access size. regno-1 is the register
8201 meta->msize_max_value = reg->umax_value; in check_mem_size_reg()
8208 if (!tnum_is_const(reg->var_off)) in check_mem_size_reg()
8211 if (reg->smin_value < 0) { in check_mem_size_reg()
8214 return -EACCES; in check_mem_size_reg()
8217 if (reg->umin_value == 0 && !zero_size_allowed) { in check_mem_size_reg()
8218 verbose(env, "R%d invalid zero-sized read: u64=[%lld,%lld]\n", in check_mem_size_reg()
8219 regno, reg->umin_value, reg->umax_value); in check_mem_size_reg()
8220 return -EACCES; in check_mem_size_reg()
8223 if (reg->umax_value >= BPF_MAX_VAR_SIZ) { in check_mem_size_reg()
8226 return -EACCES; in check_mem_size_reg()
8228 err = check_helper_mem_access(env, regno - 1, reg->umax_value, in check_mem_size_reg()
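/* For example, with a mem/size argument pair such as (sketch)
 *
 *   bpf_probe_read_kernel(&buf, sizeof(buf), ptr);
 *
 * regno here is the size register (R2) and regno - 1 (R1) is the memory it
 * describes; the whole [R1, R1 + R2.umax_value) range must be valid.
 */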
8238 bool may_be_null = type_may_be_null(reg->type); in check_mem_reg()
8266 struct bpf_reg_state *mem_reg = &cur_regs(env)[regno - 1]; in check_kfunc_mem_size_reg()
8267 bool may_be_null = type_may_be_null(mem_reg->type); in check_kfunc_mem_size_reg()
8299 * Two bpf_map_lookups (even with the same key) will have different reg->id.
8300 * Two separate bpf_obj_new will also have different reg->id.
8302 * clears reg->id after value_or_null->value transition, since the verifier only
8306 * reg->id > 0 after value_or_null->value transition. By doing so
8311 * dead-locks.
8315 * env->cur_state->active_locks remembers which map value element or allocated
8323 struct bpf_verifier_state *cur = env->cur_state; in process_spin_lock()
8324 bool is_const = tnum_is_const(reg->var_off); in process_spin_lock()
8326 u64 val = reg->var_off.value; in process_spin_lock()
8337 return -EINVAL; in process_spin_lock()
8339 if (reg->type == PTR_TO_MAP_VALUE) { in process_spin_lock()
8340 map = reg->map_ptr; in process_spin_lock()
8341 if (!map->btf) { in process_spin_lock()
8344 map->name, lock_str); in process_spin_lock()
8345 return -EINVAL; in process_spin_lock()
8348 btf = reg->btf; in process_spin_lock()
8354 map ? map->name : "kptr", lock_str); in process_spin_lock()
8355 return -EINVAL; in process_spin_lock()
8357 spin_lock_off = is_res_lock ? rec->res_spin_lock_off : rec->spin_lock_off; in process_spin_lock()
8358 if (spin_lock_off != val + reg->off) { in process_spin_lock()
8360 val + reg->off, lock_str, spin_lock_off); in process_spin_lock()
8361 return -EINVAL; in process_spin_lock()
8372 if (!is_res_lock && cur->active_locks) { in process_spin_lock()
8373 if (find_lock_state(env->cur_state, REF_TYPE_LOCK, 0, NULL)) { in process_spin_lock()
8376 return -EINVAL; in process_spin_lock()
8378 } else if (is_res_lock && cur->active_locks) { in process_spin_lock()
8379 if (find_lock_state(env->cur_state, REF_TYPE_RES_LOCK | REF_TYPE_RES_LOCK_IRQ, reg->id, ptr)) { in process_spin_lock()
8381 return -EINVAL; in process_spin_lock()
8391 err = acquire_lock_state(env, env->insn_idx, type, reg->id, ptr); in process_spin_lock()
8405 if (!cur->active_locks) { in process_spin_lock()
8407 return -EINVAL; in process_spin_lock()
8416 if (!find_lock_state(cur, type, reg->id, ptr)) { in process_spin_lock()
8418 return -EINVAL; in process_spin_lock()
8420 if (reg->id != cur->active_lock_id || ptr != cur->active_lock_ptr) { in process_spin_lock()
8422 return -EINVAL; in process_spin_lock()
8424 if (release_lock_state(cur, type, reg->id, ptr)) { in process_spin_lock()
8426 return -EINVAL; in process_spin_lock()
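/* For illustration, the pattern being enforced, from BPF C (sketch):
 *
 *   struct elem {
 *           struct bpf_spin_lock lock;
 *           int cnt;
 *   };
 *   ...
 *   val = bpf_map_lookup_elem(&map, &key);
 *   if (!val)
 *           return 0;
 *   bpf_spin_lock(&val->lock);
 *   val->cnt++;
 *   bpf_spin_unlock(&val->lock);
 *
 * The unlock must name the same lookup (same reg->id and pointer) as the
 * lock, and for the classic bpf_spin_lock only one region may be active.
 */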
8439 bool is_const = tnum_is_const(reg->var_off); in check_map_field_pointer()
8440 struct bpf_map *map = reg->map_ptr; in check_map_field_pointer()
8441 u64 val = reg->var_off.value; in check_map_field_pointer()
8443 int field_off = -1; in check_map_field_pointer()
8449 return -EINVAL; in check_map_field_pointer()
8451 if (!map->btf) { in check_map_field_pointer()
8452 verbose(env, "map '%s' has to have BTF in order to use %s\n", map->name, in check_map_field_pointer()
8454 return -EINVAL; in check_map_field_pointer()
8456 if (!btf_record_has_field(map->record, field_type)) { in check_map_field_pointer()
8457 verbose(env, "map '%s' has no valid %s\n", map->name, struct_name); in check_map_field_pointer()
8458 return -EINVAL; in check_map_field_pointer()
8462 field_off = map->record->timer_off; in check_map_field_pointer()
8465 field_off = map->record->task_work_off; in check_map_field_pointer()
8469 return -EINVAL; in check_map_field_pointer()
8471 if (field_off != val + reg->off) { in check_map_field_pointer()
8473 val + reg->off, struct_name, field_off); in check_map_field_pointer()
8474 return -EINVAL; in check_map_field_pointer()
8483 struct bpf_map *map = reg->map_ptr; in process_timer_func()
8490 if (meta->map_ptr) { in process_timer_func()
8492 return -EFAULT; in process_timer_func()
8496 return -EOPNOTSUPP; in process_timer_func()
8498 meta->map_uid = reg->map_uid; in process_timer_func()
8499 meta->map_ptr = map; in process_timer_func()
8507 struct bpf_map *map = reg->map_ptr; in process_wq_func()
8508 u64 val = reg->var_off.value; in process_wq_func()
8510 if (map->record->wq_off != val + reg->off) { in process_wq_func()
8512 val + reg->off, map->record->wq_off); in process_wq_func()
8513 return -EINVAL; in process_wq_func()
8515 meta->map.uid = reg->map_uid; in process_wq_func()
8516 meta->map.ptr = map; in process_wq_func()
8524 struct bpf_map *map = reg->map_ptr; in process_task_work_func()
8531 if (meta->map.ptr) { in process_task_work_func()
8533 return -EFAULT; in process_task_work_func()
8535 meta->map.uid = reg->map_uid; in process_task_work_func()
8536 meta->map.ptr = map; in process_task_work_func()
8549 if (type_is_ptr_alloc_obj(reg->type)) { in process_kptr_func()
8552 map_ptr = reg->map_ptr; in process_kptr_func()
8553 if (!map_ptr->btf) { in process_kptr_func()
8555 map_ptr->name); in process_kptr_func()
8556 return -EINVAL; in process_kptr_func()
8558 rec = map_ptr->record; in process_kptr_func()
8559 meta->map_ptr = map_ptr; in process_kptr_func()
8562 if (!tnum_is_const(reg->var_off)) { in process_kptr_func()
8566 return -EINVAL; in process_kptr_func()
8571 return -EINVAL; in process_kptr_func()
8574 kptr_off = reg->off + reg->var_off.value; in process_kptr_func()
8578 return -EACCES; in process_kptr_func()
8580 if (kptr_field->type != BPF_KPTR_REF && kptr_field->type != BPF_KPTR_PERCPU) { in process_kptr_func()
8582 return -EACCES; in process_kptr_func()
8584 meta->kptr_field = kptr_field; in process_kptr_func()
8603 * reg->type and the memory's in reg->dynptr.type), but there is no support for
8619 if (reg->type != PTR_TO_STACK && reg->type != CONST_PTR_TO_DYNPTR) { in process_dynptr_func()
8622 regno - 1); in process_dynptr_func()
8623 return -EINVAL; in process_dynptr_func()
8631 return -EFAULT; in process_dynptr_func()
8634 /* MEM_UNINIT - Points to memory that is an appropriate candidate for in process_dynptr_func()
8641 * MEM_RDONLY - Points to an initialized bpf_dynptr that will not be in process_dynptr_func()
8645 * None - Points to an initialized dynptr that can be mutated and in process_dynptr_func()
8654 return -EINVAL; in process_dynptr_func()
8660 i, BPF_DW, BPF_WRITE, -1, false, false); in process_dynptr_func()
8667 /* For the reg->type == PTR_TO_STACK case, bpf_dynptr is never const */ in process_dynptr_func()
8668 if (reg->type == CONST_PTR_TO_DYNPTR && !(arg_type & MEM_RDONLY)) { in process_dynptr_func()
8670 return -EINVAL; in process_dynptr_func()
8676 regno - 1); in process_dynptr_func()
8677 return -EINVAL; in process_dynptr_func()
8684 dynptr_type_str(arg_to_dynptr_type(arg_type)), regno - 1); in process_dynptr_func()
8685 return -EINVAL; in process_dynptr_func()
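/* For illustration, a typical construction and use from BPF C (sketch;
 * the map and buffer names are illustrative):
 *
 *   struct bpf_dynptr ptr;
 *
 *   bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr);  // &ptr checked with MEM_UNINIT
 *   bpf_dynptr_write(&ptr, 0, buf, sizeof(buf), 0);     // initialized, mutable dynptr
 *   bpf_ringbuf_submit_dynptr(&ptr, 0);
 */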
8697 return state->stack[spi].spilled_ptr.ref_obj_id; in iter_ref_obj_id()
8702 return meta->kfunc_flags & (KF_ITER_NEW | KF_ITER_NEXT | KF_ITER_DESTROY); in is_iter_kfunc()
8707 return meta->kfunc_flags & KF_ITER_NEW; in is_iter_new_kfunc()
8712 return meta->kfunc_flags & KF_ITER_NEXT; in is_iter_next_kfunc()
8717 return meta->kfunc_flags & KF_ITER_DESTROY; in is_iter_destroy_kfunc()
8730 return btf_param_match_suffix(meta->btf, arg, "__iter"); in is_kfunc_arg_iter()
8740 if (reg->type != PTR_TO_STACK) { in process_iter_arg()
8741 verbose(env, "arg#%d expected pointer to an iterator on stack\n", regno - 1); in process_iter_arg()
8742 return -EINVAL; in process_iter_arg()
8751 btf_id = btf_check_iter_arg(meta->btf, meta->func_proto, regno - 1); in process_iter_arg()
8753 verbose(env, "expected valid iter pointer as arg #%d\n", regno - 1); in process_iter_arg()
8754 return -EINVAL; in process_iter_arg()
8756 t = btf_type_by_id(meta->btf, btf_id); in process_iter_arg()
8757 nr_slots = t->size / BPF_REG_SIZE; in process_iter_arg()
8763 iter_type_str(meta->btf, btf_id), regno - 1); in process_iter_arg()
8764 return -EINVAL; in process_iter_arg()
8769 i, BPF_DW, BPF_WRITE, -1, false, false); in process_iter_arg()
8774 err = mark_stack_slots_iter(env, meta, reg, insn_idx, meta->btf, btf_id, nr_slots); in process_iter_arg()
8781 err = is_iter_reg_valid_init(env, reg, meta->btf, btf_id, nr_slots); in process_iter_arg()
8785 case -EINVAL: in process_iter_arg()
8787 iter_type_str(meta->btf, btf_id), regno - 1); in process_iter_arg()
8789 case -EPROTO: in process_iter_arg()
8790 verbose(env, "expected an RCU CS when using %s\n", meta->func_name); in process_iter_arg()
8804 /* remember meta->iter info for process_iter_next_call() */ in process_iter_arg()
8805 meta->iter.spi = spi; in process_iter_arg()
8806 meta->iter.frameno = reg->frameno; in process_iter_arg()
8807 meta->ref_obj_id = iter_ref_obj_id(env, reg, spi); in process_iter_arg()
8820 * stopped at insn_idx with callsites matching those in cur->frame.
8834 /* If st->branches != 0 state is a part of current DFS verification path, in find_prev_entry()
8837 st = &sl->state; in find_prev_entry()
8838 if (st->insn_idx == insn_idx && st->branches && same_callsites(st, cur) && in find_prev_entry()
8839 st->dfs_depth < cur->dfs_depth) in find_prev_entry()
8855 if (rold->type != SCALAR_VALUE) in maybe_widen_reg()
8857 if (rold->type != rcur->type) in maybe_widen_reg()
8859 if (rold->precise || rcur->precise || regs_exact(rold, rcur, idmap)) in maybe_widen_reg()
8872 for (fr = old->curframe; fr >= 0; fr--) { in widen_imprecise_scalars()
8873 fold = old->frame[fr]; in widen_imprecise_scalars()
8874 fcur = cur->frame[fr]; in widen_imprecise_scalars()
8878 &fold->regs[i], in widen_imprecise_scalars()
8879 &fcur->regs[i], in widen_imprecise_scalars()
8880 &env->idmap_scratch); in widen_imprecise_scalars()
8882 for (i = 0; i < fold->allocated_stack / BPF_REG_SIZE; i++) { in widen_imprecise_scalars()
8883 if (!is_spilled_reg(&fold->stack[i]) || in widen_imprecise_scalars()
8884 !is_spilled_reg(&fcur->stack[i])) in widen_imprecise_scalars()
8888 &fold->stack[i].spilled_ptr, in widen_imprecise_scalars()
8889 &fcur->stack[i].spilled_ptr, in widen_imprecise_scalars()
8890 &env->idmap_scratch); in widen_imprecise_scalars()
8899 int iter_frameno = meta->iter.frameno; in get_iter_from_state()
8900 int iter_spi = meta->iter.spi; in get_iter_from_state()
8902 return &cur_st->frame[iter_frameno]->stack[iter_spi].spilled_ptr; in get_iter_from_state()
8925 * (BPF_ITER_STATE_ACTIVE) and assume non-NULL return from iter_next(). We
8986 struct bpf_verifier_state *cur_st = env->cur_state, *queued_st, *prev_st; in process_iter_next_call()
8987 struct bpf_func_state *cur_fr = cur_st->frame[cur_st->curframe], *queued_fr; in process_iter_next_call()
8994 if (cur_iter->iter.state != BPF_ITER_STATE_ACTIVE && in process_iter_next_call()
8995 cur_iter->iter.state != BPF_ITER_STATE_DRAINED) { in process_iter_next_call()
8997 cur_iter->iter.state, iter_state_str(cur_iter->iter.state)); in process_iter_next_call()
8998 return -EFAULT; in process_iter_next_call()
9001 if (cur_iter->iter.state == BPF_ITER_STATE_ACTIVE) { in process_iter_next_call()
9005 if (!cur_st->parent || cur_st->parent->insn_idx != insn_idx || in process_iter_next_call()
9006 !same_callsites(cur_st->parent, cur_st)) { in process_iter_next_call()
9008 return -EFAULT; in process_iter_next_call()
9010 /* Note cur_st->parent in the call below, it is necessary to skip in process_iter_next_call()
9014 prev_st = find_prev_entry(env, cur_st->parent, insn_idx); in process_iter_next_call()
9018 return -ENOMEM; in process_iter_next_call()
9021 queued_iter->iter.state = BPF_ITER_STATE_ACTIVE; in process_iter_next_call()
9022 queued_iter->iter.depth++; in process_iter_next_call()
9026 queued_fr = queued_st->frame[queued_st->curframe]; in process_iter_next_call()
9027 mark_ptr_not_null_reg(&queued_fr->regs[BPF_REG_0]); in process_iter_next_call()
9032 cur_iter->iter.state = BPF_ITER_STATE_DRAINED; in process_iter_next_call()
9033 __mark_reg_const_zero(env, &cur_fr->regs[BPF_REG_0]); in process_iter_next_call()
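/* For illustration, the open-coded iterator loop being modelled here
 * (BPF C, sketch):
 *
 *   struct bpf_iter_num it;
 *   int *v, sum = 0;
 *
 *   bpf_iter_num_new(&it, 0, 10);
 *   while ((v = bpf_iter_num_next(&it)))
 *           sum += *v;
 *   bpf_iter_num_destroy(&it);
 *
 * Every _next() call is explored both ways: once with a non-NULL return
 * (ACTIVE, loop body) and once with a NULL return (DRAINED, loop exit).
 */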
9064 if (!meta->map_ptr) { in resolve_map_arg_type()
9066 verifier_bug(env, "invalid map_ptr to access map->type"); in resolve_map_arg_type()
9067 return -EFAULT; in resolve_map_arg_type()
9070 switch (meta->map_ptr->map_type) { in resolve_map_arg_type()
9077 return -EINVAL; in resolve_map_arg_type()
9081 if (meta->func_id == BPF_FUNC_map_peek_elem) in resolve_map_arg_type()
9207 enum bpf_reg_type expected, type = reg->type; in check_reg_type()
9214 return -EFAULT; in check_reg_type()
9237 if (meta->func_id == BPF_FUNC_kptr_xchg && type_is_alloc(type) && regno == BPF_REG_2) { in check_reg_type()
9242 for (i = 0; i < ARRAY_SIZE(compatible->types); i++) { in check_reg_type()
9243 expected = compatible->types[i]; in check_reg_type()
9251 verbose(env, "R%d type=%s expected=", regno, reg_type_str(env, reg->type)); in check_reg_type()
9253 verbose(env, "%s, ", reg_type_str(env, compatible->types[j])); in check_reg_type()
9254 verbose(env, "%s\n", reg_type_str(env, compatible->types[j])); in check_reg_type()
9255 return -EACCES; in check_reg_type()
9258 if (base_type(reg->type) != PTR_TO_BTF_ID) in check_reg_type()
9265 func_id_name(meta->func_id), in check_reg_type()
9266 regno, reg_type_str(env, reg->type)); in check_reg_type()
9267 return -EACCES; in check_reg_type()
9272 switch ((int)reg->type) { in check_reg_type()
9285 meta->func_id != BPF_FUNC_sk_release; in check_reg_type()
9287 if (type_may_be_null(reg->type) && in check_reg_type()
9290 return -EACCES; in check_reg_type()
9294 if (!compatible->btf_id) { in check_reg_type()
9296 return -EFAULT; in check_reg_type()
9298 arg_btf_id = compatible->btf_id; in check_reg_type()
9301 if (meta->func_id == BPF_FUNC_kptr_xchg) { in check_reg_type()
9302 if (map_kptr_match_type(env, meta->kptr_field, reg, regno)) in check_reg_type()
9303 return -EACCES; in check_reg_type()
9307 verbose(env, "R%d has non-overwritten BPF_PTR_POISON type\n", in check_reg_type()
9309 return -EACCES; in check_reg_type()
9312 if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off, in check_reg_type()
9316 regno, btf_type_name(reg->btf, reg->btf_id), in check_reg_type()
9318 return -EACCES; in check_reg_type()
9325 if (meta->func_id != BPF_FUNC_spin_lock && meta->func_id != BPF_FUNC_spin_unlock && in check_reg_type()
9326 meta->func_id != BPF_FUNC_kptr_xchg) { in check_reg_type()
9328 return -EFAULT; in check_reg_type()
9331 if (meta->func_id == BPF_FUNC_kptr_xchg && regno == BPF_REG_2) { in check_reg_type()
9332 if (map_kptr_match_type(env, meta->kptr_field, reg, regno)) in check_reg_type()
9333 return -EACCES; in check_reg_type()
9343 return -EFAULT; in check_reg_type()
9369 u32 type = reg->type; in check_func_arg_reg_off()
9375 * meta->release_regno. in check_func_arg_reg_off()
9391 if (reg->off) { in check_func_arg_reg_off()
9394 return -EINVAL; in check_func_arg_reg_off()
9425 * can be non-zero. This was already checked above. So pass in check_func_arg_reg_off()
9444 if (arg_type_is_dynptr(fn->arg_type[i])) { in get_dynptr_arg_reg()
9463 if (reg->type == CONST_PTR_TO_DYNPTR) in dynptr_id()
9464 return reg->id; in dynptr_id()
9468 return state->stack[spi].spilled_ptr.id; in dynptr_id()
9476 if (reg->type == CONST_PTR_TO_DYNPTR) in dynptr_ref_obj_id()
9477 return reg->ref_obj_id; in dynptr_ref_obj_id()
9481 return state->stack[spi].spilled_ptr.ref_obj_id; in dynptr_ref_obj_id()
9490 if (reg->type == CONST_PTR_TO_DYNPTR) in dynptr_get_type()
9491 return reg->dynptr.type; in dynptr_get_type()
9493 spi = __get_spi(reg->off); in dynptr_get_type()
9499 return state->stack[spi].spilled_ptr.dynptr.type; in dynptr_get_type()
9505 struct bpf_map *map = reg->map_ptr; in check_reg_const_str()
9511 if (reg->type != PTR_TO_MAP_VALUE) in check_reg_const_str()
9512 return -EINVAL; in check_reg_const_str()
9516 return -EACCES; in check_reg_const_str()
9519 if (!tnum_is_const(reg->var_off)) { in check_reg_const_str()
9521 return -EACCES; in check_reg_const_str()
9524 if (!map->ops->map_direct_value_addr) { in check_reg_const_str()
9526 return -EACCES; in check_reg_const_str()
9529 err = check_map_access(env, regno, reg->off, in check_reg_const_str()
9530 map->value_size - reg->off, false, in check_reg_const_str()
9535 map_off = reg->off + reg->var_off.value; in check_reg_const_str()
9536 err = map->ops->map_direct_value_addr(map, &map_addr, map_off); in check_reg_const_str()
9543 if (!strnchr(str_ptr + map_off, map->value_size - map_off, 0)) { in check_reg_const_str()
9544 verbose(env, "string is not zero-terminated\n"); in check_reg_const_str()
9545 return -EINVAL; in check_reg_const_str()
9565 if (!env->bpf_capable) in get_constant_map_key()
9566 return -EOPNOTSUPP; in get_constant_map_key()
9567 if (key->type != PTR_TO_STACK) in get_constant_map_key()
9568 return -EOPNOTSUPP; in get_constant_map_key()
9569 if (!tnum_is_const(key->var_off)) in get_constant_map_key()
9570 return -EOPNOTSUPP; in get_constant_map_key()
9572 stack_off = key->off + key->var_off.value; in get_constant_map_key()
9573 slot = -stack_off - 1; in get_constant_map_key()
9576 stype = state->stack[spi].slot_type; in get_constant_map_key()
9579 for (i = off; i >= 0 && stype[i] == STACK_ZERO; i--) in get_constant_map_key()
9587 if (!is_spilled_scalar_reg(&state->stack[spi])) in get_constant_map_key()
9588 return -EOPNOTSUPP; in get_constant_map_key()
9589 for (i = off; i >= 0 && stype[i] == STACK_SPILL; i--) in get_constant_map_key()
9592 return -EOPNOTSUPP; in get_constant_map_key()
9594 reg = &state->stack[spi].spilled_ptr; in get_constant_map_key()
9595 if (!tnum_is_const(reg->var_off)) in get_constant_map_key()
9597 return -EOPNOTSUPP; in get_constant_map_key()
9602 bt_set_frame_slot(&env->bt, key->frameno, spi); in get_constant_map_key()
9603 err = mark_chain_precision_batch(env, env->cur_state); in get_constant_map_key()
9607 *value = reg->var_off.value; in get_constant_map_key()
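/* E.g. (sketch): for an ARRAY map,
 *
 *   __u32 key = 0;
 *   val = bpf_map_lookup_elem(&arr, &key);
 *
 * the key is a known constant spilled on the stack; if it is below
 * max_entries the lookup cannot fail, so the NULL check on the returned
 * value can be elided (see can_elide_value_nullness()).
 */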
9620 enum bpf_arg_type arg_type = fn->arg_type[arg]; in check_func_arg()
9621 enum bpf_reg_type type = reg->type; in check_func_arg()
9637 return -EACCES; in check_func_arg()
9645 return -EACCES; in check_func_arg()
9663 arg_btf_id = fn->arg_btf_id[arg]; in check_func_arg()
9684 if (reg->type == PTR_TO_STACK) { in check_func_arg()
9686 if (spi < 0 || !state->stack[spi].spilled_ptr.ref_obj_id) { in check_func_arg()
9688 return -EINVAL; in check_func_arg()
9692 return -EINVAL; in check_func_arg()
9694 } else if (!reg->ref_obj_id && !register_is_null(reg)) { in check_func_arg()
9697 return -EINVAL; in check_func_arg()
9699 if (meta->release_regno) { in check_func_arg()
9701 return -EFAULT; in check_func_arg()
9703 meta->release_regno = regno; in check_func_arg()
9706 if (reg->ref_obj_id && base_type(arg_type) != ARG_KPTR_XCHG_DEST) { in check_func_arg()
9707 if (meta->ref_obj_id) { in check_func_arg()
9709 regno, reg->ref_obj_id, in check_func_arg()
9710 meta->ref_obj_id); in check_func_arg()
9711 return -EACCES; in check_func_arg()
9713 meta->ref_obj_id = reg->ref_obj_id; in check_func_arg()
9719 if (meta->map_ptr) { in check_func_arg()
9732 if (meta->map_ptr != reg->map_ptr || in check_func_arg()
9733 meta->map_uid != reg->map_uid) { in check_func_arg()
9736 meta->map_uid, reg->map_uid); in check_func_arg()
9737 return -EINVAL; in check_func_arg()
9740 meta->map_ptr = reg->map_ptr; in check_func_arg()
9741 meta->map_uid = reg->map_uid; in check_func_arg()
9745 * check that [key, key + map->key_size) are within in check_func_arg()
9748 if (!meta->map_ptr) { in check_func_arg()
9754 verifier_bug(env, "invalid map_ptr to access map->key"); in check_func_arg()
9755 return -EFAULT; in check_func_arg()
9757 key_size = meta->map_ptr->key_size; in check_func_arg()
9761 if (can_elide_value_nullness(meta->map_ptr->map_type)) { in check_func_arg()
9762 err = get_constant_map_key(env, reg, key_size, &meta->const_map_key); in check_func_arg()
9764 meta->const_map_key = -1; in check_func_arg()
9765 if (err == -EOPNOTSUPP) in check_func_arg()
9777 * check [value, value + map->value_size) validity in check_func_arg()
9779 if (!meta->map_ptr) { in check_func_arg()
9781 verifier_bug(env, "invalid map_ptr to access map->value"); in check_func_arg()
9782 return -EFAULT; in check_func_arg()
9784 meta->raw_mode = arg_type & MEM_UNINIT; in check_func_arg()
9785 err = check_helper_mem_access(env, regno, meta->map_ptr->value_size, in check_func_arg()
9790 if (!reg->btf_id) { in check_func_arg()
9792 return -EACCES; in check_func_arg()
9794 meta->ret_btf = reg->btf; in check_func_arg()
9795 meta->ret_btf_id = reg->btf_id; in check_func_arg()
9800 return -EACCES; in check_func_arg()
9802 if (meta->func_id == BPF_FUNC_spin_lock) { in check_func_arg()
9806 } else if (meta->func_id == BPF_FUNC_spin_unlock) { in check_func_arg()
9812 return -EFAULT; in check_func_arg()
9821 meta->subprogno = reg->subprogno; in check_func_arg()
9827 meta->raw_mode = arg_type & MEM_UNINIT; in check_func_arg()
9829 err = check_helper_mem_access(env, regno, fn->arg_size[arg], in check_func_arg()
9835 err = check_ptr_alignment(env, reg, 0, fn->arg_size[arg], true); in check_func_arg()
9840 fn->arg_type[arg - 1] & MEM_WRITE ? in check_func_arg()
9846 fn->arg_type[arg - 1] & MEM_WRITE ? in check_func_arg()
9856 if (!tnum_is_const(reg->var_off)) { in check_func_arg()
9859 return -EACCES; in check_func_arg()
9861 meta->mem_size = reg->var_off.value; in check_func_arg()
9885 enum bpf_attach_type eatype = env->prog->expected_attach_type; in may_update_sockmap()
9886 enum bpf_prog_type type = resolve_prog_type(env->prog); in may_update_sockmap()
9923 return env->prog->jit_requested && in allow_tail_call_in_subprogs()
9934 switch (map->map_type) { in check_map_func_compatibility()
9980 /* Restrict bpf side of cpumap and xskmap, open when use-cases in check_map_func_compatibility()
10062 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY) in check_map_func_compatibility()
10064 if (env->subprog_cnt > 1 && !allow_tail_call_in_subprogs(env)) { in check_map_func_compatibility()
10065 verbose(env, "mixing of tail_calls and bpf-to-bpf calls is not supported\n"); in check_map_func_compatibility()
10066 return -EINVAL; in check_map_func_compatibility()
10074 if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) in check_map_func_compatibility()
10083 if (map->map_type != BPF_MAP_TYPE_RINGBUF) in check_map_func_compatibility()
10087 if (map->map_type != BPF_MAP_TYPE_USER_RINGBUF) in check_map_func_compatibility()
10091 if (map->map_type != BPF_MAP_TYPE_STACK_TRACE) in check_map_func_compatibility()
10096 if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY) in check_map_func_compatibility()
10100 if (map->map_type != BPF_MAP_TYPE_DEVMAP && in check_map_func_compatibility()
10101 map->map_type != BPF_MAP_TYPE_DEVMAP_HASH && in check_map_func_compatibility()
10102 map->map_type != BPF_MAP_TYPE_CPUMAP && in check_map_func_compatibility()
10103 map->map_type != BPF_MAP_TYPE_XSKMAP) in check_map_func_compatibility()
10109 if (map->map_type != BPF_MAP_TYPE_SOCKMAP) in check_map_func_compatibility()
10115 if (map->map_type != BPF_MAP_TYPE_SOCKHASH) in check_map_func_compatibility()
10119 if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE && in check_map_func_compatibility()
10120 map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) in check_map_func_compatibility()
10124 if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY && in check_map_func_compatibility()
10125 map->map_type != BPF_MAP_TYPE_SOCKMAP && in check_map_func_compatibility()
10126 map->map_type != BPF_MAP_TYPE_SOCKHASH) in check_map_func_compatibility()
10130 if (map->map_type != BPF_MAP_TYPE_QUEUE && in check_map_func_compatibility()
10131 map->map_type != BPF_MAP_TYPE_STACK) in check_map_func_compatibility()
10136 if (map->map_type != BPF_MAP_TYPE_QUEUE && in check_map_func_compatibility()
10137 map->map_type != BPF_MAP_TYPE_STACK && in check_map_func_compatibility()
10138 map->map_type != BPF_MAP_TYPE_BLOOM_FILTER) in check_map_func_compatibility()
10142 if (map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY && in check_map_func_compatibility()
10143 map->map_type != BPF_MAP_TYPE_PERCPU_HASH && in check_map_func_compatibility()
10144 map->map_type != BPF_MAP_TYPE_LRU_PERCPU_HASH) in check_map_func_compatibility()
10149 if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) in check_map_func_compatibility()
10154 if (map->map_type != BPF_MAP_TYPE_INODE_STORAGE) in check_map_func_compatibility()
10159 if (map->map_type != BPF_MAP_TYPE_TASK_STORAGE) in check_map_func_compatibility()
10164 if (map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) in check_map_func_compatibility()
10174 map->map_type, func_id_name(func_id), func_id); in check_map_func_compatibility()
10175 return -EINVAL; in check_map_func_compatibility()
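/* For example, bpf_tail_call() is only accepted with a
 * BPF_MAP_TYPE_PROG_ARRAY and bpf_ringbuf_output() only with a
 * BPF_MAP_TYPE_RINGBUF; any other pairing fails here with the message
 * above.
 */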
10182 if (arg_type_is_raw_mem(fn->arg1_type)) in check_raw_mode_ok()
10184 if (arg_type_is_raw_mem(fn->arg2_type)) in check_raw_mode_ok()
10186 if (arg_type_is_raw_mem(fn->arg3_type)) in check_raw_mode_ok()
10188 if (arg_type_is_raw_mem(fn->arg4_type)) in check_raw_mode_ok()
10190 if (arg_type_is_raw_mem(fn->arg5_type)) in check_raw_mode_ok()
10202 bool is_fixed = fn->arg_type[arg] & MEM_FIXED_SIZE; in check_args_pair_invalid()
10203 bool has_size = fn->arg_size[arg] != 0; in check_args_pair_invalid()
10206 if (arg + 1 < ARRAY_SIZE(fn->arg_type)) in check_args_pair_invalid()
10207 is_next_size = arg_type_is_mem_size(fn->arg_type[arg + 1]); in check_args_pair_invalid()
10209 if (base_type(fn->arg_type[arg]) != ARG_PTR_TO_MEM) in check_args_pair_invalid()
10222 if (arg_type_is_mem_size(fn->arg1_type) || in check_arg_pair_ok()
10237 for (i = 0; i < ARRAY_SIZE(fn->arg_type); i++) { in check_btf_id_ok()
10238 if (base_type(fn->arg_type[i]) == ARG_PTR_TO_BTF_ID) in check_btf_id_ok()
10239 return !!fn->arg_btf_id[i]; in check_btf_id_ok()
10240 if (base_type(fn->arg_type[i]) == ARG_PTR_TO_SPIN_LOCK) in check_btf_id_ok()
10241 return fn->arg_btf_id[i] == BPF_PTR_POISON; in check_btf_id_ok()
10242 if (base_type(fn->arg_type[i]) != ARG_PTR_TO_BTF_ID && fn->arg_btf_id[i] && in check_btf_id_ok()
10244 (base_type(fn->arg_type[i]) != ARG_PTR_TO_MEM || in check_btf_id_ok()
10245 !(fn->arg_type[i] & MEM_FIXED_SIZE))) in check_btf_id_ok()
10256 check_btf_id_ok(fn) ? 0 : -EINVAL; in check_func_proto()
10270 bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({ in clear_all_pkt_pointers()
10277 AT_PKT_END = -1,
10278 BEYOND_PKT_END = -2,
10283 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in mark_pkt_end()
10284 struct bpf_reg_state *reg = &state->regs[regn]; in mark_pkt_end()
10286 if (reg->type != PTR_TO_PACKET) in mark_pkt_end()
10297 reg->range = BEYOND_PKT_END; in mark_pkt_end()
10299 reg->range = AT_PKT_END; in mark_pkt_end()
10306 for (i = 0; i < state->acquired_refs; i++) { in release_reference_nomark()
10307 if (state->refs[i].type != REF_TYPE_PTR) in release_reference_nomark()
10309 if (state->refs[i].id == ref_obj_id) { in release_reference_nomark()
10314 return -EINVAL; in release_reference_nomark()
10324 struct bpf_verifier_state *vstate = env->cur_state; in release_reference()
10334 if (reg->ref_obj_id == ref_obj_id) in release_reference()
10346 bpf_for_each_reg_in_vstate(env->cur_state, unused, reg, ({ in invalidate_non_owning_refs()
10347 if (type_is_non_owning_ref(reg->type)) in invalidate_non_owning_refs()
10357 /* after the call registers r0 - r5 were scratched */ in clear_caller_saved_regs()
10382 if (state->curframe + 1 >= MAX_CALL_FRAMES) { in setup_func_entry()
10384 state->curframe + 2); in setup_func_entry()
10385 return -E2BIG; in setup_func_entry()
10388 if (state->frame[state->curframe + 1]) { in setup_func_entry()
10389 verifier_bug(env, "Frame %d already allocated", state->curframe + 1); in setup_func_entry()
10390 return -EFAULT; in setup_func_entry()
10393 caller = state->frame[state->curframe]; in setup_func_entry()
10396 return -ENOMEM; in setup_func_entry()
10397 state->frame[state->curframe + 1] = callee; in setup_func_entry()
10399 /* callee cannot access r0, r6 - r9 for reading and has to write in setup_func_entry()
10406 state->curframe + 1 /* frameno within this callchain */, in setup_func_entry()
10413 state->curframe++; in setup_func_entry()
10419 state->frame[state->curframe + 1] = NULL; in setup_func_entry()
10428 struct bpf_verifier_log *log = &env->log; in btf_check_func_arg_match()
10439 for (i = 0; i < sub->arg_cnt; i++) { in btf_check_func_arg_match()
10442 struct bpf_subprog_arg_info *arg = &sub->args[i]; in btf_check_func_arg_match()
10444 if (arg->arg_type == ARG_ANYTHING) { in btf_check_func_arg_match()
10445 if (reg->type != SCALAR_VALUE) { in btf_check_func_arg_match()
10447 return -EINVAL; in btf_check_func_arg_match()
10449 } else if (arg->arg_type & PTR_UNTRUSTED) { in btf_check_func_arg_match()
10452 * read-only and probe read instructions would protect against in btf_check_func_arg_match()
10455 } else if (arg->arg_type == ARG_PTR_TO_CTX) { in btf_check_func_arg_match()
10462 if (reg->type != PTR_TO_CTX) { in btf_check_func_arg_match()
10464 return -EINVAL; in btf_check_func_arg_match()
10466 } else if (base_type(arg->arg_type) == ARG_PTR_TO_MEM) { in btf_check_func_arg_match()
10470 if (check_mem_reg(env, reg, regno, arg->mem_size)) in btf_check_func_arg_match()
10471 return -EINVAL; in btf_check_func_arg_match()
10472 if (!(arg->arg_type & PTR_MAYBE_NULL) && (reg->type & PTR_MAYBE_NULL)) { in btf_check_func_arg_match()
10473 bpf_log(log, "arg#%d is expected to be non-NULL\n", i); in btf_check_func_arg_match()
10474 return -EINVAL; in btf_check_func_arg_match()
10476 } else if (base_type(arg->arg_type) == ARG_PTR_TO_ARENA) { in btf_check_func_arg_match()
10482 * run-time debug nightmare. in btf_check_func_arg_match()
10484 if (reg->type != PTR_TO_ARENA && reg->type != SCALAR_VALUE) { in btf_check_func_arg_match()
10486 return -EINVAL; in btf_check_func_arg_match()
10488 } else if (arg->arg_type == (ARG_PTR_TO_DYNPTR | MEM_RDONLY)) { in btf_check_func_arg_match()
10493 ret = process_dynptr_func(env, regno, -1, arg->arg_type, 0); in btf_check_func_arg_match()
10496 } else if (base_type(arg->arg_type) == ARG_PTR_TO_BTF_ID) { in btf_check_func_arg_match()
10500 if (register_is_null(reg) && type_may_be_null(arg->arg_type)) in btf_check_func_arg_match()
10504 err = check_reg_type(env, regno, arg->arg_type, &arg->btf_id, &meta); in btf_check_func_arg_match()
10505 err = err ?: check_func_arg_reg_off(env, reg, regno, arg->arg_type); in btf_check_func_arg_match()
10509 verifier_bug(env, "unrecognized arg#%d type %d", i, arg->arg_type); in btf_check_func_arg_match()
10510 return -EFAULT; in btf_check_func_arg_match()
10519 * EFAULT - there is a verifier bug. Abort verification.
10520 * EINVAL - there is a type mismatch or BTF is not available.
10521 * 0 - BTF matches with what bpf_reg_state expects.
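/* The callers below rely on this distinction: -EFAULT aborts verification,
 * while -EINVAL only marks the subprog's BTF info as unreliable so its
 * arguments fall back to conservative, BTF-less handling.
 */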
10527 struct bpf_prog *prog = env->prog; in btf_check_subprog_call()
10528 struct btf *btf = prog->aux->btf; in btf_check_subprog_call()
10532 if (!prog->aux->func_info) in btf_check_subprog_call()
10533 return -EINVAL; in btf_check_subprog_call()
10535 btf_id = prog->aux->func_info[subprog].type_id; in btf_check_subprog_call()
10537 return -EFAULT; in btf_check_subprog_call()
10539 if (prog->aux->func_info_aux[subprog].unreliable) in btf_check_subprog_call()
10540 return -EINVAL; in btf_check_subprog_call()
10548 prog->aux->func_info_aux[subprog].unreliable = true; in btf_check_subprog_call()
10556 struct bpf_verifier_state *state = env->cur_state, *callback_state; in push_callback_call()
10560 caller = state->frame[state->curframe]; in push_callback_call()
10561 err = btf_check_subprog_call(env, subprog, caller->regs); in push_callback_call()
10562 if (err == -EFAULT) in push_callback_call()
10569 env->subprog_info[subprog].is_cb = true; in push_callback_call()
10571 !is_callback_calling_kfunc(insn->imm)) { in push_callback_call()
10572 verifier_bug(env, "kfunc %s#%d not marked as callback-calling", in push_callback_call()
10573 func_id_name(insn->imm), insn->imm); in push_callback_call()
10574 return -EFAULT; in push_callback_call()
10576 !is_callback_calling_function(insn->imm)) { /* helper */ in push_callback_call()
10577 verifier_bug(env, "helper %s#%d not marked as callback-calling", in push_callback_call()
10578 func_id_name(insn->imm), insn->imm); in push_callback_call()
10579 return -EFAULT; in push_callback_call()
10586 env->subprog_info[subprog].is_async_cb = true; in push_callback_call()
10587 async_cb = push_async_cb(env, env->subprog_info[subprog].start, in push_callback_call()
10589 is_bpf_wq_set_callback_impl_kfunc(insn->imm) || in push_callback_call()
10590 is_task_work_add_kfunc(insn->imm)); in push_callback_call()
10592 return -EFAULT; in push_callback_call()
10593 callee = async_cb->frame[0]; in push_callback_call()
10594 callee->async_entry_cnt = caller->async_entry_cnt + 1; in push_callback_call()
10607 callback_state = push_stack(env, env->subprog_info[subprog].start, insn_idx, false); in push_callback_call()
10609 return -ENOMEM; in push_callback_call()
10616 callback_state->callback_unroll_depth++; in push_callback_call()
10617 callback_state->frame[callback_state->curframe - 1]->callback_depth++; in push_callback_call()
10618 caller->callback_depth = 0; in push_callback_call()
10625 struct bpf_verifier_state *state = env->cur_state; in check_func_call()
10629 target_insn = *insn_idx + insn->imm + 1; in check_func_call()
10633 return -EFAULT; in check_func_call()
10635 caller = state->frame[state->curframe]; in check_func_call()
10636 err = btf_check_subprog_call(env, subprog, caller->regs); in check_func_call()
10637 if (err == -EFAULT) in check_func_call()
10642 if (env->cur_state->active_locks) { in check_func_call()
10645 return -EINVAL; in check_func_call()
10648 if (env->subprog_info[subprog].might_sleep && in check_func_call()
10649 (env->cur_state->active_rcu_lock || env->cur_state->active_preempt_locks || in check_func_call()
10650 env->cur_state->active_irq_id || !in_sleepable(env))) { in check_func_call()
10651 verbose(env, "global functions that may sleep are not allowed in non-sleepable context,\n" in check_func_call()
10652 "i.e., in an RCU/IRQ/preempt-disabled section, or in\n" in check_func_call()
10653 "a non-sleepable BPF program context\n"); in check_func_call()
10654 return -EINVAL; in check_func_call()
10665 if (env->subprog_info[subprog].changes_pkt_data) in check_func_call()
10668 subprog_aux(env, subprog)->called = true; in check_func_call()
10669 clear_caller_saved_regs(env, caller->regs); in check_func_call()
10671 /* All global functions return a 64-bit SCALAR_VALUE */ in check_func_call()
10672 mark_reg_unknown(env, caller->regs, BPF_REG_0); in check_func_call()
10673 caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG; in check_func_call()
10686 clear_caller_saved_regs(env, caller->regs); in check_func_call()
10689 *insn_idx = env->subprog_info[subprog].start - 1; in check_func_call()
10693 if (env->log.level & BPF_LOG_LEVEL) { in check_func_call()
10695 print_verifier_state(env, state, caller->frameno, true); in check_func_call()
10697 print_verifier_state(env, state, state->curframe, true); in check_func_call()
10712 callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1]; in map_set_for_each_callback_args()
10714 callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY; in map_set_for_each_callback_args()
10715 __mark_reg_known_zero(&callee->regs[BPF_REG_2]); in map_set_for_each_callback_args()
10716 callee->regs[BPF_REG_2].map_ptr = caller->regs[BPF_REG_1].map_ptr; in map_set_for_each_callback_args()
10718 callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE; in map_set_for_each_callback_args()
10719 __mark_reg_known_zero(&callee->regs[BPF_REG_3]); in map_set_for_each_callback_args()
10720 callee->regs[BPF_REG_3].map_ptr = caller->regs[BPF_REG_1].map_ptr; in map_set_for_each_callback_args()
10723 callee->regs[BPF_REG_4] = caller->regs[BPF_REG_3]; in map_set_for_each_callback_args()
10726 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in map_set_for_each_callback_args()
10736 /* copy r1 - r5 args that callee can access. The copy includes parent in set_callee_state()
10740 callee->regs[i] = caller->regs[i]; in set_callee_state()
10749 struct bpf_insn_aux_data *insn_aux = &env->insn_aux_data[insn_idx]; in set_map_elem_callback_state()
10754 map = insn_aux->map_ptr_state.map_ptr; in set_map_elem_callback_state()
10755 if (!map->ops->map_set_for_each_callback_args || in set_map_elem_callback_state()
10756 !map->ops->map_for_each_callback) { in set_map_elem_callback_state()
10758 return -ENOTSUPP; in set_map_elem_callback_state()
10761 err = map->ops->map_set_for_each_callback_args(env, caller, callee); in set_map_elem_callback_state()
10765 callee->in_callback_fn = true; in set_map_elem_callback_state()
10766 callee->callback_ret_range = retval_range(0, 1); in set_map_elem_callback_state()
10779 callee->regs[BPF_REG_1].type = SCALAR_VALUE; in set_loop_callback_state()
10780 callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3]; in set_loop_callback_state()
10783 __mark_reg_not_init(env, &callee->regs[BPF_REG_3]); in set_loop_callback_state()
10784 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); in set_loop_callback_state()
10785 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_loop_callback_state()
10787 callee->in_callback_fn = true; in set_loop_callback_state()
10788 callee->callback_ret_range = retval_range(0, 1); in set_loop_callback_state()
10797 struct bpf_map *map_ptr = caller->regs[BPF_REG_1].map_ptr; in set_timer_callback_state()
10802 callee->regs[BPF_REG_1].type = CONST_PTR_TO_MAP; in set_timer_callback_state()
10803 __mark_reg_known_zero(&callee->regs[BPF_REG_1]); in set_timer_callback_state()
10804 callee->regs[BPF_REG_1].map_ptr = map_ptr; in set_timer_callback_state()
10806 callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY; in set_timer_callback_state()
10807 __mark_reg_known_zero(&callee->regs[BPF_REG_2]); in set_timer_callback_state()
10808 callee->regs[BPF_REG_2].map_ptr = map_ptr; in set_timer_callback_state()
10810 callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE; in set_timer_callback_state()
10811 __mark_reg_known_zero(&callee->regs[BPF_REG_3]); in set_timer_callback_state()
10812 callee->regs[BPF_REG_3].map_ptr = map_ptr; in set_timer_callback_state()
10815 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); in set_timer_callback_state()
10816 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_timer_callback_state()
10817 callee->in_async_callback_fn = true; in set_timer_callback_state()
10818 callee->callback_ret_range = retval_range(0, 0); in set_timer_callback_state()
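/* This matches the callback shape used from BPF C (sketch; the key and
 * value types depend on the map):
 *
 *   static int timer_cb(void *map, __u32 *key, struct map_elem *val)
 *   {
 *           ...
 *           return 0;    // only 0 is allowed, see retval_range(0, 0)
 *   }
 *   ...
 *   bpf_timer_set_callback(&val->timer, timer_cb);
 */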
10832 callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1]; in set_find_vma_callback_state()
10834 callee->regs[BPF_REG_2].type = PTR_TO_BTF_ID; in set_find_vma_callback_state()
10835 __mark_reg_known_zero(&callee->regs[BPF_REG_2]); in set_find_vma_callback_state()
10836 callee->regs[BPF_REG_2].btf = btf_vmlinux; in set_find_vma_callback_state()
10837 callee->regs[BPF_REG_2].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_VMA]; in set_find_vma_callback_state()
10840 callee->regs[BPF_REG_3] = caller->regs[BPF_REG_4]; in set_find_vma_callback_state()
10843 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); in set_find_vma_callback_state()
10844 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_find_vma_callback_state()
10845 callee->in_callback_fn = true; in set_find_vma_callback_state()
10846 callee->callback_ret_range = retval_range(0, 1); in set_find_vma_callback_state()
10859 __mark_reg_not_init(env, &callee->regs[BPF_REG_0]); in set_user_ringbuf_callback_state()
10860 mark_dynptr_cb_reg(env, &callee->regs[BPF_REG_1], BPF_DYNPTR_TYPE_LOCAL); in set_user_ringbuf_callback_state()
10861 callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3]; in set_user_ringbuf_callback_state()
10864 __mark_reg_not_init(env, &callee->regs[BPF_REG_3]); in set_user_ringbuf_callback_state()
10865 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); in set_user_ringbuf_callback_state()
10866 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_user_ringbuf_callback_state()
10868 callee->in_callback_fn = true; in set_user_ringbuf_callback_state()
10869 callee->callback_ret_range = retval_range(0, 1); in set_user_ringbuf_callback_state()
10887 field = reg_find_field_offset(&caller->regs[BPF_REG_1], caller->regs[BPF_REG_1].off, in set_rbtree_add_callback_state()
10889 if (!field || !field->graph_root.value_btf_id) in set_rbtree_add_callback_state()
10890 return -EFAULT; in set_rbtree_add_callback_state()
10892 mark_reg_graph_node(callee->regs, BPF_REG_1, &field->graph_root); in set_rbtree_add_callback_state()
10893 ref_set_non_owning(env, &callee->regs[BPF_REG_1]); in set_rbtree_add_callback_state()
10894 mark_reg_graph_node(callee->regs, BPF_REG_2, &field->graph_root); in set_rbtree_add_callback_state()
10895 ref_set_non_owning(env, &callee->regs[BPF_REG_2]); in set_rbtree_add_callback_state()
10897 __mark_reg_not_init(env, &callee->regs[BPF_REG_3]); in set_rbtree_add_callback_state()
10898 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); in set_rbtree_add_callback_state()
10899 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_rbtree_add_callback_state()
10900 callee->in_callback_fn = true; in set_rbtree_add_callback_state()
10901 callee->callback_ret_range = retval_range(0, 1); in set_rbtree_add_callback_state()
10910 struct bpf_map *map_ptr = caller->regs[BPF_REG_3].map_ptr; in set_task_work_schedule_callback_state()
10915 callee->regs[BPF_REG_1].type = CONST_PTR_TO_MAP; in set_task_work_schedule_callback_state()
10916 __mark_reg_known_zero(&callee->regs[BPF_REG_1]); in set_task_work_schedule_callback_state()
10917 callee->regs[BPF_REG_1].map_ptr = map_ptr; in set_task_work_schedule_callback_state()
10919 callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY; in set_task_work_schedule_callback_state()
10920 __mark_reg_known_zero(&callee->regs[BPF_REG_2]); in set_task_work_schedule_callback_state()
10921 callee->regs[BPF_REG_2].map_ptr = map_ptr; in set_task_work_schedule_callback_state()
10923 callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE; in set_task_work_schedule_callback_state()
10924 __mark_reg_known_zero(&callee->regs[BPF_REG_3]); in set_task_work_schedule_callback_state()
10925 callee->regs[BPF_REG_3].map_ptr = map_ptr; in set_task_work_schedule_callback_state()
10928 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); in set_task_work_schedule_callback_state()
10929 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); in set_task_work_schedule_callback_state()
10930 callee->in_async_callback_fn = true; in set_task_work_schedule_callback_state()
10931 callee->callback_ret_range = retval_range(S32_MIN, S32_MAX); in set_task_work_schedule_callback_state()
10943 struct bpf_verifier_state *state = env->cur_state; in in_rbtree_lock_required_cb()
10944 struct bpf_insn *insn = env->prog->insnsi; in in_rbtree_lock_required_cb()
10948 if (!state->curframe) in in_rbtree_lock_required_cb()
10951 callee = state->frame[state->curframe]; in in_rbtree_lock_required_cb()
10953 if (!callee->in_callback_fn) in in_rbtree_lock_required_cb()
10956 kfunc_btf_id = insn[callee->callsite].imm; in in_rbtree_lock_required_cb()
10964 return range.minval <= reg->s32_min_value && reg->s32_max_value <= range.maxval; in retval_range_within()
10966 return range.minval <= reg->smin_value && reg->smax_value <= range.maxval; in retval_range_within()
10971 struct bpf_verifier_state *state = env->cur_state, *prev_st; in prepare_func_exit()
10977 callee = state->frame[state->curframe]; in prepare_func_exit()
10978 r0 = &callee->regs[BPF_REG_0]; in prepare_func_exit()
10979 if (r0->type == PTR_TO_STACK) { in prepare_func_exit()
10987 return -EINVAL; in prepare_func_exit()
10990 caller = state->frame[state->curframe - 1]; in prepare_func_exit()
10991 if (callee->in_callback_fn) { in prepare_func_exit()
10992 if (r0->type != SCALAR_VALUE) { in prepare_func_exit()
10994 return -EACCES; in prepare_func_exit()
11003 if (!retval_range_within(callee->callback_ret_range, r0, false)) { in prepare_func_exit()
11004 verbose_invalid_scalar(env, r0, callee->callback_ret_range, in prepare_func_exit()
11006 return -EINVAL; in prepare_func_exit()
11008 if (!bpf_calls_callback(env, callee->callsite)) { in prepare_func_exit()
11010 *insn_idx, callee->callsite); in prepare_func_exit()
11011 return -EFAULT; in prepare_func_exit()
11015 caller->regs[BPF_REG_0] = *r0; in prepare_func_exit()
11022 in_callback_fn = callee->in_callback_fn; in prepare_func_exit()
11024 *insn_idx = callee->callsite; in prepare_func_exit()
11026 *insn_idx = callee->callsite + 1; in prepare_func_exit()
11028 if (env->log.level & BPF_LOG_LEVEL) { in prepare_func_exit()
11030 print_verifier_state(env, state, callee->frameno, true); in prepare_func_exit()
11032 print_verifier_state(env, state, caller->frameno, true); in prepare_func_exit()
11037 state->frame[state->curframe--] = NULL; in prepare_func_exit()
11042 * void cb(int idx, struct ctx *ctx) { ctx->i++; ... } in prepare_func_exit()
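The range check above is what rejects callbacks whose return value can fall outside the recorded callback_ret_range. Illustrative sketch only, assuming a bpf_loop()-style callback (whose allowed range is also [0, 1]):

	static int bad_cb(u64 index, void *ctx)
	{
		return 2;	/* provably outside [0, 1] -> rejected at the
				 * callback's BPF_EXIT */
	}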
11075 ret_reg->smax_value = meta->msize_max_value; in do_refine_retval_range()
11076 ret_reg->s32_max_value = meta->msize_max_value; in do_refine_retval_range()
11077 ret_reg->smin_value = -MAX_ERRNO; in do_refine_retval_range()
11078 ret_reg->s32_min_value = -MAX_ERRNO; in do_refine_retval_range()
11082 ret_reg->umax_value = nr_cpu_ids - 1; in do_refine_retval_range()
11083 ret_reg->u32_max_value = nr_cpu_ids - 1; in do_refine_retval_range()
11084 ret_reg->smax_value = nr_cpu_ids - 1; in do_refine_retval_range()
11085 ret_reg->s32_max_value = nr_cpu_ids - 1; in do_refine_retval_range()
11086 ret_reg->umin_value = 0; in do_refine_retval_range()
11087 ret_reg->u32_min_value = 0; in do_refine_retval_range()
11088 ret_reg->smin_value = 0; in do_refine_retval_range()
11089 ret_reg->s32_min_value = 0; in do_refine_retval_range()
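This branch clamps the helper's return value to [0, nr_cpu_ids - 1]. Assuming it covers bpf_get_smp_processor_id() (the func_id is not visible here), the refined bounds let a program index a suitably sized array without an explicit bounds check:

	u64 counters[MAX_CPUS];	/* global array; MAX_CPUS assumed >= nr_cpu_ids */

	static int probe(void *ctx)
	{
		u32 cpu = bpf_get_smp_processor_id();

		counters[cpu]++;	/* in-bounds: umax_value is nr_cpu_ids - 1 */
		return 0;
	}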
11101 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx]; in record_func_map()
11102 struct bpf_map *map = meta->map_ptr; in record_func_map()
11118 return -EFAULT; in record_func_map()
11121 /* In case of read-only, some additional restrictions in record_func_map()
11125 if ((map->map_flags & BPF_F_RDONLY_PROG) && in record_func_map()
11131 return -EACCES; in record_func_map()
11134 if (!aux->map_ptr_state.map_ptr) in record_func_map()
11135 bpf_map_ptr_store(aux, meta->map_ptr, in record_func_map()
11136 !meta->map_ptr->bypass_spec_v1, false); in record_func_map()
11137 else if (aux->map_ptr_state.map_ptr != meta->map_ptr) in record_func_map()
11138 bpf_map_ptr_store(aux, meta->map_ptr, in record_func_map()
11139 !meta->map_ptr->bypass_spec_v1, true); in record_func_map()
11147 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx]; in record_func_key()
11149 struct bpf_map *map = meta->map_ptr; in record_func_key()
11155 if (!map || map->map_type != BPF_MAP_TYPE_PROG_ARRAY) { in record_func_key()
11157 return -EINVAL; in record_func_key()
11161 val = reg->var_off.value; in record_func_key()
11162 max = map->max_entries; in record_func_key()
11182 struct bpf_verifier_state *state = env->cur_state; in check_reference_leak()
11183 enum bpf_prog_type type = resolve_prog_type(env->prog); in check_reference_leak()
11188 if (!exception_exit && cur_func(env)->frameno) in check_reference_leak()
11191 for (i = 0; i < state->acquired_refs; i++) { in check_reference_leak()
11192 if (state->refs[i].type != REF_TYPE_PTR) in check_reference_leak()
11198 reg->ref_obj_id == state->refs[i].id) in check_reference_leak()
11201 state->refs[i].id, state->refs[i].insn_idx); in check_reference_leak()
11204 return refs_lingering ? -EINVAL : 0; in check_reference_leak()
11211 if (check_lock && env->cur_state->active_locks) { in check_resource_leak()
11212 verbose(env, "%s cannot be used inside bpf_spin_lock-ed region\n", prefix); in check_resource_leak()
11213 return -EINVAL; in check_resource_leak()
11222 if (check_lock && env->cur_state->active_irq_id) { in check_resource_leak()
11223 verbose(env, "%s cannot be used inside bpf_local_irq_save-ed region\n", prefix); in check_resource_leak()
11224 return -EINVAL; in check_resource_leak()
11227 if (check_lock && env->cur_state->active_rcu_lock) { in check_resource_leak()
11228 verbose(env, "%s cannot be used inside bpf_rcu_read_lock-ed region\n", prefix); in check_resource_leak()
11229 return -EINVAL; in check_resource_leak()
11232 if (check_lock && env->cur_state->active_preempt_locks) { in check_resource_leak()
11233 verbose(env, "%s cannot be used inside bpf_preempt_disable-ed region\n", prefix); in check_resource_leak()
11234 return -EINVAL; in check_resource_leak()
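Each branch rejects a construct that would jump or return out of a critical section. A hedged example of a pattern these checks refuse (variable, map and context names are illustrative; the tail call is just one of the call sites that passes a prefix here):

	bpf_spin_lock(&val->lock);
	bpf_tail_call(ctx, &jmp_table, 0);	/* rejected: spin lock still held */
	bpf_spin_unlock(&val->lock);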
11245 struct bpf_map *fmt_map = fmt_reg->map_ptr; in check_bpf_snprintf_call()
11252 if (data_len_reg->var_off.value % 8) in check_bpf_snprintf_call()
11253 return -EINVAL; in check_bpf_snprintf_call()
11254 num_args = data_len_reg->var_off.value / 8; in check_bpf_snprintf_call()
11259 fmt_map_off = fmt_reg->off + fmt_reg->var_off.value; in check_bpf_snprintf_call()
11260 err = fmt_map->ops->map_direct_value_addr(fmt_map, &fmt_addr, in check_bpf_snprintf_call()
11264 return -EFAULT; in check_bpf_snprintf_call()
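These checks mirror how bpf_snprintf() is meant to be called: the format string must live in a read-only map so map_direct_value_addr() can hand its contents to the verifier at load time, and the variadic data is an array of u64 whose byte size, a multiple of 8, determines the argument count. Typical usage from BPF C:

	static const char fmt[] = "cpu=%u pid=%u";	/* ends up in .rodata */
	u64 data[2];
	char out[64];

	data[0] = bpf_get_smp_processor_id();
	data[1] = bpf_get_current_pid_tgid() >> 32;
	/* data_len = 16 -> num_args = 2, matching the two specifiers in fmt */
	bpf_snprintf(out, sizeof(out), fmt, data, sizeof(data));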
11280 enum bpf_prog_type type = resolve_prog_type(env->prog); in check_get_func_ip()
11284 if (!bpf_prog_has_trampoline(env->prog)) { in check_get_func_ip()
11287 return -ENOTSUPP; in check_get_func_ip()
11296 return -ENOTSUPP; in check_get_func_ip()
11301 return &env->insn_aux_data[env->insn_idx]; in cur_aux()
11318 struct bpf_loop_inline_state *state = &cur_aux(env)->loop_inline_state; in update_loop_inline_state()
11320 if (!state->initialized) { in update_loop_inline_state()
11321 state->initialized = 1; in update_loop_inline_state()
11322 state->fit_for_inline = loop_flag_is_zero(env); in update_loop_inline_state()
11323 state->callback_subprogno = subprogno; in update_loop_inline_state()
11327 if (!state->fit_for_inline) in update_loop_inline_state()
11330 state->fit_for_inline = (loop_flag_is_zero(env) && in update_loop_inline_state()
11331 state->callback_subprogno == subprogno); in update_loop_inline_state()
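A bpf_loop() call site stays fit for inlining only while its flags argument is provably zero and the same callback subprogram reaches it on every path; the per-insn state above tracks exactly that. Minimal sketch:

	static int step(u64 index, void *ctx)
	{
		return 0;	/* 0 = keep looping, 1 = break */
	}

	SEC("tc")
	int prog(struct __sk_buff *skb)
	{
		/* flags is the constant 0 and 'step' is the only callback used
		 * at this call site, so fit_for_inline is preserved; a non-zero
		 * or non-constant flags value would clear it. */
		bpf_loop(128, step, NULL, 0);
		return 0;
	}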
11353 return -ERANGE; in get_helper_proto()
11355 if (!env->ops->get_func_proto) in get_helper_proto()
11356 return -EINVAL; in get_helper_proto()
11358 *ptr = env->ops->get_func_proto(func_id, env->prog); in get_helper_proto()
11359 return *ptr && (*ptr)->func ? 0 : -EINVAL; in get_helper_proto()
11365 enum bpf_prog_type prog_type = resolve_prog_type(env->prog); in check_helper_call()
11377 func_id = insn->imm; in check_helper_call()
11378 err = get_helper_proto(env, insn->imm, &fn); in check_helper_call()
11379 if (err == -ERANGE) { in check_helper_call()
11381 return -EINVAL; in check_helper_call()
11390 /* eBPF programs must be GPL compatible to use GPL-ed functions */ in check_helper_call()
11391 if (!env->prog->gpl_compatible && fn->gpl_only) { in check_helper_call()
11392 verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n"); in check_helper_call()
11393 return -EINVAL; in check_helper_call()
11396 if (fn->allowed && !fn->allowed(env->prog)) { in check_helper_call()
11398 return -EINVAL; in check_helper_call()
11401 if (!in_sleepable(env) && fn->might_sleep) { in check_helper_call()
11402 verbose(env, "helper call might sleep in a non-sleepable prog\n"); in check_helper_call()
11403 return -EINVAL; in check_helper_call()
11408 if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) { in check_helper_call()
11410 return -EFAULT; in check_helper_call()
11414 meta.pkt_access = fn->pkt_access; in check_helper_call()
11422 if (env->cur_state->active_rcu_lock) { in check_helper_call()
11423 if (fn->might_sleep) { in check_helper_call()
11426 return -EINVAL; in check_helper_call()
11430 env->insn_aux_data[insn_idx].storage_get_func_atomic = true; in check_helper_call()
11433 if (env->cur_state->active_preempt_locks) { in check_helper_call()
11434 if (fn->might_sleep) { in check_helper_call()
11435 verbose(env, "sleepable helper %s#%d in non-preemptible region\n", in check_helper_call()
11437 return -EINVAL; in check_helper_call()
11441 env->insn_aux_data[insn_idx].storage_get_func_atomic = true; in check_helper_call()
11444 if (env->cur_state->active_irq_id) { in check_helper_call()
11445 if (fn->might_sleep) { in check_helper_call()
11446 verbose(env, "sleepable helper %s#%d in IRQ-disabled region\n", in check_helper_call()
11448 return -EINVAL; in check_helper_call()
11452 env->insn_aux_data[insn_idx].storage_get_func_atomic = true; in check_helper_call()
11476 BPF_WRITE, -1, false, false); in check_helper_call()
11484 err = -EINVAL; in check_helper_call()
11489 if (arg_type_is_dynptr(fn->arg_type[meta.release_regno - BPF_REG_1])) { in check_helper_call()
11492 return -EFAULT; in check_helper_call()
11501 err = release_reference_nomark(env->cur_state, ref_obj_id); in check_helper_call()
11503 bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({ in check_helper_call()
11504 if (reg->ref_obj_id == ref_obj_id) { in check_helper_call()
11505 if (in_rcu && (reg->type & MEM_ALLOC) && (reg->type & MEM_PERCPU)) { in check_helper_call()
11506 reg->ref_obj_id = 0; in check_helper_call()
11507 reg->type &= ~MEM_ALLOC; in check_helper_call()
11508 reg->type |= MEM_RCU; in check_helper_call()
11541 verbose(env, "get_local_storage() doesn't support non-zero flags\n"); in check_helper_call()
11542 return -EINVAL; in check_helper_call()
11568 if (cur_func(env)->callback_depth < regs[BPF_REG_1].umax_value) { in check_helper_call()
11572 cur_func(env)->callback_depth = 0; in check_helper_call()
11573 if (env->log.level & BPF_LOG_LEVEL2) in check_helper_call()
11575 env->cur_state->curframe); in check_helper_call()
11582 return -EACCES; in check_helper_call()
11587 env->prog->expected_attach_type == BPF_LSM_CGROUP) { in check_helper_call()
11588 if (!env->prog->aux->attach_func_proto->type) { in check_helper_call()
11593 return -EINVAL; in check_helper_call()
11604 return -EFAULT; in check_helper_call()
11609 return -EFAULT; in check_helper_call()
11613 return -EFAULT; in check_helper_call()
11640 return -EFAULT; in check_helper_call()
11644 return -EFAULT; in check_helper_call()
11661 if (reg->type & MEM_RCU) { in check_helper_call()
11662 type = btf_type_by_id(reg->btf, reg->btf_id); in check_helper_call()
11665 return -EFAULT; in check_helper_call()
11668 env->insn_aux_data[insn_idx].call_with_percpu_alloc_ptr = true; in check_helper_call()
11687 /* helper call returns 64-bit value. */ in check_helper_call()
11691 ret_type = fn->ret_type; in check_helper_call()
11711 return -EFAULT; in check_helper_call()
11715 can_elide_value_nullness(meta.map_ptr->map_type) && in check_helper_call()
11717 meta.const_map_key < meta.map_ptr->max_entries) in check_helper_call()
11724 btf_record_has_field(meta.map_ptr->record, BPF_SPIN_LOCK | BPF_RES_SPIN_LOCK)) { in check_helper_call()
11725 regs[BPF_REG_0].id = ++env->id_gen; in check_helper_call()
11759 tname = btf_name_by_offset(meta.ret_btf, t->name_off); in check_helper_call()
11762 return -EINVAL; in check_helper_call()
11792 ret_btf = meta.kptr_field->kptr.btf; in check_helper_call()
11793 ret_btf_id = meta.kptr_field->kptr.btf_id; in check_helper_call()
11796 if (meta.kptr_field->type == BPF_KPTR_PERCPU) in check_helper_call()
11800 if (fn->ret_btf_id == BPF_PTR_POISON) { in check_helper_call()
11801 verifier_bug(env, "func %s has non-overwritten BPF_PTR_POISON return type", in check_helper_call()
11803 return -EFAULT; in check_helper_call()
11806 ret_btf_id = *fn->ret_btf_id; in check_helper_call()
11812 return -EINVAL; in check_helper_call()
11821 return -EINVAL; in check_helper_call()
11825 regs[BPF_REG_0].id = ++env->id_gen; in check_helper_call()
11830 return -EFAULT; in check_helper_call()
11850 err = do_refine_retval_range(env, regs, fn->ret_type, func_id, &meta); in check_helper_call()
11860 !env->prog->has_callchain_buf) { in check_helper_call()
11867 err = -ENOTSUPP; in check_helper_call()
11875 env->prog->has_callchain_buf = true; in check_helper_call()
11879 env->prog->call_get_stack = true; in check_helper_call()
11883 return -ENOTSUPP; in check_helper_call()
11884 env->prog->call_get_func_ip = true; in check_helper_call()
11902 reg->subreg_def = reg_size == sizeof(u64) ? in __mark_btf_func_reg_size()
11903 DEF_NOT_SUBREG : env->insn_idx + 1; in __mark_btf_func_reg_size()
11918 return meta->kfunc_flags & KF_ACQUIRE; in is_kfunc_acquire()
11923 return meta->kfunc_flags & KF_RELEASE; in is_kfunc_release()
11928 return (meta->kfunc_flags & KF_TRUSTED_ARGS) || is_kfunc_release(meta); in is_kfunc_trusted_args()
11933 return meta->kfunc_flags & KF_SLEEPABLE; in is_kfunc_sleepable()
11938 return meta->kfunc_flags & KF_DESTRUCTIVE; in is_kfunc_destructive()
11943 return meta->kfunc_flags & KF_RCU; in is_kfunc_rcu()
11948 return meta->kfunc_flags & KF_RCU_PROTECTED; in is_kfunc_rcu_protected()
11957 t = btf_type_skip_modifiers(btf, arg->type, NULL); in is_kfunc_arg_mem_size()
11958 if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE) in is_kfunc_arg_mem_size()
11970 t = btf_type_skip_modifiers(btf, arg->type, NULL); in is_kfunc_arg_const_mem_size()
11971 if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE) in is_kfunc_arg_const_mem_size()
12039 param_name = btf_name_by_offset(btf, arg->name_off); in is_kfunc_arg_scalar_with_name()
12078 t = btf_type_skip_modifiers(btf, arg->type, NULL); in BTF_ID()
12083 t = btf_type_skip_modifiers(btf, t->type, &res_id); in BTF_ID()
12144 t = btf_type_resolve_func_ptr(btf, arg->type, NULL); in is_kfunc_arg_callback()
12166 member_type = btf_type_skip_modifiers(btf, member->type, NULL); in __btf_type_is_scalar_struct()
12178 if (!array->nelems) in __btf_type_is_scalar_struct()
12180 member_type = btf_type_skip_modifiers(btf, array->type, NULL); in __btf_type_is_scalar_struct()
12345 if (meta->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl] && in is_kfunc_ret_null()
12346 meta->arg_owning_ref) { in is_kfunc_ret_null()
12350 return meta->kfunc_flags & KF_RET_NULL; in is_kfunc_ret_null()
12355 return meta->func_id == special_kfunc_list[KF_bpf_rcu_read_lock]; in is_kfunc_bpf_rcu_read_lock()
12360 return meta->func_id == special_kfunc_list[KF_bpf_rcu_read_unlock]; in is_kfunc_bpf_rcu_read_unlock()
12365 return meta->func_id == special_kfunc_list[KF_bpf_preempt_disable]; in is_kfunc_bpf_preempt_disable()
12370 return meta->func_id == special_kfunc_list[KF_bpf_preempt_enable]; in is_kfunc_bpf_preempt_enable()
12375 return meta->func_id == special_kfunc_list[KF_bpf_xdp_pull_data]; in is_kfunc_pkt_changing()
12390 if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx]) in get_kfunc_ptr_arg_type()
12398 if (btf_is_prog_ctx_type(&env->log, meta->btf, t, resolve_prog_type(env->prog), argno)) in get_kfunc_ptr_arg_type()
12401 if (is_kfunc_arg_nullable(meta->btf, &args[argno]) && register_is_null(reg)) in get_kfunc_ptr_arg_type()
12404 if (is_kfunc_arg_alloc_obj(meta->btf, &args[argno])) in get_kfunc_ptr_arg_type()
12407 if (is_kfunc_arg_refcounted_kptr(meta->btf, &args[argno])) in get_kfunc_ptr_arg_type()
12410 if (is_kfunc_arg_dynptr(meta->btf, &args[argno])) in get_kfunc_ptr_arg_type()
12416 if (is_kfunc_arg_list_head(meta->btf, &args[argno])) in get_kfunc_ptr_arg_type()
12419 if (is_kfunc_arg_list_node(meta->btf, &args[argno])) in get_kfunc_ptr_arg_type()
12422 if (is_kfunc_arg_rbtree_root(meta->btf, &args[argno])) in get_kfunc_ptr_arg_type()
12425 if (is_kfunc_arg_rbtree_node(meta->btf, &args[argno])) in get_kfunc_ptr_arg_type()
12428 if (is_kfunc_arg_const_str(meta->btf, &args[argno])) in get_kfunc_ptr_arg_type()
12431 if (is_kfunc_arg_map(meta->btf, &args[argno])) in get_kfunc_ptr_arg_type()
12434 if (is_kfunc_arg_wq(meta->btf, &args[argno])) in get_kfunc_ptr_arg_type()
12437 if (is_kfunc_arg_task_work(meta->btf, &args[argno])) in get_kfunc_ptr_arg_type()
12440 if (is_kfunc_arg_irq_flag(meta->btf, &args[argno])) in get_kfunc_ptr_arg_type()
12443 if (is_kfunc_arg_res_spin_lock(meta->btf, &args[argno])) in get_kfunc_ptr_arg_type()
12446 if ((base_type(reg->type) == PTR_TO_BTF_ID || reg2btf_ids[base_type(reg->type)])) { in get_kfunc_ptr_arg_type()
12449 meta->func_name, argno, btf_type_str(ref_t), ref_tname); in get_kfunc_ptr_arg_type()
12450 return -EINVAL; in get_kfunc_ptr_arg_type()
12455 if (is_kfunc_arg_callback(env, meta->btf, &args[argno])) in get_kfunc_ptr_arg_type()
12459 (is_kfunc_arg_mem_size(meta->btf, &args[argno + 1], &regs[regno + 1]) || in get_kfunc_ptr_arg_type()
12460 is_kfunc_arg_const_mem_size(meta->btf, &args[argno + 1], &regs[regno + 1]))) in get_kfunc_ptr_arg_type()
12468 if (!btf_type_is_scalar(ref_t) && !__btf_type_is_scalar_struct(env, meta->btf, ref_t, 0) && in get_kfunc_ptr_arg_type()
12472 return -EINVAL; in get_kfunc_ptr_arg_type()
12492 if (base_type(reg->type) == PTR_TO_BTF_ID) { in process_kf_arg_ptr_to_btf_id()
12493 reg_btf = reg->btf; in process_kf_arg_ptr_to_btf_id()
12494 reg_ref_id = reg->btf_id; in process_kf_arg_ptr_to_btf_id()
12497 reg_ref_id = *reg2btf_ids[base_type(reg->type)]; in process_kf_arg_ptr_to_btf_id()
12501 * or releasing a reference, or are no-cast aliases. We do _not_ in process_kf_arg_ptr_to_btf_id()
12524 if ((is_kfunc_release(meta) && reg->ref_obj_id) || in process_kf_arg_ptr_to_btf_id()
12525 btf_type_ids_nocast_alias(&env->log, reg_btf, reg_ref_id, meta->btf, ref_id)) in process_kf_arg_ptr_to_btf_id()
12529 (reg->off || !tnum_is_const(reg->var_off) || in process_kf_arg_ptr_to_btf_id()
12530 reg->var_off.value)); in process_kf_arg_ptr_to_btf_id()
12533 reg_ref_tname = btf_name_by_offset(reg_btf, reg_ref_t->name_off); in process_kf_arg_ptr_to_btf_id()
12534 …struct_same = btf_struct_ids_match(&env->log, reg_btf, reg_ref_id, reg->off, meta->btf, ref_id, st… in process_kf_arg_ptr_to_btf_id()
12536 * actually use it -- it must cast to the underlying type. So we allow in process_kf_arg_ptr_to_btf_id()
12542 meta->func_name, argno, btf_type_str(ref_t), ref_tname, argno + 1, in process_kf_arg_ptr_to_btf_id()
12544 return -EINVAL; in process_kf_arg_ptr_to_btf_id()
12556 if (meta->func_id == special_kfunc_list[KF_bpf_local_irq_save] || in process_irq_flag()
12557 meta->func_id == special_kfunc_list[KF_bpf_res_spin_lock_irqsave]) { in process_irq_flag()
12559 if (meta->func_id == special_kfunc_list[KF_bpf_res_spin_lock_irqsave]) in process_irq_flag()
12561 } else if (meta->func_id == special_kfunc_list[KF_bpf_local_irq_restore] || in process_irq_flag()
12562 meta->func_id == special_kfunc_list[KF_bpf_res_spin_unlock_irqrestore]) { in process_irq_flag()
12564 if (meta->func_id == special_kfunc_list[KF_bpf_res_spin_unlock_irqrestore]) in process_irq_flag()
12568 return -EFAULT; in process_irq_flag()
12573 verbose(env, "expected uninitialized irq flag as arg#%d\n", regno - 1); in process_irq_flag()
12574 return -EINVAL; in process_irq_flag()
12577 err = check_mem_access(env, env->insn_idx, regno, 0, BPF_DW, BPF_WRITE, -1, false, false); in process_irq_flag()
12581 err = mark_stack_slot_irq_flag(env, meta, reg, env->insn_idx, kfunc_class); in process_irq_flag()
12587 verbose(env, "expected an initialized irq flag as arg#%d\n", regno - 1); in process_irq_flag()
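From the program's point of view, the pairing enforced above means bpf_local_irq_save() takes a pointer to an untouched stack slot and bpf_local_irq_restore() must later receive that same, now-initialized, flag. A short sketch (assuming the flag is an unsigned long on the stack):

	unsigned long flags;

	bpf_local_irq_save(&flags);	/* slot must be uninitialized here   */
	/* ... IRQ-disabled region ... */
	bpf_local_irq_restore(&flags);	/* same, now-initialized, flag slot  */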
12607 if (!env->cur_state->active_locks) { in ref_set_non_owning()
12609 return -EFAULT; in ref_set_non_owning()
12612 if (type_flag(reg->type) & NON_OWN_REF) { in ref_set_non_owning()
12614 return -EFAULT; in ref_set_non_owning()
12617 reg->type |= NON_OWN_REF; in ref_set_non_owning()
12618 if (rec->refcount_off >= 0) in ref_set_non_owning()
12619 reg->type |= MEM_RCU; in ref_set_non_owning()
12626 struct bpf_verifier_state *state = env->cur_state; in ref_convert_owning_non_owning()
12632 verifier_bug(env, "ref_obj_id is zero for owning -> non-owning conversion"); in ref_convert_owning_non_owning()
12633 return -EFAULT; in ref_convert_owning_non_owning()
12636 for (i = 0; i < state->acquired_refs; i++) { in ref_convert_owning_non_owning()
12637 if (state->refs[i].id != ref_obj_id) in ref_convert_owning_non_owning()
12643 bpf_for_each_reg_in_vstate(env->cur_state, unused, reg, ({ in ref_convert_owning_non_owning()
12644 if (reg->ref_obj_id == ref_obj_id) { in ref_convert_owning_non_owning()
12645 reg->ref_obj_id = 0; in ref_convert_owning_non_owning()
12653 return -EFAULT; in ref_convert_owning_non_owning()
12665 * allocation, the verifier preserves a unique reg->id for it.
12677 * The active_lock.ptr in case of map values is the reg->map_ptr, and in case of
12678 * allocated objects is the reg->btf pointer.
12680 * The active_lock.id is non-unique for maps supporting direct_value_addr, as we
12691 * assigns a fresh reg->id to the lookup, so while lookups into distinct inner
12693 * will get different reg->id assigned to each lookup, hence different
12696 * In case of allocated objects, active_lock.ptr is the reg->btf, and the
12697 * reg->id is a unique ID preserved after the NULL pointer check on the pointer
12698 * returned from bpf_obj_new. Each allocation receives a new reg->id.
12706 switch ((int)reg->type) { in check_reg_allocation_locked()
12708 ptr = reg->map_ptr; in check_reg_allocation_locked()
12711 ptr = reg->btf; in check_reg_allocation_locked()
12715 return -EFAULT; in check_reg_allocation_locked()
12717 id = reg->id; in check_reg_allocation_locked()
12719 if (!env->cur_state->active_locks) in check_reg_allocation_locked()
12720 return -EINVAL; in check_reg_allocation_locked()
12721 s = find_lock_state(env->cur_state, REF_TYPE_LOCK_MASK, id, ptr); in check_reg_allocation_locked()
12724 return -EINVAL; in check_reg_allocation_locked()
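Concretely, the (active_lock.ptr, active_lock.id) pairing means the lock and the list/rbtree root must come from the same lookup. A hedged BPF C sketch (struct layout, map name and the already-allocated node 'it' are illustrative):

	struct elem {
		struct bpf_spin_lock lock;
		struct bpf_list_head head __contains(item, node);
	};

	struct elem *e = bpf_map_lookup_elem(&m, &key);
	if (!e)
		return 0;
	bpf_spin_lock(&e->lock);		 /* records (m, e's reg->id)      */
	bpf_list_push_back(&e->head, &it->node); /* ok: same lookup, same reg->id */
	bpf_spin_unlock(&e->lock);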
12789 return bpf_pseudo_kfunc_call(insn) && insn->off == 0 && in is_bpf_throw_kfunc()
12790 insn->imm == special_kfunc_list[KF_bpf_throw]; in is_bpf_throw_kfunc()
12875 if (meta->btf != btf_vmlinux) { in __process_kf_arg_ptr_to_graph_root()
12877 return -EFAULT; in __process_kf_arg_ptr_to_graph_root()
12880 if (!check_kfunc_is_graph_root_api(env, head_field_type, meta->func_id)) in __process_kf_arg_ptr_to_graph_root()
12881 return -EFAULT; in __process_kf_arg_ptr_to_graph_root()
12884 if (!tnum_is_const(reg->var_off)) { in __process_kf_arg_ptr_to_graph_root()
12888 return -EINVAL; in __process_kf_arg_ptr_to_graph_root()
12892 head_off = reg->off + reg->var_off.value; in __process_kf_arg_ptr_to_graph_root()
12896 return -EINVAL; in __process_kf_arg_ptr_to_graph_root()
12902 rec->spin_lock_off, head_type_name); in __process_kf_arg_ptr_to_graph_root()
12903 return -EINVAL; in __process_kf_arg_ptr_to_graph_root()
12908 return -EFAULT; in __process_kf_arg_ptr_to_graph_root()
12919 &meta->arg_list_head.field); in process_kf_arg_ptr_to_list_head()
12927 &meta->arg_rbtree_root.field); in process_kf_arg_ptr_to_rbtree_root()
12943 if (meta->btf != btf_vmlinux) { in __process_kf_arg_ptr_to_graph_node()
12945 return -EFAULT; in __process_kf_arg_ptr_to_graph_node()
12948 if (!check_kfunc_is_graph_node_api(env, node_field_type, meta->func_id)) in __process_kf_arg_ptr_to_graph_node()
12949 return -EFAULT; in __process_kf_arg_ptr_to_graph_node()
12952 if (!tnum_is_const(reg->var_off)) { in __process_kf_arg_ptr_to_graph_node()
12956 return -EINVAL; in __process_kf_arg_ptr_to_graph_node()
12959 node_off = reg->off + reg->var_off.value; in __process_kf_arg_ptr_to_graph_node()
12963 return -EINVAL; in __process_kf_arg_ptr_to_graph_node()
12968 et = btf_type_by_id(field->graph_root.btf, field->graph_root.value_btf_id); in __process_kf_arg_ptr_to_graph_node()
12969 t = btf_type_by_id(reg->btf, reg->btf_id); in __process_kf_arg_ptr_to_graph_node()
12970 if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, 0, field->graph_root.btf, in __process_kf_arg_ptr_to_graph_node()
12971 field->graph_root.value_btf_id, true)) { in __process_kf_arg_ptr_to_graph_node()
12976 field->graph_root.node_offset, in __process_kf_arg_ptr_to_graph_node()
12977 btf_name_by_offset(field->graph_root.btf, et->name_off), in __process_kf_arg_ptr_to_graph_node()
12978 node_off, btf_name_by_offset(reg->btf, t->name_off)); in __process_kf_arg_ptr_to_graph_node()
12979 return -EINVAL; in __process_kf_arg_ptr_to_graph_node()
12981 meta->arg_btf = reg->btf; in __process_kf_arg_ptr_to_graph_node()
12982 meta->arg_btf_id = reg->btf_id; in __process_kf_arg_ptr_to_graph_node()
12984 if (node_off != field->graph_root.node_offset) { in __process_kf_arg_ptr_to_graph_node()
12987 field->graph_root.node_offset, in __process_kf_arg_ptr_to_graph_node()
12988 btf_name_by_offset(field->graph_root.btf, et->name_off)); in __process_kf_arg_ptr_to_graph_node()
12989 return -EINVAL; in __process_kf_arg_ptr_to_graph_node()
13001 &meta->arg_list_head.field); in process_kf_arg_ptr_to_list_node()
13010 &meta->arg_rbtree_root.field); in process_kf_arg_ptr_to_rbtree_node()
13015 * LSM hooks and iters (both sleepable and non-sleepable) are safe.
13021 enum bpf_prog_type prog_type = resolve_prog_type(env->prog); in check_css_task_iter_allowlist()
13027 if (env->prog->expected_attach_type == BPF_TRACE_ITER) in check_css_task_iter_allowlist()
13038 const char *func_name = meta->func_name, *ref_tname; in check_kfunc_args()
13039 const struct btf *btf = meta->btf; in check_kfunc_args()
13045 args = (const struct btf_param *)(meta->func_proto + 1); in check_kfunc_args()
13046 nargs = btf_type_vlen(meta->func_proto); in check_kfunc_args()
13050 return -EINVAL; in check_kfunc_args()
13071 if (meta->arg_prog) { in check_kfunc_args()
13072 verifier_bug(env, "Only 1 prog->aux argument supported per-kfunc"); in check_kfunc_args()
13073 return -EFAULT; in check_kfunc_args()
13075 meta->arg_prog = true; in check_kfunc_args()
13076 cur_aux(env)->arg_prog = regno; in check_kfunc_args()
13081 if (reg->type != SCALAR_VALUE) { in check_kfunc_args()
13083 return -EINVAL; in check_kfunc_args()
13086 if (is_kfunc_arg_constant(meta->btf, &args[i])) { in check_kfunc_args()
13087 if (meta->arg_constant.found) { in check_kfunc_args()
13089 return -EFAULT; in check_kfunc_args()
13091 if (!tnum_is_const(reg->var_off)) { in check_kfunc_args()
13093 return -EINVAL; in check_kfunc_args()
13098 meta->arg_constant.found = true; in check_kfunc_args()
13099 meta->arg_constant.value = reg->var_off.value; in check_kfunc_args()
13101 meta->r0_rdonly = true; in check_kfunc_args()
13108 if (meta->r0_size) { in check_kfunc_args()
13110 return -EINVAL; in check_kfunc_args()
13113 if (!tnum_is_const(reg->var_off)) { in check_kfunc_args()
13115 return -EINVAL; in check_kfunc_args()
13118 meta->r0_size = reg->var_off.value; in check_kfunc_args()
13128 return -EINVAL; in check_kfunc_args()
13132 (register_is_null(reg) || type_may_be_null(reg->type)) && in check_kfunc_args()
13133 !is_kfunc_arg_nullable(meta->btf, &args[i])) { in check_kfunc_args()
13135 return -EACCES; in check_kfunc_args()
13138 if (reg->ref_obj_id) { in check_kfunc_args()
13139 if (is_kfunc_release(meta) && meta->ref_obj_id) { in check_kfunc_args()
13141 regno, reg->ref_obj_id, in check_kfunc_args()
13142 meta->ref_obj_id); in check_kfunc_args()
13143 return -EFAULT; in check_kfunc_args()
13145 meta->ref_obj_id = reg->ref_obj_id; in check_kfunc_args()
13147 meta->release_regno = regno; in check_kfunc_args()
13150 ref_t = btf_type_skip_modifiers(btf, t->type, &ref_id); in check_kfunc_args()
13151 ref_tname = btf_name_by_offset(btf, ref_t->name_off); in check_kfunc_args()
13161 if (!reg->map_ptr) { in check_kfunc_args()
13163 return -EINVAL; in check_kfunc_args()
13165 if (meta->map.ptr && (reg->map_ptr->record->wq_off >= 0 || in check_kfunc_args()
13166 reg->map_ptr->record->task_work_off >= 0)) { in check_kfunc_args()
13179 if (meta->map.ptr != reg->map_ptr || in check_kfunc_args()
13180 meta->map.uid != reg->map_uid) { in check_kfunc_args()
13181 if (reg->map_ptr->record->task_work_off >= 0) { in check_kfunc_args()
13184 meta->map.uid, reg->map_uid); in check_kfunc_args()
13185 return -EINVAL; in check_kfunc_args()
13189 meta->map.uid, reg->map_uid); in check_kfunc_args()
13190 return -EINVAL; in check_kfunc_args()
13193 meta->map.ptr = reg->map_ptr; in check_kfunc_args()
13194 meta->map.uid = reg->map_uid; in check_kfunc_args()
13204 return -EINVAL; in check_kfunc_args()
13208 return -EINVAL; in check_kfunc_args()
13231 return -EFAULT; in check_kfunc_args()
13234 if (is_kfunc_release(meta) && reg->ref_obj_id) in check_kfunc_args()
13242 if (reg->type != PTR_TO_CTX) { in check_kfunc_args()
13244 i, reg_type_str(env, reg->type)); in check_kfunc_args()
13245 return -EINVAL; in check_kfunc_args()
13248 if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx]) { in check_kfunc_args()
13249 ret = get_kern_ctx_btf_id(&env->log, resolve_prog_type(env->prog)); in check_kfunc_args()
13251 return -EINVAL; in check_kfunc_args()
13252 meta->ret_btf_id = ret; in check_kfunc_args()
13256 if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC)) { in check_kfunc_args()
13257 if (meta->func_id != special_kfunc_list[KF_bpf_obj_drop_impl]) { in check_kfunc_args()
13259 return -EINVAL; in check_kfunc_args()
13261 } else if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC | MEM_PERCPU)) { in check_kfunc_args()
13262 if (meta->func_id != special_kfunc_list[KF_bpf_percpu_obj_drop_impl]) { in check_kfunc_args()
13264 return -EINVAL; in check_kfunc_args()
13268 return -EINVAL; in check_kfunc_args()
13270 if (!reg->ref_obj_id) { in check_kfunc_args()
13272 return -EINVAL; in check_kfunc_args()
13274 if (meta->btf == btf_vmlinux) { in check_kfunc_args()
13275 meta->arg_btf = reg->btf; in check_kfunc_args()
13276 meta->arg_btf_id = reg->btf_id; in check_kfunc_args()
13284 if (reg->type == CONST_PTR_TO_DYNPTR) in check_kfunc_args()
13290 if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_from_skb]) { in check_kfunc_args()
13292 } else if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_from_xdp]) { in check_kfunc_args()
13294 } else if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_from_skb_meta]) { in check_kfunc_args()
13296 } else if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_clone] && in check_kfunc_args()
13298 enum bpf_dynptr_type parent_type = meta->initialized_dynptr.type; in check_kfunc_args()
13302 return -EFAULT; in check_kfunc_args()
13306 clone_ref_obj_id = meta->initialized_dynptr.ref_obj_id; in check_kfunc_args()
13309 return -EFAULT; in check_kfunc_args()
13324 meta->initialized_dynptr.id = id; in check_kfunc_args()
13325 meta->initialized_dynptr.type = dynptr_get_type(env, reg); in check_kfunc_args()
13326 meta->initialized_dynptr.ref_obj_id = dynptr_ref_obj_id(env, reg); in check_kfunc_args()
13332 if (meta->func_id == special_kfunc_list[KF_bpf_iter_css_task_new]) { in check_kfunc_args()
13335 return -EINVAL; in check_kfunc_args()
13343 if (reg->type != PTR_TO_MAP_VALUE && in check_kfunc_args()
13344 reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) { in check_kfunc_args()
13346 return -EINVAL; in check_kfunc_args()
13348 if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC) && !reg->ref_obj_id) { in check_kfunc_args()
13350 return -EINVAL; in check_kfunc_args()
13357 if (reg->type != PTR_TO_MAP_VALUE && in check_kfunc_args()
13358 reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) { in check_kfunc_args()
13360 return -EINVAL; in check_kfunc_args()
13362 if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC) && !reg->ref_obj_id) { in check_kfunc_args()
13364 return -EINVAL; in check_kfunc_args()
13371 if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) { in check_kfunc_args()
13373 return -EINVAL; in check_kfunc_args()
13375 if (!reg->ref_obj_id) { in check_kfunc_args()
13377 return -EINVAL; in check_kfunc_args()
13384 if (meta->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) { in check_kfunc_args()
13385 if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) { in check_kfunc_args()
13387 return -EINVAL; in check_kfunc_args()
13389 if (!reg->ref_obj_id) { in check_kfunc_args()
13391 return -EINVAL; in check_kfunc_args()
13394 if (!type_is_non_owning_ref(reg->type) && !reg->ref_obj_id) { in check_kfunc_args()
13395 verbose(env, "%s can only take non-owning or refcounted bpf_rb_node pointer\n", func_name); in check_kfunc_args()
13396 return -EINVAL; in check_kfunc_args()
13400 return -EINVAL; in check_kfunc_args()
13412 ref_tname = btf_name_by_offset(btf, ref_t->name_off); in check_kfunc_args()
13416 if ((base_type(reg->type) != PTR_TO_BTF_ID || in check_kfunc_args()
13417 (bpf_type_has_unsafe_modifiers(reg->type) && !is_rcu_reg(reg))) && in check_kfunc_args()
13418 !reg2btf_ids[base_type(reg->type)]) { in check_kfunc_args()
13419 verbose(env, "arg#%d is %s ", i, reg_type_str(env, reg->type)); in check_kfunc_args()
13421 reg_type_str(env, base_type(reg->type) | in check_kfunc_args()
13422 (type_flag(reg->type) & BPF_REG_TRUSTED_MODIFIERS))); in check_kfunc_args()
13423 return -EINVAL; in check_kfunc_args()
13434 return -EINVAL; in check_kfunc_args()
13447 if (!register_is_null(buff_reg) || !is_kfunc_arg_optional(meta->btf, buff_arg)) { in check_kfunc_args()
13455 if (is_kfunc_arg_const_mem_size(meta->btf, size_arg, size_reg)) { in check_kfunc_args()
13456 if (meta->arg_constant.found) { in check_kfunc_args()
13458 return -EFAULT; in check_kfunc_args()
13460 if (!tnum_is_const(size_reg->var_off)) { in check_kfunc_args()
13462 return -EINVAL; in check_kfunc_args()
13464 meta->arg_constant.found = true; in check_kfunc_args()
13465 meta->arg_constant.value = size_reg->var_off.value; in check_kfunc_args()
13473 if (reg->type != PTR_TO_FUNC) { in check_kfunc_args()
13475 return -EINVAL; in check_kfunc_args()
13477 meta->subprogno = reg->subprogno; in check_kfunc_args()
13480 if (!type_is_ptr_alloc_obj(reg->type)) { in check_kfunc_args()
 13481  verbose(env, "arg#%d is neither owning nor non-owning ref\n", i); in check_kfunc_args()
13482 return -EINVAL; in check_kfunc_args()
13484 if (!type_is_non_owning_ref(reg->type)) in check_kfunc_args()
13485 meta->arg_owning_ref = true; in check_kfunc_args()
13490 return -EFAULT; in check_kfunc_args()
13493 if (rec->refcount_off < 0) { in check_kfunc_args()
13495 return -EINVAL; in check_kfunc_args()
13498 meta->arg_btf = reg->btf; in check_kfunc_args()
13499 meta->arg_btf_id = reg->btf_id; in check_kfunc_args()
13502 if (reg->type != PTR_TO_MAP_VALUE) { in check_kfunc_args()
13504 return -EINVAL; in check_kfunc_args()
13511 if (reg->type != PTR_TO_MAP_VALUE) { in check_kfunc_args()
13513 return -EINVAL; in check_kfunc_args()
13520 if (reg->type != PTR_TO_MAP_VALUE) { in check_kfunc_args()
13522 return -EINVAL; in check_kfunc_args()
13529 if (reg->type != PTR_TO_STACK) { in check_kfunc_args()
13531 return -EINVAL; in check_kfunc_args()
13541 if (reg->type != PTR_TO_MAP_VALUE && reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) { in check_kfunc_args()
13543 return -EINVAL; in check_kfunc_args()
13546 if (!is_bpf_res_spin_lock_kfunc(meta->func_id)) in check_kfunc_args()
13547 return -EFAULT; in check_kfunc_args()
13548 if (meta->func_id == special_kfunc_list[KF_bpf_res_spin_lock] || in check_kfunc_args()
13549 meta->func_id == special_kfunc_list[KF_bpf_res_spin_lock_irqsave]) in check_kfunc_args()
13551 if (meta->func_id == special_kfunc_list[KF_bpf_res_spin_lock_irqsave] || in check_kfunc_args()
13552 meta->func_id == special_kfunc_list[KF_bpf_res_spin_unlock_irqrestore]) in check_kfunc_args()
13562 if (is_kfunc_release(meta) && !meta->release_regno) { in check_kfunc_args()
13565 return -EINVAL; in check_kfunc_args()
13584 if (!insn->imm) in fetch_kfunc_meta()
13585 return -EINVAL; in fetch_kfunc_meta()
13587 desc_btf = find_kfunc_desc_btf(env, insn->off); in fetch_kfunc_meta()
13591 func_id = insn->imm; in fetch_kfunc_meta()
13593 func_name = btf_name_by_offset(desc_btf, func->name_off); in fetch_kfunc_meta()
13596 func_proto = btf_type_by_id(desc_btf, func->type); in fetch_kfunc_meta()
13598 kfunc_flags = btf_kfunc_id_set_contains(desc_btf, func_id, env->prog); in fetch_kfunc_meta()
13600 return -EACCES; in fetch_kfunc_meta()
13604 meta->btf = desc_btf; in fetch_kfunc_meta()
13605 meta->func_id = func_id; in fetch_kfunc_meta()
13606 meta->kfunc_flags = *kfunc_flags; in fetch_kfunc_meta()
13607 meta->func_proto = func_proto; in fetch_kfunc_meta()
13608 meta->func_name = func_name; in fetch_kfunc_meta()
13614 * 1 - not fall-through to 'else' branch, continue verification
13615 * 0 - fall-through to 'else' branch
13616 * < 0 - not fall-through to 'else' branch, return error
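A small sketch of how the caller is expected to consume this convention (not actual verifier.c code; the argument list is elided):

	err = check_special_kfunc(...);
	if (err < 0)
		return err;	/* < 0: propagate the error               */
	if (err > 0)
		goto handled;	/* 1: fully handled, skip the 'else' path */
	/* 0: fall through to the generic return-type handling */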
13625 if (meta->btf != btf_vmlinux) in check_special_kfunc()
13628 if (meta->func_id == special_kfunc_list[KF_bpf_obj_new_impl] || in check_special_kfunc()
13629 meta->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) { in check_special_kfunc()
13634 if (meta->func_id == special_kfunc_list[KF_bpf_obj_new_impl] && !bpf_global_ma_set) in check_special_kfunc()
13635 return -ENOMEM; in check_special_kfunc()
13637 if (((u64)(u32)meta->arg_constant.value) != meta->arg_constant.value) { in check_special_kfunc()
13639 return -EINVAL; in check_special_kfunc()
13642 ret_btf = env->prog->aux->btf; in check_special_kfunc()
13643 ret_btf_id = meta->arg_constant.value; in check_special_kfunc()
13648 return -EINVAL; in check_special_kfunc()
13654 return -EINVAL; in check_special_kfunc()
13657 if (meta->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) { in check_special_kfunc()
13658 if (ret_t->size > BPF_GLOBAL_PERCPU_MA_MAX_SIZE) { in check_special_kfunc()
13660 ret_t->size, BPF_GLOBAL_PERCPU_MA_MAX_SIZE); in check_special_kfunc()
13661 return -EINVAL; in check_special_kfunc()
13680 err = bpf_mem_alloc_percpu_unit_init(&bpf_global_percpu_ma, ret_t->size); in check_special_kfunc()
13687 if (meta->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) { in check_special_kfunc()
13690 return -EINVAL; in check_special_kfunc()
13695 return -EINVAL; in check_special_kfunc()
13703 if (meta->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) in check_special_kfunc()
13706 insn_aux->obj_new_size = ret_t->size; in check_special_kfunc()
13707 insn_aux->kptr_struct_meta = struct_meta; in check_special_kfunc()
13708 } else if (meta->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl]) { in check_special_kfunc()
13711 regs[BPF_REG_0].btf = meta->arg_btf; in check_special_kfunc()
13712 regs[BPF_REG_0].btf_id = meta->arg_btf_id; in check_special_kfunc()
13714 insn_aux->kptr_struct_meta = in check_special_kfunc()
13715 btf_find_struct_meta(meta->arg_btf, in check_special_kfunc()
13716 meta->arg_btf_id); in check_special_kfunc()
13718 struct btf_field *field = meta->arg_list_head.field; in check_special_kfunc()
13720 mark_reg_graph_node(regs, BPF_REG_0, &field->graph_root); in check_special_kfunc()
13722 struct btf_field *field = meta->arg_rbtree_root.field; in check_special_kfunc()
13724 mark_reg_graph_node(regs, BPF_REG_0, &field->graph_root); in check_special_kfunc()
13725 } else if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx]) { in check_special_kfunc()
13729 regs[BPF_REG_0].btf_id = meta->ret_btf_id; in check_special_kfunc()
13730 } else if (meta->func_id == special_kfunc_list[KF_bpf_rdonly_cast]) { in check_special_kfunc()
13731 ret_t = btf_type_by_id(desc_btf, meta->arg_constant.value); in check_special_kfunc()
13734 meta->arg_constant.value); in check_special_kfunc()
13735 return -EINVAL; in check_special_kfunc()
13740 regs[BPF_REG_0].btf_id = meta->arg_constant.value; in check_special_kfunc()
13748 return -EINVAL; in check_special_kfunc()
13750 } else if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_slice] || in check_special_kfunc()
13751 meta->func_id == special_kfunc_list[KF_bpf_dynptr_slice_rdwr]) { in check_special_kfunc()
13752 enum bpf_type_flag type_flag = get_dynptr_type_flag(meta->initialized_dynptr.type); in check_special_kfunc()
13756 if (!meta->arg_constant.found) { in check_special_kfunc()
13758 return -EFAULT; in check_special_kfunc()
13761 regs[BPF_REG_0].mem_size = meta->arg_constant.value; in check_special_kfunc()
13766 if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_slice]) { in check_special_kfunc()
13769 /* this will set env->seen_direct_write to true */ in check_special_kfunc()
13772 return -EINVAL; in check_special_kfunc()
13776 if (!meta->initialized_dynptr.id) { in check_special_kfunc()
13778 return -EFAULT; in check_special_kfunc()
13780 regs[BPF_REG_0].dynptr_id = meta->initialized_dynptr.id; in check_special_kfunc()
13810 if (!insn->imm) in check_kfunc_call()
13814 if (err == -EACCES && func_name) in check_kfunc_call()
13819 insn_aux = &env->insn_aux_data[insn_idx]; in check_kfunc_call()
13821 insn_aux->is_iter_next = is_iter_next_kfunc(&meta); in check_kfunc_call()
13823 if (!insn->off && in check_kfunc_call()
13824 (insn->imm == special_kfunc_list[KF_bpf_res_spin_lock] || in check_kfunc_call()
13825 insn->imm == special_kfunc_list[KF_bpf_res_spin_lock_irqsave])) { in check_kfunc_call()
13829 branch = push_stack(env, env->insn_idx + 1, env->insn_idx, false); in check_kfunc_call()
13832 return -ENOMEM; in check_kfunc_call()
13835 regs = branch->frame[branch->curframe]->regs; in check_kfunc_call()
13837 /* Clear r0-r5 registers in forked state */ in check_kfunc_call()
13842 err = __mark_reg_s32_range(env, regs, BPF_REG_0, -MAX_ERRNO, -1); in check_kfunc_call()
13848 } else if (!insn->off && insn->imm == special_kfunc_list[KF___bpf_trap]) { in check_kfunc_call()
13850 return -EFAULT; in check_kfunc_call()
13855 return -EACCES; in check_kfunc_call()
13861 return -EACCES; in check_kfunc_call()
13910 if (env->cur_state->active_rcu_lock) { in check_kfunc_call()
13917 return -EACCES; in check_kfunc_call()
13922 return -EINVAL; in check_kfunc_call()
13924 bpf_for_each_reg_in_vstate_mask(env->cur_state, state, reg, clear_mask, ({ in check_kfunc_call()
13925 if (reg->type & MEM_RCU) { in check_kfunc_call()
13926 reg->type &= ~(MEM_RCU | PTR_MAYBE_NULL); in check_kfunc_call()
13927 reg->type |= PTR_UNTRUSTED; in check_kfunc_call()
13930 env->cur_state->active_rcu_lock = false; in check_kfunc_call()
13933 return -EACCES; in check_kfunc_call()
13936 env->cur_state->active_rcu_lock = true; in check_kfunc_call()
13939 return -EINVAL; in check_kfunc_call()
13942 if (env->cur_state->active_preempt_locks) { in check_kfunc_call()
13944 env->cur_state->active_preempt_locks++; in check_kfunc_call()
13946 env->cur_state->active_preempt_locks--; in check_kfunc_call()
13948 verbose(env, "kernel func %s is sleepable within non-preemptible region\n", func_name); in check_kfunc_call()
13949 return -EACCES; in check_kfunc_call()
13952 env->cur_state->active_preempt_locks++; in check_kfunc_call()
13955 return -EINVAL; in check_kfunc_call()
13958 if (env->cur_state->active_irq_id && sleepable) { in check_kfunc_call()
13959 verbose(env, "kernel func %s is sleepable within IRQ-disabled region\n", func_name); in check_kfunc_call()
13960 return -EACCES; in check_kfunc_call()
13965 return -EACCES; in check_kfunc_call()
13984 insn_aux->insert_off = regs[BPF_REG_2].off; in check_kfunc_call()
13985 insn_aux->kptr_struct_meta = btf_find_struct_meta(meta.arg_btf, meta.arg_btf_id); in check_kfunc_call()
13988 verbose(env, "kfunc %s#%d conversion of owning ref to non-owning failed\n", in check_kfunc_call()
14005 return -ENOTSUPP; in check_kfunc_call()
14007 env->seen_exception = true; in check_kfunc_call()
14012 if (!env->exception_callback_subprog) { in check_kfunc_call()
14023 t = btf_type_skip_modifiers(desc_btf, meta.func_proto->type, NULL); in check_kfunc_call()
14032 return -EINVAL; in check_kfunc_call()
14041 mark_btf_func_reg_size(env, BPF_REG_0, t->size); in check_kfunc_call()
14043 ptr_type = btf_type_skip_modifiers(desc_btf, t->type, &ptr_type_id); in check_kfunc_call()
14062 ptr_type->name_off); in check_kfunc_call()
14068 return -EINVAL; in check_kfunc_call()
14098 cur_iter = get_iter_from_state(env->cur_state, &meta); in check_kfunc_call()
14100 if (cur_iter->type & MEM_RCU) /* KF_RCU_PROTECTED */ in check_kfunc_call()
14110 regs[BPF_REG_0].id = ++env->id_gen; in check_kfunc_call()
14126 regs[BPF_REG_0].id = ++env->id_gen; in check_kfunc_call()
14131 insn_aux->kptr_struct_meta = in check_kfunc_call()
14151 mark_btf_func_reg_size(env, regno, t->size); in check_kfunc_call()
14167 bool known = tnum_is_const(reg->var_off); in check_reg_sane_offset()
14168 s64 val = reg->var_off.value; in check_reg_sane_offset()
14169 s64 smin = reg->smin_value; in check_reg_sane_offset()
14171 if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) { in check_reg_sane_offset()
14177 if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) { in check_reg_sane_offset()
14179 reg_type_str(env, type), reg->off); in check_reg_sane_offset()
14189 if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) { in check_reg_sane_offset()
14199 REASON_BOUNDS = -1,
14200 REASON_TYPE = -2,
14201 REASON_PATHS = -3,
14202 REASON_LIMIT = -4,
14203 REASON_STACK = -5,
14211 switch (ptr_reg->type) { in retrieve_ptr_limit()
14213 /* Offset 0 is out-of-bounds, but acceptable start for the in retrieve_ptr_limit()
14219 ptr_limit = -(ptr_reg->var_off.value + ptr_reg->off); in retrieve_ptr_limit()
14222 max = ptr_reg->map_ptr->value_size; in retrieve_ptr_limit()
14224 ptr_reg->smin_value : in retrieve_ptr_limit()
14225 ptr_reg->umax_value) + ptr_reg->off; in retrieve_ptr_limit()
14240 return env->bypass_spec_v1 || in can_skip_alu_sanitation()
14241 BPF_SRC(insn->code) == BPF_K || in can_skip_alu_sanitation()
14242 cur_aux(env)->nospec; in can_skip_alu_sanitation()
14251 if (aux->alu_state && in update_alu_sanitation_state()
14252 (aux->alu_state != alu_state || in update_alu_sanitation_state()
14253 aux->alu_limit != alu_limit)) in update_alu_sanitation_state()
14257 aux->alu_state = alu_state; in update_alu_sanitation_state()
14258 aux->alu_limit = alu_limit; in update_alu_sanitation_state()
14293 regs = branch->frame[branch->curframe]->regs; in sanitize_speculative_path()
14294 if (BPF_SRC(insn->code) == BPF_K) { in sanitize_speculative_path()
14295 mark_reg_unknown(env, regs, insn->dst_reg); in sanitize_speculative_path()
14296 } else if (BPF_SRC(insn->code) == BPF_X) { in sanitize_speculative_path()
14297 mark_reg_unknown(env, regs, insn->dst_reg); in sanitize_speculative_path()
14298 mark_reg_unknown(env, regs, insn->src_reg); in sanitize_speculative_path()
14312 struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : &info->aux; in sanitize_ptr_alu()
14313 struct bpf_verifier_state *vstate = env->cur_state; in sanitize_ptr_alu()
14314 bool off_is_imm = tnum_is_const(off_reg->var_off); in sanitize_ptr_alu()
14315 bool off_is_neg = off_reg->smin_value < 0; in sanitize_ptr_alu()
14317 u8 opcode = BPF_OP(insn->code); in sanitize_ptr_alu()
14326 /* We already marked aux for masking from non-speculative in sanitize_ptr_alu()
14330 if (vstate->speculative) in sanitize_ptr_alu()
14334 if (!tnum_is_const(off_reg->var_off) && in sanitize_ptr_alu()
14335 (off_reg->smin_value < 0) != (off_reg->smax_value < 0)) in sanitize_ptr_alu()
14338 info->mask_to_left = (opcode == BPF_ADD && off_is_neg) || in sanitize_ptr_alu()
14342 err = retrieve_ptr_limit(ptr_reg, &alu_limit, info->mask_to_left); in sanitize_ptr_alu()
14350 alu_state = info->aux.alu_state; in sanitize_ptr_alu()
14351 alu_limit = abs(info->aux.alu_limit - alu_limit); in sanitize_ptr_alu()
14362 env->explore_alu_limits = true; in sanitize_ptr_alu()
 14373  * Also, when the register is a known constant, we rewrite the register-based in sanitize_ptr_alu()
 14374  * operation to an immediate-based one, and thus do not need masking (and as in sanitize_ptr_alu()
14375 * a consequence, do not need to simulate the zero-truncation either). in sanitize_ptr_alu()
14380 /* Simulate and find potential out-of-bounds access under in sanitize_ptr_alu()
14384 * to simulate dst (== 0) +/-= ptr. Needed, for example, in sanitize_ptr_alu()
14385 * for cases where we use K-based arithmetic in one direction in sanitize_ptr_alu()
14386 * and truncated reg-based in the other in order to explore in sanitize_ptr_alu()
14393 ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1, in sanitize_ptr_alu()
14394 env->insn_idx); in sanitize_ptr_alu()
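The alu_limit computed here bounds how far the pointer may be moved on a speculatively executed path; the actual mitigation is applied later by patching in a masking sequence. A purely conceptual sketch of its effect (not the literal patched instructions):

	static u64 masked_offset(u64 off, u64 alu_limit)
	{
		/* an offset that would exceed the limit collapses to 0, so even
		 * a mispredicted path cannot form an out-of-bounds pointer */
		return off > alu_limit ? 0 : off;
	}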
14402 struct bpf_verifier_state *vstate = env->cur_state; in sanitize_mark_insn_seen()
14406 * the non-speculative domain, sanitize_dead_code() can still in sanitize_mark_insn_seen()
14409 if (!vstate->speculative) in sanitize_mark_insn_seen()
14410 env->insn_aux_data[env->insn_idx].seen = env->pass_cnt; in sanitize_mark_insn_seen()
14419 const char *op = BPF_OP(insn->code) == BPF_ADD ? "add" : "sub"; in sanitize_err()
14420 u32 dst = insn->dst_reg, src = insn->src_reg; in sanitize_err()
14442 return -ENOMEM; in sanitize_err()
14448 return -EACCES; in sanitize_err()
14459 * 'off' includes 'reg->off'.
14467 if (!tnum_is_const(reg->var_off)) { in check_stack_access_for_ptr_arithmetic()
14470 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); in check_stack_access_for_ptr_arithmetic()
14473 return -EACCES; in check_stack_access_for_ptr_arithmetic()
14476 if (off >= 0 || off < -MAX_BPF_STACK) { in check_stack_access_for_ptr_arithmetic()
14479 return -EACCES; in check_stack_access_for_ptr_arithmetic()
14489 u32 dst = insn->dst_reg; in sanitize_check_bounds()
14494 if (env->bypass_spec_v1) in sanitize_check_bounds()
14497 switch (dst_reg->type) { in sanitize_check_bounds()
14500 dst_reg->off + dst_reg->var_off.value)) in sanitize_check_bounds()
14501 return -EACCES; in sanitize_check_bounds()
14504 if (check_map_access(env, dst, dst_reg->off, 1, false, ACCESS_HELPER)) { in sanitize_check_bounds()
14507 return -EACCES; in sanitize_check_bounds()
14511 return -EOPNOTSUPP; in sanitize_check_bounds()
14519 * If we return -EACCES, caller may want to try again treating pointer as a
14520 * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks.
14527 struct bpf_verifier_state *vstate = env->cur_state; in adjust_ptr_min_max_vals()
14528 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in adjust_ptr_min_max_vals()
14529 struct bpf_reg_state *regs = state->regs, *dst_reg; in adjust_ptr_min_max_vals()
14530 bool known = tnum_is_const(off_reg->var_off); in adjust_ptr_min_max_vals()
14531 s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value, in adjust_ptr_min_max_vals()
14532 smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value; in adjust_ptr_min_max_vals()
14533 u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value, in adjust_ptr_min_max_vals()
14534 umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value; in adjust_ptr_min_max_vals()
14536 u8 opcode = BPF_OP(insn->code); in adjust_ptr_min_max_vals()
14537 u32 dst = insn->dst_reg; in adjust_ptr_min_max_vals()
14551 if (BPF_CLASS(insn->code) != BPF_ALU64) { in adjust_ptr_min_max_vals()
14552 /* 32-bit ALU ops on pointers produce (meaningless) scalars */ in adjust_ptr_min_max_vals()
14553 if (opcode == BPF_SUB && env->allow_ptr_leaks) { in adjust_ptr_min_max_vals()
14559 "R%d 32-bit pointer arithmetic prohibited\n", in adjust_ptr_min_max_vals()
14561 return -EACCES; in adjust_ptr_min_max_vals()
14564 if (ptr_reg->type & PTR_MAYBE_NULL) { in adjust_ptr_min_max_vals()
14565 verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n", in adjust_ptr_min_max_vals()
14566 dst, reg_type_str(env, ptr_reg->type)); in adjust_ptr_min_max_vals()
14567 return -EACCES; in adjust_ptr_min_max_vals()
14574 if (base_type(ptr_reg->type) == PTR_TO_MEM && (ptr_reg->type & PTR_UNTRUSTED)) in adjust_ptr_min_max_vals()
14577 switch (base_type(ptr_reg->type)) { in adjust_ptr_min_max_vals()
14602 dst, reg_type_str(env, ptr_reg->type)); in adjust_ptr_min_max_vals()
14603 return -EACCES; in adjust_ptr_min_max_vals()
14609 dst_reg->type = ptr_reg->type; in adjust_ptr_min_max_vals()
14610 dst_reg->id = ptr_reg->id; in adjust_ptr_min_max_vals()
14612 if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) || in adjust_ptr_min_max_vals()
14613 !check_reg_sane_offset(env, ptr_reg, ptr_reg->type)) in adjust_ptr_min_max_vals()
14614 return -EINVAL; in adjust_ptr_min_max_vals()
14616 /* pointer types do not carry 32-bit bounds at the moment. */ in adjust_ptr_min_max_vals()
14631 if (known && (ptr_reg->off + smin_val == in adjust_ptr_min_max_vals()
14632 (s64)(s32)(ptr_reg->off + smin_val))) { in adjust_ptr_min_max_vals()
14634 dst_reg->smin_value = smin_ptr; in adjust_ptr_min_max_vals()
14635 dst_reg->smax_value = smax_ptr; in adjust_ptr_min_max_vals()
14636 dst_reg->umin_value = umin_ptr; in adjust_ptr_min_max_vals()
14637 dst_reg->umax_value = umax_ptr; in adjust_ptr_min_max_vals()
14638 dst_reg->var_off = ptr_reg->var_off; in adjust_ptr_min_max_vals()
14639 dst_reg->off = ptr_reg->off + smin_val; in adjust_ptr_min_max_vals()
14640 dst_reg->raw = ptr_reg->raw; in adjust_ptr_min_max_vals()
14643 /* A new variable offset is created. Note that off_reg->off in adjust_ptr_min_max_vals()
14652 if (check_add_overflow(smin_ptr, smin_val, &dst_reg->smin_value) || in adjust_ptr_min_max_vals()
14653 check_add_overflow(smax_ptr, smax_val, &dst_reg->smax_value)) { in adjust_ptr_min_max_vals()
14654 dst_reg->smin_value = S64_MIN; in adjust_ptr_min_max_vals()
14655 dst_reg->smax_value = S64_MAX; in adjust_ptr_min_max_vals()
14657 if (check_add_overflow(umin_ptr, umin_val, &dst_reg->umin_value) || in adjust_ptr_min_max_vals()
14658 check_add_overflow(umax_ptr, umax_val, &dst_reg->umax_value)) { in adjust_ptr_min_max_vals()
14659 dst_reg->umin_value = 0; in adjust_ptr_min_max_vals()
14660 dst_reg->umax_value = U64_MAX; in adjust_ptr_min_max_vals()
14662 dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off); in adjust_ptr_min_max_vals()
14663 dst_reg->off = ptr_reg->off; in adjust_ptr_min_max_vals()
14664 dst_reg->raw = ptr_reg->raw; in adjust_ptr_min_max_vals()
14666 dst_reg->id = ++env->id_gen; in adjust_ptr_min_max_vals()
14668 memset(&dst_reg->raw, 0, sizeof(dst_reg->raw)); in adjust_ptr_min_max_vals()
14673 /* scalar -= pointer. Creates an unknown scalar */ in adjust_ptr_min_max_vals()
14676 return -EACCES; in adjust_ptr_min_max_vals()
14682 if (ptr_reg->type == PTR_TO_STACK) { in adjust_ptr_min_max_vals()
14685 return -EACCES; in adjust_ptr_min_max_vals()
14687 if (known && (ptr_reg->off - smin_val == in adjust_ptr_min_max_vals()
14688 (s64)(s32)(ptr_reg->off - smin_val))) { in adjust_ptr_min_max_vals()
14689 /* pointer -= K. Subtract it from fixed offset */ in adjust_ptr_min_max_vals()
14690 dst_reg->smin_value = smin_ptr; in adjust_ptr_min_max_vals()
14691 dst_reg->smax_value = smax_ptr; in adjust_ptr_min_max_vals()
14692 dst_reg->umin_value = umin_ptr; in adjust_ptr_min_max_vals()
14693 dst_reg->umax_value = umax_ptr; in adjust_ptr_min_max_vals()
14694 dst_reg->var_off = ptr_reg->var_off; in adjust_ptr_min_max_vals()
14695 dst_reg->id = ptr_reg->id; in adjust_ptr_min_max_vals()
14696 dst_reg->off = ptr_reg->off - smin_val; in adjust_ptr_min_max_vals()
14697 dst_reg->raw = ptr_reg->raw; in adjust_ptr_min_max_vals()
14701 * nonnegative, then any reg->range we had before is still good. in adjust_ptr_min_max_vals()
14703 if (check_sub_overflow(smin_ptr, smax_val, &dst_reg->smin_value) || in adjust_ptr_min_max_vals()
14704 check_sub_overflow(smax_ptr, smin_val, &dst_reg->smax_value)) { in adjust_ptr_min_max_vals()
14706 dst_reg->smin_value = S64_MIN; in adjust_ptr_min_max_vals()
14707 dst_reg->smax_value = S64_MAX; in adjust_ptr_min_max_vals()
14711 dst_reg->umin_value = 0; in adjust_ptr_min_max_vals()
14712 dst_reg->umax_value = U64_MAX; in adjust_ptr_min_max_vals()
14715 dst_reg->umin_value = umin_ptr - umax_val; in adjust_ptr_min_max_vals()
14716 dst_reg->umax_value = umax_ptr - umin_val; in adjust_ptr_min_max_vals()
14718 dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off); in adjust_ptr_min_max_vals()
14719 dst_reg->off = ptr_reg->off; in adjust_ptr_min_max_vals()
14720 dst_reg->raw = ptr_reg->raw; in adjust_ptr_min_max_vals()
14722 dst_reg->id = ++env->id_gen; in adjust_ptr_min_max_vals()
14725 memset(&dst_reg->raw, 0, sizeof(dst_reg->raw)); in adjust_ptr_min_max_vals()
14734 return -EACCES; in adjust_ptr_min_max_vals()
 14736  /* other operators (e.g. MUL, LSH) produce non-pointer results */ in adjust_ptr_min_max_vals()
14739 return -EACCES; in adjust_ptr_min_max_vals()
14742 if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type)) in adjust_ptr_min_max_vals()
14743 return -EINVAL; in adjust_ptr_min_max_vals()
14746 if (bounds_ret == -EACCES) in adjust_ptr_min_max_vals()
14752 && !env->cur_state->speculative in adjust_ptr_min_max_vals()
14756 return -EFAULT; in adjust_ptr_min_max_vals()
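The two BPF_ADD sub-cases above cover the usual patterns of constant and variable pointer offsets. A worked example (register numbers are arbitrary):

	/* r1 is a PTR_TO_MAP_VALUE, r4 a scalar known to be in [0, 64]:
	 *
	 *   r2 = r1 + 4     known constant offset: folded into dst_reg->off,
	 *                   value bounds and var_off are copied unchanged.
	 *
	 *   r3 = r1 + r4    variable offset: dst_reg->off stays ptr_reg->off,
	 *                   [0, 64] is added to the s/u min/max bounds and to
	 *                   var_off via tnum_add(), and a fresh reg->id is
	 *                   assigned.
	 */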
14768 s32 *dst_smin = &dst_reg->s32_min_value; in scalar32_min_max_add()
14769 s32 *dst_smax = &dst_reg->s32_max_value; in scalar32_min_max_add()
14770 u32 *dst_umin = &dst_reg->u32_min_value; in scalar32_min_max_add()
14771 u32 *dst_umax = &dst_reg->u32_max_value; in scalar32_min_max_add()
14772 u32 umin_val = src_reg->u32_min_value; in scalar32_min_max_add()
14773 u32 umax_val = src_reg->u32_max_value; in scalar32_min_max_add()
14776 if (check_add_overflow(*dst_smin, src_reg->s32_min_value, dst_smin) || in scalar32_min_max_add()
14777 check_add_overflow(*dst_smax, src_reg->s32_max_value, dst_smax)) { in scalar32_min_max_add()
14799 s64 *dst_smin = &dst_reg->smin_value; in scalar_min_max_add()
14800 s64 *dst_smax = &dst_reg->smax_value; in scalar_min_max_add()
14801 u64 *dst_umin = &dst_reg->umin_value; in scalar_min_max_add()
14802 u64 *dst_umax = &dst_reg->umax_value; in scalar_min_max_add()
14803 u64 umin_val = src_reg->umin_value; in scalar_min_max_add()
14804 u64 umax_val = src_reg->umax_value; in scalar_min_max_add()
14807 if (check_add_overflow(*dst_smin, src_reg->smin_value, dst_smin) || in scalar_min_max_add()
14808 check_add_overflow(*dst_smax, src_reg->smax_value, dst_smax)) { in scalar_min_max_add()
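/*
 * Illustrative standalone sketch (not kernel code; the helper name is made
 * up, and __builtin_add_overflow() stands in for check_add_overflow() used
 * above): interval addition adds the two lower bounds and the two upper
 * bounds, and if either endpoint overflows the whole signed range becomes
 * unknown.
 *
 *   #include <stdint.h>
 *
 *   static void range_add(int64_t *smin, int64_t *smax,
 *                         int64_t src_smin, int64_t src_smax)
 *   {
 *           if (__builtin_add_overflow(*smin, src_smin, smin) ||
 *               __builtin_add_overflow(*smax, src_smax, smax)) {
 *                   *smin = INT64_MIN;          // overflow possible: know nothing
 *                   *smax = INT64_MAX;
 *           }
 *   }
 *
 * E.g. [10, 20] + [1, 5] gives [11, 25], while [INT64_MAX - 1, INT64_MAX] + [1, 1]
 * overflows and collapses to [INT64_MIN, INT64_MAX].
 */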
14830 s32 *dst_smin = &dst_reg->s32_min_value; in scalar32_min_max_sub()
14831 s32 *dst_smax = &dst_reg->s32_max_value; in scalar32_min_max_sub()
14832 u32 *dst_umin = &dst_reg->u32_min_value; in scalar32_min_max_sub()
14833 u32 *dst_umax = &dst_reg->u32_max_value; in scalar32_min_max_sub()
14834 u32 umin_val = src_reg->u32_min_value; in scalar32_min_max_sub()
14835 u32 umax_val = src_reg->u32_max_value; in scalar32_min_max_sub()
14838 if (check_sub_overflow(*dst_smin, src_reg->s32_max_value, dst_smin) || in scalar32_min_max_sub()
14839 check_sub_overflow(*dst_smax, src_reg->s32_min_value, dst_smax)) { in scalar32_min_max_sub()
14846 * underflow, it is okay to set: dst_umin = dst_umin - src_umax, in scalar32_min_max_sub()
14847 * dst_umax = dst_umax - src_umin. Otherwise (some subtractions in scalar32_min_max_sub()
14862 s64 *dst_smin = &dst_reg->smin_value; in scalar_min_max_sub()
14863 s64 *dst_smax = &dst_reg->smax_value; in scalar_min_max_sub()
14864 u64 *dst_umin = &dst_reg->umin_value; in scalar_min_max_sub()
14865 u64 *dst_umax = &dst_reg->umax_value; in scalar_min_max_sub()
14866 u64 umin_val = src_reg->umin_value; in scalar_min_max_sub()
14867 u64 umax_val = src_reg->umax_value; in scalar_min_max_sub()
14870 if (check_sub_overflow(*dst_smin, src_reg->smax_value, dst_smin) || in scalar_min_max_sub()
14871 check_sub_overflow(*dst_smax, src_reg->smin_value, dst_smax)) { in scalar_min_max_sub()
14878 * underflow, it is okay to set: dst_umin = dst_umin - src_umax, in scalar_min_max_sub()
14879 * dst_umax = dst_umax - src_umin. Otherwise (some subtractions in scalar_min_max_sub()
14894 s32 *dst_smin = &dst_reg->s32_min_value; in scalar32_min_max_mul()
14895 s32 *dst_smax = &dst_reg->s32_max_value; in scalar32_min_max_mul()
14896 u32 *dst_umin = &dst_reg->u32_min_value; in scalar32_min_max_mul()
14897 u32 *dst_umax = &dst_reg->u32_max_value; in scalar32_min_max_mul()
14900 if (check_mul_overflow(*dst_umax, src_reg->u32_max_value, dst_umax) || in scalar32_min_max_mul()
14901 check_mul_overflow(*dst_umin, src_reg->u32_min_value, dst_umin)) { in scalar32_min_max_mul()
14906 if (check_mul_overflow(*dst_smin, src_reg->s32_min_value, &tmp_prod[0]) || in scalar32_min_max_mul()
14907 check_mul_overflow(*dst_smin, src_reg->s32_max_value, &tmp_prod[1]) || in scalar32_min_max_mul()
14908 check_mul_overflow(*dst_smax, src_reg->s32_min_value, &tmp_prod[2]) || in scalar32_min_max_mul()
14909 check_mul_overflow(*dst_smax, src_reg->s32_max_value, &tmp_prod[3])) { in scalar32_min_max_mul()
14922 s64 *dst_smin = &dst_reg->smin_value; in scalar_min_max_mul()
14923 s64 *dst_smax = &dst_reg->smax_value; in scalar_min_max_mul()
14924 u64 *dst_umin = &dst_reg->umin_value; in scalar_min_max_mul()
14925 u64 *dst_umax = &dst_reg->umax_value; in scalar_min_max_mul()
14928 if (check_mul_overflow(*dst_umax, src_reg->umax_value, dst_umax) || in scalar_min_max_mul()
14929 check_mul_overflow(*dst_umin, src_reg->umin_value, dst_umin)) { in scalar_min_max_mul()
14934 if (check_mul_overflow(*dst_smin, src_reg->smin_value, &tmp_prod[0]) || in scalar_min_max_mul()
14935 check_mul_overflow(*dst_smin, src_reg->smax_value, &tmp_prod[1]) || in scalar_min_max_mul()
14936 check_mul_overflow(*dst_smax, src_reg->smin_value, &tmp_prod[2]) || in scalar_min_max_mul()
14937 check_mul_overflow(*dst_smax, src_reg->smax_value, &tmp_prod[3])) { in scalar_min_max_mul()
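/*
 * Illustrative standalone sketch (not kernel code; the helper name is made
 * up, and __builtin_mul_overflow() stands in for check_mul_overflow() used
 * above): for a signed multiply the sign of either operand can flip the
 * result, so the new bounds are the minimum and maximum over the four
 * endpoint products kept in tmp_prod[], unless any of those products
 * overflows, in which case the range collapses to the full s64 interval.
 *
 *   #include <stdint.h>
 *
 *   static void range_mul(int64_t *smin, int64_t *smax, int64_t b_min, int64_t b_max)
 *   {
 *           int64_t p[4];
 *           int i;
 *
 *           if (__builtin_mul_overflow(*smin, b_min, &p[0]) ||
 *               __builtin_mul_overflow(*smin, b_max, &p[1]) ||
 *               __builtin_mul_overflow(*smax, b_min, &p[2]) ||
 *               __builtin_mul_overflow(*smax, b_max, &p[3])) {
 *                   *smin = INT64_MIN;
 *                   *smax = INT64_MAX;
 *                   return;
 *           }
 *           *smin = *smax = p[0];
 *           for (i = 1; i < 4; i++) {
 *                   if (p[i] < *smin) *smin = p[i];
 *                   if (p[i] > *smax) *smax = p[i];
 *           }
 *   }
 *
 * E.g. [-3, 2] * [4, 5] has endpoint products -12, -15, 8 and 10, so the
 * result range is [-15, 10].
 */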
14950 bool src_known = tnum_subreg_is_const(src_reg->var_off); in scalar32_min_max_and()
14951 bool dst_known = tnum_subreg_is_const(dst_reg->var_off); in scalar32_min_max_and()
14952 struct tnum var32_off = tnum_subreg(dst_reg->var_off); in scalar32_min_max_and()
14953 u32 umax_val = src_reg->u32_max_value; in scalar32_min_max_and()
14963 dst_reg->u32_min_value = var32_off.value; in scalar32_min_max_and()
14964 dst_reg->u32_max_value = min(dst_reg->u32_max_value, umax_val); in scalar32_min_max_and()
14969 if ((s32)dst_reg->u32_min_value <= (s32)dst_reg->u32_max_value) { in scalar32_min_max_and()
14970 dst_reg->s32_min_value = dst_reg->u32_min_value; in scalar32_min_max_and()
14971 dst_reg->s32_max_value = dst_reg->u32_max_value; in scalar32_min_max_and()
14973 dst_reg->s32_min_value = S32_MIN; in scalar32_min_max_and()
14974 dst_reg->s32_max_value = S32_MAX; in scalar32_min_max_and()
14981 bool src_known = tnum_is_const(src_reg->var_off); in scalar_min_max_and()
14982 bool dst_known = tnum_is_const(dst_reg->var_off); in scalar_min_max_and()
14983 u64 umax_val = src_reg->umax_value; in scalar_min_max_and()
14986 __mark_reg_known(dst_reg, dst_reg->var_off.value); in scalar_min_max_and()
14993 dst_reg->umin_value = dst_reg->var_off.value; in scalar_min_max_and()
14994 dst_reg->umax_value = min(dst_reg->umax_value, umax_val); in scalar_min_max_and()
14999 if ((s64)dst_reg->umin_value <= (s64)dst_reg->umax_value) { in scalar_min_max_and()
15000 dst_reg->smin_value = dst_reg->umin_value; in scalar_min_max_and()
15001 dst_reg->smax_value = dst_reg->umax_value; in scalar_min_max_and()
15003 dst_reg->smin_value = S64_MIN; in scalar_min_max_and()
15004 dst_reg->smax_value = S64_MAX; in scalar_min_max_and()
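/*
 * Worked example for the ANDing above (illustrative, not kernel code): AND
 * can only clear bits, so umin comes from the bits known to be one in
 * var_off and umax can never exceed the other operand's umax. For x in
 * [0, 255] ANDed with the constant 0x0f, nothing is known to be set, so
 * umin = 0 and umax = min(255, 0x0f) = 15. Because [0, 15] does not cross
 * the sign boundary when reinterpreted as s64, the same interval is reused
 * for the signed bounds; otherwise they are reset to [S64_MIN, S64_MAX].
 */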
15013 bool src_known = tnum_subreg_is_const(src_reg->var_off); in scalar32_min_max_or()
15014 bool dst_known = tnum_subreg_is_const(dst_reg->var_off); in scalar32_min_max_or()
15015 struct tnum var32_off = tnum_subreg(dst_reg->var_off); in scalar32_min_max_or()
15016 u32 umin_val = src_reg->u32_min_value; in scalar32_min_max_or()
15026 dst_reg->u32_min_value = max(dst_reg->u32_min_value, umin_val); in scalar32_min_max_or()
15027 dst_reg->u32_max_value = var32_off.value | var32_off.mask; in scalar32_min_max_or()
15032 if ((s32)dst_reg->u32_min_value <= (s32)dst_reg->u32_max_value) { in scalar32_min_max_or()
15033 dst_reg->s32_min_value = dst_reg->u32_min_value; in scalar32_min_max_or()
15034 dst_reg->s32_max_value = dst_reg->u32_max_value; in scalar32_min_max_or()
15036 dst_reg->s32_min_value = S32_MIN; in scalar32_min_max_or()
15037 dst_reg->s32_max_value = S32_MAX; in scalar32_min_max_or()
15044 bool src_known = tnum_is_const(src_reg->var_off); in scalar_min_max_or()
15045 bool dst_known = tnum_is_const(dst_reg->var_off); in scalar_min_max_or()
15046 u64 umin_val = src_reg->umin_value; in scalar_min_max_or()
15049 __mark_reg_known(dst_reg, dst_reg->var_off.value); in scalar_min_max_or()
15056 dst_reg->umin_value = max(dst_reg->umin_value, umin_val); in scalar_min_max_or()
15057 dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask; in scalar_min_max_or()
15062 if ((s64)dst_reg->umin_value <= (s64)dst_reg->umax_value) { in scalar_min_max_or()
15063 dst_reg->smin_value = dst_reg->umin_value; in scalar_min_max_or()
15064 dst_reg->smax_value = dst_reg->umax_value; in scalar_min_max_or()
15066 dst_reg->smin_value = S64_MIN; in scalar_min_max_or()
15067 dst_reg->smax_value = S64_MAX; in scalar_min_max_or()
15076 bool src_known = tnum_subreg_is_const(src_reg->var_off); in scalar32_min_max_xor()
15077 bool dst_known = tnum_subreg_is_const(dst_reg->var_off); in scalar32_min_max_xor()
15078 struct tnum var32_off = tnum_subreg(dst_reg->var_off); in scalar32_min_max_xor()
15086 dst_reg->u32_min_value = var32_off.value; in scalar32_min_max_xor()
15087 dst_reg->u32_max_value = var32_off.value | var32_off.mask; in scalar32_min_max_xor()
15092 if ((s32)dst_reg->u32_min_value <= (s32)dst_reg->u32_max_value) { in scalar32_min_max_xor()
15093 dst_reg->s32_min_value = dst_reg->u32_min_value; in scalar32_min_max_xor()
15094 dst_reg->s32_max_value = dst_reg->u32_max_value; in scalar32_min_max_xor()
15096 dst_reg->s32_min_value = S32_MIN; in scalar32_min_max_xor()
15097 dst_reg->s32_max_value = S32_MAX; in scalar32_min_max_xor()
15104 bool src_known = tnum_is_const(src_reg->var_off); in scalar_min_max_xor()
15105 bool dst_known = tnum_is_const(dst_reg->var_off); in scalar_min_max_xor()
15108 /* dst_reg->var_off.value has been updated earlier */ in scalar_min_max_xor()
15109 __mark_reg_known(dst_reg, dst_reg->var_off.value); in scalar_min_max_xor()
15114 dst_reg->umin_value = dst_reg->var_off.value; in scalar_min_max_xor()
15115 dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask; in scalar_min_max_xor()
15120 if ((s64)dst_reg->umin_value <= (s64)dst_reg->umax_value) { in scalar_min_max_xor()
15121 dst_reg->smin_value = dst_reg->umin_value; in scalar_min_max_xor()
15122 dst_reg->smax_value = dst_reg->umax_value; in scalar_min_max_xor()
15124 dst_reg->smin_value = S64_MIN; in scalar_min_max_xor()
15125 dst_reg->smax_value = S64_MAX; in scalar_min_max_xor()
15137 dst_reg->s32_min_value = S32_MIN; in __scalar32_min_max_lsh()
15138 dst_reg->s32_max_value = S32_MAX; in __scalar32_min_max_lsh()
15140 if (umax_val > 31 || dst_reg->u32_max_value > 1ULL << (31 - umax_val)) { in __scalar32_min_max_lsh()
15141 dst_reg->u32_min_value = 0; in __scalar32_min_max_lsh()
15142 dst_reg->u32_max_value = U32_MAX; in __scalar32_min_max_lsh()
15144 dst_reg->u32_min_value <<= umin_val; in __scalar32_min_max_lsh()
15145 dst_reg->u32_max_value <<= umax_val; in __scalar32_min_max_lsh()
15152 u32 umax_val = src_reg->u32_max_value; in scalar32_min_max_lsh()
15153 u32 umin_val = src_reg->u32_min_value; in scalar32_min_max_lsh()
15155 struct tnum subreg = tnum_subreg(dst_reg->var_off); in scalar32_min_max_lsh()
15158 dst_reg->var_off = tnum_subreg(tnum_lshift(subreg, umin_val)); in scalar32_min_max_lsh()
15177 if (umin_val == 32 && umax_val == 32 && dst_reg->s32_max_value >= 0) in __scalar64_min_max_lsh()
15178 dst_reg->smax_value = (s64)dst_reg->s32_max_value << 32; in __scalar64_min_max_lsh()
15180 dst_reg->smax_value = S64_MAX; in __scalar64_min_max_lsh()
15182 if (umin_val == 32 && umax_val == 32 && dst_reg->s32_min_value >= 0) in __scalar64_min_max_lsh()
15183 dst_reg->smin_value = (s64)dst_reg->s32_min_value << 32; in __scalar64_min_max_lsh()
15185 dst_reg->smin_value = S64_MIN; in __scalar64_min_max_lsh()
15188 if (dst_reg->umax_value > 1ULL << (63 - umax_val)) { in __scalar64_min_max_lsh()
15189 dst_reg->umin_value = 0; in __scalar64_min_max_lsh()
15190 dst_reg->umax_value = U64_MAX; in __scalar64_min_max_lsh()
15192 dst_reg->umin_value <<= umin_val; in __scalar64_min_max_lsh()
15193 dst_reg->umax_value <<= umax_val; in __scalar64_min_max_lsh()
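/*
 * Illustrative standalone sketch (not kernel code; the helper name is made
 * up): the shift above preserves the unsigned interval only while no set bit
 * can be pushed past bit 63. Otherwise the smallest value shifted by the
 * smallest amount and the largest value shifted by the largest amount still
 * bound the result.
 *
 *   #include <stdint.h>
 *
 *   static void range_lsh(uint64_t *umin, uint64_t *umax,
 *                         uint64_t shift_min, uint64_t shift_max)
 *   {
 *           if (shift_max > 63 || *umax > (1ULL << (63 - shift_max))) {
 *                   *umin = 0;                  // a set bit may fall off the top
 *                   *umax = UINT64_MAX;
 *           } else {
 *                   *umin <<= shift_min;
 *                   *umax <<= shift_max;
 *           }
 *   }
 *
 * E.g. [1, 3] << [2, 4] gives [4, 48], while [1, 1ULL << 60] << [0, 8] could
 * shift bit 60 past bit 63 and therefore becomes [0, UINT64_MAX].
 */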
15200 u64 umax_val = src_reg->umax_value; in scalar_min_max_lsh()
15201 u64 umin_val = src_reg->umin_value; in scalar_min_max_lsh()
15207 dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val); in scalar_min_max_lsh()
15215 struct tnum subreg = tnum_subreg(dst_reg->var_off); in scalar32_min_max_rsh()
15216 u32 umax_val = src_reg->u32_max_value; in scalar32_min_max_rsh()
15217 u32 umin_val = src_reg->u32_min_value; in scalar32_min_max_rsh()
15233 dst_reg->s32_min_value = S32_MIN; in scalar32_min_max_rsh()
15234 dst_reg->s32_max_value = S32_MAX; in scalar32_min_max_rsh()
15236 dst_reg->var_off = tnum_rshift(subreg, umin_val); in scalar32_min_max_rsh()
15237 dst_reg->u32_min_value >>= umax_val; in scalar32_min_max_rsh()
15238 dst_reg->u32_max_value >>= umin_val; in scalar32_min_max_rsh()
15247 u64 umax_val = src_reg->umax_value; in scalar_min_max_rsh()
15248 u64 umin_val = src_reg->umin_value; in scalar_min_max_rsh()
15264 dst_reg->smin_value = S64_MIN; in scalar_min_max_rsh()
15265 dst_reg->smax_value = S64_MAX; in scalar_min_max_rsh()
15266 dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val); in scalar_min_max_rsh()
15267 dst_reg->umin_value >>= umax_val; in scalar_min_max_rsh()
15268 dst_reg->umax_value >>= umin_val; in scalar_min_max_rsh()
15281 u64 umin_val = src_reg->u32_min_value; in scalar32_min_max_arsh()
15286 dst_reg->s32_min_value = (u32)(((s32)dst_reg->s32_min_value) >> umin_val); in scalar32_min_max_arsh()
15287 dst_reg->s32_max_value = (u32)(((s32)dst_reg->s32_max_value) >> umin_val); in scalar32_min_max_arsh()
15289 dst_reg->var_off = tnum_arshift(tnum_subreg(dst_reg->var_off), umin_val, 32); in scalar32_min_max_arsh()
15294 dst_reg->u32_min_value = 0; in scalar32_min_max_arsh()
15295 dst_reg->u32_max_value = U32_MAX; in scalar32_min_max_arsh()
15304 u64 umin_val = src_reg->umin_value; in scalar_min_max_arsh()
15309 dst_reg->smin_value >>= umin_val; in scalar_min_max_arsh()
15310 dst_reg->smax_value >>= umin_val; in scalar_min_max_arsh()
15312 dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val, 64); in scalar_min_max_arsh()
15317 dst_reg->umin_value = 0; in scalar_min_max_arsh()
15318 dst_reg->umax_value = U64_MAX; in scalar_min_max_arsh()
15321 * on bits being shifted in from upper 32-bits. Take easy way out in scalar_min_max_arsh()
15332 u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32; in is_safe_to_compute_dst_reg_range()
15335 if (tnum_subreg_is_const(src_reg->var_off) in is_safe_to_compute_dst_reg_range()
15336 && src_reg->s32_min_value == src_reg->s32_max_value in is_safe_to_compute_dst_reg_range()
15337 && src_reg->u32_min_value == src_reg->u32_max_value) in is_safe_to_compute_dst_reg_range()
15340 if (tnum_is_const(src_reg->var_off) in is_safe_to_compute_dst_reg_range()
15341 && src_reg->smin_value == src_reg->smax_value in is_safe_to_compute_dst_reg_range()
15342 && src_reg->umin_value == src_reg->umax_value) in is_safe_to_compute_dst_reg_range()
15346 switch (BPF_OP(insn->code)) { in is_safe_to_compute_dst_reg_range()
15363 return (src_is_const && src_reg->umax_value < insn_bitness); in is_safe_to_compute_dst_reg_range()
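/*
 * Illustrative note (not kernel code; the helper name is made up): for the
 * shift opcodes the check above only reports "safe" when the shift amount is
 * a single known constant smaller than the operand width, since e.g. a
 * 64-bit "x <<= 64" is undefined and no bounds could describe its result.
 *
 *   #include <stdbool.h>
 *   #include <stdint.h>
 *
 *   static bool shift_amount_ok(uint64_t lo, uint64_t hi, unsigned int bitness)
 *   {
 *           return lo == hi && hi < bitness;    // constant and in range
 *   }
 */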
15369 /* WARNING: This function does calculations on 64-bit values, but the actual
15370 * execution may occur on 32-bit values. Therefore, things like bitshifts
15371 * need extra checks in the 32-bit case.
15378 u8 opcode = BPF_OP(insn->code); in adjust_scalar_min_max_vals()
15379 bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64); in adjust_scalar_min_max_vals()
15399 * understand and calculate behavior in both 32-bit and 64-bit alu ops. in adjust_scalar_min_max_vals()
15411 dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off); in adjust_scalar_min_max_vals()
15416 dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off); in adjust_scalar_min_max_vals()
15419 env->fake_reg[0] = *dst_reg; in adjust_scalar_min_max_vals()
15421 scalar32_min_max_sub(dst_reg, &env->fake_reg[0]); in adjust_scalar_min_max_vals()
15422 scalar_min_max_sub(dst_reg, &env->fake_reg[0]); in adjust_scalar_min_max_vals()
15423 dst_reg->var_off = tnum_neg(env->fake_reg[0].var_off); in adjust_scalar_min_max_vals()
15426 dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off); in adjust_scalar_min_max_vals()
15431 dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off); in adjust_scalar_min_max_vals()
15436 dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off); in adjust_scalar_min_max_vals()
15441 dst_reg->var_off = tnum_xor(dst_reg->var_off, src_reg.var_off); in adjust_scalar_min_max_vals()
15480 struct bpf_verifier_state *vstate = env->cur_state; in adjust_reg_min_max_vals()
15481 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in adjust_reg_min_max_vals()
15482 struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg; in adjust_reg_min_max_vals()
15484 bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64); in adjust_reg_min_max_vals()
15485 u8 opcode = BPF_OP(insn->code); in adjust_reg_min_max_vals()
15488 dst_reg = &regs[insn->dst_reg]; in adjust_reg_min_max_vals()
15491 if (dst_reg->type == PTR_TO_ARENA) { in adjust_reg_min_max_vals()
15494 if (BPF_CLASS(insn->code) == BPF_ALU64) in adjust_reg_min_max_vals()
15496 * 32-bit operations zero upper bits automatically. in adjust_reg_min_max_vals()
15497 * 64-bit operations need to be converted to 32. in adjust_reg_min_max_vals()
15499 aux->needs_zext = true; in adjust_reg_min_max_vals()
15505 if (dst_reg->type != SCALAR_VALUE) in adjust_reg_min_max_vals()
15508 if (BPF_SRC(insn->code) == BPF_X) { in adjust_reg_min_max_vals()
15509 src_reg = &regs[insn->src_reg]; in adjust_reg_min_max_vals()
15510 if (src_reg->type != SCALAR_VALUE) { in adjust_reg_min_max_vals()
15511 if (dst_reg->type != SCALAR_VALUE) { in adjust_reg_min_max_vals()
15516 if (opcode == BPF_SUB && env->allow_ptr_leaks) { in adjust_reg_min_max_vals()
15517 mark_reg_unknown(env, regs, insn->dst_reg); in adjust_reg_min_max_vals()
15521 insn->dst_reg, in adjust_reg_min_max_vals()
15523 return -EACCES; in adjust_reg_min_max_vals()
15529 err = mark_chain_precision(env, insn->dst_reg); in adjust_reg_min_max_vals()
15537 err = mark_chain_precision(env, insn->src_reg); in adjust_reg_min_max_vals()
15542 } else if (dst_reg->precise) { in adjust_reg_min_max_vals()
15544 err = mark_chain_precision(env, insn->src_reg); in adjust_reg_min_max_vals()
15553 __mark_reg_known(&off_reg, insn->imm); in adjust_reg_min_max_vals()
15562 print_verifier_state(env, vstate, vstate->curframe, true); in adjust_reg_min_max_vals()
15564 return -EFAULT; in adjust_reg_min_max_vals()
15567 print_verifier_state(env, vstate, vstate->curframe, true); in adjust_reg_min_max_vals()
15569 return -EFAULT; in adjust_reg_min_max_vals()
15580 * So for 64-bit alu remember constant delta between r2 and r1 and in adjust_reg_min_max_vals()
15583 if (env->bpf_capable && in adjust_reg_min_max_vals()
15584 BPF_OP(insn->code) == BPF_ADD && !alu32 && in adjust_reg_min_max_vals()
15585 dst_reg->id && is_reg_const(src_reg, false)) { in adjust_reg_min_max_vals()
15588 if ((dst_reg->id & BPF_ADD_CONST) || in adjust_reg_min_max_vals()
15593 * we cannot accumulate another val into rx->off. in adjust_reg_min_max_vals()
15595 dst_reg->off = 0; in adjust_reg_min_max_vals()
15596 dst_reg->id = 0; in adjust_reg_min_max_vals()
15598 dst_reg->id |= BPF_ADD_CONST; in adjust_reg_min_max_vals()
15599 dst_reg->off = val; in adjust_reg_min_max_vals()
15606 dst_reg->id = 0; in adjust_reg_min_max_vals()
15611 /* check validity of 32-bit and 64-bit arithmetic operations */
15615 u8 opcode = BPF_OP(insn->code); in check_alu_op()
15620 if (BPF_SRC(insn->code) != BPF_K || in check_alu_op()
15621 insn->src_reg != BPF_REG_0 || in check_alu_op()
15622 insn->off != 0 || insn->imm != 0) { in check_alu_op()
15624 return -EINVAL; in check_alu_op()
15627 if (insn->src_reg != BPF_REG_0 || insn->off != 0 || in check_alu_op()
15628 (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) || in check_alu_op()
15629 (BPF_CLASS(insn->code) == BPF_ALU64 && in check_alu_op()
15630 BPF_SRC(insn->code) != BPF_TO_LE)) { in check_alu_op()
15632 return -EINVAL; in check_alu_op()
15637 err = check_reg_arg(env, insn->dst_reg, SRC_OP); in check_alu_op()
15641 if (is_pointer_value(env, insn->dst_reg)) { in check_alu_op()
15643 insn->dst_reg); in check_alu_op()
15644 return -EACCES; in check_alu_op()
15649 regs[insn->dst_reg].type == SCALAR_VALUE) { in check_alu_op()
15650 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); in check_alu_op()
15652 &regs[insn->dst_reg], in check_alu_op()
15653 regs[insn->dst_reg]); in check_alu_op()
15655 err = check_reg_arg(env, insn->dst_reg, DST_OP); in check_alu_op()
15662 if (BPF_SRC(insn->code) == BPF_X) { in check_alu_op()
15663 if (BPF_CLASS(insn->code) == BPF_ALU) { in check_alu_op()
15664 if ((insn->off != 0 && insn->off != 8 && insn->off != 16) || in check_alu_op()
15665 insn->imm) { in check_alu_op()
15667 return -EINVAL; in check_alu_op()
15669 } else if (insn->off == BPF_ADDR_SPACE_CAST) { in check_alu_op()
15670 if (insn->imm != 1 && insn->imm != 1u << 16) { in check_alu_op()
15672 return -EINVAL; in check_alu_op()
15674 if (!env->prog->aux->arena) { in check_alu_op()
15676 return -EINVAL; in check_alu_op()
15679 if ((insn->off != 0 && insn->off != 8 && insn->off != 16 && in check_alu_op()
15680 insn->off != 32) || insn->imm) { in check_alu_op()
15682 return -EINVAL; in check_alu_op()
15687 err = check_reg_arg(env, insn->src_reg, SRC_OP); in check_alu_op()
15691 if (insn->src_reg != BPF_REG_0 || insn->off != 0) { in check_alu_op()
15693 return -EINVAL; in check_alu_op()
15698 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); in check_alu_op()
15702 if (BPF_SRC(insn->code) == BPF_X) { in check_alu_op()
15703 struct bpf_reg_state *src_reg = regs + insn->src_reg; in check_alu_op()
15704 struct bpf_reg_state *dst_reg = regs + insn->dst_reg; in check_alu_op()
15706 if (BPF_CLASS(insn->code) == BPF_ALU64) { in check_alu_op()
15707 if (insn->imm) { in check_alu_op()
15709 mark_reg_unknown(env, regs, insn->dst_reg); in check_alu_op()
15710 if (insn->imm == 1) { /* cast from as(1) to as(0) */ in check_alu_op()
15711 dst_reg->type = PTR_TO_ARENA; in check_alu_op()
15712 /* PTR_TO_ARENA is 32-bit */ in check_alu_op()
15713 dst_reg->subreg_def = env->insn_idx + 1; in check_alu_op()
15715 } else if (insn->off == 0) { in check_alu_op()
15721 dst_reg->subreg_def = DEF_NOT_SUBREG; in check_alu_op()
15724 if (is_pointer_value(env, insn->src_reg)) { in check_alu_op()
15726 "R%d sign-extension part of pointer\n", in check_alu_op()
15727 insn->src_reg); in check_alu_op()
15728 return -EACCES; in check_alu_op()
15729 } else if (src_reg->type == SCALAR_VALUE) { in check_alu_op()
15732 no_sext = src_reg->umax_value < (1ULL << (insn->off - 1)); in check_alu_op()
15737 dst_reg->id = 0; in check_alu_op()
15738 coerce_reg_to_size_sx(dst_reg, insn->off >> 3); in check_alu_op()
15739 dst_reg->subreg_def = DEF_NOT_SUBREG; in check_alu_op()
15741 mark_reg_unknown(env, regs, insn->dst_reg); in check_alu_op()
15746 if (is_pointer_value(env, insn->src_reg)) { in check_alu_op()
15749 insn->src_reg); in check_alu_op()
15750 return -EACCES; in check_alu_op()
15751 } else if (src_reg->type == SCALAR_VALUE) { in check_alu_op()
15752 if (insn->off == 0) { in check_alu_op()
15763 dst_reg->id = 0; in check_alu_op()
15764 dst_reg->subreg_def = env->insn_idx + 1; in check_alu_op()
15767 bool no_sext = src_reg->umax_value < (1ULL << (insn->off - 1)); in check_alu_op()
15773 dst_reg->id = 0; in check_alu_op()
15774 dst_reg->subreg_def = env->insn_idx + 1; in check_alu_op()
15775 coerce_subreg_to_size_sx(dst_reg, insn->off >> 3); in check_alu_op()
15779 insn->dst_reg); in check_alu_op()
15789 mark_reg_unknown(env, regs, insn->dst_reg); in check_alu_op()
15790 regs[insn->dst_reg].type = SCALAR_VALUE; in check_alu_op()
15791 if (BPF_CLASS(insn->code) == BPF_ALU64) { in check_alu_op()
15792 __mark_reg_known(regs + insn->dst_reg, in check_alu_op()
15793 insn->imm); in check_alu_op()
15795 __mark_reg_known(regs + insn->dst_reg, in check_alu_op()
15796 (u32)insn->imm); in check_alu_op()
15802 return -EINVAL; in check_alu_op()
15806 if (BPF_SRC(insn->code) == BPF_X) { in check_alu_op()
15807 if (insn->imm != 0 || (insn->off != 0 && insn->off != 1) || in check_alu_op()
15808 (insn->off == 1 && opcode != BPF_MOD && opcode != BPF_DIV)) { in check_alu_op()
15810 return -EINVAL; in check_alu_op()
15813 err = check_reg_arg(env, insn->src_reg, SRC_OP); in check_alu_op()
15817 if (insn->src_reg != BPF_REG_0 || (insn->off != 0 && insn->off != 1) || in check_alu_op()
15818 (insn->off == 1 && opcode != BPF_MOD && opcode != BPF_DIV)) { in check_alu_op()
15820 return -EINVAL; in check_alu_op()
15825 err = check_reg_arg(env, insn->dst_reg, SRC_OP); in check_alu_op()
15830 BPF_SRC(insn->code) == BPF_K && insn->imm == 0) { in check_alu_op()
15832 return -EINVAL; in check_alu_op()
15836 opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { in check_alu_op()
15837 int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32; in check_alu_op()
15839 if (insn->imm < 0 || insn->imm >= size) { in check_alu_op()
15840 verbose(env, "invalid shift %d\n", insn->imm); in check_alu_op()
15841 return -EINVAL; in check_alu_op()
15846 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); in check_alu_op()
15852 return reg_bounds_sanity_check(env, &regs[insn->dst_reg], "alu"); in check_alu_op()
15864 if (dst_reg->off < 0 || in find_good_pkt_pointers()
15865 (dst_reg->off == 0 && range_right_open)) in find_good_pkt_pointers()
15869 if (dst_reg->umax_value > MAX_PACKET_OFF || in find_good_pkt_pointers()
15870 dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF) in find_good_pkt_pointers()
15876 new_range = dst_reg->off; in find_good_pkt_pointers()
15917 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8) in find_good_pkt_pointers()
15918 * and [r3, r3 + 8-1) respectively is safe to access depending on in find_good_pkt_pointers()
15925 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16. in find_good_pkt_pointers()
15928 if (reg->type == type && reg->id == dst_reg->id) in find_good_pkt_pointers()
15930 reg->range = max(reg->range, new_range); in find_good_pkt_pointers()
15940 struct tnum t1 = is_jmp32 ? tnum_subreg(reg1->var_off) : reg1->var_off; in is_scalar_branch_taken()
15941 struct tnum t2 = is_jmp32 ? tnum_subreg(reg2->var_off) : reg2->var_off; in is_scalar_branch_taken()
15942 u64 umin1 = is_jmp32 ? (u64)reg1->u32_min_value : reg1->umin_value; in is_scalar_branch_taken()
15943 u64 umax1 = is_jmp32 ? (u64)reg1->u32_max_value : reg1->umax_value; in is_scalar_branch_taken()
15944 s64 smin1 = is_jmp32 ? (s64)reg1->s32_min_value : reg1->smin_value; in is_scalar_branch_taken()
15945 s64 smax1 = is_jmp32 ? (s64)reg1->s32_max_value : reg1->smax_value; in is_scalar_branch_taken()
15946 u64 umin2 = is_jmp32 ? (u64)reg2->u32_min_value : reg2->umin_value; in is_scalar_branch_taken()
15947 u64 umax2 = is_jmp32 ? (u64)reg2->u32_max_value : reg2->umax_value; in is_scalar_branch_taken()
15948 s64 smin2 = is_jmp32 ? (s64)reg2->s32_min_value : reg2->smin_value; in is_scalar_branch_taken()
15949 s64 smax2 = is_jmp32 ? (s64)reg2->s32_max_value : reg2->smax_value; in is_scalar_branch_taken()
15960 /* non-overlapping ranges */ in is_scalar_branch_taken()
15966 /* if 64-bit ranges are inconclusive, see if we can in is_scalar_branch_taken()
15967 * utilize 32-bit subrange knowledge to eliminate in is_scalar_branch_taken()
15970 if (reg1->u32_min_value > reg2->u32_max_value || in is_scalar_branch_taken()
15971 reg1->u32_max_value < reg2->u32_min_value) in is_scalar_branch_taken()
15973 if (reg1->s32_min_value > reg2->s32_max_value || in is_scalar_branch_taken()
15974 reg1->s32_max_value < reg2->s32_min_value) in is_scalar_branch_taken()
15986 /* non-overlapping ranges */ in is_scalar_branch_taken()
15992 /* if 64-bit ranges are inconclusive, see if we can in is_scalar_branch_taken()
15993 * utilize 32-bit subrange knowledge to eliminate in is_scalar_branch_taken()
15996 if (reg1->u32_min_value > reg2->u32_max_value || in is_scalar_branch_taken()
15997 reg1->u32_max_value < reg2->u32_min_value) in is_scalar_branch_taken()
15999 if (reg1->s32_min_value > reg2->s32_max_value || in is_scalar_branch_taken()
16000 reg1->s32_max_value < reg2->s32_min_value) in is_scalar_branch_taken()
16010 return -1; in is_scalar_branch_taken()
16066 return -1; in is_scalar_branch_taken()
16096 if (src_reg->type == PTR_TO_PACKET_END) { in is_pkt_ptr_branch_taken()
16098 } else if (dst_reg->type == PTR_TO_PACKET_END) { in is_pkt_ptr_branch_taken()
16102 return -1; in is_pkt_ptr_branch_taken()
16105 if (pkt->range >= 0) in is_pkt_ptr_branch_taken()
16106 return -1; in is_pkt_ptr_branch_taken()
16114 if (pkt->range == BEYOND_PKT_END) in is_pkt_ptr_branch_taken()
16123 if (pkt->range == BEYOND_PKT_END || pkt->range == AT_PKT_END) in is_pkt_ptr_branch_taken()
16127 return -1; in is_pkt_ptr_branch_taken()
16132 * 1 - branch will be taken and "goto target" will be executed
16133 * 0 - branch will not be taken and execution falls through to the next insn
16134 * -1 - unknown. Example: "if (reg1 < 5)" is unknown when register value
16153 return -1; in is_branch_taken()
16156 return -1; in is_branch_taken()
16163 return -1; in is_branch_taken()
16171 return -1; in is_branch_taken()
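/*
 * Illustrative standalone sketch (not kernel code; the helper name is made
 * up): the 1/0/-1 verdict falls out of plain interval comparisons. For an
 * unsigned "a < b" jump:
 *
 *   #include <stdint.h>
 *
 *   // 1: always taken, 0: never taken, -1: cannot tell
 *   static int branch_taken_jlt(uint64_t amin, uint64_t amax,
 *                               uint64_t bmin, uint64_t bmax)
 *   {
 *           if (amax < bmin)
 *                   return 1;           // every possible a is below every possible b
 *           if (amin >= bmax)
 *                   return 0;           // no possible a is below any possible b
 *           return -1;                  // ranges overlap: both outcomes remain possible
 *   }
 *
 * E.g. a in [0, 4] vs b in [5, 9] is always taken, a in [9, 20] vs b in [1, 9]
 * is never taken, and a in [0, 10] vs b in [5, 9] stays unknown.
 */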
16227 reg1->u32_min_value = max(reg1->u32_min_value, reg2->u32_min_value); in regs_refine_cond_op()
16228 reg1->u32_max_value = min(reg1->u32_max_value, reg2->u32_max_value); in regs_refine_cond_op()
16229 reg1->s32_min_value = max(reg1->s32_min_value, reg2->s32_min_value); in regs_refine_cond_op()
16230 reg1->s32_max_value = min(reg1->s32_max_value, reg2->s32_max_value); in regs_refine_cond_op()
16231 reg2->u32_min_value = reg1->u32_min_value; in regs_refine_cond_op()
16232 reg2->u32_max_value = reg1->u32_max_value; in regs_refine_cond_op()
16233 reg2->s32_min_value = reg1->s32_min_value; in regs_refine_cond_op()
16234 reg2->s32_max_value = reg1->s32_max_value; in regs_refine_cond_op()
16236 t = tnum_intersect(tnum_subreg(reg1->var_off), tnum_subreg(reg2->var_off)); in regs_refine_cond_op()
16237 reg1->var_off = tnum_with_subreg(reg1->var_off, t); in regs_refine_cond_op()
16238 reg2->var_off = tnum_with_subreg(reg2->var_off, t); in regs_refine_cond_op()
16240 reg1->umin_value = max(reg1->umin_value, reg2->umin_value); in regs_refine_cond_op()
16241 reg1->umax_value = min(reg1->umax_value, reg2->umax_value); in regs_refine_cond_op()
16242 reg1->smin_value = max(reg1->smin_value, reg2->smin_value); in regs_refine_cond_op()
16243 reg1->smax_value = min(reg1->smax_value, reg2->smax_value); in regs_refine_cond_op()
16244 reg2->umin_value = reg1->umin_value; in regs_refine_cond_op()
16245 reg2->umax_value = reg1->umax_value; in regs_refine_cond_op()
16246 reg2->smin_value = reg1->smin_value; in regs_refine_cond_op()
16247 reg2->smax_value = reg1->smax_value; in regs_refine_cond_op()
16249 reg1->var_off = tnum_intersect(reg1->var_off, reg2->var_off); in regs_refine_cond_op()
16250 reg2->var_off = reg1->var_off; in regs_refine_cond_op()
16273 if (reg1->u32_min_value == (u32)val) in regs_refine_cond_op()
16274 reg1->u32_min_value++; in regs_refine_cond_op()
16275 if (reg1->u32_max_value == (u32)val) in regs_refine_cond_op()
16276 reg1->u32_max_value--; in regs_refine_cond_op()
16277 if (reg1->s32_min_value == (s32)val) in regs_refine_cond_op()
16278 reg1->s32_min_value++; in regs_refine_cond_op()
16279 if (reg1->s32_max_value == (s32)val) in regs_refine_cond_op()
16280 reg1->s32_max_value--; in regs_refine_cond_op()
16282 if (reg1->umin_value == (u64)val) in regs_refine_cond_op()
16283 reg1->umin_value++; in regs_refine_cond_op()
16284 if (reg1->umax_value == (u64)val) in regs_refine_cond_op()
16285 reg1->umax_value--; in regs_refine_cond_op()
16286 if (reg1->smin_value == (s64)val) in regs_refine_cond_op()
16287 reg1->smin_value++; in regs_refine_cond_op()
16288 if (reg1->smax_value == (s64)val) in regs_refine_cond_op()
16289 reg1->smax_value--; in regs_refine_cond_op()
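/*
 * Worked example for the endpoint trimming above (illustrative, not kernel
 * code; the helper name is made up): learning "r1 != val" only helps when
 * val sits exactly on an edge of r1's current range, because a hole in the
 * middle of an interval cannot be expressed with min/max bounds alone. With
 * r1 in [5, 10], "r1 != 5" tightens the range to [6, 10]; with r1 in
 * [0, 10] it changes nothing.
 *
 *   #include <stdint.h>
 *
 *   static void exclude_val(uint64_t *umin, uint64_t *umax, uint64_t val)
 *   {
 *           if (*umin == val)
 *                   (*umin)++;
 *           if (*umax == val)
 *                   (*umax)--;
 *   }
 */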
16302 * it's a single-bit value to begin with. in regs_refine_cond_op()
16311 t = tnum_or(tnum_subreg(reg1->var_off), tnum_const(val)); in regs_refine_cond_op()
16312 reg1->var_off = tnum_with_subreg(reg1->var_off, t); in regs_refine_cond_op()
16314 reg1->var_off = tnum_or(reg1->var_off, tnum_const(val)); in regs_refine_cond_op()
16328 t = tnum_and(tnum_subreg(reg1->var_off), tnum_const(~val)); in regs_refine_cond_op()
16329 reg1->var_off = tnum_with_subreg(reg1->var_off, t); in regs_refine_cond_op()
16331 reg1->var_off = tnum_and(reg1->var_off, tnum_const(~val)); in regs_refine_cond_op()
16336 reg1->u32_max_value = min(reg1->u32_max_value, reg2->u32_max_value); in regs_refine_cond_op()
16337 reg2->u32_min_value = max(reg1->u32_min_value, reg2->u32_min_value); in regs_refine_cond_op()
16339 reg1->umax_value = min(reg1->umax_value, reg2->umax_value); in regs_refine_cond_op()
16340 reg2->umin_value = max(reg1->umin_value, reg2->umin_value); in regs_refine_cond_op()
16345 reg1->u32_max_value = min(reg1->u32_max_value, reg2->u32_max_value - 1); in regs_refine_cond_op()
16346 reg2->u32_min_value = max(reg1->u32_min_value + 1, reg2->u32_min_value); in regs_refine_cond_op()
16348 reg1->umax_value = min(reg1->umax_value, reg2->umax_value - 1); in regs_refine_cond_op()
16349 reg2->umin_value = max(reg1->umin_value + 1, reg2->umin_value); in regs_refine_cond_op()
16354 reg1->s32_max_value = min(reg1->s32_max_value, reg2->s32_max_value); in regs_refine_cond_op()
16355 reg2->s32_min_value = max(reg1->s32_min_value, reg2->s32_min_value); in regs_refine_cond_op()
16357 reg1->smax_value = min(reg1->smax_value, reg2->smax_value); in regs_refine_cond_op()
16358 reg2->smin_value = max(reg1->smin_value, reg2->smin_value); in regs_refine_cond_op()
16363 reg1->s32_max_value = min(reg1->s32_max_value, reg2->s32_max_value - 1); in regs_refine_cond_op()
16364 reg2->s32_min_value = max(reg1->s32_min_value + 1, reg2->s32_min_value); in regs_refine_cond_op()
16366 reg1->smax_value = min(reg1->smax_value, reg2->smax_value - 1); in regs_refine_cond_op()
16367 reg2->smin_value = max(reg1->smin_value + 1, reg2->smin_value); in regs_refine_cond_op()
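/*
 * Worked example for the BPF_JLT refinement above (illustrative, not kernel
 * code): in the branch where "r1 < r2" is known to hold, both registers
 * learn something. With r1 in [0, 100] and r2 in [0, 50]:
 *
 *   r1->umax_value = min(100, 50 - 1) = 49
 *   r2->umin_value = max(0 + 1, 0)    = 1
 *
 * so that branch continues with r1 in [0, 49] and r2 in [1, 50].
 */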
16377 * check, in which case we have a fake SCALAR_VALUE representing insn->imm).
16394 if (false_reg1->type != SCALAR_VALUE || false_reg2->type != SCALAR_VALUE) in reg_set_min_max()
16418 if (type_may_be_null(reg->type) && reg->id == id && in mark_ptr_or_null_reg()
16419 (is_rcu_reg(reg) || !WARN_ON_ONCE(!reg->id))) { in mark_ptr_or_null_reg()
16421 * known-zero, because we don't allow pointer arithmetic on in mark_ptr_or_null_reg()
16427 * is fine to expect to see reg->off. in mark_ptr_or_null_reg()
16429 if (WARN_ON_ONCE(reg->smin_value || reg->smax_value || !tnum_equals_const(reg->var_off, 0))) in mark_ptr_or_null_reg()
16431 if (!(type_is_ptr_alloc_obj(reg->type) || type_is_non_owning_ref(reg->type)) && in mark_ptr_or_null_reg()
16432 WARN_ON_ONCE(reg->off)) in mark_ptr_or_null_reg()
16436 reg->type = SCALAR_VALUE; in mark_ptr_or_null_reg()
16441 reg->id = 0; in mark_ptr_or_null_reg()
16442 reg->ref_obj_id = 0; in mark_ptr_or_null_reg()
16450 /* For not-NULL ptr, reg->ref_obj_id will be reset in mark_ptr_or_null_reg()
16453 * reg->id is still used by spin_lock ptr. Other in mark_ptr_or_null_reg()
16454 * than spin_lock ptr type, reg->id can be reset. in mark_ptr_or_null_reg()
16456 reg->id = 0; in mark_ptr_or_null_reg()
16467 struct bpf_func_state *state = vstate->frame[vstate->curframe]; in mark_ptr_or_null_regs()
16468 struct bpf_reg_state *regs = state->regs, *reg; in mark_ptr_or_null_regs()
16490 if (BPF_SRC(insn->code) != BPF_X) in try_match_pkt_pointers()
16493 /* Pointers are always 64-bit. */ in try_match_pkt_pointers()
16494 if (BPF_CLASS(insn->code) == BPF_JMP32) in try_match_pkt_pointers()
16497 switch (BPF_OP(insn->code)) { in try_match_pkt_pointers()
16499 if ((dst_reg->type == PTR_TO_PACKET && in try_match_pkt_pointers()
16500 src_reg->type == PTR_TO_PACKET_END) || in try_match_pkt_pointers()
16501 (dst_reg->type == PTR_TO_PACKET_META && in try_match_pkt_pointers()
16505 dst_reg->type, false); in try_match_pkt_pointers()
16506 mark_pkt_end(other_branch, insn->dst_reg, true); in try_match_pkt_pointers()
16507 } else if ((dst_reg->type == PTR_TO_PACKET_END && in try_match_pkt_pointers()
16508 src_reg->type == PTR_TO_PACKET) || in try_match_pkt_pointers()
16510 src_reg->type == PTR_TO_PACKET_META)) { in try_match_pkt_pointers()
16513 src_reg->type, true); in try_match_pkt_pointers()
16514 mark_pkt_end(this_branch, insn->src_reg, false); in try_match_pkt_pointers()
16520 if ((dst_reg->type == PTR_TO_PACKET && in try_match_pkt_pointers()
16521 src_reg->type == PTR_TO_PACKET_END) || in try_match_pkt_pointers()
16522 (dst_reg->type == PTR_TO_PACKET_META && in try_match_pkt_pointers()
16526 dst_reg->type, true); in try_match_pkt_pointers()
16527 mark_pkt_end(this_branch, insn->dst_reg, false); in try_match_pkt_pointers()
16528 } else if ((dst_reg->type == PTR_TO_PACKET_END && in try_match_pkt_pointers()
16529 src_reg->type == PTR_TO_PACKET) || in try_match_pkt_pointers()
16531 src_reg->type == PTR_TO_PACKET_META)) { in try_match_pkt_pointers()
16534 src_reg->type, false); in try_match_pkt_pointers()
16535 mark_pkt_end(other_branch, insn->src_reg, true); in try_match_pkt_pointers()
16541 if ((dst_reg->type == PTR_TO_PACKET && in try_match_pkt_pointers()
16542 src_reg->type == PTR_TO_PACKET_END) || in try_match_pkt_pointers()
16543 (dst_reg->type == PTR_TO_PACKET_META && in try_match_pkt_pointers()
16547 dst_reg->type, true); in try_match_pkt_pointers()
16548 mark_pkt_end(other_branch, insn->dst_reg, false); in try_match_pkt_pointers()
16549 } else if ((dst_reg->type == PTR_TO_PACKET_END && in try_match_pkt_pointers()
16550 src_reg->type == PTR_TO_PACKET) || in try_match_pkt_pointers()
16552 src_reg->type == PTR_TO_PACKET_META)) { in try_match_pkt_pointers()
16555 src_reg->type, false); in try_match_pkt_pointers()
16556 mark_pkt_end(this_branch, insn->src_reg, true); in try_match_pkt_pointers()
16562 if ((dst_reg->type == PTR_TO_PACKET && in try_match_pkt_pointers()
16563 src_reg->type == PTR_TO_PACKET_END) || in try_match_pkt_pointers()
16564 (dst_reg->type == PTR_TO_PACKET_META && in try_match_pkt_pointers()
16568 dst_reg->type, false); in try_match_pkt_pointers()
16569 mark_pkt_end(this_branch, insn->dst_reg, true); in try_match_pkt_pointers()
16570 } else if ((dst_reg->type == PTR_TO_PACKET_END && in try_match_pkt_pointers()
16571 src_reg->type == PTR_TO_PACKET) || in try_match_pkt_pointers()
16573 src_reg->type == PTR_TO_PACKET_META)) { in try_match_pkt_pointers()
16576 src_reg->type, true); in try_match_pkt_pointers()
16577 mark_pkt_end(other_branch, insn->src_reg, false); in try_match_pkt_pointers()
16594 if (reg->type != SCALAR_VALUE || (reg->id & ~BPF_ADD_CONST) != id) in __collect_linked_regs()
16599 e->frameno = frameno; in __collect_linked_regs()
16600 e->is_reg = is_reg; in __collect_linked_regs()
16601 e->regno = spi_or_reg; in __collect_linked_regs()
16603 reg->id = 0; in __collect_linked_regs()
16608 * in verifier state, save R in linked_regs if R->id == id.
16619 for (i = vstate->curframe; i >= 0; i--) { in collect_linked_regs()
16620 func = vstate->frame[i]; in collect_linked_regs()
16622 reg = &func->regs[j]; in collect_linked_regs()
16625 for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { in collect_linked_regs()
16626 if (!is_spilled_reg(&func->stack[j])) in collect_linked_regs()
16628 reg = &func->stack[j].spilled_ptr; in collect_linked_regs()
16635 * if R->id == known_reg->id.
16645 for (i = 0; i < linked_regs->cnt; ++i) { in sync_linked_regs()
16646 e = &linked_regs->entries[i]; in sync_linked_regs()
16647 reg = e->is_reg ? &vstate->frame[e->frameno]->regs[e->regno] in sync_linked_regs()
16648 : &vstate->frame[e->frameno]->stack[e->spi].spilled_ptr; in sync_linked_regs()
16649 if (reg->type != SCALAR_VALUE || reg == known_reg) in sync_linked_regs()
16651 if ((reg->id & ~BPF_ADD_CONST) != (known_reg->id & ~BPF_ADD_CONST)) in sync_linked_regs()
16653 if ((!(reg->id & BPF_ADD_CONST) && !(known_reg->id & BPF_ADD_CONST)) || in sync_linked_regs()
16654 reg->off == known_reg->off) { in sync_linked_regs()
16655 s32 saved_subreg_def = reg->subreg_def; in sync_linked_regs()
16658 reg->subreg_def = saved_subreg_def; in sync_linked_regs()
16660 s32 saved_subreg_def = reg->subreg_def; in sync_linked_regs()
16661 s32 saved_off = reg->off; in sync_linked_regs()
16664 __mark_reg_known(&fake_reg, (s32)reg->off - (s32)known_reg->off); in sync_linked_regs()
16672 reg->off = saved_off; in sync_linked_regs()
16673 reg->subreg_def = saved_subreg_def; in sync_linked_regs()
16677 reg->var_off = tnum_add(reg->var_off, fake_reg.var_off); in sync_linked_regs()
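/*
 * Illustrative example (not taken from this file) of what the syncing above
 * buys. After a sequence like
 *
 *   r2 = r1        // r1 and r2 now share an id
 *   r2 += 8        // the +8 delta is remembered via BPF_ADD_CONST and ->off
 *   if r1 > 100 goto out
 *
 * the fall-through branch learns r1 in [0, 100], and sync_linked_regs()
 * re-applies the recorded constant delta so that r2 is known to be in
 * [8, 108] as well, instead of staying unbounded.
 */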
16685 struct bpf_verifier_state *this_branch = env->cur_state; in check_cond_jmp_op()
16687 struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs; in check_cond_jmp_op()
16691 u8 opcode = BPF_OP(insn->code); in check_cond_jmp_op()
16694 int pred = -1; in check_cond_jmp_op()
16700 return -EINVAL; in check_cond_jmp_op()
16704 struct bpf_verifier_state *cur_st = env->cur_state, *queued_st, *prev_st; in check_cond_jmp_op()
16707 if (insn->code != (BPF_JMP | BPF_JCOND) || in check_cond_jmp_op()
16708 insn->src_reg != BPF_MAY_GOTO || in check_cond_jmp_op()
16709 insn->dst_reg || insn->imm) { in check_cond_jmp_op()
16710 verbose(env, "invalid may_goto imm %d\n", insn->imm); in check_cond_jmp_op()
16711 return -EINVAL; in check_cond_jmp_op()
16713 prev_st = find_prev_entry(env, cur_st->parent, idx); in check_cond_jmp_op()
16718 return -ENOMEM; in check_cond_jmp_op()
16720 queued_st->may_goto_depth++; in check_cond_jmp_op()
16723 *insn_idx += insn->off; in check_cond_jmp_op()
16728 err = check_reg_arg(env, insn->dst_reg, SRC_OP); in check_cond_jmp_op()
16732 dst_reg = &regs[insn->dst_reg]; in check_cond_jmp_op()
16733 if (BPF_SRC(insn->code) == BPF_X) { in check_cond_jmp_op()
16734 if (insn->imm != 0) { in check_cond_jmp_op()
16736 return -EINVAL; in check_cond_jmp_op()
16740 err = check_reg_arg(env, insn->src_reg, SRC_OP); in check_cond_jmp_op()
16744 src_reg = &regs[insn->src_reg]; in check_cond_jmp_op()
16746 is_pointer_value(env, insn->src_reg)) { in check_cond_jmp_op()
16748 insn->src_reg); in check_cond_jmp_op()
16749 return -EACCES; in check_cond_jmp_op()
16752 if (src_reg->type == PTR_TO_STACK) in check_cond_jmp_op()
16754 if (dst_reg->type == PTR_TO_STACK) in check_cond_jmp_op()
16757 if (insn->src_reg != BPF_REG_0) { in check_cond_jmp_op()
16759 return -EINVAL; in check_cond_jmp_op()
16761 src_reg = &env->fake_reg[0]; in check_cond_jmp_op()
16763 src_reg->type = SCALAR_VALUE; in check_cond_jmp_op()
16764 __mark_reg_known(src_reg, insn->imm); in check_cond_jmp_op()
16766 if (dst_reg->type == PTR_TO_STACK) in check_cond_jmp_op()
16776 is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; in check_cond_jmp_op()
16783 err = mark_chain_precision(env, insn->dst_reg); in check_cond_jmp_op()
16784 if (BPF_SRC(insn->code) == BPF_X && !err && in check_cond_jmp_op()
16786 err = mark_chain_precision(env, insn->src_reg); in check_cond_jmp_op()
16792 /* Only follow the goto, ignore fall-through. If needed, push in check_cond_jmp_op()
16793 * the fall-through branch for simulation under speculative in check_cond_jmp_op()
16796 if (!env->bypass_spec_v1 && in check_cond_jmp_op()
16799 return -EFAULT; in check_cond_jmp_op()
16800 if (env->log.level & BPF_LOG_LEVEL) in check_cond_jmp_op()
16801 print_insn_state(env, this_branch, this_branch->curframe); in check_cond_jmp_op()
16802 *insn_idx += insn->off; in check_cond_jmp_op()
16805 /* Only follow the fall-through branch, since that's where the in check_cond_jmp_op()
16809 if (!env->bypass_spec_v1 && in check_cond_jmp_op()
16811 *insn_idx + insn->off + 1, in check_cond_jmp_op()
16813 return -EFAULT; in check_cond_jmp_op()
16814 if (env->log.level & BPF_LOG_LEVEL) in check_cond_jmp_op()
16815 print_insn_state(env, this_branch, this_branch->curframe); in check_cond_jmp_op()
16824 if (BPF_SRC(insn->code) == BPF_X && src_reg->type == SCALAR_VALUE && src_reg->id) in check_cond_jmp_op()
16825 collect_linked_regs(this_branch, src_reg->id, &linked_regs); in check_cond_jmp_op()
16826 if (dst_reg->type == SCALAR_VALUE && dst_reg->id) in check_cond_jmp_op()
16827 collect_linked_regs(this_branch, dst_reg->id, &linked_regs); in check_cond_jmp_op()
16834 other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx, in check_cond_jmp_op()
16837 return -EFAULT; in check_cond_jmp_op()
16838 other_branch_regs = other_branch->frame[other_branch->curframe]->regs; in check_cond_jmp_op()
16840 if (BPF_SRC(insn->code) == BPF_X) { in check_cond_jmp_op()
16842 &other_branch_regs[insn->dst_reg], in check_cond_jmp_op()
16843 &other_branch_regs[insn->src_reg], in check_cond_jmp_op()
16845 } else /* BPF_SRC(insn->code) == BPF_K */ { in check_cond_jmp_op()
16850 memcpy(&env->fake_reg[1], &env->fake_reg[0], in check_cond_jmp_op()
16851 sizeof(env->fake_reg[0])); in check_cond_jmp_op()
16853 &other_branch_regs[insn->dst_reg], in check_cond_jmp_op()
16854 &env->fake_reg[0], in check_cond_jmp_op()
16855 dst_reg, &env->fake_reg[1], in check_cond_jmp_op()
16861 if (BPF_SRC(insn->code) == BPF_X && in check_cond_jmp_op()
16862 src_reg->type == SCALAR_VALUE && src_reg->id && in check_cond_jmp_op()
16863 !WARN_ON_ONCE(src_reg->id != other_branch_regs[insn->src_reg].id)) { in check_cond_jmp_op()
16865 sync_linked_regs(other_branch, &other_branch_regs[insn->src_reg], &linked_regs); in check_cond_jmp_op()
16867 if (dst_reg->type == SCALAR_VALUE && dst_reg->id && in check_cond_jmp_op()
16868 !WARN_ON_ONCE(dst_reg->id != other_branch_regs[insn->dst_reg].id)) { in check_cond_jmp_op()
16870 sync_linked_regs(other_branch, &other_branch_regs[insn->dst_reg], &linked_regs); in check_cond_jmp_op()
16875 * E.g. register A - maybe null in check_cond_jmp_op()
16876 * register B - not null in check_cond_jmp_op()
16877 * for JNE A, B, ... - A is not null in the false branch; in check_cond_jmp_op()
16878 * for JEQ A, B, ... - A is not null in the true branch. in check_cond_jmp_op()
16885 if (!is_jmp32 && BPF_SRC(insn->code) == BPF_X && in check_cond_jmp_op()
16887 type_may_be_null(src_reg->type) != type_may_be_null(dst_reg->type) && in check_cond_jmp_op()
16888 base_type(src_reg->type) != PTR_TO_BTF_ID && in check_cond_jmp_op()
16889 base_type(dst_reg->type) != PTR_TO_BTF_ID) { in check_cond_jmp_op()
16903 if (type_may_be_null(src_reg->type)) in check_cond_jmp_op()
16904 mark_ptr_not_null_reg(&eq_branch_regs[insn->src_reg]); in check_cond_jmp_op()
16906 mark_ptr_not_null_reg(&eq_branch_regs[insn->dst_reg]); in check_cond_jmp_op()
16914 if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K && in check_cond_jmp_op()
16915 insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) && in check_cond_jmp_op()
16916 type_may_be_null(dst_reg->type)) { in check_cond_jmp_op()
16920 mark_ptr_or_null_regs(this_branch, insn->dst_reg, in check_cond_jmp_op()
16922 mark_ptr_or_null_regs(other_branch, insn->dst_reg, in check_cond_jmp_op()
16924 } else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg], in check_cond_jmp_op()
16926 is_pointer_value(env, insn->dst_reg)) { in check_cond_jmp_op()
16928 insn->dst_reg); in check_cond_jmp_op()
16929 return -EACCES; in check_cond_jmp_op()
16931 if (env->log.level & BPF_LOG_LEVEL) in check_cond_jmp_op()
16932 print_insn_state(env, this_branch, this_branch->curframe); in check_cond_jmp_op()
16945 if (BPF_SIZE(insn->code) != BPF_DW) { in check_ld_imm()
16947 return -EINVAL; in check_ld_imm()
16949 if (insn->off != 0) { in check_ld_imm()
16951 return -EINVAL; in check_ld_imm()
16954 err = check_reg_arg(env, insn->dst_reg, DST_OP); in check_ld_imm()
16958 dst_reg = &regs[insn->dst_reg]; in check_ld_imm()
16959 if (insn->src_reg == 0) { in check_ld_imm()
16960 u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm; in check_ld_imm()
16962 dst_reg->type = SCALAR_VALUE; in check_ld_imm()
16963 __mark_reg_known(&regs[insn->dst_reg], imm); in check_ld_imm()
16968 * we either succeed and assign a corresponding dst_reg->type after in check_ld_imm()
16971 mark_reg_known_zero(env, regs, insn->dst_reg); in check_ld_imm()
16973 if (insn->src_reg == BPF_PSEUDO_BTF_ID) { in check_ld_imm()
16974 dst_reg->type = aux->btf_var.reg_type; in check_ld_imm()
16975 switch (base_type(dst_reg->type)) { in check_ld_imm()
16977 dst_reg->mem_size = aux->btf_var.mem_size; in check_ld_imm()
16980 dst_reg->btf = aux->btf_var.btf; in check_ld_imm()
16981 dst_reg->btf_id = aux->btf_var.btf_id; in check_ld_imm()
16985 return -EFAULT; in check_ld_imm()
16990 if (insn->src_reg == BPF_PSEUDO_FUNC) { in check_ld_imm()
16991 struct bpf_prog_aux *aux = env->prog->aux; in check_ld_imm()
16993 env->insn_idx + insn->imm + 1); in check_ld_imm()
16995 if (!aux->func_info) { in check_ld_imm()
16997 return -EINVAL; in check_ld_imm()
16999 if (aux->func_info_aux[subprogno].linkage != BTF_FUNC_STATIC) { in check_ld_imm()
17001 return -EINVAL; in check_ld_imm()
17004 dst_reg->type = PTR_TO_FUNC; in check_ld_imm()
17005 dst_reg->subprogno = subprogno; in check_ld_imm()
17009 map = env->used_maps[aux->map_index]; in check_ld_imm()
17010 dst_reg->map_ptr = map; in check_ld_imm()
17012 if (insn->src_reg == BPF_PSEUDO_MAP_VALUE || in check_ld_imm()
17013 insn->src_reg == BPF_PSEUDO_MAP_IDX_VALUE) { in check_ld_imm()
17014 if (map->map_type == BPF_MAP_TYPE_ARENA) { in check_ld_imm()
17018 dst_reg->type = PTR_TO_MAP_VALUE; in check_ld_imm()
17019 dst_reg->off = aux->map_off; in check_ld_imm()
17020 WARN_ON_ONCE(map->max_entries != 1); in check_ld_imm()
17021 /* We want reg->id to be same (0) as map_value is not distinct */ in check_ld_imm()
17022 } else if (insn->src_reg == BPF_PSEUDO_MAP_FD || in check_ld_imm()
17023 insn->src_reg == BPF_PSEUDO_MAP_IDX) { in check_ld_imm()
17024 dst_reg->type = CONST_PTR_TO_MAP; in check_ld_imm()
17027 return -EFAULT; in check_ld_imm()
17046 * - they can only appear in the programs where ctx == skb
17047 * - since they are wrappers of function calls, they scratch R1-R5 registers,
17048 * preserve R6-R9, and store return value into R0
17055 * IMM == 32-bit immediate
17058 * R0 - 8/16/32-bit skb data converted to cpu endianness
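/*
 * Illustrative example (not taken from this file): a classic LD_ABS use is
 * fetching the 16-bit EtherType at offset 12 of the linear skb data, e.g.
 *
 *   BPF_LD_ABS(BPF_H, 12),                      // R0 = big-endian u16 at skb->data + 12,
 *                                               //      converted to CPU byte order
 *   BPF_JMP_IMM(BPF_JNE, BPF_REG_0, ETH_P_IP, <skip>),
 *
 * Because the helper behind LD_ABS already performs that byte-order
 * conversion, the verifier below only has to validate the program type and
 * the operand encoding, not any endianness handling.
 */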
17064 u8 mode = BPF_MODE(insn->code); in check_ld_abs()
17067 if (!may_access_skb(resolve_prog_type(env->prog))) { in check_ld_abs()
17069 return -EINVAL; in check_ld_abs()
17072 if (!env->ops->gen_ld_abs) { in check_ld_abs()
17074 return -EFAULT; in check_ld_abs()
17077 if (insn->dst_reg != BPF_REG_0 || insn->off != 0 || in check_ld_abs()
17078 BPF_SIZE(insn->code) == BPF_DW || in check_ld_abs()
17079 (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) { in check_ld_abs()
17081 return -EINVAL; in check_ld_abs()
17100 return -EINVAL; in check_ld_abs()
17105 err = check_reg_arg(env, insn->src_reg, SRC_OP); in check_ld_abs()
17125 /* ld_abs load up to 32-bit skb data. */ in check_ld_abs()
17126 regs[BPF_REG_0].subreg_def = env->insn_idx + 1; in check_ld_abs()
17134 const struct bpf_prog *prog = env->prog; in check_return_code()
17137 enum bpf_prog_type prog_type = resolve_prog_type(env->prog); in check_return_code()
17139 struct bpf_func_state *frame = env->cur_state->frame[0]; in check_return_code()
17140 const bool is_subprog = frame->subprogno; in check_return_code()
17144 /* LSM and struct_ops func-ptr's return type could be "void" */ in check_return_code()
17145 if (!is_subprog || frame->in_exception_callback_fn) { in check_return_code()
17148 if (prog->expected_attach_type == BPF_LSM_CGROUP) in check_return_code()
17149 /* See below, can be 0 or 0-1 depending on hook. */ in check_return_code()
17151 if (!prog->aux->attach_func_proto->type) in check_return_code()
17155 if (!prog->aux->attach_func_proto->type) in check_return_code()
17158 if (frame->in_exception_callback_fn) in check_return_code()
17165 reg_type = reg->btf ? btf_type_by_id(reg->btf, reg->btf_id) : NULL; in check_return_code()
17166 ret_type = btf_type_resolve_ptr(prog->aux->attach_btf, in check_return_code()
17167 prog->aux->attach_func_proto->type, in check_return_code()
17169 if (ret_type && ret_type == reg_type && reg->ref_obj_id) in check_return_code()
17189 return -EACCES; in check_return_code()
17192 if (frame->in_async_callback_fn) { in check_return_code()
17194 range = frame->callback_ret_range; in check_return_code()
17198 if (is_subprog && !frame->in_exception_callback_fn) { in check_return_code()
17199 if (reg->type != SCALAR_VALUE) { in check_return_code()
17201 regno, reg_type_str(env, reg->type)); in check_return_code()
17202 return -EINVAL; in check_return_code()
17209 if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG || in check_return_code()
17210 env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG || in check_return_code()
17211 env->prog->expected_attach_type == BPF_CGROUP_UNIX_RECVMSG || in check_return_code()
17212 env->prog->expected_attach_type == BPF_CGROUP_INET4_GETPEERNAME || in check_return_code()
17213 env->prog->expected_attach_type == BPF_CGROUP_INET6_GETPEERNAME || in check_return_code()
17214 env->prog->expected_attach_type == BPF_CGROUP_UNIX_GETPEERNAME || in check_return_code()
17215 env->prog->expected_attach_type == BPF_CGROUP_INET4_GETSOCKNAME || in check_return_code()
17216 env->prog->expected_attach_type == BPF_CGROUP_INET6_GETSOCKNAME || in check_return_code()
17217 env->prog->expected_attach_type == BPF_CGROUP_UNIX_GETSOCKNAME) in check_return_code()
17219 if (env->prog->expected_attach_type == BPF_CGROUP_INET4_BIND || in check_return_code()
17220 env->prog->expected_attach_type == BPF_CGROUP_INET6_BIND) in check_return_code()
17224 if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) { in check_return_code()
17236 if (!env->prog->aux->attach_btf_id) in check_return_code()
17241 switch (env->prog->expected_attach_type) { in check_return_code()
17252 return -ENOTSUPP; in check_return_code()
17256 switch (env->prog->expected_attach_type) { in check_return_code()
17270 if (env->prog->expected_attach_type != BPF_LSM_CGROUP) { in check_return_code()
17272 if (!get_func_retval_range(env->prog, &range)) in check_return_code()
17278 } else if (!env->prog->aux->attach_func_proto->type) { in check_return_code()
17296 * depends on the to-be-replaced kernel func or bpf program. in check_return_code()
17303 if (reg->type != SCALAR_VALUE) { in check_return_code()
17305 exit_ctx, regno, reg_type_str(env, reg->type)); in check_return_code()
17306 return -EINVAL; in check_return_code()
17316 prog->expected_attach_type == BPF_LSM_CGROUP && in check_return_code()
17318 !prog->aux->attach_func_proto->type) in check_return_code()
17320 return -EINVAL; in check_return_code()
17324 tnum_in(enforce_attach_type_range, reg->var_off)) in check_return_code()
17325 env->prog->enforce_expected_attach_type = 1; in check_return_code()
17334 subprog->changes_pkt_data = true; in mark_subprog_changes_pkt_data()
17342 subprog->might_sleep = true; in mark_subprog_might_sleep()
17345 /* 't' is an index of a call-site.
17347 * This function is eventually called when env->cfg.insn_state[w] == EXPLORED.
17357 caller->changes_pkt_data |= callee->changes_pkt_data; in merge_callee_effects()
17358 caller->might_sleep |= callee->might_sleep; in merge_callee_effects()
17361 /* non-recursive DFS pseudo code
17362 * 1 procedure DFS-iterative(G,v):
17367 * 6 t <- S.peek()
17373 * 12 w <- G.adjacentVertex(t,e)
17375 * 14 label e as tree-edge
17380 * 19 label e as back-edge
17383 * 22 label e as forward- or cross-edge
17388 * 0x10 - discovered
17389 * 0x11 - discovered and fall-through edge labelled
17390 * 0x12 - discovered and fall-through and branch edges labelled
17391 * 0x20 - explored
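/*
 * Illustrative standalone sketch (not kernel code) of the same marking
 * scheme: a node is DISCOVERED while it sits on the stack and EXPLORED once
 * all of its out-edges were followed, and running into a node that is
 * DISCOVERED but not yet EXPLORED is precisely the back-edge (loop) case the
 * verifier rejects. For simplicity every "instruction" below has a single
 * successor.
 *
 *   #include <stdio.h>
 *
 *   #define DISCOVERED 0x10
 *   #define EXPLORED   0x20
 *   #define N 4
 *
 *   static int state[N];
 *   static const int next_insn[N] = { 1, 2, 3, 1 };   // 3 jumps back to 1: a loop
 *
 *   int main(void)
 *   {
 *           int stack[N], top = 0, t, w;
 *
 *           state[0] = DISCOVERED;
 *           stack[top++] = 0;
 *           while (top) {
 *                   t = stack[top - 1];
 *                   w = next_insn[t];
 *                   if (state[w] == 0) {                    // tree edge
 *                           state[w] = DISCOVERED;
 *                           stack[top++] = w;
 *                   } else if (state[w] == DISCOVERED) {    // back edge
 *                           printf("back-edge from insn %d to %d\n", t, w);
 *                           return 1;
 *                   } else {                                // successor fully explored
 *                           state[t] |= EXPLORED;
 *                           top--;
 *                   }
 *           }
 *           return 0;
 *   }
 */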
17403 env->insn_aux_data[idx].prune_point = true; in mark_prune_point()
17408 return env->insn_aux_data[insn_idx].prune_point; in is_prune_point()
17413 env->insn_aux_data[idx].force_checkpoint = true; in mark_force_checkpoint()
17418 return env->insn_aux_data[insn_idx].force_checkpoint; in is_force_checkpoint()
17423 env->insn_aux_data[idx].calls_callback = true; in mark_calls_callback()
17428 return env->insn_aux_data[insn_idx].calls_callback; in bpf_calls_callback()
17436 /* t, w, e - match pseudo-code above:
17437 * t - index of current instruction
17438 * w - next instruction
17439 * e - edge
17443 int *insn_stack = env->cfg.insn_stack; in push_insn()
17444 int *insn_state = env->cfg.insn_state; in push_insn()
17452 if (w < 0 || w >= env->prog->len) { in push_insn()
17455 return -EINVAL; in push_insn()
17465 /* tree-edge */ in push_insn()
17468 if (env->cfg.cur_stack >= env->prog->len) in push_insn()
17469 return -E2BIG; in push_insn()
17470 insn_stack[env->cfg.cur_stack++] = w; in push_insn()
17473 if (env->bpf_capable) in push_insn()
17477 verbose(env, "back-edge from insn %d to %d\n", t, w); in push_insn()
17478 return -EINVAL; in push_insn()
17480 /* forward- or cross-edge */ in push_insn()
17484 return -EFAULT; in push_insn()
17502 /* when we exit from subprog, we need to record non-linear history */ in visit_func_call_insn()
17515 #define ALL_CALLER_SAVED_REGS ((1u << CALLER_SAVED_REGS) - 1)
17526 return env->prog->jit_requested && bpf_jit_supports_percpu_insn(); in verifier_inlines_helper_call()
17551 if (get_helper_proto(env, call->imm, &fn) < 0) in get_call_summary()
17554 cs->fastcall = fn->allow_fastcall && in get_call_summary()
17555 (verifier_inlines_helper_call(env, call->imm) || in get_call_summary()
17556 bpf_jit_inlines_helper_call(call->imm)); in get_call_summary()
17557 cs->is_void = fn->ret_type == RET_VOID; in get_call_summary()
17558 cs->num_params = 0; in get_call_summary()
17559 for (i = 0; i < ARRAY_SIZE(fn->arg_type); ++i) { in get_call_summary()
17560 if (fn->arg_type[i] == ARG_DONTCARE) in get_call_summary()
17562 cs->num_params++; in get_call_summary()
17574 cs->num_params = btf_type_vlen(meta.func_proto); in get_call_summary()
17575 cs->fastcall = meta.kfunc_flags & KF_FASTCALL; in get_call_summary()
17576 cs->is_void = btf_type_is_void(btf_type_by_id(meta.btf, meta.func_proto->type)); in get_call_summary()
17587 * - R0 is scratched only if function is non-void;
17588 * - R1-R5 are scratched only if corresponding parameter type is defined
17595 * - for bpf_fastcall calls clang allocates registers as-if relevant r0-r5
17598 * - as a post-processing step, clang visits each bpf_fastcall call and adds
17599 * spill/fill for every live r0-r5;
17601 * - stack offsets used for the spill/fill are allocated as lowest
17605 * - when kernel loads a program, it looks for such patterns
17609 * - if so, and if verifier or current JIT inlines the call to the
17613 * - when old kernel loads a program, presence of spill/fill pairs
17620 * *(u64 *)(r10 - 8) = r1; r1 = 1;
17621 * *(u64 *)(r10 - 16) = r2; r2 = 2;
17622 * call %[to_be_inlined] --> call %[to_be_inlined]
17623 * r2 = *(u64 *)(r10 - 16); r0 = r1;
17624 * r1 = *(u64 *)(r10 - 8); r0 += r2;
17630 * - look for such patterns;
17631 * - mark spill and fill instructions in env->insn_aux_data[*].fastcall_pattern;
17632 * - mark set env->insn_aux_data[*].fastcall_spills_num for call instruction;
17633 * - update env->subprog_info[*]->fastcall_stack_off to find an offset
17635 * - update env->subprog_info[*]->keep_fastcall_stack.
17650 * *(u64 *)(r10 - 8) = r1; r1 = 1;
17651 * call %[to_be_inlined] --> call %[to_be_inlined]
17652 * r1 = *(u64 *)(r10 - 8); r0 = *(u64 *)(r10 - 8); <---- wrong !!!
17653 * r0 = *(u64 *)(r10 - 8); r0 += r1;
17661 struct bpf_insn *insns = env->prog->insnsi, *stx, *ldx; in mark_fastcall_pattern_for_call()
17662 struct bpf_insn *call = &env->prog->insnsi[insn_idx]; in mark_fastcall_pattern_for_call()
17675 * - includes R0 if function is non-void; in mark_fastcall_pattern_for_call()
17676 * - includes R1-R5 if the corresponding parameter is described in mark_fastcall_pattern_for_call()
17685 * *(u64 *)(r10 - Y) = rX (where Y % 8 == 0) in mark_fastcall_pattern_for_call()
17689 * rX = *(u64 *)(r10 - Y) in mark_fastcall_pattern_for_call()
17692 if (insn_idx - i < 0 || insn_idx + i >= env->prog->len) in mark_fastcall_pattern_for_call()
17694 stx = &insns[insn_idx - i]; in mark_fastcall_pattern_for_call()
17697 if (stx->code != (BPF_STX | BPF_MEM | BPF_DW) || in mark_fastcall_pattern_for_call()
17698 ldx->code != (BPF_LDX | BPF_MEM | BPF_DW) || in mark_fastcall_pattern_for_call()
17699 stx->dst_reg != BPF_REG_10 || in mark_fastcall_pattern_for_call()
17700 ldx->src_reg != BPF_REG_10) in mark_fastcall_pattern_for_call()
17703 if (stx->src_reg != ldx->dst_reg) in mark_fastcall_pattern_for_call()
17706 if ((BIT(stx->src_reg) & expected_regs_mask) == 0) in mark_fastcall_pattern_for_call()
17710 * is always 8-byte aligned. in mark_fastcall_pattern_for_call()
17712 if (stx->off != off || ldx->off != off) in mark_fastcall_pattern_for_call()
17714 expected_regs_mask &= ~BIT(stx->src_reg); in mark_fastcall_pattern_for_call()
17715 env->insn_aux_data[insn_idx - i].fastcall_pattern = 1; in mark_fastcall_pattern_for_call()
17716 env->insn_aux_data[insn_idx + i].fastcall_pattern = 1; in mark_fastcall_pattern_for_call()
17725 * 1: *(u64 *)(r10 - 8) = r1 in mark_fastcall_pattern_for_call()
17727 * 3: r1 = *(u64 *)(r10 - 8) in mark_fastcall_pattern_for_call()
17728 * 4: *(u64 *)(r10 - 8) = r1 in mark_fastcall_pattern_for_call()
17730 * 6: r1 = *(u64 *)(r10 - 8) in mark_fastcall_pattern_for_call()
17738 env->insn_aux_data[insn_idx].fastcall_spills_num = i - 1; in mark_fastcall_pattern_for_call()
17740 subprog->keep_fastcall_stack = 1; in mark_fastcall_pattern_for_call()
17741 subprog->fastcall_stack_off = min(subprog->fastcall_stack_off, off); in mark_fastcall_pattern_for_call()
17746 struct bpf_subprog_info *subprog = env->subprog_info; in mark_fastcall_patterns()
17751 for (s = 0; s < env->subprog_cnt; ++s, ++subprog) { in mark_fastcall_patterns()
17754 for (i = subprog->start; i < (subprog + 1)->start; ++i) { in mark_fastcall_patterns()
17755 insn = env->prog->insnsi + i; in mark_fastcall_patterns()
17756 if (insn->code != (BPF_STX | BPF_MEM | BPF_DW) || in mark_fastcall_patterns()
17757 insn->dst_reg != BPF_REG_10) in mark_fastcall_patterns()
17759 lowest_off = min(lowest_off, insn->off); in mark_fastcall_patterns()
17762 for (i = subprog->start; i < (subprog + 1)->start; ++i) { in mark_fastcall_patterns()
17763 insn = env->prog->insnsi + i; in mark_fastcall_patterns()
17764 if (insn->code != (BPF_JMP | BPF_CALL)) in mark_fastcall_patterns()
17773 * < 0 - an error occurred
17774 * DONE_EXPLORING - the instruction was fully explored
17775 * KEEP_EXPLORING - there is still work to be done before it is fully explored
17779 struct bpf_insn *insns = env->prog->insnsi, *insn = &insns[t]; in visit_insn()
17785 /* All non-branch instructions have a single fall-through edge. */ in visit_insn()
17786 if (BPF_CLASS(insn->code) != BPF_JMP && in visit_insn()
17787 BPF_CLASS(insn->code) != BPF_JMP32) { in visit_insn()
17792 switch (BPF_OP(insn->code)) { in visit_insn()
17822 ret = get_helper_proto(env, insn->imm, &fp); in visit_insn()
17823 /* If called in a non-sleepable context, the program will be in visit_insn()
17828 if (ret == 0 && fp->might_sleep) in visit_insn()
17830 if (bpf_helper_changes_pkt_data(insn->imm)) in visit_insn()
17832 } else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) { in visit_insn()
17839 * is crucial for fast convergence of open-coded iterator loop in visit_insn()
17845 * It is expected that with correct open-coded iterators in visit_insn()
17851 /* Same as helpers, if called in a non-sleepable context in visit_insn()
17861 return visit_func_call_insn(t, insns, env, insn->src_reg == BPF_PSEUDO_CALL); in visit_insn()
17864 if (BPF_SRC(insn->code) != BPF_K) in visit_insn()
17865 return -EINVAL; in visit_insn()
17867 if (BPF_CLASS(insn->code) == BPF_JMP) in visit_insn()
17868 off = insn->off; in visit_insn()
17870 off = insn->imm; in visit_insn()
17892 return push_insn(t, t + insn->off + 1, BRANCH, env); in visit_insn()
17896 /* non-recursive depth-first-search to detect loops in BPF program
17897 * loop == back-edge in directed graph
17901 int insn_cnt = env->prog->len; in check_cfg()
17905 insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL_ACCOUNT); in check_cfg()
17907 return -ENOMEM; in check_cfg()
17909 insn_stack = env->cfg.insn_stack = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL_ACCOUNT); in check_cfg()
17912 return -ENOMEM; in check_cfg()
17915 ex_insn_beg = env->exception_callback_subprog in check_cfg()
17916 ? env->subprog_info[env->exception_callback_subprog].start in check_cfg()
17921 env->cfg.cur_stack = 1; in check_cfg()
17924 while (env->cfg.cur_stack > 0) { in check_cfg()
17925 int t = insn_stack[env->cfg.cur_stack - 1]; in check_cfg()
17931 env->cfg.cur_stack--; in check_cfg()
17938 ret = -EFAULT; in check_cfg()
17944 if (env->cfg.cur_stack < 0) { in check_cfg()
17946 ret = -EFAULT; in check_cfg()
17953 env->cfg.cur_stack = 1; in check_cfg()
17958 struct bpf_insn *insn = &env->prog->insnsi[i]; in check_cfg()
17962 ret = -EINVAL; in check_cfg()
17968 ret = -EINVAL; in check_cfg()
17975 env->prog->aux->changes_pkt_data = env->subprog_info[0].changes_pkt_data; in check_cfg()
17976 env->prog->aux->might_sleep = env->subprog_info[0].might_sleep; in check_cfg()
17981 env->cfg.insn_state = env->cfg.insn_stack = NULL; in check_cfg()
17986 * For each subprogram 'i' fill the env->cfg.insn_postorder sub-range
17987 * [env->subprog_info[i].postorder_start, env->subprog_info[i+1].postorder_start)
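/*
 * Editor's note: stand-alone illustration (not verifier API) of consuming
 * the layout described above: the post-order of subprogram 'i' occupies
 * insn_postorder[postorder_start[i] .. postorder_start[i + 1]).
 */
static void walk_subprog_postorder(const int *insn_postorder,
				   const int *postorder_start,
				   int subprog, void (*visit)(int insn_idx))
{
	int j;

	for (j = postorder_start[subprog]; j < postorder_start[subprog + 1]; j++)
		visit(insn_postorder[j]);
}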
17995 postorder = kvcalloc(env->prog->len, sizeof(int), GFP_KERNEL_ACCOUNT); in compute_postorder()
17996 state = kvcalloc(env->prog->len, sizeof(int), GFP_KERNEL_ACCOUNT); in compute_postorder()
17997 stack = kvcalloc(env->prog->len, sizeof(int), GFP_KERNEL_ACCOUNT); in compute_postorder()
18002 return -ENOMEM; in compute_postorder()
18005 for (i = 0; i < env->subprog_cnt; i++) { in compute_postorder()
18006 env->subprog_info[i].postorder_start = cur_postorder; in compute_postorder()
18007 stack[0] = env->subprog_info[i].start; in compute_postorder()
18010 top = stack[stack_sz - 1]; in compute_postorder()
18014 stack_sz--; in compute_postorder()
18017 succ_cnt = bpf_insn_successors(env->prog, top, succ); in compute_postorder()
18027 env->subprog_info[i].postorder_start = cur_postorder; in compute_postorder()
18028 env->cfg.insn_postorder = postorder; in compute_postorder()
18029 env->cfg.cur_postorder = cur_postorder; in compute_postorder()
18039 for (i = 1; i < env->subprog_cnt; i++) { in check_abnormal_return()
18040 if (env->subprog_info[i].has_ld_abs) { in check_abnormal_return()
18042 return -EINVAL; in check_abnormal_return()
18044 if (env->subprog_info[i].has_tail_call) { in check_abnormal_return()
18046 return -EINVAL; in check_abnormal_return()
18068 int ret = -ENOMEM; in check_btf_func_early()
18070 nfuncs = attr->func_info_cnt; in check_btf_func_early()
18073 return -EINVAL; in check_btf_func_early()
18077 urec_size = attr->func_info_rec_size; in check_btf_func_early()
18082 return -EINVAL; in check_btf_func_early()
18085 prog = env->prog; in check_btf_func_early()
18086 btf = prog->aux->btf; in check_btf_func_early()
18088 urecord = make_bpfptr(attr->func_info, uattr.is_kernel); in check_btf_func_early()
18093 return -ENOMEM; in check_btf_func_early()
18098 if (ret == -E2BIG) { in check_btf_func_early()
18106 ret = -EFAULT; in check_btf_func_early()
18112 ret = -EFAULT; in check_btf_func_early()
18117 ret = -EINVAL; in check_btf_func_early()
18140 func_proto = btf_type_by_id(btf, type->type); in check_btf_func_early()
18149 prog->aux->func_info = krecord; in check_btf_func_early()
18150 prog->aux->func_info_cnt = nfuncs; in check_btf_func_early()
18170 int ret = -ENOMEM; in check_btf_func()
18172 nfuncs = attr->func_info_cnt; in check_btf_func()
18175 return -EINVAL; in check_btf_func()
18178 if (nfuncs != env->subprog_cnt) { in check_btf_func()
18180 return -EINVAL; in check_btf_func()
18183 urec_size = attr->func_info_rec_size; in check_btf_func()
18185 prog = env->prog; in check_btf_func()
18186 btf = prog->aux->btf; in check_btf_func()
18188 urecord = make_bpfptr(attr->func_info, uattr.is_kernel); in check_btf_func()
18190 krecord = prog->aux->func_info; in check_btf_func()
18193 return -ENOMEM; in check_btf_func()
18197 ret = -EINVAL; in check_btf_func()
18199 if (env->subprog_info[i].start != krecord[i].insn_off) { in check_btf_func()
18206 info_aux[i].linkage = BTF_INFO_VLEN(type->info); in check_btf_func()
18208 func_proto = btf_type_by_id(btf, type->type); in check_btf_func()
18210 ret_type = btf_type_skip_modifiers(btf, func_proto->type, NULL); in check_btf_func()
18213 if (i && !scalar_return && env->subprog_info[i].has_ld_abs) { in check_btf_func()
18217 if (i && !scalar_return && env->subprog_info[i].has_tail_call) { in check_btf_func()
18225 prog->aux->func_info_aux = info_aux; in check_btf_func()
18235 struct bpf_prog_aux *aux = env->prog->aux; in adjust_btf_func()
18238 if (!aux->func_info) in adjust_btf_func()
18242 for (i = 0; i < env->subprog_cnt - env->hidden_subprog_cnt; i++) in adjust_btf_func()
18243 aux->func_info[i].insn_off = env->subprog_info[i].start; in adjust_btf_func()
18261 nr_linfo = attr->line_info_cnt; in check_btf_line()
18265 return -EINVAL; in check_btf_line()
18267 rec_size = attr->line_info_rec_size; in check_btf_line()
18270 rec_size & (sizeof(u32) - 1)) in check_btf_line()
18271 return -EINVAL; in check_btf_line()
18279 return -ENOMEM; in check_btf_line()
18281 prog = env->prog; in check_btf_line()
18282 btf = prog->aux->btf; in check_btf_line()
18285 sub = env->subprog_info; in check_btf_line()
18286 ulinfo = make_bpfptr(attr->line_info, uattr.is_kernel); in check_btf_line()
18292 if (err == -E2BIG) { in check_btf_line()
18297 err = -EFAULT; in check_btf_line()
18303 err = -EFAULT; in check_btf_line()
18310 * 2) bounded by prog->len in check_btf_line()
18319 linfo[i].insn_off >= prog->len) { in check_btf_line()
18320 verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n", in check_btf_line()
18322 prog->len); in check_btf_line()
18323 err = -EINVAL; in check_btf_line()
18327 if (!prog->insnsi[linfo[i].insn_off].code) { in check_btf_line()
18331 err = -EINVAL; in check_btf_line()
18338 err = -EINVAL; in check_btf_line()
18342 if (s != env->subprog_cnt) { in check_btf_line()
18348 err = -EINVAL; in check_btf_line()
18357 if (s != env->subprog_cnt) { in check_btf_line()
18359 env->subprog_cnt - s, s); in check_btf_line()
18360 err = -EINVAL; in check_btf_line()
18364 prog->aux->linfo = linfo; in check_btf_line()
18365 prog->aux->nr_linfo = nr_linfo; in check_btf_line()
18383 struct bpf_prog *prog = env->prog; in check_core_relo()
18384 const struct btf *btf = prog->aux->btf; in check_core_relo()
18386 .log = &env->log, in check_core_relo()
18392 nr_core_relo = attr->core_relo_cnt; in check_core_relo()
18396 return -EINVAL; in check_core_relo()
18398 rec_size = attr->core_relo_rec_size; in check_core_relo()
18402 return -EINVAL; in check_core_relo()
18404 u_core_relo = make_bpfptr(attr->core_relos, uattr.is_kernel); in check_core_relo()
18408 /* Unlike func_info and line_info, copy and apply each CO-RE in check_core_relo()
18415 if (err == -E2BIG) { in check_core_relo()
18420 err = -EFAULT; in check_core_relo()
18426 err = -EFAULT; in check_core_relo()
18430 if (core_relo.insn_off % 8 || core_relo.insn_off / 8 >= prog->len) { in check_core_relo()
18431 verbose(env, "Invalid core_relo[%u].insn_off:%u prog->len:%u\n", in check_core_relo()
18432 i, core_relo.insn_off, prog->len); in check_core_relo()
18433 err = -EINVAL; in check_core_relo()
18438 &prog->insnsi[core_relo.insn_off / 8]); in check_core_relo()
18453 if (!attr->func_info_cnt && !attr->line_info_cnt) { in check_btf_info_early()
18455 return -EINVAL; in check_btf_info_early()
18459 btf = btf_get_by_fd(attr->prog_btf_fd); in check_btf_info_early()
18464 return -EACCES; in check_btf_info_early()
18466 env->prog->aux->btf = btf; in check_btf_info_early()
18480 if (!attr->func_info_cnt && !attr->line_info_cnt) { in check_btf_info()
18482 return -EINVAL; in check_btf_info()
18505 return old->umin_value <= cur->umin_value && in range_within()
18506 old->umax_value >= cur->umax_value && in range_within()
18507 old->smin_value <= cur->smin_value && in range_within()
18508 old->smax_value >= cur->smax_value && in range_within()
18509 old->u32_min_value <= cur->u32_min_value && in range_within()
18510 old->u32_max_value >= cur->u32_max_value && in range_within()
18511 old->s32_min_value <= cur->s32_min_value && in range_within()
18512 old->s32_max_value >= cur->s32_max_value; in range_within()
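/*
 * Editor's note: a stand-alone illustration of the containment check
 * above, using a trimmed-down bounds struct (the real function also
 * checks the 32-bit sub-register bounds in the same way). An
 * already-verified (old) register range is only safe to reuse if the
 * current range fits entirely inside it.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct bounds {
	int64_t smin, smax;
	uint64_t umin, umax;
};

static bool bounds_within(const struct bounds *old, const struct bounds *cur)
{
	return old->umin <= cur->umin && old->umax >= cur->umax &&
	       old->smin <= cur->smin && old->smax >= cur->smax;
}

static void bounds_within_example(void)
{
	struct bounds old = { .smin = 0, .smax = 100, .umin = 0, .umax = 100 };
	struct bounds cur = { .smin = 10, .smax = 20, .umin = 10, .umax = 20 };

	assert(bounds_within(&old, &cur));	/* narrower current range: safe */
	cur.smax = 200;
	cur.umax = 200;
	assert(!bounds_within(&old, &cur));	/* wider current range: not safe */
}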
18527 struct bpf_id_pair *map = idmap->map; in check_ids()
18560 old_id = old_id ? old_id : ++idmap->tmp_id_gen; in check_scalar_ids()
18561 cur_id = cur_id ? cur_id : ++idmap->tmp_id_gen; in check_scalar_ids()
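/*
 * Editor's note: stand-alone sketch of the id-mapping rule enforced by
 * check_ids()/check_scalar_ids() above: ids must map one-to-one between
 * the old and current state, so registers that were linked (shared an id)
 * in the old state stay linked in the current one. Struct names and the
 * map size are stand-ins, not the verifier's.
 */
#include <stdbool.h>

#define ID_MAP_SIZE 64

struct id_pair { unsigned int old_id, cur_id; };
struct id_map  { struct id_pair map[ID_MAP_SIZE]; };

static bool ids_match(unsigned int old_id, unsigned int cur_id, struct id_map *idmap)
{
	unsigned int i;

	/* either both ids are set or both are zero */
	if (!!old_id != !!cur_id)
		return false;
	if (old_id == 0)
		return true;

	for (i = 0; i < ID_MAP_SIZE; i++) {
		struct id_pair *ent = &idmap->map[i];

		if (!ent->old_id) {		/* free slot: remember the new pair */
			ent->old_id = old_id;
			ent->cur_id = cur_id;
			return true;
		}
		if (ent->old_id == old_id)
			return ent->cur_id == cur_id;
		if (ent->cur_id == cur_id)
			return false;		/* cur id already bound to another old id */
	}
	return false;				/* map exhausted: be conservative */
}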
18570 u16 live_regs = env->insn_aux_data[ip].live_regs_before; in clean_func_state()
18579 __mark_reg_not_init(env, &st->regs[i]); in clean_func_state()
18582 for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) { in clean_func_state()
18583 if (!bpf_stack_slot_alive(env, st->frameno, i)) { in clean_func_state()
18584 __mark_reg_not_init(env, &st->stack[i].spilled_ptr); in clean_func_state()
18586 st->stack[i].slot_type[j] = STACK_INVALID; in clean_func_state()
18597 st->cleaned = true; in clean_verifier_state()
18598 for (i = 0; i <= st->curframe; i++) { in clean_verifier_state()
18600 clean_func_state(env, st->frame[i], ip); in clean_verifier_state()
18612 * 1: *(u64 *)(r10 - 8) = 1
18614 * 3: *(u64 *)(r10 - 8) = 2
18615 * 4: r0 = *(u64 *)(r10 - 8)
18617 * when the verifier reaches exit insn the stack slot -8 in the state list of
18620 * analysis would propagate read mark for -8 at insn 2.
18643 if (sl->state.branches) in clean_live_states()
18645 if (sl->state.insn_idx != insn || in clean_live_states()
18646 !same_callsites(&sl->state, cur)) in clean_live_states()
18648 if (sl->state.cleaned) in clean_live_states()
18651 if (incomplete_read_marks(env, &sl->state)) in clean_live_states()
18653 clean_verifier_state(env, &sl->state); in clean_live_states()
18662 check_ids(rold->id, rcur->id, idmap) && in regs_exact()
18663 check_ids(rold->ref_obj_id, rcur->ref_obj_id, idmap); in regs_exact()
18680 if (rold->type == NOT_INIT) { in regsafe()
18681 if (exact == NOT_EXACT || rcur->type == NOT_INIT) in regsafe()
18703 * a non-MAYBE_NULL variant. in regsafe()
18705 * non-MAYBE_NULL registers as well. in regsafe()
18707 if (rold->type != rcur->type) in regsafe()
18710 switch (base_type(rold->type)) { in regsafe()
18712 if (env->explore_alu_limits) { in regsafe()
18717 check_scalar_ids(rold->id, rcur->id, idmap); in regsafe()
18719 if (!rold->precise && exact == NOT_EXACT) in regsafe()
18721 if ((rold->id & BPF_ADD_CONST) != (rcur->id & BPF_ADD_CONST)) in regsafe()
18723 if ((rold->id & BPF_ADD_CONST) && (rold->off != rcur->off)) in regsafe()
18735 * First verification path is [1-6]: in regsafe()
18736 * - at (4) same bpf_reg_state::id (b) would be assigned to r6 and r7; in regsafe()
18737 * - at (5) r6 would be marked <= X, sync_linked_regs() would also mark in regsafe()
18739 * Next verification path is [1-4, 6]. in regsafe()
18742 * I. r6{.id=b}, r7{.id=b} via path 1-6; in regsafe()
18743 * II. r6{.id=a}, r7{.id=b} via path 1-4, 6. in regsafe()
18746 * --- in regsafe()
18750 tnum_in(rold->var_off, rcur->var_off) && in regsafe()
18751 check_scalar_ids(rold->id, rcur->id, idmap); in regsafe()
18762 tnum_in(rold->var_off, rcur->var_off) && in regsafe()
18763 check_ids(rold->id, rcur->id, idmap) && in regsafe()
18764 check_ids(rold->ref_obj_id, rcur->ref_obj_id, idmap); in regsafe()
18770 * since someone could have accessed through (ptr - k), or in regsafe()
18771 * even done ptr -= k in a register, to get a safe access. in regsafe()
18773 if (rold->range > rcur->range) in regsafe()
18778 if (rold->off != rcur->off) in regsafe()
18781 if (!check_ids(rold->id, rcur->id, idmap)) in regsafe()
18785 tnum_in(rold->var_off, rcur->var_off); in regsafe()
18788 * the same stack frame, since fp-8 in foo != fp-8 in bar in regsafe()
18790 return regs_exact(rold, rcur, idmap) && rold->frameno == rcur->frameno; in regsafe()
18812 for (i = 0; i < ARRAY_SIZE(stack->slot_type); ++i) { in is_stack_all_misc()
18813 if ((stack->slot_type[i] == STACK_MISC) || in is_stack_all_misc()
18814 (stack->slot_type[i] == STACK_INVALID && env->allow_uninit_stack)) in is_stack_all_misc()
18826 return &stack->spilled_ptr; in scalar_reg_for_stack()
18844 for (i = 0; i < old->allocated_stack; i++) { in stacksafe()
18850 (i >= cur->allocated_stack || in stacksafe()
18851 old->stack[spi].slot_type[i % BPF_REG_SIZE] != in stacksafe()
18852 cur->stack[spi].slot_type[i % BPF_REG_SIZE])) in stacksafe()
18855 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID) in stacksafe()
18858 if (env->allow_uninit_stack && in stacksafe()
18859 old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC) in stacksafe()
18865 if (i >= cur->allocated_stack) in stacksafe()
18868 /* 64-bit scalar spill vs all slots MISC and vice versa. in stacksafe()
18873 old_reg = scalar_reg_for_stack(env, &old->stack[spi]); in stacksafe()
18874 cur_reg = scalar_reg_for_stack(env, &cur->stack[spi]); in stacksafe()
18878 i += BPF_REG_SIZE - 1; in stacksafe()
18883 * it will be safe with zero-initialized stack. in stacksafe()
18886 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC && in stacksafe()
18887 cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO) in stacksafe()
18889 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] != in stacksafe()
18890 cur->stack[spi].slot_type[i % BPF_REG_SIZE]) in stacksafe()
18892 * this stack slot, but current has STACK_MISC -> in stacksafe()
18897 if (i % BPF_REG_SIZE != BPF_REG_SIZE - 1) in stacksafe()
18900 switch (old->stack[spi].slot_type[BPF_REG_SIZE - 1]) { in stacksafe()
18906 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8} in stacksafe()
18908 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16} in stacksafe()
18912 if (!regsafe(env, &old->stack[spi].spilled_ptr, in stacksafe()
18913 &cur->stack[spi].spilled_ptr, idmap, exact)) in stacksafe()
18917 old_reg = &old->stack[spi].spilled_ptr; in stacksafe()
18918 cur_reg = &cur->stack[spi].spilled_ptr; in stacksafe()
18919 if (old_reg->dynptr.type != cur_reg->dynptr.type || in stacksafe()
18920 old_reg->dynptr.first_slot != cur_reg->dynptr.first_slot || in stacksafe()
18921 !check_ids(old_reg->ref_obj_id, cur_reg->ref_obj_id, idmap)) in stacksafe()
18925 old_reg = &old->stack[spi].spilled_ptr; in stacksafe()
18926 cur_reg = &cur->stack[spi].spilled_ptr; in stacksafe()
18933 if (old_reg->iter.btf != cur_reg->iter.btf || in stacksafe()
18934 old_reg->iter.btf_id != cur_reg->iter.btf_id || in stacksafe()
18935 old_reg->iter.state != cur_reg->iter.state || in stacksafe()
18936 /* ignore {old_reg,cur_reg}->iter.depth, see above */ in stacksafe()
18937 !check_ids(old_reg->ref_obj_id, cur_reg->ref_obj_id, idmap)) in stacksafe()
18941 old_reg = &old->stack[spi].spilled_ptr; in stacksafe()
18942 cur_reg = &cur->stack[spi].spilled_ptr; in stacksafe()
18943 if (!check_ids(old_reg->ref_obj_id, cur_reg->ref_obj_id, idmap) || in stacksafe()
18944 old_reg->irq.kfunc_class != cur_reg->irq.kfunc_class) in stacksafe()
18964 if (old->acquired_refs != cur->acquired_refs) in refsafe()
18967 if (old->active_locks != cur->active_locks) in refsafe()
18970 if (old->active_preempt_locks != cur->active_preempt_locks) in refsafe()
18973 if (old->active_rcu_lock != cur->active_rcu_lock) in refsafe()
18976 if (!check_ids(old->active_irq_id, cur->active_irq_id, idmap)) in refsafe()
18979 if (!check_ids(old->active_lock_id, cur->active_lock_id, idmap) || in refsafe()
18980 old->active_lock_ptr != cur->active_lock_ptr) in refsafe()
18983 for (i = 0; i < old->acquired_refs; i++) { in refsafe()
18984 if (!check_ids(old->refs[i].id, cur->refs[i].id, idmap) || in refsafe()
18985 old->refs[i].type != cur->refs[i].type) in refsafe()
18987 switch (old->refs[i].type) { in refsafe()
18994 if (old->refs[i].ptr != cur->refs[i].ptr) in refsafe()
18998 WARN_ONCE(1, "Unhandled enum type for reference state: %d\n", old->refs[i].type); in refsafe()
19035 u16 live_regs = env->insn_aux_data[insn_idx].live_regs_before; in func_states_equal()
19038 if (old->callback_depth > cur->callback_depth) in func_states_equal()
19043 !regsafe(env, &old->regs[i], &cur->regs[i], in func_states_equal()
19044 &env->idmap_scratch, exact)) in func_states_equal()
19047 if (!stacksafe(env, old, cur, &env->idmap_scratch, exact)) in func_states_equal()
19055 env->idmap_scratch.tmp_id_gen = env->id_gen; in reset_idmap_scratch()
19056 memset(&env->idmap_scratch.map, 0, sizeof(env->idmap_scratch.map)); in reset_idmap_scratch()
19067 if (old->curframe != cur->curframe) in states_equal()
19073 * must never prune a non-speculative execution one. in states_equal()
19075 if (old->speculative && !cur->speculative) in states_equal()
19078 if (old->in_sleepable != cur->in_sleepable) in states_equal()
19081 if (!refsafe(old, cur, &env->idmap_scratch)) in states_equal()
19087 for (i = 0; i <= old->curframe; i++) { in states_equal()
19089 if (old->frame[i]->callsite != cur->frame[i]->callsite) in states_equal()
19091 if (!func_states_equal(env, old->frame[i], cur->frame[i], insn_idx, exact)) in states_equal()
19110 for (fr = old->curframe; fr >= 0; fr--) { in propagate_precision()
19111 state = old->frame[fr]; in propagate_precision()
19112 state_reg = state->regs; in propagate_precision()
19115 if (state_reg->type != SCALAR_VALUE || in propagate_precision()
19116 !state_reg->precise) in propagate_precision()
19118 if (env->log.level & BPF_LOG_LEVEL2) { in propagate_precision()
19124 bt_set_frame_reg(&env->bt, fr, i); in propagate_precision()
19128 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { in propagate_precision()
19129 if (!is_spilled_reg(&state->stack[i])) in propagate_precision()
19131 state_reg = &state->stack[i].spilled_ptr; in propagate_precision()
19132 if (state_reg->type != SCALAR_VALUE || in propagate_precision()
19133 !state_reg->precise) in propagate_precision()
19135 if (env->log.level & BPF_LOG_LEVEL2) { in propagate_precision()
19138 fr, (-i - 1) * BPF_REG_SIZE); in propagate_precision()
19140 verbose(env, ",fp%d", (-i - 1) * BPF_REG_SIZE); in propagate_precision()
19142 bt_set_frame_slot(&env->bt, fr, i); in propagate_precision()
19149 err = __mark_chain_precision(env, cur, -1, changed); in propagate_precision()
19158 /* Propagate read and precision marks from visit->backedges[*].state->equal_state
19159 * to corresponding parent states of visit->backedges[*].state until fixed point is reached,
19160 * then free visit->backedges.
19162 * for all states corresponding to @visit->callchain.
19174 if (env->log.level & BPF_LOG_LEVEL2) in propagate_backedges()
19176 for (backedge = visit->backedges; backedge; backedge = backedge->next) in propagate_backedges()
19177 mark_all_scalars_precise(env, &backedge->state); in propagate_backedges()
19181 for (backedge = visit->backedges; backedge; backedge = backedge->next) { in propagate_backedges()
19182 st = &backedge->state; in propagate_backedges()
19183 err = propagate_precision(env, st->equal_state, st, &changed); in propagate_backedges()
19197 int i, fr = cur->curframe; in states_maybe_looping()
19199 if (old->curframe != fr) in states_maybe_looping()
19202 fold = old->frame[fr]; in states_maybe_looping()
19203 fcur = cur->frame[fr]; in states_maybe_looping()
19205 if (memcmp(&fold->regs[i], &fcur->regs[i], in states_maybe_looping()
19213 return env->insn_aux_data[insn_idx].is_iter_next; in is_iter_next_insn()
19223 * Here's a situation in pseudo-BPF assembly form:
19243 * 3-5, come to goto, jump to 1:. Let's assume our state didn't change, so we
19250 * another ACTIVE iteration, we bump slot->iter.depth, to mark that it's
19270 * while (x--) {} // <<-- infinite loop here
19280 for (fr = old->curframe; fr >= 0; fr--) { in iter_active_depths_differ()
19281 state = old->frame[fr]; in iter_active_depths_differ()
19282 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { in iter_active_depths_differ()
19283 if (state->stack[i].slot_type[0] != STACK_ITER) in iter_active_depths_differ()
19286 slot = &state->stack[i].spilled_ptr; in iter_active_depths_differ()
19287 if (slot->iter.state != BPF_ITER_STATE_ACTIVE) in iter_active_depths_differ()
19290 cur_slot = &cur->frame[fr]->stack[i].spilled_ptr; in iter_active_depths_differ()
19291 if (cur_slot->iter.depth != slot->iter.depth) in iter_active_depths_differ()
19302 struct bpf_verifier_state *cur = env->cur_state, *new; in is_state_visited()
19307 force_new_state = env->test_state_freq || is_force_checkpoint(env, insn_idx) || in is_state_visited()
19309 cur->jmp_history_cnt > 40; in is_state_visited()
19312 * http://vger.kernel.org/bpfconf2019.html#session-1 in is_state_visited()
19320 if (env->jmps_processed - env->prev_jmps_processed >= 2 && in is_state_visited()
19321 env->insn_processed - env->prev_insn_processed >= 8) in is_state_visited()
19331 if (sl->state.insn_idx != insn_idx) in is_state_visited()
19334 if (sl->state.branches) { in is_state_visited()
19335 struct bpf_func_state *frame = sl->state.frame[sl->state.curframe]; in is_state_visited()
19337 if (frame->in_async_callback_fn && in is_state_visited()
19338 frame->async_entry_cnt != cur->frame[cur->curframe]->async_entry_cnt) { in is_state_visited()
19352 /* BPF open-coded iterators loop detection is special. in is_state_visited()
19368 * 1. r7 = -16 in is_state_visited()
19370 * 3. while (bpf_iter_num_next(&fp[-8])) { in is_state_visited()
19372 * 5. r7 = -32 in is_state_visited()
19382 * Here verifier would first visit path 1-3, create a checkpoint at 3 in is_state_visited()
19383 * with r7=-16, continue to 4-7,3. Existing checkpoint at 3 does in is_state_visited()
19385 * comparison would discard current state with r7=-32 in is_state_visited()
19389 if (states_equal(env, &sl->state, cur, RANGE_WITHIN)) { in is_state_visited()
19394 cur_frame = cur->frame[cur->curframe]; in is_state_visited()
19398 iter_reg = &cur_frame->regs[BPF_REG_1]; in is_state_visited()
19401 * no need for extra (re-)validations in is_state_visited()
19403 spi = __get_spi(iter_reg->off + iter_reg->var_off.value); in is_state_visited()
19404 iter_state = &func(env, iter_reg)->stack[spi].spilled_ptr; in is_state_visited()
19405 if (iter_state->iter.state == BPF_ITER_STATE_ACTIVE) { in is_state_visited()
19413 if (sl->state.may_goto_depth != cur->may_goto_depth && in is_state_visited()
19414 states_equal(env, &sl->state, cur, RANGE_WITHIN)) { in is_state_visited()
19420 if (states_equal(env, &sl->state, cur, RANGE_WITHIN)) in is_state_visited()
19425 if (states_maybe_looping(&sl->state, cur) && in is_state_visited()
19426 states_equal(env, &sl->state, cur, EXACT) && in is_state_visited()
19427 !iter_active_depths_differ(&sl->state, cur) && in is_state_visited()
19428 sl->state.may_goto_depth == cur->may_goto_depth && in is_state_visited()
19429 sl->state.callback_unroll_depth == cur->callback_unroll_depth) { in is_state_visited()
19433 print_verifier_state(env, cur, cur->curframe, true); in is_state_visited()
19435 print_verifier_state(env, &sl->state, cur->curframe, true); in is_state_visited()
19436 return -EINVAL; in is_state_visited()
19445 * if r1 < 1000000 goto pc-2 in is_state_visited()
19452 env->jmps_processed - env->prev_jmps_processed < 20 && in is_state_visited()
19453 env->insn_processed - env->prev_insn_processed < 100) in is_state_visited()
19458 loop = incomplete_read_marks(env, &sl->state); in is_state_visited()
19459 if (states_equal(env, &sl->state, cur, loop ? RANGE_WITHIN : NOT_EXACT)) { in is_state_visited()
19461 sl->hit_cnt++; in is_state_visited()
19469 if (is_jmp_point(env, env->insn_idx)) in is_state_visited()
19471 err = err ? : propagate_precision(env, &sl->state, cur, NULL); in is_state_visited()
19478 * .-> A --. Assume the states are visited in the order A, B, C. in is_state_visited()
19481 * '-- B C has not received any read or precision marks from C. in is_state_visited()
19486 * - Prior to the main verification pass, strongly connected components in is_state_visited()
19490 * - During the main verification pass, `maybe_enter_scc()` checks in is_state_visited()
19495 * - This instance is associated not with the SCC itself, but with a in is_state_visited()
19499 * - When a verification path encounters a `states_equal(..., in is_state_visited()
19503 * `bpf_scc_visit->backedges`. in is_state_visited()
19505 * - When a verification path terminates, `maybe_exit_scc()` is called in is_state_visited()
19516 * -------------------- in is_state_visited()
19528 * - (A, SCC#1) in is_state_visited()
19529 * - (C, SCC#1) in is_state_visited()
19539 * - States explored during `C: foo()` would contribute backedges to in is_state_visited()
19542 * - By that time, the states explored between `A: foo()` and `C: foo()` in is_state_visited()
19551 return -ENOMEM; in is_state_visited()
19552 err = copy_verifier_state(&backedge->state, cur); in is_state_visited()
19553 backedge->state.equal_state = &sl->state; in is_state_visited()
19554 backedge->state.insn_idx = insn_idx; in is_state_visited()
19555 err = err ?: add_scc_backedge(env, &sl->state, backedge); in is_state_visited()
19557 free_verifier_state(&backedge->state, false); in is_state_visited()
19572 sl->miss_cnt++; in is_state_visited()
19581 n = is_force_checkpoint(env, insn_idx) && sl->state.branches > 0 ? 64 : 3; in is_state_visited()
19582 if (sl->miss_cnt > sl->hit_cnt * n + n) { in is_state_visited()
19586 sl->in_free_list = true; in is_state_visited()
19587 list_del(&sl->node); in is_state_visited()
19588 list_add(&sl->node, &env->free_list); in is_state_visited()
19589 env->free_list_size++; in is_state_visited()
19590 env->explored_states_size--; in is_state_visited()
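/*
 * Editor's note: the eviction rule above in isolation, as a stand-alone
 * helper for illustration (not verifier API). With n == 3 a cached state
 * is moved to the free list once its miss count exceeds 3 * hits + 3;
 * force-checkpoint states that still have unexplored branches get the far
 * more tolerant n == 64.
 */
#include <stdbool.h>

static bool should_evict_state(unsigned int miss_cnt, unsigned int hit_cnt,
			       bool force_checkpoint_with_branches)
{
	unsigned int n = force_checkpoint_with_branches ? 64 : 3;

	return miss_cnt > hit_cnt * n + n;
}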
19595 if (env->max_states_per_insn < states_cnt) in is_state_visited()
19596 env->max_states_per_insn = states_cnt; in is_state_visited()
19598 if (!env->bpf_capable && states_cnt > BPF_COMPLEXITY_LIMIT_STATES) in is_state_visited()
19610 * When looping the sl->state.branches will be > 0 and this state in is_state_visited()
19615 return -ENOMEM; in is_state_visited()
19616 env->total_states++; in is_state_visited()
19617 env->explored_states_size++; in is_state_visited()
19619 env->prev_jmps_processed = env->jmps_processed; in is_state_visited()
19620 env->prev_insn_processed = env->insn_processed; in is_state_visited()
19623 if (env->bpf_capable) in is_state_visited()
19627 new = &new_sl->state; in is_state_visited()
19634 new->insn_idx = insn_idx; in is_state_visited()
19635 verifier_bug_if(new->branches != 1, env, in is_state_visited()
19637 __func__, new->branches, insn_idx); in is_state_visited()
19645 cur->parent = new; in is_state_visited()
19646 cur->first_insn_idx = insn_idx; in is_state_visited()
19647 cur->dfs_depth = new->dfs_depth + 1; in is_state_visited()
19649 list_add(&new_sl->node, head); in is_state_visited()
19707 enum bpf_reg_type *prev_type = &env->insn_aux_data[env->insn_idx].ptr_type; in save_aux_ptr_type()
19745 return -EINVAL; in save_aux_ptr_type()
19762 * state->curframe > 0, it may be a callback function, in process_bpf_exit_full()
19767 !env->cur_state->curframe, in process_bpf_exit_full()
19784 if (env->cur_state->curframe) { in process_bpf_exit_full()
19789 err = prepare_func_exit(env, &env->insn_idx); in process_bpf_exit_full()
19805 struct bpf_insn *insn = &env->prog->insnsi[env->insn_idx]; in do_check_insn()
19806 u8 class = BPF_CLASS(insn->code); in do_check_insn()
19814 bool is_ldsx = BPF_MODE(insn->code) == BPF_MEMSX; in do_check_insn()
19823 if (BPF_MODE(insn->code) == BPF_ATOMIC) { in do_check_insn()
19827 env->insn_idx++; in do_check_insn()
19831 if (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0) { in do_check_insn()
19833 return -EINVAL; in do_check_insn()
19842 if (BPF_MODE(insn->code) != BPF_MEM || in do_check_insn()
19843 insn->src_reg != BPF_REG_0) { in do_check_insn()
19845 return -EINVAL; in do_check_insn()
19848 err = check_reg_arg(env, insn->dst_reg, SRC_OP); in do_check_insn()
19852 dst_reg_type = cur_regs(env)[insn->dst_reg].type; in do_check_insn()
19855 err = check_mem_access(env, env->insn_idx, insn->dst_reg, in do_check_insn()
19856 insn->off, BPF_SIZE(insn->code), in do_check_insn()
19857 BPF_WRITE, -1, false, false); in do_check_insn()
19865 u8 opcode = BPF_OP(insn->code); in do_check_insn()
19867 env->jmps_processed++; in do_check_insn()
19869 if (BPF_SRC(insn->code) != BPF_K || in do_check_insn()
19870 (insn->src_reg != BPF_PSEUDO_KFUNC_CALL && in do_check_insn()
19871 insn->off != 0) || in do_check_insn()
19872 (insn->src_reg != BPF_REG_0 && in do_check_insn()
19873 insn->src_reg != BPF_PSEUDO_CALL && in do_check_insn()
19874 insn->src_reg != BPF_PSEUDO_KFUNC_CALL) || in do_check_insn()
19875 insn->dst_reg != BPF_REG_0 || class == BPF_JMP32) { in do_check_insn()
19877 return -EINVAL; in do_check_insn()
19880 if (env->cur_state->active_locks) { in do_check_insn()
19881 if ((insn->src_reg == BPF_REG_0 && in do_check_insn()
19882 insn->imm != BPF_FUNC_spin_unlock) || in do_check_insn()
19883 (insn->src_reg == BPF_PSEUDO_KFUNC_CALL && in do_check_insn()
19884 (insn->off != 0 || !kfunc_spin_allowed(insn->imm)))) { in do_check_insn()
19887 return -EINVAL; in do_check_insn()
19890 if (insn->src_reg == BPF_PSEUDO_CALL) { in do_check_insn()
19891 err = check_func_call(env, insn, &env->insn_idx); in do_check_insn()
19892 } else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) { in do_check_insn()
19893 err = check_kfunc_call(env, insn, &env->insn_idx); in do_check_insn()
19897 err = check_helper_call(env, insn, &env->insn_idx); in do_check_insn()
19904 if (BPF_SRC(insn->code) != BPF_K || in do_check_insn()
19905 insn->src_reg != BPF_REG_0 || in do_check_insn()
19906 insn->dst_reg != BPF_REG_0 || in do_check_insn()
19907 (class == BPF_JMP && insn->imm != 0) || in do_check_insn()
19908 (class == BPF_JMP32 && insn->off != 0)) { in do_check_insn()
19910 return -EINVAL; in do_check_insn()
19914 env->insn_idx += insn->off + 1; in do_check_insn()
19916 env->insn_idx += insn->imm + 1; in do_check_insn()
19919 if (BPF_SRC(insn->code) != BPF_K || in do_check_insn()
19920 insn->imm != 0 || in do_check_insn()
19921 insn->src_reg != BPF_REG_0 || in do_check_insn()
19922 insn->dst_reg != BPF_REG_0 || in do_check_insn()
19925 return -EINVAL; in do_check_insn()
19929 err = check_cond_jmp_op(env, insn, &env->insn_idx); in do_check_insn()
19934 u8 mode = BPF_MODE(insn->code); in do_check_insn()
19946 env->insn_idx++; in do_check_insn()
19950 return -EINVAL; in do_check_insn()
19954 return -EINVAL; in do_check_insn()
19957 env->insn_idx++; in do_check_insn()
19963 bool pop_log = !(env->log.level & BPF_LOG_LEVEL2); in do_check()
19964 struct bpf_verifier_state *state = env->cur_state; in do_check()
19965 struct bpf_insn *insns = env->prog->insnsi; in do_check()
19966 int insn_cnt = env->prog->len; in do_check()
19968 int prev_insn_idx = -1; in do_check()
19976 env->cur_hist_ent = NULL; in do_check()
19978 env->prev_insn_idx = prev_insn_idx; in do_check()
19979 if (env->insn_idx >= insn_cnt) { in do_check()
19981 env->insn_idx, insn_cnt); in do_check()
19982 return -EFAULT; in do_check()
19985 insn = &insns[env->insn_idx]; in do_check()
19986 insn_aux = &env->insn_aux_data[env->insn_idx]; in do_check()
19988 if (++env->insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) { in do_check()
19991 env->insn_processed); in do_check()
19992 return -E2BIG; in do_check()
19995 state->last_insn_idx = env->prev_insn_idx; in do_check()
19996 state->insn_idx = env->insn_idx; in do_check()
19998 if (is_prune_point(env, env->insn_idx)) { in do_check()
19999 err = is_state_visited(env, env->insn_idx); in do_check()
20004 if (env->log.level & BPF_LOG_LEVEL) { in do_check()
20007 env->prev_insn_idx, env->insn_idx, in do_check()
20008 env->cur_state->speculative ? in do_check()
20011 verbose(env, "%d: safe\n", env->insn_idx); in do_check()
20017 if (is_jmp_point(env, env->insn_idx)) { in do_check()
20024 return -EAGAIN; in do_check()
20029 if (env->log.level & BPF_LOG_LEVEL2 && do_print_state) { in do_check()
20031 env->prev_insn_idx, env->insn_idx, in do_check()
20032 env->cur_state->speculative ? in do_check()
20034 print_verifier_state(env, state, state->curframe, true); in do_check()
20038 if (env->log.level & BPF_LOG_LEVEL) { in do_check()
20040 print_insn_state(env, state, state->curframe); in do_check()
20042 verbose_linfo(env, env->insn_idx, "; "); in do_check()
20043 env->prev_log_pos = env->log.end_pos; in do_check()
20044 verbose(env, "%d: ", env->insn_idx); in do_check()
20046 env->prev_insn_print_pos = env->log.end_pos - env->prev_log_pos; in do_check()
20047 env->prev_log_pos = env->log.end_pos; in do_check()
20050 if (bpf_prog_is_offloaded(env->prog->aux)) { in do_check()
20051 err = bpf_prog_offload_verify_insn(env, env->insn_idx, in do_check()
20052 env->prev_insn_idx); in do_check()
20058 prev_insn_idx = env->insn_idx; in do_check()
20063 if (state->speculative && insn_aux->nospec) in do_check()
20066 err = bpf_reset_stack_write_marks(env, env->insn_idx); in do_check()
20075 if (error_recoverable_with_nospec(err) && state->speculative) { in do_check()
20079 insn_aux->nospec = true; in do_check()
20083 insn_aux->alu_state = 0; in do_check()
20092 if (state->speculative && insn_aux->nospec_result) { in do_check()
20093 /* If we are on a path that performed a jump-op, this in do_check()
20094 * may skip a nospec patched-in after the jump. This can in do_check()
20096 * used for the write-ops in do_check()
20102 * All non-branch instructions have a single in do_check()
20103 * fall-through edge. For these, nospec_result should in do_check()
20106 if (verifier_bug_if(BPF_CLASS(insn->code) == BPF_JMP || in do_check()
20107 BPF_CLASS(insn->code) == BPF_JMP32, env, in do_check()
20109 return -EFAULT; in do_check()
20112 err = update_branch_counts(env, env->cur_state); in do_check()
20118 err = pop_stack(env, &prev_insn_idx, &env->insn_idx, in do_check()
20121 if (err != -ENOENT) in do_check()
20153 if (BTF_INFO_KIND(t->info) != BTF_KIND_DATASEC) in find_btf_percpu_datasec()
20156 tname = btf_name_by_offset(btf, t->name_off); in find_btf_percpu_datasec()
20161 return -ENOENT; in find_btf_percpu_datasec()
20176 for (i = 0; i < env->used_btf_cnt; i++) in __add_used_btf()
20177 if (env->used_btfs[i].btf == btf) in __add_used_btf()
20180 if (env->used_btf_cnt >= MAX_USED_BTFS) { in __add_used_btf()
20183 return -E2BIG; in __add_used_btf()
20188 btf_mod = &env->used_btfs[env->used_btf_cnt]; in __add_used_btf()
20189 btf_mod->btf = btf; in __add_used_btf()
20190 btf_mod->module = NULL; in __add_used_btf()
20194 btf_mod->module = btf_try_get_module(btf); in __add_used_btf()
20195 if (!btf_mod->module) { in __add_used_btf()
20197 return -ENXIO; in __add_used_btf()
20201 return env->used_btf_cnt++; in __add_used_btf()
20215 u32 type, id = insn->imm; in __check_pseudo_btf_id()
20223 return -ENOENT; in __check_pseudo_btf_id()
20228 return -EINVAL; in __check_pseudo_btf_id()
20231 sym_name = btf_name_by_offset(btf, t->name_off); in __check_pseudo_btf_id()
20236 return -ENOENT; in __check_pseudo_btf_id()
20242 aux->btf_var.reg_type = PTR_TO_MEM | MEM_RDONLY; in __check_pseudo_btf_id()
20243 aux->btf_var.mem_size = 0; in __check_pseudo_btf_id()
20251 if (vsi->type == id) { in __check_pseudo_btf_id()
20258 type = t->type; in __check_pseudo_btf_id()
20261 aux->btf_var.reg_type = PTR_TO_BTF_ID | MEM_PERCPU; in __check_pseudo_btf_id()
20262 aux->btf_var.btf = btf; in __check_pseudo_btf_id()
20263 aux->btf_var.btf_id = type; in __check_pseudo_btf_id()
20272 tname = btf_name_by_offset(btf, t->name_off); in __check_pseudo_btf_id()
20275 return -EINVAL; in __check_pseudo_btf_id()
20277 aux->btf_var.reg_type = PTR_TO_MEM | MEM_RDONLY; in __check_pseudo_btf_id()
20278 aux->btf_var.mem_size = tsize; in __check_pseudo_btf_id()
20280 aux->btf_var.reg_type = PTR_TO_BTF_ID; in __check_pseudo_btf_id()
20281 aux->btf_var.btf = btf; in __check_pseudo_btf_id()
20282 aux->btf_var.btf_id = type; in __check_pseudo_btf_id()
20303 return -EINVAL; in check_pseudo_btf_id()
20308 return -EINVAL; in check_pseudo_btf_id()
20339 return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE || in bpf_map_is_cgroup_storage()
20340 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE); in bpf_map_is_cgroup_storage()
20350 if (map->excl_prog_sha && in check_map_prog_compatibility()
20351 memcmp(map->excl_prog_sha, prog->digest, SHA256_DIGEST_SIZE)) { in check_map_prog_compatibility()
20353 return -EACCES; in check_map_prog_compatibility()
20356 if (btf_record_has_field(map->record, BPF_LIST_HEAD) || in check_map_prog_compatibility()
20357 btf_record_has_field(map->record, BPF_RB_ROOT)) { in check_map_prog_compatibility()
20360 return -EINVAL; in check_map_prog_compatibility()
20364 if (btf_record_has_field(map->record, BPF_SPIN_LOCK | BPF_RES_SPIN_LOCK)) { in check_map_prog_compatibility()
20367 return -EINVAL; in check_map_prog_compatibility()
20372 return -EINVAL; in check_map_prog_compatibility()
20376 if (btf_record_has_field(map->record, BPF_TIMER)) { in check_map_prog_compatibility()
20379 return -EINVAL; in check_map_prog_compatibility()
20383 if (btf_record_has_field(map->record, BPF_WORKQUEUE)) { in check_map_prog_compatibility()
20386 return -EINVAL; in check_map_prog_compatibility()
20390 if ((bpf_prog_is_offloaded(prog->aux) || bpf_map_is_offloaded(map)) && in check_map_prog_compatibility()
20393 return -EINVAL; in check_map_prog_compatibility()
20396 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { in check_map_prog_compatibility()
20398 return -EINVAL; in check_map_prog_compatibility()
20401 if (prog->sleepable) in check_map_prog_compatibility()
20402 switch (map->map_type) { in check_map_prog_compatibility()
20424 return -EINVAL; in check_map_prog_compatibility()
20428 bpf_cgroup_storage_assign(env->prog->aux, map)) { in check_map_prog_compatibility()
20430 return -EBUSY; in check_map_prog_compatibility()
20433 if (map->map_type == BPF_MAP_TYPE_ARENA) { in check_map_prog_compatibility()
20434 if (env->prog->aux->arena) { in check_map_prog_compatibility()
20436 return -EBUSY; in check_map_prog_compatibility()
20438 if (!env->allow_ptr_leaks || !env->bpf_capable) { in check_map_prog_compatibility()
20440 return -EPERM; in check_map_prog_compatibility()
20442 if (!env->prog->jit_requested) { in check_map_prog_compatibility()
20444 return -EOPNOTSUPP; in check_map_prog_compatibility()
20448 return -EOPNOTSUPP; in check_map_prog_compatibility()
20450 env->prog->aux->arena = (void *)map; in check_map_prog_compatibility()
20451 if (!bpf_arena_get_user_vm_start(env->prog->aux->arena)) { in check_map_prog_compatibility()
20453 return -EINVAL; in check_map_prog_compatibility()
20465 for (i = 0; i < env->used_map_cnt; i++) in __add_used_map()
20466 if (env->used_maps[i] == map) in __add_used_map()
20469 if (env->used_map_cnt >= MAX_USED_MAPS) { in __add_used_map()
20472 return -E2BIG; in __add_used_map()
20475 err = check_map_prog_compatibility(env, map, env->prog); in __add_used_map()
20479 if (env->prog->sleepable) in __add_used_map()
20480 atomic64_inc(&map->sleepable_refcnt); in __add_used_map()
20489 env->used_maps[env->used_map_cnt++] = map; in __add_used_map()
20491 return env->used_map_cnt - 1; in __add_used_map()
20521 struct bpf_insn *insn = env->prog->insnsi; in resolve_pseudo_ldimm64()
20522 int insn_cnt = env->prog->len; in resolve_pseudo_ldimm64()
20525 err = bpf_prog_calc_tag(env->prog); in resolve_pseudo_ldimm64()
20530 if (BPF_CLASS(insn->code) == BPF_LDX && in resolve_pseudo_ldimm64()
20531 ((BPF_MODE(insn->code) != BPF_MEM && BPF_MODE(insn->code) != BPF_MEMSX) || in resolve_pseudo_ldimm64()
20532 insn->imm != 0)) { in resolve_pseudo_ldimm64()
20534 return -EINVAL; in resolve_pseudo_ldimm64()
20544 if (i == insn_cnt - 1 || insn[1].code != 0 || in resolve_pseudo_ldimm64()
20548 return -EINVAL; in resolve_pseudo_ldimm64()
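/*
 * Editor's note: background sketch for the check above. A BPF_LD | BPF_IMM |
 * BPF_DW ("ld_imm64") instruction occupies two struct bpf_insn slots: the
 * first carries the low 32 bits of the immediate in insn->imm, the second
 * (whose code must be 0) carries the high 32 bits. Stand-alone decode for
 * illustration only, not verifier code.
 */
#include <linux/bpf.h>

static unsigned long long ld_imm64_value(const struct bpf_insn insn[2])
{
	return (unsigned long long)(unsigned int)insn[0].imm |
	       ((unsigned long long)(unsigned int)insn[1].imm << 32);
}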
20552 /* valid generic load 64-bit imm */ in resolve_pseudo_ldimm64()
20556 aux = &env->insn_aux_data[i]; in resolve_pseudo_ldimm64()
20564 aux = &env->insn_aux_data[i]; in resolve_pseudo_ldimm64()
20565 aux->ptr_type = PTR_TO_FUNC; in resolve_pseudo_ldimm64()
20570 * converted into regular 64-bit imm load insn. in resolve_pseudo_ldimm64()
20583 return -EINVAL; in resolve_pseudo_ldimm64()
20589 if (bpfptr_is_null(env->fd_array)) { in resolve_pseudo_ldimm64()
20591 return -EPROTO; in resolve_pseudo_ldimm64()
20593 if (copy_from_bpfptr_offset(&fd, env->fd_array, in resolve_pseudo_ldimm64()
20596 return -EFAULT; in resolve_pseudo_ldimm64()
20606 map = env->used_maps[map_idx]; in resolve_pseudo_ldimm64()
20608 aux = &env->insn_aux_data[i]; in resolve_pseudo_ldimm64()
20609 aux->map_index = map_idx; in resolve_pseudo_ldimm64()
20619 return -EINVAL; in resolve_pseudo_ldimm64()
20622 if (!map->ops->map_direct_value_addr) { in resolve_pseudo_ldimm64()
20624 return -EINVAL; in resolve_pseudo_ldimm64()
20627 err = map->ops->map_direct_value_addr(map, &addr, off); in resolve_pseudo_ldimm64()
20630 map->value_size, off); in resolve_pseudo_ldimm64()
20634 aux->map_off = off; in resolve_pseudo_ldimm64()
20648 if (!bpf_opcode_in_insntable(insn->code)) { in resolve_pseudo_ldimm64()
20649 verbose(env, "unknown opcode %02x\n", insn->code); in resolve_pseudo_ldimm64()
20650 return -EINVAL; in resolve_pseudo_ldimm64()
20664 __bpf_free_used_maps(env->prog->aux, env->used_maps, in release_maps()
20665 env->used_map_cnt); in release_maps()
20671 __bpf_free_used_btfs(env->used_btfs, env->used_btf_cnt); in release_btfs()
20677 struct bpf_insn *insn = env->prog->insnsi; in convert_pseudo_ld_imm64()
20678 int insn_cnt = env->prog->len; in convert_pseudo_ld_imm64()
20682 if (insn->code != (BPF_LD | BPF_IMM | BPF_DW)) in convert_pseudo_ld_imm64()
20684 if (insn->src_reg == BPF_PSEUDO_FUNC) in convert_pseudo_ld_imm64()
20686 insn->src_reg = 0; in convert_pseudo_ld_imm64()
20690 /* single env->prog->insnsi[off] instruction was replaced with the range
20697 struct bpf_insn_aux_data *data = env->insn_aux_data; in adjust_insn_aux_data()
20698 struct bpf_insn *insn = new_prog->insnsi; in adjust_insn_aux_data()
20707 data[off].zext_dst = insn_has_def32(insn + off + cnt - 1); in adjust_insn_aux_data()
20711 prog_len = new_prog->len; in adjust_insn_aux_data()
20713 memmove(data + off + cnt - 1, data + off, in adjust_insn_aux_data()
20714 sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1)); in adjust_insn_aux_data()
20715 memset(data + off, 0, sizeof(struct bpf_insn_aux_data) * (cnt - 1)); in adjust_insn_aux_data()
20716 for (i = off; i < off + cnt - 1; i++) { in adjust_insn_aux_data()
20730 for (i = 0; i <= env->subprog_cnt; i++) { in adjust_subprog_starts()
20731 if (env->subprog_info[i].start <= off) in adjust_subprog_starts()
20733 env->subprog_info[i].start += len - 1; in adjust_subprog_starts()
20739 struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab; in adjust_poke_descs()
20740 int i, sz = prog->aux->size_poke_tab; in adjust_poke_descs()
20745 if (desc->insn_idx <= off) in adjust_poke_descs()
20747 desc->insn_idx += len - 1; in adjust_poke_descs()
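/*
 * Editor's note: the index arithmetic shared by adjust_insn_aux_data(),
 * adjust_subprog_starts() and adjust_poke_descs() above, shown on its own.
 * When the single instruction at 'off' is replaced by 'len' instructions,
 * every recorded instruction index strictly greater than 'off' shifts by
 * (len - 1). Stand-alone helper for illustration, not verifier API.
 */
static int adjust_index_after_patch(int idx, int off, int len)
{
	return idx <= off ? idx : idx + len - 1;
}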
20758 new_data = vrealloc(env->insn_aux_data, in bpf_patch_insn_data()
20759 array_size(env->prog->len + len - 1, in bpf_patch_insn_data()
20765 env->insn_aux_data = new_data; in bpf_patch_insn_data()
20768 new_prog = bpf_patch_insn_single(env->prog, off, patch, len); in bpf_patch_insn_data()
20770 if (PTR_ERR(new_prog) == -ERANGE) in bpf_patch_insn_data()
20772 "insn %d cannot be patched due to 16-bit range\n", in bpf_patch_insn_data()
20773 env->insn_aux_data[off].orig_idx); in bpf_patch_insn_data()
20788 struct bpf_insn *insn = prog->insnsi; in adjust_jmp_off()
20789 u32 insn_cnt = prog->len, i; in adjust_jmp_off()
20794 u8 code = insn->code; in adjust_jmp_off()
20803 if (insn->code == (BPF_JMP32 | BPF_JA)) { in adjust_jmp_off()
20804 if (i + 1 + insn->imm != tgt_idx) in adjust_jmp_off()
20806 if (check_add_overflow(insn->imm, delta, &imm)) in adjust_jmp_off()
20807 return -ERANGE; in adjust_jmp_off()
20808 insn->imm = imm; in adjust_jmp_off()
20810 if (i + 1 + insn->off != tgt_idx) in adjust_jmp_off()
20812 if (check_add_overflow(insn->off, delta, &off)) in adjust_jmp_off()
20813 return -ERANGE; in adjust_jmp_off()
20814 insn->off = off; in adjust_jmp_off()
20826 for (i = 0; i < env->subprog_cnt; i++) in adjust_subprog_starts_after_remove()
20827 if (env->subprog_info[i].start >= off) in adjust_subprog_starts_after_remove()
20830 for (j = i; j < env->subprog_cnt; j++) in adjust_subprog_starts_after_remove()
20831 if (env->subprog_info[j].start >= off + cnt) in adjust_subprog_starts_after_remove()
20836 if (env->subprog_info[j].start != off + cnt) in adjust_subprog_starts_after_remove()
20837 j--; in adjust_subprog_starts_after_remove()
20840 struct bpf_prog_aux *aux = env->prog->aux; in adjust_subprog_starts_after_remove()
20844 move = env->subprog_cnt + 1 - j; in adjust_subprog_starts_after_remove()
20846 memmove(env->subprog_info + i, in adjust_subprog_starts_after_remove()
20847 env->subprog_info + j, in adjust_subprog_starts_after_remove()
20848 sizeof(*env->subprog_info) * move); in adjust_subprog_starts_after_remove()
20849 env->subprog_cnt -= j - i; in adjust_subprog_starts_after_remove()
20852 if (aux->func_info) { in adjust_subprog_starts_after_remove()
20853 move = aux->func_info_cnt - j; in adjust_subprog_starts_after_remove()
20855 memmove(aux->func_info + i, in adjust_subprog_starts_after_remove()
20856 aux->func_info + j, in adjust_subprog_starts_after_remove()
20857 sizeof(*aux->func_info) * move); in adjust_subprog_starts_after_remove()
20858 aux->func_info_cnt -= j - i; in adjust_subprog_starts_after_remove()
20859 /* func_info->insn_off is set after all code rewrites, in adjust_subprog_starts_after_remove()
20860 * in adjust_btf_func() - no need to adjust in adjust_subprog_starts_after_remove()
20865 if (env->subprog_info[i].start == off) in adjust_subprog_starts_after_remove()
20870 for (; i <= env->subprog_cnt; i++) in adjust_subprog_starts_after_remove()
20871 env->subprog_info[i].start -= cnt; in adjust_subprog_starts_after_remove()
20879 struct bpf_prog *prog = env->prog; in bpf_adj_linfo_after_remove()
20883 nr_linfo = prog->aux->nr_linfo; in bpf_adj_linfo_after_remove()
20887 linfo = prog->aux->linfo; in bpf_adj_linfo_after_remove()
20903 * last removed linfo. prog is already modified, so prog->len == off in bpf_adj_linfo_after_remove()
20906 if (prog->len != off && l_cnt && in bpf_adj_linfo_after_remove()
20908 l_cnt--; in bpf_adj_linfo_after_remove()
20909 linfo[--i].insn_off = off + cnt; in bpf_adj_linfo_after_remove()
20915 sizeof(*linfo) * (nr_linfo - i)); in bpf_adj_linfo_after_remove()
20917 prog->aux->nr_linfo -= l_cnt; in bpf_adj_linfo_after_remove()
20918 nr_linfo = prog->aux->nr_linfo; in bpf_adj_linfo_after_remove()
20923 linfo[i].insn_off -= cnt; in bpf_adj_linfo_after_remove()
20926 for (i = 0; i <= env->subprog_cnt; i++) in bpf_adj_linfo_after_remove()
20927 if (env->subprog_info[i].linfo_idx > l_off) { in bpf_adj_linfo_after_remove()
20931 if (env->subprog_info[i].linfo_idx >= l_off + l_cnt) in bpf_adj_linfo_after_remove()
20932 env->subprog_info[i].linfo_idx -= l_cnt; in bpf_adj_linfo_after_remove()
20934 env->subprog_info[i].linfo_idx = l_off; in bpf_adj_linfo_after_remove()
20942 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; in verifier_remove_insns()
20943 unsigned int orig_prog_len = env->prog->len; in verifier_remove_insns()
20946 if (bpf_prog_is_offloaded(env->prog->aux)) in verifier_remove_insns()
20949 err = bpf_remove_insns(env->prog, off, cnt); in verifier_remove_insns()
20962 sizeof(*aux_data) * (orig_prog_len - off - cnt)); in verifier_remove_insns()
20969 * have dead code too. Therefore replace all dead at-run-time code
20970 * with 'ja -1'.
20980 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; in sanitize_dead_code()
20981 struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1); in sanitize_dead_code()
20982 struct bpf_insn *insn = env->prog->insnsi; in sanitize_dead_code()
20983 const int insn_cnt = env->prog->len; in sanitize_dead_code()
21010 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; in opt_hard_wire_dead_code_branches()
21012 struct bpf_insn *insn = env->prog->insnsi; in opt_hard_wire_dead_code_branches()
21013 const int insn_cnt = env->prog->len; in opt_hard_wire_dead_code_branches()
21017 if (!insn_is_cond_jump(insn->code)) in opt_hard_wire_dead_code_branches()
21021 ja.off = insn->off; in opt_hard_wire_dead_code_branches()
21022 else if (!aux_data[i + 1 + insn->off].seen) in opt_hard_wire_dead_code_branches()
21027 if (bpf_prog_is_offloaded(env->prog->aux)) in opt_hard_wire_dead_code_branches()
21036 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; in opt_remove_dead_code()
21037 int insn_cnt = env->prog->len; in opt_remove_dead_code()
21052 insn_cnt = env->prog->len; in opt_remove_dead_code()
21063 struct bpf_insn *insn = env->prog->insnsi; in opt_remove_nops()
21064 int insn_cnt = env->prog->len; in opt_remove_nops()
21078 insn_cnt--; in opt_remove_nops()
21080 i -= (is_may_goto_0 && i > 0) ? 2 : 1; in opt_remove_nops()
21090 /* use env->insn_buf as two independent buffers */ in opt_subreg_zext_lo32_rnd_hi32()
21091 struct bpf_insn *zext_patch = env->insn_buf; in opt_subreg_zext_lo32_rnd_hi32()
21092 struct bpf_insn *rnd_hi32_patch = &env->insn_buf[2]; in opt_subreg_zext_lo32_rnd_hi32()
21093 struct bpf_insn_aux_data *aux = env->insn_aux_data; in opt_subreg_zext_lo32_rnd_hi32()
21094 int i, patch_len, delta = 0, len = env->prog->len; in opt_subreg_zext_lo32_rnd_hi32()
21095 struct bpf_insn *insns = env->prog->insnsi; in opt_subreg_zext_lo32_rnd_hi32()
21099 rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32; in opt_subreg_zext_lo32_rnd_hi32()
21120 if (load_reg == -1) in opt_subreg_zext_lo32_rnd_hi32()
21148 /* Add in a zero-extend instruction if a) the JIT has requested in opt_subreg_zext_lo32_rnd_hi32()
21152 * R0, therefore always zero-extends. However some archs' in opt_subreg_zext_lo32_rnd_hi32()
21155 * orthogonal to the general zero-extension behaviour of the in opt_subreg_zext_lo32_rnd_hi32()
21161 /* Zero-extension is done by the caller. */ in opt_subreg_zext_lo32_rnd_hi32()
21165 if (verifier_bug_if(load_reg == -1, env, in opt_subreg_zext_lo32_rnd_hi32()
21167 return -EFAULT; in opt_subreg_zext_lo32_rnd_hi32()
21177 return -ENOMEM; in opt_subreg_zext_lo32_rnd_hi32()
21178 env->prog = new_prog; in opt_subreg_zext_lo32_rnd_hi32()
21179 insns = new_prog->insnsi; in opt_subreg_zext_lo32_rnd_hi32()
21180 aux = env->insn_aux_data; in opt_subreg_zext_lo32_rnd_hi32()
21181 delta += patch_len - 1; in opt_subreg_zext_lo32_rnd_hi32()
21189 * struct __sk_buff -> struct sk_buff
21190 * struct bpf_sock_ops -> struct sock
21194 struct bpf_subprog_info *subprogs = env->subprog_info; in convert_ctx_accesses()
21195 const struct bpf_verifier_ops *ops = env->ops; in convert_ctx_accesses()
21197 const int insn_cnt = env->prog->len; in convert_ctx_accesses()
21198 struct bpf_insn *epilogue_buf = env->epilogue_buf; in convert_ctx_accesses()
21199 struct bpf_insn *insn_buf = env->insn_buf; in convert_ctx_accesses()
21207 if (ops->gen_epilogue) { in convert_ctx_accesses()
21208 epilogue_cnt = ops->gen_epilogue(epilogue_buf, env->prog, in convert_ctx_accesses()
21209 -(subprogs[0].stack_depth + 8)); in convert_ctx_accesses()
21212 return -EFAULT; in convert_ctx_accesses()
21218 -subprogs[0].stack_depth); in convert_ctx_accesses()
21219 insn_buf[cnt++] = env->prog->insnsi[0]; in convert_ctx_accesses()
21222 return -ENOMEM; in convert_ctx_accesses()
21223 env->prog = new_prog; in convert_ctx_accesses()
21224 delta += cnt - 1; in convert_ctx_accesses()
21226 ret = add_kfunc_in_insns(env, epilogue_buf, epilogue_cnt - 1); in convert_ctx_accesses()
21232 if (ops->gen_prologue || env->seen_direct_write) { in convert_ctx_accesses()
21233 if (!ops->gen_prologue) { in convert_ctx_accesses()
21235 return -EFAULT; in convert_ctx_accesses()
21237 cnt = ops->gen_prologue(insn_buf, env->seen_direct_write, in convert_ctx_accesses()
21238 env->prog); in convert_ctx_accesses()
21241 return -EFAULT; in convert_ctx_accesses()
21245 return -ENOMEM; in convert_ctx_accesses()
21247 env->prog = new_prog; in convert_ctx_accesses()
21248 delta += cnt - 1; in convert_ctx_accesses()
21250 ret = add_kfunc_in_insns(env, insn_buf, cnt - 1); in convert_ctx_accesses()
21257 WARN_ON(adjust_jmp_off(env->prog, 0, delta)); in convert_ctx_accesses()
21259 if (bpf_prog_is_offloaded(env->prog->aux)) in convert_ctx_accesses()
21262 insn = env->prog->insnsi + delta; in convert_ctx_accesses()
21268 if (env->insn_aux_data[i + delta].nospec) { in convert_ctx_accesses()
21269 WARN_ON_ONCE(env->insn_aux_data[i + delta].alu_state); in convert_ctx_accesses()
21274 cnt = patch - insn_buf; in convert_ctx_accesses()
21277 return -ENOMEM; in convert_ctx_accesses()
21279 delta += cnt - 1; in convert_ctx_accesses()
21280 env->prog = new_prog; in convert_ctx_accesses()
21281 insn = new_prog->insnsi + i + delta; in convert_ctx_accesses()
21283 * nospec_result-case, because an insn may require a in convert_ctx_accesses()
21290 if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) || in convert_ctx_accesses()
21291 insn->code == (BPF_LDX | BPF_MEM | BPF_H) || in convert_ctx_accesses()
21292 insn->code == (BPF_LDX | BPF_MEM | BPF_W) || in convert_ctx_accesses()
21293 insn->code == (BPF_LDX | BPF_MEM | BPF_DW) || in convert_ctx_accesses()
21294 insn->code == (BPF_LDX | BPF_MEMSX | BPF_B) || in convert_ctx_accesses()
21295 insn->code == (BPF_LDX | BPF_MEMSX | BPF_H) || in convert_ctx_accesses()
21296 insn->code == (BPF_LDX | BPF_MEMSX | BPF_W)) { in convert_ctx_accesses()
21298 } else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) || in convert_ctx_accesses()
21299 insn->code == (BPF_STX | BPF_MEM | BPF_H) || in convert_ctx_accesses()
21300 insn->code == (BPF_STX | BPF_MEM | BPF_W) || in convert_ctx_accesses()
21301 insn->code == (BPF_STX | BPF_MEM | BPF_DW) || in convert_ctx_accesses()
21302 insn->code == (BPF_ST | BPF_MEM | BPF_B) || in convert_ctx_accesses()
21303 insn->code == (BPF_ST | BPF_MEM | BPF_H) || in convert_ctx_accesses()
21304 insn->code == (BPF_ST | BPF_MEM | BPF_W) || in convert_ctx_accesses()
21305 insn->code == (BPF_ST | BPF_MEM | BPF_DW)) { in convert_ctx_accesses()
21307 } else if ((insn->code == (BPF_STX | BPF_ATOMIC | BPF_B) || in convert_ctx_accesses()
21308 insn->code == (BPF_STX | BPF_ATOMIC | BPF_H) || in convert_ctx_accesses()
21309 insn->code == (BPF_STX | BPF_ATOMIC | BPF_W) || in convert_ctx_accesses()
21310 insn->code == (BPF_STX | BPF_ATOMIC | BPF_DW)) && in convert_ctx_accesses()
21311 env->insn_aux_data[i + delta].ptr_type == PTR_TO_ARENA) { in convert_ctx_accesses()
21312 insn->code = BPF_STX | BPF_PROBE_ATOMIC | BPF_SIZE(insn->code); in convert_ctx_accesses()
21313 env->prog->aux->num_exentries++; in convert_ctx_accesses()
21315 } else if (insn->code == (BPF_JMP | BPF_EXIT) && in convert_ctx_accesses()
21321 insn_buf[0] = BPF_JMP32_A(epilogue_idx - i - delta - 1); in convert_ctx_accesses()
21339 env->insn_aux_data[i + delta].nospec_result) { in convert_ctx_accesses()
21341 * to limit verification-time for Spectre v1. in convert_ctx_accesses()
21347 cnt = patch - insn_buf; in convert_ctx_accesses()
21350 return -ENOMEM; in convert_ctx_accesses()
21352 delta += cnt - 1; in convert_ctx_accesses()
21353 env->prog = new_prog; in convert_ctx_accesses()
21354 insn = new_prog->insnsi + i + delta; in convert_ctx_accesses()
21358 switch ((int)env->insn_aux_data[i + delta].ptr_type) { in convert_ctx_accesses()
21360 if (!ops->convert_ctx_access) in convert_ctx_accesses()
21362 convert_ctx_access = ops->convert_ctx_access; in convert_ctx_accesses()
21385 if (BPF_MODE(insn->code) == BPF_MEM) in convert_ctx_accesses()
21386 insn->code = BPF_LDX | BPF_PROBE_MEM | in convert_ctx_accesses()
21387 BPF_SIZE((insn)->code); in convert_ctx_accesses()
21389 insn->code = BPF_LDX | BPF_PROBE_MEMSX | in convert_ctx_accesses()
21390 BPF_SIZE((insn)->code); in convert_ctx_accesses()
21391 env->prog->aux->num_exentries++; in convert_ctx_accesses()
21395 if (BPF_MODE(insn->code) == BPF_MEMSX) { in convert_ctx_accesses()
21398 return -EOPNOTSUPP; in convert_ctx_accesses()
21400 insn->code = BPF_CLASS(insn->code) | BPF_PROBE_MEM32SX | BPF_SIZE(insn->code); in convert_ctx_accesses()
21402 insn->code = BPF_CLASS(insn->code) | BPF_PROBE_MEM32 | BPF_SIZE(insn->code); in convert_ctx_accesses()
21404 env->prog->aux->num_exentries++; in convert_ctx_accesses()
21410 ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size; in convert_ctx_accesses()
21412 mode = BPF_MODE(insn->code); in convert_ctx_accesses()
21415 		 * convert to a 4/8-byte load, to the minimum program-type-specific ctx_field_size in convert_ctx_accesses()
21421 off = insn->off; in convert_ctx_accesses()
21427 return -EFAULT; in convert_ctx_accesses()
21436 insn->off = off & ~(size_default - 1); in convert_ctx_accesses()
21437 insn->code = BPF_LDX | BPF_MEM | size_code; in convert_ctx_accesses()
21441 cnt = convert_ctx_access(type, insn, insn_buf, env->prog, in convert_ctx_accesses()
21446 return -EFAULT; in convert_ctx_accesses()
21454 return -EFAULT; in convert_ctx_accesses()
21459 insn->dst_reg, in convert_ctx_accesses()
21461 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg, in convert_ctx_accesses()
21462 (1 << size * 8) - 1); in convert_ctx_accesses()
21466 insn->dst_reg, in convert_ctx_accesses()
21468 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg, in convert_ctx_accesses()
21469 (1ULL << size * 8) - 1); in convert_ctx_accesses()
21474 insn->dst_reg, insn->dst_reg, in convert_ctx_accesses()
21480 return -ENOMEM; in convert_ctx_accesses()
21482 delta += cnt - 1; in convert_ctx_accesses()
21485 env->prog = new_prog; in convert_ctx_accesses()
21486 insn = new_prog->insnsi + i + delta; in convert_ctx_accesses()
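		/* Worked example of the narrow-load fixup above (illustrative):
		 * a 1-byte context read at byte offset 3 of a 4-byte field is
		 * turned into a 4-byte load of the whole field, followed (on
		 * little-endian hosts) by a right shift of 3 * 8 bits and an
		 * AND with (1 << 1 * 8) - 1 == 0xff, leaving only the byte the
		 * program originally asked for.
		 */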
21494 struct bpf_prog *prog = env->prog, **func, *tmp; in jit_subprogs()
21501 if (env->subprog_cnt <= 1) in jit_subprogs()
21504 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { in jit_subprogs()
21509 * need a hard reject of the program. Thus -EFAULT is in jit_subprogs()
21512 subprog = find_subprog(env, i + insn->imm + 1); in jit_subprogs()
21514 i + insn->imm + 1)) in jit_subprogs()
21515 return -EFAULT; in jit_subprogs()
21519 insn->off = subprog; in jit_subprogs()
21523 env->insn_aux_data[i].call_imm = insn->imm; in jit_subprogs()
21525 insn->imm = 1; in jit_subprogs()
21545 err = -ENOMEM; in jit_subprogs()
21546 func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL); in jit_subprogs()
21550 for (i = 0; i < env->subprog_cnt; i++) { in jit_subprogs()
21552 subprog_end = env->subprog_info[i + 1].start; in jit_subprogs()
21554 len = subprog_end - subprog_start; in jit_subprogs()
21558 * func[i]->stats will never be accessed and stays NULL in jit_subprogs()
21563 memcpy(func[i]->insnsi, &prog->insnsi[subprog_start], in jit_subprogs()
21565 func[i]->type = prog->type; in jit_subprogs()
21566 func[i]->len = len; in jit_subprogs()
21569 func[i]->is_func = 1; in jit_subprogs()
21570 func[i]->sleepable = prog->sleepable; in jit_subprogs()
21571 func[i]->aux->func_idx = i; in jit_subprogs()
21572 /* Below members will be freed only at prog->aux */ in jit_subprogs()
21573 func[i]->aux->btf = prog->aux->btf; in jit_subprogs()
21574 func[i]->aux->func_info = prog->aux->func_info; in jit_subprogs()
21575 func[i]->aux->func_info_cnt = prog->aux->func_info_cnt; in jit_subprogs()
21576 func[i]->aux->poke_tab = prog->aux->poke_tab; in jit_subprogs()
21577 func[i]->aux->size_poke_tab = prog->aux->size_poke_tab; in jit_subprogs()
21578 func[i]->aux->main_prog_aux = prog->aux; in jit_subprogs()
21580 for (j = 0; j < prog->aux->size_poke_tab; j++) { in jit_subprogs()
21583 poke = &prog->aux->poke_tab[j]; in jit_subprogs()
21584 if (poke->insn_idx < subprog_end && in jit_subprogs()
21585 poke->insn_idx >= subprog_start) in jit_subprogs()
21586 poke->aux = func[i]->aux; in jit_subprogs()
21589 func[i]->aux->name[0] = 'F'; in jit_subprogs()
21590 func[i]->aux->stack_depth = env->subprog_info[i].stack_depth; in jit_subprogs()
21591 if (env->subprog_info[i].priv_stack_mode == PRIV_STACK_ADAPTIVE) in jit_subprogs()
21592 func[i]->aux->jits_use_priv_stack = true; in jit_subprogs()
21594 func[i]->jit_requested = 1; in jit_subprogs()
21595 func[i]->blinding_requested = prog->blinding_requested; in jit_subprogs()
21596 func[i]->aux->kfunc_tab = prog->aux->kfunc_tab; in jit_subprogs()
21597 func[i]->aux->kfunc_btf_tab = prog->aux->kfunc_btf_tab; in jit_subprogs()
21598 func[i]->aux->linfo = prog->aux->linfo; in jit_subprogs()
21599 func[i]->aux->nr_linfo = prog->aux->nr_linfo; in jit_subprogs()
21600 func[i]->aux->jited_linfo = prog->aux->jited_linfo; in jit_subprogs()
21601 func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx; in jit_subprogs()
21602 func[i]->aux->arena = prog->aux->arena; in jit_subprogs()
21604 insn = func[i]->insnsi; in jit_subprogs()
21605 for (j = 0; j < func[i]->len; j++, insn++) { in jit_subprogs()
21606 if (BPF_CLASS(insn->code) == BPF_LDX && in jit_subprogs()
21607 (BPF_MODE(insn->code) == BPF_PROBE_MEM || in jit_subprogs()
21608 BPF_MODE(insn->code) == BPF_PROBE_MEM32 || in jit_subprogs()
21609 BPF_MODE(insn->code) == BPF_PROBE_MEM32SX || in jit_subprogs()
21610 BPF_MODE(insn->code) == BPF_PROBE_MEMSX)) in jit_subprogs()
21612 if ((BPF_CLASS(insn->code) == BPF_STX || in jit_subprogs()
21613 BPF_CLASS(insn->code) == BPF_ST) && in jit_subprogs()
21614 BPF_MODE(insn->code) == BPF_PROBE_MEM32) in jit_subprogs()
21616 if (BPF_CLASS(insn->code) == BPF_STX && in jit_subprogs()
21617 BPF_MODE(insn->code) == BPF_PROBE_ATOMIC) in jit_subprogs()
21620 func[i]->aux->num_exentries = num_exentries; in jit_subprogs()
21621 func[i]->aux->tail_call_reachable = env->subprog_info[i].tail_call_reachable; in jit_subprogs()
21622 func[i]->aux->exception_cb = env->subprog_info[i].is_exception_cb; in jit_subprogs()
21623 func[i]->aux->changes_pkt_data = env->subprog_info[i].changes_pkt_data; in jit_subprogs()
21624 func[i]->aux->might_sleep = env->subprog_info[i].might_sleep; in jit_subprogs()
21626 func[i]->aux->exception_boundary = env->seen_exception; in jit_subprogs()
21628 if (!func[i]->jited) { in jit_subprogs()
21629 err = -ENOTSUPP; in jit_subprogs()
21639 for (i = 0; i < env->subprog_cnt; i++) { in jit_subprogs()
21640 insn = func[i]->insnsi; in jit_subprogs()
21641 for (j = 0; j < func[i]->len; j++, insn++) { in jit_subprogs()
21643 subprog = insn->off; in jit_subprogs()
21644 insn[0].imm = (u32)(long)func[subprog]->bpf_func; in jit_subprogs()
21645 insn[1].imm = ((u64)(long)func[subprog]->bpf_func) >> 32; in jit_subprogs()
21650 subprog = insn->off; in jit_subprogs()
21651 insn->imm = BPF_CALL_IMM(func[subprog]->bpf_func); in jit_subprogs()
21665 func[i]->aux->func = func; in jit_subprogs()
21666 func[i]->aux->func_cnt = env->subprog_cnt - env->hidden_subprog_cnt; in jit_subprogs()
21667 func[i]->aux->real_func_cnt = env->subprog_cnt; in jit_subprogs()
21669 for (i = 0; i < env->subprog_cnt; i++) { in jit_subprogs()
21670 old_bpf_func = func[i]->bpf_func; in jit_subprogs()
21672 if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) { in jit_subprogs()
21673 verbose(env, "JIT doesn't support bpf-to-bpf calls\n"); in jit_subprogs()
21674 err = -ENOTSUPP; in jit_subprogs()
21684 for (i = 1; i < env->subprog_cnt; i++) { in jit_subprogs()
21690 for (i = 1; i < env->subprog_cnt; i++) in jit_subprogs()
21697 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { in jit_subprogs()
21699 insn[0].imm = env->insn_aux_data[i].call_imm; in jit_subprogs()
21700 insn[1].imm = insn->off; in jit_subprogs()
21701 insn->off = 0; in jit_subprogs()
21706 insn->off = env->insn_aux_data[i].call_imm; in jit_subprogs()
21707 subprog = find_subprog(env, i + insn->off + 1); in jit_subprogs()
21708 insn->imm = subprog; in jit_subprogs()
21711 prog->jited = 1; in jit_subprogs()
21712 prog->bpf_func = func[0]->bpf_func; in jit_subprogs()
21713 prog->jited_len = func[0]->jited_len; in jit_subprogs()
21714 prog->aux->extable = func[0]->aux->extable; in jit_subprogs()
21715 prog->aux->num_exentries = func[0]->aux->num_exentries; in jit_subprogs()
21716 prog->aux->func = func; in jit_subprogs()
21717 prog->aux->func_cnt = env->subprog_cnt - env->hidden_subprog_cnt; in jit_subprogs()
21718 prog->aux->real_func_cnt = env->subprog_cnt; in jit_subprogs()
21719 prog->aux->bpf_exception_cb = (void *)func[env->exception_callback_subprog]->bpf_func; in jit_subprogs()
21720 prog->aux->exception_boundary = func[0]->aux->exception_boundary; in jit_subprogs()
21728 for (i = 0; i < prog->aux->size_poke_tab; i++) { in jit_subprogs()
21729 map_ptr = prog->aux->poke_tab[i].tail_call.map; in jit_subprogs()
21730 map_ptr->ops->map_poke_untrack(map_ptr, prog->aux); in jit_subprogs()
21736 for (i = 0; i < env->subprog_cnt; i++) { in jit_subprogs()
21739 func[i]->aux->poke_tab = NULL; in jit_subprogs()
21745 prog->jit_requested = 0; in jit_subprogs()
21746 prog->blinding_requested = 0; in jit_subprogs()
21747 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { in jit_subprogs()
21750 insn->off = 0; in jit_subprogs()
21751 insn->imm = env->insn_aux_data[i].call_imm; in jit_subprogs()
21760 struct bpf_prog *prog = env->prog; in fixup_call_args()
21761 struct bpf_insn *insn = prog->insnsi; in fixup_call_args()
21767 if (env->prog->jit_requested && in fixup_call_args()
21768 !bpf_prog_is_offloaded(env->prog->aux)) { in fixup_call_args()
21772 if (err == -EFAULT) in fixup_call_args()
21777 		verbose(env, "calling kernel functions is not allowed in non-JITed programs\n"); in fixup_call_args()
21778 return -EINVAL; in fixup_call_args()
21780 if (env->subprog_cnt > 1 && env->prog->aux->tail_call_reachable) { in fixup_call_args()
21784 verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n"); in fixup_call_args()
21785 return -EINVAL; in fixup_call_args()
21787 for (i = 0; i < prog->len; i++, insn++) { in fixup_call_args()
21792 verbose(env, "callbacks are not allowed in non-JITed programs\n"); in fixup_call_args()
21793 return -EINVAL; in fixup_call_args()
21812 struct bpf_prog *prog = env->prog; in specialize_kfunc()
21830 seen_direct_write = env->seen_direct_write; in specialize_kfunc()
21836 /* restore env->seen_direct_write to its original value, since in specialize_kfunc()
21839 env->seen_direct_write = seen_direct_write; in specialize_kfunc()
21858 struct btf_struct_meta *kptr_struct_meta = insn_aux->kptr_struct_meta; in __fixup_collection_insert_kfunc()
21863 insn_buf[2] = BPF_MOV64_IMM(node_offset_reg, insn_aux->insert_off); in __fixup_collection_insert_kfunc()
21873 if (!insn->imm) { in fixup_kfunc_call()
21875 return -EINVAL; in fixup_kfunc_call()
21880 /* insn->imm has the btf func_id. Replace it with an offset relative to in fixup_kfunc_call()
21884 desc = find_kfunc_desc(env->prog, insn->imm, insn->off); in fixup_kfunc_call()
21887 insn->imm); in fixup_kfunc_call()
21888 return -EFAULT; in fixup_kfunc_call()
21892 insn->imm = BPF_CALL_IMM(desc->addr); in fixup_kfunc_call()
21893 if (insn->off) in fixup_kfunc_call()
21895 if (desc->func_id == special_kfunc_list[KF_bpf_obj_new_impl] || in fixup_kfunc_call()
21896 desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) { in fixup_kfunc_call()
21897 struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta; in fixup_kfunc_call()
21899 u64 obj_new_size = env->insn_aux_data[insn_idx].obj_new_size; in fixup_kfunc_call()
21901 if (desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl] && kptr_struct_meta) { in fixup_kfunc_call()
21904 return -EFAULT; in fixup_kfunc_call()
21912 } else if (desc->func_id == special_kfunc_list[KF_bpf_obj_drop_impl] || in fixup_kfunc_call()
21913 desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_drop_impl] || in fixup_kfunc_call()
21914 desc->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl]) { in fixup_kfunc_call()
21915 struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta; in fixup_kfunc_call()
21918 if (desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_drop_impl] && kptr_struct_meta) { in fixup_kfunc_call()
21921 return -EFAULT; in fixup_kfunc_call()
21924 if (desc->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl] && in fixup_kfunc_call()
21928 return -EFAULT; in fixup_kfunc_call()
21935 } else if (desc->func_id == special_kfunc_list[KF_bpf_list_push_back_impl] || in fixup_kfunc_call()
21936 desc->func_id == special_kfunc_list[KF_bpf_list_push_front_impl] || in fixup_kfunc_call()
21937 desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) { in fixup_kfunc_call()
21938 struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta; in fixup_kfunc_call()
21942 /* rbtree_add has extra 'less' arg, so args-to-fixup are in diff regs */ in fixup_kfunc_call()
21943 if (desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) { in fixup_kfunc_call()
21951 return -EFAULT; in fixup_kfunc_call()
21954 __fixup_collection_insert_kfunc(&env->insn_aux_data[insn_idx], struct_meta_reg, in fixup_kfunc_call()
21956 } else if (desc->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx] || in fixup_kfunc_call()
21957 desc->func_id == special_kfunc_list[KF_bpf_rdonly_cast]) { in fixup_kfunc_call()
21962 if (env->insn_aux_data[insn_idx].arg_prog) { in fixup_kfunc_call()
21963 u32 regno = env->insn_aux_data[insn_idx].arg_prog; in fixup_kfunc_call()
21964 struct bpf_insn ld_addrs[2] = { BPF_LD_IMM64(regno, (long)env->prog->aux) }; in fixup_kfunc_call()
21975 /* The function requires that the first instruction in 'patch' is insnsi[prog->len - 1] */
21978 struct bpf_subprog_info *info = env->subprog_info; in add_hidden_subprog()
21979 int cnt = env->subprog_cnt; in add_hidden_subprog()
21983 if (env->hidden_subprog_cnt) { in add_hidden_subprog()
21985 return -EFAULT; in add_hidden_subprog()
21989 * in bpf_patch_insn_data are no-ops. in add_hidden_subprog()
21991 prog = bpf_patch_insn_data(env, env->prog->len - 1, patch, len); in add_hidden_subprog()
21993 return -ENOMEM; in add_hidden_subprog()
21994 env->prog = prog; in add_hidden_subprog()
21996 info[cnt].start = prog->len - len + 1; in add_hidden_subprog()
21997 env->subprog_cnt++; in add_hidden_subprog()
21998 env->hidden_subprog_cnt++; in add_hidden_subprog()
22002 /* Do various post-verification rewrites in a single program pass.
22007 struct bpf_prog *prog = env->prog; in do_misc_fixups()
22008 enum bpf_attach_type eatype = prog->expected_attach_type; in do_misc_fixups()
22010 struct bpf_insn *insn = prog->insnsi; in do_misc_fixups()
22012 const int insn_cnt = prog->len; in do_misc_fixups()
22015 struct bpf_insn *insn_buf = env->insn_buf; in do_misc_fixups()
22019 struct bpf_subprog_info *subprogs = env->subprog_info; in do_misc_fixups()
22023 if (env->seen_exception && !env->exception_callback_subprog) { in do_misc_fixups()
22026 *patch++ = env->prog->insnsi[insn_cnt - 1]; in do_misc_fixups()
22029 ret = add_hidden_subprog(env, insn_buf, patch - insn_buf); in do_misc_fixups()
22032 prog = env->prog; in do_misc_fixups()
22033 insn = prog->insnsi; in do_misc_fixups()
22035 env->exception_callback_subprog = env->subprog_cnt - 1; in do_misc_fixups()
22037 mark_subprog_exc_cb(env, env->exception_callback_subprog); in do_misc_fixups()
22041 if (insn->code == (BPF_ALU64 | BPF_MOV | BPF_X) && insn->imm) { in do_misc_fixups()
22042 if ((insn->off == BPF_ADDR_SPACE_CAST && insn->imm == 1) || in do_misc_fixups()
22043 (((struct bpf_map *)env->prog->aux->arena)->map_flags & BPF_F_NO_USER_CONV)) { in do_misc_fixups()
22044 /* convert to 32-bit mov that clears upper 32-bit */ in do_misc_fixups()
22045 insn->code = BPF_ALU | BPF_MOV | BPF_X; in do_misc_fixups()
22047 insn->off = 0; in do_misc_fixups()
22048 insn->imm = 0; in do_misc_fixups()
22053 if (env->insn_aux_data[i + delta].needs_zext) in do_misc_fixups()
22054 /* Convert BPF_CLASS(insn->code) == BPF_ALU64 to 32-bit ALU */ in do_misc_fixups()
22055 insn->code = BPF_ALU | BPF_OP(insn->code) | BPF_SRC(insn->code); in do_misc_fixups()
22057 /* Make sdiv/smod divide-by-minus-one exceptions impossible. */ in do_misc_fixups()
22058 if ((insn->code == (BPF_ALU64 | BPF_MOD | BPF_K) || in do_misc_fixups()
22059 insn->code == (BPF_ALU64 | BPF_DIV | BPF_K) || in do_misc_fixups()
22060 insn->code == (BPF_ALU | BPF_MOD | BPF_K) || in do_misc_fixups()
22061 insn->code == (BPF_ALU | BPF_DIV | BPF_K)) && in do_misc_fixups()
22062 insn->off == 1 && insn->imm == -1) { in do_misc_fixups()
22063 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64; in do_misc_fixups()
22064 bool isdiv = BPF_OP(insn->code) == BPF_DIV; in do_misc_fixups()
22069 BPF_NEG | BPF_K, insn->dst_reg, in do_misc_fixups()
22072 *patch++ = BPF_MOV32_IMM(insn->dst_reg, 0); in do_misc_fixups()
22074 cnt = patch - insn_buf; in do_misc_fixups()
22078 return -ENOMEM; in do_misc_fixups()
22080 delta += cnt - 1; in do_misc_fixups()
22081 env->prog = prog = new_prog; in do_misc_fixups()
22082 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
22086 /* Make divide-by-zero and divide-by-minus-one exceptions impossible. */ in do_misc_fixups()
22087 if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) || in do_misc_fixups()
22088 insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) || in do_misc_fixups()
22089 insn->code == (BPF_ALU | BPF_MOD | BPF_X) || in do_misc_fixups()
22090 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) { in do_misc_fixups()
22091 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64; in do_misc_fixups()
22092 bool isdiv = BPF_OP(insn->code) == BPF_DIV; in do_misc_fixups()
22093 bool is_sdiv = isdiv && insn->off == 1; in do_misc_fixups()
22094 bool is_smod = !isdiv && insn->off == 1; in do_misc_fixups()
22098 /* [R,W]x sdiv 0 -> 0 in do_misc_fixups()
22099 * LLONG_MIN sdiv -1 -> LLONG_MIN in do_misc_fixups()
22100 * INT_MIN sdiv -1 -> INT_MIN in do_misc_fixups()
22102 *patch++ = BPF_MOV64_REG(BPF_REG_AX, insn->src_reg); in do_misc_fixups()
22113 BPF_MOV | BPF_K, insn->dst_reg, in do_misc_fixups()
22115 /* BPF_NEG(LLONG_MIN) == -LLONG_MIN == LLONG_MIN */ in do_misc_fixups()
22117 BPF_NEG | BPF_K, insn->dst_reg, in do_misc_fixups()
22121 cnt = patch - insn_buf; in do_misc_fixups()
22123 /* [R,W]x mod 0 -> [R,W]x */ in do_misc_fixups()
22124 /* [R,W]x mod -1 -> 0 */ in do_misc_fixups()
22125 *patch++ = BPF_MOV64_REG(BPF_REG_AX, insn->src_reg); in do_misc_fixups()
22135 *patch++ = BPF_MOV32_IMM(insn->dst_reg, 0); in do_misc_fixups()
22141 *patch++ = BPF_MOV32_REG(insn->dst_reg, insn->dst_reg); in do_misc_fixups()
22143 cnt = patch - insn_buf; in do_misc_fixups()
22145 /* [R,W]x div 0 -> 0 */ in do_misc_fixups()
22147 BPF_JNE | BPF_K, insn->src_reg, in do_misc_fixups()
22149 *patch++ = BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg); in do_misc_fixups()
22152 cnt = patch - insn_buf; in do_misc_fixups()
22154 /* [R,W]x mod 0 -> [R,W]x */ in do_misc_fixups()
22156 BPF_JEQ | BPF_K, insn->src_reg, in do_misc_fixups()
22162 *patch++ = BPF_MOV32_REG(insn->dst_reg, insn->dst_reg); in do_misc_fixups()
22164 cnt = patch - insn_buf; in do_misc_fixups()
22169 return -ENOMEM; in do_misc_fixups()
22171 delta += cnt - 1; in do_misc_fixups()
22172 env->prog = prog = new_prog; in do_misc_fixups()
22173 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
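			/* Sketch of the plain unsigned-divide case assembled
			 * above (illustrative; the JNE/XOR lines match the
			 * fragment): fall through to the original divide when
			 * the divisor is non-zero, otherwise zero the
			 * destination and jump over it.
			 */
			*patch++ = BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
						BPF_JNE | BPF_K, insn->src_reg,
						0, 2, 0);
			*patch++ = BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg);
			*patch++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
			*patch++ = *insn;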
22177 /* Make it impossible to de-reference a userspace address */ in do_misc_fixups()
22178 if (BPF_CLASS(insn->code) == BPF_LDX && in do_misc_fixups()
22179 (BPF_MODE(insn->code) == BPF_PROBE_MEM || in do_misc_fixups()
22180 BPF_MODE(insn->code) == BPF_PROBE_MEMSX)) { in do_misc_fixups()
22187 *patch++ = BPF_MOV64_REG(BPF_REG_AX, insn->src_reg); in do_misc_fixups()
22188 if (insn->off) in do_misc_fixups()
22189 *patch++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_AX, insn->off); in do_misc_fixups()
22194 *patch++ = BPF_MOV64_IMM(insn->dst_reg, 0); in do_misc_fixups()
22196 cnt = patch - insn_buf; in do_misc_fixups()
22199 return -ENOMEM; in do_misc_fixups()
22201 delta += cnt - 1; in do_misc_fixups()
22202 env->prog = prog = new_prog; in do_misc_fixups()
22203 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
22208 if (BPF_CLASS(insn->code) == BPF_LD && in do_misc_fixups()
22209 (BPF_MODE(insn->code) == BPF_ABS || in do_misc_fixups()
22210 BPF_MODE(insn->code) == BPF_IND)) { in do_misc_fixups()
22211 cnt = env->ops->gen_ld_abs(insn, insn_buf); in do_misc_fixups()
22214 return -EFAULT; in do_misc_fixups()
22219 return -ENOMEM; in do_misc_fixups()
22221 delta += cnt - 1; in do_misc_fixups()
22222 env->prog = prog = new_prog; in do_misc_fixups()
22223 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
22228 if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) || in do_misc_fixups()
22229 insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) { in do_misc_fixups()
22236 aux = &env->insn_aux_data[i + delta]; in do_misc_fixups()
22237 if (!aux->alu_state || in do_misc_fixups()
22238 aux->alu_state == BPF_ALU_NON_POINTER) in do_misc_fixups()
22241 isneg = aux->alu_state & BPF_ALU_NEG_VALUE; in do_misc_fixups()
22242 issrc = (aux->alu_state & BPF_ALU_SANITIZE) == in do_misc_fixups()
22244 isimm = aux->alu_state & BPF_ALU_IMMEDIATE; in do_misc_fixups()
22246 off_reg = issrc ? insn->src_reg : insn->dst_reg; in do_misc_fixups()
22248 *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit); in do_misc_fixups()
22251 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); in do_misc_fixups()
22252 *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit); in do_misc_fixups()
22260 *patch++ = BPF_MOV64_REG(insn->dst_reg, insn->src_reg); in do_misc_fixups()
22261 insn->src_reg = BPF_REG_AX; in do_misc_fixups()
22263 insn->code = insn->code == code_add ? in do_misc_fixups()
22267 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); in do_misc_fixups()
22268 cnt = patch - insn_buf; in do_misc_fixups()
22272 return -ENOMEM; in do_misc_fixups()
22274 delta += cnt - 1; in do_misc_fixups()
22275 env->prog = prog = new_prog; in do_misc_fixups()
22276 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
22281 int stack_off_cnt = -stack_depth - 16; in do_misc_fixups()
22284 			 * Two 8-byte slots: depth-16 stores the count, and in do_misc_fixups()
22285 * depth-8 stores the start timestamp of the loop. in do_misc_fixups()
22299 if (insn->off >= 0) in do_misc_fixups()
22300 insn_buf[1] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_AX, 0, insn->off + 5); in do_misc_fixups()
22302 insn_buf[1] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_AX, 0, insn->off - 1); in do_misc_fixups()
22317 return -ENOMEM; in do_misc_fixups()
22319 delta += cnt - 1; in do_misc_fixups()
22320 env->prog = prog = new_prog; in do_misc_fixups()
22321 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
22324 int stack_off = -stack_depth - 8; in do_misc_fixups()
22328 if (insn->off >= 0) in do_misc_fixups()
22329 insn_buf[1] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_AX, 0, insn->off + 2); in do_misc_fixups()
22331 insn_buf[1] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_AX, 0, insn->off - 1); in do_misc_fixups()
22338 return -ENOMEM; in do_misc_fixups()
22340 delta += cnt - 1; in do_misc_fixups()
22341 env->prog = prog = new_prog; in do_misc_fixups()
22342 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
22346 if (insn->code != (BPF_JMP | BPF_CALL)) in do_misc_fixups()
22348 if (insn->src_reg == BPF_PSEUDO_CALL) in do_misc_fixups()
22350 if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) { in do_misc_fixups()
22359 return -ENOMEM; in do_misc_fixups()
22361 delta += cnt - 1; in do_misc_fixups()
22362 env->prog = prog = new_prog; in do_misc_fixups()
22363 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
22368 if (bpf_jit_inlines_helper_call(insn->imm)) in do_misc_fixups()
22371 if (insn->imm == BPF_FUNC_get_route_realm) in do_misc_fixups()
22372 prog->dst_needed = 1; in do_misc_fixups()
22373 if (insn->imm == BPF_FUNC_get_prandom_u32) in do_misc_fixups()
22375 if (insn->imm == BPF_FUNC_override_return) in do_misc_fixups()
22376 prog->kprobe_override = 1; in do_misc_fixups()
22377 if (insn->imm == BPF_FUNC_tail_call) { in do_misc_fixups()
22383 prog->cb_access = 1; in do_misc_fixups()
22385 prog->aux->stack_depth = MAX_BPF_STACK; in do_misc_fixups()
22386 prog->aux->max_pkt_offset = MAX_PACKET_OFF; in do_misc_fixups()
22393 insn->imm = 0; in do_misc_fixups()
22394 insn->code = BPF_JMP | BPF_TAIL_CALL; in do_misc_fixups()
22396 aux = &env->insn_aux_data[i + delta]; in do_misc_fixups()
22397 if (env->bpf_capable && !prog->blinding_requested && in do_misc_fixups()
22398 prog->jit_requested && in do_misc_fixups()
22404 .tail_call.map = aux->map_ptr_state.map_ptr, in do_misc_fixups()
22415 insn->imm = ret + 1; in do_misc_fixups()
22425 * index &= array->index_mask; in do_misc_fixups()
22426 * to avoid out-of-bounds cpu speculation in do_misc_fixups()
22430 return -EINVAL; in do_misc_fixups()
22433 map_ptr = aux->map_ptr_state.map_ptr; in do_misc_fixups()
22435 map_ptr->max_entries, 2); in do_misc_fixups()
22439 map)->index_mask); in do_misc_fixups()
22444 return -ENOMEM; in do_misc_fixups()
22446 delta += cnt - 1; in do_misc_fixups()
22447 env->prog = prog = new_prog; in do_misc_fixups()
22448 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
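			/* Sketch of the two instructions inserted in front of an
			 * unprivileged tail call (illustrative; max_entries and
			 * index_mask appear in the fragment above): bail out when
			 * the index is out of range, then mask it so the CPU
			 * cannot speculate past the bounds check.
			 */
			insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
						  map_ptr->max_entries, 2);
			insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
						    container_of(map_ptr,
								 struct bpf_array,
								 map)->index_mask);
			insn_buf[2] = *insn;
			cnt = 3;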
22452 if (insn->imm == BPF_FUNC_timer_set_callback) { in do_misc_fixups()
22461 * Those that were not bpf_timer_init-ed will return -EINVAL. in do_misc_fixups()
22463 * Those that were not both bpf_timer_init-ed and in do_misc_fixups()
22464 * bpf_timer_set_callback-ed will return -EINVAL. in do_misc_fixups()
22467 BPF_LD_IMM64(BPF_REG_3, (long)prog->aux), in do_misc_fixups()
22477 return -ENOMEM; in do_misc_fixups()
22479 delta += cnt - 1; in do_misc_fixups()
22480 env->prog = prog = new_prog; in do_misc_fixups()
22481 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
22485 if (is_storage_get_function(insn->imm)) { in do_misc_fixups()
22487 env->insn_aux_data[i + delta].storage_get_func_atomic) in do_misc_fixups()
22496 return -ENOMEM; in do_misc_fixups()
22498 delta += cnt - 1; in do_misc_fixups()
22499 env->prog = prog = new_prog; in do_misc_fixups()
22500 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
22505 if (env->insn_aux_data[i + delta].call_with_percpu_alloc_ptr) { in do_misc_fixups()
22515 return -ENOMEM; in do_misc_fixups()
22517 delta += cnt - 1; in do_misc_fixups()
22518 env->prog = prog = new_prog; in do_misc_fixups()
22519 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
22527 if (prog->jit_requested && BITS_PER_LONG == 64 && in do_misc_fixups()
22528 (insn->imm == BPF_FUNC_map_lookup_elem || in do_misc_fixups()
22529 insn->imm == BPF_FUNC_map_update_elem || in do_misc_fixups()
22530 insn->imm == BPF_FUNC_map_delete_elem || in do_misc_fixups()
22531 insn->imm == BPF_FUNC_map_push_elem || in do_misc_fixups()
22532 insn->imm == BPF_FUNC_map_pop_elem || in do_misc_fixups()
22533 insn->imm == BPF_FUNC_map_peek_elem || in do_misc_fixups()
22534 insn->imm == BPF_FUNC_redirect_map || in do_misc_fixups()
22535 insn->imm == BPF_FUNC_for_each_map_elem || in do_misc_fixups()
22536 insn->imm == BPF_FUNC_map_lookup_percpu_elem)) { in do_misc_fixups()
22537 aux = &env->insn_aux_data[i + delta]; in do_misc_fixups()
22541 map_ptr = aux->map_ptr_state.map_ptr; in do_misc_fixups()
22542 ops = map_ptr->ops; in do_misc_fixups()
22543 if (insn->imm == BPF_FUNC_map_lookup_elem && in do_misc_fixups()
22544 ops->map_gen_lookup) { in do_misc_fixups()
22545 cnt = ops->map_gen_lookup(map_ptr, insn_buf); in do_misc_fixups()
22546 if (cnt == -EOPNOTSUPP) in do_misc_fixups()
22550 return -EFAULT; in do_misc_fixups()
22556 return -ENOMEM; in do_misc_fixups()
22558 delta += cnt - 1; in do_misc_fixups()
22559 env->prog = prog = new_prog; in do_misc_fixups()
22560 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
22564 BUILD_BUG_ON(!__same_type(ops->map_lookup_elem, in do_misc_fixups()
22566 BUILD_BUG_ON(!__same_type(ops->map_delete_elem, in do_misc_fixups()
22568 BUILD_BUG_ON(!__same_type(ops->map_update_elem, in do_misc_fixups()
22571 BUILD_BUG_ON(!__same_type(ops->map_push_elem, in do_misc_fixups()
22574 BUILD_BUG_ON(!__same_type(ops->map_pop_elem, in do_misc_fixups()
22576 BUILD_BUG_ON(!__same_type(ops->map_peek_elem, in do_misc_fixups()
22578 BUILD_BUG_ON(!__same_type(ops->map_redirect, in do_misc_fixups()
22580 BUILD_BUG_ON(!__same_type(ops->map_for_each_callback, in do_misc_fixups()
22585 BUILD_BUG_ON(!__same_type(ops->map_lookup_percpu_elem, in do_misc_fixups()
22589 switch (insn->imm) { in do_misc_fixups()
22591 insn->imm = BPF_CALL_IMM(ops->map_lookup_elem); in do_misc_fixups()
22594 insn->imm = BPF_CALL_IMM(ops->map_update_elem); in do_misc_fixups()
22597 insn->imm = BPF_CALL_IMM(ops->map_delete_elem); in do_misc_fixups()
22600 insn->imm = BPF_CALL_IMM(ops->map_push_elem); in do_misc_fixups()
22603 insn->imm = BPF_CALL_IMM(ops->map_pop_elem); in do_misc_fixups()
22606 insn->imm = BPF_CALL_IMM(ops->map_peek_elem); in do_misc_fixups()
22609 insn->imm = BPF_CALL_IMM(ops->map_redirect); in do_misc_fixups()
22612 insn->imm = BPF_CALL_IMM(ops->map_for_each_callback); in do_misc_fixups()
22615 insn->imm = BPF_CALL_IMM(ops->map_lookup_percpu_elem); in do_misc_fixups()
22623 if (prog->jit_requested && BITS_PER_LONG == 64 && in do_misc_fixups()
22624 insn->imm == BPF_FUNC_jiffies64) { in do_misc_fixups()
22639 return -ENOMEM; in do_misc_fixups()
22641 delta += cnt - 1; in do_misc_fixups()
22642 env->prog = prog = new_prog; in do_misc_fixups()
22643 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
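			/* Sketch of the inlined bpf_jiffies64() body
			 * (illustrative): load the address of the kernel's
			 * jiffies counter as a 64-bit immediate into R0 and
			 * dereference it, so no helper call remains.
			 */
			struct bpf_insn ld_jiffies_addr[2] = {
				BPF_LD_IMM64(BPF_REG_0, (unsigned long)&jiffies),
			};

			insn_buf[0] = ld_jiffies_addr[0];
			insn_buf[1] = ld_jiffies_addr[1];
			insn_buf[2] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0);
			cnt = 3;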
22649 if (insn->imm == BPF_FUNC_get_smp_processor_id && in do_misc_fixups()
22650 verifier_inlines_helper_call(env, insn->imm)) { in do_misc_fixups()
22667 return -ENOMEM; in do_misc_fixups()
22669 delta += cnt - 1; in do_misc_fixups()
22670 env->prog = prog = new_prog; in do_misc_fixups()
22671 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
22677 insn->imm == BPF_FUNC_get_func_arg) { in do_misc_fixups()
22678 /* Load nr_args from ctx - 8 */ in do_misc_fixups()
22679 insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8); in do_misc_fixups()
22687 insn_buf[8] = BPF_MOV64_IMM(BPF_REG_0, -EINVAL); in do_misc_fixups()
22692 return -ENOMEM; in do_misc_fixups()
22694 delta += cnt - 1; in do_misc_fixups()
22695 env->prog = prog = new_prog; in do_misc_fixups()
22696 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
22702 insn->imm == BPF_FUNC_get_func_ret) { in do_misc_fixups()
22705 /* Load nr_args from ctx - 8 */ in do_misc_fixups()
22706 insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8); in do_misc_fixups()
22714 insn_buf[0] = BPF_MOV64_IMM(BPF_REG_0, -EOPNOTSUPP); in do_misc_fixups()
22720 return -ENOMEM; in do_misc_fixups()
22722 delta += cnt - 1; in do_misc_fixups()
22723 env->prog = prog = new_prog; in do_misc_fixups()
22724 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
22730 insn->imm == BPF_FUNC_get_func_arg_cnt) { in do_misc_fixups()
22731 /* Load nr_args from ctx - 8 */ in do_misc_fixups()
22732 insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8); in do_misc_fixups()
22736 return -ENOMEM; in do_misc_fixups()
22738 env->prog = prog = new_prog; in do_misc_fixups()
22739 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
22745 insn->imm == BPF_FUNC_get_func_ip) { in do_misc_fixups()
22746 /* Load IP address from ctx - 16 */ in do_misc_fixups()
22747 insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -16); in do_misc_fixups()
22751 return -ENOMEM; in do_misc_fixups()
22753 env->prog = prog = new_prog; in do_misc_fixups()
22754 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
22760 prog->jit_requested && BITS_PER_LONG == 64 && in do_misc_fixups()
22761 insn->imm == BPF_FUNC_get_branch_snapshot) { in do_misc_fixups()
22774 /* if (unlikely(flags)) return -EINVAL */ in do_misc_fixups()
22779 * divide-by-3 through multiplication, followed by further in do_misc_fixups()
22780 * division by 8 through 3-bit right shift. in do_misc_fixups()
22792 /* if (entry_cnt == 0) return -ENOENT */ in do_misc_fixups()
22797 /* return -EINVAL; */ in do_misc_fixups()
22798 insn_buf[8] = BPF_MOV64_IMM(BPF_REG_0, -EINVAL); in do_misc_fixups()
22800 /* return -ENOENT; */ in do_misc_fixups()
22801 insn_buf[10] = BPF_MOV64_IMM(BPF_REG_0, -ENOENT); in do_misc_fixups()
22806 return -ENOMEM; in do_misc_fixups()
22808 delta += cnt - 1; in do_misc_fixups()
22809 env->prog = prog = new_prog; in do_misc_fixups()
22810 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
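/* Hypothetical user-space demonstration of the multiply-then-shift trick
 * named in the comment above: dividing a byte size by 24 (= 3 * 8) without
 * a division instruction. 0xaaab is one constant close to 2^17 / 3; the
 * exact constant and shift the kernel uses are not shown in this excerpt.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t entries_from_size(uint32_t size)
{
	/* (size * 0xaaab) >> 17 == size / 3 for any size below 128 KiB,
	 * and the extra >> 3 divides by 8, giving size / 24 overall.
	 */
	return ((size * 0xaaabu) >> 17) >> 3;
}

int main(void)
{
	for (uint32_t size = 0; size < 0x1000; size++)
		assert(entries_from_size(size) == size / 24);
	return 0;
}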
22815 if (prog->jit_requested && BITS_PER_LONG == 64 && in do_misc_fixups()
22816 insn->imm == BPF_FUNC_kptr_xchg && in do_misc_fixups()
22824 return -ENOMEM; in do_misc_fixups()
22826 delta += cnt - 1; in do_misc_fixups()
22827 env->prog = prog = new_prog; in do_misc_fixups()
22828 insn = new_prog->insnsi + i + delta; in do_misc_fixups()
22832 fn = env->ops->get_func_proto(insn->imm, env->prog); in do_misc_fixups()
22834 * programs to call them, must be real in-kernel functions in do_misc_fixups()
22836 if (!fn->func) { in do_misc_fixups()
22839 func_id_name(insn->imm), insn->imm); in do_misc_fixups()
22840 return -EFAULT; in do_misc_fixups()
22842 insn->imm = fn->func - __bpf_call_base; in do_misc_fixups()
22849 if (stack_depth > MAX_BPF_STACK && !prog->jit_requested) { in do_misc_fixups()
22852 return -EINVAL; in do_misc_fixups()
22862 env->prog->aux->stack_depth = subprogs[0].stack_depth; in do_misc_fixups()
22863 for (i = 0; i < env->subprog_cnt; i++) { in do_misc_fixups()
22874 return -EFAULT; in do_misc_fixups()
22879 insn_buf[cnt++] = BPF_ST_MEM(BPF_DW, BPF_REG_FP, -stack_depth, in do_misc_fixups()
22881 insn_buf[cnt++] = BPF_ST_MEM(BPF_DW, BPF_REG_FP, -stack_depth + 8, 0); in do_misc_fixups()
22884 insn_buf[cnt++] = BPF_ST_MEM(BPF_DW, BPF_REG_FP, -stack_depth, in do_misc_fixups()
22888 insn_buf[cnt++] = env->prog->insnsi[subprog_start]; in do_misc_fixups()
22892 return -ENOMEM; in do_misc_fixups()
22893 env->prog = prog = new_prog; in do_misc_fixups()
22900 WARN_ON(adjust_jmp_off(env->prog, subprog_start, delta)); in do_misc_fixups()
22904 for (i = 0; i < prog->aux->size_poke_tab; i++) { in do_misc_fixups()
22905 map_ptr = prog->aux->poke_tab[i].tail_call.map; in do_misc_fixups()
22906 if (!map_ptr->ops->map_poke_track || in do_misc_fixups()
22907 !map_ptr->ops->map_poke_untrack || in do_misc_fixups()
22908 !map_ptr->ops->map_poke_run) { in do_misc_fixups()
22910 return -EFAULT; in do_misc_fixups()
22913 ret = map_ptr->ops->map_poke_track(map_ptr, prog->aux); in do_misc_fixups()
22920 sort_kfunc_descs_by_imm_off(env->prog); in do_misc_fixups()
22938 struct bpf_insn *insn_buf = env->insn_buf; in inline_bpf_loop()
22953 insn_buf[cnt++] = BPF_MOV32_IMM(BPF_REG_0, -E2BIG); in inline_bpf_loop()
22976 insn_buf[cnt++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -6); in inline_bpf_loop()
22992 callback_start = env->subprog_info[callback_subprogno].start; in inline_bpf_loop()
22995 callback_offset = callback_start - call_insn_offset - 1; in inline_bpf_loop()
22996 new_prog->insnsi[call_insn_offset].imm = callback_offset; in inline_bpf_loop()
23003 return insn->code == (BPF_JMP | BPF_CALL) && in is_bpf_loop_call()
23004 insn->src_reg == 0 && in is_bpf_loop_call()
23005 insn->imm == BPF_FUNC_loop; in is_bpf_loop_call()
23008 /* For all sub-programs in the program (including main) check
23019 struct bpf_subprog_info *subprogs = env->subprog_info; in optimize_bpf_loop()
23021 struct bpf_insn *insn = env->prog->insnsi; in optimize_bpf_loop()
23022 int insn_cnt = env->prog->len; in optimize_bpf_loop()
23024 u16 stack_depth_roundup = round_up(stack_depth, 8) - stack_depth; in optimize_bpf_loop()
23029 &env->insn_aux_data[i + delta].loop_inline_state; in optimize_bpf_loop()
23031 if (is_bpf_loop_call(insn) && inline_state->fit_for_inline) { in optimize_bpf_loop()
23037 -(stack_depth + stack_depth_extra), in optimize_bpf_loop()
23038 inline_state->callback_subprogno, in optimize_bpf_loop()
23041 return -ENOMEM; in optimize_bpf_loop()
23043 delta += cnt - 1; in optimize_bpf_loop()
23044 env->prog = new_prog; in optimize_bpf_loop()
23045 insn = new_prog->insnsi + i + delta; in optimize_bpf_loop()
23052 stack_depth_roundup = round_up(stack_depth, 8) - stack_depth; in optimize_bpf_loop()
23057 env->prog->aux->stack_depth = env->subprog_info[0].stack_depth; in optimize_bpf_loop()
23067 struct bpf_subprog_info *subprog = env->subprog_info; in remove_fastcall_spills_fills()
23068 struct bpf_insn_aux_data *aux = env->insn_aux_data; in remove_fastcall_spills_fills()
23069 struct bpf_insn *insn = env->prog->insnsi; in remove_fastcall_spills_fills()
23070 int insn_cnt = env->prog->len; in remove_fastcall_spills_fills()
23080 *(insn - j) = NOP; in remove_fastcall_spills_fills()
23085 if ((subprog + 1)->start == i + 1) { in remove_fastcall_spills_fills()
23086 if (modified && !subprog->keep_fastcall_stack) in remove_fastcall_spills_fills()
23087 subprog->stack_depth = -subprog->fastcall_stack_off; in remove_fastcall_spills_fills()
23103 free_verifier_state(env->cur_state, true); in free_states()
23104 env->cur_state = NULL; in free_states()
23107 list_for_each_safe(pos, tmp, &env->free_list) { in free_states()
23109 free_verifier_state(&sl->state, false); in free_states()
23112 INIT_LIST_HEAD(&env->free_list); in free_states()
23114 for (i = 0; i < env->scc_cnt; ++i) { in free_states()
23115 info = env->scc_info[i]; in free_states()
23118 for (j = 0; j < info->num_visits; j++) in free_states()
23119 free_backedges(&info->visits[j]); in free_states()
23121 env->scc_info[i] = NULL; in free_states()
23124 if (!env->explored_states) in free_states()
23128 head = &env->explored_states[i]; in free_states()
23132 free_verifier_state(&sl->state, false); in free_states()
23135 INIT_LIST_HEAD(&env->explored_states[i]); in free_states()
23141 bool pop_log = !(env->log.level & BPF_LOG_LEVEL2); in do_check_common()
23143 struct bpf_prog_aux *aux = env->prog->aux; in do_check_common()
23148 env->prev_linfo = NULL; in do_check_common()
23149 env->pass_cnt++; in do_check_common()
23153 return -ENOMEM; in do_check_common()
23154 state->curframe = 0; in do_check_common()
23155 state->speculative = false; in do_check_common()
23156 state->branches = 1; in do_check_common()
23157 state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL_ACCOUNT); in do_check_common()
23158 if (!state->frame[0]) { in do_check_common()
23160 return -ENOMEM; in do_check_common()
23162 env->cur_state = state; in do_check_common()
23163 init_func_state(env, state->frame[0], in do_check_common()
23167 state->first_insn_idx = env->subprog_info[subprog].start; in do_check_common()
23168 state->last_insn_idx = -1; in do_check_common()
23170 regs = state->frame[state->curframe]->regs; in do_check_common()
23171 if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) { in do_check_common()
23182 state->frame[0]->in_exception_callback_fn = true; in do_check_common()
23187 if (sub->arg_cnt != 1 || sub->args[0].arg_type != ARG_ANYTHING) { in do_check_common()
23189 ret = -EINVAL; in do_check_common()
23193 for (i = BPF_REG_1; i <= sub->arg_cnt; i++) { in do_check_common()
23194 arg = &sub->args[i - BPF_REG_1]; in do_check_common()
23197 if (arg->arg_type == ARG_PTR_TO_CTX) { in do_check_common()
23198 reg->type = PTR_TO_CTX; in do_check_common()
23200 } else if (arg->arg_type == ARG_ANYTHING) { in do_check_common()
23201 reg->type = SCALAR_VALUE; in do_check_common()
23203 } else if (arg->arg_type == (ARG_PTR_TO_DYNPTR | MEM_RDONLY)) { in do_check_common()
23205 __mark_dynptr_reg(reg, BPF_DYNPTR_TYPE_LOCAL, true, ++env->id_gen); in do_check_common()
23206 } else if (base_type(arg->arg_type) == ARG_PTR_TO_MEM) { in do_check_common()
23207 reg->type = PTR_TO_MEM; in do_check_common()
23208 reg->type |= arg->arg_type & in do_check_common()
23211 reg->mem_size = arg->mem_size; in do_check_common()
23212 if (arg->arg_type & PTR_MAYBE_NULL) in do_check_common()
23213 reg->id = ++env->id_gen; in do_check_common()
23214 } else if (base_type(arg->arg_type) == ARG_PTR_TO_BTF_ID) { in do_check_common()
23215 reg->type = PTR_TO_BTF_ID; in do_check_common()
23216 if (arg->arg_type & PTR_MAYBE_NULL) in do_check_common()
23217 reg->type |= PTR_MAYBE_NULL; in do_check_common()
23218 if (arg->arg_type & PTR_UNTRUSTED) in do_check_common()
23219 reg->type |= PTR_UNTRUSTED; in do_check_common()
23220 if (arg->arg_type & PTR_TRUSTED) in do_check_common()
23221 reg->type |= PTR_TRUSTED; in do_check_common()
23223 reg->btf = bpf_get_btf_vmlinux(); /* can't fail at this point */ in do_check_common()
23224 reg->btf_id = arg->btf_id; in do_check_common()
23225 reg->id = ++env->id_gen; in do_check_common()
23226 } else if (base_type(arg->arg_type) == ARG_PTR_TO_ARENA) { in do_check_common()
23231 i - BPF_REG_1, arg->arg_type); in do_check_common()
23232 ret = -EFAULT; in do_check_common()
23241 if (env->prog->aux->func_info_aux) { in do_check_common()
23243 if (ret || sub->arg_cnt != 1 || sub->args[0].arg_type != ARG_PTR_TO_CTX) in do_check_common()
23244 env->prog->aux->func_info_aux[0].unreliable = true; in do_check_common()
23253 if (!subprog && env->prog->type == BPF_PROG_TYPE_STRUCT_OPS) { in do_check_common()
23254 for (i = 0; i < aux->ctx_arg_info_size; i++) in do_check_common()
23255 aux->ctx_arg_info[i].ref_obj_id = aux->ctx_arg_info[i].refcounted ? in do_check_common()
23262 bpf_vlog_reset(&env->log, 0); in do_check_common()
23289 struct bpf_prog_aux *aux = env->prog->aux; in do_check_subprogs()
23293 if (!aux->func_info) in do_check_subprogs()
23297 if (env->exception_callback_subprog) in do_check_subprogs()
23298 subprog_aux(env, env->exception_callback_subprog)->called = true; in do_check_subprogs()
23302 for (i = 1; i < env->subprog_cnt; i++) { in do_check_subprogs()
23307 if (!sub_aux->called || sub_aux->verified) in do_check_subprogs()
23310 env->insn_idx = env->subprog_info[i].start; in do_check_subprogs()
23311 WARN_ON_ONCE(env->insn_idx == 0); in do_check_subprogs()
23315 } else if (env->log.level & BPF_LOG_LEVEL) { in do_check_subprogs()
23324 sub_aux->verified = true; in do_check_subprogs()
23341 env->insn_idx = 0; in do_check_main()
23344 env->prog->aux->stack_depth = env->subprog_info[0].stack_depth; in do_check_main()
23353 if (env->log.level & BPF_LOG_STATS) { in print_verification_stats()
23355 div_u64(env->verification_time, 1000)); in print_verification_stats()
23357 for (i = 0; i < env->subprog_cnt; i++) { in print_verification_stats()
23358 u32 depth = env->subprog_info[i].stack_depth; in print_verification_stats()
23361 if (i + 1 < env->subprog_cnt) in print_verification_stats()
23368 env->insn_processed, BPF_COMPLEXITY_LIMIT_INSNS, in print_verification_stats()
23369 env->max_states_per_insn, env->total_states, in print_verification_stats()
23370 env->peak_states, env->longest_mark_read_walk); in print_verification_stats()
23376 prog->aux->ctx_arg_info = kmemdup_array(info, cnt, sizeof(*info), GFP_KERNEL_ACCOUNT); in bpf_prog_ctx_arg_info_init()
23377 prog->aux->ctx_arg_info_size = cnt; in bpf_prog_ctx_arg_info_init()
23379 return prog->aux->ctx_arg_info ? 0 : -ENOMEM; in bpf_prog_ctx_arg_info_init()
23388 struct bpf_prog *prog = env->prog; in check_struct_ops_btf_id()
23395 if (!prog->gpl_compatible) { in check_struct_ops_btf_id()
23397 return -EINVAL; in check_struct_ops_btf_id()
23400 if (!prog->aux->attach_btf_id) in check_struct_ops_btf_id()
23401 return -ENOTSUPP; in check_struct_ops_btf_id()
23403 btf = prog->aux->attach_btf; in check_struct_ops_btf_id()
23406 env->attach_btf_mod = btf_try_get_module(btf); in check_struct_ops_btf_id()
23407 if (!env->attach_btf_mod) { in check_struct_ops_btf_id()
23410 return -ENOTSUPP; in check_struct_ops_btf_id()
23414 btf_id = prog->aux->attach_btf_id; in check_struct_ops_btf_id()
23419 return -ENOTSUPP; in check_struct_ops_btf_id()
23421 st_ops = st_ops_desc->st_ops; in check_struct_ops_btf_id()
23423 t = st_ops_desc->type; in check_struct_ops_btf_id()
23424 member_idx = prog->expected_attach_type; in check_struct_ops_btf_id()
23427 member_idx, st_ops->name); in check_struct_ops_btf_id()
23428 return -EINVAL; in check_struct_ops_btf_id()
23432 mname = btf_name_by_offset(btf, member->name_off); in check_struct_ops_btf_id()
23433 func_proto = btf_type_resolve_func_ptr(btf, member->type, in check_struct_ops_btf_id()
23437 mname, member_idx, st_ops->name); in check_struct_ops_btf_id()
23438 return -EINVAL; in check_struct_ops_btf_id()
23445 mname, st_ops->name); in check_struct_ops_btf_id()
23449 if (st_ops->check_member) { in check_struct_ops_btf_id()
23450 err = st_ops->check_member(t, member, prog); in check_struct_ops_btf_id()
23454 mname, st_ops->name); in check_struct_ops_btf_id()
23459 if (prog->aux->priv_stack_requested && !bpf_jit_supports_private_stack()) { in check_struct_ops_btf_id()
23461 return -EACCES; in check_struct_ops_btf_id()
23464 for (i = 0; i < st_ops_desc->arg_info[member_idx].cnt; i++) { in check_struct_ops_btf_id()
23465 if (st_ops_desc->arg_info[member_idx].info->refcounted) { in check_struct_ops_btf_id()
23474 for (i = 0; i < env->subprog_cnt; i++) { in check_struct_ops_btf_id()
23475 if (has_refcounted_arg && env->subprog_info[i].has_tail_call) { in check_struct_ops_btf_id()
23477 return -EINVAL; in check_struct_ops_btf_id()
23481 prog->aux->st_ops = st_ops; in check_struct_ops_btf_id()
23482 prog->aux->attach_st_ops_member_off = member_off; in check_struct_ops_btf_id()
23484 prog->aux->attach_func_proto = func_proto; in check_struct_ops_btf_id()
23485 prog->aux->attach_func_name = mname; in check_struct_ops_btf_id()
23486 env->ops = st_ops->verifier_ops; in check_struct_ops_btf_id()
23488 return bpf_prog_ctx_arg_info_init(prog, st_ops_desc->arg_info[member_idx].info, in check_struct_ops_btf_id()
23489 st_ops_desc->arg_info[member_idx].cnt); in check_struct_ops_btf_id()
23496 !strncmp(SECURITY_PREFIX, func_name, sizeof(SECURITY_PREFIX) - 1)) in check_attach_modify_return()
23499 return -EINVAL; in check_attach_modify_return()
23502 /* list of non-sleepable functions that are otherwise on
23506 /* Three functions below can be called from sleepable and non-sleepable context.
23507  * Assume non-sleepable from the bpf safety point of view.
23529 bool prog_extension = prog->type == BPF_PROG_TYPE_EXT; in bpf_check_attach_target()
23530 bool prog_tracing = prog->type == BPF_PROG_TYPE_TRACING; in bpf_check_attach_target()
23534 int ret = 0, subprog = -1, i; in bpf_check_attach_target()
23544 return -EINVAL; in bpf_check_attach_target()
23546 btf = tgt_prog ? tgt_prog->aux->btf : prog->aux->attach_btf; in bpf_check_attach_target()
23550 return -EINVAL; in bpf_check_attach_target()
23555 return -EINVAL; in bpf_check_attach_target()
23557 tname = btf_name_by_offset(btf, t->name_off); in bpf_check_attach_target()
23560 return -EINVAL; in bpf_check_attach_target()
23563 struct bpf_prog_aux *aux = tgt_prog->aux; in bpf_check_attach_target()
23567 if (bpf_prog_is_dev_bound(prog->aux) && in bpf_check_attach_target()
23570 return -EINVAL; in bpf_check_attach_target()
23573 for (i = 0; i < aux->func_info_cnt; i++) in bpf_check_attach_target()
23574 if (aux->func_info[i].type_id == btf_id) { in bpf_check_attach_target()
23578 if (subprog == -1) { in bpf_check_attach_target()
23580 return -EINVAL; in bpf_check_attach_target()
23582 if (aux->func && aux->func[subprog]->aux->exception_cb) { in bpf_check_attach_target()
23586 return -EINVAL; in bpf_check_attach_target()
23588 conservative = aux->func_info_aux[subprog].unreliable; in bpf_check_attach_target()
23593 return -EINVAL; in bpf_check_attach_target()
23595 if (!prog->jit_requested) { in bpf_check_attach_target()
23598 return -EINVAL; in bpf_check_attach_target()
23600 tgt_changes_pkt_data = aux->func in bpf_check_attach_target()
23601 ? aux->func[subprog]->aux->changes_pkt_data in bpf_check_attach_target()
23602 : aux->changes_pkt_data; in bpf_check_attach_target()
23603 if (prog->aux->changes_pkt_data && !tgt_changes_pkt_data) { in bpf_check_attach_target()
23606 return -EINVAL; in bpf_check_attach_target()
23609 tgt_might_sleep = aux->func in bpf_check_attach_target()
23610 ? aux->func[subprog]->aux->might_sleep in bpf_check_attach_target()
23611 : aux->might_sleep; in bpf_check_attach_target()
23612 if (prog->aux->might_sleep && !tgt_might_sleep) { in bpf_check_attach_target()
23615 return -EINVAL; in bpf_check_attach_target()
23618 if (!tgt_prog->jited) { in bpf_check_attach_target()
23620 return -EINVAL; in bpf_check_attach_target()
23623 if (aux->attach_tracing_prog) { in bpf_check_attach_target()
23630 return -EINVAL; in bpf_check_attach_target()
23632 } else if (tgt_prog->type == prog->type) { in bpf_check_attach_target()
23639 return -EINVAL; in bpf_check_attach_target()
23641 if (tgt_prog->type == BPF_PROG_TYPE_TRACING && in bpf_check_attach_target()
23643 (tgt_prog->expected_attach_type == BPF_TRACE_FENTRY || in bpf_check_attach_target()
23644 tgt_prog->expected_attach_type == BPF_TRACE_FEXIT)) { in bpf_check_attach_target()
23655 * long call chain fentry->extension->fentry->extension in bpf_check_attach_target()
23660 return -EINVAL; in bpf_check_attach_target()
23665 return -EINVAL; in bpf_check_attach_target()
23669 switch (prog->expected_attach_type) { in bpf_check_attach_target()
23674 return -EINVAL; in bpf_check_attach_target()
23679 return -EINVAL; in bpf_check_attach_target()
23681 if (strncmp(prefix, tname, sizeof(prefix) - 1)) { in bpf_check_attach_target()
23684 return -EINVAL; in bpf_check_attach_target()
23686 tname += sizeof(prefix) - 1; in bpf_check_attach_target()
23693 return -EINVAL; in bpf_check_attach_target()
23694 fname = kallsyms_lookup((unsigned long)btp->bpf_func, NULL, NULL, NULL, in bpf_check_attach_target()
23704 t = btf_type_by_id(btf, t->type); in bpf_check_attach_target()
23707 return -EINVAL; in bpf_check_attach_target()
23712 return -EINVAL; in bpf_check_attach_target()
23715 t = btf_type_by_id(btf, t->type); in bpf_check_attach_target()
23718 return -EINVAL; in bpf_check_attach_target()
23725 return -EINVAL; in bpf_check_attach_target()
23727 t = btf_type_by_id(btf, t->type); in bpf_check_attach_target()
23729 return -EINVAL; in bpf_check_attach_target()
23730 ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel); in bpf_check_attach_target()
23736 return -EINVAL; in bpf_check_attach_target()
23746 return -EINVAL; in bpf_check_attach_target()
23750 return -EINVAL; in bpf_check_attach_target()
23751 t = btf_type_by_id(btf, t->type); in bpf_check_attach_target()
23753 return -EINVAL; in bpf_check_attach_target()
23755 if ((prog->aux->saved_dst_prog_type || prog->aux->saved_dst_attach_type) && in bpf_check_attach_target()
23756 (!tgt_prog || prog->aux->saved_dst_prog_type != tgt_prog->type || in bpf_check_attach_target()
23757 prog->aux->saved_dst_attach_type != tgt_prog->expected_attach_type)) in bpf_check_attach_target()
23758 return -EINVAL; in bpf_check_attach_target()
23763 ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel); in bpf_check_attach_target()
23769 addr = (long) tgt_prog->bpf_func; in bpf_check_attach_target()
23771 addr = (long) tgt_prog->aux->func[subprog]->bpf_func; in bpf_check_attach_target()
23787 return -ENOENT; in bpf_check_attach_target()
23791 if (prog->sleepable) { in bpf_check_attach_target()
23792 ret = -EINVAL; in bpf_check_attach_target()
23793 switch (prog->type) { in bpf_check_attach_target()
23828 } else if (prog->expected_attach_type == BPF_MODIFY_RETURN) { in bpf_check_attach_target()
23832 return -EINVAL; in bpf_check_attach_target()
23834 ret = -EINVAL; in bpf_check_attach_target()
23847 tgt_info->tgt_addr = addr; in bpf_check_attach_target()
23848 tgt_info->tgt_name = tname; in bpf_check_attach_target()
23849 tgt_info->tgt_type = t; in bpf_check_attach_target()
23850 tgt_info->tgt_mod = mod; in bpf_check_attach_target()
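/* Note added to this excerpt: on success bpf_check_attach_target() hands the
 * caller everything needed to build the attachment: the argument/return model
 * distilled by btf_distill_func_proto() above (fmodel), the resolved target
 * address, the symbol name, the BTF func proto type and, for module symbols,
 * a reference to the owning module.
 */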
23903 if (prog->type == BPF_PROG_TYPE_TRACING) { in can_be_sleepable()
23904 switch (prog->expected_attach_type) { in can_be_sleepable()
23914 return prog->type == BPF_PROG_TYPE_LSM || in can_be_sleepable()
23915 prog->type == BPF_PROG_TYPE_KPROBE /* only for uprobes */ || in can_be_sleepable()
23916 prog->type == BPF_PROG_TYPE_STRUCT_OPS; in can_be_sleepable()
23921 struct bpf_prog *prog = env->prog; in check_attach_btf_id()
23922 struct bpf_prog *tgt_prog = prog->aux->dst_prog; in check_attach_btf_id()
23924 u32 btf_id = prog->aux->attach_btf_id; in check_attach_btf_id()
23929 if (prog->type == BPF_PROG_TYPE_SYSCALL) { in check_attach_btf_id()
23930 if (prog->sleepable) in check_attach_btf_id()
23934 return -EINVAL; in check_attach_btf_id()
23937 if (prog->sleepable && !can_be_sleepable(prog)) { in check_attach_btf_id()
23939 return -EINVAL; in check_attach_btf_id()
23942 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS) in check_attach_btf_id()
23945 if (prog->type != BPF_PROG_TYPE_TRACING && in check_attach_btf_id()
23946 prog->type != BPF_PROG_TYPE_LSM && in check_attach_btf_id()
23947 prog->type != BPF_PROG_TYPE_EXT) in check_attach_btf_id()
23950 ret = bpf_check_attach_target(&env->log, prog, tgt_prog, btf_id, &tgt_info); in check_attach_btf_id()
23954 if (tgt_prog && prog->type == BPF_PROG_TYPE_EXT) { in check_attach_btf_id()
23956 * inherit env->ops and expected_attach_type for the rest of the in check_attach_btf_id()
23959 env->ops = bpf_verifier_ops[tgt_prog->type]; in check_attach_btf_id()
23960 prog->expected_attach_type = tgt_prog->expected_attach_type; in check_attach_btf_id()
23964 prog->aux->attach_func_proto = tgt_info.tgt_type; in check_attach_btf_id()
23965 prog->aux->attach_func_name = tgt_info.tgt_name; in check_attach_btf_id()
23966 prog->aux->mod = tgt_info.tgt_mod; in check_attach_btf_id()
23969 prog->aux->saved_dst_prog_type = tgt_prog->type; in check_attach_btf_id()
23970 prog->aux->saved_dst_attach_type = tgt_prog->expected_attach_type; in check_attach_btf_id()
23973 if (prog->expected_attach_type == BPF_TRACE_RAW_TP) { in check_attach_btf_id()
23974 prog->aux->attach_btf_trace = true; in check_attach_btf_id()
23976 } else if (prog->expected_attach_type == BPF_TRACE_ITER) { in check_attach_btf_id()
23980 if (prog->type == BPF_PROG_TYPE_LSM) { in check_attach_btf_id()
23981 ret = bpf_lsm_verify_prog(&env->log, prog); in check_attach_btf_id()
23984 } else if (prog->type == BPF_PROG_TYPE_TRACING && in check_attach_btf_id()
23988 return -EINVAL; in check_attach_btf_id()
23989 } else if ((prog->expected_attach_type == BPF_TRACE_FEXIT || in check_attach_btf_id()
23990 prog->expected_attach_type == BPF_MODIFY_RETURN) && in check_attach_btf_id()
23994 return -EINVAL; in check_attach_btf_id()
23997 key = bpf_trampoline_compute_key(tgt_prog, prog->aux->attach_btf, btf_id); in check_attach_btf_id()
24000 return -ENOMEM; in check_attach_btf_id()
24002 if (tgt_prog && tgt_prog->aux->tail_call_reachable) in check_attach_btf_id()
24003 tr->flags = BPF_TRAMP_F_TAIL_CALL_CTX; in check_attach_btf_id()
24005 prog->aux->dst_trampoline = tr; in check_attach_btf_id()
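/* Note added to this excerpt: the trampoline key combines the target (either
 * tgt_prog or the attach BTF object) with the BTF id, so every program that
 * attaches to the same function shares a single trampoline; the trampoline
 * lookup/allocation itself is elided between the lines above, which is why a
 * failure here returns -ENOMEM. BPF_TRAMP_F_TAIL_CALL_CTX is set when the
 * target program is reachable via tail calls so the generated trampoline can
 * account for that.
 */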
24021 * add_fd_from_fd_array() is executed only if fd_array_cnt is non-zero; in that case every fd in the array is expected to refer to either a map or a BTF object.
24059 env->fd_array = make_bpfptr(attr->fd_array, uattr.is_kernel); in process_fd_array()
24066 if (!attr->fd_array_cnt) in process_fd_array()
24070 if (attr->fd_array_cnt >= (U32_MAX / size)) { in process_fd_array()
24071 verbose(env, "fd_array_cnt is too big (%u)\n", attr->fd_array_cnt); in process_fd_array()
24072 return -EINVAL; in process_fd_array()
24075 for (i = 0; i < attr->fd_array_cnt; i++) { in process_fd_array()
24076 if (copy_from_bpfptr_offset(&fd, env->fd_array, i * size, size)) in process_fd_array()
24077 return -EFAULT; in process_fd_array()
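/* Note added to this excerpt: the bound check above guards the offset
 * computation in this loop. Assuming the (elided) 'size' is sizeof(int),
 * requiring fd_array_cnt < U32_MAX / size guarantees that 'i * size' cannot
 * wrap around, so copy_from_bpfptr_offset() always reads inside the
 * user-supplied array.
 */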
24096 #define ALL_CALLER_SAVED_REGS ((1u << CALLER_SAVED_REGS) - 1)
24098 /* Compute info->{use,def} fields for the instruction */
24104 u8 class = BPF_CLASS(insn->code); in compute_insn_live_regs()
24105 u8 code = BPF_OP(insn->code); in compute_insn_live_regs()
24106 u8 mode = BPF_MODE(insn->code); in compute_insn_live_regs()
24107 u16 src = BIT(insn->src_reg); in compute_insn_live_regs()
24108 u16 dst = BIT(insn->dst_reg); in compute_insn_live_regs()
24117 if (BPF_SIZE(insn->code) == BPF_DW) { in compute_insn_live_regs()
24152 switch (insn->imm) { in compute_insn_live_regs()
24167 if (insn->imm & BPF_FETCH) in compute_insn_live_regs()
24184 if (BPF_SRC(insn->code) == BPF_K) in compute_insn_live_regs()
24191 if (BPF_SRC(insn->code) == BPF_K) in compute_insn_live_regs()
24217 if (BPF_SRC(insn->code) == BPF_K) in compute_insn_live_regs()
24225 info->def = def; in compute_insn_live_regs()
24226 info->use = use; in compute_insn_live_regs()
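/* Note added to this excerpt: use/def are bitmasks with BIT(regno) per
 * register, and R0-R10 fit comfortably in a u16. For example, a
 * register-register add "r1 += r2" reads and writes r1 and reads r2, so
 * use = BIT(1) | BIT(2) and def = BIT(1); a register move "r1 = r2" only
 * writes r1, so use = BIT(2) and def = BIT(1); helper and kfunc calls clobber
 * the caller-saved set (see ALL_CALLER_SAVED_REGS above).
 */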
24229 /* Compute may-live registers after each instruction in the program.
24234 * Store result in env->insn_aux_data[i].live_regs.
24238 struct bpf_insn_aux_data *insn_aux = env->insn_aux_data; in compute_live_registers()
24239 struct bpf_insn *insns = env->prog->insnsi; in compute_live_registers()
24241 int insn_cnt = env->prog->len; in compute_live_registers()
24246 * - define the following: in compute_live_registers()
24247 * - I.use : a set of all registers read by instruction I; in compute_live_registers()
24248 * - I.def : a set of all registers written by instruction I; in compute_live_registers()
24249 * - I.in : a set of all registers that may be alive before I execution; in compute_live_registers()
24250 * - I.out : a set of all registers that may be alive after I execution; in compute_live_registers()
24251 * - insn_successors(I): a set of instructions S that might immediately in compute_live_registers()
24253 * - associate separate empty sets 'I.in' and 'I.out' with each instruction; in compute_live_registers()
24254 * - visit each instruction in a postorder and update in compute_live_registers()
24261 * - repeat the computation while the {in,out} fields change for any instruction. in compute_live_registers()
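 *
 * Worked example added to this excerpt (not part of verifier.c). For the
 * straight-line program (foo is a hypothetical two-argument helper)
 *   I1: r1 = r6        use={r6}      def={r1}
 *   I2: r2 = 8         use={}        def={r2}
 *   I3: call foo       use={r1,r2}   def={r0..r5}
 *   I4: exit           use={r0}      def={}
 * a single backwards pass gives:
 *   I4.in = {r0}
 *   I3.in = ({r0} & ~{r0..r5}) | {r1,r2} = {r1,r2}
 *   I2.in = ({r1,r2} & ~{r2}) | {}       = {r1}
 *   I1.in = ({r1} & ~{r1}) | {r6}        = {r6}
 * so only r6 is live on entry; a second pass changes nothing and the fixed
 * point is reached.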
24266 err = -ENOMEM; in compute_live_registers()
24276 for (i = 0; i < env->cfg.cur_postorder; ++i) { in compute_live_registers()
24277 int insn_idx = env->cfg.insn_postorder[i]; in compute_live_registers()
24284 succ_num = bpf_insn_successors(env->prog, insn_idx, succ); in compute_live_registers()
24287 new_in = (new_out & ~live->def) | live->use; in compute_live_registers()
24288 if (new_out != live->out || new_in != live->in) { in compute_live_registers()
24289 live->in = new_in; in compute_live_registers()
24290 live->out = new_out; in compute_live_registers()
24299 if (env->log.level & BPF_LOG_LEVEL2) { in compute_live_registers()
24302 if (env->insn_aux_data[i].scc) in compute_live_registers()
24303 verbose(env, "%3d ", env->insn_aux_data[i].scc); in compute_live_registers()
24326 * Assign an SCC number to each instruction, recorded in env->insn_aux_data[*].scc.
24329 * Uses a non-recursive adaptation of Tarjan's algorithm for SCC computation.
24335 struct bpf_insn_aux_data *aux = env->insn_aux_data; in compute_scc()
24336 const u32 insn_cnt = env->prog->len; in compute_scc()
24348 * - 'stack' accumulates vertices in DFS order, see invariant comment below; in compute_scc()
24349 * - 'pre[t] == p' => preorder number of vertex 't' is 'p'; in compute_scc()
24350 * - 'low[t] == n' => smallest preorder number of the vertex reachable from 't' is 'n'; in compute_scc()
24351 * - 'dfs' DFS traversal stack, used to emulate explicit recursion. in compute_scc()
24358 err = -ENOMEM; in compute_scc()
24363 * [1] R. Tarjan "Depth-First Search and Linear Graph Algorithms" in compute_scc()
24364 * [2] D. J. Pearce "A Space-Efficient Algorithm for Finding Strongly Connected Components" in compute_scc()
24367 * - suppose there is a path 'u' ~> 'v', such that 'pre[v] < pre[u]'; in compute_scc()
24368 * - then, vertex 'u' remains on stack while vertex 'v' is on stack. in compute_scc()
24371 * - If 'low[v] < pre[v]', there is a path from 'v' to some vertex 'u', in compute_scc()
24374 * - If 'low[v] == pre[v]', loops containing 'v' have been explored, in compute_scc()
24377 * Here is pseudo-code for an explicitly recursive version of the algorithm: in compute_scc()
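 *
 * (The pseudo-code itself is elided from this excerpt; the classic recursive
 *  formulation it refers to looks roughly as follows.)
 *
 *   dfs(w):
 *       pre[w] = low[w] = next_preorder_num++
 *       stack.push(w)
 *       for each successor s of w:
 *           if pre[s] == 0:                 # not visited yet
 *               dfs(s)
 *               low[w] = min(low[w], low[s])
 *           else if s is on stack:
 *               low[w] = min(low[w], pre[s])
 *       if low[w] == pre[w]:                # w is the root of its SCC
 *           pop vertices from the stack up to and including w and
 *           assign them all the same SCC number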
24442 w = dfs[dfs_sz - 1]; in compute_scc()
24450 succ_cnt = bpf_insn_successors(env->prog, w, succ); in compute_scc()
24464 dfs_sz--; in compute_scc()
24471 assign_scc = stack[stack_sz - 1] != w; in compute_scc()
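/* Note added to this excerpt: if 'w' is still on top of the stack at this
 * point it is the sole member of its component; unless the elided self-edge
 * check that follows finds a loop on 'w' itself, it keeps the default SCC
 * number of zero instead of being assigned a fresh id.
 */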
24480 t = stack[--stack_sz]; in compute_scc()
24487 dfs_sz--; in compute_scc()
24490 env->scc_info = kvcalloc(next_scc_id, sizeof(*env->scc_info), GFP_KERNEL_ACCOUNT); in compute_scc()
24491 if (!env->scc_info) { in compute_scc()
24492 err = -ENOMEM; in compute_scc()
24495 env->scc_cnt = next_scc_id; in compute_scc()
24508 int i, len, ret = -EINVAL, err; in bpf_check()
24516 return -EINVAL; in bpf_check()
24523 return -ENOMEM; in bpf_check()
24525 env->bt.env = env; in bpf_check()
24527 len = (*prog)->len; in bpf_check()
24528 env->insn_aux_data = in bpf_check()
24530 ret = -ENOMEM; in bpf_check()
24531 if (!env->insn_aux_data) in bpf_check()
24534 env->insn_aux_data[i].orig_idx = i; in bpf_check()
24535 env->prog = *prog; in bpf_check()
24536 env->ops = bpf_verifier_ops[env->prog->type]; in bpf_check()
24538 env->allow_ptr_leaks = bpf_allow_ptr_leaks(env->prog->aux->token); in bpf_check()
24539 env->allow_uninit_stack = bpf_allow_uninit_stack(env->prog->aux->token); in bpf_check()
24540 env->bypass_spec_v1 = bpf_bypass_spec_v1(env->prog->aux->token); in bpf_check()
24541 env->bypass_spec_v4 = bpf_bypass_spec_v4(env->prog->aux->token); in bpf_check()
24542 env->bpf_capable = is_priv = bpf_token_capable(env->prog->aux->token, CAP_BPF); in bpf_check()
24553 ret = bpf_vlog_init(&env->log, attr->log_level, in bpf_check()
24554 (char __user *) (unsigned long) attr->log_buf, in bpf_check()
24555 attr->log_size); in bpf_check()
24567 verbose(env, "in-kernel BTF is malformed\n"); in bpf_check()
24572 env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT); in bpf_check()
24574 env->strict_alignment = true; in bpf_check()
24575 if (attr->prog_flags & BPF_F_ANY_ALIGNMENT) in bpf_check()
24576 env->strict_alignment = false; in bpf_check()
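/* Note added to this excerpt: alignment policy is resolved in three steps:
 * BPF_F_STRICT_ALIGNMENT opts in to strict checking, the (elided) #ifndef
 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS branch forces it on architectures
 * without efficient unaligned access, and BPF_F_ANY_ALIGNMENT switches it
 * back off.
 */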
24579 env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ; in bpf_check()
24580 env->test_reg_invariants = attr->prog_flags & BPF_F_TEST_REG_INVARIANTS; in bpf_check()
24582 env->explored_states = kvcalloc(state_htab_size(env), in bpf_check()
24585 ret = -ENOMEM; in bpf_check()
24586 if (!env->explored_states) in bpf_check()
24590 INIT_LIST_HEAD(&env->explored_states[i]); in bpf_check()
24591 INIT_LIST_HEAD(&env->free_list); in bpf_check()
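/* Note added to this excerpt: explored_states is the verifier's state-pruning
 * cache; each bucket holds a list of states that were already verified at a
 * given instruction, so later traversals that reach an equivalent state can
 * be pruned instead of being explored again.
 */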
24613 if (bpf_prog_is_offloaded(env->prog->aux)) { in bpf_check()
24614 ret = bpf_prog_offload_verifier_prep(env->prog); in bpf_check()
24650 if (ret == 0 && bpf_prog_is_offloaded(env->prog->aux)) in bpf_check()
24654 kvfree(env->explored_states); in bpf_check()
24688 /* do the 32-bit optimization after insn patching is done, so that the patched instructions are covered as well. */ in bpf_check()
24691 if (ret == 0 && !bpf_prog_is_offloaded(env->prog->aux)) { in bpf_check()
24693 env->prog->aux->verifier_zext = bpf_jit_needs_zext() ? !ret in bpf_check()
24700 env->verification_time = ktime_get_ns() - start_time; in bpf_check()
24702 env->prog->aux->verified_insns = env->insn_processed; in bpf_check()
24705 err = bpf_vlog_finalize(&env->log, &log_true_size); in bpf_check()
24712 ret = -EFAULT; in bpf_check()
24719 if (env->used_map_cnt) { in bpf_check()
24721 env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt, in bpf_check()
24722 sizeof(env->used_maps[0]), in bpf_check()
24725 if (!env->prog->aux->used_maps) { in bpf_check()
24726 ret = -ENOMEM; in bpf_check()
24730 memcpy(env->prog->aux->used_maps, env->used_maps, in bpf_check()
24731 sizeof(env->used_maps[0]) * env->used_map_cnt); in bpf_check()
24732 env->prog->aux->used_map_cnt = env->used_map_cnt; in bpf_check()
24734 if (env->used_btf_cnt) { in bpf_check()
24736 env->prog->aux->used_btfs = kmalloc_array(env->used_btf_cnt, in bpf_check()
24737 sizeof(env->used_btfs[0]), in bpf_check()
24739 if (!env->prog->aux->used_btfs) { in bpf_check()
24740 ret = -ENOMEM; in bpf_check()
24744 memcpy(env->prog->aux->used_btfs, env->used_btfs, in bpf_check()
24745 sizeof(env->used_btfs[0]) * env->used_btf_cnt); in bpf_check()
24746 env->prog->aux->used_btf_cnt = env->used_btf_cnt; in bpf_check()
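/* Note added to this excerpt: copying used_maps/used_btfs into prog->aux
 * transfers ownership of those references from the verifier env to the
 * program itself; the error path below therefore releases them only if the
 * copy never happened (the "if (!env->prog->aux->used_maps)" and
 * "if (!env->prog->aux->used_btfs)" checks).
 */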
24748 if (env->used_map_cnt || env->used_btf_cnt) { in bpf_check()
24758 if (!env->prog->aux->used_maps) in bpf_check()
24763 if (!env->prog->aux->used_btfs) in bpf_check()
24769 if (env->prog->type == BPF_PROG_TYPE_EXT) in bpf_check()
24770 env->prog->expected_attach_type = 0; in bpf_check()
24772 *prog = env->prog; in bpf_check()
24774 module_put(env->attach_btf_mod); in bpf_check()
24778 vfree(env->insn_aux_data); in bpf_check()
24781 kvfree(env->cfg.insn_postorder); in bpf_check()
24782 kvfree(env->scc_info); in bpf_check()