Lines Matching defs:struct
36 static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
53 struct bpf_mem_alloc bpf_global_percpu_ma;
121 * function expects 1st argument to be a const pointer to 'struct bpf_map' and
128 * struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
182 struct bpf_verifier_stack_elem {
187 struct bpf_verifier_state st;
190 struct bpf_verifier_stack_elem *next;
205 static int acquire_reference(struct bpf_verifier_env *env, int insn_idx);
206 static int release_reference_nomark(struct bpf_verifier_state *state, int ref_obj_id);
207 static int release_reference(struct bpf_verifier_env *env, int ref_obj_id);
208 static void invalidate_non_owning_refs(struct bpf_verifier_env *env);
209 static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env);
210 static int ref_set_non_owning(struct bpf_verifier_env *env,
211 struct bpf_reg_state *reg);
212 static bool is_trusted_reg(const struct bpf_reg_state *reg);
214 static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
219 static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
224 static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
225 struct bpf_map *map,
234 static bool bpf_map_key_poisoned(const struct bpf_insn_aux_data *aux)
239 static bool bpf_map_key_unseen(const struct bpf_insn_aux_data *aux)
244 static u64 bpf_map_key_immediate(const struct bpf_insn_aux_data *aux)
249 static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state)
257 static bool bpf_helper_call(const struct bpf_insn *insn)
263 static bool bpf_pseudo_call(const struct bpf_insn *insn)
269 static bool bpf_pseudo_kfunc_call(const struct bpf_insn *insn)
275 struct bpf_map_desc {
276 struct bpf_map *ptr;
280 struct bpf_call_arg_meta {
281 struct bpf_map_desc map;
292 struct btf *btf;
294 struct btf *ret_btf;
297 struct btf_field *kptr_field;
301 struct bpf_kfunc_meta {
302 struct btf *btf;
303 const struct btf_type *proto;
309 struct bpf_kfunc_call_arg_meta {
311 struct btf *btf;
314 const struct btf_type *func_proto;
323 struct {
338 struct btf *arg_btf;
343 struct {
344 struct btf_field *field;
346 struct {
347 struct btf_field *field;
349 struct {
354 struct {
358 struct bpf_map_desc map;
362 struct btf *btf_vmlinux;
364 static const char *btf_type_name(const struct btf *btf, u32 id)
374 struct bpf_verifier_env *env = private_data;
385 static void verbose_invalid_scalar(struct bpf_verifier_env *env,
386 struct bpf_reg_state *reg,
387 struct bpf_retval_range range, const char *ctx,
406 static bool reg_not_null(const struct bpf_reg_state *reg)
425 static struct btf_record *reg_btf_record(const struct bpf_reg_state *reg)
427 struct btf_record *rec = NULL;
428 struct btf_struct_meta *meta;
440 static bool subprog_is_global(const struct bpf_verifier_env *env, int subprog)
442 struct bpf_func_info_aux *aux = env->prog->aux->func_info_aux;
447 static const char *subprog_name(const struct bpf_verifier_env *env, int subprog)
449 struct bpf_func_info *info;
458 static void mark_subprog_exc_cb(struct bpf_verifier_env *env, int subprog)
460 struct bpf_subprog_info *info = subprog_info(env, subprog);
467 static bool subprog_is_exc_cb(struct bpf_verifier_env *env, int subprog)
472 static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
483 const struct bpf_map *map)
522 static bool is_bpf_throw_kfunc(struct bpf_insn *insn);
546 static bool is_sync_callback_calling_insn(struct bpf_insn *insn)
552 static bool is_async_callback_calling_insn(struct bpf_insn *insn)
558 static bool is_async_cb_sleepable(struct bpf_verifier_env *env, struct bpf_insn *insn)
573 static bool is_may_goto_insn(struct bpf_insn *insn)
578 static bool is_may_goto_insn_at(struct bpf_verifier_env *env, int insn_idx)
592 const struct bpf_map *map)
606 static bool is_cmpxchg_insn(const struct bpf_insn *insn)
613 static bool is_atomic_load_insn(const struct bpf_insn *insn)
620 static bool is_atomic_fetch_insn(const struct bpf_insn *insn)
632 static struct bpf_func_state *func(struct bpf_verifier_env *env,
633 const struct bpf_reg_state *reg)
635 struct bpf_verifier_state *cur = env->cur_state;
640 static bool is_spi_bounds_valid(struct bpf_func_state *state, int spi, int nr_slots)
654 static int stack_slot_obj_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
681 static int dynptr_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
686 static int iter_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg, int nr_slots)
691 static int irq_flag_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
741 static void __mark_dynptr_reg(struct bpf_reg_state *reg,
745 static void __mark_reg_not_init(const struct bpf_verifier_env *env,
746 struct bpf_reg_state *reg);
748 static void mark_dynptr_stack_regs(struct bpf_verifier_env *env,
749 struct bpf_reg_state *sreg1,
750 struct bpf_reg_state *sreg2,
759 static void mark_dynptr_cb_reg(struct bpf_verifier_env *env,
760 struct bpf_reg_state *reg,
766 static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env,
767 struct bpf_func_state *state, int spi);
769 static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
772 struct bpf_func_state *state = func(env, reg);
829 static void invalidate_dynptr(struct bpf_verifier_env *env, struct bpf_func_state *state, int spi)
844 static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
846 struct bpf_func_state *state = func(env, reg);
899 static void __mark_reg_unknown(const struct bpf_verifier_env *env,
900 struct bpf_reg_state *reg);
902 static void mark_reg_invalid(const struct bpf_verifier_env *env, struct bpf_reg_state *reg)
910 static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env,
911 struct bpf_func_state *state, int spi)
913 struct bpf_func_state *fstate;
914 struct bpf_reg_state *dreg;
964 static bool is_dynptr_reg_valid_uninit(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
992 static bool is_dynptr_reg_valid_init(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
994 struct bpf_func_state *state = func(env, reg);
1021 static bool is_dynptr_type_expected(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
1024 struct bpf_func_state *state = func(env, reg);
1043 static void __mark_reg_known_zero(struct bpf_reg_state *reg);
1045 static bool in_rcu_cs(struct bpf_verifier_env *env);
1047 static bool is_kfunc_rcu_protected(struct bpf_kfunc_call_arg_meta *meta);
1049 static int mark_stack_slots_iter(struct bpf_verifier_env *env,
1050 struct bpf_kfunc_call_arg_meta *meta,
1051 struct bpf_reg_state *reg, int insn_idx,
1052 struct btf *btf, u32 btf_id, int nr_slots)
1054 struct bpf_func_state *state = func(env, reg);
1066 struct bpf_stack_state *slot = &state->stack[spi - i];
1067 struct bpf_reg_state *st = &slot->spilled_ptr;
1093 static int unmark_stack_slots_iter(struct bpf_verifier_env *env,
1094 struct bpf_reg_state *reg, int nr_slots)
1096 struct bpf_func_state *state = func(env, reg);
1104 struct bpf_stack_state *slot = &state->stack[spi - i];
1105 struct bpf_reg_state *st = &slot->spilled_ptr;
1122 static bool is_iter_reg_valid_uninit(struct bpf_verifier_env *env,
1123 struct bpf_reg_state *reg, int nr_slots)
1125 struct bpf_func_state *state = func(env, reg);
1139 struct bpf_stack_state *slot = &state->stack[spi - i];
1149 static int is_iter_reg_valid_init(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
1150 struct btf *btf, u32 btf_id, int nr_slots)
1152 struct bpf_func_state *state = func(env, reg);
1160 struct bpf_stack_state *slot = &state->stack[spi - i];
1161 struct bpf_reg_state *st = &slot->spilled_ptr;
1181 static int acquire_irq_state(struct bpf_verifier_env *env, int insn_idx);
1182 static int release_irq_state(struct bpf_verifier_state *state, int id);
1184 static int mark_stack_slot_irq_flag(struct bpf_verifier_env *env,
1185 struct bpf_kfunc_call_arg_meta *meta,
1186 struct bpf_reg_state *reg, int insn_idx,
1189 struct bpf_func_state *state = func(env, reg);
1190 struct bpf_stack_state *slot;
1191 struct bpf_reg_state *st;
1218 static int unmark_stack_slot_irq_flag(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
1221 struct bpf_func_state *state = func(env, reg);
1222 struct bpf_stack_state *slot;
1223 struct bpf_reg_state *st;
1270 static bool is_irq_flag_reg_valid_uninit(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
1272 struct bpf_func_state *state = func(env, reg);
1273 struct bpf_stack_state *slot;
1294 static int is_irq_flag_reg_valid_init(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
1296 struct bpf_func_state *state = func(env, reg);
1297 struct bpf_stack_state *slot;
1298 struct bpf_reg_state *st;
1323 static bool is_stack_slot_special(const struct bpf_stack_state *stack)
1346 static bool is_spilled_reg(const struct bpf_stack_state *stack)
1351 static bool is_spilled_scalar_reg(const struct bpf_stack_state *stack)
1357 static bool is_spilled_scalar_reg64(const struct bpf_stack_state *stack)
1372 static void mark_stack_slot_misc(struct bpf_verifier_env *env, u8 *stype)
1446 static int copy_reference_state(struct bpf_verifier_state *dst, const struct bpf_verifier_state *src)
1449 sizeof(struct bpf_reference_state), GFP_KERNEL_ACCOUNT);
1463 static int copy_stack_state(struct bpf_func_state *dst, const struct bpf_func_state *src)
1467 dst->stack = copy_array(dst->stack, src->stack, n, sizeof(struct bpf_stack_state),
1476 static int resize_reference_state(struct bpf_verifier_state *state, size_t n)
1479 sizeof(struct bpf_reference_state));
1490 static int grow_stack_state(struct bpf_verifier_env *env, struct bpf_func_state *state, int size)
1501 state->stack = realloc_array(state->stack, old_n, n, sizeof(struct bpf_stack_state));
1519 static struct bpf_reference_state *acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
1521 struct bpf_verifier_state *state = env->cur_state;
1533 static int acquire_reference(struct bpf_verifier_env *env, int insn_idx)
1535 struct bpf_reference_state *s;
1545 static int acquire_lock_state(struct bpf_verifier_env *env, int insn_idx, enum ref_state_type type,
1548 struct bpf_verifier_state *state = env->cur_state;
1549 struct bpf_reference_state *s;
1564 static int acquire_irq_state(struct bpf_verifier_env *env, int insn_idx)
1566 struct bpf_verifier_state *state = env->cur_state;
1567 struct bpf_reference_state *s;
1579 static void release_reference_state(struct bpf_verifier_state *state, int idx)
1598 static bool find_reference_state(struct bpf_verifier_state *state, int ptr_id)
1609 static int release_lock_state(struct bpf_verifier_state *state, int type, int id, void *ptr)
1633 static int release_irq_state(struct bpf_verifier_state *state, int id)
1655 static struct bpf_reference_state *find_lock_state(struct bpf_verifier_state *state, enum ref_state_type type,
1661 struct bpf_reference_state *s = &state->refs[i];
1672 static void update_peak_states(struct bpf_verifier_env *env)
1680 static void free_func_state(struct bpf_func_state *state)
1688 static void clear_jmp_history(struct bpf_verifier_state *state)
1695 static void free_verifier_state(struct bpf_verifier_state *state,
1710 /* struct bpf_verifier_state->parent refers to states
1712 * In both cases the state is contained in struct bpf_verifier_state_list.
1714 static struct bpf_verifier_state_list *state_parent_as_list(struct bpf_verifier_state *st)
1717 return container_of(st->parent, struct bpf_verifier_state_list, state);
1721 static bool incomplete_read_marks(struct bpf_verifier_env *env,
1722 struct bpf_verifier_state *st);
1728 static void maybe_free_verifier_state(struct bpf_verifier_env *env,
1729 struct bpf_verifier_state_list *sl)
1744 static int copy_func_state(struct bpf_func_state *dst,
1745 const struct bpf_func_state *src)
1747 memcpy(dst, src, offsetof(struct bpf_func_state, stack));
1751 static int copy_verifier_state(struct bpf_verifier_state *dst_state,
1752 const struct bpf_verifier_state *src)
1754 struct bpf_func_state *dst;
1801 static u32 state_htab_size(struct bpf_verifier_env *env)
1806 static struct list_head *explored_state(struct bpf_verifier_env *env, int idx)
1808 struct bpf_verifier_state *cur = env->cur_state;
1809 struct bpf_func_state *state = cur->frame[cur->curframe];
1814 static bool same_callsites(struct bpf_verifier_state *a, struct bpf_verifier_state *b)
1829 static u32 frame_insn_idx(struct bpf_verifier_state *st, u32 frame)
1849 static bool compute_scc_callchain(struct bpf_verifier_env *env,
1850 struct bpf_verifier_state *st,
1851 struct bpf_scc_callchain *callchain)
1872 static struct bpf_scc_visit *scc_visit_lookup(struct bpf_verifier_env *env,
1873 struct bpf_scc_callchain *callchain)
1875 struct bpf_scc_info *info = env->scc_info[callchain->scc];
1876 struct bpf_scc_visit *visits = info->visits;
1891 static struct bpf_scc_visit *scc_visit_alloc(struct bpf_verifier_env *env,
1892 struct bpf_scc_callchain *callchain)
1894 struct bpf_scc_visit *visit;
1895 struct bpf_scc_info *info;
1902 new_sz = sizeof(*info) + sizeof(struct bpf_scc_visit) * (num_visits + 1);
1915 static char *format_callchain(struct bpf_verifier_env *env, struct bpf_scc_callchain *callchain)
1935 static int maybe_enter_scc(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
1937 struct bpf_scc_callchain *callchain = &env->callchain_buf;
1938 struct bpf_scc_visit *visit;
1954 static int propagate_backedges(struct bpf_verifier_env *env, struct bpf_scc_visit *visit);
1960 static int maybe_exit_scc(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
1962 struct bpf_scc_callchain *callchain = &env->callchain_buf;
1963 struct bpf_scc_visit *visit;
2002 static int add_scc_backedge(struct bpf_verifier_env *env,
2003 struct bpf_verifier_state *st,
2004 struct bpf_scc_backedge *backedge)
2006 struct bpf_scc_callchain *callchain = &env->callchain_buf;
2007 struct bpf_scc_visit *visit;
2034 static bool incomplete_read_marks(struct bpf_verifier_env *env,
2035 struct bpf_verifier_state *st)
2037 struct bpf_scc_callchain *callchain = &env->callchain_buf;
2038 struct bpf_scc_visit *visit;
2048 static void free_backedges(struct bpf_scc_visit *visit)
2050 struct bpf_scc_backedge *backedge, *next;
2060 static int update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
2062 struct bpf_verifier_state_list *sl = NULL, *parent_sl;
2063 struct bpf_verifier_state *parent;
2088 static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
2091 struct bpf_verifier_state *cur = env->cur_state;
2092 struct bpf_verifier_stack_elem *elem, *head = env->head;
2129 static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
2133 struct bpf_verifier_state *cur = env->cur_state;
2134 struct bpf_verifier_stack_elem *elem;
2137 elem = kzalloc_obj(struct bpf_verifier_stack_elem, GFP_KERNEL_ACCOUNT);
2177 static void ___mark_reg_known(struct bpf_reg_state *reg, u64 imm)
2194 static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
2198 offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type));
2204 static void __mark_reg32_known(struct bpf_reg_state *reg, u64 imm)
2216 static void __mark_reg_known_zero(struct bpf_reg_state *reg)
2221 static void __mark_reg_const_zero(const struct bpf_verifier_env *env, struct bpf_reg_state *reg)
2231 static void mark_reg_known_zero(struct bpf_verifier_env *env,
2232 struct bpf_reg_state *regs, u32 regno)
2244 static void __mark_dynptr_reg(struct bpf_reg_state *reg, enum bpf_dynptr_type type,
2259 static void mark_ptr_not_null_reg(struct bpf_reg_state *reg)
2262 const struct bpf_map *map = reg->map_ptr;
2288 static void mark_reg_graph_node(struct bpf_reg_state *regs, u32 regno,
2289 struct btf_field_graph_root *ds_head)
2298 static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
2303 static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
2309 static bool reg_is_dynptr_slice_pkt(const struct bpf_reg_state *reg)
2317 static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
2331 static void __mark_reg_unbounded(struct bpf_reg_state *reg)
2344 static void __mark_reg64_unbounded(struct bpf_reg_state *reg)
2352 static void __mark_reg32_unbounded(struct bpf_reg_state *reg)
2360 static void reset_reg64_and_tnum(struct bpf_reg_state *reg)
2366 static void reset_reg32_and_tnum(struct bpf_reg_state *reg)
2372 static void __update_reg32_bounds(struct bpf_reg_state *reg)
2374 struct tnum var32_off = tnum_subreg(reg->var_off);
2387 static void __update_reg64_bounds(struct bpf_reg_state *reg)
2430 static void __update_reg_bounds(struct bpf_reg_state *reg)
2437 static void __reg32_deduce_bounds(struct bpf_reg_state *reg)
2548 static void __reg64_deduce_bounds(struct bpf_reg_state *reg)
2683 static void __reg_deduce_mixed_bounds(struct bpf_reg_state *reg)
2750 static void __reg_deduce_bounds(struct bpf_reg_state *reg)
2758 static void __reg_bound_offset(struct bpf_reg_state *reg)
2760 struct tnum var64_off = tnum_intersect(reg->var_off,
2763 struct tnum var32_off = tnum_intersect(tnum_subreg(var64_off),
2770 static void reg_bounds_sync(struct bpf_reg_state *reg)
2787 static int reg_bounds_sanity_check(struct bpf_verifier_env *env,
2788 struct bpf_reg_state *reg, const char *ctx)
2842 static void __reg_assign_32_into_64(struct bpf_reg_state *reg)
2862 static void __mark_reg_unknown_imprecise(struct bpf_reg_state *reg)
2868 memset(reg, 0, offsetof(struct bpf_reg_state, var_off));
2881 static void __mark_reg_unknown(const struct bpf_verifier_env *env,
2882 struct bpf_reg_state *reg)
2888 static void mark_reg_unknown(struct bpf_verifier_env *env,
2889 struct bpf_reg_state *regs, u32 regno)
2901 static int __mark_reg_s32_range(struct bpf_verifier_env *env,
2902 struct bpf_reg_state *regs,
2907 struct bpf_reg_state *reg = regs + regno;
2920 static void __mark_reg_not_init(const struct bpf_verifier_env *env,
2921 struct bpf_reg_state *reg)
2927 static void mark_reg_not_init(struct bpf_verifier_env *env,
2928 struct bpf_reg_state *regs, u32 regno)
2940 static int mark_btf_ld_reg(struct bpf_verifier_env *env,
2941 struct bpf_reg_state *regs, u32 regno,
2943 struct btf *btf, u32 btf_id,
2970 static void init_reg_state(struct bpf_verifier_env *env,
2971 struct bpf_func_state *state)
2973 struct bpf_reg_state *regs = state->regs;
2987 static struct bpf_retval_range retval_range(s32 minval, s32 maxval)
2989 return (struct bpf_retval_range){ minval, maxval };
2993 static void init_func_state(struct bpf_verifier_env *env,
2994 struct bpf_func_state *state,
3006 static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env,
3010 struct bpf_verifier_stack_elem *elem;
3011 struct bpf_func_state *frame;
3013 elem = kzalloc_obj(struct bpf_verifier_stack_elem, GFP_KERNEL_ACCOUNT);
3056 return ((struct bpf_subprog_info *)a)->start -
3057 ((struct bpf_subprog_info *)b)->start;
3061 struct bpf_subprog_info *bpf_find_containing_subprog(struct bpf_verifier_env *env, int off)
3063 struct bpf_subprog_info *vals = env->subprog_info;
3082 static int find_subprog(struct bpf_verifier_env *env, int off)
3084 struct bpf_subprog_info *p;
3092 static int add_subprog(struct bpf_verifier_env *env, int off)
3115 static int bpf_find_exception_callback_insn_off(struct bpf_verifier_env *env)
3117 struct bpf_prog_aux *aux = env->prog->aux;
3118 struct btf *btf = aux->btf;
3119 const struct btf_type *t;
3180 struct bpf_kfunc_desc {
3181 struct btf_func_model func_model;
3188 struct bpf_kfunc_btf {
3189 struct btf *btf;
3190 struct module *module;
3194 struct bpf_kfunc_desc_tab {
3200 struct bpf_kfunc_desc descs[MAX_KFUNC_DESCS];
3204 struct bpf_kfunc_btf_tab {
3205 struct bpf_kfunc_btf descs[MAX_KFUNC_BTFS];
3209 static int specialize_kfunc(struct bpf_verifier_env *env, struct bpf_kfunc_desc *desc,
3214 const struct bpf_kfunc_desc *d0 = a;
3215 const struct bpf_kfunc_desc *d1 = b;
3223 const struct bpf_kfunc_btf *d0 = a;
3224 const struct bpf_kfunc_btf *d1 = b;
3229 static struct bpf_kfunc_desc *
3230 find_kfunc_desc(const struct bpf_prog *prog, u32 func_id, u16 offset)
3232 struct bpf_kfunc_desc desc = {
3236 struct bpf_kfunc_desc_tab *tab;
3243 int bpf_get_kfunc_addr(const struct bpf_prog *prog, u32 func_id,
3246 const struct bpf_kfunc_desc *desc;
3256 static struct btf *__find_kfunc_desc_btf(struct bpf_verifier_env *env,
3259 struct bpf_kfunc_btf kf_btf = { .offset = offset };
3260 struct bpf_kfunc_btf_tab *tab;
3261 struct bpf_kfunc_btf *b;
3262 struct module *mod;
3263 struct btf *btf;
3320 void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab)
3332 static struct btf *find_kfunc_desc_btf(struct bpf_verifier_env *env, s16 offset)
3350 static const struct btf_type *find_kfunc_impl_proto(struct bpf_verifier_env *env,
3351 struct btf *btf,
3355 const struct btf_type *func;
3376 static int fetch_kfunc_meta(struct bpf_verifier_env *env,
3379 struct bpf_kfunc_meta *kfunc)
3381 const struct btf_type *func, *func_proto;
3384 struct btf *btf;
3439 static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset)
3441 struct bpf_kfunc_btf_tab *btf_tab;
3442 struct btf_func_model func_model;
3443 struct bpf_kfunc_desc_tab *tab;
3444 struct bpf_prog_aux *prog_aux;
3445 struct bpf_kfunc_meta kfunc;
3446 struct bpf_kfunc_desc *desc;
3536 const struct bpf_kfunc_desc *d0 = a;
3537 const struct bpf_kfunc_desc *d1 = b;
3546 static int set_kfunc_desc_imm(struct bpf_verifier_env *env, struct bpf_kfunc_desc *desc)
3565 static int sort_kfunc_descs_by_imm_off(struct bpf_verifier_env *env)
3567 struct bpf_kfunc_desc_tab *tab;
3585 bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog)
3590 const struct btf_func_model *
3591 bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
3592 const struct bpf_insn *insn)
3594 const struct bpf_kfunc_desc desc = {
3598 const struct bpf_kfunc_desc *res;
3599 struct bpf_kfunc_desc_tab *tab;
3608 static int add_kfunc_in_insns(struct bpf_verifier_env *env,
3609 struct bpf_insn *insn, int cnt)
3623 static int add_subprog_and_kfunc(struct bpf_verifier_env *env)
3625 struct bpf_subprog_info *subprog = env->subprog_info;
3627 struct bpf_insn *insn = env->prog->insnsi;
3686 static int check_subprogs(struct bpf_verifier_env *env)
3689 struct bpf_subprog_info *subprog = env->subprog_info;
3690 struct bpf_insn *insn = env->prog->insnsi;
3742 static int mark_stack_slot_obj_read(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
3756 static int mark_dynptr_read(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
3776 static int mark_iter_read(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
3782 static int mark_irq_flag_read(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
3796 static bool is_reg64(struct bpf_insn *insn,
3797 u32 regno, struct bpf_reg_state *reg, enum reg_arg_type t)
3883 static int insn_def_regno(const struct bpf_insn *insn)
3907 static bool insn_has_def32(struct bpf_insn *insn)
3917 static void mark_insn_zext(struct bpf_verifier_env *env,
3918 struct bpf_reg_state *reg)
3930 static int __check_reg_arg(struct bpf_verifier_env *env, struct bpf_reg_state *regs, u32 regno,
3933 struct bpf_insn *insn = env->prog->insnsi + env->insn_idx;
3934 struct bpf_reg_state *reg;
3973 static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
3976 struct bpf_verifier_state *vstate = env->cur_state;
3977 struct bpf_func_state *state = vstate->frame[vstate->curframe];
3997 static void mark_jmp_point(struct bpf_verifier_env *env, int idx)
4002 static bool is_jmp_point(struct bpf_verifier_env *env, int insn_idx)
4018 struct linked_reg {
4027 struct linked_regs {
4029 struct linked_reg entries[LINKED_REGS_MAX];
4032 static struct linked_reg *linked_regs_push(struct linked_regs *s)
4047 static u64 linked_regs_pack(struct linked_regs *s)
4053 struct linked_reg *e = &s->entries[i];
4068 static void linked_regs_unpack(u64 val, struct linked_regs *s)
4076 struct linked_reg *e = &s->entries[i];
4086 static int push_jmp_history(struct bpf_verifier_env *env, struct bpf_verifier_state *cur,
4090 struct bpf_jmp_history_entry *p;
4128 static struct bpf_jmp_history_entry *get_jmp_hist_entry(struct bpf_verifier_state *st,
4149 static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
4170 static const char *disasm_kfunc_name(void *data, const struct bpf_insn *insn)
4172 const struct btf_type *func;
4173 struct btf *desc_btf;
4186 static void verbose_insn(struct bpf_verifier_env *env, struct bpf_insn *insn)
4188 const struct bpf_insn_cbs cbs = {
4197 static inline void bt_init(struct backtrack_state *bt, u32 frame)
4202 static inline void bt_reset(struct backtrack_state *bt)
4204 struct bpf_verifier_env *env = bt->env;
4210 static inline u32 bt_empty(struct backtrack_state *bt)
4221 static inline int bt_subprog_enter(struct backtrack_state *bt)
4231 static inline int bt_subprog_exit(struct backtrack_state *bt)
4241 static inline void bt_set_frame_reg(struct backtrack_state *bt, u32 frame, u32 reg)
4246 static inline void bt_clear_frame_reg(struct backtrack_state *bt, u32 frame, u32 reg)
4251 static inline void bt_set_reg(struct backtrack_state *bt, u32 reg)
4256 static inline void bt_clear_reg(struct backtrack_state *bt, u32 reg)
4261 static inline void bt_set_frame_slot(struct backtrack_state *bt, u32 frame, u32 slot)
4266 static inline void bt_clear_frame_slot(struct backtrack_state *bt, u32 frame, u32 slot)
4271 static inline u32 bt_frame_reg_mask(struct backtrack_state *bt, u32 frame)
4276 static inline u32 bt_reg_mask(struct backtrack_state *bt)
4281 static inline u64 bt_frame_stack_mask(struct backtrack_state *bt, u32 frame)
4286 static inline u64 bt_stack_mask(struct backtrack_state *bt)
4291 static inline bool bt_is_reg_set(struct backtrack_state *bt, u32 reg)
4296 static inline bool bt_is_frame_reg_set(struct backtrack_state *bt, u32 frame, u32 reg)
4301 static inline bool bt_is_frame_slot_set(struct backtrack_state *bt, u32 frame, u32 slot)
4348 static void bt_sync_linked_regs(struct backtrack_state *bt, struct bpf_jmp_history_entry *hist)
4350 struct linked_regs linked_regs;
4359 struct linked_reg *e = &linked_regs.entries[i];
4372 struct linked_reg *e = &linked_regs.entries[i];
4390 static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
4391 struct bpf_jmp_history_entry *hist, struct backtrack_state *bt)
4393 struct bpf_insn *insn = env->prog->insnsi + idx;
4749 static void mark_all_scalars_precise(struct bpf_verifier_env *env,
4750 struct bpf_verifier_state *st)
4752 struct bpf_func_state *func;
4753 struct bpf_reg_state *reg;
4796 static void mark_all_scalars_imprecise(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
4798 struct bpf_func_state *func;
4799 struct bpf_reg_state *reg;
4908 static int __mark_chain_precision(struct bpf_verifier_env *env,
4909 struct bpf_verifier_state *starting_state,
4913 struct bpf_verifier_state *st = starting_state;
4914 struct backtrack_state *bt = &env->bt;
4918 struct bpf_func_state *func;
4920 struct bpf_reg_state *reg;
4950 struct bpf_jmp_history_entry *hist;
5093 int mark_chain_precision(struct bpf_verifier_env *env, int regno)
5101 static int mark_chain_precision_batch(struct bpf_verifier_env *env,
5102 struct bpf_verifier_state *starting_state)
5135 static bool register_is_null(struct bpf_reg_state *reg)
5141 static bool is_reg_const(struct bpf_reg_state *reg, bool subreg32)
5148 static u64 reg_const_value(struct bpf_reg_state *reg, bool subreg32)
5154 const struct bpf_reg_state *reg)
5162 static void assign_scalar_id_before_mov(struct bpf_verifier_env *env,
5163 struct bpf_reg_state *src_reg)
5187 static void copy_register_state(struct bpf_reg_state *dst, const struct bpf_reg_state *src)
5192 static void save_register_state(struct bpf_verifier_env *env,
5193 struct bpf_func_state *state,
5194 int spi, struct bpf_reg_state *reg,
5209 static bool is_bpf_st_mem(struct bpf_insn *insn)
5214 static int get_reg_width(struct bpf_reg_state *reg)
5220 static void check_fastcall_stack_contract(struct bpf_verifier_env *env,
5221 struct bpf_func_state *state, int insn_idx, int off)
5223 struct bpf_subprog_info *subprog = &env->subprog_info[state->subprogno];
5224 struct bpf_insn_aux_data *aux = env->insn_aux_data;
5247 static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
5249 struct bpf_func_state *state,
5253 struct bpf_func_state *cur; /* state of the current function */
5255 struct bpf_insn *insn = &env->prog->insnsi[insn_idx];
5256 struct bpf_reg_state *reg = NULL;
5320 struct bpf_reg_state *tmp_reg = &env->fake_reg[0];
5393 static int check_stack_write_var_off(struct bpf_verifier_env *env,
5395 struct bpf_func_state *state,
5399 struct bpf_func_state *cur; /* state of the current function */
5402 struct bpf_reg_state *ptr_reg = NULL, *value_reg = NULL;
5403 struct bpf_insn *insn = &env->prog->insnsi[insn_idx];
5462 struct bpf_reg_state *spill_reg = &state->stack[spi].spilled_ptr;
5511 static void mark_reg_stack_read(struct bpf_verifier_env *env,
5513 struct bpf_func_state *ptr_state,
5516 struct bpf_verifier_state *vstate = env->cur_state;
5517 struct bpf_func_state *state = vstate->frame[vstate->curframe];
5551 static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
5553 struct bpf_func_state *reg_state,
5556 struct bpf_verifier_state *vstate = env->cur_state;
5557 struct bpf_func_state *state = vstate->frame[vstate->curframe];
5559 struct bpf_reg_state *reg;
5695 static int check_stack_range_initialized(struct bpf_verifier_env *env,
5699 struct bpf_call_arg_meta *meta);
5701 static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
5719 static int check_stack_read_var_off(struct bpf_verifier_env *env,
5723 struct bpf_reg_state *reg = reg_state(env, ptr_regno);
5724 struct bpf_func_state *ptr_state = func(env, reg);
5751 static int check_stack_read(struct bpf_verifier_env *env,
5755 struct bpf_reg_state *reg = reg_state(env, ptr_regno);
5756 struct bpf_func_state *state = func(env, reg);
5808 static int check_stack_write(struct bpf_verifier_env *env,
5812 struct bpf_reg_state *reg = reg_state(env, ptr_regno);
5813 struct bpf_func_state *state = func(env, reg);
5831 static int check_map_access_type(struct bpf_verifier_env *env, u32 regno,
5834 struct bpf_reg_state *reg = reg_state(env, regno);
5835 struct bpf_map *map = reg->map_ptr;
5854 static int __check_mem_access(struct bpf_verifier_env *env, int regno,
5859 struct bpf_reg_state *reg;
5890 static int check_mem_region_access(struct bpf_verifier_env *env, u32 regno,
5894 struct bpf_verifier_state *vstate = env->cur_state;
5895 struct bpf_func_state *state = vstate->frame[vstate->curframe];
5896 struct bpf_reg_state *reg = &state->regs[regno];
5945 static int __check_ptr_off_reg(struct bpf_verifier_env *env,
5946 const struct bpf_reg_state *reg, int regno,
5977 static int check_ptr_off_reg(struct bpf_verifier_env *env,
5978 const struct bpf_reg_state *reg, int regno)
5983 static int map_kptr_match_type(struct bpf_verifier_env *env,
5984 struct btf_field *kptr_field,
5985 struct bpf_reg_state *reg, u32 regno)
6023 * struct foo {
6024 * struct bar br;
6025 * struct baz bz;
6028 * struct foo *v;
6032 * // first member type of struct after comparison fails
6033 * val->baz = &v->bz; // reg->off is non-zero, so struct needs to be walked
6038 * the struct to match type against first member of struct, i.e. reject
6059 static bool in_sleepable(struct bpf_verifier_env *env)
6067 static bool in_rcu_cs(struct bpf_verifier_env *env)
6077 BTF_ID(struct, prog_test_ref_kfunc)
6080 BTF_ID(struct, cgroup)
6083 BTF_ID(struct, bpf_cpumask)
6085 BTF_ID(struct, task_struct)
6087 BTF_ID(struct, bpf_crypto_ctx)
6091 static bool rcu_protected_object(const struct btf *btf, u32 btf_id)
6098 static struct btf_record *kptr_pointee_btf_record(struct btf_field *kptr_field)
6100 struct btf_struct_meta *meta;
6111 static bool rcu_safe_kptr(const struct btf_field *field)
6113 const struct btf_field_kptr *kptr = &field->kptr;
6119 static u32 btf_ld_kptr_type(struct bpf_verifier_env *env, struct btf_field *kptr_field)
6121 struct btf_record *rec;
6142 static int mark_uptr_ld_reg(struct bpf_verifier_env *env, u32 regno,
6143 struct btf_field *field)
6145 struct bpf_reg_state *reg;
6146 const struct btf_type *t;
6158 static int check_map_kptr_access(struct bpf_verifier_env *env, u32 regno,
6160 struct btf_field *kptr_field)
6162 struct bpf_insn *insn = &env->prog->insnsi[insn_idx];
6164 struct bpf_reg_state *val_reg;
6226 static u32 map_mem_size(const struct bpf_map *map)
6235 static int check_map_access(struct bpf_verifier_env *env, u32 regno,
6239 struct bpf_verifier_state *vstate = env->cur_state;
6240 struct bpf_func_state *state = vstate->frame[vstate->curframe];
6241 struct bpf_reg_state *reg = &state->regs[regno];
6242 struct bpf_map *map = reg->map_ptr;
6244 struct btf_record *rec;
6255 struct btf_field *field = &rec->fields[i];
6303 static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
6304 const struct bpf_call_arg_meta *meta,
6345 static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
6348 struct bpf_reg_state *reg = reg_state(env, regno);
6386 /* check access to 'struct bpf_context' fields. Supports fixed offsets only */
6387 static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
6388 enum bpf_access_type t, struct bpf_insn_access_aux *info)
6419 static int check_flow_keys_access(struct bpf_verifier_env *env, int off,
6423 (u64)off + size > sizeof(struct bpf_flow_keys)) {
6431 static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
6435 struct bpf_reg_state *reg = reg_state(env, regno);
6436 struct bpf_insn_access_aux info = {};
6475 static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
6480 static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
6482 const struct bpf_reg_state *reg = reg_state(env, regno);
6487 static bool is_sk_reg(struct bpf_verifier_env *env, int regno)
6489 const struct bpf_reg_state *reg = reg_state(env, regno);
6494 static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
6496 const struct bpf_reg_state *reg = reg_state(env, regno);
6501 static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno)
6503 const struct bpf_reg_state *reg = reg_state(env, regno);
6509 static bool is_arena_reg(struct bpf_verifier_env *env, int regno)
6511 const struct bpf_reg_state *reg = reg_state(env, regno);
6519 static bool atomic_ptr_type_ok(struct bpf_verifier_env *env, int regno,
6520 struct bpf_insn *insn)
6545 static bool is_trusted_reg(const struct bpf_reg_state *reg)
6569 static bool is_rcu_reg(const struct bpf_reg_state *reg)
6579 static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
6580 const struct bpf_reg_state *reg,
6583 struct tnum reg_off;
6614 static int check_generic_ptr_alignment(struct bpf_verifier_env *env,
6615 const struct bpf_reg_state *reg,
6619 struct tnum reg_off;
6638 static int check_ptr_alignment(struct bpf_verifier_env *env,
6639 const struct bpf_reg_state *reg, int off,
6695 static enum priv_stack_mode bpf_enable_priv_stack(struct bpf_prog *prog)
6723 static int round_up_stack_depth(struct bpf_verifier_env *env, int stack_depth)
6740 static int check_max_stack_depth_subprog(struct bpf_verifier_env *env, int idx,
6743 struct bpf_subprog_info *subprog = env->subprog_info;
6744 struct bpf_insn *insn = env->prog->insnsi;
6901 static int check_max_stack_depth(struct bpf_verifier_env *env)
6904 struct bpf_subprog_info *si = env->subprog_info;
6946 static int get_callee_stack_depth(struct bpf_verifier_env *env,
6947 const struct bpf_insn *insn, int idx)
6958 static int __check_buffer_access(struct bpf_verifier_env *env,
6960 const struct bpf_reg_state *reg,
6982 static int check_tp_buffer_access(struct bpf_verifier_env *env,
6983 const struct bpf_reg_state *reg,
6998 static int check_buffer_access(struct bpf_verifier_env *env,
6999 const struct bpf_reg_state *reg,
7018 static void zext_32_to_64(struct bpf_reg_state *reg)
7027 static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
7056 static void set_sext64_default_val(struct bpf_reg_state *reg, int size)
7075 static void coerce_reg_to_size_sx(struct bpf_reg_state *reg, int size)
7134 static void set_sext32_default_val(struct bpf_reg_state *reg, int size)
7149 static void coerce_subreg_to_size_sx(struct bpf_reg_state *reg, int size)
7199 static bool bpf_map_is_rdonly(const struct bpf_map *map)
7219 static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val,
7262 BTF_TYPE_SAFE_RCU(struct task_struct) {
7264 struct css_set __rcu *cgroups;
7265 struct task_struct __rcu *real_parent;
7266 struct task_struct *group_leader;
7269 BTF_TYPE_SAFE_RCU(struct cgroup) {
7271 struct kernfs_node *kn;
7274 BTF_TYPE_SAFE_RCU(struct css_set) {
7275 struct cgroup *dfl_cgrp;
7278 BTF_TYPE_SAFE_RCU(struct cgroup_subsys_state) {
7279 struct cgroup *cgroup;
7283 BTF_TYPE_SAFE_RCU_OR_NULL(struct mm_struct) {
7284 struct file __rcu *exe_file;
7286 struct task_struct __rcu *owner;
7293 BTF_TYPE_SAFE_RCU_OR_NULL(struct sk_buff) {
7294 struct sock *sk;
7297 BTF_TYPE_SAFE_RCU_OR_NULL(struct request_sock) {
7298 struct sock *sk;
7302 BTF_TYPE_SAFE_TRUSTED(struct bpf_iter_meta) {
7303 struct seq_file *seq;
7306 BTF_TYPE_SAFE_TRUSTED(struct bpf_iter__task) {
7307 struct bpf_iter_meta *meta;
7308 struct task_struct *task;
7311 BTF_TYPE_SAFE_TRUSTED(struct linux_binprm) {
7312 struct file *file;
7315 BTF_TYPE_SAFE_TRUSTED(struct file) {
7316 struct inode *f_inode;
7319 BTF_TYPE_SAFE_TRUSTED_OR_NULL(struct dentry) {
7320 struct inode *d_inode;
7323 BTF_TYPE_SAFE_TRUSTED_OR_NULL(struct socket) {
7324 struct sock *sk;
7327 BTF_TYPE_SAFE_TRUSTED_OR_NULL(struct vm_area_struct) {
7328 struct mm_struct *vm_mm;
7329 struct file *vm_file;
7332 static bool type_is_rcu(struct bpf_verifier_env *env,
7333 struct bpf_reg_state *reg,
7336 BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU(struct task_struct));
7337 BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU(struct cgroup));
7338 BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU(struct css_set));
7339 BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU(struct cgroup_subsys_state));
7344 static bool type_is_rcu_or_null(struct bpf_verifier_env *env,
7345 struct bpf_reg_state *reg,
7348 BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU_OR_NULL(struct mm_struct));
7349 BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU_OR_NULL(struct sk_buff));
7350 BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU_OR_NULL(struct request_sock));
7355 static bool type_is_trusted(struct bpf_verifier_env *env,
7356 struct bpf_reg_state *reg,
7359 BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct bpf_iter_meta));
7360 BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct bpf_iter__task));
7361 BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct linux_binprm));
7362 BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct file));
7367 static bool type_is_trusted_or_null(struct bpf_verifier_env *env,
7368 struct bpf_reg_state *reg,
7371 BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED_OR_NULL(struct socket));
7372 BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED_OR_NULL(struct dentry));
7373 BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED_OR_NULL(struct vm_area_struct));
7379 static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
7380 struct bpf_reg_state *regs,
7385 struct bpf_reg_state *reg = regs + regno;
7386 const struct btf_type *t = btf_type_by_id(reg->btf, reg->btf_id);
7395 "'struct %s' access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN\n",
7401 "Cannot access kernel 'struct %s' from non-GPL compatible program\n",
7532 static int check_ptr_to_map_access(struct bpf_verifier_env *env,
7533 struct bpf_reg_state *regs,
7538 struct bpf_reg_state *reg = regs + regno;
7539 struct bpf_map *map = reg->map_ptr;
7540 struct bpf_reg_state map_reg;
7542 const struct btf_type *t;
7563 "'struct %s' access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN\n",
7604 static int check_stack_slot_within_bounds(struct bpf_verifier_env *env,
7606 struct bpf_func_state *state,
7627 struct bpf_verifier_env *env,
7631 struct bpf_reg_state *reg = reg_state(env, regno);
7632 struct bpf_func_state *state = func(env, reg);
7685 static bool get_func_retval_range(struct bpf_prog *prog,
7686 struct bpf_retval_range *range)
7702 static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
7706 struct bpf_reg_state *regs = cur_regs(env);
7707 struct bpf_reg_state *reg = regs + regno;
7735 struct btf_field *kptr_field = NULL;
7754 struct bpf_map *map = reg->map_ptr;
7818 struct bpf_retval_range range;
7819 struct bpf_insn_access_aux info = {
7970 static int save_aux_ptr_type(struct bpf_verifier_env *env, enum bpf_reg_type type,
7973 static int check_load_mem(struct bpf_verifier_env *env, struct bpf_insn *insn,
7977 struct bpf_reg_state *regs = cur_regs(env);
8006 static int check_store_reg(struct bpf_verifier_env *env, struct bpf_insn *insn,
8009 struct bpf_reg_state *regs = cur_regs(env);
8034 static int check_atomic_rmw(struct bpf_verifier_env *env,
8035 struct bpf_insn *insn)
8123 static int check_atomic_load(struct bpf_verifier_env *env,
8124 struct bpf_insn *insn)
8142 static int check_atomic_store(struct bpf_verifier_env *env,
8143 struct bpf_insn *insn)
8161 static int check_atomic(struct bpf_verifier_env *env, struct bpf_insn *insn)
8207 struct bpf_verifier_env *env, int regno, int off,
8209 enum bpf_access_type type, struct bpf_call_arg_meta *meta)
8211 struct bpf_reg_state *reg = reg_state(env, regno);
8212 struct bpf_func_state *state = func(env, reg);
8352 static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
8355 struct bpf_call_arg_meta *meta)
8357 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
8449 static int check_mem_size_reg(struct bpf_verifier_env *env,
8450 struct bpf_reg_state *reg, u32 regno,
8453 struct bpf_call_arg_meta *meta)
8499 static int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
8503 struct bpf_reg_state saved_reg;
8527 static int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
8530 struct bpf_reg_state *mem_reg = &cur_regs(env)[regno - 1];
8532 struct bpf_reg_state saved_reg;
8533 struct bpf_call_arg_meta meta;
8569 * For maps with 'struct bpf_spin_lock' inside map value the verifier keeps
8582 static int process_spin_lock(struct bpf_verifier_env *env, int regno, int flags)
8586 struct bpf_reg_state *reg = reg_state(env, regno);
8587 struct bpf_verifier_state *cur = env->cur_state;
8591 struct bpf_map *map = NULL;
8592 struct btf *btf = NULL;
8593 struct btf_record *rec;
8623 verbose(env, "off %lld doesn't point to 'struct %s_lock' that is at %d\n",
8699 static int check_map_field_pointer(struct bpf_verifier_env *env, u32 regno,
8701 struct bpf_map_desc *map_desc)
8703 struct bpf_reg_state *reg = reg_state(env, regno);
8705 struct bpf_map *map = reg->map_ptr;
8740 verbose(env, "off %lld doesn't point to 'struct %s' that is at %d\n",
8753 static int process_timer_func(struct bpf_verifier_env *env, int regno,
8754 struct bpf_map_desc *map)
8763 static int process_timer_helper(struct bpf_verifier_env *env, int regno,
8764 struct bpf_call_arg_meta *meta)
8769 static int process_timer_kfunc(struct bpf_verifier_env *env, int regno,
8770 struct bpf_kfunc_call_arg_meta *meta)
8775 static int process_kptr_func(struct bpf_verifier_env *env, int regno,
8776 struct bpf_call_arg_meta *meta)
8778 struct bpf_reg_state *reg = reg_state(env, regno);
8779 struct btf_field *kptr_field;
8780 struct bpf_map *map_ptr;
8781 struct btf_record *rec;
8830 * Mutability of bpf_dynptr is at two levels, one is at the level of struct
8831 * bpf_dynptr itself, i.e. whether the helper is receiving a pointer to struct
8832 * bpf_dynptr or pointer to const struct bpf_dynptr. In the former case, it can
8841 * This is consistent with how C applies the const modifier to a struct object,
8846 * type, and declare it as 'const struct bpf_dynptr *' in their prototype.
8848 static int process_dynptr_func(struct bpf_verifier_env *env, int regno, int insn_idx,
8851 struct bpf_reg_state *reg = reg_state(env, regno);
8856 "arg#%d expected pointer to stack or const struct bpf_dynptr\n",
8928 static u32 iter_ref_obj_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg, int spi)
8930 struct bpf_func_state *state = func(env, reg);
8935 static bool is_iter_kfunc(struct bpf_kfunc_call_arg_meta *meta)
8940 static bool is_iter_new_kfunc(struct bpf_kfunc_call_arg_meta *meta)
8945 static bool is_iter_next_kfunc(struct bpf_kfunc_call_arg_meta *meta)
8950 static bool is_iter_destroy_kfunc(struct bpf_kfunc_call_arg_meta *meta)
8955 static bool is_kfunc_arg_iter(struct bpf_kfunc_call_arg_meta *meta, int arg_idx,
8956 const struct btf_param *arg)
8968 static int process_iter_arg(struct bpf_verifier_env *env, int regno, int insn_idx,
8969 struct bpf_kfunc_call_arg_meta *meta)
8971 struct bpf_reg_state *reg = reg_state(env, regno);
8972 const struct btf_type *t;
8981 * ensures struct convention, so we wouldn't need to do any BTF
9057 static struct bpf_verifier_state *find_prev_entry(struct bpf_verifier_env *env,
9058 struct bpf_verifier_state *cur,
9061 struct bpf_verifier_state_list *sl;
9062 struct bpf_verifier_state *st;
9063 struct list_head *pos, *head;
9068 sl = container_of(pos, struct bpf_verifier_state_list, node);
9081 static void reset_idmap_scratch(struct bpf_verifier_env *env);
9082 static bool regs_exact(const struct bpf_reg_state *rold,
9083 const struct bpf_reg_state *rcur,
9084 struct bpf_idmap *idmap);
9090 static bool scalars_exact_for_widen(const struct bpf_reg_state *rold,
9091 const struct bpf_reg_state *rcur)
9093 return !memcmp(rold, rcur, offsetof(struct bpf_reg_state, id));
9096 static void maybe_widen_reg(struct bpf_verifier_env *env,
9097 struct bpf_reg_state *rold, struct bpf_reg_state *rcur)
9108 static int widen_imprecise_scalars(struct bpf_verifier_env *env,
9109 struct bpf_verifier_state *old,
9110 struct bpf_verifier_state *cur)
9112 struct bpf_func_state *fold, *fcur;
9139 static struct bpf_reg_state *get_iter_from_state(struct bpf_verifier_state *cur_st,
9140 struct bpf_kfunc_call_arg_meta *meta)
9211 * struct bpf_num_iter it;
9226 static int process_iter_next_call(struct bpf_verifier_env *env, int insn_idx,
9227 struct bpf_kfunc_call_arg_meta *meta)
9229 struct bpf_verifier_state *cur_st = env->cur_state, *queued_st, *prev_st;
9230 struct bpf_func_state *cur_fr = cur_st->frame[cur_st->curframe], *queued_fr;
9231 struct bpf_reg_state *cur_iter, *queued_iter;
9233 BTF_TYPE_EMIT(struct bpf_iter);
9303 static int resolve_map_arg_type(struct bpf_verifier_env *env,
9304 const struct bpf_call_arg_meta *meta,
9333 struct bpf_reg_types {
9338 static const struct bpf_reg_types sock_types = {
9348 static const struct bpf_reg_types btf_id_sock_common_types = {
9361 static const struct bpf_reg_types mem_types = {
9375 static const struct bpf_reg_types spin_lock_types = {
9382 static const struct bpf_reg_types fullsock_types = { .types = { PTR_TO_SOCKET } };
9383 static const struct bpf_reg_types scalar_types = { .types = { SCALAR_VALUE } };
9384 static const struct bpf_reg_types context_types = { .types = { PTR_TO_CTX } };
9385 static const struct bpf_reg_types ringbuf_mem_types = { .types = { PTR_TO_MEM | MEM_RINGBUF } };
9386 static const struct bpf_reg_types const_map_ptr_types = { .types = { CONST_PTR_TO_MAP } };
9387 static const struct bpf_reg_types btf_ptr_types = {
9394 static const struct bpf_reg_types percpu_btf_ptr_types = {
9401 static const struct bpf_reg_types func_ptr_types = { .types = { PTR_TO_FUNC } };
9402 static const struct bpf_reg_types stack_ptr_types = { .types = { PTR_TO_STACK } };
9403 static const struct bpf_reg_types const_str_ptr_types = { .types = { PTR_TO_MAP_VALUE } };
9404 static const struct bpf_reg_types timer_types = { .types = { PTR_TO_MAP_VALUE } };
9405 static const struct bpf_reg_types kptr_xchg_dest_types = {
9411 static const struct bpf_reg_types dynptr_types = {
9418 static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = {
9444 static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
9447 struct bpf_call_arg_meta *meta)
9449 struct bpf_reg_state *reg = reg_state(env, regno);
9451 const struct bpf_reg_types *compatible;
9524 * 'struct sock_common', hence make an exception for it. This
9591 static struct btf_field *
9592 reg_find_field_offset(const struct bpf_reg_state *reg, s32 off, u32 fields)
9594 struct btf_field *field;
9595 struct btf_record *rec;
9608 static int check_func_arg_reg_off(struct bpf_verifier_env *env,
9609 const struct bpf_reg_state *reg, int regno,
9679 static struct bpf_reg_state *get_dynptr_arg_reg(struct bpf_verifier_env *env,
9680 const struct bpf_func_proto *fn,
9681 struct bpf_reg_state *regs)
9683 struct bpf_reg_state *state = NULL;
9701 static int dynptr_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
9703 struct bpf_func_state *state = func(env, reg);
9714 static int dynptr_ref_obj_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
9716 struct bpf_func_state *state = func(env, reg);
9727 static enum bpf_dynptr_type dynptr_get_type(struct bpf_verifier_env *env,
9728 struct bpf_reg_state *reg)
9730 struct bpf_func_state *state = func(env, reg);
9745 static int check_reg_const_str(struct bpf_verifier_env *env,
9746 struct bpf_reg_state *reg, u32 regno)
9748 struct bpf_map *map = reg->map_ptr;
9799 static int get_constant_map_key(struct bpf_verifier_env *env,
9800 struct bpf_reg_state *key,
9804 struct bpf_func_state *state = func(env, key);
9805 struct bpf_reg_state *reg;
9861 static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
9862 struct bpf_call_arg_meta *meta,
9863 const struct bpf_func_proto *fn,
9867 struct bpf_reg_state *reg = reg_state(env, regno);
9924 struct bpf_func_state *state = func(env, reg);
10131 static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)
10140 /* It's not possible to get access to a locked struct sock in these
10169 static bool allow_tail_call_in_subprogs(struct bpf_verifier_env *env)
10175 static int check_map_func_compatibility(struct bpf_verifier_env *env,
10176 struct bpf_map *map, int func_id)
10428 static bool check_raw_mode_ok(const struct bpf_func_proto *fn)
10450 static bool check_args_pair_invalid(const struct bpf_func_proto *fn, int arg)
10465 static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
10483 static bool check_btf_id_ok(const struct bpf_func_proto *fn)
10502 static bool check_mem_arg_rw_flag_ok(const struct bpf_func_proto *fn)
10518 static int check_func_proto(const struct bpf_func_proto *fn)
10532 static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
10534 struct bpf_func_state *state;
10535 struct bpf_reg_state *reg;
10548 static void mark_pkt_end(struct bpf_verifier_state *vstate, int regn, bool range_open)
10550 struct bpf_func_state *state = vstate->frame[vstate->curframe];
10551 struct bpf_reg_state *reg = &state->regs[regn];
10569 static int release_reference_nomark(struct bpf_verifier_state *state, int ref_obj_id)
10589 static int release_reference(struct bpf_verifier_env *env, int ref_obj_id)
10591 struct bpf_verifier_state *vstate = env->cur_state;
10592 struct bpf_func_state *state;
10593 struct bpf_reg_state *reg;
10608 static void invalidate_non_owning_refs(struct bpf_verifier_env *env)
10610 struct bpf_func_state *unused;
10611 struct bpf_reg_state *reg;
10619 static void clear_caller_saved_regs(struct bpf_verifier_env *env,
10620 struct bpf_reg_state *regs)
10631 typedef int (*set_callee_state_fn)(struct bpf_verifier_env *env,
10632 struct bpf_func_state *caller,
10633 struct bpf_func_state *callee,
10636 static int set_callee_state(struct bpf_verifier_env *env,
10637 struct bpf_func_state *caller,
10638 struct bpf_func_state *callee, int insn_idx);
10640 static int setup_func_entry(struct bpf_verifier_env *env, int subprog, int callsite,
10642 struct bpf_verifier_state *state)
10644 struct bpf_func_state *caller, *callee;
10688 static int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog,
10689 const struct btf *btf,
10690 struct bpf_reg_state *regs)
10692 struct bpf_subprog_info *sub = subprog_info(env, subprog);
10693 struct bpf_verifier_log *log = &env->log;
10706 struct bpf_reg_state *reg = &regs[regno];
10707 struct bpf_subprog_arg_info *arg = &sub->args[i];
10762 struct bpf_call_arg_meta meta;
10789 static int btf_check_subprog_call(struct bpf_verifier_env *env, int subprog,
10790 struct bpf_reg_state *regs)
10792 struct bpf_prog *prog = env->prog;
10793 struct btf *btf = prog->aux->btf;
10817 static int push_callback_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
10821 struct bpf_verifier_state *state = env->cur_state, *callback_state;
10822 struct bpf_func_state *caller, *callee;
10848 struct bpf_verifier_state *async_cb;
10886 static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
10889 struct bpf_verifier_state *state = env->cur_state;
10890 struct bpf_func_state *caller;
10968 int map_set_for_each_callback_args(struct bpf_verifier_env *env,
10969 struct bpf_func_state *caller,
10970 struct bpf_func_state *callee)
10972 /* bpf_for_each_map_elem(struct bpf_map *map, void *callback_fn,
10974 * callback_fn(struct bpf_map *map, void *key, void *value,
10995 static int set_callee_state(struct bpf_verifier_env *env,
10996 struct bpf_func_state *caller,
10997 struct bpf_func_state *callee, int insn_idx)
11009 static int set_map_elem_callback_state(struct bpf_verifier_env *env,
11010 struct bpf_func_state *caller,
11011 struct bpf_func_state *callee,
11014 struct bpf_insn_aux_data *insn_aux = &env->insn_aux_data[insn_idx];
11015 struct bpf_map *map;
11035 static int set_loop_callback_state(struct bpf_verifier_env *env,
11036 struct bpf_func_state *caller,
11037 struct bpf_func_state *callee,
11057 static int set_timer_callback_state(struct bpf_verifier_env *env,
11058 struct bpf_func_state *caller,
11059 struct bpf_func_state *callee,
11062 struct bpf_map *map_ptr = caller->regs[BPF_REG_1].map_ptr;
11064 /* bpf_timer_set_callback(struct bpf_timer *timer, void *callback_fn);
11065 * callback_fn(struct bpf_map *map, void *key, void *value);
11087 static int set_find_vma_callback_state(struct bpf_verifier_env *env,
11088 struct bpf_func_state *caller,
11089 struct bpf_func_state *callee,
11092 /* bpf_find_vma(struct task_struct *task, u64 addr,
11094 * (callback_fn)(struct task_struct *task,
11095 * struct vm_area_struct *vma, void *callback_ctx);
11115 static int set_user_ringbuf_callback_state(struct bpf_verifier_env *env,
11116 struct bpf_func_state *caller,
11117 struct bpf_func_state *callee,
11120 /* bpf_user_ringbuf_drain(struct bpf_map *map, void *callback_fn, void
11122 * callback_fn(const struct bpf_dynptr_t* dynptr, void *callback_ctx);
11138 static int set_rbtree_add_callback_state(struct bpf_verifier_env *env,
11139 struct bpf_func_state *caller,
11140 struct bpf_func_state *callee,
11143 /* void bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node,
11144 * bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b));
11146 * 'struct bpf_rb_node *node' arg to bpf_rbtree_add_impl is the same PTR_TO_BTF_ID w/ offset
11150 struct btf_field *field;
11170 static int set_task_work_schedule_callback_state(struct bpf_verifier_env *env,
11171 struct bpf_func_state *caller,
11172 struct bpf_func_state *callee,
11175 struct bpf_map *map_ptr = caller->regs[BPF_REG_3].map_ptr;
11178 * callback_fn(struct bpf_map *map, void *key, void *value);
11206 static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env)
11208 struct bpf_verifier_state *state = env->cur_state;
11209 struct bpf_insn *insn = env->prog->insnsi;
11210 struct bpf_func_state *callee;
11225 static bool retval_range_within(struct bpf_retval_range range, const struct bpf_reg_state *reg,
11234 static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
11236 struct bpf_verifier_state *state = env->cur_state, *prev_st;
11237 struct bpf_func_state *caller, *callee;
11238 struct bpf_reg_state *r0;
11310 * struct ctx { int i; }
11311 * void cb(int idx, struct ctx *ctx) { ctx->i++; ... }
11313 * struct ctx = { .i = 0; }
11328 static int do_refine_retval_range(struct bpf_verifier_env *env,
11329 struct bpf_reg_state *regs, int ret_type,
11331 struct bpf_call_arg_meta *meta)
11333 struct bpf_reg_state *ret_reg = &regs[BPF_REG_0];
11367 record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
11370 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
11371 struct bpf_map *map = meta->map.ptr;
11413 record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
11416 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
11417 struct bpf_reg_state *reg;
11418 struct bpf_map *map = meta->map.ptr;
11449 static int check_reference_leak(struct bpf_verifier_env *env, bool exception_exit)
11451 struct bpf_verifier_state *state = env->cur_state;
11453 struct bpf_reg_state *reg = reg_state(env, BPF_REG_0);
11476 static int check_resource_leak(struct bpf_verifier_env *env, bool exception_exit, bool check_lock, const char *prefix)
11509 static int check_bpf_snprintf_call(struct bpf_verifier_env *env,
11510 struct bpf_reg_state *regs)
11512 struct bpf_reg_state *fmt_reg = &regs[BPF_REG_3];
11513 struct bpf_reg_state *data_len_reg = &regs[BPF_REG_5];
11514 struct bpf_map *fmt_map = fmt_reg->map_ptr;
11515 struct bpf_bprintf_data data = {};
11547 static int check_get_func_ip(struct bpf_verifier_env *env)
11568 static struct bpf_insn_aux_data *cur_aux(const struct bpf_verifier_env *env)
11573 static bool loop_flag_is_zero(struct bpf_verifier_env *env)
11575 struct bpf_reg_state *reg = reg_state(env, BPF_REG_4);
11584 static void update_loop_inline_state(struct bpf_verifier_env *env, u32 subprogno)
11586 struct bpf_loop_inline_state *state = &cur_aux(env)->loop_inline_state;
11617 static int get_helper_proto(struct bpf_verifier_env *env, int func_id,
11618 const struct bpf_func_proto **ptr)
11631 static inline bool in_sleepable_context(struct bpf_verifier_env *env)
11640 static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
11645 const struct bpf_func_proto *fn = NULL;
11648 struct bpf_reg_state *regs;
11649 struct bpf_call_arg_meta meta;
11763 struct bpf_func_state *state;
11764 struct bpf_reg_state *reg;
11864 struct bpf_reg_state *reg;
11901 struct bpf_reg_state *reg;
11923 struct bpf_reg_state *reg = &regs[BPF_REG_1];
11924 const struct btf_type *type;
12012 const struct btf_type *t;
12018 const struct btf_type *ret;
12051 struct btf *ret_btf;
12154 struct bpf_verifier_state *branch;
12179 static void __mark_btf_func_reg_size(struct bpf_verifier_env *env, struct bpf_reg_state *regs,
12182 struct bpf_reg_state *reg = &regs[regno];
12194 static void mark_btf_func_reg_size(struct bpf_verifier_env *env, u32 regno,
12200 static bool is_kfunc_acquire(struct bpf_kfunc_call_arg_meta *meta)
12205 static bool is_kfunc_release(struct bpf_kfunc_call_arg_meta *meta)
12210 static bool is_kfunc_sleepable(struct bpf_kfunc_call_arg_meta *meta)
12215 static bool is_kfunc_destructive(struct bpf_kfunc_call_arg_meta *meta)
12220 static bool is_kfunc_rcu(struct bpf_kfunc_call_arg_meta *meta)
12225 static bool is_kfunc_rcu_protected(struct bpf_kfunc_call_arg_meta *meta)
12230 static bool is_kfunc_arg_mem_size(const struct btf *btf,
12231 const struct btf_param *arg,
12232 const struct bpf_reg_state *reg)
12234 const struct btf_type *t;
12243 static bool is_kfunc_arg_const_mem_size(const struct btf *btf,
12244 const struct btf_param *arg,
12245 const struct bpf_reg_state *reg)
12247 const struct btf_type *t;
12256 static bool is_kfunc_arg_constant(const struct btf *btf, const struct btf_param *arg)
12261 static bool is_kfunc_arg_ignore(const struct btf *btf, const struct btf_param *arg)
12266 static bool is_kfunc_arg_map(const struct btf *btf, const struct btf_param *arg)
12271 static bool is_kfunc_arg_alloc_obj(const struct btf *btf, const struct btf_param *arg)
12276 static bool is_kfunc_arg_uninit(const struct btf *btf, const struct btf_param *arg)
12281 static bool is_kfunc_arg_refcounted_kptr(const struct btf *btf, const struct btf_param *arg)
12286 static bool is_kfunc_arg_nullable(const struct btf *btf, const struct btf_param *arg)
12291 static bool is_kfunc_arg_const_str(const struct btf *btf, const struct btf_param *arg)
12296 static bool is_kfunc_arg_irq_flag(const struct btf *btf, const struct btf_param *arg)
12301 static bool is_kfunc_arg_scalar_with_name(const struct btf *btf,
12302 const struct btf_param *arg,
12334 BTF_ID(struct, bpf_dynptr)
12335 BTF_ID(struct, bpf_list_head)
12336 BTF_ID(struct, bpf_list_node)
12337 BTF_ID(struct, bpf_rb_root)
12338 BTF_ID(struct, bpf_rb_node)
12339 BTF_ID(struct, bpf_wq)
12340 BTF_ID(struct, bpf_res_spin_lock)
12341 BTF_ID(struct, bpf_task_work)
12342 BTF_ID(struct, bpf_prog_aux)
12343 BTF_ID(struct, bpf_timer)
12345 static bool __is_kfunc_ptr_arg_type(const struct btf *btf,
12346 const struct btf_param *arg, int type)
12348 const struct btf_type *t;
12362 static bool is_kfunc_arg_dynptr(const struct btf *btf, const struct btf_param *arg)
12367 static bool is_kfunc_arg_list_head(const struct btf *btf, const struct btf_param *arg)
12372 static bool is_kfunc_arg_list_node(const struct btf *btf, const struct btf_param *arg)
12377 static bool is_kfunc_arg_rbtree_root(const struct btf *btf, const struct btf_param *arg)
12382 static bool is_kfunc_arg_rbtree_node(const struct btf *btf, const struct btf_param *arg)
12387 static bool is_kfunc_arg_timer(const struct btf *btf, const struct btf_param *arg)
12392 static bool is_kfunc_arg_wq(const struct btf *btf, const struct btf_param *arg)
12397 static bool is_kfunc_arg_task_work(const struct btf *btf, const struct btf_param *arg)
12402 static bool is_kfunc_arg_res_spin_lock(const struct btf *btf, const struct btf_param *arg)
12407 static bool is_rbtree_node_type(const struct btf_type *t)
12412 static bool is_list_node_type(const struct btf_type *t)
12417 static bool is_kfunc_arg_callback(struct bpf_verifier_env *env, const struct btf *btf,
12418 const struct btf_param *arg)
12420 const struct btf_type *t;
12429 static bool is_kfunc_arg_prog_aux(const struct btf *btf, const struct btf_param *arg)
12434 /* Returns true if struct is composed of scalars, 4 levels of nesting allowed */
12435 static bool __btf_type_is_scalar_struct(struct bpf_verifier_env *env,
12436 const struct btf *btf,
12437 const struct btf_type *t, int rec)
12439 const struct btf_type *member_type;
12440 const struct btf_member *member;
12447 const struct btf_array *array;
12452 verbose(env, "max struct nesting depth exceeded\n");
12643 static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta)
12653 static bool is_kfunc_bpf_rcu_read_lock(struct bpf_kfunc_call_arg_meta *meta)
12658 static bool is_kfunc_bpf_rcu_read_unlock(struct bpf_kfunc_call_arg_meta *meta)
12663 static bool is_kfunc_bpf_preempt_disable(struct bpf_kfunc_call_arg_meta *meta)
12668 static bool is_kfunc_bpf_preempt_enable(struct bpf_kfunc_call_arg_meta *meta)
12673 static bool is_kfunc_pkt_changing(struct bpf_kfunc_call_arg_meta *meta)
12679 get_kfunc_ptr_arg_type(struct bpf_verifier_env *env,
12680 struct bpf_kfunc_call_arg_meta *meta,
12681 const struct btf_type *t, const struct btf_type *ref_t,
12682 const char *ref_tname, const struct btf_param *args,
12686 struct bpf_reg_state *regs = cur_regs(env);
12687 struct bpf_reg_state *reg = &regs[regno];
12771 * pointer to scalar, or struct composed (recursively) of scalars. When
12776 verbose(env, "arg#%d pointer type %s %s must point to %sscalar, or struct with scalar\n",
12783 static int process_kf_arg_ptr_to_btf_id(struct bpf_verifier_env *env,
12784 struct bpf_reg_state *reg,
12785 const struct btf_type *ref_t,
12787 struct bpf_kfunc_call_arg_meta *meta,
12790 const struct btf_type *reg_ref_t;
12792 const struct btf *reg_btf;
12815 * struct bpf_cpumask {
12821 * to a struct cpumask, so it would be safe to pass a struct
12822 * bpf_cpumask * to a kfunc expecting a struct cpumask *.
12827 * btf_struct_ids_match() to walk the struct at the 0th offset, and
12855 static int process_irq_flag(struct bpf_verifier_env *env, int regno,
12856 struct bpf_kfunc_call_arg_meta *meta)
12858 struct bpf_reg_state *reg = reg_state(env, regno);
12909 static int ref_set_non_owning(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
12911 struct btf_record *rec = reg_btf_record(reg);
12930 static int ref_convert_owning_non_owning(struct bpf_verifier_env *env, u32 ref_obj_id)
12932 struct bpf_verifier_state *state = env->cur_state;
12933 struct bpf_func_state *unused;
12934 struct bpf_reg_state *reg;
13006 static int check_reg_allocation_locked(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
13008 struct bpf_reference_state *s;
13107 static bool is_bpf_throw_kfunc(struct bpf_insn *insn)
13129 static bool check_kfunc_is_graph_root_api(struct bpf_verifier_env *env,
13154 static bool check_kfunc_is_graph_node_api(struct bpf_verifier_env *env,
13184 __process_kf_arg_ptr_to_graph_root(struct bpf_verifier_env *env,
13185 struct bpf_reg_state *reg, u32 regno,
13186 struct bpf_kfunc_call_arg_meta *meta,
13188 struct btf_field **head_field)
13191 struct btf_field *field;
13192 struct btf_record *rec;
13234 static int process_kf_arg_ptr_to_list_head(struct bpf_verifier_env *env,
13235 struct bpf_reg_state *reg, u32 regno,
13236 struct bpf_kfunc_call_arg_meta *meta)
13242 static int process_kf_arg_ptr_to_rbtree_root(struct bpf_verifier_env *env,
13243 struct bpf_reg_state *reg, u32 regno,
13244 struct bpf_kfunc_call_arg_meta *meta)
13251 __process_kf_arg_ptr_to_graph_node(struct bpf_verifier_env *env,
13252 struct bpf_reg_state *reg, u32 regno,
13253 struct bpf_kfunc_call_arg_meta *meta,
13256 struct btf_field **node_field)
13259 const struct btf_type *et, *t;
13260 struct btf_field *field;
13293 "in struct %s, but arg is at offset=%d in struct %s\n",
13305 verbose(env, "arg#1 offset=%d, but expected %s at offset=%d in struct %s\n",
13315 static int process_kf_arg_ptr_to_list_node(struct bpf_verifier_env *env,
13316 struct bpf_reg_state *reg, u32 regno,
13317 struct bpf_kfunc_call_arg_meta *meta)
13324 static int process_kf_arg_ptr_to_rbtree_node(struct bpf_verifier_env *env,
13325 struct bpf_reg_state *reg, u32 regno,
13326 struct bpf_kfunc_call_arg_meta *meta)
13339 static bool check_css_task_iter_allowlist(struct bpf_verifier_env *env)
13355 static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_arg_meta *meta,
13359 const struct btf *btf = meta->btf;
13360 const struct btf_param *args;
13361 struct btf_record *rec;
13365 args = (const struct btf_param *)(meta->func_proto + 1);
13377 struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[i + 1];
13378 const struct btf_type *t, *ref_t, *resolve_ret;
13731 /* If argument has '__map' suffix expect 'struct bpf_map *' */
13764 struct bpf_reg_state *buff_reg = ®s[regno];
13765 const struct btf_param *buff_arg = &args[i];
13766 struct bpf_reg_state *size_reg = ®s[regno + 1];
13767 const struct btf_param *size_arg = &args[i + 1];
13902 static int fetch_kfunc_arg_meta(struct bpf_verifier_env *env,
13905 struct bpf_kfunc_call_arg_meta *meta)
13907 struct bpf_kfunc_meta kfunc;
13933 static int check_special_kfunc(struct bpf_verifier_env *env, struct bpf_kfunc_call_arg_meta *meta,
13934 struct bpf_reg_state *regs, struct bpf_insn_aux_data *insn_aux,
13935 const struct btf_type *ptr_type, struct btf *desc_btf)
13937 const struct btf_type *ret_t;
13945 struct btf_struct_meta *struct_meta;
13946 struct btf *ret_btf;
13968 verbose(env, "bpf_obj_new/bpf_percpu_obj_new type ID argument must be of a struct\n");
14004 verbose(env, "bpf_percpu_obj_new type ID argument must be of a struct of scalars\n");
14033 struct btf_field *field = meta->arg_list_head.field;
14037 struct btf_field *field = meta->arg_rbtree_root.field;
14062 "kfunc bpf_rdonly_cast type ID argument must be of a struct or void\n");
14108 static int check_return_code(struct bpf_verifier_env *env, int regno, const char *reg_name);
14110 static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
14115 struct bpf_reg_state *regs = cur_regs(env);
14117 const struct btf_type *t, *ptr_type;
14118 struct bpf_kfunc_call_arg_meta meta;
14119 struct bpf_insn_aux_data *insn_aux;
14121 const struct btf_param *args;
14122 struct btf *desc_btf;
14142 struct bpf_verifier_state *branch;
14143 struct bpf_reg_state *regs;
14233 struct bpf_func_state *state;
14234 struct bpf_reg_state *reg;
14289 struct bpf_reg_state *reg = ®s[meta.release_regno];
14481 args = (const struct btf_param *)(meta.func_proto + 1);
14505 static bool check_reg_sane_offset(struct bpf_verifier_env *env,
14506 const struct bpf_reg_state *reg,
14548 static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
14579 static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
14580 const struct bpf_insn *insn)
14587 static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
14604 static int sanitize_val_alu(struct bpf_verifier_env *env,
14605 struct bpf_insn *insn)
14607 struct bpf_insn_aux_data *aux = cur_aux(env);
14620 struct bpf_sanitize_info {
14621 struct bpf_insn_aux_data aux;
14625 static int sanitize_speculative_path(struct bpf_verifier_env *env,
14626 const struct bpf_insn *insn,
14629 struct bpf_verifier_state *branch;
14630 struct bpf_reg_state *regs;
14645 static int sanitize_ptr_alu(struct bpf_verifier_env *env,
14646 struct bpf_insn *insn,
14647 const struct bpf_reg_state *ptr_reg,
14648 const struct bpf_reg_state *off_reg,
14649 struct bpf_reg_state *dst_reg,
14650 struct bpf_sanitize_info *info,
14653 struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : &info->aux;
14654 struct bpf_verifier_state *vstate = env->cur_state;
14660 struct bpf_reg_state tmp;
14741 static void sanitize_mark_insn_seen(struct bpf_verifier_env *env)
14743 struct bpf_verifier_state *vstate = env->cur_state;
14754 static int sanitize_err(struct bpf_verifier_env *env,
14755 const struct bpf_insn *insn, int reason,
14756 const struct bpf_reg_state *off_reg,
14757 const struct bpf_reg_state *dst_reg)
14803 struct bpf_verifier_env *env,
14805 const struct bpf_reg_state *reg,
14826 static int sanitize_check_bounds(struct bpf_verifier_env *env,
14827 const struct bpf_insn *insn,
14828 const struct bpf_reg_state *dst_reg)
14863 static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
14864 struct bpf_insn *insn,
14865 const struct bpf_reg_state *ptr_reg,
14866 const struct bpf_reg_state *off_reg)
14868 struct bpf_verifier_state *vstate = env->cur_state;
14869 struct bpf_func_state *state = vstate->frame[vstate->curframe];
14870 struct bpf_reg_state *regs = state->regs, *dst_reg;
14876 struct bpf_sanitize_info info = {};
15106 static void scalar32_min_max_add(struct bpf_reg_state *dst_reg,
15107 struct bpf_reg_state *src_reg)
15137 static void scalar_min_max_add(struct bpf_reg_state *dst_reg,
15138 struct bpf_reg_state *src_reg)
15168 static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg,
15169 struct bpf_reg_state *src_reg)
15200 static void scalar_min_max_sub(struct bpf_reg_state *dst_reg,
15201 struct bpf_reg_state *src_reg)
15232 static void scalar32_min_max_mul(struct bpf_reg_state *dst_reg,
15233 struct bpf_reg_state *src_reg)
15260 static void scalar_min_max_mul(struct bpf_reg_state *dst_reg,
15261 struct bpf_reg_state *src_reg)
15288 static void scalar32_min_max_udiv(struct bpf_reg_state *dst_reg,
15289 struct bpf_reg_state *src_reg)
15304 static void scalar_min_max_udiv(struct bpf_reg_state *dst_reg,
15305 struct bpf_reg_state *src_reg)
15320 static void scalar32_min_max_sdiv(struct bpf_reg_state *dst_reg,
15321 struct bpf_reg_state *src_reg)
15357 static void scalar_min_max_sdiv(struct bpf_reg_state *dst_reg,
15358 struct bpf_reg_state *src_reg)
15394 static void scalar32_min_max_umod(struct bpf_reg_state *dst_reg,
15395 struct bpf_reg_state *src_reg)
15418 static void scalar_min_max_umod(struct bpf_reg_state *dst_reg,
15419 struct bpf_reg_state *src_reg)
15442 static void scalar32_min_max_smod(struct bpf_reg_state *dst_reg,
15443 struct bpf_reg_state *src_reg)
15488 static void scalar_min_max_smod(struct bpf_reg_state *dst_reg,
15489 struct bpf_reg_state *src_reg)
15534 static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
15535 struct bpf_reg_state *src_reg)
15539 struct tnum var32_off = tnum_subreg(dst_reg->var_off);
15565 static void scalar_min_max_and(struct bpf_reg_state *dst_reg,
15566 struct bpf_reg_state *src_reg)
15597 static void scalar32_min_max_or(struct bpf_reg_state *dst_reg,
15598 struct bpf_reg_state *src_reg)
15602 struct tnum var32_off = tnum_subreg(dst_reg->var_off);
15628 static void scalar_min_max_or(struct bpf_reg_state *dst_reg,
15629 struct bpf_reg_state *src_reg)
15660 static void scalar32_min_max_xor(struct bpf_reg_state *dst_reg,
15661 struct bpf_reg_state *src_reg)
15665 struct tnum var32_off = tnum_subreg(dst_reg->var_off);
15688 static void scalar_min_max_xor(struct bpf_reg_state *dst_reg,
15689 struct bpf_reg_state *src_reg)
15718 static void __scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
15736 static void scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
15737 struct bpf_reg_state *src_reg)
15742 struct tnum subreg = tnum_subreg(dst_reg->var_off);
15754 static void __scalar64_min_max_lsh(struct bpf_reg_state *dst_reg,
15780 static void scalar_min_max_lsh(struct bpf_reg_state *dst_reg,
15781 struct bpf_reg_state *src_reg)
15795 static void scalar32_min_max_rsh(struct bpf_reg_state *dst_reg,
15796 struct bpf_reg_state *src_reg)
15798 struct tnum subreg = tnum_subreg(dst_reg->var_off);
15827 static void scalar_min_max_rsh(struct bpf_reg_state *dst_reg,
15828 struct bpf_reg_state *src_reg)
15861 static void scalar32_min_max_arsh(struct bpf_reg_state *dst_reg,
15862 struct bpf_reg_state *src_reg)
15884 static void scalar_min_max_arsh(struct bpf_reg_state *dst_reg,
15885 struct bpf_reg_state *src_reg)
15911 static void scalar_byte_swap(struct bpf_reg_state *dst_reg, struct bpf_insn *insn)
15960 static bool is_safe_to_compute_dst_reg_range(struct bpf_insn *insn,
15961 const struct bpf_reg_state *src_reg)
16010 static int maybe_fork_scalars(struct bpf_verifier_env *env, struct bpf_insn *insn,
16011 struct bpf_reg_state *dst_reg)
16013 struct bpf_verifier_state *branch;
16014 struct bpf_reg_state *regs;
16043 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
16044 struct bpf_insn *insn,
16045 struct bpf_reg_state *dst_reg,
16046 struct bpf_reg_state src_reg)
16201 static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
16202 struct bpf_insn *insn)
16204 struct bpf_verifier_state *vstate = env->cur_state;
16205 struct bpf_func_state *state = vstate->frame[vstate->curframe];
16206 struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
16207 struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
16216 struct bpf_insn_aux_data *aux = cur_aux(env);
16361 static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
16363 struct bpf_reg_state *regs = cur_regs(env);
16452 struct bpf_reg_state *src_reg = regs + insn->src_reg;
16453 struct bpf_reg_state *dst_reg = regs + insn->dst_reg;
16604 static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
16605 struct bpf_reg_state *dst_reg,
16609 struct bpf_func_state *state;
16610 struct bpf_reg_state *reg;
16686 static int is_scalar_branch_taken(struct bpf_reg_state *reg1, struct bpf_reg_state *reg2,
16689 struct tnum t1 = is_jmp32 ? tnum_subreg(reg1->var_off) : reg1->var_off;
16690 struct tnum t2 = is_jmp32 ? tnum_subreg(reg2->var_off) : reg2->var_off;
16863 static int is_pkt_ptr_branch_taken(struct bpf_reg_state *dst_reg,
16864 struct bpf_reg_state *src_reg,
16867 struct bpf_reg_state *pkt;
16910 static int is_branch_taken(struct bpf_reg_state *reg1, struct bpf_reg_state *reg2,
16978 static void regs_refine_cond_op(struct bpf_reg_state *reg1, struct bpf_reg_state *reg2,
16981 struct tnum t;
17154 static int reg_set_min_max(struct bpf_verifier_env *env,
17155 struct bpf_reg_state *true_reg1,
17156 struct bpf_reg_state *true_reg2,
17157 struct bpf_reg_state *false_reg1,
17158 struct bpf_reg_state *false_reg2,
17194 static void mark_ptr_or_null_reg(struct bpf_func_state *state,
17195 struct bpf_reg_state *reg, u32 id,
17244 static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
17247 struct bpf_func_state *state = vstate->frame[vstate->curframe];
17248 struct bpf_reg_state *regs = state->regs, *reg;
17264 static bool try_match_pkt_pointers(const struct bpf_insn *insn,
17265 struct bpf_reg_state *dst_reg,
17266 struct bpf_reg_state *src_reg,
17267 struct bpf_verifier_state *this_branch,
17268 struct bpf_verifier_state *other_branch)
17369 static void __collect_linked_regs(struct linked_regs *reg_set, struct bpf_reg_state *reg,
17372 struct linked_reg *e;
17391 static void collect_linked_regs(struct bpf_verifier_env *env,
17392 struct bpf_verifier_state *vstate,
17394 struct linked_regs *linked_regs)
17396 struct bpf_insn_aux_data *aux = env->insn_aux_data;
17397 struct bpf_func_state *func;
17398 struct bpf_reg_state *reg;
17424 static void sync_linked_regs(struct bpf_verifier_env *env, struct bpf_verifier_state *vstate,
17425 struct bpf_reg_state *known_reg, struct linked_regs *linked_regs)
17427 struct bpf_reg_state fake_reg;
17428 struct bpf_reg_state *reg;
17429 struct linked_reg *e;
17484 static int check_cond_jmp_op(struct bpf_verifier_env *env,
17485 struct bpf_insn *insn, int *insn_idx)
17487 struct bpf_verifier_state *this_branch = env->cur_state;
17488 struct bpf_verifier_state *other_branch;
17489 struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
17490 struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL;
17491 struct bpf_reg_state *eq_branch_regs;
17492 struct linked_regs linked_regs = {};
17506 struct bpf_verifier_state *cur_st = env->cur_state, *queued_st, *prev_st;
17685 * Since PTR_TO_BTF_ID points to a kernel struct that does
17742 static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
17744 struct bpf_insn_aux_data *aux = cur_aux(env);
17745 struct bpf_reg_state *regs = cur_regs(env);
17746 struct bpf_reg_state *dst_reg;
17747 struct bpf_map *map;
17796 struct bpf_prog_aux *aux = env->prog->aux;
17866 static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
17868 struct bpf_reg_state *regs = cur_regs(env);
17936 static int check_return_code(struct bpf_verifier_env *env, int regno, const char *reg_name)
17939 struct tnum enforce_attach_type_range = tnum_unknown;
17940 const struct bpf_prog *prog = env->prog;
17941 struct bpf_reg_state *reg = reg_state(env, regno);
17942 struct bpf_retval_range range = retval_range(0, 1);
17945 struct bpf_func_state *frame = env->cur_state->frame[0];
17948 const struct btf_type *reg_type, *ret_type = NULL;
18136 static void mark_subprog_changes_pkt_data(struct bpf_verifier_env *env, int off)
18138 struct bpf_subprog_info *subprog;
18144 static void mark_subprog_might_sleep(struct bpf_verifier_env *env, int off)
18146 struct bpf_subprog_info *subprog;
18158 static void merge_callee_effects(struct bpf_verifier_env *env, int t, int w)
18160 struct bpf_subprog_info *caller, *callee;
18208 static void mark_prune_point(struct bpf_verifier_env *env, int idx)
18213 static bool is_prune_point(struct bpf_verifier_env *env, int insn_idx)
18218 static void mark_force_checkpoint(struct bpf_verifier_env *env, int idx)
18223 static bool is_force_checkpoint(struct bpf_verifier_env *env, int insn_idx)
18228 static void mark_calls_callback(struct bpf_verifier_env *env, int idx)
18233 bool bpf_calls_callback(struct bpf_verifier_env *env, int insn_idx)
18248 static int push_insn(int t, int w, int e, struct bpf_verifier_env *env)
18296 static int visit_func_call_insn(int t, struct bpf_insn *insns,
18297 struct bpf_verifier_env *env,
18328 static bool verifier_inlines_helper_call(struct bpf_verifier_env *env, s32 imm)
18344 struct call_summary {
18353 static bool get_call_summary(struct bpf_verifier_env *env, struct bpf_insn *call,
18354 struct call_summary *cs)
18356 struct bpf_kfunc_call_arg_meta meta;
18357 const struct bpf_func_proto *fn;
18468 static void mark_fastcall_pattern_for_call(struct bpf_verifier_env *env,
18469 struct bpf_subprog_info *subprog,
18472 struct bpf_insn *insns = env->prog->insnsi, *stx, *ldx;
18473 struct bpf_insn *call = &env->prog->insnsi[insn_idx];
18475 struct call_summary cs;
18555 static int mark_fastcall_patterns(struct bpf_verifier_env *env)
18557 struct bpf_subprog_info *subprog = env->subprog_info;
18558 struct bpf_insn *insn;
18583 static struct bpf_iarray *iarray_realloc(struct bpf_iarray *old, size_t n_elem)
18585 size_t new_size = sizeof(struct bpf_iarray) + n_elem * sizeof(old->items[0]);
18586 struct bpf_iarray *new;
18599 static int copy_insn_array(struct bpf_map *map, u32 start, u32 end, u32 *items)
18601 struct bpf_insn_array_value *value;
18641 static int copy_insn_array_uniq(struct bpf_map *map, u32 start, u32 end, u32 *off)
18656 static struct bpf_iarray *jt_from_map(struct bpf_map *map)
18658 struct bpf_iarray *jt;
18687 static struct bpf_iarray *jt_from_subprog(struct bpf_verifier_env *env,
18690 struct bpf_iarray *jt = NULL;
18691 struct bpf_map *map;
18692 struct bpf_iarray *jt_cur;
18734 static struct bpf_iarray *
18735 create_jt(int t, struct bpf_verifier_env *env)
18737 static struct bpf_subprog_info *subprog;
18739 struct bpf_iarray *jt;
18763 static int visit_gotox_insn(int t, struct bpf_verifier_env *env)
18768 struct bpf_iarray *jt;
18805 static int visit_tailcall_insn(struct bpf_verifier_env *env, int t)
18807 static struct bpf_subprog_info *subprog;
18808 struct bpf_iarray *jt;
18829 static int visit_insn(int t, struct bpf_verifier_env *env)
18831 struct bpf_insn *insns = env->prog->insnsi, *insn = &insns[t];
18872 const struct bpf_func_proto *fp;
18887 struct bpf_kfunc_call_arg_meta meta;
18953 static int check_cfg(struct bpf_verifier_env *env)
19014 struct bpf_insn *insn = &env->prog->insnsi[i];
19046 static int compute_postorder(struct bpf_verifier_env *env)
19050 struct bpf_iarray *succ;
19092 static int check_abnormal_return(struct bpf_verifier_env *env)
19113 static int check_btf_func_early(struct bpf_verifier_env *env,
19117 u32 krec_size = sizeof(struct bpf_func_info);
19118 const struct btf_type *type, *func_proto;
19120 struct bpf_func_info *krecord;
19121 struct bpf_prog *prog;
19122 const struct btf *btf;
19215 static int check_btf_func(struct bpf_verifier_env *env,
19219 const struct btf_type *type, *func_proto, *ret_type;
19221 struct bpf_func_info *krecord;
19222 struct bpf_func_info_aux *info_aux = NULL;
19223 struct bpf_prog *prog;
19224 const struct btf *btf;
19291 static void adjust_btf_func(struct bpf_verifier_env *env)
19293 struct bpf_prog_aux *aux = env->prog->aux;
19304 #define MIN_BPF_LINEINFO_SIZE offsetofend(struct bpf_line_info, line_col)
19307 static int check_btf_line(struct bpf_verifier_env *env,
19312 struct bpf_subprog_info *sub;
19313 struct bpf_line_info *linfo;
19314 struct bpf_prog *prog;
19315 const struct btf *btf;
19322 if (nr_linfo > INT_MAX / sizeof(struct bpf_line_info))
19334 linfo = kvzalloc_objs(struct bpf_line_info, nr_linfo,
19345 expected_size = sizeof(struct bpf_line_info);
19432 #define MIN_CORE_RELO_SIZE sizeof(struct bpf_core_relo)
19435 static int check_core_relo(struct bpf_verifier_env *env,
19440 struct bpf_core_relo core_relo = {};
19441 struct bpf_prog *prog = env->prog;
19442 const struct btf *btf = prog->aux->btf;
19443 struct bpf_core_ctx ctx = {
19453 if (nr_core_relo > INT_MAX / sizeof(struct bpf_core_relo))
19463 expected_size = sizeof(struct bpf_core_relo);
19504 static int check_btf_info_early(struct bpf_verifier_env *env,
19508 struct btf *btf;
19532 static int check_btf_info(struct bpf_verifier_env *env,
19560 static bool range_within(const struct bpf_reg_state *old,
19561 const struct bpf_reg_state *cur)
19583 static bool check_ids(u32 old_id, u32 cur_id, struct bpf_idmap *idmap)
19585 struct bpf_id_pair *map = idmap->map;
19633 static bool check_scalar_ids(u32 old_id, u32 cur_id, struct bpf_idmap *idmap)
19643 static void clean_func_state(struct bpf_verifier_env *env,
19644 struct bpf_func_state *st,
19668 static void clean_verifier_state(struct bpf_verifier_env *env,
19669 struct bpf_verifier_state *st)
19713 static void idset_cnt_inc(struct bpf_idset *idset, u32 id)
19732 static u32 idset_cnt_get(struct bpf_idset *idset, u32 id)
19748 static void clear_singular_ids(struct bpf_verifier_env *env,
19749 struct bpf_verifier_state *st)
19751 struct bpf_idset *idset = &env->idset_scratch;
19752 struct bpf_func_state *func;
19753 struct bpf_reg_state *reg;
19777 static void clean_live_states(struct bpf_verifier_env *env, int insn,
19778 struct bpf_verifier_state *cur)
19780 struct bpf_verifier_state_list *sl;
19781 struct list_head *pos, *head;
19785 sl = container_of(pos, struct bpf_verifier_state_list, node);
19800 static bool regs_exact(const struct bpf_reg_state *rold,
19801 const struct bpf_reg_state *rcur,
19802 struct bpf_idmap *idmap)
19804 return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
19816 static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
19817 struct bpf_reg_state *rcur, struct bpf_idmap *idmap,
19857 return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
19927 return memcmp(rold, rcur, offsetof(struct bpf_reg_state, var_off)) == 0 &&
19966 return memcmp(rold, rcur, offsetof(struct bpf_reg_state, var_off)) == 0 &&
19974 static struct bpf_reg_state unbound_reg;
19983 static bool is_stack_all_misc(struct bpf_verifier_env *env,
19984 struct bpf_stack_state *stack)
19998 static struct bpf_reg_state *scalar_reg_for_stack(struct bpf_verifier_env *env,
19999 struct bpf_stack_state *stack)
20010 static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
20011 struct bpf_func_state *cur, struct bpf_idmap *idmap,
20021 struct bpf_reg_state *old_reg, *cur_reg;
20135 static bool refsafe(struct bpf_verifier_state *old, struct bpf_verifier_state *cur,
20136 struct bpf_idmap *idmap)
20208 static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_state *old,
20209 struct bpf_func_state *cur, u32 insn_idx, enum exact_level exact)
20229 static void reset_idmap_scratch(struct bpf_verifier_env *env)
20231 struct bpf_idmap *idmap = &env->idmap_scratch;
20237 static bool states_equal(struct bpf_verifier_env *env,
20238 struct bpf_verifier_state *old,
20239 struct bpf_verifier_state *cur,
20278 static int propagate_precision(struct bpf_verifier_env *env,
20279 const struct bpf_verifier_state *old,
20280 struct bpf_verifier_state *cur,
20283 struct bpf_reg_state *state_reg;
20284 struct bpf_func_state *state;
20342 static int propagate_backedges(struct bpf_verifier_env *env, struct bpf_scc_visit *visit)
20344 struct bpf_scc_backedge *backedge;
20345 struct bpf_verifier_state *st;
20371 static bool states_maybe_looping(struct bpf_verifier_state *old,
20372 struct bpf_verifier_state *cur)
20374 struct bpf_func_state *fold, *fcur;
20384 offsetof(struct bpf_reg_state, frameno)))
20389 static bool is_iter_next_insn(struct bpf_verifier_env *env, int insn_idx)
20442 * struct bpf_iter_num it;
20452 static bool iter_active_depths_differ(struct bpf_verifier_state *old, struct bpf_verifier_state *cur)
20454 struct bpf_reg_state *slot, *cur_slot;
20455 struct bpf_func_state *state;
20476 static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
20478 struct bpf_verifier_state_list *new_sl;
20479 struct bpf_verifier_state_list *sl;
20480 struct bpf_verifier_state *cur = env->cur_state, *new;
20483 struct list_head *pos, *tmp, *head;
20507 sl = container_of(pos, struct bpf_verifier_state_list, node);
20513 struct bpf_func_state *frame = sl->state.frame[sl->state.curframe];
20568 struct bpf_func_state *cur_frame;
20569 struct bpf_reg_state *iter_state, *iter_reg;
20727 struct bpf_scc_backedge *backedge;
20794 new_sl = kzalloc_obj(struct bpf_verifier_state_list, GFP_KERNEL_ACCOUNT);
20887 static int save_aux_ptr_type(struct bpf_verifier_env *env, enum bpf_reg_type type,
20939 static int process_bpf_exit_full(struct bpf_verifier_env *env,
20983 static int indirect_jump_min_max_index(struct bpf_verifier_env *env,
20985 struct bpf_map *map,
20988 struct bpf_reg_state *reg = reg_state(env, regno);
21020 static int check_indirect_jump(struct bpf_verifier_env *env, struct bpf_insn *insn)
21022 struct bpf_verifier_state *other_branch;
21023 struct bpf_reg_state *dst_reg;
21024 struct bpf_map *map;
21076 static int do_check_insn(struct bpf_verifier_env *env, bool *do_print_state)
21079 struct bpf_insn *insn = &env->prog->insnsi[env->insn_idx];
21244 static int do_check(struct bpf_verifier_env *env)
21247 struct bpf_verifier_state *state = env->cur_state;
21248 struct bpf_insn *insns = env->prog->insnsi;
21254 struct bpf_insn *insn;
21255 struct bpf_insn_aux_data *insn_aux;
21419 static int find_btf_percpu_datasec(struct btf *btf)
21421 const struct btf_type *t;
21450 static int __add_used_btf(struct bpf_verifier_env *env, struct btf *btf)
21452 struct btf_mod_pair *btf_mod;
21491 static int __check_pseudo_btf_id(struct bpf_verifier_env *env,
21492 struct bpf_insn *insn,
21493 struct bpf_insn_aux_data *aux,
21494 struct btf *btf)
21496 const struct btf_var_secinfo *vsi;
21497 const struct btf_type *datasec;
21498 const struct btf_type *t;
21551 const struct btf_type *ret;
21574 static int check_pseudo_btf_id(struct bpf_verifier_env *env,
21575 struct bpf_insn *insn,
21576 struct bpf_insn_aux_data *aux)
21578 struct btf *btf;
21621 static bool bpf_map_is_cgroup_storage(struct bpf_map *map)
21627 static int check_map_prog_compatibility(struct bpf_verifier_env *env,
21628 struct bpf_map *map,
21629 struct bpf_prog *prog)
21732 static int __add_used_map(struct bpf_verifier_env *env, struct bpf_map *map)
21779 static int add_used_map(struct bpf_verifier_env *env, int fd)
21781 struct bpf_map *map;
21800 static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
21802 struct bpf_insn *insn = env->prog->insnsi;
21819 struct bpf_insn_aux_data *aux;
21820 struct bpf_map *map;
21931 * 'struct bpf_map *' into a register instead of user map_fd.
21938 static void release_maps(struct bpf_verifier_env *env)
21945 static void release_btfs(struct bpf_verifier_env *env)
21951 static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
21953 struct bpf_insn *insn = env->prog->insnsi;
21970 static void adjust_insn_aux_data(struct bpf_verifier_env *env,
21971 struct bpf_prog *new_prog, u32 off, u32 cnt)
21973 struct bpf_insn_aux_data *data = env->insn_aux_data;
21974 struct bpf_insn *insn = new_prog->insnsi;
21990 sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
21991 memset(data + off, 0, sizeof(struct bpf_insn_aux_data) * (cnt - 1));
21999 static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
22013 static void release_insn_arrays(struct bpf_verifier_env *env)
22021 static void adjust_insn_arrays(struct bpf_verifier_env *env, u32 off, u32 len)
22032 static void adjust_insn_arrays_after_remove(struct bpf_verifier_env *env, u32 off, u32 len)
22040 static void adjust_poke_descs(struct bpf_prog *prog, u32 off, u32 len)
22042 struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
22044 struct bpf_jit_poke_descriptor *desc;
22054 static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
22055 const struct bpf_insn *patch, u32 len)
22057 struct bpf_prog *new_prog;
22058 struct bpf_insn_aux_data *new_data = NULL;
22063 sizeof(struct bpf_insn_aux_data)),
22090 static int adjust_jmp_off(struct bpf_prog *prog, u32 tgt_idx, u32 delta)
22092 struct bpf_insn *insn = prog->insnsi;
22124 static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env,
22144 struct bpf_prog_aux *aux = env->prog->aux;
22180 static int bpf_adj_linfo_after_remove(struct bpf_verifier_env *env, u32 off,
22183 struct bpf_prog *prog = env->prog;
22185 struct bpf_line_info *linfo;
22247 static void clear_insn_aux_data(struct bpf_verifier_env *env, int start, int len)
22249 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
22250 struct bpf_insn *insns = env->prog->insnsi;
22265 static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
22267 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
22308 static void sanitize_dead_code(struct bpf_verifier_env *env)
22310 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
22311 struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1);
22312 struct bpf_insn *insn = env->prog->insnsi;
22338 static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env)
22340 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
22341 struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
22342 struct bpf_insn *insn = env->prog->insnsi;
22364 static int opt_remove_dead_code(struct bpf_verifier_env *env)
22366 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
22388 static const struct bpf_insn NOP = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
22389 static const struct bpf_insn MAY_GOTO_0 = BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, 0, 0);
22391 static int opt_remove_nops(struct bpf_verifier_env *env)
22393 struct bpf_insn *insn = env->prog->insnsi;
22416 static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env,
22419 struct bpf_insn *patch;
22421 struct bpf_insn *zext_patch = env->insn_buf;
22422 struct bpf_insn *rnd_hi32_patch = &env->insn_buf[2];
22423 struct bpf_insn_aux_data *aux = env->insn_aux_data;
22425 struct bpf_insn *insns = env->prog->insnsi;
22426 struct bpf_prog *new_prog;
22436 struct bpf_insn insn;
22519 * struct __sk_buff -> struct sk_buff
22520 * struct bpf_sock_ops -> struct sock
22522 static int convert_ctx_accesses(struct bpf_verifier_env *env)
22524 struct bpf_subprog_info *subprogs = env->subprog_info;
22525 const struct bpf_verifier_ops *ops = env->ops;
22528 struct bpf_insn *epilogue_buf = env->epilogue_buf;
22529 struct bpf_insn *insn_buf = env->insn_buf;
22530 struct bpf_insn *insn;
22532 struct bpf_prog *new_prog;
22600 struct bpf_insn *patch = insn_buf;
22673 struct bpf_insn *patch = insn_buf;
22822 static int jit_subprogs(struct bpf_verifier_env *env)
22824 struct bpf_prog *prog = env->prog, **func, *tmp;
22826 struct bpf_map *map_ptr;
22827 struct bpf_insn *insn;
22895 len * sizeof(struct bpf_insn));
22913 struct bpf_jit_poke_descriptor *poke;
23108 static int fixup_call_args(struct bpf_verifier_env *env)
23111 struct bpf_prog *prog = env->prog;
23112 struct bpf_insn *insn = prog->insnsi;
23160 static int specialize_kfunc(struct bpf_verifier_env *env, struct bpf_kfunc_desc *desc, int insn_idx)
23162 struct bpf_prog *prog = env->prog;
23209 static void __fixup_collection_insert_kfunc(struct bpf_insn_aux_data *insn_aux,
23212 struct bpf_insn *insn,
23213 struct bpf_insn *insn_buf,
23216 struct btf_struct_meta *kptr_struct_meta = insn_aux->kptr_struct_meta;
23217 struct bpf_insn addr[2] = { BPF_LD_IMM64(struct_meta_reg, (long)kptr_struct_meta) };
23226 static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
23227 struct bpf_insn *insn_buf, int insn_idx, int *cnt)
23229 struct bpf_kfunc_desc *desc;
23259 struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta;
23260 struct bpf_insn addr[2] = { BPF_LD_IMM64(BPF_REG_2, (long)kptr_struct_meta) };
23277 struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta;
23278 struct bpf_insn addr[2] = { BPF_LD_IMM64(BPF_REG_2, (long)kptr_struct_meta) };
23300 struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta;
23356 struct bpf_insn ld_addrs[2] = { BPF_LD_IMM64(regno, (long)env->prog->aux) };
23368 static int add_hidden_subprog(struct bpf_verifier_env *env, struct bpf_insn *patch, int len)
23370 struct bpf_subprog_info *info = env->subprog_info;
23372 struct bpf_prog *prog;
23397 static int do_misc_fixups(struct bpf_verifier_env *env)
23399 struct bpf_prog *prog = env->prog;
23402 struct bpf_insn *insn = prog->insnsi;
23403 const struct bpf_func_proto *fn;
23405 const struct bpf_map_ops *ops;
23406 struct bpf_insn_aux_data *aux;
23407 struct bpf_insn *insn_buf = env->insn_buf;
23408 struct bpf_prog *new_prog;
23409 struct bpf_map *map_ptr;
23411 struct bpf_subprog_info *subprogs = env->subprog_info;
23416 struct bpf_insn *patch = insn_buf;
23435 (((struct bpf_map *)env->prog->aux->arena)->map_flags & BPF_F_NO_USER_CONV)) {
23457 struct bpf_insn *patch = insn_buf;
23487 struct bpf_insn *patch = insn_buf;
23573 struct bpf_insn *patch = insn_buf;
23624 struct bpf_insn *patch = insn_buf;
23794 struct bpf_jit_poke_descriptor desc = {
23830 struct bpf_array,
23858 struct bpf_insn ld_addrs[2] = {
23956 (void *(*)(struct bpf_map *map, void *key))NULL));
23958 (long (*)(struct bpf_map *map, void *key))NULL));
23960 (long (*)(struct bpf_map *map, void *key, void *value,
23963 (long (*)(struct bpf_map *map, void *value,
23966 (long (*)(struct bpf_map *map, void *value))NULL));
23968 (long (*)(struct bpf_map *map, void *value))NULL));
23970 (long (*)(struct bpf_map *map, u64 index, u64 flags))NULL));
23972 (long (*)(struct bpf_map *map,
23977 (void *(*)(struct bpf_map *map, void *key, u32 cpu))NULL));
24016 struct bpf_insn ld_jiffies_addr[2] = {
24195 * int perf_snapshot_branch_stack(struct perf_branch_entry *entries, u32 cnt);
24197 const u32 br_entry_size = sizeof(struct perf_branch_entry);
24199 /* struct perf_branch_entry is part of UAPI and is
24225 /* return entry_cnt * sizeof(struct perf_branch_entry) */
24358 static struct bpf_prog *inline_bpf_loop(struct bpf_verifier_env *env,
24371 struct bpf_insn *insn_buf = env->insn_buf;
24372 struct bpf_prog *new_prog;
24434 static bool is_bpf_loop_call(struct bpf_insn *insn)
24450 static int optimize_bpf_loop(struct bpf_verifier_env *env)
24452 struct bpf_subprog_info *subprogs = env->subprog_info;
24454 struct bpf_insn *insn = env->prog->insnsi;
24461 struct bpf_loop_inline_state *inline_state =
24465 struct bpf_prog *new_prog;
24498 static int remove_fastcall_spills_fills(struct bpf_verifier_env *env)
24500 struct bpf_subprog_info *subprog = env->subprog_info;
24501 struct bpf_insn_aux_data *aux = env->insn_aux_data;
24502 struct bpf_insn *insn = env->prog->insnsi;
24529 static void free_states(struct bpf_verifier_env *env)
24531 struct bpf_verifier_state_list *sl;
24532 struct list_head *head, *pos, *tmp;
24533 struct bpf_scc_info *info;
24541 sl = container_of(pos, struct bpf_verifier_state_list, node);
24564 sl = container_of(pos, struct bpf_verifier_state_list, node);
24572 static int do_check_common(struct bpf_verifier_env *env, int subprog)
24575 struct bpf_subprog_info *sub = subprog_info(env, subprog);
24576 struct bpf_prog_aux *aux = env->prog->aux;
24577 struct bpf_verifier_state *state;
24578 struct bpf_reg_state *regs;
24584 state = kzalloc_obj(struct bpf_verifier_state, GFP_KERNEL_ACCOUNT);
24591 state->frame[0] = kzalloc_obj(struct bpf_func_state, GFP_KERNEL_ACCOUNT);
24607 struct bpf_subprog_arg_info *arg;
24608 struct bpf_reg_state *reg;
24722 static int do_check_subprogs(struct bpf_verifier_env *env)
24724 struct bpf_prog_aux *aux = env->prog->aux;
24725 struct bpf_func_info_aux *sub_aux;
24772 static int do_check_main(struct bpf_verifier_env *env)
24784 static void print_verification_stats(struct bpf_verifier_env *env)
24808 int bpf_prog_ctx_arg_info_init(struct bpf_prog *prog,
24809 const struct bpf_ctx_arg_aux *info, u32 cnt)
24817 static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
24819 const struct btf_type *t, *func_proto;
24820 const struct bpf_struct_ops_desc *st_ops_desc;
24821 const struct bpf_struct_ops *st_ops;
24822 const struct btf_member *member;
24823 struct bpf_prog *prog = env->prog;
24826 struct btf *btf;
24831 verbose(env, "struct ops programs must have a GPL compatible license\n");
24852 verbose(env, "attach_btf_id %u is not a supported struct\n",
24861 verbose(env, "attach to invalid member idx %u of struct %s\n",
24871 verbose(env, "attach to invalid member %s(@idx %u) of struct %s\n",
24879 verbose(env, "attach to unsupported member %s of struct %s\n",
24888 verbose(env, "attach to unsupported member %s of struct %s\n",
24958 int bpf_check_attach_target(struct bpf_verifier_log *log,
24959 const struct bpf_prog *prog,
24960 const struct bpf_prog *tgt_prog,
24962 struct bpf_attach_target_info *tgt_info)
24968 struct bpf_raw_event_map *btp;
24970 const struct btf_type *t;
24973 struct btf *btf;
24975 struct module *mod = NULL;
24998 struct bpf_prog_aux *aux = tgt_prog->aux;
25342 static bool can_be_sleepable(struct bpf_prog *prog)
25361 static int check_attach_btf_id(struct bpf_verifier_env *env)
25363 struct bpf_prog *prog = env->prog;
25364 struct bpf_prog *tgt_prog = prog->aux->dst_prog;
25365 struct bpf_attach_target_info tgt_info = {};
25367 struct bpf_trampoline *tr;
25452 struct btf *bpf_get_btf_vmlinux(void)
25468 static int add_fd_from_fd_array(struct bpf_verifier_env *env, int fd)
25470 struct bpf_map *map;
25471 struct btf *btf;
25493 static int process_fd_array(struct bpf_verifier_env *env, union bpf_attr *attr, bpfptr_t uattr)
25529 struct insn_live_regs {
25540 static void compute_insn_live_regs(struct bpf_verifier_env *env,
25541 struct bpf_insn *insn,
25542 struct insn_live_regs *info)
25544 struct call_summary cs;
25683 static int compute_live_registers(struct bpf_verifier_env *env)
25685 struct bpf_insn_aux_data *insn_aux = env->insn_aux_data;
25686 struct bpf_insn *insns = env->prog->insnsi;
25687 struct insn_live_regs *state;
25725 struct insn_live_regs *live = &state[insn_idx];
25726 struct bpf_iarray *succ;
25777 static int compute_scc(struct bpf_verifier_env *env)
25781 struct bpf_insn_aux_data *aux = env->insn_aux_data;
25789 struct bpf_iarray *succ;
25954 int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size)
25957 struct bpf_verifier_env *env;
25968 /* 'struct bpf_verifier_env' can be global, but since it's not small,
25971 env = kvzalloc_obj(struct bpf_verifier_env, GFP_KERNEL_ACCOUNT);
25979 vzalloc(array_size(sizeof(struct bpf_insn_aux_data), len));
26035 env->explored_states = kvzalloc_objs(struct list_head,