Lines Matching defs:dst_reg
3694 return insn->dst_reg;
3700 return insn->dst_reg;
3707 int dst_reg = insn_def_regno(insn);
3709 if (dst_reg == -1)
3712 return !is_reg64(insn, dst_reg, NULL, DST_OP);
4195 u32 dreg = insn->dst_reg;
4277 /* stx & st shouldn't be using _scalar_ dst_reg
4959 * dst_reg and then will be used by sync_linked_regs() to
7723 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
7729 /* Check if (src_reg + off) is readable. The state of dst_reg will be
7733 BPF_SIZE(insn->code), BPF_READ, insn->dst_reg,
7737 err = err ?: reg_bounds_sanity_check(env, &regs[insn->dst_reg], ctx);
7755 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
7759 dst_reg_type = regs[insn->dst_reg].type;
7761 /* Check if (dst_reg + off) is writeable. */
7762 err = check_mem_access(env, env->insn_idx, insn->dst_reg, insn->off,
7787 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
7810 if (!atomic_ptr_type_ok(env, insn->dst_reg, insn)) {
7812 insn->dst_reg,
7813 reg_type_str(env, reg_state(env, insn->dst_reg)->type));
7837 err = check_mem_access(env, env->insn_idx, insn->dst_reg, insn->off,
7840 err = check_mem_access(env, env->insn_idx, insn->dst_reg,
7846 if (is_arena_reg(env, insn->dst_reg)) {
7852 err = check_mem_access(env, env->insn_idx, insn->dst_reg, insn->off,
7887 if (!atomic_ptr_type_ok(env, insn->dst_reg, insn)) {
7889 insn->dst_reg,
7890 reg_type_str(env, reg_state(env, insn->dst_reg)->type));
14295 mark_reg_unknown(env, regs, insn->dst_reg);
14297 mark_reg_unknown(env, regs, insn->dst_reg);
14308 struct bpf_reg_state *dst_reg,
14316 bool ptr_is_dst_reg = ptr_reg == dst_reg;
14370 * pushed the truncated dst_reg into the speculative verification
14390 tmp = *dst_reg;
14391 copy_register_state(dst_reg, ptr_reg);
14396 *dst_reg = tmp;
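
A minimal sketch of the save/patch/restore pattern visible above (tmp = *dst_reg; ...; *dst_reg = tmp): to explore the speculative path, the verifier stashes the destination register, temporarily overwrites it, pushes that state for separate verification, then restores the original for the normal path. The struct and push_speculative() helper here are hypothetical stand-ins, not kernel types:

    #include <stdio.h>

    struct reg_state { long off; long var; };

    /* Hypothetical stand-in for pushing a speculative verifier state. */
    static void push_speculative(const struct reg_state *r)
    {
        printf("speculative state: off=%ld var=%ld\n", r->off, r->var);
    }

    static void sanitize_sketch(struct reg_state *dst,
                                const struct reg_state *ptr)
    {
        struct reg_state tmp;

        tmp = *dst;             /* save the real contents */
        *dst = *ptr;            /* simulate the masked/speculative result */
        push_speculative(dst);  /* verify that branch separately */
        *dst = tmp;             /* restore for the non-speculative path */
    }

    int main(void)
    {
        struct reg_state dst = { 8, 1 }, ptr = { 0, 0 };

        sanitize_sketch(&dst, &ptr);
        printf("restored: off=%ld var=%ld\n", dst.off, dst.var);
        return 0;
    }
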
14416 const struct bpf_reg_state *dst_reg)
14420 u32 dst = insn->dst_reg, src = insn->src_reg;
14425 off_reg == dst_reg ? dst : src, err);
14429 off_reg == dst_reg ? src : dst, err);
14487 const struct bpf_reg_state *dst_reg)
14489 u32 dst = insn->dst_reg;
14497 switch (dst_reg->type) {
14499 if (check_stack_access_for_ptr_arithmetic(env, dst, dst_reg,
14500 dst_reg->off + dst_reg->var_off.value))
14504 if (check_map_access(env, dst, dst_reg->off, 1, false, ACCESS_HELPER)) {
14529 struct bpf_reg_state *regs = state->regs, *dst_reg;
14537 u32 dst = insn->dst_reg;
14540 dst_reg = &regs[dst];
14547 __mark_reg_unknown(env, dst_reg);
14554 __mark_reg_unknown(env, dst_reg);
14606 /* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
14609 dst_reg->type = ptr_reg->type;
14610 dst_reg->id = ptr_reg->id;
14617 __mark_reg32_unbounded(dst_reg);
14620 ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg,
14623 return sanitize_err(env, insn, ret, off_reg, dst_reg);
14634 dst_reg->smin_value = smin_ptr;
14635 dst_reg->smax_value = smax_ptr;
14636 dst_reg->umin_value = umin_ptr;
14637 dst_reg->umax_value = umax_ptr;
14638 dst_reg->var_off = ptr_reg->var_off;
14639 dst_reg->off = ptr_reg->off + smin_val;
14640 dst_reg->raw = ptr_reg->raw;
14645 * dst_reg gets the pointer type and since some positive
14652 if (check_add_overflow(smin_ptr, smin_val, &dst_reg->smin_value) ||
14653 check_add_overflow(smax_ptr, smax_val, &dst_reg->smax_value)) {
14654 dst_reg->smin_value = S64_MIN;
14655 dst_reg->smax_value = S64_MAX;
14657 if (check_add_overflow(umin_ptr, umin_val, &dst_reg->umin_value) ||
14658 check_add_overflow(umax_ptr, umax_val, &dst_reg->umax_value)) {
14659 dst_reg->umin_value = 0;
14660 dst_reg->umax_value = U64_MAX;
14662 dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
14663 dst_reg->off = ptr_reg->off;
14664 dst_reg->raw = ptr_reg->raw;
14666 dst_reg->id = ++env->id_gen;
14668 memset(&dst_reg->raw, 0, sizeof(dst_reg->raw));
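
The check_add_overflow() calls above implement saturating range addition for "pointer += scalar": bounds add elementwise, and any possible overflow widens that pair of bounds to the full range. A compilable userspace sketch of the same rule, with GCC/Clang's __builtin_add_overflow standing in for the kernel helper:

    #include <stdint.h>
    #include <stdio.h>

    struct range { int64_t smin, smax; uint64_t umin, umax; };

    /* Ranges add elementwise; any possible overflow collapses that
     * pair of bounds to "unknown", as in the lines above. */
    static void range_add(struct range *dst, const struct range *src)
    {
        if (__builtin_add_overflow(dst->smin, src->smin, &dst->smin) ||
            __builtin_add_overflow(dst->smax, src->smax, &dst->smax)) {
            dst->smin = INT64_MIN;
            dst->smax = INT64_MAX;
        }
        if (__builtin_add_overflow(dst->umin, src->umin, &dst->umin) ||
            __builtin_add_overflow(dst->umax, src->umax, &dst->umax)) {
            dst->umin = 0;
            dst->umax = UINT64_MAX;
        }
    }

    int main(void)
    {
        struct range ptr = { 4096, 4096, 4096, 4096 }; /* known offset */
        struct range off = { 0, 64, 0, 64 };           /* scalar in [0, 64] */

        range_add(&ptr, &off);
        printf("s64=[%lld, %lld] u64=[%llu, %llu]\n",
               (long long)ptr.smin, (long long)ptr.smax,
               (unsigned long long)ptr.umin, (unsigned long long)ptr.umax);
        return 0;
    }
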
14672 if (dst_reg == off_reg) {
14690 dst_reg->smin_value = smin_ptr;
14691 dst_reg->smax_value = smax_ptr;
14692 dst_reg->umin_value = umin_ptr;
14693 dst_reg->umax_value = umax_ptr;
14694 dst_reg->var_off = ptr_reg->var_off;
14695 dst_reg->id = ptr_reg->id;
14696 dst_reg->off = ptr_reg->off - smin_val;
14697 dst_reg->raw = ptr_reg->raw;
14703 if (check_sub_overflow(smin_ptr, smax_val, &dst_reg->smin_value) ||
14704 check_sub_overflow(smax_ptr, smin_val, &dst_reg->smax_value)) {
14706 dst_reg->smin_value = S64_MIN;
14707 dst_reg->smax_value = S64_MAX;
14711 dst_reg->umin_value = 0;
14712 dst_reg->umax_value = U64_MAX;
14715 dst_reg->umin_value = umin_ptr - umax_val;
14716 dst_reg->umax_value = umax_ptr - umin_val;
14718 dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
14719 dst_reg->off = ptr_reg->off;
14720 dst_reg->raw = ptr_reg->raw;
14722 dst_reg->id = ++env->id_gen;
14725 memset(&dst_reg->raw, 0, sizeof(dst_reg->raw));
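
For "pointer - scalar" the unsigned rule above is asymmetric: the new minimum subtracts the largest possible scalar and the new maximum the smallest, and when umin_ptr < umax_val the subtraction may wrap below zero, so the bounds collapse. A simplified sketch, not the kernel code:

    #include <stdint.h>
    #include <stdio.h>

    struct urange { uint64_t umin, umax; };

    static void urange_sub(struct urange *dst, const struct urange *src)
    {
        if (dst->umin < src->umax) {
            /* Subtraction can wrap: lose all unsigned knowledge. */
            dst->umin = 0;
            dst->umax = UINT64_MAX;
        } else {
            /* Worst case subtracts the most, best case the least. */
            dst->umin -= src->umax;
            dst->umax -= src->umin;
        }
    }

    int main(void)
    {
        struct urange ptr = { 4096, 8192 };
        struct urange off = { 0, 64 };

        urange_sub(&ptr, &off);
        printf("[%llu, %llu]\n", (unsigned long long)ptr.umin,
               (unsigned long long)ptr.umax);   /* [4032, 8192] */
        return 0;
    }
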
14742 if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
14744 reg_bounds_sync(dst_reg);
14745 bounds_ret = sanitize_check_bounds(env, insn, dst_reg);
14749 ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg,
14759 return sanitize_err(env, insn, ret, off_reg, dst_reg);
14765 static void scalar32_min_max_add(struct bpf_reg_state *dst_reg,
14768 s32 *dst_smin = &dst_reg->s32_min_value;
14769 s32 *dst_smax = &dst_reg->s32_max_value;
14770 u32 *dst_umin = &dst_reg->u32_min_value;
14771 u32 *dst_umax = &dst_reg->u32_max_value;
14796 static void scalar_min_max_add(struct bpf_reg_state *dst_reg,
14799 s64 *dst_smin = &dst_reg->smin_value;
14800 s64 *dst_smax = &dst_reg->smax_value;
14801 u64 *dst_umin = &dst_reg->umin_value;
14802 u64 *dst_umax = &dst_reg->umax_value;
14827 static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg,
14830 s32 *dst_smin = &dst_reg->s32_min_value;
14831 s32 *dst_smax = &dst_reg->s32_max_value;
14832 u32 *dst_umin = &dst_reg->u32_min_value;
14833 u32 *dst_umax = &dst_reg->u32_max_value;
14859 static void scalar_min_max_sub(struct bpf_reg_state *dst_reg,
14862 s64 *dst_smin = &dst_reg->smin_value;
14863 s64 *dst_smax = &dst_reg->smax_value;
14864 u64 *dst_umin = &dst_reg->umin_value;
14865 u64 *dst_umax = &dst_reg->umax_value;
14891 static void scalar32_min_max_mul(struct bpf_reg_state *dst_reg,
14894 s32 *dst_smin = &dst_reg->s32_min_value;
14895 s32 *dst_smax = &dst_reg->s32_max_value;
14896 u32 *dst_umin = &dst_reg->u32_min_value;
14897 u32 *dst_umax = &dst_reg->u32_max_value;
14919 static void scalar_min_max_mul(struct bpf_reg_state *dst_reg,
14922 s64 *dst_smin = &dst_reg->smin_value;
14923 s64 *dst_smax = &dst_reg->smax_value;
14924 u64 *dst_umin = &dst_reg->umin_value;
14925 u64 *dst_umax = &dst_reg->umax_value;
14947 static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
14951 bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
14952 struct tnum var32_off = tnum_subreg(dst_reg->var_off);
14956 __mark_reg32_known(dst_reg, var32_off.value);
14963 dst_reg->u32_min_value = var32_off.value;
14964 dst_reg->u32_max_value = min(dst_reg->u32_max_value, umax_val);
14969 if ((s32)dst_reg->u32_min_value <= (s32)dst_reg->u32_max_value) {
14970 dst_reg->s32_min_value = dst_reg->u32_min_value;
14971 dst_reg->s32_max_value = dst_reg->u32_max_value;
14973 dst_reg->s32_min_value = S32_MIN;
14974 dst_reg->s32_max_value = S32_MAX;
14978 static void scalar_min_max_and(struct bpf_reg_state *dst_reg,
14982 bool dst_known = tnum_is_const(dst_reg->var_off);
14986 __mark_reg_known(dst_reg, dst_reg->var_off.value);
14993 dst_reg->umin_value = dst_reg->var_off.value;
14994 dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
14999 if ((s64)dst_reg->umin_value <= (s64)dst_reg->umax_value) {
15000 dst_reg->smin_value = dst_reg->umin_value;
15001 dst_reg->smax_value = dst_reg->umax_value;
15003 dst_reg->smin_value = S64_MIN;
15004 dst_reg->smax_value = S64_MAX;
15007 __update_reg_bounds(dst_reg);
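
The AND bounds above lean on tnums: a (value, mask) pair where mask bits are unknown and value bits are known ones. The known-one bits of the result give umin, and the result can never exceed either operand's umax. A small sketch whose tnum_and() mirrors kernel/bpf/tnum.c:

    #include <stdint.h>
    #include <stdio.h>

    struct tnum { uint64_t value, mask; };

    static struct tnum tnum_and(struct tnum a, struct tnum b)
    {
        uint64_t alpha = a.value | a.mask;  /* bits possibly set in a */
        uint64_t beta = b.value | b.mask;   /* bits possibly set in b */
        uint64_t v = a.value & b.value;     /* bits certainly set */

        return (struct tnum){ v, alpha & beta & ~v };
    }

    int main(void)
    {
        struct tnum x = { 12, 3 };  /* 0b11xx: low two bits unknown */
        struct tnum y = { 10, 0 };  /* constant 0b1010 */
        struct tnum r = tnum_and(x, y);

        /* As in scalar_min_max_and(): known-one bits give umin, all
         * possibly-set bits give umax. */
        printf("umin=%llu umax=%llu\n",
               (unsigned long long)r.value,
               (unsigned long long)(r.value | r.mask)); /* 8 and 10 */
        return 0;
    }
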
15010 static void scalar32_min_max_or(struct bpf_reg_state *dst_reg,
15014 bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
15015 struct tnum var32_off = tnum_subreg(dst_reg->var_off);
15019 __mark_reg32_known(dst_reg, var32_off.value);
15026 dst_reg->u32_min_value = max(dst_reg->u32_min_value, umin_val);
15027 dst_reg->u32_max_value = var32_off.value | var32_off.mask;
15032 if ((s32)dst_reg->u32_min_value <= (s32)dst_reg->u32_max_value) {
15033 dst_reg->s32_min_value = dst_reg->u32_min_value;
15034 dst_reg->s32_max_value = dst_reg->u32_max_value;
15036 dst_reg->s32_min_value = S32_MIN;
15037 dst_reg->s32_max_value = S32_MAX;
15041 static void scalar_min_max_or(struct bpf_reg_state *dst_reg,
15045 bool dst_known = tnum_is_const(dst_reg->var_off);
15049 __mark_reg_known(dst_reg, dst_reg->var_off.value);
15056 dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
15057 dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask;
15062 if ((s64)dst_reg->umin_value <= (s64)dst_reg->umax_value) {
15063 dst_reg->smin_value = dst_reg->umin_value;
15064 dst_reg->smax_value = dst_reg->umax_value;
15066 dst_reg->smin_value = S64_MIN;
15067 dst_reg->smax_value = S64_MAX;
15070 __update_reg_bounds(dst_reg);
15073 static void scalar32_min_max_xor(struct bpf_reg_state *dst_reg,
15077 bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
15078 struct tnum var32_off = tnum_subreg(dst_reg->var_off);
15081 __mark_reg32_known(dst_reg, var32_off.value);
15086 dst_reg->u32_min_value = var32_off.value;
15087 dst_reg->u32_max_value = var32_off.value | var32_off.mask;
15092 if ((s32)dst_reg->u32_min_value <= (s32)dst_reg->u32_max_value) {
15093 dst_reg->s32_min_value = dst_reg->u32_min_value;
15094 dst_reg->s32_max_value = dst_reg->u32_max_value;
15096 dst_reg->s32_min_value = S32_MIN;
15097 dst_reg->s32_max_value = S32_MAX;
15101 static void scalar_min_max_xor(struct bpf_reg_state *dst_reg,
15105 bool dst_known = tnum_is_const(dst_reg->var_off);
15108 /* dst_reg->var_off.value has been updated earlier */
15109 __mark_reg_known(dst_reg, dst_reg->var_off.value);
15114 dst_reg->umin_value = dst_reg->var_off.value;
15115 dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask;
15120 if ((s64)dst_reg->umin_value <= (s64)dst_reg->umax_value) {
15121 dst_reg->smin_value = dst_reg->umin_value;
15122 dst_reg->smax_value = dst_reg->umax_value;
15124 dst_reg->smin_value = S64_MIN;
15125 dst_reg->smax_value = S64_MAX;
15128 __update_reg_bounds(dst_reg);
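
The `(s64)umin <= (s64)umax` test repeated in the and/or/xor helpers above is the signed-from-unsigned derivation: if the unsigned range, reinterpreted as signed, is still ordered, no value in it crosses the sign boundary, so the same pair is a valid signed range. A sketch under that reading:

    #include <stdint.h>
    #include <stdio.h>

    static void derive_signed(uint64_t umin, uint64_t umax,
                              int64_t *smin, int64_t *smax)
    {
        if ((int64_t)umin <= (int64_t)umax) {
            *smin = (int64_t)umin;
            *smax = (int64_t)umax;
        } else {
            /* Range straddles the sign bit: signed bounds unknown. */
            *smin = INT64_MIN;
            *smax = INT64_MAX;
        }
    }

    int main(void)
    {
        int64_t smin, smax;

        derive_signed(8, 10, &smin, &smax);
        printf("[%lld, %lld]\n", (long long)smin, (long long)smax);
        derive_signed(5, UINT64_MAX, &smin, &smax); /* straddles sign bit */
        printf("[%lld, %lld]\n", (long long)smin, (long long)smax);
        return 0;
    }
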
15131 static void __scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
15137 dst_reg->s32_min_value = S32_MIN;
15138 dst_reg->s32_max_value = S32_MAX;
15140 if (umax_val > 31 || dst_reg->u32_max_value > 1ULL << (31 - umax_val)) {
15141 dst_reg->u32_min_value = 0;
15142 dst_reg->u32_max_value = U32_MAX;
15144 dst_reg->u32_min_value <<= umin_val;
15145 dst_reg->u32_max_value <<= umax_val;
15149 static void scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
15155 struct tnum subreg = tnum_subreg(dst_reg->var_off);
15157 __scalar32_min_max_lsh(dst_reg, umin_val, umax_val);
15158 dst_reg->var_off = tnum_subreg(tnum_lshift(subreg, umin_val));
15163 __mark_reg64_unbounded(dst_reg);
15164 __update_reg32_bounds(dst_reg);
15167 static void __scalar64_min_max_lsh(struct bpf_reg_state *dst_reg,
15177 if (umin_val == 32 && umax_val == 32 && dst_reg->s32_max_value >= 0)
15178 dst_reg->smax_value = (s64)dst_reg->s32_max_value << 32;
15180 dst_reg->smax_value = S64_MAX;
15182 if (umin_val == 32 && umax_val == 32 && dst_reg->s32_min_value >= 0)
15183 dst_reg->smin_value = (s64)dst_reg->s32_min_value << 32;
15185 dst_reg->smin_value = S64_MIN;
15188 if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
15189 dst_reg->umin_value = 0;
15190 dst_reg->umax_value = U64_MAX;
15192 dst_reg->umin_value <<= umin_val;
15193 dst_reg->umax_value <<= umax_val;
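
The guard above asks whether any possibly-set bit could be shifted past the top bit: if umax exceeds 1 << (63 - umax_val), the shift may overflow and the unsigned bounds collapse; otherwise both bounds shift in place. A sketch (the explicit shift_max > 63 check is added here only to keep the userspace code free of undefined behavior):

    #include <stdint.h>
    #include <stdio.h>

    struct urange { uint64_t umin, umax; };

    static void urange_lsh(struct urange *r, uint64_t shift_min,
                           uint64_t shift_max)
    {
        if (shift_max > 63 || r->umax > 1ULL << (63 - shift_max)) {
            r->umin = 0;          /* a set bit may shift out: give up */
            r->umax = UINT64_MAX;
        } else {
            r->umin <<= shift_min;
            r->umax <<= shift_max;
        }
    }

    int main(void)
    {
        struct urange r = { 1, 255 };

        urange_lsh(&r, 4, 8);
        printf("[%llu, %llu]\n", (unsigned long long)r.umin,
               (unsigned long long)r.umax);   /* [16, 65280] */
        return 0;
    }
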
15197 static void scalar_min_max_lsh(struct bpf_reg_state *dst_reg,
15204 __scalar64_min_max_lsh(dst_reg, umin_val, umax_val);
15205 __scalar32_min_max_lsh(dst_reg, umin_val, umax_val);
15207 dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
15209 __update_reg_bounds(dst_reg);
15212 static void scalar32_min_max_rsh(struct bpf_reg_state *dst_reg,
15215 struct tnum subreg = tnum_subreg(dst_reg->var_off);
15219 /* BPF_RSH is an unsigned shift. If the value in dst_reg might
15227 * If the value in dst_reg is known nonnegative, then again the
15233 dst_reg->s32_min_value = S32_MIN;
15234 dst_reg->s32_max_value = S32_MAX;
15236 dst_reg->var_off = tnum_rshift(subreg, umin_val);
15237 dst_reg->u32_min_value >>= umax_val;
15238 dst_reg->u32_max_value >>= umin_val;
15240 __mark_reg64_unbounded(dst_reg);
15241 __update_reg32_bounds(dst_reg);
15244 static void scalar_min_max_rsh(struct bpf_reg_state *dst_reg,
15250 /* BPF_RSH is an unsigned shift. If the value in dst_reg might
15258 * If the value in dst_reg is known nonnegative, then again the
15264 dst_reg->smin_value = S64_MIN;
15265 dst_reg->smax_value = S64_MAX;
15266 dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val);
15267 dst_reg->umin_value >>= umax_val;
15268 dst_reg->umax_value >>= umin_val;
15274 __mark_reg32_unbounded(dst_reg);
15275 __update_reg_bounds(dst_reg);
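
The right-shift rule above follows from monotonicity: shifting an unsigned value right by more makes it smaller, so the new minimum uses the largest shift amount and the new maximum the smallest. A sketch:

    #include <stdint.h>
    #include <stdio.h>

    struct urange { uint64_t umin, umax; };

    static void urange_rsh(struct urange *dst, uint64_t shift_min,
                           uint64_t shift_max)
    {
        dst->umin >>= shift_max;   /* worst case: shifted the most */
        dst->umax >>= shift_min;   /* best case: shifted the least */
    }

    int main(void)
    {
        struct urange r = { 64, 1024 };

        urange_rsh(&r, 2, 4);   /* shift amount known to be in [2, 4] */
        printf("[%llu, %llu]\n", (unsigned long long)r.umin,
               (unsigned long long)r.umax);   /* [4, 256] */
        return 0;
    }
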
15278 static void scalar32_min_max_arsh(struct bpf_reg_state *dst_reg,
15286 dst_reg->s32_min_value = (u32)(((s32)dst_reg->s32_min_value) >> umin_val);
15287 dst_reg->s32_max_value = (u32)(((s32)dst_reg->s32_max_value) >> umin_val);
15289 dst_reg->var_off = tnum_arshift(tnum_subreg(dst_reg->var_off), umin_val, 32);
15291 /* blow away the dst_reg umin_value/umax_value and rely on
15292 * dst_reg var_off to refine the result.
15294 dst_reg->u32_min_value = 0;
15295 dst_reg->u32_max_value = U32_MAX;
15297 __mark_reg64_unbounded(dst_reg);
15298 __update_reg32_bounds(dst_reg);
15301 static void scalar_min_max_arsh(struct bpf_reg_state *dst_reg,
15309 dst_reg->smin_value >>= umin_val;
15310 dst_reg->smax_value >>= umin_val;
15312 dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val, 64);
15314 /* blow away the dst_reg umin_value/umax_value and rely on
15315 * dst_reg var_off to refine the result.
15317 dst_reg->umin_value = 0;
15318 dst_reg->umax_value = U64_MAX;
15324 __mark_reg32_unbounded(dst_reg);
15325 __update_reg_bounds(dst_reg);
15375 struct bpf_reg_state *dst_reg,
15383 __mark_reg_unknown(env, dst_reg);
15409 scalar32_min_max_add(dst_reg, &src_reg);
15410 scalar_min_max_add(dst_reg, &src_reg);
15411 dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
15414 scalar32_min_max_sub(dst_reg, &src_reg);
15415 scalar_min_max_sub(dst_reg, &src_reg);
15416 dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
15419 env->fake_reg[0] = *dst_reg;
15420 __mark_reg_known(dst_reg, 0);
15421 scalar32_min_max_sub(dst_reg, &env->fake_reg[0]);
15422 scalar_min_max_sub(dst_reg, &env->fake_reg[0]);
15423 dst_reg->var_off = tnum_neg(env->fake_reg[0].var_off);
15426 dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off);
15427 scalar32_min_max_mul(dst_reg, &src_reg);
15428 scalar_min_max_mul(dst_reg, &src_reg);
15431 dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off);
15432 scalar32_min_max_and(dst_reg, &src_reg);
15433 scalar_min_max_and(dst_reg, &src_reg);
15436 dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
15437 scalar32_min_max_or(dst_reg, &src_reg);
15438 scalar_min_max_or(dst_reg, &src_reg);
15441 dst_reg->var_off = tnum_xor(dst_reg->var_off, src_reg.var_off);
15442 scalar32_min_max_xor(dst_reg, &src_reg);
15443 scalar_min_max_xor(dst_reg, &src_reg);
15447 scalar32_min_max_lsh(dst_reg, &src_reg);
15449 scalar_min_max_lsh(dst_reg, &src_reg);
15453 scalar32_min_max_rsh(dst_reg, &src_reg);
15455 scalar_min_max_rsh(dst_reg, &src_reg);
15459 scalar32_min_max_arsh(dst_reg, &src_reg);
15461 scalar_min_max_arsh(dst_reg, &src_reg);
15469 zext_32_to_64(dst_reg);
15470 reg_bounds_sync(dst_reg);
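
After each 32-bit opcode handler above, zext_32_to_64() exploits the ISA guarantee that ALU32 results are zero-extended: the 64-bit value equals the 32-bit subregister, so the 64-bit bounds follow directly from the 32-bit ones. A simplified model, not the kernel helper:

    #include <stdint.h>
    #include <stdio.h>

    struct bounds {
        uint32_t u32_min, u32_max;
        uint64_t umin, umax;
        int64_t smin, smax;
    };

    static void zext_32_to_64(struct bounds *b)
    {
        b->umin = b->u32_min;
        b->umax = b->u32_max;
        /* Zero-extended values are non-negative as s64, too. */
        b->smin = b->u32_min;
        b->smax = b->u32_max;
    }

    int main(void)
    {
        struct bounds b = { 10, 20, 0, UINT64_MAX, INT64_MIN, INT64_MAX };

        zext_32_to_64(&b);
        printf("u64=[%llu, %llu] s64=[%lld, %lld]\n",
               (unsigned long long)b.umin, (unsigned long long)b.umax,
               (long long)b.smin, (long long)b.smax);
        return 0;
    }
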
15482 struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
15488 dst_reg = &regs[insn->dst_reg];
15491 if (dst_reg->type == PTR_TO_ARENA) {
15505 if (dst_reg->type != SCALAR_VALUE)
15506 ptr_reg = dst_reg;
15511 if (dst_reg->type != SCALAR_VALUE) {
15517 mark_reg_unknown(env, regs, insn->dst_reg);
15521 insn->dst_reg,
15529 err = mark_chain_precision(env, insn->dst_reg);
15533 src_reg, dst_reg);
15541 dst_reg, src_reg);
15542 } else if (dst_reg->precise) {
15543 /* if dst_reg is precise, src_reg should be precise as well */
15571 err = adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
15585 dst_reg->id && is_reg_const(src_reg, false)) {
15588 if ((dst_reg->id & BPF_ADD_CONST) ||
15595 dst_reg->off = 0;
15596 dst_reg->id = 0;
15598 dst_reg->id |= BPF_ADD_CONST;
15599 dst_reg->off = val;
15603 * Make sure ID is cleared otherwise dst_reg min/max could be
15606 dst_reg->id = 0;
15637 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
15641 if (is_pointer_value(env, insn->dst_reg)) {
15643 insn->dst_reg);
15649 regs[insn->dst_reg].type == SCALAR_VALUE) {
15650 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
15652 &regs[insn->dst_reg],
15653 regs[insn->dst_reg]);
15655 err = check_reg_arg(env, insn->dst_reg, DST_OP);
15698 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
15704 struct bpf_reg_state *dst_reg = regs + insn->dst_reg;
15709 mark_reg_unknown(env, regs, insn->dst_reg);
15711 dst_reg->type = PTR_TO_ARENA;
15713 dst_reg->subreg_def = env->insn_idx + 1;
15720 copy_register_state(dst_reg, src_reg);
15721 dst_reg->subreg_def = DEF_NOT_SUBREG;
15735 copy_register_state(dst_reg, src_reg);
15737 dst_reg->id = 0;
15738 coerce_reg_to_size_sx(dst_reg, insn->off >> 3);
15739 dst_reg->subreg_def = DEF_NOT_SUBREG;
15741 mark_reg_unknown(env, regs, insn->dst_reg);
15757 copy_register_state(dst_reg, src_reg);
15759 * range otherwise dst_reg min/max could be incorrectly
15763 dst_reg->id = 0;
15764 dst_reg->subreg_def = env->insn_idx + 1;
15771 copy_register_state(dst_reg, src_reg);
15773 dst_reg->id = 0;
15774 dst_reg->subreg_def = env->insn_idx + 1;
15775 coerce_subreg_to_size_sx(dst_reg, insn->off >> 3);
15779 insn->dst_reg);
15781 zext_32_to_64(dst_reg);
15782 reg_bounds_sync(dst_reg);
15789 mark_reg_unknown(env, regs, insn->dst_reg);
15790 regs[insn->dst_reg].type = SCALAR_VALUE;
15792 __mark_reg_known(regs + insn->dst_reg,
15795 __mark_reg_known(regs + insn->dst_reg,
15825 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
15846 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
15852 return reg_bounds_sanity_check(env, &regs[insn->dst_reg], "alu");
15856 struct bpf_reg_state *dst_reg,
15864 if (dst_reg->off < 0 ||
15865 (dst_reg->off == 0 && range_right_open))
15869 if (dst_reg->umax_value > MAX_PACKET_OFF ||
15870 dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF)
15876 new_range = dst_reg->off;
15895 * r2 == dst_reg, pkt_end == src_reg
15912 * pkt_end == dst_reg, r2 == src_reg
15925 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
15928 if (reg->type == type && reg->id == dst_reg->id)
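
The `reg->id == dst_reg->id` test above is how one packet bounds check pays off for every copy of the pointer: registers derived from the same packet pointer share an id, so the proven range is propagated to all of them in one pass. A sketch with hypothetical, simplified register state (the real find_good_pkt_pointers() takes verifier state, not an array):

    #include <stdint.h>
    #include <stdio.h>

    enum reg_type { SCALAR, PTR_TO_PACKET, PTR_TO_PACKET_END };

    struct reg { enum reg_type type; uint32_t id; uint16_t range; };

    static void find_good_pkt_pointers(struct reg *regs, int n,
                                       uint32_t id, uint16_t new_range)
    {
        for (int i = 0; i < n; i++)
            if (regs[i].type == PTR_TO_PACKET && regs[i].id == id)
                regs[i].range = new_range;
    }

    int main(void)
    {
        struct reg regs[3] = {
            { PTR_TO_PACKET, 1, 0 },      /* r2 = pkt */
            { PTR_TO_PACKET, 1, 0 },      /* r3 = r2 (same id) */
            { PTR_TO_PACKET_END, 0, 0 },  /* pkt_end */
        };

        /* Branch proved r2 + 14 <= pkt_end: both copies gain range 14. */
        find_good_pkt_pointers(regs, 3, 1, 14);
        printf("r2.range=%u r3.range=%u\n",
               (unsigned)regs[0].range, (unsigned)regs[1].range);
        return 0;
    }
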
16090 static int is_pkt_ptr_branch_taken(struct bpf_reg_state *dst_reg,
16097 pkt = dst_reg;
16098 } else if (dst_reg->type == PTR_TO_PACKET_END) {
16375 /* Adjusts the register min/max values in the case that the dst_reg and
16485 struct bpf_reg_state *dst_reg,
16499 if ((dst_reg->type == PTR_TO_PACKET &&
16501 (dst_reg->type == PTR_TO_PACKET_META &&
16504 find_good_pkt_pointers(this_branch, dst_reg,
16505 dst_reg->type, false);
16506 mark_pkt_end(other_branch, insn->dst_reg, true);
16507 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
16509 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
16520 if ((dst_reg->type == PTR_TO_PACKET &&
16522 (dst_reg->type == PTR_TO_PACKET_META &&
16525 find_good_pkt_pointers(other_branch, dst_reg,
16526 dst_reg->type, true);
16527 mark_pkt_end(this_branch, insn->dst_reg, false);
16528 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
16530 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
16541 if ((dst_reg->type == PTR_TO_PACKET &&
16543 (dst_reg->type == PTR_TO_PACKET_META &&
16546 find_good_pkt_pointers(this_branch, dst_reg,
16547 dst_reg->type, true);
16548 mark_pkt_end(other_branch, insn->dst_reg, false);
16549 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
16551 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
16562 if ((dst_reg->type == PTR_TO_PACKET &&
16564 (dst_reg->type == PTR_TO_PACKET_META &&
16567 find_good_pkt_pointers(other_branch, dst_reg,
16568 dst_reg->type, false);
16569 mark_pkt_end(this_branch, insn->dst_reg, true);
16570 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
16572 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
16688 struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL;
16709 insn->dst_reg || insn->imm) {
16728 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
16732 dst_reg = &regs[insn->dst_reg];
16745 if (!(reg_is_pkt_pointer_any(dst_reg) && reg_is_pkt_pointer_any(src_reg)) &&
16754 if (dst_reg->type == PTR_TO_STACK)
16766 if (dst_reg->type == PTR_TO_STACK)
16777 pred = is_branch_taken(dst_reg, src_reg, opcode, is_jmp32);
16779 /* If we get here with a dst_reg pointer type it is because
16782 if (!__is_pointer_value(false, dst_reg))
16783 err = mark_chain_precision(env, insn->dst_reg);
16826 if (dst_reg->type == SCALAR_VALUE && dst_reg->id)
16827 collect_linked_regs(this_branch, dst_reg->id, &linked_regs);
16842 &other_branch_regs[insn->dst_reg],
16844 dst_reg, src_reg, opcode, is_jmp32);
16853 &other_branch_regs[insn->dst_reg],
16855 dst_reg, &env->fake_reg[1],
16867 if (dst_reg->type == SCALAR_VALUE && dst_reg->id &&
16868 !WARN_ON_ONCE(dst_reg->id != other_branch_regs[insn->dst_reg].id)) {
16869 sync_linked_regs(this_branch, dst_reg, &linked_regs);
16870 sync_linked_regs(other_branch, &other_branch_regs[insn->dst_reg], &linked_regs);
16886 __is_pointer_value(false, src_reg) && __is_pointer_value(false, dst_reg) &&
16887 type_may_be_null(src_reg->type) != type_may_be_null(dst_reg->type) &&
16889 base_type(dst_reg->type) != PTR_TO_BTF_ID) {
16906 mark_ptr_not_null_reg(&eq_branch_regs[insn->dst_reg]);
16916 type_may_be_null(dst_reg->type)) {
16920 mark_ptr_or_null_regs(this_branch, insn->dst_reg,
16922 mark_ptr_or_null_regs(other_branch, insn->dst_reg,
16924 } else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg],
16926 is_pointer_value(env, insn->dst_reg)) {
16928 insn->dst_reg);
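
Around these lines, reg_set_min_max() refines both branch states from the comparison. The core idea, sketched below for an unsigned "r > imm" jump (simplified, not the kernel function): the taken branch raises umin, the fall-through lowers umax:

    #include <stdint.h>
    #include <stdio.h>

    struct urange { uint64_t umin, umax; };

    static void branch_jgt_imm(const struct urange *r, uint64_t imm,
                               struct urange *taken, struct urange *fallthru)
    {
        *taken = *r;
        *fallthru = *r;
        if (taken->umin <= imm)
            taken->umin = imm + 1;   /* true branch: r > imm */
        if (fallthru->umax > imm)
            fallthru->umax = imm;    /* false branch: r <= imm */
    }

    int main(void)
    {
        struct urange r = { 0, 100 }, t, f;

        branch_jgt_imm(&r, 10, &t, &f);
        printf("taken=[%llu, %llu] fallthru=[%llu, %llu]\n",
               (unsigned long long)t.umin, (unsigned long long)t.umax,
               (unsigned long long)f.umin, (unsigned long long)f.umax);
        return 0;
    }
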
16941 struct bpf_reg_state *dst_reg;
16954 err = check_reg_arg(env, insn->dst_reg, DST_OP);
16958 dst_reg = &regs[insn->dst_reg];
16962 dst_reg->type = SCALAR_VALUE;
16963 __mark_reg_known(&regs[insn->dst_reg], imm);
16968 * we either succeed and assign a corresponding dst_reg->type after
16971 mark_reg_known_zero(env, regs, insn->dst_reg);
16974 dst_reg->type = aux->btf_var.reg_type;
16975 switch (base_type(dst_reg->type)) {
16977 dst_reg->mem_size = aux->btf_var.mem_size;
16980 dst_reg->btf = aux->btf_var.btf;
16981 dst_reg->btf_id = aux->btf_var.btf_id;
17004 dst_reg->type = PTR_TO_FUNC;
17005 dst_reg->subprogno = subprogno;
17010 dst_reg->map_ptr = map;
17015 __mark_reg_unknown(env, dst_reg);
17018 dst_reg->type = PTR_TO_MAP_VALUE;
17019 dst_reg->off = aux->map_off;
17024 dst_reg->type = CONST_PTR_TO_MAP;
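
check_ld_imm() handles BPF_LD_IMM64, which spans two instructions; per the BPF ISA the 64-bit constant is insn[0].imm in the low half and insn[1].imm in the high half. A sketch of the composition:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t ld_imm64(int32_t imm_lo, int32_t imm_hi)
    {
        return (uint64_t)(uint32_t)imm_lo |
               ((uint64_t)(uint32_t)imm_hi << 32);
    }

    int main(void)
    {
        printf("0x%llx\n", (unsigned long long)
               ld_imm64((int32_t)0xdeadbeef, 0x12345678));
        /* prints 0x12345678deadbeef */
        return 0;
    }
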
17077 if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
17699 stx->dst_reg != BPF_REG_10 ||
17703 if (stx->src_reg != ldx->dst_reg)
17757 insn->dst_reg != BPF_REG_10)
19712 * dst_reg = *(u32 *)(src_reg + off)
19718 * dst_reg = *(u32*) (src_reg + off)
19848 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
19852 dst_reg_type = cur_regs(env)[insn->dst_reg].type;
19854 /* check that memory (dst_reg + off) is writeable */
19855 err = check_mem_access(env, env->insn_idx, insn->dst_reg,
19875 insn->dst_reg != BPF_REG_0 || class == BPF_JMP32) {
19906 insn->dst_reg != BPF_REG_0 ||
19922 insn->dst_reg != BPF_REG_0 ||
20097 * `*(size*)(dst_reg+off)=src_reg|imm32` which must
20545 insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
21142 rnd_hi32_patch[3].dst_reg = load_reg;
21170 zext_patch[1].dst_reg = load_reg;
21459 insn->dst_reg,
21461 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
21466 insn->dst_reg,
21468 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
21474 insn->dst_reg, insn->dst_reg,
22069 BPF_NEG | BPF_K, insn->dst_reg,
22072 *patch++ = BPF_MOV32_IMM(insn->dst_reg, 0);
22113 BPF_MOV | BPF_K, insn->dst_reg,
22117 BPF_NEG | BPF_K, insn->dst_reg,
22135 *patch++ = BPF_MOV32_IMM(insn->dst_reg, 0);
22141 *patch++ = BPF_MOV32_REG(insn->dst_reg, insn->dst_reg);
22149 *patch++ = BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg);
22162 *patch++ = BPF_MOV32_REG(insn->dst_reg, insn->dst_reg);
22194 *patch++ = BPF_MOV64_IMM(insn->dst_reg, 0);
22246 off_reg = issrc ? insn->src_reg : insn->dst_reg;
22260 *patch++ = BPF_MOV64_REG(insn->dst_reg, insn->src_reg);
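
The patches above are built with the insn-constructor macros from include/linux/filter.h, e.g. BPF_MOV32_IMM(). A userspace sketch with a simplified struct bpf_insn and a hand-rolled version of that macro (opcode byte values per the BPF uapi headers):

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified layout of the uapi struct bpf_insn. */
    struct bpf_insn {
        uint8_t code;
        uint8_t dst_reg:4;
        uint8_t src_reg:4;
        int16_t off;
        int32_t imm;
    };

    #define BPF_ALU 0x04
    #define BPF_MOV 0xb0
    #define BPF_K   0x00

    /* Hand-rolled equivalent of the kernel's BPF_MOV32_IMM(). */
    #define BPF_MOV32_IMM(DST, IMM)                              \
        ((struct bpf_insn){ .code = BPF_ALU | BPF_MOV | BPF_K,   \
                            .dst_reg = (DST), .imm = (IMM) })

    int main(void)
    {
        struct bpf_insn patch[4];
        struct bpf_insn *p = patch;

        *p++ = BPF_MOV32_IMM(3, 0);  /* "w3 = 0", as in the snippets above */
        printf("emitted %d insn(s): code=0x%02x dst=r%u imm=%d\n",
               (int)(p - patch), patch[0].code,
               (unsigned)patch[0].dst_reg, patch[0].imm);
        return 0;
    }
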
24108 u16 dst = BIT(insn->dst_reg);