// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2026 Meta Platforms, Inc. and affiliates. */
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/filter.h>
#include <linux/bitmap.h>

#define verbose(env, fmt, args...) bpf_verifier_log_write(env, fmt, ##args)

/* for any branch, call, exit record the history of jmps in the given state */
int bpf_push_jmp_history(struct bpf_verifier_env *env, struct bpf_verifier_state *cur,
			 int insn_flags, u64 linked_regs)
{
	u32 cnt = cur->jmp_history_cnt;
	struct bpf_jmp_history_entry *p;
	size_t alloc_size;

	/* combine instruction flags if we already recorded this instruction */
	if (env->cur_hist_ent) {
		/* atomic instructions push insn_flags twice, for READ and
		 * WRITE sides, but they should agree on stack slot
		 */
		verifier_bug_if((env->cur_hist_ent->flags & insn_flags) &&
				(env->cur_hist_ent->flags & insn_flags) != insn_flags,
				env, "insn history: insn_idx %d cur flags %x new flags %x",
				env->insn_idx, env->cur_hist_ent->flags, insn_flags);
		env->cur_hist_ent->flags |= insn_flags;
		verifier_bug_if(env->cur_hist_ent->linked_regs != 0, env,
				"insn history: insn_idx %d linked_regs: %#llx",
				env->insn_idx, env->cur_hist_ent->linked_regs);
		env->cur_hist_ent->linked_regs = linked_regs;
		return 0;
	}

	cnt++;
	alloc_size = kmalloc_size_roundup(size_mul(cnt, sizeof(*p)));
	p = krealloc(cur->jmp_history, alloc_size, GFP_KERNEL_ACCOUNT);
	if (!p)
		return -ENOMEM;
	cur->jmp_history = p;

	p = &cur->jmp_history[cnt - 1];
	p->idx = env->insn_idx;
	p->prev_idx = env->prev_insn_idx;
	p->flags = insn_flags;
	p->linked_regs = linked_regs;
	cur->jmp_history_cnt = cnt;
	env->cur_hist_ent = p;

	return 0;
}
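
/* A minimal sketch of how the double push coalesces (hypothetical call
 * sequence; "spi_flags" is a placeholder, the real values come from the
 * stack-access flag encoding in bpf_verifier.h):
 *
 *	// READ side of an atomic insn checked at env->insn_idx:
 *	bpf_push_jmp_history(env, cur, INSN_F_STACK_ACCESS | spi_flags, 0);
 *	// WRITE side of the same insn; env->cur_hist_ent is still set,
 *	// so the flags are merged into the existing entry rather than
 *	// allocating a second history slot:
 *	bpf_push_jmp_history(env, cur, INSN_F_STACK_ACCESS | spi_flags, 0);
 */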

static bool is_atomic_load_insn(const struct bpf_insn *insn)
{
	return BPF_CLASS(insn->code) == BPF_STX &&
	       BPF_MODE(insn->code) == BPF_ATOMIC &&
	       insn->imm == BPF_LOAD_ACQ;
}

static bool is_atomic_fetch_insn(const struct bpf_insn *insn)
{
	return BPF_CLASS(insn->code) == BPF_STX &&
	       BPF_MODE(insn->code) == BPF_ATOMIC &&
	       (insn->imm & BPF_FETCH);
}
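
/* For instance, an insn with code (BPF_STX | BPF_ATOMIC | BPF_DW) and
 * imm == (BPF_ADD | BPF_FETCH) (an atomic fetch-and-add) matches
 * is_atomic_fetch_insn(), while the same code with imm == BPF_LOAD_ACQ
 * (load-acquire) matches is_atomic_load_insn().
 */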

static int insn_stack_access_spi(int insn_flags)
{
	return (insn_flags >> INSN_F_SPI_SHIFT) & INSN_F_SPI_MASK;
}

static int insn_stack_access_frameno(int insn_flags)
{
	return insn_flags & INSN_F_FRAMENO_MASK;
}
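
/* A worked example of the decoding above, assuming the flag layout from
 * bpf_verifier.h (frame number in the low bits, spi shifted above it):
 * for a stack access in frame 1, slot 5, the encoder would have produced
 * insn_flags = (5 << INSN_F_SPI_SHIFT) | 1, so insn_stack_access_spi()
 * recovers 5 and insn_stack_access_frameno() recovers 1.
 */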

/* Backtrack one insn at a time. If idx is not at the top of recorded
 * history then the previous instruction came from straight line execution.
 * Return -ENOENT if we exhausted all instructions within given state.
 *
 * It's legal to have a bit of looping with the same starting and ending
 * insn index within the same state, e.g.: 3->4->5->3, so just because the
 * current instruction index is the same as state's first_idx doesn't mean
 * we are done. If there is still some jump history left, we should keep
 * going. We need to take into account that we might have a jump history
 * between given state's parent and itself, due to checkpointing. In this
 * case, we'll have a history entry recording a jump from the last
 * instruction of the parent state to the first instruction of the given
 * state.
 */
static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
			     u32 *history)
{
	u32 cnt = *history;

	if (i == st->first_insn_idx) {
		if (cnt == 0)
			return -ENOENT;
		if (cnt == 1 && st->jmp_history[0].idx == i)
			return -ENOENT;
	}

	if (cnt && st->jmp_history[cnt - 1].idx == i) {
		i = st->jmp_history[cnt - 1].prev_idx;
		(*history)--;
	} else {
		i--;
	}
	return i;
}
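
/* A hypothetical walk through the function above: suppose
 * st->first_insn_idx == 3 and there is a single history entry
 * {idx = 3, prev_idx = 8}, recording the jump from the parent state's
 * last insn into this state. Backtracking from i == 5 gives
 * 5 -> 4 -> 3 by straight-line decrements, and at i == 3 the
 * (cnt == 1 && st->jmp_history[0].idx == i) check fires, so -ENOENT is
 * returned and the caller continues the walk in the parent state.
 */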

static struct bpf_jmp_history_entry *get_jmp_hist_entry(struct bpf_verifier_state *st,
						        u32 hist_end, int insn_idx)
{
	if (hist_end > 0 && st->jmp_history[hist_end - 1].idx == insn_idx)
		return &st->jmp_history[hist_end - 1];
	return NULL;
}

static inline void bt_init(struct backtrack_state *bt, u32 frame)
{
	bt->frame = frame;
}

static inline void bt_reset(struct backtrack_state *bt)
{
	struct bpf_verifier_env *env = bt->env;

	memset(bt, 0, sizeof(*bt));
	bt->env = env;
}

static inline u32 bt_empty(struct backtrack_state *bt)
{
	u64 mask = 0;
	int i;

	for (i = 0; i <= bt->frame; i++)
		mask |= bt->reg_masks[i] | bt->stack_masks[i];

	return mask == 0;
}

static inline int bt_subprog_enter(struct backtrack_state *bt)
{
	if (bt->frame == MAX_CALL_FRAMES - 1) {
		verifier_bug(bt->env, "subprog enter from frame %d", bt->frame);
		return -EFAULT;
	}
	bt->frame++;
	return 0;
}

static inline int bt_subprog_exit(struct backtrack_state *bt)
{
	if (bt->frame == 0) {
		verifier_bug(bt->env, "subprog exit from frame 0");
		return -EFAULT;
	}
	bt->frame--;
	return 0;
}

static inline void bt_clear_frame_reg(struct backtrack_state *bt, u32 frame, u32 reg)
{
	bt->reg_masks[frame] &= ~(1 << reg);
}

static inline void bt_set_reg(struct backtrack_state *bt, u32 reg)
{
	bpf_bt_set_frame_reg(bt, bt->frame, reg);
}

static inline void bt_clear_reg(struct backtrack_state *bt, u32 reg)
{
	bt_clear_frame_reg(bt, bt->frame, reg);
}

static inline void bt_clear_frame_slot(struct backtrack_state *bt, u32 frame, u32 slot)
{
	bt->stack_masks[frame] &= ~(1ull << slot);
}

static inline u32 bt_frame_reg_mask(struct backtrack_state *bt, u32 frame)
{
	return bt->reg_masks[frame];
}

static inline u32 bt_reg_mask(struct backtrack_state *bt)
{
	return bt->reg_masks[bt->frame];
}

static inline u64 bt_frame_stack_mask(struct backtrack_state *bt, u32 frame)
{
	return bt->stack_masks[frame];
}

static inline u64 bt_stack_mask(struct backtrack_state *bt)
{
	return bt->stack_masks[bt->frame];
}

static inline bool bt_is_reg_set(struct backtrack_state *bt, u32 reg)
{
	return bt->reg_masks[bt->frame] & (1 << reg);
}

/* format registers bitmask, e.g., "r0,r2,r4" for 0x15 mask */
static void fmt_reg_mask(char *buf, ssize_t buf_sz, u32 reg_mask)
{
	DECLARE_BITMAP(mask, 64);
	bool first = true;
	int i, n;

	buf[0] = '\0';

	bitmap_from_u64(mask, reg_mask);
	for_each_set_bit(i, mask, 32) {
		n = snprintf(buf, buf_sz, "%sr%d", first ? "" : ",", i);
		first = false;
		buf += n;
		buf_sz -= n;
		if (buf_sz < 0)
			break;
	}
}

/* format stack slots bitmask, e.g., "-8,-24,-40" for 0x15 mask */
void bpf_fmt_stack_mask(char *buf, ssize_t buf_sz, u64 stack_mask)
{
	DECLARE_BITMAP(mask, 64);
	bool first = true;
	int i, n;

	buf[0] = '\0';

	bitmap_from_u64(mask, stack_mask);
	for_each_set_bit(i, mask, 64) {
		n = snprintf(buf, buf_sz, "%s%d", first ? "" : ",", -(i + 1) * 8);
		first = false;
		buf += n;
		buf_sz -= n;
		if (buf_sz < 0)
			break;
	}
}

/* For given verifier state backtrack_insn() is called from the last insn to
 * the first insn. Its purpose is to compute a bitmask of registers and
 * stack slots that need precision in the parent verifier state.
 *
 * @idx is an index of the instruction we are currently processing;
 * @subseq_idx is an index of the subsequent instruction that:
 *   - *would be* executed next, if jump history is viewed in forward order;
 *   - *was* processed previously during backtracking.
 */
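/* For instance (a hypothetical history, not taken from a real program):
 * if the recorded forward execution was 10 -> 11 -> 17 (a jump at insn
 * 11), then while backtracking insn 11 we have idx == 11 and
 * subseq_idx == 17, and while backtracking insn 10 we have idx == 10 and
 * subseq_idx == 11.
 */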
static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
			  struct bpf_jmp_history_entry *hist, struct backtrack_state *bt)
{
	struct bpf_insn *insn = env->prog->insnsi + idx;
	u8 class = BPF_CLASS(insn->code);
	u8 opcode = BPF_OP(insn->code);
	u8 mode = BPF_MODE(insn->code);
	u32 dreg = insn->dst_reg;
	u32 sreg = insn->src_reg;
	u32 spi, i, fr;

	if (insn->code == 0)
		return 0;
	if (env->log.level & BPF_LOG_LEVEL2) {
		fmt_reg_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, bt_reg_mask(bt));
		verbose(env, "mark_precise: frame%d: regs=%s ",
			bt->frame, env->tmp_str_buf);
		bpf_fmt_stack_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, bt_stack_mask(bt));
		verbose(env, "stack=%s before ", env->tmp_str_buf);
		verbose(env, "%d: ", idx);
		bpf_verbose_insn(env, insn);
	}

	/* If there is a history record that some registers gained range at this insn,
	 * propagate precision marks to those registers, so that bt_is_reg_set()
	 * accounts for these registers.
	 */
	bpf_bt_sync_linked_regs(bt, hist);

	if (class == BPF_ALU || class == BPF_ALU64) {
		if (!bt_is_reg_set(bt, dreg))
			return 0;
		if (opcode == BPF_END || opcode == BPF_NEG) {
			/* sreg is reserved and unused
			 * dreg still needs precision before this insn
			 */
			return 0;
		} else if (opcode == BPF_MOV) {
			if (BPF_SRC(insn->code) == BPF_X) {
				/* dreg = sreg or dreg = (s8, s16, s32)sreg
				 * dreg needs precision after this insn
				 * sreg needs precision before this insn
				 */
				bt_clear_reg(bt, dreg);
				if (sreg != BPF_REG_FP)
					bt_set_reg(bt, sreg);
			} else {
				/* dreg = K
				 * dreg needs precision after this insn.
				 * Corresponding register is already marked
				 * as precise=true in this verifier state.
				 * No further markings in parent are necessary
				 */
				bt_clear_reg(bt, dreg);
			}
		} else {
			if (BPF_SRC(insn->code) == BPF_X) {
				/* dreg += sreg
				 * both dreg and sreg need precision
				 * before this insn
				 */
				if (sreg != BPF_REG_FP)
					bt_set_reg(bt, sreg);
			} /* else dreg += K
			   * dreg still needs precision before this insn
			   */
		}
	} else if (class == BPF_LDX ||
		   is_atomic_load_insn(insn) ||
		   is_atomic_fetch_insn(insn)) {
		u32 load_reg = dreg;

		/*
		 * Atomic fetch operation writes the old value into
		 * a register (sreg or r0) and if it was tracked for
		 * precision, propagate to the stack slot like we do
		 * in regular ldx.
		 */
		if (is_atomic_fetch_insn(insn))
			load_reg = insn->imm == BPF_CMPXCHG ?
				   BPF_REG_0 : sreg;

		if (!bt_is_reg_set(bt, load_reg))
			return 0;
		bt_clear_reg(bt, load_reg);

		/* scalars can only be spilled into stack w/o losing precision.
		 * Load from any other memory can be zero extended.
		 * The desire to keep that precision is already indicated
		 * by 'precise' mark in corresponding register of this state.
		 * No further tracking necessary.
		 */
		if (!hist || !(hist->flags & INSN_F_STACK_ACCESS))
			return 0;
		/* dreg = *(u64 *)[fp - off] was a fill from the stack.
		 * that [fp - off] slot contains scalar that needs to be
		 * tracked with precision
		 */
		spi = insn_stack_access_spi(hist->flags);
		fr = insn_stack_access_frameno(hist->flags);
		bpf_bt_set_frame_slot(bt, fr, spi);
	} else if (class == BPF_STX || class == BPF_ST) {
		if (bt_is_reg_set(bt, dreg))
			/* stx & st shouldn't be using _scalar_ dst_reg
			 * to access memory. It means backtracking
			 * encountered a case of pointer subtraction.
			 */
			return -ENOTSUPP;
		/* scalars can only be spilled into stack */
		if (!hist || !(hist->flags & INSN_F_STACK_ACCESS))
			return 0;
		spi = insn_stack_access_spi(hist->flags);
		fr = insn_stack_access_frameno(hist->flags);
		if (!bt_is_frame_slot_set(bt, fr, spi))
			return 0;
		bt_clear_frame_slot(bt, fr, spi);
		if (class == BPF_STX)
			bt_set_reg(bt, sreg);
	} else if (class == BPF_JMP || class == BPF_JMP32) {
		if (bpf_pseudo_call(insn)) {
			int subprog_insn_idx, subprog;

			subprog_insn_idx = idx + insn->imm + 1;
			subprog = bpf_find_subprog(env, subprog_insn_idx);
			if (subprog < 0)
				return -EFAULT;

			if (bpf_subprog_is_global(env, subprog)) {
				/* check that jump history doesn't have any
				 * extra instructions from subprog; the next
				 * instruction after call to global subprog
				 * should be literally next instruction in
				 * caller program
				 */
				verifier_bug_if(idx + 1 != subseq_idx, env,
						"extra insn from subprog");
				/* r1-r5 are invalidated after subprog call,
				 * so for global func call they shouldn't be
				 * set anymore
				 */
				if (bt_reg_mask(bt) & BPF_REGMASK_ARGS) {
					verifier_bug(env, "global subprog unexpected regs %x",
						     bt_reg_mask(bt));
					return -EFAULT;
				}
				/* global subprog always sets R0 */
				bt_clear_reg(bt, BPF_REG_0);
				return 0;
			} else {
				/* static subprog call instruction, which
				 * means that we are exiting current subprog,
				 * so only r1-r5 could be still requested as
				 * precise, r0 and r6-r10 or any stack slot in
				 * the current frame should be zero by now
				 */
				if (bt_reg_mask(bt) & ~BPF_REGMASK_ARGS) {
					verifier_bug(env, "static subprog unexpected regs %x",
						     bt_reg_mask(bt));
					return -EFAULT;
				}
				/* we are now tracking register spills correctly,
				 * so any instance of leftover slots is a bug
				 */
				if (bt_stack_mask(bt) != 0) {
					verifier_bug(env,
						     "static subprog leftover stack slots %llx",
						     bt_stack_mask(bt));
					return -EFAULT;
				}
				/* propagate r1-r5 to the caller */
				for (i = BPF_REG_1; i <= BPF_REG_5; i++) {
					if (bt_is_reg_set(bt, i)) {
						bt_clear_reg(bt, i);
						bpf_bt_set_frame_reg(bt, bt->frame - 1, i);
					}
				}
				if (bt_subprog_exit(bt))
					return -EFAULT;
				return 0;
			}
		} else if (bpf_is_sync_callback_calling_insn(insn) && idx != subseq_idx - 1) {
			/* exit from callback subprog to callback-calling helper or
			 * kfunc call. Use idx/subseq_idx check to discern it from
			 * straight line code backtracking.
			 * Unlike the subprog call handling above, we shouldn't
			 * propagate precision of r1-r5 (if any requested), as they are
			 * not actually arguments passed directly to callback subprogs
			 */
			if (bt_reg_mask(bt) & ~BPF_REGMASK_ARGS) {
				verifier_bug(env, "callback unexpected regs %x",
					     bt_reg_mask(bt));
				return -EFAULT;
			}
			if (bt_stack_mask(bt) != 0) {
				verifier_bug(env, "callback leftover stack slots %llx",
					     bt_stack_mask(bt));
				return -EFAULT;
			}
			/* clear r1-r5 in callback subprog's mask */
			for (i = BPF_REG_1; i <= BPF_REG_5; i++)
				bt_clear_reg(bt, i);
			if (bt_subprog_exit(bt))
				return -EFAULT;
			return 0;
		} else if (opcode == BPF_CALL) {
			/* kfunc with imm==0 is invalid and fixup_kfunc_call will
			 * catch this error later. Make backtracking conservative
			 * with ENOTSUPP.
			 */
			if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL && insn->imm == 0)
				return -ENOTSUPP;
			/* regular helper call sets R0 */
			bt_clear_reg(bt, BPF_REG_0);
			if (bt_reg_mask(bt) & BPF_REGMASK_ARGS) {
				/* if backtracking was looking for registers R1-R5
				 * they should have been found already.
				 */
				verifier_bug(env, "backtracking call unexpected regs %x",
					     bt_reg_mask(bt));
				return -EFAULT;
			}
			if (insn->src_reg == BPF_REG_0 && insn->imm == BPF_FUNC_tail_call
			    && subseq_idx - idx != 1) {
				if (bt_subprog_enter(bt))
					return -EFAULT;
			}
		} else if (opcode == BPF_EXIT) {
			bool r0_precise;

			/* Backtracking to a nested function call: 'idx' is a part of
			 * the inner frame, 'subseq_idx' is a part of the outer frame.
			 * In case of a regular function call, instructions giving
			 * precision to registers R1-R5 should have been found already.
			 * In case of a callback, it is ok to have R1-R5 marked for
			 * backtracking, as these registers are set by the function
			 * invoking callback.
			 */
			if (subseq_idx >= 0 && bpf_calls_callback(env, subseq_idx))
				for (i = BPF_REG_1; i <= BPF_REG_5; i++)
					bt_clear_reg(bt, i);
			if (bt_reg_mask(bt) & BPF_REGMASK_ARGS) {
				verifier_bug(env, "backtracking exit unexpected regs %x",
					     bt_reg_mask(bt));
				return -EFAULT;
			}

			/* BPF_EXIT in subprog or callback always returns
			 * right after the call instruction, so by checking
			 * whether the instruction at subseq_idx-1 is subprog
			 * call or not we can distinguish actual exit from
			 * *subprog* from exit from *callback*. In the former
			 * case, we need to propagate r0 precision, if
			 * necessary. In the latter case, we never do that.
			 */
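			/* E.g. (hypothetical indices): with a call at insn 20
			 * whose callee's BPF_EXIT sits at insn 30, backtracking
			 * reaches this point with subseq_idx == 21 and
			 * idx == 30; insnsi[subseq_idx - 1] is the call at 20,
			 * so this is an exit from a subprog and a requested r0
			 * mark must be carried into the callee frame.
			 */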
			r0_precise = subseq_idx - 1 >= 0 &&
				     bpf_pseudo_call(&env->prog->insnsi[subseq_idx - 1]) &&
				     bt_is_reg_set(bt, BPF_REG_0);

			bt_clear_reg(bt, BPF_REG_0);
			if (bt_subprog_enter(bt))
				return -EFAULT;

			if (r0_precise)
				bt_set_reg(bt, BPF_REG_0);
			/* r6-r9 and stack slots will stay set in caller frame
			 * bitmasks until we return back from callee(s)
			 */
			return 0;
		} else if (BPF_SRC(insn->code) == BPF_X) {
			if (!bt_is_reg_set(bt, dreg) && !bt_is_reg_set(bt, sreg))
				return 0;
			/* dreg <cond> sreg
			 * Both dreg and sreg need precision before
			 * this insn. If only sreg was marked precise
			 * before it would be equally necessary to
			 * propagate it to dreg.
			 */
			if (!hist || !(hist->flags & INSN_F_SRC_REG_STACK))
				bt_set_reg(bt, sreg);
			if (!hist || !(hist->flags & INSN_F_DST_REG_STACK))
				bt_set_reg(bt, dreg);
		} else if (BPF_SRC(insn->code) == BPF_K) {
			 /* dreg <cond> K
			  * Only dreg still needs precision before
			  * this insn, so for the K-based conditional
			  * there is nothing new to be marked.
			  */
		}
	} else if (class == BPF_LD) {
		if (!bt_is_reg_set(bt, dreg))
			return 0;
		bt_clear_reg(bt, dreg);
		/* It's ld_imm64 or ld_abs or ld_ind.
		 * For ld_imm64 no further tracking of precision
		 * into parent is necessary
		 */
		if (mode == BPF_IND || mode == BPF_ABS)
			/* to be analyzed */
			return -ENOTSUPP;
	}
	/* Propagate precision marks to linked registers, to account for
	 * registers marked as precise in this function.
	 */
	bpf_bt_sync_linked_regs(bt, hist);
	return 0;
}

/* the scalar precision tracking algorithm:
 * . at the start all registers have precise=false.
 * . scalar ranges are tracked as normal through alu and jmp insns.
 * . once precise value of the scalar register is used in:
 *   .  ptr + scalar alu
 *   . if (scalar cond K|scalar)
 *   .  helper_call(.., scalar, ...) where ARG_CONST is expected
 *   backtrack through the verifier states and mark all registers and
 *   stack slots with spilled constants that these scalar registers
 *   should be precise.
 * . during state pruning two registers (or spilled stack slots)
 *   are equivalent if both are not precise.
 *
 * Note the verifier cannot simply walk register parentage chain,
 * since many different registers and stack slots could have been
 * used to compute single precise scalar.
 *
 * The approach of starting with precise=true for all registers and then
 * backtrack to mark a register as not precise when the verifier detects
 * that the program doesn't care about a specific value (e.g., when helper
 * takes register as ARG_ANYTHING parameter) is not safe.
 *
 * It's ok to walk single parentage chain of the verifier states.
 * It's possible that this backtracking will go all the way to the 1st insn.
 * All other branches will be explored for needing precision later.
 *
 * The backtracking needs to deal with cases like:
 *   R8=map_value(id=0,off=0,ks=4,vs=1952,imm=0) R9_w=map_value(id=0,off=40,ks=4,vs=1952,imm=0)
 * r9 -= r8
 * r5 = r9
 * if r5 > 0x79f goto pc+7
 *    R5_w=inv(id=0,umax_value=1951,var_off=(0x0; 0x7ff))
 * r5 += 1
 * ...
 * call bpf_perf_event_output#25
 *   where .arg5_type = ARG_CONST_SIZE_OR_ZERO
 *
 * and this case:
 * r6 = 1
 * call foo // uses callee's r6 inside to compute r0
 * r0 += r6
 * if r0 == 0 goto
 *
 * To track the above, reg_mask/stack_mask needs to be independent for each frame.
 *
 * Also if parent's curframe > frame where backtracking started,
 * the verifier needs to mark registers in both frames, otherwise callees
 * may incorrectly prune callers. This is similar to
 * commit 7640ead93924 ("bpf: verifier: make sure callees don't prune with caller differences")
 *
 * For now backtracking falls back to conservative marking.
 */
void bpf_mark_all_scalars_precise(struct bpf_verifier_env *env,
				 struct bpf_verifier_state *st)
{
	struct bpf_func_state *func;
	struct bpf_reg_state *reg;
	int i, j;

	if (env->log.level & BPF_LOG_LEVEL2) {
		verbose(env, "mark_precise: frame%d: falling back to forcing all scalars precise\n",
			st->curframe);
	}

	/* big hammer: mark all scalars precise in this path.
	 * pop_stack may still get !precise scalars.
	 * We also skip current state and go straight to first parent state,
	 * because precision markings in current non-checkpointed state are
	 * not needed. See why in the comment in bpf_mark_chain_precision below.
	 */
	for (st = st->parent; st; st = st->parent) {
		for (i = 0; i <= st->curframe; i++) {
			func = st->frame[i];
			for (j = 0; j < BPF_REG_FP; j++) {
				reg = &func->regs[j];
				if (reg->type != SCALAR_VALUE || reg->precise)
					continue;
				reg->precise = true;
				if (env->log.level & BPF_LOG_LEVEL2) {
					verbose(env, "force_precise: frame%d: forcing r%d to be precise\n",
						i, j);
				}
			}
			for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
				if (!bpf_is_spilled_reg(&func->stack[j]))
					continue;
				reg = &func->stack[j].spilled_ptr;
				if (reg->type != SCALAR_VALUE || reg->precise)
					continue;
				reg->precise = true;
				if (env->log.level & BPF_LOG_LEVEL2) {
					verbose(env, "force_precise: frame%d: forcing fp%d to be precise\n",
						i, -(j + 1) * 8);
				}
			}
		}
	}
}

/*
 * bpf_mark_chain_precision() backtracks BPF program instruction sequence and
 * chain of verifier states making sure that register *regno* (if regno >= 0)
 * is marked as a precisely tracked SCALAR, as well as any other registers
 * and slots that contribute to a tracked state of the given register,
 * depending on specific BPF assembly instructions (see backtrack_insn() for
 * exact instruction handling logic). This backtracking relies on recorded
 * jmp_history and is able to traverse entire chain of parent states. This
 * process ends only when all the necessary registers/slots and their
 * transitive dependencies are marked as precise.
 *
 * One important and subtle aspect is that precise marks *do not matter* in
 * the currently verified state (current state). It is important to understand
 * why this is the case.
 *
 * First, note that current state is the state that is not yet "checkpointed",
 * i.e., it is not yet put into env->explored_states, and it has no children
 * states as well. It's ephemeral, and can end up either a) being discarded if
 * compatible explored state is found at some point or BPF_EXIT instruction is
 * reached or b) checkpointed and put into env->explored_states, branching out
 * into one or more children states.
 *
 * In the former case, precise markings in current state are completely
 * ignored by state comparison code (see regsafe() for details). Only
 * checkpointed ("old") state precise markings are important, and if old
 * state's register/slot is precise, regsafe() assumes current state's
 * register/slot as precise and checks value ranges exactly and precisely. If
 * states turn out to be compatible, current state's necessary precise
 * markings and any required parent states' precise markings are enforced
 * after the fact with propagate_precision() logic. But it's
 * important to realize that in this case, even after marking current state
 * registers/slots as precise, we immediately discard current state. So what
 * actually matters is any of the precise markings propagated into current
 * state's parent states, which are always checkpointed (due to b) case above).
 * As such, for scenario a) it doesn't matter if current state has precise
 * markings set or not.
 *
 * Now, for the scenario b), checkpointing and forking into child(ren)
 * state(s). Note that before current state gets to checkpointing step, any
 * processed instruction always assumes precise SCALAR register/slot
 * knowledge: if precise value or range is useful to prune jump branch, BPF
 * verifier takes this opportunity enthusiastically. Similarly, when
 * register's value is used to calculate offset or memory address, exact
 * knowledge of SCALAR range is assumed, checked, and enforced. So, similar to
 * what we mentioned above about state comparison ignoring precise markings
 * during state comparison, BPF verifier ignores and also assumes precise
 * markings *at will* during instruction verification process. But as verifier
 * assumes precision, it also propagates any precision dependencies across
 * parent states, which are not yet finalized, so can be further restricted
 * based on new knowledge gained from restrictions enforced by their children
 * states. This is so that once those parent states are finalized, i.e., when
 * they have no more active children states, state comparison logic in
 * is_state_visited() would enforce strict and precise SCALAR ranges, if
 * required for correctness.
 *
 * To build a bit more intuition, note also that once a state is checkpointed,
 * the path we took to get to that state is not important. This is crucial
 * property for state pruning. When state is checkpointed and finalized at
 * some instruction index, it can be correctly and safely used to "short
 * circuit" any *compatible* state that reaches exactly the same instruction
 * index. I.e., if we jumped to that instruction from a completely different
 * code path than original finalized state was derived from, it doesn't
 * matter, current state can be discarded because from that instruction
 * forward having a compatible state will ensure we will safely reach the
 * exit. States describe preconditions for further exploration, but completely
 * forget the history of how we got here.
 *
 * This also means that even if we needed precise SCALAR range to get to
 * finalized state, but from that point forward *that same* SCALAR register is
 * never used in a precise context (i.e., its precise value is not needed for
 * correctness), it's correct and safe to mark such register as "imprecise"
 * (i.e., precise marking set to false). This is what we rely on when we do
 * not set precise marking in current state. If no child state requires
 * precision for any given SCALAR register, it's safe to dictate that it can
 * be imprecise. If any child state does require this register to be precise,
 * we'll mark it precise later retroactively during precise markings
 * propagation from child state to parent states.
 *
 * Skipping precise marking setting in current state is a mild version of
 * relying on the above observation. But we can utilize this property even
 * more aggressively by proactively forgetting any precise marking in the
 * current state (which we inherited from the parent state), right before we
 * checkpoint it and branch off into new child state. This is done by
 * mark_all_scalars_imprecise() to hopefully get more permissive and generic
 * finalized states which help in short circuiting more future states.
 */
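/* A minimal hypothetical walk tying the pieces together: for
 *
 *	0: r6 = 42
 *	1: r2 = r6
 *	2: if r2 > 10 goto pc+4
 *
 * requesting precision of r2 at insn 2 backtracks insn 2 (a K-based
 * conditional, so r2 stays requested), then insn 1 (the mark moves from
 * r2 to r6), then insn 0, where the constant assignment clears r6 and the
 * walk terminates with an empty backtrack state.
 */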
int bpf_mark_chain_precision(struct bpf_verifier_env *env,
			    struct bpf_verifier_state *starting_state,
			    int regno,
			    bool *changed)
{
	struct bpf_verifier_state *st = starting_state;
	struct backtrack_state *bt = &env->bt;
	int first_idx = st->first_insn_idx;
	int last_idx = starting_state->insn_idx;
	int subseq_idx = -1;
	struct bpf_func_state *func;
	bool tmp, skip_first = true;
	struct bpf_reg_state *reg;
	int i, fr, err;

	if (!env->bpf_capable)
		return 0;

	changed = changed ?: &tmp;
	/* set frame number from which we are starting to backtrack */
	bt_init(bt, starting_state->curframe);

	/* Do sanity checks against current state of register and/or stack
	 * slot, but don't set precise flag in current state, as precision
	 * tracking in the current state is unnecessary.
	 */
	func = st->frame[bt->frame];
	if (regno >= 0) {
		reg = &func->regs[regno];
		if (reg->type != SCALAR_VALUE) {
			verifier_bug(env, "backtracking misuse");
			return -EFAULT;
		}
		bt_set_reg(bt, regno);
	}

	if (bt_empty(bt))
		return 0;

	for (;;) {
		DECLARE_BITMAP(mask, 64);
		u32 history = st->jmp_history_cnt;
		struct bpf_jmp_history_entry *hist;

		if (env->log.level & BPF_LOG_LEVEL2) {
			verbose(env, "mark_precise: frame%d: last_idx %d first_idx %d subseq_idx %d \n",
				bt->frame, last_idx, first_idx, subseq_idx);
		}

		if (last_idx < 0) {
			/* we are at the entry into subprog, which
			 * is expected for global funcs, but only if
			 * requested precise registers are R1-R5
			 * (which are global func's input arguments)
			 */
			if (st->curframe == 0 &&
			    st->frame[0]->subprogno > 0 &&
			    st->frame[0]->callsite == BPF_MAIN_FUNC &&
			    bt_stack_mask(bt) == 0 &&
			    (bt_reg_mask(bt) & ~BPF_REGMASK_ARGS) == 0) {
				bitmap_from_u64(mask, bt_reg_mask(bt));
				for_each_set_bit(i, mask, 32) {
					reg = &st->frame[0]->regs[i];
					bt_clear_reg(bt, i);
					if (reg->type == SCALAR_VALUE) {
						reg->precise = true;
						*changed = true;
					}
				}
				return 0;
			}

			verifier_bug(env, "backtracking func entry subprog %d reg_mask %x stack_mask %llx",
				     st->frame[0]->subprogno, bt_reg_mask(bt), bt_stack_mask(bt));
			return -EFAULT;
		}

		for (i = last_idx;;) {
			if (skip_first) {
				err = 0;
				skip_first = false;
			} else {
				hist = get_jmp_hist_entry(st, history, i);
				err = backtrack_insn(env, i, subseq_idx, hist, bt);
			}
			if (err == -ENOTSUPP) {
				bpf_mark_all_scalars_precise(env, starting_state);
				bt_reset(bt);
				return 0;
			} else if (err) {
				return err;
			}
			if (bt_empty(bt))
				/* Found assignment(s) into tracked register in this state.
				 * Since this state is already marked, just return.
				 * Nothing to be tracked further in the parent state.
				 */
				return 0;
			subseq_idx = i;
			i = get_prev_insn_idx(st, i, &history);
			if (i == -ENOENT)
				break;
			if (i >= env->prog->len) {
				/* This can happen if backtracking reached insn 0
				 * and there are still reg_mask or stack_mask
				 * to backtrack.
				 * It means the backtracking missed the spot where
				 * particular register was initialized with a constant.
				 */
				verifier_bug(env, "backtracking idx %d", i);
				return -EFAULT;
			}
		}
		st = st->parent;
		if (!st)
			break;

		for (fr = bt->frame; fr >= 0; fr--) {
			func = st->frame[fr];
			bitmap_from_u64(mask, bt_frame_reg_mask(bt, fr));
			for_each_set_bit(i, mask, 32) {
				reg = &func->regs[i];
				if (reg->type != SCALAR_VALUE) {
					bt_clear_frame_reg(bt, fr, i);
					continue;
				}
				if (reg->precise) {
					bt_clear_frame_reg(bt, fr, i);
				} else {
					reg->precise = true;
					*changed = true;
				}
			}

			bitmap_from_u64(mask, bt_frame_stack_mask(bt, fr));
			for_each_set_bit(i, mask, 64) {
				if (verifier_bug_if(i >= func->allocated_stack / BPF_REG_SIZE,
						    env, "stack slot %d, total slots %d",
						    i, func->allocated_stack / BPF_REG_SIZE))
					return -EFAULT;

				if (!bpf_is_spilled_scalar_reg(&func->stack[i])) {
					bt_clear_frame_slot(bt, fr, i);
					continue;
				}
				reg = &func->stack[i].spilled_ptr;
				if (reg->precise) {
					bt_clear_frame_slot(bt, fr, i);
				} else {
					reg->precise = true;
					*changed = true;
				}
			}
			if (env->log.level & BPF_LOG_LEVEL2) {
				fmt_reg_mask(env->tmp_str_buf, TMP_STR_BUF_LEN,
					     bt_frame_reg_mask(bt, fr));
				verbose(env, "mark_precise: frame%d: parent state regs=%s ",
					fr, env->tmp_str_buf);
				bpf_fmt_stack_mask(env->tmp_str_buf, TMP_STR_BUF_LEN,
					       bt_frame_stack_mask(bt, fr));
				verbose(env, "stack=%s: ", env->tmp_str_buf);
				print_verifier_state(env, st, fr, true);
			}
		}

		if (bt_empty(bt))
			return 0;

		subseq_idx = first_idx;
		last_idx = st->last_insn_idx;
		first_idx = st->first_insn_idx;
	}

	/* if we still have requested precise regs or slots, we missed
	 * something (e.g., stack access through non-r10 register), so
	 * fall back to marking all precise
	 */
	if (!bt_empty(bt)) {
		bpf_mark_all_scalars_precise(env, starting_state);
		bt_reset(bt);
	}

	return 0;
}
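
/* A sketch of typical usage from the verifier's instruction-checking path
 * (call-site shape assumed for illustration, not copied from verifier.c):
 * when the scalar in insn->dst_reg is about to be used in a precision-
 * sensitive context, e.g. as part of a pointer offset, the caller would do
 *
 *	err = bpf_mark_chain_precision(env, env->cur_state, insn->dst_reg, NULL);
 *	if (err)
 *		return err;
 *
 * and rely on the fallback above to conservatively mark all scalars precise
 * when backtracking hits an unsupported pattern.
 */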