// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2026 Meta Platforms, Inc. and affiliates. */
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/filter.h>

#define verbose(env, fmt, args...) bpf_verifier_log_write(env, fmt, ##args)

#define BPF_COMPLEXITY_LIMIT_STATES	64

static bool is_may_goto_insn_at(struct bpf_verifier_env *env, int insn_idx)
{
	return bpf_is_may_goto_insn(&env->prog->insnsi[insn_idx]);
}

static bool is_iter_next_insn(struct bpf_verifier_env *env, int insn_idx)
{
	return env->insn_aux_data[insn_idx].is_iter_next;
}

static void update_peak_states(struct bpf_verifier_env *env)
{
	u32 cur_states;

	cur_states = env->explored_states_size + env->free_list_size + env->num_backedges;
	env->peak_states = max(env->peak_states, cur_states);
}

/* struct bpf_verifier_state->parent refers to states
 * that are in either of env->{explored_states,free_list}.
 * In both cases the state is contained in struct bpf_verifier_state_list.
 */
static struct bpf_verifier_state_list *state_parent_as_list(struct bpf_verifier_state *st)
{
	if (st->parent)
		return container_of(st->parent, struct bpf_verifier_state_list, state);
	return NULL;
}

static bool incomplete_read_marks(struct bpf_verifier_env *env,
				  struct bpf_verifier_state *st);

/* A state can be freed if it is no longer referenced:
 * - it is in the env->free_list;
 * - it has no child states.
 */
static void maybe_free_verifier_state(struct bpf_verifier_env *env,
				      struct bpf_verifier_state_list *sl)
{
	if (!sl->in_free_list
	    || sl->state.branches != 0
	    || incomplete_read_marks(env, &sl->state))
		return;
	list_del(&sl->node);
	bpf_free_verifier_state(&sl->state, false);
	kfree(sl);
	env->free_list_size--;
}

/* For state @st, look for the topmost frame whose frame_insn_idx() is in
 * some SCC. If such a frame exists, form the corresponding @callchain as an
 * array of call sites leading to this frame, plus the SCC id.
 * E.g.:
 *
 *    void foo()  { A: loop {... SCC#1 ...}; }
 *    void bar()  { B: loop { C: foo(); ... SCC#2 ... }
 *                  D: loop { E: foo(); ... SCC#3 ... } }
 *    void main() { F: bar(); }
 *
 * @callchain at (A) would be either (F,SCC#2) or (F,SCC#3) depending
 * on @st frame call sites being (F,C,A) or (F,E,A).
 */
static bool compute_scc_callchain(struct bpf_verifier_env *env,
				  struct bpf_verifier_state *st,
				  struct bpf_scc_callchain *callchain)
{
	u32 i, scc, insn_idx;

	memset(callchain, 0, sizeof(*callchain));
	for (i = 0; i <= st->curframe; i++) {
		insn_idx = bpf_frame_insn_idx(st, i);
		scc = env->insn_aux_data[insn_idx].scc;
		if (scc) {
			callchain->scc = scc;
			break;
		} else if (i < st->curframe) {
			callchain->callsites[i] = insn_idx;
		} else {
			return false;
		}
	}
	return true;
}
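
/* For illustration, using the foo/bar/main example above with @st frame
 * call sites (F,C,A): at i=0 insn F has scc == 0 (main contains no loop),
 * so callsites[0] = F; at i=1 insn C sits inside SCC#2, so callchain->scc
 * is set to SCC#2 and the loop stops. The result is
 * {callsites = [F], scc = SCC#2}. If no frame is in any SCC, the loop
 * reaches i == curframe with scc == 0 and the function returns false.
 */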

/* Check if bpf_scc_visit instance for @callchain exists. */
static struct bpf_scc_visit *scc_visit_lookup(struct bpf_verifier_env *env,
					      struct bpf_scc_callchain *callchain)
{
	struct bpf_scc_info *info = env->scc_info[callchain->scc];
	struct bpf_scc_visit *visits;
	u32 i;

	if (!info)
		return NULL;
	visits = info->visits;
	for (i = 0; i < info->num_visits; i++)
		if (memcmp(callchain, &visits[i].callchain, sizeof(*callchain)) == 0)
			return &visits[i];
	return NULL;
}

/* Allocate a new bpf_scc_visit instance corresponding to @callchain.
 * Allocated instances are alive for the duration of the do_check_common()
 * call and are freed by free_states().
 */
static struct bpf_scc_visit *scc_visit_alloc(struct bpf_verifier_env *env,
					     struct bpf_scc_callchain *callchain)
{
	struct bpf_scc_visit *visit;
	struct bpf_scc_info *info;
	u32 scc, num_visits;
	u64 new_sz;

	scc = callchain->scc;
	info = env->scc_info[scc];
	num_visits = info ? info->num_visits : 0;
	new_sz = sizeof(*info) + sizeof(struct bpf_scc_visit) * (num_visits + 1);
	info = kvrealloc(env->scc_info[scc], new_sz, GFP_KERNEL_ACCOUNT);
	if (!info)
		return NULL;
	env->scc_info[scc] = info;
	info->num_visits = num_visits + 1;
	visit = &info->visits[num_visits];
	memset(visit, 0, sizeof(*visit));
	memcpy(&visit->callchain, callchain, sizeof(*callchain));
	return visit;
}
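
/* Note: visits for one SCC live in a flat array grown one element per
 * allocation; kvrealloc() preserves already-recorded visits, so only the
 * new trailing element is zeroed and stamped with @callchain.
 */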

/* Form a string '(callsite#1,callsite#2,...,scc)' in env->tmp_str_buf */
static char *format_callchain(struct bpf_verifier_env *env, struct bpf_scc_callchain *callchain)
{
	char *buf = env->tmp_str_buf;
	int i, delta = 0;

	delta += snprintf(buf + delta, TMP_STR_BUF_LEN - delta, "(");
	for (i = 0; i < ARRAY_SIZE(callchain->callsites); i++) {
		if (!callchain->callsites[i])
			break;
		delta += snprintf(buf + delta, TMP_STR_BUF_LEN - delta, "%u,",
				  callchain->callsites[i]);
	}
	delta += snprintf(buf + delta, TMP_STR_BUF_LEN - delta, "%u)", callchain->scc);
	return env->tmp_str_buf;
}
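
/* E.g. a callchain with callsites {12, 30} and scc 4 is rendered as
 * "(12,30,4)"; a callchain with no callsites (the SCC is entered in the
 * top frame) is rendered as "(4)".
 */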

/* If callchain for @st exists (@st is in some SCC), ensure that
 * a bpf_scc_visit instance for this callchain exists.
 * If the instance does not exist, allocate it; if it is empty
 * (entry_state not set), assign visit->entry_state to @st.
 */
static int maybe_enter_scc(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
{
	struct bpf_scc_callchain *callchain = &env->callchain_buf;
	struct bpf_scc_visit *visit;

	if (!compute_scc_callchain(env, st, callchain))
		return 0;
	visit = scc_visit_lookup(env, callchain);
	visit = visit ?: scc_visit_alloc(env, callchain);
	if (!visit)
		return -ENOMEM;
	if (!visit->entry_state) {
		visit->entry_state = st;
		if (env->log.level & BPF_LOG_LEVEL2)
			verbose(env, "SCC enter %s\n", format_callchain(env, callchain));
	}
	return 0;
}

static int propagate_backedges(struct bpf_verifier_env *env, struct bpf_scc_visit *visit);

/* If callchain for @st exists (@st is in some SCC), make the corresponding
 * bpf_scc_visit instance empty:
 * - set visit->entry_state to NULL;
 * - flush accumulated backedges.
 */
static int maybe_exit_scc(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
{
	struct bpf_scc_callchain *callchain = &env->callchain_buf;
	struct bpf_scc_visit *visit;

	if (!compute_scc_callchain(env, st, callchain))
		return 0;
	visit = scc_visit_lookup(env, callchain);
	if (!visit) {
		/*
		 * If path traversal stops inside an SCC, a corresponding bpf_scc_visit
		 * must exist for non-speculative paths. For non-speculative paths
		 * traversal stops when:
		 * a. A verification error is found; maybe_exit_scc() is not called.
		 * b. Top level BPF_EXIT is reached. Top level BPF_EXIT is not a member
		 *    of any SCC.
		 * c. A checkpoint is reached and matched. Checkpoints are created by
		 *    is_state_visited(), which calls maybe_enter_scc(), which allocates
		 *    bpf_scc_visit instances for checkpoints within SCCs.
		 * (c) is the only case that can reach this point.
		 */
		if (!st->speculative) {
			verifier_bug(env, "scc exit: no visit info for call chain %s",
				     format_callchain(env, callchain));
			return -EFAULT;
		}
		return 0;
	}
	if (visit->entry_state != st)
		return 0;
	if (env->log.level & BPF_LOG_LEVEL2)
		verbose(env, "SCC exit %s\n", format_callchain(env, callchain));
	visit->entry_state = NULL;
	env->num_backedges -= visit->num_backedges;
	visit->num_backedges = 0;
	update_peak_states(env);
	return propagate_backedges(env, visit);
}

/* Look up the bpf_scc_visit instance corresponding to @st callchain
 * and add @backedge to visit->backedges. @st callchain must exist.
 */
static int add_scc_backedge(struct bpf_verifier_env *env,
			    struct bpf_verifier_state *st,
			    struct bpf_scc_backedge *backedge)
{
	struct bpf_scc_callchain *callchain = &env->callchain_buf;
	struct bpf_scc_visit *visit;

	if (!compute_scc_callchain(env, st, callchain)) {
		verifier_bug(env, "add backedge: no SCC in verification path, insn_idx %d",
			     st->insn_idx);
		return -EFAULT;
	}
	visit = scc_visit_lookup(env, callchain);
	if (!visit) {
		verifier_bug(env, "add backedge: no visit info for call chain %s",
			     format_callchain(env, callchain));
		return -EFAULT;
	}
	if (env->log.level & BPF_LOG_LEVEL2)
		verbose(env, "SCC backedge %s\n", format_callchain(env, callchain));
	backedge->next = visit->backedges;
	visit->backedges = backedge;
	visit->num_backedges++;
	env->num_backedges++;
	update_peak_states(env);
	return 0;
}

/* bpf_reg_state->live marks for registers in a state @st are incomplete
 * if state @st is in some SCC and not all execution paths starting at this
 * SCC are fully explored.
 */
static bool incomplete_read_marks(struct bpf_verifier_env *env,
				  struct bpf_verifier_state *st)
{
	struct bpf_scc_callchain *callchain = &env->callchain_buf;
	struct bpf_scc_visit *visit;

	if (!compute_scc_callchain(env, st, callchain))
		return false;
	visit = scc_visit_lookup(env, callchain);
	if (!visit)
		return false;
	return !!visit->backedges;
}

int bpf_update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
{
	struct bpf_verifier_state_list *sl = NULL, *parent_sl;
	struct bpf_verifier_state *parent;
	int err;

	while (st) {
		u32 br = --st->branches;

		/* verifier_bug_if(br > 1, ...) technically makes sense here,
		 * but see comment in push_stack(), hence:
		 */
		verifier_bug_if((int)br < 0, env, "%s:branches_to_explore=%d", __func__, br);
		if (br)
			break;
		err = maybe_exit_scc(env, st);
		if (err)
			return err;
		parent = st->parent;
		parent_sl = state_parent_as_list(st);
		if (sl)
			maybe_free_verifier_state(env, sl);
		st = parent;
		sl = parent_sl;
	}
	return 0;
}
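
/* For illustration: given a parent chain P (branches=2) <- S (branches=1),
 * calling bpf_update_branch_counts() on S drops S->branches to 0 and runs
 * the SCC exit check for S, then moves on to P. P->branches drops to 1
 * (another branch of P is still being explored), so the walk stops and P
 * is kept. Had P->branches also reached 0, the list wrapping P would be
 * handed to maybe_free_verifier_state() only after P's parent pointer had
 * been read, keeping the walk safe.
 */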

/* check %cur's range satisfies %old's */
static bool range_within(const struct bpf_reg_state *old,
			 const struct bpf_reg_state *cur)
{
	return old->umin_value <= cur->umin_value &&
	       old->umax_value >= cur->umax_value &&
	       old->smin_value <= cur->smin_value &&
	       old->smax_value >= cur->smax_value &&
	       old->u32_min_value <= cur->u32_min_value &&
	       old->u32_max_value >= cur->u32_max_value &&
	       old->s32_min_value <= cur->s32_min_value &&
	       old->s32_max_value >= cur->s32_max_value;
}
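
/* E.g. an old register with u64 bounds [0, 100] is satisfied by a cur
 * register with [10, 20] (assuming the signed and 32-bit bounds nest the
 * same way): every value cur can hold was already accounted for when old
 * was verified. The reverse, old [10, 20] vs cur [0, 100], fails, since
 * cur could hold e.g. 5.
 */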

/* If in the old state two registers had the same id, then they need to have
 * the same id in the new state as well.  But that id could be different from
 * the old state, so we need to track the mapping from old to new ids.
 * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent
 * regs with old id 5 must also have new id 9 for the new state to be safe.  But
 * regs with a different old id could still have new id 9, we don't care about
 * that.
 * So we look through our idmap to see if this old id has been seen before.  If
 * so, we require the new id to match; otherwise, we add the id pair to the map.
 */
static bool check_ids(u32 old_id, u32 cur_id, struct bpf_idmap *idmap)
{
	struct bpf_id_pair *map = idmap->map;
	unsigned int i;

	/* either both IDs should be set or both should be zero */
	if (!!old_id != !!cur_id)
		return false;

	if (old_id == 0) /* cur_id == 0 as well */
		return true;

	for (i = 0; i < idmap->cnt; i++) {
		if (map[i].old == old_id)
			return map[i].cur == cur_id;
		if (map[i].cur == cur_id)
			return false;
	}

	/* Reached the end of known mappings; haven't seen this id before */
	if (idmap->cnt < BPF_ID_MAP_SIZE) {
		map[idmap->cnt].old = old_id;
		map[idmap->cnt].cur = cur_id;
		idmap->cnt++;
		return true;
	}

	/* We ran out of idmap slots, which should be impossible */
	WARN_ON_ONCE(1);
	return false;
}
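
/* For illustration, starting from an empty idmap:
 *   check_ids(5, 9, idmap) -> true,  records the pair 5 -> 9;
 *   check_ids(5, 9, idmap) -> true,  mapping already agrees;
 *   check_ids(5, 7, idmap) -> false, old id 5 is already bound to 9;
 *   check_ids(3, 9, idmap) -> false, cur id 9 is already claimed by old id 5.
 */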

/*
 * Compare scalar register IDs for state equivalence.
 *
 * When old_id == 0, the old register is independent - not linked to any
 * other register. Any linking in the current state only adds constraints,
 * making it more restrictive. Since the old state didn't rely on any ID
 * relationships for this register, it's always safe to accept cur regardless
 * of its ID. Hence, return true immediately.
 *
 * When old_id != 0 but cur_id == 0, we need to ensure that different
 * independent registers in cur don't incorrectly satisfy the ID matching
 * requirements of linked registers in old.
 *
 * Example: if old has r6.id=X and r7.id=X (linked), but cur has r6.id=0
 * and r7.id=0 (both independent), without temp IDs both would map old_id=X
 * to cur_id=0 and pass. With temp IDs: r6 maps X->temp1, r7 tries to map
 * X->temp2, but X is already mapped to temp1, so the check fails correctly.
 *
 * When old_id has BPF_ADD_CONST set, the compound id (base | flag) and the
 * base id (flag stripped) must both map consistently. Example: old has
 * r2.id=A, r3.id=A|flag (r3 = r2 + delta), cur has r2.id=B, r3.id=C|flag
 * (r3 derived from unrelated r4). Without the base check, idmap gets two
 * independent entries A->B and A|flag->C|flag, missing that A->C conflicts
 * with A->B. The base ID cross-check catches this.
 */
static bool check_scalar_ids(u32 old_id, u32 cur_id, struct bpf_idmap *idmap)
{
	if (!old_id)
		return true;

	cur_id = cur_id ? cur_id : ++idmap->tmp_id_gen;

	if (!check_ids(old_id, cur_id, idmap))
		return false;
	if (old_id & BPF_ADD_CONST) {
		old_id &= ~BPF_ADD_CONST;
		cur_id &= ~BPF_ADD_CONST;
		if (!check_ids(old_id, cur_id, idmap))
			return false;
	}
	return true;
}

static void __clean_func_state(struct bpf_verifier_env *env,
			       struct bpf_func_state *st,
			       u16 live_regs, int frame)
{
	int i, j;

	for (i = 0; i < BPF_REG_FP; i++) {
		/* liveness must not touch this register anymore */
		if (!(live_regs & BIT(i)))
			/* since the register is unused, clear its state
			 * to make further comparison simpler
			 */
			bpf_mark_reg_not_init(env, &st->regs[i]);
	}

	/*
	 * Clean dead 4-byte halves within each SPI independently.
	 * half_spi 2*i   → lower half: slot_type[0..3] (closer to FP)
	 * half_spi 2*i+1 → upper half: slot_type[4..7] (farther from FP)
	 */
	for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) {
		bool lo_live = bpf_stack_slot_alive(env, frame, i * 2);
		bool hi_live = bpf_stack_slot_alive(env, frame, i * 2 + 1);

		if (!hi_live || !lo_live) {
			int start = !lo_live ? 0 : BPF_REG_SIZE / 2;
			int end = !hi_live ? BPF_REG_SIZE : BPF_REG_SIZE / 2;
			u8 stype = st->stack[i].slot_type[7];

			/*
			 * Don't clear special slots.
			 * destroy_if_dynptr_stack_slot() needs STACK_DYNPTR to
			 * detect overwrites and invalidate associated data slices.
			 * is_iter_reg_valid_uninit() and is_irq_flag_reg_valid_uninit()
			 * check for their respective slot types to detect double-create.
			 */
			if (stype == STACK_DYNPTR || stype == STACK_ITER ||
			    stype == STACK_IRQ_FLAG)
				continue;

			/*
			 * Only destroy spilled_ptr when the hi half is dead.
			 * If the hi half is still live with STACK_SPILL, the
			 * spilled_ptr metadata is needed for correct state
			 * comparison in stacksafe().
			 * is_spilled_reg() uses slot_type[7], while
			 * is_spilled_scalar_after() checks either slot_type[0] or [4].
			 */
			if (!hi_live) {
				struct bpf_reg_state *spill = &st->stack[i].spilled_ptr;

				if (lo_live && stype == STACK_SPILL) {
					u8 val = STACK_MISC;

					/*
					 * An 8-byte spill of scalar 0 whose hi half
					 * is dead should become STACK_ZERO in the
					 * lo 4 bytes.
					 */
					if (bpf_register_is_null(spill))
						val = STACK_ZERO;
					for (j = 0; j < 4; j++) {
						u8 *t = &st->stack[i].slot_type[j];

						if (*t == STACK_SPILL)
							*t = val;
					}
				}
				bpf_mark_reg_not_init(env, spill);
			}
			for (j = start; j < end; j++)
				st->stack[i].slot_type[j] = STACK_POISON;
		}
	}
}
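
/* For illustration: an 8-byte spill of a known-zero scalar occupies a slot
 * typed {SPILL x 8}. If liveness reports the hi half dead but the lo half
 * live, the slot becomes {ZERO x 4, POISON x 4} and spilled_ptr is marked
 * NOT_INIT; a non-zero scalar spill would become {MISC x 4, POISON x 4}
 * instead. A fully dead slot becomes {POISON x 8}.
 */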

static int clean_verifier_state(struct bpf_verifier_env *env,
				struct bpf_verifier_state *st)
{
	int i, err;

	err = bpf_live_stack_query_init(env, st);
	if (err)
		return err;
	for (i = 0; i <= st->curframe; i++) {
		u32 ip = bpf_frame_insn_idx(st, i);
		u16 live_regs = env->insn_aux_data[ip].live_regs_before;

		__clean_func_state(env, st->frame[i], live_regs, i);
	}
	return 0;
}

static bool regs_exact(const struct bpf_reg_state *rold,
		       const struct bpf_reg_state *rcur,
		       struct bpf_idmap *idmap)
{
	return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
	       check_ids(rold->id, rcur->id, idmap) &&
	       check_ids(rold->ref_obj_id, rcur->ref_obj_id, idmap);
}

enum exact_level {
	NOT_EXACT,
	EXACT,
	RANGE_WITHIN
};

/* Returns true if (rold safe implies rcur safe) */
static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
		    struct bpf_reg_state *rcur, struct bpf_idmap *idmap,
		    enum exact_level exact)
{
	if (exact == EXACT)
		return regs_exact(rold, rcur, idmap);

	if (rold->type == NOT_INIT)
		/* explored state can't have used this */
		return true;

	/* Enforce that register types have to match exactly, including their
	 * modifiers (like PTR_MAYBE_NULL, MEM_RDONLY, etc), as a general
	 * rule.
	 *
	 * One can make a point that using a pointer register as unbounded
	 * SCALAR would be technically acceptable, but this could lead to
	 * pointer leaks because scalars are allowed to leak while pointers
	 * are not. We could make this safe in special cases if root is
	 * calling us, but it's probably not worth the hassle.
	 *
	 * Also, register types that are *not* MAYBE_NULL could technically be
	 * safe to use as their MAYBE_NULL variants (e.g., PTR_TO_MAP_VALUE
	 * is safe to be used as PTR_TO_MAP_VALUE_OR_NULL, provided both point
	 * to the same map).
	 * However, if the old MAYBE_NULL register then got NULL checked,
	 * doing so could have affected others with the same id, and we can't
	 * check for that because we lost the id when we converted to
	 * a non-MAYBE_NULL variant.
	 * So, as a general rule we don't allow mixing MAYBE_NULL and
	 * non-MAYBE_NULL registers as well.
	 */
	if (rold->type != rcur->type)
		return false;

	switch (base_type(rold->type)) {
	case SCALAR_VALUE:
		if (env->explore_alu_limits) {
			/* explore_alu_limits disables tnum_in() and range_within()
			 * logic and requires everything to be strict
			 */
			return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
			       check_scalar_ids(rold->id, rcur->id, idmap);
		}
		if (!rold->precise && exact == NOT_EXACT)
			return true;
		/*
		 * Linked register tracking uses rold->id to detect relationships.
		 * When rold->id == 0, the register is independent and any linking
		 * in rcur only adds constraints. When rold->id != 0, we must verify
		 * id mapping and (for BPF_ADD_CONST) offset consistency.
		 *
		 * +------------------+-----------+------------------+---------------+
		 * |                  | rold->id  | rold + ADD_CONST | rold->id == 0 |
		 * |------------------+-----------+------------------+---------------|
		 * | rcur->id         | range,ids | false            | range         |
		 * | rcur + ADD_CONST | false     | range,ids,off    | range         |
		 * | rcur->id == 0    | range,ids | false            | range         |
		 * +------------------+-----------+------------------+---------------+
		 *
		 * Why check_ids() for scalar registers?
		 *
		 * Consider the following BPF code:
		 *   1: r6 = ... unbound scalar, ID=a ...
		 *   2: r7 = ... unbound scalar, ID=b ...
		 *   3: if (r6 > r7) goto +1
		 *   4: r6 = r7
		 *   5: if (r6 > X) goto ...
		 *   6: ... memory operation using r7 ...
		 *
		 * First verification path is [1-6]:
		 * - at (4) same bpf_reg_state::id (b) would be assigned to r6 and r7;
		 * - at (5) r6 would be marked <= X, sync_linked_regs() would also mark
		 *   r7 <= X, because r6 and r7 share same id.
		 * Next verification path is [1-4, 6].
		 *
		 * Instruction (6) would be reached in two states:
		 *   I.  r6{.id=b}, r7{.id=b} via path 1-6;
		 *   II. r6{.id=a}, r7{.id=b} via path 1-4, 6.
		 *
		 * Use check_ids() to distinguish these states.
		 * ---
		 * Also verify that new value satisfies old value range knowledge.
		 */

		/*
		 * ADD_CONST flags must match exactly: BPF_ADD_CONST32 and
		 * BPF_ADD_CONST64 have different linking semantics in
		 * sync_linked_regs() (alu32 zero-extends, alu64 does not),
		 * so pruning across different flag types is unsafe.
		 */
		if (rold->id &&
		    (rold->id & BPF_ADD_CONST) != (rcur->id & BPF_ADD_CONST))
			return false;

		/* Both have offset linkage: offsets must match */
		if ((rold->id & BPF_ADD_CONST) && rold->delta != rcur->delta)
			return false;

		if (!check_scalar_ids(rold->id, rcur->id, idmap))
			return false;

		return range_within(rold, rcur) && tnum_in(rold->var_off, rcur->var_off);
	case PTR_TO_MAP_KEY:
	case PTR_TO_MAP_VALUE:
	case PTR_TO_MEM:
	case PTR_TO_BUF:
	case PTR_TO_TP_BUFFER:
		/* If the new min/max/var_off satisfy the old ones and
		 * everything else matches, we are OK.
		 */
		return memcmp(rold, rcur, offsetof(struct bpf_reg_state, var_off)) == 0 &&
		       range_within(rold, rcur) &&
		       tnum_in(rold->var_off, rcur->var_off) &&
		       check_ids(rold->id, rcur->id, idmap) &&
		       check_ids(rold->ref_obj_id, rcur->ref_obj_id, idmap);
	case PTR_TO_PACKET_META:
	case PTR_TO_PACKET:
		/* We must have at least as much range as the old ptr
		 * did, so that any accesses which were safe before are
		 * still safe.  This is true even if old range < old off,
		 * since someone could have accessed through (ptr - k), or
		 * even done ptr -= k in a register, to get a safe access.
		 */
		if (rold->range < 0 || rcur->range < 0) {
			/* special case for [BEYOND|AT]_PKT_END */
			if (rold->range != rcur->range)
				return false;
		} else if (rold->range > rcur->range) {
			return false;
		}
		/* id relations must be preserved */
		if (!check_ids(rold->id, rcur->id, idmap))
			return false;
		/* new val must satisfy old val knowledge */
		return range_within(rold, rcur) &&
		       tnum_in(rold->var_off, rcur->var_off);
	case PTR_TO_STACK:
		/* two stack pointers are equal only if they're pointing to
		 * the same stack frame, since fp-8 in foo != fp-8 in bar
		 */
		return regs_exact(rold, rcur, idmap) && rold->frameno == rcur->frameno;
	case PTR_TO_ARENA:
		return true;
	case PTR_TO_INSN:
		return memcmp(rold, rcur, offsetof(struct bpf_reg_state, var_off)) == 0 &&
		       range_within(rold, rcur) && tnum_in(rold->var_off, rcur->var_off);
	default:
		return regs_exact(rold, rcur, idmap);
	}
}
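
/* For illustration of the scalar case: an old precise scalar with range
 * [0, 10] prunes a cur scalar with range [2, 5] (range_within() and
 * tnum_in() both hold), but not one with range [5, 15]. An imprecise old
 * scalar accepts any cur scalar at NOT_EXACT level, since no verification
 * decision depended on its value.
 */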

static struct bpf_reg_state unbound_reg;

static __init int unbound_reg_init(void)
{
	bpf_mark_reg_unknown_imprecise(&unbound_reg);
	return 0;
}
late_initcall(unbound_reg_init);

static bool is_spilled_scalar_after(const struct bpf_stack_state *stack, int im)
{
	return stack->slot_type[im] == STACK_SPILL &&
	       stack->spilled_ptr.type == SCALAR_VALUE;
}

static bool is_stack_misc_after(struct bpf_verifier_env *env,
				struct bpf_stack_state *stack, int im)
{
	u32 i;

	for (i = im; i < ARRAY_SIZE(stack->slot_type); ++i) {
		if ((stack->slot_type[i] == STACK_MISC) ||
		    ((stack->slot_type[i] == STACK_INVALID || stack->slot_type[i] == STACK_POISON) &&
		     env->allow_uninit_stack))
			continue;
		return false;
	}

	return true;
}

static struct bpf_reg_state *scalar_reg_for_stack(struct bpf_verifier_env *env,
						  struct bpf_stack_state *stack, int im)
{
	if (is_spilled_scalar_after(stack, im))
		return &stack->spilled_ptr;

	if (is_stack_misc_after(env, stack, im))
		return &unbound_reg;

	return NULL;
}
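
/* For illustration: a lo-half query (im == 0) on a slot typed {MISC x 8}
 * returns &unbound_reg, since a load from MISC bytes produces an unbound
 * scalar; a slot typed {SPILL x 8} holding a scalar returns &spilled_ptr;
 * a slot holding a spilled pointer returns NULL, and stacksafe() falls
 * back to the byte-by-byte slot_type comparison.
 */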

static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
		      struct bpf_func_state *cur, struct bpf_idmap *idmap,
		      enum exact_level exact)
{
	int i, spi;

	/* walk slots of the explored stack and ignore any additional
	 * slots in the current stack, since explored(safe) state
	 * didn't use them
	 */
	for (i = 0; i < old->allocated_stack; i++) {
		struct bpf_reg_state *old_reg, *cur_reg;
		int im = i % BPF_REG_SIZE;

		spi = i / BPF_REG_SIZE;

		if (exact == EXACT) {
			u8 old_type = old->stack[spi].slot_type[i % BPF_REG_SIZE];
			u8 cur_type = i < cur->allocated_stack ?
				      cur->stack[spi].slot_type[i % BPF_REG_SIZE] : STACK_INVALID;

			/* STACK_INVALID and STACK_POISON are equivalent for pruning */
			if (old_type == STACK_POISON)
				old_type = STACK_INVALID;
			if (cur_type == STACK_POISON)
				cur_type = STACK_INVALID;
			if (i >= cur->allocated_stack || old_type != cur_type)
				return false;
		}

		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID ||
		    old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_POISON)
			continue;

		if (env->allow_uninit_stack &&
		    old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC)
			continue;

		/* explored stack has more populated slots than current stack
		 * and these slots were used
		 */
		if (i >= cur->allocated_stack)
			return false;

		/*
		 * 64 and 32-bit scalar spills vs MISC/INVALID slots and vice versa.
		 * A load from MISC/INVALID slots produces an unbound scalar.
		 * Construct a fake register for such a stack slot and call
		 * regsafe() to ensure scalar ids are compared.
		 */
		if (im == 0 || im == 4) {
			old_reg = scalar_reg_for_stack(env, &old->stack[spi], im);
			cur_reg = scalar_reg_for_stack(env, &cur->stack[spi], im);
			if (old_reg && cur_reg) {
				if (!regsafe(env, old_reg, cur_reg, idmap, exact))
					return false;
				i += (im == 0 ? BPF_REG_SIZE - 1 : 3);
				continue;
			}
		}

		/* if old state was safe with misc data in the stack
		 * it will be safe with zero-initialized stack.
		 * The opposite is not true
		 */
		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC &&
		    cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO)
			continue;
		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
		    cur->stack[spi].slot_type[i % BPF_REG_SIZE])
			/* Ex: old explored (safe) state has STACK_SPILL in
			 * this stack slot, but current has STACK_MISC ->
			 * these verifier states are not equivalent,
			 * return false to continue verification of this path
			 */
			return false;
		if (i % BPF_REG_SIZE != BPF_REG_SIZE - 1)
			continue;
		/* Both old and cur have the same slot_type */
		switch (old->stack[spi].slot_type[BPF_REG_SIZE - 1]) {
		case STACK_SPILL:
			/* when explored and current stack slots are both storing
			 * spilled registers, check that stored pointer types
			 * are the same as well.
			 * Ex: explored safe path could have stored
			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
			 * but current path has stored:
			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
			 * such verifier states are not equivalent.
			 * return false to continue verification of this path
			 */
			if (!regsafe(env, &old->stack[spi].spilled_ptr,
				     &cur->stack[spi].spilled_ptr, idmap, exact))
				return false;
			break;
		case STACK_DYNPTR:
			old_reg = &old->stack[spi].spilled_ptr;
			cur_reg = &cur->stack[spi].spilled_ptr;
			if (old_reg->dynptr.type != cur_reg->dynptr.type ||
			    old_reg->dynptr.first_slot != cur_reg->dynptr.first_slot ||
			    !check_ids(old_reg->ref_obj_id, cur_reg->ref_obj_id, idmap))
				return false;
			break;
		case STACK_ITER:
			old_reg = &old->stack[spi].spilled_ptr;
			cur_reg = &cur->stack[spi].spilled_ptr;
			/* iter.depth is not compared between states as it
			 * doesn't matter for correctness and would otherwise
			 * prevent convergence; we maintain it only to prevent
			 * the infinite loop check from triggering, see
			 * iter_active_depths_differ()
			 */
			if (old_reg->iter.btf != cur_reg->iter.btf ||
			    old_reg->iter.btf_id != cur_reg->iter.btf_id ||
			    old_reg->iter.state != cur_reg->iter.state ||
			    /* ignore {old_reg,cur_reg}->iter.depth, see above */
			    !check_ids(old_reg->ref_obj_id, cur_reg->ref_obj_id, idmap))
				return false;
			break;
		case STACK_IRQ_FLAG:
			old_reg = &old->stack[spi].spilled_ptr;
			cur_reg = &cur->stack[spi].spilled_ptr;
			if (!check_ids(old_reg->ref_obj_id, cur_reg->ref_obj_id, idmap) ||
			    old_reg->irq.kfunc_class != cur_reg->irq.kfunc_class)
				return false;
			break;
		case STACK_MISC:
		case STACK_ZERO:
		case STACK_INVALID:
		case STACK_POISON:
			continue;
		/* Ensure that new unhandled slot types return false by default */
		default:
			return false;
		}
	}
	return true;
}
840*c82834a5SAlexei Starovoitov 
841*c82834a5SAlexei Starovoitov static bool refsafe(struct bpf_verifier_state *old, struct bpf_verifier_state *cur,
842*c82834a5SAlexei Starovoitov 		    struct bpf_idmap *idmap)
843*c82834a5SAlexei Starovoitov {
844*c82834a5SAlexei Starovoitov 	int i;
845*c82834a5SAlexei Starovoitov 
846*c82834a5SAlexei Starovoitov 	if (old->acquired_refs != cur->acquired_refs)
847*c82834a5SAlexei Starovoitov 		return false;
848*c82834a5SAlexei Starovoitov 
849*c82834a5SAlexei Starovoitov 	if (old->active_locks != cur->active_locks)
850*c82834a5SAlexei Starovoitov 		return false;
851*c82834a5SAlexei Starovoitov 
852*c82834a5SAlexei Starovoitov 	if (old->active_preempt_locks != cur->active_preempt_locks)
853*c82834a5SAlexei Starovoitov 		return false;
854*c82834a5SAlexei Starovoitov 
855*c82834a5SAlexei Starovoitov 	if (old->active_rcu_locks != cur->active_rcu_locks)
856*c82834a5SAlexei Starovoitov 		return false;
857*c82834a5SAlexei Starovoitov 
858*c82834a5SAlexei Starovoitov 	if (!check_ids(old->active_irq_id, cur->active_irq_id, idmap))
859*c82834a5SAlexei Starovoitov 		return false;
860*c82834a5SAlexei Starovoitov 
861*c82834a5SAlexei Starovoitov 	if (!check_ids(old->active_lock_id, cur->active_lock_id, idmap) ||
862*c82834a5SAlexei Starovoitov 	    old->active_lock_ptr != cur->active_lock_ptr)
863*c82834a5SAlexei Starovoitov 		return false;
864*c82834a5SAlexei Starovoitov 
865*c82834a5SAlexei Starovoitov 	for (i = 0; i < old->acquired_refs; i++) {
866*c82834a5SAlexei Starovoitov 		if (!check_ids(old->refs[i].id, cur->refs[i].id, idmap) ||
867*c82834a5SAlexei Starovoitov 		    old->refs[i].type != cur->refs[i].type)
868*c82834a5SAlexei Starovoitov 			return false;
869*c82834a5SAlexei Starovoitov 		switch (old->refs[i].type) {
870*c82834a5SAlexei Starovoitov 		case REF_TYPE_PTR:
871*c82834a5SAlexei Starovoitov 		case REF_TYPE_IRQ:
872*c82834a5SAlexei Starovoitov 			break;
873*c82834a5SAlexei Starovoitov 		case REF_TYPE_LOCK:
874*c82834a5SAlexei Starovoitov 		case REF_TYPE_RES_LOCK:
875*c82834a5SAlexei Starovoitov 		case REF_TYPE_RES_LOCK_IRQ:
876*c82834a5SAlexei Starovoitov 			if (old->refs[i].ptr != cur->refs[i].ptr)
877*c82834a5SAlexei Starovoitov 				return false;
878*c82834a5SAlexei Starovoitov 			break;
879*c82834a5SAlexei Starovoitov 		default:
880*c82834a5SAlexei Starovoitov 			WARN_ONCE(1, "Unhandled enum type for reference state: %d\n", old->refs[i].type);
881*c82834a5SAlexei Starovoitov 			return false;
882*c82834a5SAlexei Starovoitov 		}
883*c82834a5SAlexei Starovoitov 	}
884*c82834a5SAlexei Starovoitov 
885*c82834a5SAlexei Starovoitov 	return true;
886*c82834a5SAlexei Starovoitov }
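
/* Illustrative sketch (an addition, hypothetical program): refsafe() compares
 * reference identity via check_ids() rather than raw id values, because ids
 * are assigned per acquisition and may differ between otherwise equivalent
 * paths. E.g., assuming a map value 'v' with a struct bpf_spin_lock member:
 *
 *     if (bpf_get_prandom_u32() & 1)
 *         bpf_spin_lock(&v->lock);   // lock id, say, 1 on this path
 *     else
 *         bpf_spin_lock(&v->lock);   // lock id, say, 2 on this path
 *     // at the join point the states can still be equivalent once the
 *     // idmap records the pairing old id 1 <-> cur id 2
 */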
887*c82834a5SAlexei Starovoitov 
888*c82834a5SAlexei Starovoitov /* compare two verifier states
889*c82834a5SAlexei Starovoitov  *
890*c82834a5SAlexei Starovoitov  * all states stored in state_list are known to be valid, since
891*c82834a5SAlexei Starovoitov  * verifier reached 'bpf_exit' instruction through them
892*c82834a5SAlexei Starovoitov  *
893*c82834a5SAlexei Starovoitov  * this function is called when the verifier explores different branches of
894*c82834a5SAlexei Starovoitov  * execution popped from the state stack. If it sees an old state that has
895*c82834a5SAlexei Starovoitov  * a more strict register state and a more strict stack state, then this
896*c82834a5SAlexei Starovoitov  * execution branch doesn't need to be explored further, since the verifier
897*c82834a5SAlexei Starovoitov  * already concluded that the more strict state leads to a valid finish.
898*c82834a5SAlexei Starovoitov  *
899*c82834a5SAlexei Starovoitov  * Therefore two states are equivalent if the explored register state and
900*c82834a5SAlexei Starovoitov  * the explored stack state are more conservative than the current ones.
901*c82834a5SAlexei Starovoitov  * Example:
902*c82834a5SAlexei Starovoitov  *       explored                   current
903*c82834a5SAlexei Starovoitov  * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
904*c82834a5SAlexei Starovoitov  * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
905*c82834a5SAlexei Starovoitov  *
906*c82834a5SAlexei Starovoitov  * In other words, if the current stack state (the one being explored) has
907*c82834a5SAlexei Starovoitov  * more valid slots than the old one that already passed validation, the
908*c82834a5SAlexei Starovoitov  * verifier can stop exploring and conclude that the current state is valid too.
909*c82834a5SAlexei Starovoitov  *
910*c82834a5SAlexei Starovoitov  * Similarly with registers. If the explored state has a register type as
911*c82834a5SAlexei Starovoitov  * invalid whereas the register type in the current state is meaningful, it
912*c82834a5SAlexei Starovoitov  * means that the current state will reach the 'bpf_exit' instruction safely.
913*c82834a5SAlexei Starovoitov  */
914*c82834a5SAlexei Starovoitov static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_state *old,
915*c82834a5SAlexei Starovoitov 			      struct bpf_func_state *cur, u32 insn_idx, enum exact_level exact)
916*c82834a5SAlexei Starovoitov {
917*c82834a5SAlexei Starovoitov 	u16 live_regs = env->insn_aux_data[insn_idx].live_regs_before;
918*c82834a5SAlexei Starovoitov 	u16 i;
919*c82834a5SAlexei Starovoitov 
920*c82834a5SAlexei Starovoitov 	if (old->callback_depth > cur->callback_depth)
921*c82834a5SAlexei Starovoitov 		return false;
922*c82834a5SAlexei Starovoitov 
923*c82834a5SAlexei Starovoitov 	for (i = 0; i < MAX_BPF_REG; i++)
924*c82834a5SAlexei Starovoitov 		if (((1 << i) & live_regs) &&
925*c82834a5SAlexei Starovoitov 		    !regsafe(env, &old->regs[i], &cur->regs[i],
926*c82834a5SAlexei Starovoitov 			     &env->idmap_scratch, exact))
927*c82834a5SAlexei Starovoitov 			return false;
928*c82834a5SAlexei Starovoitov 
929*c82834a5SAlexei Starovoitov 	if (!stacksafe(env, old, cur, &env->idmap_scratch, exact))
930*c82834a5SAlexei Starovoitov 		return false;
931*c82834a5SAlexei Starovoitov 
932*c82834a5SAlexei Starovoitov 	return true;
933*c82834a5SAlexei Starovoitov }
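
/* Illustrative sketch (an addition, hypothetical program): only registers that
 * are live before @insn_idx participate in the comparison above, so paths that
 * differ solely in dead registers can still prune each other:
 *
 *     if (bpf_get_prandom_u32())
 *         r6 = 1;
 *     else
 *         r6 = 2;
 *     r6 = 0;    // join point: r6 differs on entry, but it is overwritten
 *                // before any read, so it is absent from live_regs_before
 *                // and is ignored by func_states_equal()
 */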
934*c82834a5SAlexei Starovoitov 
935*c82834a5SAlexei Starovoitov static void reset_idmap_scratch(struct bpf_verifier_env *env)
936*c82834a5SAlexei Starovoitov {
937*c82834a5SAlexei Starovoitov 	struct bpf_idmap *idmap = &env->idmap_scratch;
938*c82834a5SAlexei Starovoitov 
939*c82834a5SAlexei Starovoitov 	idmap->tmp_id_gen = env->id_gen;
940*c82834a5SAlexei Starovoitov 	idmap->cnt = 0;
941*c82834a5SAlexei Starovoitov }
942*c82834a5SAlexei Starovoitov 
943*c82834a5SAlexei Starovoitov static bool states_equal(struct bpf_verifier_env *env,
944*c82834a5SAlexei Starovoitov 			 struct bpf_verifier_state *old,
945*c82834a5SAlexei Starovoitov 			 struct bpf_verifier_state *cur,
946*c82834a5SAlexei Starovoitov 			 enum exact_level exact)
947*c82834a5SAlexei Starovoitov {
948*c82834a5SAlexei Starovoitov 	u32 insn_idx;
949*c82834a5SAlexei Starovoitov 	int i;
950*c82834a5SAlexei Starovoitov 
951*c82834a5SAlexei Starovoitov 	if (old->curframe != cur->curframe)
952*c82834a5SAlexei Starovoitov 		return false;
953*c82834a5SAlexei Starovoitov 
954*c82834a5SAlexei Starovoitov 	reset_idmap_scratch(env);
955*c82834a5SAlexei Starovoitov 
956*c82834a5SAlexei Starovoitov 	/* Verification state from speculative execution simulation
957*c82834a5SAlexei Starovoitov 	 * must never prune a non-speculative execution one.
958*c82834a5SAlexei Starovoitov 	 */
959*c82834a5SAlexei Starovoitov 	if (old->speculative && !cur->speculative)
960*c82834a5SAlexei Starovoitov 		return false;
961*c82834a5SAlexei Starovoitov 
962*c82834a5SAlexei Starovoitov 	if (old->in_sleepable != cur->in_sleepable)
963*c82834a5SAlexei Starovoitov 		return false;
964*c82834a5SAlexei Starovoitov 
965*c82834a5SAlexei Starovoitov 	if (!refsafe(old, cur, &env->idmap_scratch))
966*c82834a5SAlexei Starovoitov 		return false;
967*c82834a5SAlexei Starovoitov 
968*c82834a5SAlexei Starovoitov 	/* for states to be equal, callsites have to be the same
969*c82834a5SAlexei Starovoitov 	 * and all frame states need to be equivalent
970*c82834a5SAlexei Starovoitov 	 */
971*c82834a5SAlexei Starovoitov 	for (i = 0; i <= old->curframe; i++) {
972*c82834a5SAlexei Starovoitov 		insn_idx = bpf_frame_insn_idx(old, i);
973*c82834a5SAlexei Starovoitov 		if (old->frame[i]->callsite != cur->frame[i]->callsite)
974*c82834a5SAlexei Starovoitov 			return false;
975*c82834a5SAlexei Starovoitov 		if (!func_states_equal(env, old->frame[i], cur->frame[i], insn_idx, exact))
976*c82834a5SAlexei Starovoitov 			return false;
977*c82834a5SAlexei Starovoitov 	}
978*c82834a5SAlexei Starovoitov 	return true;
979*c82834a5SAlexei Starovoitov }
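
/* Illustrative sketch (an addition, hypothetical program): the callsite check
 * above means that an instruction inside a subprog reached via different call
 * sites never matches, even if all register and stack states look identical:
 *
 *     static __noinline int helper(int x) { return x + 1; }
 *     ...
 *     A: helper(1);    // states inside helper() carry callsite A
 *     B: helper(2);    // states inside helper() carry callsite B
 *
 * A checkpoint recorded while verifying the call at A cannot prune the path
 * entering helper() from B, since frame[i]->callsite differs.
 */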
980*c82834a5SAlexei Starovoitov 
981*c82834a5SAlexei Starovoitov /* find precise scalars in the previous equivalent state and
982*c82834a5SAlexei Starovoitov  * propagate them into the current state
983*c82834a5SAlexei Starovoitov  */
984*c82834a5SAlexei Starovoitov static int propagate_precision(struct bpf_verifier_env *env,
985*c82834a5SAlexei Starovoitov 			       const struct bpf_verifier_state *old,
986*c82834a5SAlexei Starovoitov 			       struct bpf_verifier_state *cur,
987*c82834a5SAlexei Starovoitov 			       bool *changed)
988*c82834a5SAlexei Starovoitov {
989*c82834a5SAlexei Starovoitov 	struct bpf_reg_state *state_reg;
990*c82834a5SAlexei Starovoitov 	struct bpf_func_state *state;
991*c82834a5SAlexei Starovoitov 	int i, err = 0, fr;
992*c82834a5SAlexei Starovoitov 	bool first;
993*c82834a5SAlexei Starovoitov 
994*c82834a5SAlexei Starovoitov 	for (fr = old->curframe; fr >= 0; fr--) {
995*c82834a5SAlexei Starovoitov 		state = old->frame[fr];
996*c82834a5SAlexei Starovoitov 		state_reg = state->regs;
997*c82834a5SAlexei Starovoitov 		first = true;
998*c82834a5SAlexei Starovoitov 		for (i = 0; i < BPF_REG_FP; i++, state_reg++) {
999*c82834a5SAlexei Starovoitov 			if (state_reg->type != SCALAR_VALUE ||
1000*c82834a5SAlexei Starovoitov 			    !state_reg->precise)
1001*c82834a5SAlexei Starovoitov 				continue;
1002*c82834a5SAlexei Starovoitov 			if (env->log.level & BPF_LOG_LEVEL2) {
1003*c82834a5SAlexei Starovoitov 				if (first)
1004*c82834a5SAlexei Starovoitov 					verbose(env, "frame %d: propagating r%d", fr, i);
1005*c82834a5SAlexei Starovoitov 				else
1006*c82834a5SAlexei Starovoitov 					verbose(env, ",r%d", i);
1007*c82834a5SAlexei Starovoitov 			}
1008*c82834a5SAlexei Starovoitov 			bpf_bt_set_frame_reg(&env->bt, fr, i);
1009*c82834a5SAlexei Starovoitov 			first = false;
1010*c82834a5SAlexei Starovoitov 		}
1011*c82834a5SAlexei Starovoitov 
1012*c82834a5SAlexei Starovoitov 		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
1013*c82834a5SAlexei Starovoitov 			if (!bpf_is_spilled_reg(&state->stack[i]))
1014*c82834a5SAlexei Starovoitov 				continue;
1015*c82834a5SAlexei Starovoitov 			state_reg = &state->stack[i].spilled_ptr;
1016*c82834a5SAlexei Starovoitov 			if (state_reg->type != SCALAR_VALUE ||
1017*c82834a5SAlexei Starovoitov 			    !state_reg->precise)
1018*c82834a5SAlexei Starovoitov 				continue;
1019*c82834a5SAlexei Starovoitov 			if (env->log.level & BPF_LOG_LEVEL2) {
1020*c82834a5SAlexei Starovoitov 				if (first)
1021*c82834a5SAlexei Starovoitov 					verbose(env, "frame %d: propagating fp%d",
1022*c82834a5SAlexei Starovoitov 						fr, (-i - 1) * BPF_REG_SIZE);
1023*c82834a5SAlexei Starovoitov 				else
1024*c82834a5SAlexei Starovoitov 					verbose(env, ",fp%d", (-i - 1) * BPF_REG_SIZE);
1025*c82834a5SAlexei Starovoitov 			}
1026*c82834a5SAlexei Starovoitov 			bpf_bt_set_frame_slot(&env->bt, fr, i);
1027*c82834a5SAlexei Starovoitov 			first = false;
1028*c82834a5SAlexei Starovoitov 		}
1029*c82834a5SAlexei Starovoitov 		if (!first && (env->log.level & BPF_LOG_LEVEL2))
1030*c82834a5SAlexei Starovoitov 			verbose(env, "\n");
1031*c82834a5SAlexei Starovoitov 	}
1032*c82834a5SAlexei Starovoitov 
1033*c82834a5SAlexei Starovoitov 	err = bpf_mark_chain_precision(env, cur, -1, changed);
1034*c82834a5SAlexei Starovoitov 	if (err < 0)
1035*c82834a5SAlexei Starovoitov 		return err;
1036*c82834a5SAlexei Starovoitov 
1037*c82834a5SAlexei Starovoitov 	return 0;
1038*c82834a5SAlexei Starovoitov }
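
/* Illustrative sketch (an addition, hypothetical program mirroring the
 * iterator-loop example further below): suppose the old state proved safety
 * only because r7 was precise:
 *
 *     r7 = -16                       // or -32 on another path
 *     <checkpoint>                   // old state: r7 marked precise
 *     r0 = r10
 *     r0 += r7
 *     r8 = *(u64 *)(r0 + 0)          // stack access, exact r7 value matters
 *
 * When a new path matches that checkpoint, the precise mark on r7 must be
 * replayed in the current state, so that bpf_mark_chain_precision() also
 * backtracks the current path's history and keeps r7 tracked exactly there.
 */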
1039*c82834a5SAlexei Starovoitov 
1040*c82834a5SAlexei Starovoitov #define MAX_BACKEDGE_ITERS 64
1041*c82834a5SAlexei Starovoitov 
1042*c82834a5SAlexei Starovoitov /* Propagate read and precision marks from visit->backedges[*].state->equal_state
1043*c82834a5SAlexei Starovoitov  * to the corresponding parent states of visit->backedges[*].state until a fixed point is reached,
1044*c82834a5SAlexei Starovoitov  * then free visit->backedges.
1045*c82834a5SAlexei Starovoitov  * After execution of this function, incomplete_read_marks() will return false
1046*c82834a5SAlexei Starovoitov  * for all states corresponding to @visit->callchain.
1047*c82834a5SAlexei Starovoitov  */
1048*c82834a5SAlexei Starovoitov static int propagate_backedges(struct bpf_verifier_env *env, struct bpf_scc_visit *visit)
1049*c82834a5SAlexei Starovoitov {
1050*c82834a5SAlexei Starovoitov 	struct bpf_scc_backedge *backedge;
1051*c82834a5SAlexei Starovoitov 	struct bpf_verifier_state *st;
1052*c82834a5SAlexei Starovoitov 	bool changed;
1053*c82834a5SAlexei Starovoitov 	int i, err;
1054*c82834a5SAlexei Starovoitov 
1055*c82834a5SAlexei Starovoitov 	i = 0;
1056*c82834a5SAlexei Starovoitov 	do {
1057*c82834a5SAlexei Starovoitov 		if (i++ > MAX_BACKEDGE_ITERS) {
1058*c82834a5SAlexei Starovoitov 			if (env->log.level & BPF_LOG_LEVEL2)
1059*c82834a5SAlexei Starovoitov 				verbose(env, "%s: too many iterations\n", __func__);
1060*c82834a5SAlexei Starovoitov 			for (backedge = visit->backedges; backedge; backedge = backedge->next)
1061*c82834a5SAlexei Starovoitov 				bpf_mark_all_scalars_precise(env, &backedge->state);
1062*c82834a5SAlexei Starovoitov 			break;
1063*c82834a5SAlexei Starovoitov 		}
1064*c82834a5SAlexei Starovoitov 		changed = false;
1065*c82834a5SAlexei Starovoitov 		for (backedge = visit->backedges; backedge; backedge = backedge->next) {
1066*c82834a5SAlexei Starovoitov 			st = &backedge->state;
1067*c82834a5SAlexei Starovoitov 			err = propagate_precision(env, st->equal_state, st, &changed);
1068*c82834a5SAlexei Starovoitov 			if (err)
1069*c82834a5SAlexei Starovoitov 				return err;
1070*c82834a5SAlexei Starovoitov 		}
1071*c82834a5SAlexei Starovoitov 	} while (changed);
1072*c82834a5SAlexei Starovoitov 
1073*c82834a5SAlexei Starovoitov 	bpf_free_backedges(visit);
1074*c82834a5SAlexei Starovoitov 	return 0;
1075*c82834a5SAlexei Starovoitov }
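
/* Note (an added illustration, not a guarantee of the exact order): the fixed
 * point is needed because backedge states can feed marks into each other's
 * parent chains. A precision mark discovered while processing one backedge may
 * enable a new mark the next time another backedge is processed, hence passes
 * repeat until one completes with 'changed' still false, or MAX_BACKEDGE_ITERS
 * is exceeded and all scalars are conservatively marked precise.
 */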
1076*c82834a5SAlexei Starovoitov 
1077*c82834a5SAlexei Starovoitov static bool states_maybe_looping(struct bpf_verifier_state *old,
1078*c82834a5SAlexei Starovoitov 				 struct bpf_verifier_state *cur)
1079*c82834a5SAlexei Starovoitov {
1080*c82834a5SAlexei Starovoitov 	struct bpf_func_state *fold, *fcur;
1081*c82834a5SAlexei Starovoitov 	int i, fr = cur->curframe;
1082*c82834a5SAlexei Starovoitov 
1083*c82834a5SAlexei Starovoitov 	if (old->curframe != fr)
1084*c82834a5SAlexei Starovoitov 		return false;
1085*c82834a5SAlexei Starovoitov 
1086*c82834a5SAlexei Starovoitov 	fold = old->frame[fr];
1087*c82834a5SAlexei Starovoitov 	fcur = cur->frame[fr];
1088*c82834a5SAlexei Starovoitov 	for (i = 0; i < MAX_BPF_REG; i++)
1089*c82834a5SAlexei Starovoitov 		if (memcmp(&fold->regs[i], &fcur->regs[i],
1090*c82834a5SAlexei Starovoitov 			   offsetof(struct bpf_reg_state, frameno)))
1091*c82834a5SAlexei Starovoitov 			return false;
1092*c82834a5SAlexei Starovoitov 	return true;
1093*c82834a5SAlexei Starovoitov }
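
/* Illustrative sketch (an addition, hypothetical program): this check is a
 * cheap filter, not a proof. For
 *
 *     loop:
 *     r0 += 0
 *     goto loop
 *
 * every visit to 'loop' sees byte-identical registers in the top frame, so
 * states_maybe_looping() returns true and the expensive
 * states_equal(..., EXACT) comparison in bpf_is_state_visited() confirms the
 * infinite loop.
 */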
1094*c82834a5SAlexei Starovoitov 
1095*c82834a5SAlexei Starovoitov /* bpf_is_state_visited() handles iter_next() (see process_iter_next_call() for
1096*c82834a5SAlexei Starovoitov  * terminology) calls specially: as opposed to bounded BPF loops, it *expects*
1097*c82834a5SAlexei Starovoitov  * states to match, which otherwise would look like an infinite loop. So while
1098*c82834a5SAlexei Starovoitov  * iter_next() calls are taken care of, we still need to be careful and
1099*c82834a5SAlexei Starovoitov  * prevent an erroneous and too eager declaration of "infinite loop" when
1100*c82834a5SAlexei Starovoitov  * iterators are involved.
1101*c82834a5SAlexei Starovoitov  *
1102*c82834a5SAlexei Starovoitov  * Here's a situation in pseudo-BPF assembly form:
1103*c82834a5SAlexei Starovoitov  *
1104*c82834a5SAlexei Starovoitov  *   0: again:                          ; set up iter_next() call args
1105*c82834a5SAlexei Starovoitov  *   1:   r1 = &it                      ; <CHECKPOINT HERE>
1106*c82834a5SAlexei Starovoitov  *   2:   call bpf_iter_num_next        ; this is iter_next() call
1107*c82834a5SAlexei Starovoitov  *   3:   if r0 == 0 goto done
1108*c82834a5SAlexei Starovoitov  *   4:   ... something useful here ...
1109*c82834a5SAlexei Starovoitov  *   5:   goto again                    ; another iteration
1110*c82834a5SAlexei Starovoitov  *   6: done:
1111*c82834a5SAlexei Starovoitov  *   7:   r1 = &it
1112*c82834a5SAlexei Starovoitov  *   8:   call bpf_iter_num_destroy     ; clean up iter state
1113*c82834a5SAlexei Starovoitov  *   9:   exit
1114*c82834a5SAlexei Starovoitov  *
1115*c82834a5SAlexei Starovoitov  * This is a typical loop. Let's assume that we have a prune point at 1:,
1116*c82834a5SAlexei Starovoitov  * before we get to `call bpf_iter_num_next` (e.g., because of that `goto
1117*c82834a5SAlexei Starovoitov  * again`, assuming other heuristics don't get in the way).
1118*c82834a5SAlexei Starovoitov  *
1119*c82834a5SAlexei Starovoitov  * When we first come to 1:, let's say we have some state X. We proceed
1120*c82834a5SAlexei Starovoitov  * to 2:, fork states, enqueue ACTIVE, validate NULL case successfully, exit.
1121*c82834a5SAlexei Starovoitov  * Now we come back to validate that forked ACTIVE state. We proceed through
1122*c82834a5SAlexei Starovoitov  * 3-5, come to goto, jump to 1:. Let's assume our state didn't change, so we
1123*c82834a5SAlexei Starovoitov  * are converging. But the problem is that we don't know that yet, as this
1124*c82834a5SAlexei Starovoitov  * convergence has to happen at iter_next() call site only. So if nothing is
1125*c82834a5SAlexei Starovoitov  * done, at 1: verifier will use bounded loop logic and declare infinite
1126*c82834a5SAlexei Starovoitov  * looping (and would be *technically* correct, if not for iterator's
1127*c82834a5SAlexei Starovoitov  * "eventual sticky NULL" contract, see process_iter_next_call()). But we
1128*c82834a5SAlexei Starovoitov  * don't want that. So what we do in process_iter_next_call(), when we go on
1129*c82834a5SAlexei Starovoitov  * another ACTIVE iteration, is bump slot->iter.depth to mark that it's
1130*c82834a5SAlexei Starovoitov  * a different iteration. So when we suspect an infinite loop, we additionally
1131*c82834a5SAlexei Starovoitov  * check if any of the *ACTIVE* iterator states depths differ. If yes, we
1132*c82834a5SAlexei Starovoitov  * pretend we are not looping and wait for next iter_next() call.
1133*c82834a5SAlexei Starovoitov  *
1134*c82834a5SAlexei Starovoitov  * This only applies to the ACTIVE state. In the DRAINED state we don't expect
1135*c82834a5SAlexei Starovoitov  * to loop, because that would actually mean an infinite loop, as the DRAINED
1136*c82834a5SAlexei Starovoitov  * state is "sticky", and so we'll keep returning to the same instruction with
1137*c82834a5SAlexei Starovoitov  * the same state (at least in one of the possible code paths).
1138*c82834a5SAlexei Starovoitov  *
1139*c82834a5SAlexei Starovoitov  * This approach allows us to keep the infinite loop heuristic even in the
1140*c82834a5SAlexei Starovoitov  * face of an active iterator. E.g., the C snippet below is and will be
1141*c82834a5SAlexei Starovoitov  * detected as infinitely looping:
1142*c82834a5SAlexei Starovoitov  *
1143*c82834a5SAlexei Starovoitov  *   struct bpf_iter_num it;
1144*c82834a5SAlexei Starovoitov  *   int *p, x;
1145*c82834a5SAlexei Starovoitov  *
1146*c82834a5SAlexei Starovoitov  *   bpf_iter_num_new(&it, 0, 10);
1147*c82834a5SAlexei Starovoitov  *   while ((p = bpf_iter_num_next(&it))) {
1148*c82834a5SAlexei Starovoitov  *       x = *p;
1149*c82834a5SAlexei Starovoitov  *       while (x--) {} // <<-- infinite loop here
1150*c82834a5SAlexei Starovoitov  *   }
1151*c82834a5SAlexei Starovoitov  *
1152*c82834a5SAlexei Starovoitov  */
1153*c82834a5SAlexei Starovoitov static bool iter_active_depths_differ(struct bpf_verifier_state *old, struct bpf_verifier_state *cur)
1154*c82834a5SAlexei Starovoitov {
1155*c82834a5SAlexei Starovoitov 	struct bpf_reg_state *slot, *cur_slot;
1156*c82834a5SAlexei Starovoitov 	struct bpf_func_state *state;
1157*c82834a5SAlexei Starovoitov 	int i, fr;
1158*c82834a5SAlexei Starovoitov 
1159*c82834a5SAlexei Starovoitov 	for (fr = old->curframe; fr >= 0; fr--) {
1160*c82834a5SAlexei Starovoitov 		state = old->frame[fr];
1161*c82834a5SAlexei Starovoitov 		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
1162*c82834a5SAlexei Starovoitov 			if (state->stack[i].slot_type[0] != STACK_ITER)
1163*c82834a5SAlexei Starovoitov 				continue;
1164*c82834a5SAlexei Starovoitov 
1165*c82834a5SAlexei Starovoitov 			slot = &state->stack[i].spilled_ptr;
1166*c82834a5SAlexei Starovoitov 			if (slot->iter.state != BPF_ITER_STATE_ACTIVE)
1167*c82834a5SAlexei Starovoitov 				continue;
1168*c82834a5SAlexei Starovoitov 
1169*c82834a5SAlexei Starovoitov 			cur_slot = &cur->frame[fr]->stack[i].spilled_ptr;
1170*c82834a5SAlexei Starovoitov 			if (cur_slot->iter.depth != slot->iter.depth)
1171*c82834a5SAlexei Starovoitov 				return true;
1172*c82834a5SAlexei Starovoitov 		}
1173*c82834a5SAlexei Starovoitov 	}
1174*c82834a5SAlexei Starovoitov 	return false;
1175*c82834a5SAlexei Starovoitov }
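
/* Illustrative sketch (an addition, hypothetical program): for
 *
 *     bpf_iter_num_new(&it, 0, 10);
 *     while ((p = bpf_iter_num_next(&it)))
 *         (void)*p;
 *     bpf_iter_num_destroy(&it);
 *
 * the checkpoint before the iter_next() call is revisited with an otherwise
 * identical register state, but the ACTIVE slot's iter.depth was bumped in
 * between, so iter_active_depths_differ() returns true and the bounded-loop
 * heuristic stays quiet.
 */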
1176*c82834a5SAlexei Starovoitov 
1177*c82834a5SAlexei Starovoitov static void mark_all_scalars_imprecise(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
1178*c82834a5SAlexei Starovoitov {
1179*c82834a5SAlexei Starovoitov 	struct bpf_func_state *func;
1180*c82834a5SAlexei Starovoitov 	struct bpf_reg_state *reg;
1181*c82834a5SAlexei Starovoitov 	int i, j;
1182*c82834a5SAlexei Starovoitov 
1183*c82834a5SAlexei Starovoitov 	for (i = 0; i <= st->curframe; i++) {
1184*c82834a5SAlexei Starovoitov 		func = st->frame[i];
1185*c82834a5SAlexei Starovoitov 		for (j = 0; j < BPF_REG_FP; j++) {
1186*c82834a5SAlexei Starovoitov 			reg = &func->regs[j];
1187*c82834a5SAlexei Starovoitov 			if (reg->type != SCALAR_VALUE)
1188*c82834a5SAlexei Starovoitov 				continue;
1189*c82834a5SAlexei Starovoitov 			reg->precise = false;
1190*c82834a5SAlexei Starovoitov 		}
1191*c82834a5SAlexei Starovoitov 		for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
1192*c82834a5SAlexei Starovoitov 			if (!bpf_is_spilled_reg(&func->stack[j]))
1193*c82834a5SAlexei Starovoitov 				continue;
1194*c82834a5SAlexei Starovoitov 			reg = &func->stack[j].spilled_ptr;
1195*c82834a5SAlexei Starovoitov 			if (reg->type != SCALAR_VALUE)
1196*c82834a5SAlexei Starovoitov 				continue;
1197*c82834a5SAlexei Starovoitov 			reg->precise = false;
1198*c82834a5SAlexei Starovoitov 		}
1199*c82834a5SAlexei Starovoitov 	}
1200*c82834a5SAlexei Starovoitov }
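
/* Note (an added illustration of intent): precision marks are path-specific.
 * A new checkpoint starts with all scalars imprecise so that pruning against
 * it is not needlessly strict; whatever precision the states descending from
 * it really need is re-established by bpf_mark_chain_precision().
 */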
1201*c82834a5SAlexei Starovoitov 
1202*c82834a5SAlexei Starovoitov int bpf_is_state_visited(struct bpf_verifier_env *env, int insn_idx)
1203*c82834a5SAlexei Starovoitov {
1204*c82834a5SAlexei Starovoitov 	struct bpf_verifier_state_list *new_sl;
1205*c82834a5SAlexei Starovoitov 	struct bpf_verifier_state_list *sl;
1206*c82834a5SAlexei Starovoitov 	struct bpf_verifier_state *cur = env->cur_state, *new;
1207*c82834a5SAlexei Starovoitov 	bool force_new_state, add_new_state, loop;
1208*c82834a5SAlexei Starovoitov 	int n, err, states_cnt = 0;
1209*c82834a5SAlexei Starovoitov 	struct list_head *pos, *tmp, *head;
1210*c82834a5SAlexei Starovoitov 
1211*c82834a5SAlexei Starovoitov 	force_new_state = env->test_state_freq || bpf_is_force_checkpoint(env, insn_idx) ||
1212*c82834a5SAlexei Starovoitov 			  /* Avoid accumulating infinitely long jmp history */
1213*c82834a5SAlexei Starovoitov 			  cur->jmp_history_cnt > 40;
1214*c82834a5SAlexei Starovoitov 
1215*c82834a5SAlexei Starovoitov 	/* bpf progs typically have a pruning point every 4 instructions
1216*c82834a5SAlexei Starovoitov 	 * http://vger.kernel.org/bpfconf2019.html#session-1
1217*c82834a5SAlexei Starovoitov 	 * Do not add new state for future pruning if the verifier hasn't seen
1218*c82834a5SAlexei Starovoitov 	 * at least 2 jumps and at least 8 instructions.
1219*c82834a5SAlexei Starovoitov 	 * This heuristic helps decrease the 'total_states' and 'peak_states' metrics.
1220*c82834a5SAlexei Starovoitov 	 * In tests that amounts to up to a 50% reduction in total verifier
1221*c82834a5SAlexei Starovoitov 	 * memory consumption and a 20% verifier time speedup.
1222*c82834a5SAlexei Starovoitov 	 */
1223*c82834a5SAlexei Starovoitov 	add_new_state = force_new_state;
1224*c82834a5SAlexei Starovoitov 	if (env->jmps_processed - env->prev_jmps_processed >= 2 &&
1225*c82834a5SAlexei Starovoitov 	    env->insn_processed - env->prev_insn_processed >= 8)
1226*c82834a5SAlexei Starovoitov 		add_new_state = true;
1227*c82834a5SAlexei Starovoitov 
1228*c82834a5SAlexei Starovoitov 	/* keep cleaning the current state as registers/stack become dead */
1229*c82834a5SAlexei Starovoitov 	err = clean_verifier_state(env, cur);
1230*c82834a5SAlexei Starovoitov 	if (err)
1231*c82834a5SAlexei Starovoitov 		return err;
1232*c82834a5SAlexei Starovoitov 
1233*c82834a5SAlexei Starovoitov 	loop = false;
1234*c82834a5SAlexei Starovoitov 	head = bpf_explored_state(env, insn_idx);
1235*c82834a5SAlexei Starovoitov 	list_for_each_safe(pos, tmp, head) {
1236*c82834a5SAlexei Starovoitov 		sl = container_of(pos, struct bpf_verifier_state_list, node);
1237*c82834a5SAlexei Starovoitov 		states_cnt++;
1238*c82834a5SAlexei Starovoitov 		if (sl->state.insn_idx != insn_idx)
1239*c82834a5SAlexei Starovoitov 			continue;
1240*c82834a5SAlexei Starovoitov 
1241*c82834a5SAlexei Starovoitov 		if (sl->state.branches) {
1242*c82834a5SAlexei Starovoitov 			struct bpf_func_state *frame = sl->state.frame[sl->state.curframe];
1243*c82834a5SAlexei Starovoitov 
1244*c82834a5SAlexei Starovoitov 			if (frame->in_async_callback_fn &&
1245*c82834a5SAlexei Starovoitov 			    frame->async_entry_cnt != cur->frame[cur->curframe]->async_entry_cnt) {
1246*c82834a5SAlexei Starovoitov 				/* Different async_entry_cnt means that the verifier is
1247*c82834a5SAlexei Starovoitov 				 * processing another entry into async callback.
1248*c82834a5SAlexei Starovoitov 				 * Seeing the same state is not an indication of infinite
1249*c82834a5SAlexei Starovoitov 				 * loop or infinite recursion.
1250*c82834a5SAlexei Starovoitov 				 * But finding the same state doesn't mean that it's safe
1251*c82834a5SAlexei Starovoitov 				 * to stop processing the current state. The previous state
1252*c82834a5SAlexei Starovoitov 				 * hasn't yet reached bpf_exit, since state.branches > 0.
1253*c82834a5SAlexei Starovoitov 				 * Checking in_async_callback_fn alone is not enough either,
1254*c82834a5SAlexei Starovoitov 				 * since the verifier still needs to catch infinite loops
1255*c82834a5SAlexei Starovoitov 				 * inside async callbacks.
1256*c82834a5SAlexei Starovoitov 				 */
1257*c82834a5SAlexei Starovoitov 				goto skip_inf_loop_check;
1258*c82834a5SAlexei Starovoitov 			}
1259*c82834a5SAlexei Starovoitov 			/* Loop detection for BPF open-coded iterators is special.
1260*c82834a5SAlexei Starovoitov 			 * The states_maybe_looping() logic is too simplistic in detecting
1261*c82834a5SAlexei Starovoitov 			 * states that *might* be equivalent, because it doesn't know
1262*c82834a5SAlexei Starovoitov 			 * about ID remapping, so don't even perform it.
1263*c82834a5SAlexei Starovoitov 			 * See process_iter_next_call() and iter_active_depths_differ()
1264*c82834a5SAlexei Starovoitov 			 * for overview of the logic. When current and one of parent
1265*c82834a5SAlexei Starovoitov 			 * states are detected as equivalent, it's a good thing: we prove
1266*c82834a5SAlexei Starovoitov 			 * convergence and can stop simulating further iterations.
1267*c82834a5SAlexei Starovoitov 			 * It's safe to assume that the iterator loop will finish, taking into
1268*c82834a5SAlexei Starovoitov 			 * account the iter_next() contract of eventually returning
1269*c82834a5SAlexei Starovoitov 			 * a sticky NULL result.
1270*c82834a5SAlexei Starovoitov 			 *
1271*c82834a5SAlexei Starovoitov 			 * Note that states have to be compared exactly in this case, because
1272*c82834a5SAlexei Starovoitov 			 * read and precision marks might not be finalized inside the loop.
1273*c82834a5SAlexei Starovoitov 			 * E.g. as in the program below:
1274*c82834a5SAlexei Starovoitov 			 *
1275*c82834a5SAlexei Starovoitov 			 *     1. r7 = -16
1276*c82834a5SAlexei Starovoitov 			 *     2. r6 = bpf_get_prandom_u32()
1277*c82834a5SAlexei Starovoitov 			 *     3. while (bpf_iter_num_next(&fp[-8])) {
1278*c82834a5SAlexei Starovoitov 			 *     4.   if (r6 != 42) {
1279*c82834a5SAlexei Starovoitov 			 *     5.     r7 = -32
1280*c82834a5SAlexei Starovoitov 			 *     6.     r6 = bpf_get_prandom_u32()
1281*c82834a5SAlexei Starovoitov 			 *     7.     continue
1282*c82834a5SAlexei Starovoitov 			 *     8.   }
1283*c82834a5SAlexei Starovoitov 			 *     9.   r0 = r10
1284*c82834a5SAlexei Starovoitov 			 *    10.   r0 += r7
1285*c82834a5SAlexei Starovoitov 			 *    11.   r8 = *(u64 *)(r0 + 0)
1286*c82834a5SAlexei Starovoitov 			 *    12.   r6 = bpf_get_prandom_u32()
1287*c82834a5SAlexei Starovoitov 			 *    13. }
1288*c82834a5SAlexei Starovoitov 			 *
1289*c82834a5SAlexei Starovoitov 			 * Here the verifier would first visit path 1-3, create a checkpoint at 3
1290*c82834a5SAlexei Starovoitov 			 * with r7=-16, and continue to 4-7,3. The existing checkpoint at 3 does
1291*c82834a5SAlexei Starovoitov 			 * not have a read or precision mark for r7 yet, thus an inexact states
1292*c82834a5SAlexei Starovoitov 			 * comparison would discard the current state with r7=-32
1293*c82834a5SAlexei Starovoitov 			 * => the unsafe memory access at 11 would not be caught.
1294*c82834a5SAlexei Starovoitov 			 */
1295*c82834a5SAlexei Starovoitov 			if (is_iter_next_insn(env, insn_idx)) {
1296*c82834a5SAlexei Starovoitov 				if (states_equal(env, &sl->state, cur, RANGE_WITHIN)) {
1297*c82834a5SAlexei Starovoitov 					struct bpf_func_state *cur_frame;
1298*c82834a5SAlexei Starovoitov 					struct bpf_reg_state *iter_state, *iter_reg;
1299*c82834a5SAlexei Starovoitov 					int spi;
1300*c82834a5SAlexei Starovoitov 
1301*c82834a5SAlexei Starovoitov 					cur_frame = cur->frame[cur->curframe];
1302*c82834a5SAlexei Starovoitov 					/* btf_check_iter_kfuncs() enforces that
1303*c82834a5SAlexei Starovoitov 					 * iter state pointer is always the first arg
1304*c82834a5SAlexei Starovoitov 					 */
1305*c82834a5SAlexei Starovoitov 					iter_reg = &cur_frame->regs[BPF_REG_1];
1306*c82834a5SAlexei Starovoitov 					/* current state is valid due to states_equal(),
1307*c82834a5SAlexei Starovoitov 					 * so we can assume valid iter and reg state,
1308*c82834a5SAlexei Starovoitov 					 * no need for extra (re-)validations
1309*c82834a5SAlexei Starovoitov 					 */
1310*c82834a5SAlexei Starovoitov 					spi = bpf_get_spi(iter_reg->var_off.value);
1311*c82834a5SAlexei Starovoitov 					iter_state = &bpf_func(env, iter_reg)->stack[spi].spilled_ptr;
1312*c82834a5SAlexei Starovoitov 					if (iter_state->iter.state == BPF_ITER_STATE_ACTIVE) {
1313*c82834a5SAlexei Starovoitov 						loop = true;
1314*c82834a5SAlexei Starovoitov 						goto hit;
1315*c82834a5SAlexei Starovoitov 					}
1316*c82834a5SAlexei Starovoitov 				}
1317*c82834a5SAlexei Starovoitov 				goto skip_inf_loop_check;
1318*c82834a5SAlexei Starovoitov 			}
1319*c82834a5SAlexei Starovoitov 			if (is_may_goto_insn_at(env, insn_idx)) {
1320*c82834a5SAlexei Starovoitov 				if (sl->state.may_goto_depth != cur->may_goto_depth &&
1321*c82834a5SAlexei Starovoitov 				    states_equal(env, &sl->state, cur, RANGE_WITHIN)) {
1322*c82834a5SAlexei Starovoitov 					loop = true;
1323*c82834a5SAlexei Starovoitov 					goto hit;
1324*c82834a5SAlexei Starovoitov 				}
1325*c82834a5SAlexei Starovoitov 			}
1326*c82834a5SAlexei Starovoitov 			if (bpf_calls_callback(env, insn_idx)) {
1327*c82834a5SAlexei Starovoitov 				if (states_equal(env, &sl->state, cur, RANGE_WITHIN)) {
1328*c82834a5SAlexei Starovoitov 					loop = true;
1329*c82834a5SAlexei Starovoitov 					goto hit;
1330*c82834a5SAlexei Starovoitov 				}
1331*c82834a5SAlexei Starovoitov 				goto skip_inf_loop_check;
1332*c82834a5SAlexei Starovoitov 			}
1333*c82834a5SAlexei Starovoitov 			/* attempt to detect infinite loop to avoid unnecessary doomed work */
1334*c82834a5SAlexei Starovoitov 			if (states_maybe_looping(&sl->state, cur) &&
1335*c82834a5SAlexei Starovoitov 			    states_equal(env, &sl->state, cur, EXACT) &&
1336*c82834a5SAlexei Starovoitov 			    !iter_active_depths_differ(&sl->state, cur) &&
1337*c82834a5SAlexei Starovoitov 			    sl->state.may_goto_depth == cur->may_goto_depth &&
1338*c82834a5SAlexei Starovoitov 			    sl->state.callback_unroll_depth == cur->callback_unroll_depth) {
1339*c82834a5SAlexei Starovoitov 				verbose_linfo(env, insn_idx, "; ");
1340*c82834a5SAlexei Starovoitov 				verbose(env, "infinite loop detected at insn %d\n", insn_idx);
1341*c82834a5SAlexei Starovoitov 				verbose(env, "cur state:");
1342*c82834a5SAlexei Starovoitov 				print_verifier_state(env, cur, cur->curframe, true);
1343*c82834a5SAlexei Starovoitov 				verbose(env, "old state:");
1344*c82834a5SAlexei Starovoitov 				print_verifier_state(env, &sl->state, cur->curframe, true);
1345*c82834a5SAlexei Starovoitov 				return -EINVAL;
1346*c82834a5SAlexei Starovoitov 			}
1347*c82834a5SAlexei Starovoitov 			/* if the verifier is processing a loop, avoid adding a new state
1348*c82834a5SAlexei Starovoitov 			 * too often, since different loop iterations have distinct
1349*c82834a5SAlexei Starovoitov 			 * states and may not help future pruning.
1350*c82834a5SAlexei Starovoitov 			 * This threshold shouldn't be too low, to make sure that
1351*c82834a5SAlexei Starovoitov 			 * a loop with a large bound will be rejected quickly.
1352*c82834a5SAlexei Starovoitov 			 * The most abusive loop will be:
1353*c82834a5SAlexei Starovoitov 			 * r1 += 1
1354*c82834a5SAlexei Starovoitov 			 * if r1 < 1000000 goto pc-2
1355*c82834a5SAlexei Starovoitov 			 * 1M insn_processed limit / 100 == 10k peak states.
1356*c82834a5SAlexei Starovoitov 			 * This threshold shouldn't be too high either, since states
1357*c82834a5SAlexei Starovoitov 			 * at the end of the loop are likely to be useful in pruning.
1358*c82834a5SAlexei Starovoitov 			 */
1359*c82834a5SAlexei Starovoitov skip_inf_loop_check:
1360*c82834a5SAlexei Starovoitov 			if (!force_new_state &&
1361*c82834a5SAlexei Starovoitov 			    env->jmps_processed - env->prev_jmps_processed < 20 &&
1362*c82834a5SAlexei Starovoitov 			    env->insn_processed - env->prev_insn_processed < 100)
1363*c82834a5SAlexei Starovoitov 				add_new_state = false;
1364*c82834a5SAlexei Starovoitov 			goto miss;
1365*c82834a5SAlexei Starovoitov 		}
1366*c82834a5SAlexei Starovoitov 		/* See comments for mark_all_regs_read_and_precise() */
1367*c82834a5SAlexei Starovoitov 		loop = incomplete_read_marks(env, &sl->state);
1368*c82834a5SAlexei Starovoitov 		if (states_equal(env, &sl->state, cur, loop ? RANGE_WITHIN : NOT_EXACT)) {
1369*c82834a5SAlexei Starovoitov hit:
1370*c82834a5SAlexei Starovoitov 			sl->hit_cnt++;
1371*c82834a5SAlexei Starovoitov 
1372*c82834a5SAlexei Starovoitov 			/* if previous state reached the exit with precision and
1373*c82834a5SAlexei Starovoitov 			 * current state is equivalent to it (except precision marks)
1374*c82834a5SAlexei Starovoitov 			 * the precision needs to be propagated back in
1375*c82834a5SAlexei Starovoitov 			 * the current state.
1376*c82834a5SAlexei Starovoitov 			 */
1377*c82834a5SAlexei Starovoitov 			err = 0;
1378*c82834a5SAlexei Starovoitov 			if (bpf_is_jmp_point(env, env->insn_idx))
1379*c82834a5SAlexei Starovoitov 				err = bpf_push_jmp_history(env, cur, 0, 0);
1380*c82834a5SAlexei Starovoitov 			err = err ?: propagate_precision(env, &sl->state, cur, NULL);
1381*c82834a5SAlexei Starovoitov 			if (err)
1382*c82834a5SAlexei Starovoitov 				return err;
1383*c82834a5SAlexei Starovoitov 			/* When processing iterator-based loops, the propagate_liveness and
1384*c82834a5SAlexei Starovoitov 			 * propagate_precision calls above are not sufficient to transfer all
1385*c82834a5SAlexei Starovoitov 			 * relevant read and precision marks. E.g. consider the following case:
1386*c82834a5SAlexei Starovoitov 			 *
1387*c82834a5SAlexei Starovoitov 			 *  .-> A --.  Assume the states are visited in the order A, B, C.
1388*c82834a5SAlexei Starovoitov 			 *  |   |   |  Assume that state B reaches a state equivalent to state A.
1389*c82834a5SAlexei Starovoitov 			 *  |   v   v  At this point, state C is not processed yet, so state A
1390*c82834a5SAlexei Starovoitov 			 *  '-- B   C  has not received any read or precision marks from C.
1391*c82834a5SAlexei Starovoitov 			 *             Thus, marks propagated from A to B are incomplete.
1392*c82834a5SAlexei Starovoitov 			 *
1393*c82834a5SAlexei Starovoitov 			 * The verifier mitigates this by performing the following steps:
1394*c82834a5SAlexei Starovoitov 			 *
1395*c82834a5SAlexei Starovoitov 			 * - Prior to the main verification pass, strongly connected components
1396*c82834a5SAlexei Starovoitov 			 *   (SCCs) are computed over the program's control flow graph,
1397*c82834a5SAlexei Starovoitov 			 *   intraprocedurally.
1398*c82834a5SAlexei Starovoitov 			 *
1399*c82834a5SAlexei Starovoitov 			 * - During the main verification pass, `maybe_enter_scc()` checks
1400*c82834a5SAlexei Starovoitov 			 *   whether the current verifier state is entering an SCC. If so, an
1401*c82834a5SAlexei Starovoitov 			 *   instance of a `bpf_scc_visit` object is created, and the state
1402*c82834a5SAlexei Starovoitov 			 *   entering the SCC is recorded as the entry state.
1403*c82834a5SAlexei Starovoitov 			 *
1404*c82834a5SAlexei Starovoitov 			 * - This instance is associated not with the SCC itself, but with a
1405*c82834a5SAlexei Starovoitov 			 *   `bpf_scc_callchain`: a tuple consisting of the call sites leading to
1406*c82834a5SAlexei Starovoitov 			 *   the SCC and the SCC id. See `compute_scc_callchain()`.
1407*c82834a5SAlexei Starovoitov 			 *
1408*c82834a5SAlexei Starovoitov 			 * - When a verification path encounters a `states_equal(...,
1409*c82834a5SAlexei Starovoitov 			 *   RANGE_WITHIN)` condition, there exists a call chain describing the
1410*c82834a5SAlexei Starovoitov 			 *   current state and a corresponding `bpf_scc_visit` instance. A copy
1411*c82834a5SAlexei Starovoitov 			 *   of the current state is created and added to
1412*c82834a5SAlexei Starovoitov 			 *   `bpf_scc_visit->backedges`.
1413*c82834a5SAlexei Starovoitov 			 *
1414*c82834a5SAlexei Starovoitov 			 * - When a verification path terminates, `maybe_exit_scc()` is called
1415*c82834a5SAlexei Starovoitov 			 *   from `bpf_update_branch_counts()`. For states with `branches == 0`, it
1416*c82834a5SAlexei Starovoitov 			 *   checks whether the state is the entry state of any `bpf_scc_visit`
1417*c82834a5SAlexei Starovoitov 			 *   instance. If it is, this indicates that all paths originating from
1418*c82834a5SAlexei Starovoitov 			 *   this SCC visit have been explored. `propagate_backedges()` is then
1419*c82834a5SAlexei Starovoitov 			 *   called, which propagates read and precision marks through the
1420*c82834a5SAlexei Starovoitov 			 *   backedges until a fixed point is reached.
1421*c82834a5SAlexei Starovoitov 			 *   (In the earlier example, this would propagate marks from A to B,
1422*c82834a5SAlexei Starovoitov 			 *    from C to A, and then again from A to B.)
1423*c82834a5SAlexei Starovoitov 			 *
1424*c82834a5SAlexei Starovoitov 			 * A note on callchains
1425*c82834a5SAlexei Starovoitov 			 * --------------------
1426*c82834a5SAlexei Starovoitov 			 *
1427*c82834a5SAlexei Starovoitov 			 * Consider the following example:
1428*c82834a5SAlexei Starovoitov 			 *
1429*c82834a5SAlexei Starovoitov 			 *     void foo() { loop { ... SCC#1 ... } }
1430*c82834a5SAlexei Starovoitov 			 *     void main() {
1431*c82834a5SAlexei Starovoitov 			 *       A: foo();
1432*c82834a5SAlexei Starovoitov 			 *       B: ...
1433*c82834a5SAlexei Starovoitov 			 *       C: foo();
1434*c82834a5SAlexei Starovoitov 			 *     }
1435*c82834a5SAlexei Starovoitov 			 *
1436*c82834a5SAlexei Starovoitov 			 * Here, there are two distinct callchains leading to SCC#1:
1437*c82834a5SAlexei Starovoitov 			 * - (A, SCC#1)
1438*c82834a5SAlexei Starovoitov 			 * - (C, SCC#1)
1439*c82834a5SAlexei Starovoitov 			 *
1440*c82834a5SAlexei Starovoitov 			 * Each callchain identifies a separate `bpf_scc_visit` instance that
1441*c82834a5SAlexei Starovoitov 			 * accumulates backedge states. The `propagate_{liveness,precision}()`
1442*c82834a5SAlexei Starovoitov 			 * functions traverse the parent state of each backedge state, which
1443*c82834a5SAlexei Starovoitov 			 * means these parent states must remain valid (i.e., not freed) while
1444*c82834a5SAlexei Starovoitov 			 * the corresponding `bpf_scc_visit` instance exists.
1445*c82834a5SAlexei Starovoitov 			 *
1446*c82834a5SAlexei Starovoitov 			 * Associating `bpf_scc_visit` instances directly with SCCs instead of
1447*c82834a5SAlexei Starovoitov 			 * callchains would break this invariant:
1448*c82834a5SAlexei Starovoitov 			 * - States explored during `C: foo()` would contribute backedges to
1449*c82834a5SAlexei Starovoitov 			 *   SCC#1, but SCC#1 would only be exited once the exploration of
1450*c82834a5SAlexei Starovoitov 			 *   `A: foo()` completes.
1451*c82834a5SAlexei Starovoitov 			 * - By that time, the states explored between `A: foo()` and `C: foo()`
1452*c82834a5SAlexei Starovoitov 			 *   (i.e., `B: ...`) may have already been freed, causing the parent
1453*c82834a5SAlexei Starovoitov 			 *   links for states from `C: foo()` to become invalid.
1454*c82834a5SAlexei Starovoitov 			 */
1455*c82834a5SAlexei Starovoitov 			if (loop) {
1456*c82834a5SAlexei Starovoitov 				struct bpf_scc_backedge *backedge;
1457*c82834a5SAlexei Starovoitov 
1458*c82834a5SAlexei Starovoitov 				backedge = kzalloc_obj(*backedge,
1459*c82834a5SAlexei Starovoitov 						       GFP_KERNEL_ACCOUNT);
1460*c82834a5SAlexei Starovoitov 				if (!backedge)
1461*c82834a5SAlexei Starovoitov 					return -ENOMEM;
1462*c82834a5SAlexei Starovoitov 				err = bpf_copy_verifier_state(&backedge->state, cur);
1463*c82834a5SAlexei Starovoitov 				backedge->state.equal_state = &sl->state;
1464*c82834a5SAlexei Starovoitov 				backedge->state.insn_idx = insn_idx;
1465*c82834a5SAlexei Starovoitov 				err = err ?: add_scc_backedge(env, &sl->state, backedge);
1466*c82834a5SAlexei Starovoitov 				if (err) {
1467*c82834a5SAlexei Starovoitov 					bpf_free_verifier_state(&backedge->state, false);
1468*c82834a5SAlexei Starovoitov 					kfree(backedge);
1469*c82834a5SAlexei Starovoitov 					return err;
1470*c82834a5SAlexei Starovoitov 				}
1471*c82834a5SAlexei Starovoitov 			}
1472*c82834a5SAlexei Starovoitov 			return 1;
1473*c82834a5SAlexei Starovoitov 		}
1474*c82834a5SAlexei Starovoitov miss:
1475*c82834a5SAlexei Starovoitov 		/* when a new state is not going to be added, do not increase the miss
1476*c82834a5SAlexei Starovoitov 		 * count. Otherwise several loop iterations will remove the state
1477*c82834a5SAlexei Starovoitov 		 * recorded earlier. The goal of these heuristics is to keep
1478*c82834a5SAlexei Starovoitov 		 * states from some iterations of the loop (some at the beginning
1479*c82834a5SAlexei Starovoitov 		 * and some at the end) to help pruning.
1480*c82834a5SAlexei Starovoitov 		 */
1481*c82834a5SAlexei Starovoitov 		if (add_new_state)
1482*c82834a5SAlexei Starovoitov 			sl->miss_cnt++;
1483*c82834a5SAlexei Starovoitov 		/* heuristic to determine whether this state is beneficial
1484*c82834a5SAlexei Starovoitov 		 * to keep checking from a state equivalence point of view.
1485*c82834a5SAlexei Starovoitov 		 * Higher numbers increase max_states_per_insn and verification time,
1486*c82834a5SAlexei Starovoitov 		 * but do not meaningfully decrease insn_processed.
1487*c82834a5SAlexei Starovoitov 		 * 'n' controls how many times a state can miss before eviction.
1488*c82834a5SAlexei Starovoitov 		 * Use a bigger 'n' for checkpoints because evicting checkpoint states
1489*c82834a5SAlexei Starovoitov 		 * too early would hinder iterator convergence.
1490*c82834a5SAlexei Starovoitov 		 */
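		/* E.g. with n == 3 a state that never hits is evicted on its
		 * fourth miss (4 > 3 * 0 + 3), while each hit tolerates three
		 * additional misses; with n == 64 a checkpoint survives up to
		 * 64 misses per hit, plus 64.
		 */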
1491*c82834a5SAlexei Starovoitov 		n = bpf_is_force_checkpoint(env, insn_idx) && sl->state.branches > 0 ? 64 : 3;
1492*c82834a5SAlexei Starovoitov 		if (sl->miss_cnt > sl->hit_cnt * n + n) {
1493*c82834a5SAlexei Starovoitov 			/* the state is unlikely to be useful. Remove it to
1494*c82834a5SAlexei Starovoitov 			 * speed up verification
1495*c82834a5SAlexei Starovoitov 			 */
1496*c82834a5SAlexei Starovoitov 			sl->in_free_list = true;
1497*c82834a5SAlexei Starovoitov 			list_del(&sl->node);
1498*c82834a5SAlexei Starovoitov 			list_add(&sl->node, &env->free_list);
1499*c82834a5SAlexei Starovoitov 			env->free_list_size++;
1500*c82834a5SAlexei Starovoitov 			env->explored_states_size--;
1501*c82834a5SAlexei Starovoitov 			maybe_free_verifier_state(env, sl);
1502*c82834a5SAlexei Starovoitov 		}
1503*c82834a5SAlexei Starovoitov 	}
1504*c82834a5SAlexei Starovoitov 
1505*c82834a5SAlexei Starovoitov 	if (env->max_states_per_insn < states_cnt)
1506*c82834a5SAlexei Starovoitov 		env->max_states_per_insn = states_cnt;
1507*c82834a5SAlexei Starovoitov 
1508*c82834a5SAlexei Starovoitov 	if (!env->bpf_capable && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
1509*c82834a5SAlexei Starovoitov 		return 0;
1510*c82834a5SAlexei Starovoitov 
1511*c82834a5SAlexei Starovoitov 	if (!add_new_state)
1512*c82834a5SAlexei Starovoitov 		return 0;
1513*c82834a5SAlexei Starovoitov 
1514*c82834a5SAlexei Starovoitov 	/* There were no equivalent states, remember the current one.
1515*c82834a5SAlexei Starovoitov 	 * Technically the current state is not proven to be safe yet,
1516*c82834a5SAlexei Starovoitov 	 * but it will either reach the outermost bpf_exit (which means it's safe)
1517*c82834a5SAlexei Starovoitov 	 * or it will be rejected. When there are no loops, the verifier won't be
1518*c82834a5SAlexei Starovoitov 	 * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx)
1519*c82834a5SAlexei Starovoitov 	 * again on the way to bpf_exit.
1520*c82834a5SAlexei Starovoitov 	 * When looping the sl->state.branches will be > 0 and this state
1521*c82834a5SAlexei Starovoitov 	 * will not be considered for equivalence until branches == 0.
1522*c82834a5SAlexei Starovoitov 	 */
1523*c82834a5SAlexei Starovoitov 	new_sl = kzalloc_obj(struct bpf_verifier_state_list, GFP_KERNEL_ACCOUNT);
1524*c82834a5SAlexei Starovoitov 	if (!new_sl)
1525*c82834a5SAlexei Starovoitov 		return -ENOMEM;
1526*c82834a5SAlexei Starovoitov 	env->total_states++;
1527*c82834a5SAlexei Starovoitov 	env->explored_states_size++;
1528*c82834a5SAlexei Starovoitov 	update_peak_states(env);
1529*c82834a5SAlexei Starovoitov 	env->prev_jmps_processed = env->jmps_processed;
1530*c82834a5SAlexei Starovoitov 	env->prev_insn_processed = env->insn_processed;
1531*c82834a5SAlexei Starovoitov 
1532*c82834a5SAlexei Starovoitov 	/* forget precise markings we inherited, see __mark_chain_precision */
1533*c82834a5SAlexei Starovoitov 	if (env->bpf_capable)
1534*c82834a5SAlexei Starovoitov 		mark_all_scalars_imprecise(env, cur);
1535*c82834a5SAlexei Starovoitov 
1536*c82834a5SAlexei Starovoitov 	bpf_clear_singular_ids(env, cur);
1537*c82834a5SAlexei Starovoitov 
1538*c82834a5SAlexei Starovoitov 	/* add new state to the head of linked list */
1539*c82834a5SAlexei Starovoitov 	new = &new_sl->state;
1540*c82834a5SAlexei Starovoitov 	err = bpf_copy_verifier_state(new, cur);
1541*c82834a5SAlexei Starovoitov 	if (err) {
1542*c82834a5SAlexei Starovoitov 		bpf_free_verifier_state(new, false);
1543*c82834a5SAlexei Starovoitov 		kfree(new_sl);
1544*c82834a5SAlexei Starovoitov 		return err;
1545*c82834a5SAlexei Starovoitov 	}
1546*c82834a5SAlexei Starovoitov 	new->insn_idx = insn_idx;
1547*c82834a5SAlexei Starovoitov 	verifier_bug_if(new->branches != 1, env,
1548*c82834a5SAlexei Starovoitov 			"%s:branches_to_explore=%d insn %d",
1549*c82834a5SAlexei Starovoitov 			__func__, new->branches, insn_idx);
1550*c82834a5SAlexei Starovoitov 	err = maybe_enter_scc(env, new);
1551*c82834a5SAlexei Starovoitov 	if (err) {
1552*c82834a5SAlexei Starovoitov 		bpf_free_verifier_state(new, false);
1553*c82834a5SAlexei Starovoitov 		kfree(new_sl);
1554*c82834a5SAlexei Starovoitov 		return err;
1555*c82834a5SAlexei Starovoitov 	}
1556*c82834a5SAlexei Starovoitov 
1557*c82834a5SAlexei Starovoitov 	cur->parent = new;
1558*c82834a5SAlexei Starovoitov 	cur->first_insn_idx = insn_idx;
1559*c82834a5SAlexei Starovoitov 	cur->dfs_depth = new->dfs_depth + 1;
1560*c82834a5SAlexei Starovoitov 	bpf_clear_jmp_history(cur);
1561*c82834a5SAlexei Starovoitov 	list_add(&new_sl->node, head);
1562*c82834a5SAlexei Starovoitov 	return 0;
1563*c82834a5SAlexei Starovoitov }