// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2026 Meta Platforms, Inc. and affiliates. */
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/filter.h>
#include <linux/btf.h>

#define verbose(env, fmt, args...) bpf_verifier_log_write(env, fmt, ##args)

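/*
 * LD_ABS and tail_call can terminate the program abnormally, which the
 * verifier can only reason about in a subprog when BTF func info
 * describes the subprog's return type.  Without BTF, allow them only
 * in the main program (subprog 0).
 */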
static int check_abnormal_return(struct bpf_verifier_env *env)
{
	int i;

	for (i = 1; i < env->subprog_cnt; i++) {
		if (env->subprog_info[i].has_ld_abs) {
			verbose(env, "LD_ABS is not allowed in subprogs without BTF\n");
			return -EINVAL;
		}
		if (env->subprog_info[i].has_tail_call) {
			verbose(env, "tail_call is not allowed in subprogs without BTF\n");
			return -EINVAL;
		}
	}
	return 0;
}

/* The minimum supported BTF func info size */
#define MIN_BPF_FUNCINFO_SIZE	8
#define MAX_FUNCINFO_REC_SIZE	252

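/*
 * First pass over the user-supplied func_info records: validate the
 * record size, copy the records in, and check that insn_off is
 * strictly increasing and that each type_id names a BTF_KIND_FUNC
 * with a valid prototype.  On success the records are published in
 * prog->aux->func_info for the later check_btf_func() pass.
 */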
static int check_btf_func_early(struct bpf_verifier_env *env,
				const union bpf_attr *attr,
				bpfptr_t uattr)
{
	u32 krec_size = sizeof(struct bpf_func_info);
	const struct btf_type *type, *func_proto;
	u32 i, nfuncs, urec_size, min_size;
	struct bpf_func_info *krecord;
	struct bpf_prog *prog;
	const struct btf *btf;
	u32 prev_offset = 0;
	bpfptr_t urecord;
	int ret = -ENOMEM;

	nfuncs = attr->func_info_cnt;
	if (!nfuncs) {
		if (check_abnormal_return(env))
			return -EINVAL;
		return 0;
	}

	urec_size = attr->func_info_rec_size;
	if (urec_size < MIN_BPF_FUNCINFO_SIZE ||
	    urec_size > MAX_FUNCINFO_REC_SIZE ||
	    urec_size % sizeof(u32)) {
		verbose(env, "invalid func info rec size %u\n", urec_size);
		return -EINVAL;
	}

	prog = env->prog;
	btf = prog->aux->btf;

	urecord = make_bpfptr(attr->func_info, uattr.is_kernel);
	min_size = min_t(u32, krec_size, urec_size);

	krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
	if (!krecord)
		return -ENOMEM;

	for (i = 0; i < nfuncs; i++) {
		ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size);
		if (ret) {
			if (ret == -E2BIG) {
				verbose(env, "nonzero tailing record in func info");
				/* set the size kernel expects so loader can zero
				 * out the rest of the record.
				 */
				if (copy_to_bpfptr_offset(uattr,
							  offsetof(union bpf_attr, func_info_rec_size),
							  &min_size, sizeof(min_size)))
					ret = -EFAULT;
			}
			goto err_free;
		}

		if (copy_from_bpfptr(&krecord[i], urecord, min_size)) {
			ret = -EFAULT;
			goto err_free;
		}

		/* check insn_off */
		ret = -EINVAL;
		if (i == 0) {
			if (krecord[i].insn_off) {
				verbose(env,
					"nonzero insn_off %u for the first func info record",
					krecord[i].insn_off);
				goto err_free;
			}
		} else if (krecord[i].insn_off <= prev_offset) {
			verbose(env,
				"same or smaller insn offset (%u) than previous func info record (%u)",
				krecord[i].insn_off, prev_offset);
			goto err_free;
		}

		/* check type_id */
		type = btf_type_by_id(btf, krecord[i].type_id);
		if (!type || !btf_type_is_func(type)) {
			verbose(env, "invalid type id %d in func info",
				krecord[i].type_id);
			goto err_free;
		}

		func_proto = btf_type_by_id(btf, type->type);
		if (unlikely(!func_proto || !btf_type_is_func_proto(func_proto)))
			/* btf_func_check() already verified it during BTF load */
			goto err_free;

		prev_offset = krecord[i].insn_off;
		bpfptr_add(&urecord, urec_size);
	}

	prog->aux->func_info = krecord;
	prog->aux->func_info_cnt = nfuncs;
	return 0;

err_free:
	kvfree(krecord);
	return ret;
}

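/*
 * Second pass over the func_info records, run once subprog boundaries
 * are known: every subprog must have exactly one matching record, and
 * a non-main subprog may only use LD_ABS or tail_call if its BTF
 * return type is a scalar ('int'-like).  Also records each subprog's
 * BTF linkage and name.
 */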
static int check_btf_func(struct bpf_verifier_env *env,
			  const union bpf_attr *attr,
			  bpfptr_t uattr)
{
	const struct btf_type *type, *func_proto, *ret_type;
	u32 i, nfuncs, urec_size;
	struct bpf_func_info *krecord;
	struct bpf_func_info_aux *info_aux = NULL;
	struct bpf_prog *prog;
	const struct btf *btf;
	bpfptr_t urecord;
	bool scalar_return;
	int ret = -ENOMEM;

	nfuncs = attr->func_info_cnt;
	if (!nfuncs) {
		if (check_abnormal_return(env))
			return -EINVAL;
		return 0;
	}
	if (nfuncs != env->subprog_cnt) {
		verbose(env, "number of funcs in func_info doesn't match number of subprogs\n");
		return -EINVAL;
	}

	urec_size = attr->func_info_rec_size;

	prog = env->prog;
	btf = prog->aux->btf;

	urecord = make_bpfptr(attr->func_info, uattr.is_kernel);

	krecord = prog->aux->func_info;
	info_aux = kzalloc_objs(*info_aux, nfuncs,
				GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
	if (!info_aux)
		return -ENOMEM;

	for (i = 0; i < nfuncs; i++) {
		/* check insn_off */
		ret = -EINVAL;

		if (env->subprog_info[i].start != krecord[i].insn_off) {
			verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n");
			goto err_free;
		}

		/* Already checked type_id */
		type = btf_type_by_id(btf, krecord[i].type_id);
		info_aux[i].linkage = BTF_INFO_VLEN(type->info);
		/* Already checked func_proto */
		func_proto = btf_type_by_id(btf, type->type);

		ret_type = btf_type_skip_modifiers(btf, func_proto->type, NULL);
		scalar_return =
			btf_type_is_small_int(ret_type) || btf_is_any_enum(ret_type);
		if (i && !scalar_return && env->subprog_info[i].has_ld_abs) {
			verbose(env, "LD_ABS is only allowed in functions that return 'int'.\n");
			goto err_free;
		}
		if (i && !scalar_return && env->subprog_info[i].has_tail_call) {
			verbose(env, "tail_call is only allowed in functions that return 'int'.\n");
			goto err_free;
		}

		env->subprog_info[i].name = btf_name_by_offset(btf, type->name_off);
		bpfptr_add(&urecord, urec_size);
	}

	prog->aux->func_info_aux = info_aux;
	return 0;

err_free:
	kfree(info_aux);
	return ret;
}

#define MIN_BPF_LINEINFO_SIZE	offsetofend(struct bpf_line_info, line_col)
#define MAX_LINEINFO_REC_SIZE	MAX_FUNCINFO_REC_SIZE

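/*
 * Validate and copy the user-supplied line_info records: insn_off must
 * be strictly increasing, point at a real instruction, and reference
 * valid BTF string offsets, and the start of every subprog must be
 * covered by a record.  On success the records are published in
 * prog->aux->linfo.
 */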
static int check_btf_line(struct bpf_verifier_env *env,
			  const union bpf_attr *attr,
			  bpfptr_t uattr)
{
	u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0;
	struct bpf_subprog_info *sub;
	struct bpf_line_info *linfo;
	struct bpf_prog *prog;
	const struct btf *btf;
	bpfptr_t ulinfo;
	int err;

	nr_linfo = attr->line_info_cnt;
	if (!nr_linfo)
		return 0;
	if (nr_linfo > INT_MAX / sizeof(struct bpf_line_info))
		return -EINVAL;

	rec_size = attr->line_info_rec_size;
	if (rec_size < MIN_BPF_LINEINFO_SIZE ||
	    rec_size > MAX_LINEINFO_REC_SIZE ||
	    rec_size & (sizeof(u32) - 1))
		return -EINVAL;

	/* Need to zero it in case userspace passes in a smaller
	 * bpf_line_info object.
	 */
	linfo = kvzalloc_objs(struct bpf_line_info, nr_linfo,
			      GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
	if (!linfo)
		return -ENOMEM;

	prog = env->prog;
	btf = prog->aux->btf;

	s = 0;
	sub = env->subprog_info;
	ulinfo = make_bpfptr(attr->line_info, uattr.is_kernel);
	expected_size = sizeof(struct bpf_line_info);
	ncopy = min_t(u32, expected_size, rec_size);
	for (i = 0; i < nr_linfo; i++) {
		err = bpf_check_uarg_tail_zero(ulinfo, expected_size, rec_size);
		if (err) {
			if (err == -E2BIG) {
				verbose(env, "nonzero tailing record in line_info");
				if (copy_to_bpfptr_offset(uattr,
							  offsetof(union bpf_attr, line_info_rec_size),
							  &expected_size, sizeof(expected_size)))
					err = -EFAULT;
			}
			goto err_free;
		}

		if (copy_from_bpfptr(&linfo[i], ulinfo, ncopy)) {
			err = -EFAULT;
			goto err_free;
		}

		/*
		 * Check insn_off to ensure it is
		 * 1) strictly increasing AND
		 * 2) bounded by prog->len.
		 *
		 * A separate linfo[0].insn_off == 0 check is not needed
		 * here: it logically falls into the later "missing
		 * bpf_line_info for func..." case, because linfo[0]
		 * must cover the start of the first subprog and the
		 * first subprog always has subprog_info[0].start == 0.
		 */
		if ((i && linfo[i].insn_off <= prev_offset) ||
		    linfo[i].insn_off >= prog->len) {
			verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n",
				i, linfo[i].insn_off, prev_offset,
				prog->len);
			err = -EINVAL;
			goto err_free;
		}

		if (!prog->insnsi[linfo[i].insn_off].code) {
			verbose(env,
				"Invalid insn code at line_info[%u].insn_off\n",
				i);
			err = -EINVAL;
			goto err_free;
		}

		if (!btf_name_by_offset(btf, linfo[i].line_off) ||
		    !btf_name_by_offset(btf, linfo[i].file_name_off)) {
			verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i);
			err = -EINVAL;
			goto err_free;
		}

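		/*
		 * Walk the subprog table in lockstep with the (sorted)
		 * line_info records: each subprog's first instruction
		 * must have a line_info record, whose index is saved in
		 * sub[s].linfo_idx.
		 */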
		if (s != env->subprog_cnt) {
			if (linfo[i].insn_off == sub[s].start) {
				sub[s].linfo_idx = i;
				s++;
			} else if (sub[s].start < linfo[i].insn_off) {
				verbose(env, "missing bpf_line_info for func#%u\n", s);
				err = -EINVAL;
				goto err_free;
			}
		}

		prev_offset = linfo[i].insn_off;
		bpfptr_add(&ulinfo, rec_size);
	}

	if (s != env->subprog_cnt) {
		verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n",
			env->subprog_cnt - s, s);
		err = -EINVAL;
		goto err_free;
	}

	prog->aux->linfo = linfo;
	prog->aux->nr_linfo = nr_linfo;

	return 0;

err_free:
	kvfree(linfo);
	return err;
}

#define MIN_CORE_RELO_SIZE	sizeof(struct bpf_core_relo)
#define MAX_CORE_RELO_SIZE	MAX_FUNCINFO_REC_SIZE

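/*
 * Copy and apply the user-supplied CO-RE relocation records.  Unlike
 * func_info and line_info, each record is copied and applied one at a
 * time and is not kept around once bpf_core_apply() has rewritten the
 * instruction it targets.
 */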
static int check_core_relo(struct bpf_verifier_env *env,
			   const union bpf_attr *attr,
			   bpfptr_t uattr)
{
	u32 i, nr_core_relo, ncopy, expected_size, rec_size;
	struct bpf_core_relo core_relo = {};
	struct bpf_prog *prog = env->prog;
	const struct btf *btf = prog->aux->btf;
	struct bpf_core_ctx ctx = {
		.log = &env->log,
		.btf = btf,
	};
	bpfptr_t u_core_relo;
	int err;

	nr_core_relo = attr->core_relo_cnt;
	if (!nr_core_relo)
		return 0;
	if (nr_core_relo > INT_MAX / sizeof(struct bpf_core_relo))
		return -EINVAL;

	rec_size = attr->core_relo_rec_size;
	if (rec_size < MIN_CORE_RELO_SIZE ||
	    rec_size > MAX_CORE_RELO_SIZE ||
	    rec_size % sizeof(u32))
		return -EINVAL;

	u_core_relo = make_bpfptr(attr->core_relos, uattr.is_kernel);
	expected_size = sizeof(struct bpf_core_relo);
	ncopy = min_t(u32, expected_size, rec_size);

	/* Unlike func_info and line_info, copy and apply each CO-RE
	 * relocation record one at a time.
	 */
	for (i = 0; i < nr_core_relo; i++) {
		/* future proofing when sizeof(bpf_core_relo) changes */
		err = bpf_check_uarg_tail_zero(u_core_relo, expected_size, rec_size);
		if (err) {
			if (err == -E2BIG) {
				verbose(env, "nonzero tailing record in core_relo");
				if (copy_to_bpfptr_offset(uattr,
							  offsetof(union bpf_attr, core_relo_rec_size),
							  &expected_size, sizeof(expected_size)))
					err = -EFAULT;
			}
			break;
		}

		if (copy_from_bpfptr(&core_relo, u_core_relo, ncopy)) {
			err = -EFAULT;
			break;
		}

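		/*
		 * Unlike func_info/line_info, insn_off here is in
		 * bytes: it must be aligned to the 8-byte instruction
		 * size and land inside the program.
		 */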
		if (core_relo.insn_off % 8 || core_relo.insn_off / 8 >= prog->len) {
			verbose(env, "Invalid core_relo[%u].insn_off:%u prog->len:%u\n",
				i, core_relo.insn_off, prog->len);
			err = -EINVAL;
			break;
		}

		err = bpf_core_apply(&ctx, &core_relo, i,
				     &prog->insnsi[core_relo.insn_off / 8]);
		if (err)
			break;
		bpfptr_add(&u_core_relo, rec_size);
	}
	return err;
}

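/*
 * Early entry point: take a reference on the program's BTF (in-kernel
 * BTF is rejected with -EACCES) and validate/copy the func_info
 * records via check_btf_func_early().
 */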
int bpf_check_btf_info_early(struct bpf_verifier_env *env,
			     const union bpf_attr *attr,
			     bpfptr_t uattr)
{
	struct btf *btf;
	int err;

	if (!attr->func_info_cnt && !attr->line_info_cnt) {
		if (check_abnormal_return(env))
			return -EINVAL;
		return 0;
	}

	btf = btf_get_by_fd(attr->prog_btf_fd);
	if (IS_ERR(btf))
		return PTR_ERR(btf);
	if (btf_is_kernel(btf)) {
		btf_put(btf);
		return -EACCES;
	}
	env->prog->aux->btf = btf;

	err = check_btf_func_early(env, attr, uattr);
	if (err)
		return err;
	return 0;
}

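/*
 * Main entry point: check func_info against the now-known subprog
 * layout, then line_info, then apply any CO-RE relocations.
 */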
int bpf_check_btf_info(struct bpf_verifier_env *env,
		       const union bpf_attr *attr,
		       bpfptr_t uattr)
{
	int err;

	if (!attr->func_info_cnt && !attr->line_info_cnt) {
		if (check_abnormal_return(env))
			return -EINVAL;
		return 0;
	}

	err = check_btf_func(env, attr, uattr);
	if (err)
		return err;

	err = check_btf_line(env, attr, uattr);
	if (err)
		return err;

	err = check_core_relo(env, attr, uattr);
	if (err)
		return err;

	return 0;
}