xref: /linux/tools/lib/bpf/features.c (revision 110d3047a3ec033de00322b1a8068b1215efa97a)
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include <linux/kernel.h>
#include <linux/filter.h>
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_common.h"
#include "libbpf_internal.h"
#include "str_error.h"

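/* Convert a host pointer to the __u64 representation used in bpf(2) attributes. */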
static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64)(unsigned long)ptr;
}

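/* Shared helper for feature probes: a non-negative fd means the probed
 * operation succeeded, so close the fd and report the feature as present.
 */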
int probe_fd(int fd)
{
	if (fd >= 0)
		close(fd);
	return fd >= 0;
}

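/* Detect whether BPF_PROG_LOAD accepts the prog_name attribute by loading a
 * trivial socket filter program with an explicit name set.
 */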
static int probe_kern_prog_name(int token_fd)
{
	const size_t attr_sz = offsetofend(union bpf_attr, prog_name);
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.license = ptr_to_u64("GPL");
	attr.insns = ptr_to_u64(insns);
	attr.insn_cnt = (__u32)ARRAY_SIZE(insns);
	attr.prog_token_fd = token_fd;
	if (token_fd)
		attr.prog_flags |= BPF_F_TOKEN_FD;
	libbpf_strlcpy(attr.prog_name, "libbpf_nametest", sizeof(attr.prog_name));

	/* make sure loading with name works */
	ret = sys_bpf_prog_load(&attr, attr_sz, PROG_LOAD_ATTEMPTS);
	return probe_fd(ret);
}

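/* Detect global data support: create a small ARRAY map and load a program
 * that writes into its value via a BPF_PSEUDO_MAP_VALUE load; the program
 * loads only if the kernel understands direct map value addressing.
 */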
static int probe_kern_global_data(int token_fd)
{
	char *cp, errmsg[STRERR_BUFSIZE];
	struct bpf_insn insns[] = {
		BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
		BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	LIBBPF_OPTS(bpf_map_create_opts, map_opts,
		.token_fd = token_fd,
		.map_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	LIBBPF_OPTS(bpf_prog_load_opts, prog_opts,
		.token_fd = token_fd,
		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	int ret, map, insn_cnt = ARRAY_SIZE(insns);

	map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_global", sizeof(int), 32, 1, &map_opts);
	if (map < 0) {
		ret = -errno;
		cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
		pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
			__func__, cp, -ret);
		return ret;
	}

	insns[0].imm = map;

	ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, &prog_opts);
	close(map);
	return probe_fd(ret);
}

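/* Detect minimal BTF support by loading a raw BTF blob containing a single
 * "int" type.
 */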
static int probe_kern_btf(int token_fd)
{
	static const char strs[] = "\0int";
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

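/* Detect BTF_KIND_FUNC/BTF_KIND_FUNC_PROTO support by loading raw BTF that
 * describes a function taking a single int argument.
 */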
static int probe_kern_btf_func(int token_fd)
{
	static const char strs[] = "\0int\0x\0a";
	/* void x(int a) {} */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
		/* FUNC_PROTO */                                /* [2] */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
		BTF_PARAM_ENC(7, 1),
		/* FUNC x */                                    /* [3] */
		BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

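/* Same as above, but the function is encoded with BTF_FUNC_GLOBAL linkage. */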
static int probe_kern_btf_func_global(int token_fd)
{
	static const char strs[] = "\0int\0x\0a";
	/* static void x(int a) {} */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
		/* FUNC_PROTO */                                /* [2] */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
		BTF_PARAM_ENC(7, 1),
		/* FUNC x BTF_FUNC_GLOBAL */                    /* [3] */
		BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 2),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

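/* Detect BTF_KIND_VAR and BTF_KIND_DATASEC support, which libbpf needs in
 * order to describe global variables.
 */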
static int probe_kern_btf_datasec(int token_fd)
{
	static const char strs[] = "\0x\0.data";
	/* static int a; */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
		/* VAR x */                                     /* [2] */
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
		BTF_VAR_STATIC,
		/* DATASEC val */                               /* [3] */
		BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
		BTF_VAR_SECINFO_ENC(2, 0, 4),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

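/* Detect BTF_KIND_FLOAT support. */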
static int probe_kern_btf_float(int token_fd)
{
	static const char strs[] = "\0float";
	__u32 types[] = {
		/* float */
		BTF_TYPE_FLOAT_ENC(1, 4),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

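/* Detect BTF_KIND_DECL_TAG support: a decl tag attached to a static variable. */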
static int probe_kern_btf_decl_tag(int token_fd)
{
	static const char strs[] = "\0tag";
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
		/* VAR x */                                     /* [2] */
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
		BTF_VAR_STATIC,
		/* attr */
		BTF_TYPE_DECL_TAG_ENC(1, 2, -1),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

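/* Detect BTF_KIND_TYPE_TAG support: a type tag wrapping an int, referenced
 * through a pointer.
 */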
static int probe_kern_btf_type_tag(int token_fd)
{
	static const char strs[] = "\0tag";
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),		/* [1] */
		/* attr */
		BTF_TYPE_TYPE_TAG_ENC(1, 1),				/* [2] */
		/* ptr */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2),	/* [3] */
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

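/* Detect support for mmap()-able ARRAY maps (BPF_F_MMAPABLE). */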
static int probe_kern_array_mmap(int token_fd)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts,
		.map_flags = BPF_F_MMAPABLE | (token_fd ? BPF_F_TOKEN_FD : 0),
		.token_fd = token_fd,
	);
	int fd;

	fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_mmap", sizeof(int), sizeof(int), 1, &opts);
	return probe_fd(fd);
}

static int probe_kern_exp_attach_type(int token_fd)
{
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE,
		.token_fd = token_fd,
		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	int fd, insn_cnt = ARRAY_SIZE(insns);

	/* use any valid combination of program type and (optional)
	 * non-zero expected attach type (i.e., not a BPF_CGROUP_INET_INGRESS)
	 * to see if kernel supports expected_attach_type field for
	 * BPF_PROG_LOAD command
	 */
	fd = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK, NULL, "GPL", insns, insn_cnt, &opts);
	return probe_fd(fd);
}

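/* Detect the bpf_probe_read_kernel() helper: load a tracepoint program that
 * calls it with an 8-byte stack destination; the load succeeds only if the
 * verifier knows about the helper.
 */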
static int probe_kern_probe_read_kernel(int token_fd)
{
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.token_fd = token_fd,
		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	struct bpf_insn insns[] = {
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),	/* r1 = r10 (fp) */
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),	/* r1 += -8 */
		BPF_MOV64_IMM(BPF_REG_2, 8),		/* r2 = 8 */
		BPF_MOV64_IMM(BPF_REG_3, 0),		/* r3 = 0 */
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_probe_read_kernel),
		BPF_EXIT_INSN(),
	};
	int fd, insn_cnt = ARRAY_SIZE(insns);

	fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, &opts);
	return probe_fd(fd);
}

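/* Detect the BPF_PROG_BIND_MAP command by binding a freshly created ARRAY map
 * to a trivial socket filter program.
 */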
static int probe_prog_bind_map(int token_fd)
{
	char *cp, errmsg[STRERR_BUFSIZE];
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	LIBBPF_OPTS(bpf_map_create_opts, map_opts,
		.token_fd = token_fd,
		.map_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	LIBBPF_OPTS(bpf_prog_load_opts, prog_opts,
		.token_fd = token_fd,
		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	int ret, map, prog, insn_cnt = ARRAY_SIZE(insns);

	map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_det_bind", sizeof(int), 32, 1, &map_opts);
	if (map < 0) {
		ret = -errno;
		cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
		pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
			__func__, cp, -ret);
		return ret;
	}

	prog = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, &prog_opts);
	if (prog < 0) {
		close(map);
		return 0;
	}

	ret = bpf_prog_bind_map(prog, map, NULL);

	close(map);
	close(prog);

	return ret >= 0;
}

static int probe_module_btf(int token_fd)
{
	static const char strs[] = "\0int";
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
	};
	struct bpf_btf_info info;
	__u32 len = sizeof(info);
	char name[16];
	int fd, err;

	fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs), token_fd);
	if (fd < 0)
		return 0; /* BTF not supported at all */

	memset(&info, 0, sizeof(info));
	info.name = ptr_to_u64(name);
	info.name_len = sizeof(name);

	/* check that BPF_OBJ_GET_INFO_BY_FD supports specifying name pointer;
	 * kernel's module BTF support coincides with support for
	 * name/name_len fields in struct bpf_btf_info.
	 */
	err = bpf_btf_get_info_by_fd(fd, &info, &len);
	close(fd);
	return !err;
}

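/* Detect BPF perf link support: bpf_link_create() with BPF_PERF_EVENT. */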
static int probe_perf_link(int token_fd)
{
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.token_fd = token_fd,
		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	int prog_fd, link_fd, err;

	prog_fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL",
				insns, ARRAY_SIZE(insns), &opts);
	if (prog_fd < 0)
		return -errno;

	/* use invalid perf_event FD to get EBADF, if link is supported;
	 * otherwise EINVAL should be returned
	 */
	link_fd = bpf_link_create(prog_fd, -1, BPF_PERF_EVENT, NULL);
	err = -errno; /* close() can clobber errno */

	if (link_fd >= 0)
		close(link_fd);
	close(prog_fd);

	return link_fd < 0 && err == -EBADF;
}

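/* Detect multi-uprobe link support: bpf_link_create() with
 * BPF_TRACE_UPROBE_MULTI.
 */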
static int probe_uprobe_multi_link(int token_fd)
{
	LIBBPF_OPTS(bpf_prog_load_opts, load_opts,
		.expected_attach_type = BPF_TRACE_UPROBE_MULTI,
		.token_fd = token_fd,
		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	LIBBPF_OPTS(bpf_link_create_opts, link_opts);
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	int prog_fd, link_fd, err;
	unsigned long offset = 0;

	prog_fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, NULL, "GPL",
				insns, ARRAY_SIZE(insns), &load_opts);
	if (prog_fd < 0)
		return -errno;

	/* Creating uprobe in '/' binary should fail with -EBADF. */
	link_opts.uprobe_multi.path = "/";
	link_opts.uprobe_multi.offsets = &offset;
	link_opts.uprobe_multi.cnt = 1;

	link_fd = bpf_link_create(prog_fd, -1, BPF_TRACE_UPROBE_MULTI, &link_opts);
	err = -errno; /* close() can clobber errno */

	if (link_fd >= 0)
		close(link_fd);
	close(prog_fd);

	return link_fd < 0 && err == -EBADF;
}

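/* Detect BPF cookie support by loading a tracepoint program that calls
 * bpf_get_attach_cookie().
 */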
static int probe_kern_bpf_cookie(int token_fd)
{
	struct bpf_insn insns[] = {
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_attach_cookie),
		BPF_EXIT_INSN(),
	};
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.token_fd = token_fd,
		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	int ret, insn_cnt = ARRAY_SIZE(insns);

	ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, &opts);
	return probe_fd(ret);
}

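/* Detect BTF_KIND_ENUM64 support by loading raw BTF containing an empty
 * 8-byte 64-bit enum.
 */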
static int probe_kern_btf_enum64(int token_fd)
{
	static const char strs[] = "\0enum64";
	__u32 types[] = {
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 0), 8),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

typedef int (*feature_probe_fn)(int /* token_fd */);

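/* Process-wide feature cache, used when a caller doesn't supply its own. */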
static struct kern_feature_cache feature_cache;

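/* Probe table indexed by enum kern_feature_id: each entry pairs a
 * human-readable description with the probe function for that feature.
 */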
static struct kern_feature_desc {
	const char *desc;
	feature_probe_fn probe;
} feature_probes[__FEAT_CNT] = {
	[FEAT_PROG_NAME] = {
		"BPF program name", probe_kern_prog_name,
	},
	[FEAT_GLOBAL_DATA] = {
		"global variables", probe_kern_global_data,
	},
	[FEAT_BTF] = {
		"minimal BTF", probe_kern_btf,
	},
	[FEAT_BTF_FUNC] = {
		"BTF functions", probe_kern_btf_func,
	},
	[FEAT_BTF_GLOBAL_FUNC] = {
		"BTF global function", probe_kern_btf_func_global,
	},
	[FEAT_BTF_DATASEC] = {
		"BTF data section and variable", probe_kern_btf_datasec,
	},
	[FEAT_ARRAY_MMAP] = {
		"ARRAY map mmap()", probe_kern_array_mmap,
	},
	[FEAT_EXP_ATTACH_TYPE] = {
		"BPF_PROG_LOAD expected_attach_type attribute",
		probe_kern_exp_attach_type,
	},
	[FEAT_PROBE_READ_KERN] = {
		"bpf_probe_read_kernel() helper", probe_kern_probe_read_kernel,
	},
	[FEAT_PROG_BIND_MAP] = {
		"BPF_PROG_BIND_MAP support", probe_prog_bind_map,
	},
	[FEAT_MODULE_BTF] = {
		"module BTF support", probe_module_btf,
	},
	[FEAT_BTF_FLOAT] = {
		"BTF_KIND_FLOAT support", probe_kern_btf_float,
	},
	[FEAT_PERF_LINK] = {
		"BPF perf link support", probe_perf_link,
	},
	[FEAT_BTF_DECL_TAG] = {
		"BTF_KIND_DECL_TAG support", probe_kern_btf_decl_tag,
	},
	[FEAT_BTF_TYPE_TAG] = {
		"BTF_KIND_TYPE_TAG support", probe_kern_btf_type_tag,
	},
	[FEAT_MEMCG_ACCOUNT] = {
		"memcg-based memory accounting", probe_memcg_account,
	},
	[FEAT_BPF_COOKIE] = {
		"BPF cookie support", probe_kern_bpf_cookie,
	},
	[FEAT_BTF_ENUM64] = {
		"BTF_KIND_ENUM64 support", probe_kern_btf_enum64,
	},
	[FEAT_SYSCALL_WRAPPER] = {
		"Kernel using syscall wrapper", probe_kern_syscall_wrapper,
	},
	[FEAT_UPROBE_MULTI_LINK] = {
		"BPF multi-uprobe link support", probe_uprobe_multi_link,
	},
};

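/* Check (and lazily probe) whether the running kernel supports feature
 * feat_id, memoizing the result in @cache, or in the global feature_cache
 * when @cache is NULL. An illustrative call site:
 *
 *	if (feat_supported(NULL, FEAT_BTF))
 *		... BTF can be loaded into the kernel ...
 */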
bool feat_supported(struct kern_feature_cache *cache, enum kern_feature_id feat_id)
{
	struct kern_feature_desc *feat = &feature_probes[feat_id];
	int ret;

	/* assume global feature cache, unless custom one is provided */
	if (!cache)
		cache = &feature_cache;

	if (READ_ONCE(cache->res[feat_id]) == FEAT_UNKNOWN) {
		ret = feat->probe(cache->token_fd);
		if (ret > 0) {
			WRITE_ONCE(cache->res[feat_id], FEAT_SUPPORTED);
		} else if (ret == 0) {
			WRITE_ONCE(cache->res[feat_id], FEAT_MISSING);
		} else {
			pr_warn("Detection of kernel %s support failed: %d\n", feat->desc, ret);
			WRITE_ONCE(cache->res[feat_id], FEAT_MISSING);
		}
	}

	return READ_ONCE(cache->res[feat_id]) == FEAT_SUPPORTED;
}