xref: /linux/tools/lib/bpf/features.c (revision 55a42f78ffd386e01a5404419f8c5ded7db70a21)
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include <linux/kernel.h>
#include <linux/filter.h>
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_common.h"
#include "libbpf_internal.h"

static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64)(unsigned long)ptr;
}

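/* Shared helper for the probes below: close @fd if it's a valid FD and
 * collapse the result into 1 (feature supported) or 0 (not supported).
 */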
int probe_fd(int fd)
{
	if (fd >= 0)
		close(fd);
	return fd >= 0;
}

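/* Probe for BPF_PROG_LOAD support of the prog_name attribute: load a trivial
 * named socket filter program; kernels predating prog_name reject the
 * unknown, non-zero attribute bytes.
 */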
static int probe_kern_prog_name(int token_fd)
{
	const size_t attr_sz = offsetofend(union bpf_attr, prog_token_fd);
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.license = ptr_to_u64("GPL");
	attr.insns = ptr_to_u64(insns);
	attr.insn_cnt = (__u32)ARRAY_SIZE(insns);
	attr.prog_token_fd = token_fd;
	if (token_fd)
		attr.prog_flags |= BPF_F_TOKEN_FD;
	libbpf_strlcpy(attr.prog_name, "libbpf_nametest", sizeof(attr.prog_name));

	/* make sure loading with name works */
	ret = sys_bpf_prog_load(&attr, attr_sz, PROG_LOAD_ATTEMPTS);
	return probe_fd(ret);
}

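/* Global data is built on top of writable map values: create a small ARRAY
 * map and load a program that stores into its value via a direct map value
 * load (BPF_LD_MAP_VALUE, i.e. a BPF_PSEUDO_MAP_VALUE ld_imm64), which is
 * what global data sections rely on.
 */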
static int probe_kern_global_data(int token_fd)
{
	struct bpf_insn insns[] = {
		BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
		BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	LIBBPF_OPTS(bpf_map_create_opts, map_opts,
		.token_fd = token_fd,
		.map_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	LIBBPF_OPTS(bpf_prog_load_opts, prog_opts,
		.token_fd = token_fd,
		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	int ret, map, insn_cnt = ARRAY_SIZE(insns);

	map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_global", sizeof(int), 32, 1, &map_opts);
	if (map < 0) {
		ret = -errno;
		pr_warn("Error in %s(): %s. Couldn't create simple array map.\n",
			__func__, errstr(ret));
		return ret;
	}

	insns[0].imm = map;

	ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, &prog_opts);
	close(map);
	return probe_fd(ret);
}

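/* The probe_kern_btf*() probes below all follow the same pattern: build a
 * minimal raw BTF blob containing just the construct under test and try to
 * load it; getting a valid FD back means the kernel recognizes that BTF
 * kind/feature.
 */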
static int probe_kern_btf(int token_fd)
{
	static const char strs[] = "\0int";
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

static int probe_kern_btf_func(int token_fd)
{
	static const char strs[] = "\0int\0x\0a";
	/* void x(int a) {} */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
		/* FUNC_PROTO */                                /* [2] */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
		BTF_PARAM_ENC(7, 1),
		/* FUNC x */                                    /* [3] */
		BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

static int probe_kern_btf_func_global(int token_fd)
{
	static const char strs[] = "\0int\0x\0a";
	/* non-static (global) void x(int a) {} */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
		/* FUNC_PROTO */                                /* [2] */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
		BTF_PARAM_ENC(7, 1),
		/* FUNC x BTF_FUNC_GLOBAL */                    /* [3] */
		BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 2),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

static int probe_kern_btf_datasec(int token_fd)
{
	static const char strs[] = "\0x\0.data";
	/* static int x; */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
		/* VAR x */                                     /* [2] */
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
		BTF_VAR_STATIC,
		/* DATASEC .data */                             /* [3] */
		BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
		BTF_VAR_SECINFO_ENC(2, 0, 4),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

static int probe_kern_btf_qmark_datasec(int token_fd)
{
	static const char strs[] = "\0x\0?.data";
	/* static int x; */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
		/* VAR x */                                     /* [2] */
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
		BTF_VAR_STATIC,
		/* DATASEC ?.data */                            /* [3] */
		BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
		BTF_VAR_SECINFO_ENC(2, 0, 4),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

static int probe_kern_btf_float(int token_fd)
{
	static const char strs[] = "\0float";
	__u32 types[] = {
		/* float */
		BTF_TYPE_FLOAT_ENC(1, 4),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

static int probe_kern_btf_decl_tag(int token_fd)
{
	static const char strs[] = "\0tag";
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
		/* VAR tag */                                   /* [2] */
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
		BTF_VAR_STATIC,
		/* DECL_TAG 'tag' -> VAR */
		BTF_TYPE_DECL_TAG_ENC(1, 2, -1),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

static int probe_kern_btf_type_tag(int token_fd)
{
	static const char strs[] = "\0tag";
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),		/* [1] */
		/* TYPE_TAG 'tag' -> int */
		BTF_TYPE_TYPE_TAG_ENC(1, 1),				/* [2] */
		/* ptr */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2),	/* [3] */
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

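/* BPF_F_MMAPABLE is rejected at map creation time by kernels that can't
 * mmap() ARRAY maps, so successful creation is a sufficient probe.
 */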
static int probe_kern_array_mmap(int token_fd)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts,
		.map_flags = BPF_F_MMAPABLE | (token_fd ? BPF_F_TOKEN_FD : 0),
		.token_fd = token_fd,
	);
	int fd;

	fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_mmap", sizeof(int), sizeof(int), 1, &opts);
	return probe_fd(fd);
}

static int probe_kern_exp_attach_type(int token_fd)
{
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE,
		.token_fd = token_fd,
		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	int fd, insn_cnt = ARRAY_SIZE(insns);

	/* use any valid combination of program type and (optional) non-zero
	 * expected attach type (i.e., not BPF_CGROUP_INET_INGRESS, which is
	 * zero) to see if the kernel supports the expected_attach_type field
	 * of the BPF_PROG_LOAD command
	 */
	fd = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK, NULL, "GPL", insns, insn_cnt, &opts);
	return probe_fd(fd);
}

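/* The verifier rejects programs calling unknown helpers, so a successful load
 * of this tracepoint program means bpf_probe_read_kernel() is available.
 */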
static int probe_kern_probe_read_kernel(int token_fd)
{
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.token_fd = token_fd,
		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	struct bpf_insn insns[] = {
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),	/* r1 = r10 (fp) */
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),	/* r1 += -8 */
		BPF_MOV64_IMM(BPF_REG_2, 8),		/* r2 = 8 */
		BPF_MOV64_IMM(BPF_REG_3, 0),		/* r3 = 0 */
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_probe_read_kernel),
		BPF_EXIT_INSN(),
	};
	int fd, insn_cnt = ARRAY_SIZE(insns);

	fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, &opts);
	return probe_fd(fd);
}

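/* Probe the BPF_PROG_BIND_MAP command by binding a trivial map to a trivial
 * program; note that a failed program load is reported as "not supported"
 * (0), while a failed map creation is reported as an error.
 */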
static int probe_prog_bind_map(int token_fd)
{
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	LIBBPF_OPTS(bpf_map_create_opts, map_opts,
		.token_fd = token_fd,
		.map_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	LIBBPF_OPTS(bpf_prog_load_opts, prog_opts,
		.token_fd = token_fd,
		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	int ret, map, prog, insn_cnt = ARRAY_SIZE(insns);

	map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_det_bind", sizeof(int), 32, 1, &map_opts);
	if (map < 0) {
		ret = -errno;
		pr_warn("Error in %s(): %s. Couldn't create simple array map.\n",
			__func__, errstr(ret));
		return ret;
	}

	prog = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, &prog_opts);
	if (prog < 0) {
		close(map);
		return 0;
	}

	ret = bpf_prog_bind_map(prog, map, NULL);

	close(map);
	close(prog);

	return ret >= 0;
}

static int probe_module_btf(int token_fd)
{
	static const char strs[] = "\0int";
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
	};
	struct bpf_btf_info info;
	__u32 len = sizeof(info);
	char name[16];
	int fd, err;

	fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs), token_fd);
	if (fd < 0)
		return 0; /* BTF not supported at all */

	memset(&info, 0, sizeof(info));
	info.name = ptr_to_u64(name);
	info.name_len = sizeof(name);

	/* check that BPF_OBJ_GET_INFO_BY_FD supports specifying name pointer;
	 * kernel's module BTF support coincides with support for
	 * name/name_len fields in struct bpf_btf_info.
	 */
	err = bpf_btf_get_info_by_fd(fd, &info, &len);
	close(fd);
	return !err;
}

static int probe_perf_link(int token_fd)
{
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.token_fd = token_fd,
		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	int prog_fd, link_fd, err;

	prog_fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL",
				insns, ARRAY_SIZE(insns), &opts);
	if (prog_fd < 0)
		return -errno;

	/* use invalid perf_event FD to get EBADF, if link is supported;
	 * otherwise EINVAL should be returned
	 */
	link_fd = bpf_link_create(prog_fd, -1, BPF_PERF_EVENT, NULL);
	err = -errno; /* close() can clobber errno */

	if (link_fd >= 0)
		close(link_fd);
	close(prog_fd);

	return link_fd < 0 && err == -EBADF;
}

static int probe_uprobe_multi_link(int token_fd)
{
	LIBBPF_OPTS(bpf_prog_load_opts, load_opts,
		.expected_attach_type = BPF_TRACE_UPROBE_MULTI,
		.token_fd = token_fd,
		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	LIBBPF_OPTS(bpf_link_create_opts, link_opts);
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	int prog_fd, link_fd, err;
	unsigned long offset = 0;

	prog_fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, NULL, "GPL",
				insns, ARRAY_SIZE(insns), &load_opts);
	if (prog_fd < 0)
		return -errno;

	/* Creating uprobe in '/' binary should fail with -EBADF. */
	link_opts.uprobe_multi.path = "/";
	link_opts.uprobe_multi.offsets = &offset;
	link_opts.uprobe_multi.cnt = 1;

	link_fd = bpf_link_create(prog_fd, -1, BPF_TRACE_UPROBE_MULTI, &link_opts);
	err = -errno; /* close() can clobber errno */

	if (link_fd >= 0 || err != -EBADF) {
		if (link_fd >= 0)
			close(link_fd);
		close(prog_fd);
		return 0;
	}

	/* Initial multi-uprobe support in the kernel didn't handle PID
	 * filtering correctly (it was doing thread filtering, not process
	 * filtering). So now we'll detect whether PID filtering logic was
	 * fixed and, if not, we'll pretend multi-uprobes are not supported at
	 * all. Multi-uprobes are used in USDT attachment logic, and we need
	 * to be conservative here, because multi-uprobe selection happens
	 * early at load time, while the use of PID filtering is only known
	 * later, at attachment time, at which point it's too late to undo the
	 * multi-uprobe selection.
	 *
	 * Creating a uprobe with pid == -1 for the (invalid) '/' binary will
	 * fail early with -EINVAL on kernels with fixed PID filtering logic;
	 * otherwise -ESRCH would be returned if we passed a correct binary
	 * path (but we'll just get -EBADF here, of course).
	 */
	link_opts.uprobe_multi.pid = -1; /* invalid PID */
	link_opts.uprobe_multi.path = "/"; /* invalid path */
	link_opts.uprobe_multi.offsets = &offset;
	link_opts.uprobe_multi.cnt = 1;

	link_fd = bpf_link_create(prog_fd, -1, BPF_TRACE_UPROBE_MULTI, &link_opts);
	err = -errno; /* close() can clobber errno */

	if (link_fd >= 0)
		close(link_fd);
	close(prog_fd);

	return link_fd < 0 && err == -EINVAL;
}

static int probe_kern_bpf_cookie(int token_fd)
{
	struct bpf_insn insns[] = {
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_attach_cookie),
		BPF_EXIT_INSN(),
	};
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.token_fd = token_fd,
		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	int ret, insn_cnt = ARRAY_SIZE(insns);

	ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, &opts);
	return probe_fd(ret);
}

static int probe_kern_btf_enum64(int token_fd)
{
	static const char strs[] = "\0enum64";
	__u32 types[] = {
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 0), 8),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

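/* Probe kernel-side handling of the "arg:ctx" decl_tag (__arg_ctx) on global
 * subprog arguments: the subprog below calls bpf_get_func_ip(), which the
 * verifier only accepts on a PTR_TO_CTX argument, so the program loads only
 * if the kernel honors the tag and treats the tagged void * as a context
 * pointer.
 */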
static int probe_kern_arg_ctx_tag(int token_fd)
{
	static const char strs[] = "\0a\0b\0arg:ctx\0";
	const __u32 types[] = {
		/* [1] INT */
		BTF_TYPE_INT_ENC(1 /* "a" */, BTF_INT_SIGNED, 0, 32, 4),
		/* [2] PTR -> VOID */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 0),
		/* [3] FUNC_PROTO `int(void *a)` */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 1),
		BTF_PARAM_ENC(1 /* "a" */, 2),
		/* [4] FUNC 'a' -> FUNC_PROTO (main prog) */
		BTF_TYPE_ENC(1 /* "a" */, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 3),
		/* [5] FUNC_PROTO `int(void *b __arg_ctx)` */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 1),
		BTF_PARAM_ENC(3 /* "b" */, 2),
		/* [6] FUNC 'b' -> FUNC_PROTO (subprog) */
		BTF_TYPE_ENC(3 /* "b" */, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 5),
		/* [7] DECL_TAG 'arg:ctx' -> func 'b' arg 'b' */
		BTF_TYPE_DECL_TAG_ENC(5 /* "arg:ctx" */, 6, 0),
	};
	const struct bpf_insn insns[] = {
		/* main prog */
		BPF_CALL_REL(+1),
		BPF_EXIT_INSN(),
		/* global subprog */
		BPF_EMIT_CALL(BPF_FUNC_get_func_ip), /* needs PTR_TO_CTX */
		BPF_EXIT_INSN(),
	};
	const struct bpf_func_info_min func_infos[] = {
		{ 0, 4 }, /* main prog -> FUNC 'a' */
		{ 2, 6 }, /* subprog -> FUNC 'b' */
	};
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.token_fd = token_fd,
		.prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	int prog_fd, btf_fd, insn_cnt = ARRAY_SIZE(insns);

	btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs), token_fd);
	if (btf_fd < 0)
		return 0;

	opts.prog_btf_fd = btf_fd;
	opts.func_info = &func_infos;
	opts.func_info_cnt = ARRAY_SIZE(func_infos);
	opts.func_info_rec_size = sizeof(func_infos[0]);

	prog_fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, "det_arg_ctx",
				"GPL", insns, insn_cnt, &opts);
	close(btf_fd);

	return probe_fd(prog_fd);
}

typedef int (*feature_probe_fn)(int /* token_fd */);

static struct kern_feature_cache feature_cache;

static struct kern_feature_desc {
	const char *desc;
	feature_probe_fn probe;
} feature_probes[__FEAT_CNT] = {
	[FEAT_PROG_NAME] = {
		"BPF program name", probe_kern_prog_name,
	},
	[FEAT_GLOBAL_DATA] = {
		"global variables", probe_kern_global_data,
	},
	[FEAT_BTF] = {
		"minimal BTF", probe_kern_btf,
	},
	[FEAT_BTF_FUNC] = {
		"BTF functions", probe_kern_btf_func,
	},
	[FEAT_BTF_GLOBAL_FUNC] = {
		"BTF global function", probe_kern_btf_func_global,
	},
	[FEAT_BTF_DATASEC] = {
		"BTF data section and variable", probe_kern_btf_datasec,
	},
	[FEAT_ARRAY_MMAP] = {
		"ARRAY map mmap()", probe_kern_array_mmap,
	},
	[FEAT_EXP_ATTACH_TYPE] = {
		"BPF_PROG_LOAD expected_attach_type attribute",
		probe_kern_exp_attach_type,
	},
	[FEAT_PROBE_READ_KERN] = {
		"bpf_probe_read_kernel() helper", probe_kern_probe_read_kernel,
	},
	[FEAT_PROG_BIND_MAP] = {
		"BPF_PROG_BIND_MAP support", probe_prog_bind_map,
	},
	[FEAT_MODULE_BTF] = {
		"module BTF support", probe_module_btf,
	},
	[FEAT_BTF_FLOAT] = {
		"BTF_KIND_FLOAT support", probe_kern_btf_float,
	},
	[FEAT_PERF_LINK] = {
		"BPF perf link support", probe_perf_link,
	},
	[FEAT_BTF_DECL_TAG] = {
		"BTF_KIND_DECL_TAG support", probe_kern_btf_decl_tag,
	},
	[FEAT_BTF_TYPE_TAG] = {
		"BTF_KIND_TYPE_TAG support", probe_kern_btf_type_tag,
	},
	[FEAT_MEMCG_ACCOUNT] = {
		"memcg-based memory accounting", probe_memcg_account,
	},
	[FEAT_BPF_COOKIE] = {
		"BPF cookie support", probe_kern_bpf_cookie,
	},
	[FEAT_BTF_ENUM64] = {
		"BTF_KIND_ENUM64 support", probe_kern_btf_enum64,
	},
	[FEAT_SYSCALL_WRAPPER] = {
		"Kernel using syscall wrapper", probe_kern_syscall_wrapper,
	},
	[FEAT_UPROBE_MULTI_LINK] = {
		"BPF multi-uprobe link support", probe_uprobe_multi_link,
	},
	[FEAT_ARG_CTX_TAG] = {
		"kernel-side __arg_ctx tag", probe_kern_arg_ctx_tag,
	},
	[FEAT_BTF_QMARK_DATASEC] = {
		"BTF DATASEC names starting from '?'", probe_kern_btf_qmark_datasec,
	},
};

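/* Probe results are computed lazily and memoized per cache: the first
 * feat_supported() call for a given feature runs its probe (using the cache's
 * token_fd) and records FEAT_SUPPORTED or FEAT_MISSING (probe errors are
 * logged and treated as missing); subsequent calls just read the cached
 * value. A rough usage sketch (the emit/sanitize helpers are hypothetical;
 * callers inside libbpf typically go through the kernel_supports() wrapper,
 * which passes along the bpf_object's feature cache):
 *
 *	if (kernel_supports(obj, FEAT_BTF_FUNC))
 *		emit_func_info();	// keep BTF_KIND_FUNC info as-is
 *	else
 *		sanitize_btf();		// downgrade BTF for older kernels
 */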
bool feat_supported(struct kern_feature_cache *cache, enum kern_feature_id feat_id)
{
	struct kern_feature_desc *feat = &feature_probes[feat_id];
	int ret;

	/* assume global feature cache, unless custom one is provided */
	if (!cache)
		cache = &feature_cache;

	if (READ_ONCE(cache->res[feat_id]) == FEAT_UNKNOWN) {
		ret = feat->probe(cache->token_fd);
		if (ret > 0) {
			WRITE_ONCE(cache->res[feat_id], FEAT_SUPPORTED);
		} else if (ret == 0) {
			WRITE_ONCE(cache->res[feat_id], FEAT_MISSING);
		} else {
			pr_warn("Detection of kernel %s support failed: %s\n",
				feat->desc, errstr(ret));
			WRITE_ONCE(cache->res[feat_id], FEAT_MISSING);
		}
	}

	return READ_ONCE(cache->res[feat_id]) == FEAT_SUPPORTED;
}