xref: /linux/tools/lib/bpf/features.c (revision 9d027a35a52a4ea9400390ef4414e4e9dcd54193)
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include <linux/kernel.h>
#include <linux/filter.h>
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_common.h"
#include "libbpf_internal.h"
#include "str_error.h"

static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64)(unsigned long)ptr;
}

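/* Close a probe-created FD (if valid) and report the probe result as 0/1. */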
static int probe_fd(int fd)
{
	if (fd >= 0)
		close(fd);
	return fd >= 0;
}

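/* Probe kernel support for specifying a program name (prog_name) at
 * BPF_PROG_LOAD time.
 */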
static int probe_kern_prog_name(int token_fd)
{
	const size_t attr_sz = offsetofend(union bpf_attr, prog_name);
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, attr_sz);
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.license = ptr_to_u64("GPL");
	attr.insns = ptr_to_u64(insns);
	attr.insn_cnt = (__u32)ARRAY_SIZE(insns);
	attr.prog_token_fd = token_fd;
	libbpf_strlcpy(attr.prog_name, "libbpf_nametest", sizeof(attr.prog_name));

	/* make sure loading with name works */
	ret = sys_bpf_prog_load(&attr, attr_sz, PROG_LOAD_ATTEMPTS);
	return probe_fd(ret);
}

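/* Probe global data support: load a program that writes into an array map's
 * value through a direct map-value load (BPF_LD_MAP_VALUE), which is how
 * global variables are implemented.
 */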
static int probe_kern_global_data(int token_fd)
{
	char *cp, errmsg[STRERR_BUFSIZE];
	struct bpf_insn insns[] = {
		BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
		BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	LIBBPF_OPTS(bpf_map_create_opts, map_opts, .token_fd = token_fd);
	LIBBPF_OPTS(bpf_prog_load_opts, prog_opts, .token_fd = token_fd);
	int ret, map, insn_cnt = ARRAY_SIZE(insns);

	map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_global", sizeof(int), 32, 1, &map_opts);
	if (map < 0) {
		ret = -errno;
		cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
		pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
			__func__, cp, -ret);
		return ret;
	}

	insns[0].imm = map;

	ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, &prog_opts);
	close(map);
	return probe_fd(ret);
}

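/* Probe basic kernel BTF support by loading a minimal BTF blob containing a
 * single int type.
 */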
static int probe_kern_btf(int token_fd)
{
	static const char strs[] = "\0int";
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

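/* Probe support for BTF_KIND_FUNC and BTF_KIND_FUNC_PROTO type encodings. */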
static int probe_kern_btf_func(int token_fd)
{
	static const char strs[] = "\0int\0x\0a";
	/* void x(int a) {} */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
		/* FUNC_PROTO */                                /* [2] */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
		BTF_PARAM_ENC(7, 1),
		/* FUNC x */                                    /* [3] */
		BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

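/* Probe support for the BTF_FUNC_GLOBAL function linkage encoding. */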
static int probe_kern_btf_func_global(int token_fd)
{
	static const char strs[] = "\0int\0x\0a";
	/* void x(int a) {} */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
		/* FUNC_PROTO */                                /* [2] */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
		BTF_PARAM_ENC(7, 1),
		/* FUNC x BTF_FUNC_GLOBAL */                    /* [3] */
		BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 2),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

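/* Probe support for BTF_KIND_VAR and BTF_KIND_DATASEC encodings. */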
static int probe_kern_btf_datasec(int token_fd)
{
	static const char strs[] = "\0x\0.data";
	/* static int x; */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
		/* VAR x */                                     /* [2] */
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
		BTF_VAR_STATIC,
		/* DATASEC val */                               /* [3] */
		BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
		BTF_VAR_SECINFO_ENC(2, 0, 4),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

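/* Probe support for the BTF_KIND_FLOAT encoding. */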
static int probe_kern_btf_float(int token_fd)
{
	static const char strs[] = "\0float";
	__u32 types[] = {
		/* float */
		BTF_TYPE_FLOAT_ENC(1, 4),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

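/* Probe support for the BTF_KIND_DECL_TAG encoding. */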
static int probe_kern_btf_decl_tag(int token_fd)
{
	static const char strs[] = "\0tag";
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
		/* VAR x */                                     /* [2] */
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
		BTF_VAR_STATIC,
		/* attr */
		BTF_TYPE_DECL_TAG_ENC(1, 2, -1),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

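/* Probe support for the BTF_KIND_TYPE_TAG encoding. */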
static int probe_kern_btf_type_tag(int token_fd)
{
	static const char strs[] = "\0tag";
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),		/* [1] */
		/* attr */
		BTF_TYPE_TYPE_TAG_ENC(1, 1),				/* [2] */
		/* ptr */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2),	/* [3] */
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

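/* Probe support for mmap()'able ARRAY maps (BPF_F_MMAPABLE flag). */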
static int probe_kern_array_mmap(int token_fd)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts,
		.map_flags = BPF_F_MMAPABLE,
		.token_fd = token_fd,
	);
	int fd;

	fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_mmap", sizeof(int), sizeof(int), 1, &opts);
	return probe_fd(fd);
}

static int probe_kern_exp_attach_type(int token_fd)
{
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE,
		.token_fd = token_fd,
	);
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	int fd, insn_cnt = ARRAY_SIZE(insns);

	/* use any valid combination of program type and (optional)
	 * non-zero expected attach type (i.e., not BPF_CGROUP_INET_INGRESS)
	 * to see if the kernel supports the expected_attach_type field for
	 * the BPF_PROG_LOAD command
	 */
	fd = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK, NULL, "GPL", insns, insn_cnt, &opts);
	return probe_fd(fd);
}

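/* Probe availability of the bpf_probe_read_kernel() helper. */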
static int probe_kern_probe_read_kernel(int token_fd)
{
	LIBBPF_OPTS(bpf_prog_load_opts, opts, .token_fd = token_fd);
	struct bpf_insn insns[] = {
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),	/* r1 = r10 (fp) */
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),	/* r1 += -8 */
		BPF_MOV64_IMM(BPF_REG_2, 8),		/* r2 = 8 */
		BPF_MOV64_IMM(BPF_REG_3, 0),		/* r3 = 0 */
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_probe_read_kernel),
		BPF_EXIT_INSN(),
	};
	int fd, insn_cnt = ARRAY_SIZE(insns);

	fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, &opts);
	return probe_fd(fd);
}

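/* Probe support for the BPF_PROG_BIND_MAP command by binding a trivial array
 * map to a trivial program.
 */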
static int probe_prog_bind_map(int token_fd)
{
	char *cp, errmsg[STRERR_BUFSIZE];
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	LIBBPF_OPTS(bpf_map_create_opts, map_opts, .token_fd = token_fd);
	LIBBPF_OPTS(bpf_prog_load_opts, prog_opts, .token_fd = token_fd);
	int ret, map, prog, insn_cnt = ARRAY_SIZE(insns);

	map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_det_bind", sizeof(int), 32, 1, &map_opts);
	if (map < 0) {
		ret = -errno;
		cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
		pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
			__func__, cp, -ret);
		return ret;
	}

	prog = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, &prog_opts);
	if (prog < 0) {
		close(map);
		return 0;
	}

	ret = bpf_prog_bind_map(prog, map, NULL);

	close(map);
	close(prog);

	return ret >= 0;
}

static int probe_module_btf(int token_fd)
{
	static const char strs[] = "\0int";
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
	};
	struct bpf_btf_info info;
	__u32 len = sizeof(info);
	char name[16];
	int fd, err;

	fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs), token_fd);
	if (fd < 0)
		return 0; /* BTF not supported at all */

	memset(&info, 0, sizeof(info));
	info.name = ptr_to_u64(name);
	info.name_len = sizeof(name);

	/* check that BPF_OBJ_GET_INFO_BY_FD supports specifying name pointer;
	 * kernel's module BTF support coincides with support for
	 * name/name_len fields in struct bpf_btf_info.
	 */
	err = bpf_btf_get_info_by_fd(fd, &info, &len);
	close(fd);
	return !err;
}

static int probe_perf_link(int token_fd)
{
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	LIBBPF_OPTS(bpf_prog_load_opts, opts, .token_fd = token_fd);
	int prog_fd, link_fd, err;

	prog_fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL",
				insns, ARRAY_SIZE(insns), &opts);
	if (prog_fd < 0)
		return -errno;

	/* use an invalid perf_event FD to get EBADF, if link is supported;
	 * otherwise EINVAL should be returned
	 */
	link_fd = bpf_link_create(prog_fd, -1, BPF_PERF_EVENT, NULL);
	err = -errno; /* close() can clobber errno */

	if (link_fd >= 0)
		close(link_fd);
	close(prog_fd);

	return link_fd < 0 && err == -EBADF;
}

static int probe_uprobe_multi_link(int token_fd)
{
	LIBBPF_OPTS(bpf_prog_load_opts, load_opts,
		.expected_attach_type = BPF_TRACE_UPROBE_MULTI,
		.token_fd = token_fd,
	);
	LIBBPF_OPTS(bpf_link_create_opts, link_opts);
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	int prog_fd, link_fd, err;
	unsigned long offset = 0;

	prog_fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, NULL, "GPL",
				insns, ARRAY_SIZE(insns), &load_opts);
	if (prog_fd < 0)
		return -errno;

	/* Creating a uprobe in the '/' binary should fail with -EBADF. */
	link_opts.uprobe_multi.path = "/";
	link_opts.uprobe_multi.offsets = &offset;
	link_opts.uprobe_multi.cnt = 1;

	link_fd = bpf_link_create(prog_fd, -1, BPF_TRACE_UPROBE_MULTI, &link_opts);
	err = -errno; /* close() can clobber errno */

	if (link_fd >= 0)
		close(link_fd);
	close(prog_fd);

	return link_fd < 0 && err == -EBADF;
}

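/* Probe BPF cookie support via the bpf_get_attach_cookie() helper. */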
static int probe_kern_bpf_cookie(int token_fd)
{
	struct bpf_insn insns[] = {
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_attach_cookie),
		BPF_EXIT_INSN(),
	};
	LIBBPF_OPTS(bpf_prog_load_opts, opts, .token_fd = token_fd);
	int ret, insn_cnt = ARRAY_SIZE(insns);

	ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, &opts);
	return probe_fd(ret);
}

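/* Probe support for the BTF_KIND_ENUM64 encoding. */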
static int probe_kern_btf_enum64(int token_fd)
{
	static const char strs[] = "\0enum64";
	__u32 types[] = {
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 0), 8),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}

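/* Registry of feature probes, indexed by enum kern_feature_id. Probe results
 * are cached, either in the global feature_cache or in a caller-provided
 * cache.
 */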
typedef int (*feature_probe_fn)(int /* token_fd */);

static struct kern_feature_cache feature_cache;

static struct kern_feature_desc {
	const char *desc;
	feature_probe_fn probe;
} feature_probes[__FEAT_CNT] = {
	[FEAT_PROG_NAME] = {
		"BPF program name", probe_kern_prog_name,
	},
	[FEAT_GLOBAL_DATA] = {
		"global variables", probe_kern_global_data,
	},
	[FEAT_BTF] = {
		"minimal BTF", probe_kern_btf,
	},
	[FEAT_BTF_FUNC] = {
		"BTF functions", probe_kern_btf_func,
	},
	[FEAT_BTF_GLOBAL_FUNC] = {
		"BTF global function", probe_kern_btf_func_global,
	},
	[FEAT_BTF_DATASEC] = {
		"BTF data section and variable", probe_kern_btf_datasec,
	},
	[FEAT_ARRAY_MMAP] = {
		"ARRAY map mmap()", probe_kern_array_mmap,
	},
	[FEAT_EXP_ATTACH_TYPE] = {
		"BPF_PROG_LOAD expected_attach_type attribute",
		probe_kern_exp_attach_type,
	},
	[FEAT_PROBE_READ_KERN] = {
		"bpf_probe_read_kernel() helper", probe_kern_probe_read_kernel,
	},
	[FEAT_PROG_BIND_MAP] = {
		"BPF_PROG_BIND_MAP support", probe_prog_bind_map,
	},
	[FEAT_MODULE_BTF] = {
		"module BTF support", probe_module_btf,
	},
	[FEAT_BTF_FLOAT] = {
		"BTF_KIND_FLOAT support", probe_kern_btf_float,
	},
	[FEAT_PERF_LINK] = {
		"BPF perf link support", probe_perf_link,
	},
	[FEAT_BTF_DECL_TAG] = {
		"BTF_KIND_DECL_TAG support", probe_kern_btf_decl_tag,
	},
	[FEAT_BTF_TYPE_TAG] = {
		"BTF_KIND_TYPE_TAG support", probe_kern_btf_type_tag,
	},
	[FEAT_MEMCG_ACCOUNT] = {
		"memcg-based memory accounting", probe_memcg_account,
	},
	[FEAT_BPF_COOKIE] = {
		"BPF cookie support", probe_kern_bpf_cookie,
	},
	[FEAT_BTF_ENUM64] = {
		"BTF_KIND_ENUM64 support", probe_kern_btf_enum64,
	},
	[FEAT_SYSCALL_WRAPPER] = {
		"Kernel using syscall wrapper", probe_kern_syscall_wrapper,
	},
	[FEAT_UPROBE_MULTI_LINK] = {
		"BPF multi-uprobe link support", probe_uprobe_multi_link,
	},
};

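/* Check whether a kernel feature is supported; the feature is probed on
 * first use and the result is cached. A probe error is reported and treated
 * as the feature being missing.
 */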
bool feat_supported(struct kern_feature_cache *cache, enum kern_feature_id feat_id)
{
	struct kern_feature_desc *feat = &feature_probes[feat_id];
	int ret;

	/* assume global feature cache, unless custom one is provided */
	if (!cache)
		cache = &feature_cache;

	if (READ_ONCE(cache->res[feat_id]) == FEAT_UNKNOWN) {
		ret = feat->probe(cache->token_fd);
		if (ret > 0) {
			WRITE_ONCE(cache->res[feat_id], FEAT_SUPPORTED);
		} else if (ret == 0) {
			WRITE_ONCE(cache->res[feat_id], FEAT_MISSING);
		} else {
			pr_warn("Detection of kernel %s support failed: %d\n", feat->desc, ret);
			WRITE_ONCE(cache->res[feat_id], FEAT_MISSING);
		}
	}

	return READ_ONCE(cache->res[feat_id]) == FEAT_SUPPORTED;
}