xref: /linux/tools/perf/util/bpf-event.c (revision 7fc7f25419f5a6b09199ba4b5026b94ef184fa79)
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <stdlib.h>
#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/string.h>
#include <internal/lib.h>
#include <symbol/kallsyms.h>
#include "bpf-event.h"
#include "bpf-utils.h"
#include "debug.h"
#include "dso.h"
#include "symbol.h"
#include "machine.h"
#include "env.h"
#include "session.h"
#include "map.h"
#include "evlist.h"
#include "record.h"
#include "util/synthetic-events.h"

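/*
 * Compatibility shims: when perf is built against an older libbpf that does
 * not provide the newer APIs used below, implement them on top of the
 * deprecated ones, silencing the deprecation warnings locally.
 */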
#ifndef HAVE_LIBBPF_BTF__LOAD_FROM_KERNEL_BY_ID
struct btf *btf__load_from_kernel_by_id(__u32 id)
{
	struct btf *btf;
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
	int err = btf__get_from_id(id, &btf);
#pragma GCC diagnostic pop

	return err ? ERR_PTR(err) : btf;
}
#endif

#ifndef HAVE_LIBBPF_BPF_PROG_LOAD
LIBBPF_API int bpf_load_program(enum bpf_prog_type type,
				const struct bpf_insn *insns, size_t insns_cnt,
				const char *license, __u32 kern_version,
				char *log_buf, size_t log_buf_sz);

int bpf_prog_load(enum bpf_prog_type prog_type,
		  const char *prog_name __maybe_unused,
		  const char *license,
		  const struct bpf_insn *insns, size_t insn_cnt,
		  const struct bpf_prog_load_opts *opts)
{
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
	return bpf_load_program(prog_type, insns, insn_cnt, license,
				opts->kern_version, opts->log_buf, opts->log_size);
#pragma GCC diagnostic pop
}
#endif

#ifndef HAVE_LIBBPF_BPF_OBJECT__NEXT_PROGRAM
struct bpf_program *
bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prev)
{
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
	return bpf_program__next(prev, obj);
#pragma GCC diagnostic pop
}
#endif

#ifndef HAVE_LIBBPF_BPF_OBJECT__NEXT_MAP
struct bpf_map *
bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)
{
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
	return bpf_map__next(prev, obj);
#pragma GCC diagnostic pop
}
#endif

#ifndef HAVE_LIBBPF_BTF__RAW_DATA
const void *
btf__raw_data(const struct btf *btf_ro, __u32 *size)
{
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
	return btf__get_raw_data(btf_ro, size);
#pragma GCC diagnostic pop
}
#endif

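/*
 * Format @len bytes of @data as lowercase hex into @buf, returning the number
 * of characters written.
 */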
static int snprintf_hex(char *buf, size_t size, unsigned char *data, size_t len)
{
	int ret = 0;
	size_t i;

	for (i = 0; i < len; i++)
		ret += snprintf(buf + ret, size - ret, "%02x", data[i]);
	return ret;
}

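/*
 * Handle PERF_BPF_EVENT_PROG_LOAD: look up the bpf_prog_info saved in the
 * perf_env and mark the kernel maps covering the program's JITed ksyms as
 * BPF program DSOs, so they can be annotated later.
 */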
static int machine__process_bpf_event_load(struct machine *machine,
					   union perf_event *event,
					   struct perf_sample *sample __maybe_unused)
{
	struct bpf_prog_info_node *info_node;
	struct perf_env *env = machine->env;
	struct perf_bpil *info_linear;
	int id = event->bpf.id;
	unsigned int i;

	/* perf-record, no need to handle bpf-event */
	if (env == NULL)
		return 0;

	info_node = perf_env__find_bpf_prog_info(env, id);
	if (!info_node)
		return 0;
	info_linear = info_node->info_linear;

	for (i = 0; i < info_linear->info.nr_jited_ksyms; i++) {
		u64 *addrs = (u64 *)(uintptr_t)(info_linear->info.jited_ksyms);
		u64 addr = addrs[i];
		struct map *map = maps__find(machine__kernel_maps(machine), addr);

		if (map) {
			map->dso->binary_type = DSO_BINARY_TYPE__BPF_PROG_INFO;
			map->dso->bpf_prog.id = id;
			map->dso->bpf_prog.sub_id = i;
			map->dso->bpf_prog.env = env;
		}
	}
	return 0;
}

int machine__process_bpf(struct machine *machine, union perf_event *event,
			 struct perf_sample *sample)
{
	if (dump_trace)
		perf_event__fprintf_bpf(event, stdout);

	switch (event->bpf.type) {
	case PERF_BPF_EVENT_PROG_LOAD:
		return machine__process_bpf_event_load(machine, event, sample);

	case PERF_BPF_EVENT_PROG_UNLOAD:
		/*
		 * Do not free the bpf_prog_info and btf of the program here,
		 * as annotation still needs them. They will be freed at
		 * the end of the session.
		 */
		break;
	default:
		pr_debug("unexpected bpf event type of %d\n", event->bpf.type);
		break;
	}
	return 0;
}

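/*
 * Copy the raw BTF data of @btf into a newly allocated btf_node and insert it
 * into @env. Returns 0 on success, -1 on allocation failure or if @btf_id is
 * already present.
 */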
static int perf_env__fetch_btf(struct perf_env *env,
			       u32 btf_id,
			       struct btf *btf)
{
	struct btf_node *node;
	u32 data_size;
	const void *data;

	data = btf__raw_data(btf, &data_size);

	node = malloc(data_size + sizeof(struct btf_node));
	if (!node)
		return -1;

	node->id = btf_id;
	node->data_size = data_size;
	memcpy(node->data, data, data_size);

	if (!perf_env__insert_btf(env, node)) {
		/* Insertion failed because of a duplicate. */
		free(node);
		return -1;
	}
	return 0;
}

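/*
 * Build the ksymbol name "bpf_prog_<tag>[_<name>]" for sub program @sub_id of
 * @info in @buf. The short name comes from BTF func info when available, from
 * info->name for a program without sub programs, or falls back to "F".
 */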
static int synthesize_bpf_prog_name(char *buf, int size,
				    struct bpf_prog_info *info,
				    struct btf *btf,
				    u32 sub_id)
{
	u8 (*prog_tags)[BPF_TAG_SIZE] = (void *)(uintptr_t)(info->prog_tags);
	void *func_infos = (void *)(uintptr_t)(info->func_info);
	u32 sub_prog_cnt = info->nr_jited_ksyms;
	const struct bpf_func_info *finfo;
	const char *short_name = NULL;
	const struct btf_type *t;
	int name_len;

	name_len = snprintf(buf, size, "bpf_prog_");
	name_len += snprintf_hex(buf + name_len, size - name_len,
				 prog_tags[sub_id], BPF_TAG_SIZE);
	if (btf) {
		finfo = func_infos + sub_id * info->func_info_rec_size;
		t = btf__type_by_id(btf, finfo->type_id);
		short_name = btf__name_by_offset(btf, t->name_off);
	} else if (sub_id == 0 && sub_prog_cnt == 1) {
		/* no subprog */
		if (info->name[0])
			short_name = info->name;
	} else
		short_name = "F";
	if (short_name)
		name_len += snprintf(buf + name_len, size - name_len,
				     "_%s", short_name);
	return name_len;
}

/*
 * Synthesize PERF_RECORD_KSYMBOL and PERF_RECORD_BPF_EVENT for one bpf
 * program. One PERF_RECORD_BPF_EVENT is generated for the program as a
 * whole, and one PERF_RECORD_KSYMBOL is generated for each sub program.
 *
 * Returns:
 *    0 for success;
 *   -1 for failure;
 *   -2 for lack of kernel support.
 */
static int perf_event__synthesize_one_bpf_prog(struct perf_session *session,
					       perf_event__handler_t process,
					       struct machine *machine,
					       int fd,
					       union perf_event *event,
					       struct record_opts *opts)
{
	struct perf_record_ksymbol *ksymbol_event = &event->ksymbol;
	struct perf_record_bpf_event *bpf_event = &event->bpf;
	struct perf_tool *tool = session->tool;
	struct bpf_prog_info_node *info_node;
	struct perf_bpil *info_linear;
	struct bpf_prog_info *info;
	struct btf *btf = NULL;
	struct perf_env *env;
	u32 sub_prog_cnt, i;
	int err = 0;
	u64 arrays;

	/*
	 * For perf-record and perf-report, use header.env;
	 * otherwise, use the global perf_env.
	 */
	env = session->data ? &session->header.env : &perf_env;

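	/* Request the bpf_prog_info arrays used for ksymbol synthesis and annotation. */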
	arrays = 1UL << PERF_BPIL_JITED_KSYMS;
	arrays |= 1UL << PERF_BPIL_JITED_FUNC_LENS;
	arrays |= 1UL << PERF_BPIL_FUNC_INFO;
	arrays |= 1UL << PERF_BPIL_PROG_TAGS;
	arrays |= 1UL << PERF_BPIL_JITED_INSNS;
	arrays |= 1UL << PERF_BPIL_LINE_INFO;
	arrays |= 1UL << PERF_BPIL_JITED_LINE_INFO;

	info_linear = get_bpf_prog_info_linear(fd, arrays);
	if (IS_ERR_OR_NULL(info_linear)) {
		info_linear = NULL;
		pr_debug("%s: failed to get BPF program info. aborting\n", __func__);
		return -1;
	}

	if (info_linear->info_len < offsetof(struct bpf_prog_info, prog_tags)) {
		free(info_linear);
		pr_debug("%s: the kernel is too old, aborting\n", __func__);
		return -2;
	}

	info = &info_linear->info;
	if (!info->jited_ksyms) {
		free(info_linear);
		return -1;
	}

	/* the numbers of ksyms, func_lens and prog_tags should match */
	sub_prog_cnt = info->nr_jited_ksyms;
	if (sub_prog_cnt != info->nr_prog_tags ||
	    sub_prog_cnt != info->nr_jited_func_lens) {
		free(info_linear);
		return -1;
	}

	/* check BTF func info support */
	if (info->btf_id && info->nr_func_info && info->func_info_rec_size) {
		/* the number of BTF func infos should match sub_prog_cnt */
		if (sub_prog_cnt != info->nr_func_info) {
			pr_debug("%s: mismatch in BPF sub program count and BTF function info count, aborting\n", __func__);
			free(info_linear);
			return -1;
		}
		btf = btf__load_from_kernel_by_id(info->btf_id);
		if (libbpf_get_error(btf)) {
			pr_debug("%s: failed to get BTF of id %u, aborting\n", __func__, info->btf_id);
			err = -1;
			goto out;
		}
		perf_env__fetch_btf(env, info->btf_id, btf);
	}

	/* Synthesize PERF_RECORD_KSYMBOL */
	for (i = 0; i < sub_prog_cnt; i++) {
		__u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens);
		__u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
		int name_len;

		*ksymbol_event = (struct perf_record_ksymbol) {
			.header = {
				.type = PERF_RECORD_KSYMBOL,
				.size = offsetof(struct perf_record_ksymbol, name),
			},
			.addr = prog_addrs[i],
			.len = prog_lens[i],
			.ksym_type = PERF_RECORD_KSYMBOL_TYPE_BPF,
			.flags = 0,
		};

		name_len = synthesize_bpf_prog_name(ksymbol_event->name,
						    KSYM_NAME_LEN, info, btf, i);
		ksymbol_event->header.size += PERF_ALIGN(name_len + 1,
							 sizeof(u64));

		memset((void *)event + event->header.size, 0, machine->id_hdr_size);
		event->header.size += machine->id_hdr_size;
		err = perf_tool__process_synth_event(tool, event,
						     machine, process);
	}

	if (!opts->no_bpf_event) {
		/* Synthesize PERF_RECORD_BPF_EVENT */
		*bpf_event = (struct perf_record_bpf_event) {
			.header = {
				.type = PERF_RECORD_BPF_EVENT,
				.size = sizeof(struct perf_record_bpf_event),
			},
			.type = PERF_BPF_EVENT_PROG_LOAD,
			.flags = 0,
			.id = info->id,
		};
		memcpy(bpf_event->tag, info->tag, BPF_TAG_SIZE);
		memset((void *)event + event->header.size, 0, machine->id_hdr_size);
		event->header.size += machine->id_hdr_size;

		/* save bpf_prog_info to env */
		info_node = malloc(sizeof(struct bpf_prog_info_node));
		if (!info_node) {
			err = -1;
			goto out;
		}

		info_node->info_linear = info_linear;
		perf_env__insert_bpf_prog_info(env, info_node);
		info_linear = NULL;

		/*
		 * Process the event only after saving bpf_prog_info to env, so
		 * that the required information is ready for lookup.
		 */
		err = perf_tool__process_synth_event(tool, event,
						     machine, process);
	}

out:
	free(info_linear);
	btf__free(btf);
	return err ? -1 : 0;
}

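/* Context handed to kallsyms__parse() while synthesizing BPF image ksymbols. */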
struct kallsyms_parse {
	union perf_event	*event;
	perf_event__handler_t	 process;
	struct machine		*machine;
	struct perf_tool	*tool;
};

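/*
 * Synthesize a page-sized PERF_RECORD_KSYMBOL for a BPF image (trampoline or
 * dispatcher) found in kallsyms.
 */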
static int
process_bpf_image(char *name, u64 addr, struct kallsyms_parse *data)
{
	struct machine *machine = data->machine;
	union perf_event *event = data->event;
	struct perf_record_ksymbol *ksymbol;
	int len;

	ksymbol = &event->ksymbol;

	*ksymbol = (struct perf_record_ksymbol) {
		.header = {
			.type = PERF_RECORD_KSYMBOL,
			.size = offsetof(struct perf_record_ksymbol, name),
		},
		.addr      = addr,
		.len       = page_size,
		.ksym_type = PERF_RECORD_KSYMBOL_TYPE_BPF,
		.flags     = 0,
	};

	len = scnprintf(ksymbol->name, KSYM_NAME_LEN, "%s", name);
	ksymbol->header.size += PERF_ALIGN(len + 1, sizeof(u64));
	memset((void *) event + event->header.size, 0, machine->id_hdr_size);
	event->header.size += machine->id_hdr_size;

	return perf_tool__process_synth_event(data->tool, event, machine,
					      data->process);
}

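/*
 * kallsyms__parse() callback: pick the BPF trampoline and dispatcher symbols
 * out of the [bpf] pseudo module and synthesize ksymbols for them.
 */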
static int
kallsyms_process_symbol(void *data, const char *_name,
			char type __maybe_unused, u64 start)
{
	char disp[KSYM_NAME_LEN];
	char *module, *name;
	unsigned long id;
	int err = 0;

	module = strchr(_name, '\t');
	if (!module)
		return 0;

	/* We are only interested in symbols from the [bpf] module ... */
	if (strcmp(module + 1, "[bpf]"))
		return 0;

	name = memdup(_name, (module - _name) + 1);
	if (!name)
		return -ENOMEM;

	name[module - _name] = 0;

	/* .. and only for trampolines and dispatchers */
	if ((sscanf(name, "bpf_trampoline_%lu", &id) == 1) ||
	    (sscanf(name, "bpf_dispatcher_%s", disp) == 1))
		err = process_bpf_image(name, start, data);

	free(name);
	return err;
}

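/*
 * Iterate over all BPF programs loaded in the kernel and synthesize
 * PERF_RECORD_KSYMBOL and PERF_RECORD_BPF_EVENT records for them, then
 * synthesize PERF_RECORD_KSYMBOL records for the BPF trampoline and
 * dispatcher images listed in kallsyms.
 */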
int perf_event__synthesize_bpf_events(struct perf_session *session,
				      perf_event__handler_t process,
				      struct machine *machine,
				      struct record_opts *opts)
{
	const char *kallsyms_filename = "/proc/kallsyms";
	struct kallsyms_parse arg;
	union perf_event *event;
	__u32 id = 0;
	int err;
	int fd;

	event = malloc(sizeof(event->bpf) + KSYM_NAME_LEN + machine->id_hdr_size);
	if (!event)
		return -1;

	/* Synthesize all the bpf programs in the system. */
	while (true) {
		err = bpf_prog_get_next_id(id, &id);
		if (err) {
			if (errno == ENOENT) {
				err = 0;
				break;
			}
			pr_debug("%s: can't get next program: %s%s\n",
				 __func__, strerror(errno),
				 errno == EINVAL ? " -- kernel too old?" : "");
			/* don't report an error on an old kernel or EPERM */
			err = (errno == EINVAL || errno == EPERM) ? 0 : -1;
			break;
		}
		fd = bpf_prog_get_fd_by_id(id);
		if (fd < 0) {
			pr_debug("%s: failed to get fd for prog_id %u\n",
				 __func__, id);
			continue;
		}

		err = perf_event__synthesize_one_bpf_prog(session, process,
							  machine, fd,
							  event, opts);
		close(fd);
		if (err) {
			/* do not return an error for an old kernel */
			if (err == -2)
				err = 0;
			break;
		}
	}

	/* Synthesize all the bpf images - trampolines/dispatchers. */
	if (symbol_conf.kallsyms_name != NULL)
		kallsyms_filename = symbol_conf.kallsyms_name;

	arg = (struct kallsyms_parse) {
		.event   = event,
		.process = process,
		.machine = machine,
		.tool    = session->tool,
	};

	if (kallsyms__parse(kallsyms_filename, &arg, kallsyms_process_symbol)) {
		pr_err("%s: failed to synthesize bpf images: %s\n",
		       __func__, strerror(errno));
	}

	free(event);
	return err;
}

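/*
 * Fetch bpf_prog_info (and its BTF, if any) for program @id from the kernel
 * and save both in @env for later use.
 */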
static void perf_env__add_bpf_info(struct perf_env *env, u32 id)
{
	struct bpf_prog_info_node *info_node;
	struct perf_bpil *info_linear;
	struct btf *btf = NULL;
	u64 arrays;
	u32 btf_id;
	int fd;

	fd = bpf_prog_get_fd_by_id(id);
	if (fd < 0)
		return;

	arrays = 1UL << PERF_BPIL_JITED_KSYMS;
	arrays |= 1UL << PERF_BPIL_JITED_FUNC_LENS;
	arrays |= 1UL << PERF_BPIL_FUNC_INFO;
	arrays |= 1UL << PERF_BPIL_PROG_TAGS;
	arrays |= 1UL << PERF_BPIL_JITED_INSNS;
	arrays |= 1UL << PERF_BPIL_LINE_INFO;
	arrays |= 1UL << PERF_BPIL_JITED_LINE_INFO;

	info_linear = get_bpf_prog_info_linear(fd, arrays);
	if (IS_ERR_OR_NULL(info_linear)) {
		pr_debug("%s: failed to get BPF program info. aborting\n", __func__);
		goto out;
	}

	btf_id = info_linear->info.btf_id;

	info_node = malloc(sizeof(struct bpf_prog_info_node));
	if (info_node) {
		info_node->info_linear = info_linear;
		perf_env__insert_bpf_prog_info(env, info_node);
	} else
		free(info_linear);

	if (btf_id == 0)
		goto out;

	btf = btf__load_from_kernel_by_id(btf_id);
	if (libbpf_get_error(btf)) {
		pr_debug("%s: failed to get BTF of id %u, aborting\n",
			 __func__, btf_id);
		goto out;
	}
	perf_env__fetch_btf(env, btf_id, btf);

out:
	btf__free(btf);
	close(fd);
}

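/*
 * Side-band event callback: save the info of newly loaded BPF programs into
 * the perf_env passed as @data.
 */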
static int bpf_event__sb_cb(union perf_event *event, void *data)
{
	struct perf_env *env = data;

	if (event->header.type != PERF_RECORD_BPF_EVENT)
		return -1;

	switch (event->bpf.type) {
	case PERF_BPF_EVENT_PROG_LOAD:
		perf_env__add_bpf_info(env, event->bpf.id);

	case PERF_BPF_EVENT_PROG_UNLOAD:
		/*
		 * Do not free the bpf_prog_info and btf of the program here,
		 * as annotation still needs them. They will be freed at
		 * the end of the session.
		 */
		break;
	default:
		pr_debug("unexpected bpf event type of %d\n", event->bpf.type);
		break;
	}

	return 0;
}

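/*
 * Add a dummy software event with attr.bpf_event set, so that
 * bpf_event__sb_cb() gets called for the PERF_RECORD_BPF_EVENT records
 * arriving on the side band.
 */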
int evlist__add_bpf_sb_event(struct evlist *evlist, struct perf_env *env)
{
	struct perf_event_attr attr = {
		.type	          = PERF_TYPE_SOFTWARE,
		.config           = PERF_COUNT_SW_DUMMY,
		.sample_id_all    = 1,
		.watermark        = 1,
		.bpf_event        = 1,
		.size	   = sizeof(attr), /* to capture ABI version */
	};

	/*
	 * Older gcc versions don't support designated initializers, like above,
	 * for unnamed union members, such as the following:
	 */
	attr.wakeup_watermark = 1;

	return evlist__add_sb_event(evlist, &attr, bpf_event__sb_cb, env);
}

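/*
 * Print a one line summary per sub program of @info to @fp, resolving names
 * via the BTF previously saved in @env.
 */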
void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
				    struct perf_env *env,
				    FILE *fp)
{
	__u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens);
	__u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
	char name[KSYM_NAME_LEN];
	struct btf *btf = NULL;
	u32 sub_prog_cnt, i;

	sub_prog_cnt = info->nr_jited_ksyms;
	if (sub_prog_cnt != info->nr_prog_tags ||
	    sub_prog_cnt != info->nr_jited_func_lens)
		return;

	if (info->btf_id) {
		struct btf_node *node;

		node = perf_env__find_btf(env, info->btf_id);
		if (node)
			btf = btf__new((__u8 *)(node->data),
				       node->data_size);
	}

	if (sub_prog_cnt == 1) {
		synthesize_bpf_prog_name(name, KSYM_NAME_LEN, info, btf, 0);
		fprintf(fp, "# bpf_prog_info %u: %s addr 0x%llx size %u\n",
			info->id, name, prog_addrs[0], prog_lens[0]);
		goto out;
	}

	fprintf(fp, "# bpf_prog_info %u:\n", info->id);
	for (i = 0; i < sub_prog_cnt; i++) {
		synthesize_bpf_prog_name(name, KSYM_NAME_LEN, info, btf, i);

		fprintf(fp, "# \tsub_prog %u: %s addr 0x%llx size %u\n",
			i, name, prog_addrs[i], prog_lens[i]);
	}
out:
	btf__free(btf);
}