xref: /linux/tools/perf/util/bpf-event.c (revision 4e0ae876f77bc01a7e77724dea57b4b82bd53244)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <errno.h>
3 #include <stdlib.h>
4 #include <bpf/bpf.h>
5 #include <bpf/btf.h>
6 #include <linux/btf.h>
7 #include "bpf-event.h"
8 #include "debug.h"
9 #include "symbol.h"
10 #include "machine.h"
11 
12 #define ptr_to_u64(ptr)    ((__u64)(unsigned long)(ptr))
13 
/*
 * Append the hex representation of data[0..len) to buf, snprintf-style.
 * Returns the number of characters accumulated (2 per input byte while
 * space remains). Output is always NUL-terminated if size > 0.
 */
static int snprintf_hex(char *buf, size_t size, unsigned char *data, size_t len)
{
	int ret = 0;
	size_t i;

	for (i = 0; i < len; i++) {
		/*
		 * Stop once the buffer is exhausted: "size - ret" is a
		 * size_t subtraction, so letting ret reach/exceed size
		 * would wrap it to a huge bound and overflow buf.
		 */
		if ((size_t)ret >= size)
			break;
		ret += snprintf(buf + ret, size - ret, "%02x", data[i]);
	}
	return ret;
}
23 
24 int machine__process_bpf_event(struct machine *machine __maybe_unused,
25 			       union perf_event *event,
26 			       struct perf_sample *sample __maybe_unused)
27 {
28 	if (dump_trace)
29 		perf_event__fprintf_bpf_event(event, stdout);
30 	return 0;
31 }
32 
33 /*
34  * Synthesize PERF_RECORD_KSYMBOL and PERF_RECORD_BPF_EVENT for one bpf
35  * program. One PERF_RECORD_BPF_EVENT is generated for the program. And
36  * one PERF_RECORD_KSYMBOL is generated for each sub program.
37  *
38  * Returns:
39  *    0 for success;
40  *   -1 for failures;
41  *   -2 for lack of kernel support.
42  */
43 static int perf_event__synthesize_one_bpf_prog(struct perf_tool *tool,
44 					       perf_event__handler_t process,
45 					       struct machine *machine,
46 					       int fd,
47 					       union perf_event *event,
48 					       struct record_opts *opts)
49 {
50 	struct ksymbol_event *ksymbol_event = &event->ksymbol_event;
51 	struct bpf_event *bpf_event = &event->bpf_event;
52 	u32 sub_prog_cnt, i, func_info_rec_size = 0;
53 	u8 (*prog_tags)[BPF_TAG_SIZE] = NULL;
54 	struct bpf_prog_info info = { .type = 0, };
55 	u32 info_len = sizeof(info);
56 	void *func_infos = NULL;
57 	u64 *prog_addrs = NULL;
58 	struct btf *btf = NULL;
59 	u32 *prog_lens = NULL;
60 	bool has_btf = false;
61 	char errbuf[512];
62 	int err = 0;
63 
64 	/* Call bpf_obj_get_info_by_fd() to get sizes of arrays */
65 	err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
66 
67 	if (err) {
68 		pr_debug("%s: failed to get BPF program info: %s, aborting\n",
69 			 __func__, str_error_r(errno, errbuf, sizeof(errbuf)));
70 		return -1;
71 	}
72 	if (info_len < offsetof(struct bpf_prog_info, prog_tags)) {
73 		pr_debug("%s: the kernel is too old, aborting\n", __func__);
74 		return -2;
75 	}
76 
77 	/* number of ksyms, func_lengths, and tags should match */
78 	sub_prog_cnt = info.nr_jited_ksyms;
79 	if (sub_prog_cnt != info.nr_prog_tags ||
80 	    sub_prog_cnt != info.nr_jited_func_lens)
81 		return -1;
82 
83 	/* check BTF func info support */
84 	if (info.btf_id && info.nr_func_info && info.func_info_rec_size) {
85 		/* btf func info number should be same as sub_prog_cnt */
86 		if (sub_prog_cnt != info.nr_func_info) {
87 			pr_debug("%s: mismatch in BPF sub program count and BTF function info count, aborting\n", __func__);
88 			return -1;
89 		}
90 		if (btf__get_from_id(info.btf_id, &btf)) {
91 			pr_debug("%s: failed to get BTF of id %u, aborting\n", __func__, info.btf_id);
92 			return -1;
93 		}
94 		func_info_rec_size = info.func_info_rec_size;
95 		func_infos = calloc(sub_prog_cnt, func_info_rec_size);
96 		if (!func_infos) {
97 			pr_debug("%s: failed to allocate memory for func_infos, aborting\n", __func__);
98 			return -1;
99 		}
100 		has_btf = true;
101 	}
102 
103 	/*
104 	 * We need address, length, and tag for each sub program.
105 	 * Allocate memory and call bpf_obj_get_info_by_fd() again
106 	 */
107 	prog_addrs = calloc(sub_prog_cnt, sizeof(u64));
108 	if (!prog_addrs) {
109 		pr_debug("%s: failed to allocate memory for prog_addrs, aborting\n", __func__);
110 		goto out;
111 	}
112 	prog_lens = calloc(sub_prog_cnt, sizeof(u32));
113 	if (!prog_lens) {
114 		pr_debug("%s: failed to allocate memory for prog_lens, aborting\n", __func__);
115 		goto out;
116 	}
117 	prog_tags = calloc(sub_prog_cnt, BPF_TAG_SIZE);
118 	if (!prog_tags) {
119 		pr_debug("%s: failed to allocate memory for prog_tags, aborting\n", __func__);
120 		goto out;
121 	}
122 
123 	memset(&info, 0, sizeof(info));
124 	info.nr_jited_ksyms = sub_prog_cnt;
125 	info.nr_jited_func_lens = sub_prog_cnt;
126 	info.nr_prog_tags = sub_prog_cnt;
127 	info.jited_ksyms = ptr_to_u64(prog_addrs);
128 	info.jited_func_lens = ptr_to_u64(prog_lens);
129 	info.prog_tags = ptr_to_u64(prog_tags);
130 	info_len = sizeof(info);
131 	if (has_btf) {
132 		info.nr_func_info = sub_prog_cnt;
133 		info.func_info_rec_size = func_info_rec_size;
134 		info.func_info = ptr_to_u64(func_infos);
135 	}
136 
137 	err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
138 	if (err) {
139 		pr_debug("%s: failed to get BPF program info, aborting\n", __func__);
140 		goto out;
141 	}
142 
143 	/* Synthesize PERF_RECORD_KSYMBOL */
144 	for (i = 0; i < sub_prog_cnt; i++) {
145 		const struct bpf_func_info *finfo;
146 		const char *short_name = NULL;
147 		const struct btf_type *t;
148 		int name_len;
149 
150 		*ksymbol_event = (struct ksymbol_event){
151 			.header = {
152 				.type = PERF_RECORD_KSYMBOL,
153 				.size = offsetof(struct ksymbol_event, name),
154 			},
155 			.addr = prog_addrs[i],
156 			.len = prog_lens[i],
157 			.ksym_type = PERF_RECORD_KSYMBOL_TYPE_BPF,
158 			.flags = 0,
159 		};
160 		name_len = snprintf(ksymbol_event->name, KSYM_NAME_LEN,
161 				    "bpf_prog_");
162 		name_len += snprintf_hex(ksymbol_event->name + name_len,
163 					 KSYM_NAME_LEN - name_len,
164 					 prog_tags[i], BPF_TAG_SIZE);
165 		if (has_btf) {
166 			finfo = func_infos + i * info.func_info_rec_size;
167 			t = btf__type_by_id(btf, finfo->type_id);
168 			short_name = btf__name_by_offset(btf, t->name_off);
169 		} else if (i == 0 && sub_prog_cnt == 1) {
170 			/* no subprog */
171 			if (info.name[0])
172 				short_name = info.name;
173 		} else
174 			short_name = "F";
175 		if (short_name)
176 			name_len += snprintf(ksymbol_event->name + name_len,
177 					     KSYM_NAME_LEN - name_len,
178 					     "_%s", short_name);
179 
180 		ksymbol_event->header.size += PERF_ALIGN(name_len + 1,
181 							 sizeof(u64));
182 
183 		memset((void *)event + event->header.size, 0, machine->id_hdr_size);
184 		event->header.size += machine->id_hdr_size;
185 		err = perf_tool__process_synth_event(tool, event,
186 						     machine, process);
187 	}
188 
189 	/* Synthesize PERF_RECORD_BPF_EVENT */
190 	if (opts->bpf_event) {
191 		*bpf_event = (struct bpf_event){
192 			.header = {
193 				.type = PERF_RECORD_BPF_EVENT,
194 				.size = sizeof(struct bpf_event),
195 			},
196 			.type = PERF_BPF_EVENT_PROG_LOAD,
197 			.flags = 0,
198 			.id = info.id,
199 		};
200 		memcpy(bpf_event->tag, prog_tags[i], BPF_TAG_SIZE);
201 		memset((void *)event + event->header.size, 0, machine->id_hdr_size);
202 		event->header.size += machine->id_hdr_size;
203 		err = perf_tool__process_synth_event(tool, event,
204 						     machine, process);
205 	}
206 
207 out:
208 	free(prog_tags);
209 	free(prog_lens);
210 	free(prog_addrs);
211 	free(func_infos);
212 	free(btf);
213 	return err ? -1 : 0;
214 }
215 
216 int perf_event__synthesize_bpf_events(struct perf_tool *tool,
217 				      perf_event__handler_t process,
218 				      struct machine *machine,
219 				      struct record_opts *opts)
220 {
221 	union perf_event *event;
222 	__u32 id = 0;
223 	int err;
224 	int fd;
225 
226 	event = malloc(sizeof(event->bpf_event) + KSYM_NAME_LEN + machine->id_hdr_size);
227 	if (!event)
228 		return -1;
229 	while (true) {
230 		err = bpf_prog_get_next_id(id, &id);
231 		if (err) {
232 			if (errno == ENOENT) {
233 				err = 0;
234 				break;
235 			}
236 			pr_debug("%s: can't get next program: %s%s\n",
237 				 __func__, strerror(errno),
238 				 errno == EINVAL ? " -- kernel too old?" : "");
239 			/* don't report error on old kernel or EPERM  */
240 			err = (errno == EINVAL || errno == EPERM) ? 0 : -1;
241 			break;
242 		}
243 		fd = bpf_prog_get_fd_by_id(id);
244 		if (fd < 0) {
245 			pr_debug("%s: failed to get fd for prog_id %u\n",
246 				 __func__, id);
247 			continue;
248 		}
249 
250 		err = perf_event__synthesize_one_bpf_prog(tool, process,
251 							  machine, fd,
252 							  event, opts);
253 		close(fd);
254 		if (err) {
255 			/* do not return error for old kernel */
256 			if (err == -2)
257 				err = 0;
258 			break;
259 		}
260 	}
261 	free(event);
262 	return err;
263 }
264