// SPDX-License-Identifier: GPL-2.0
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <pthread.h>
#include <unistd.h>
#include <linux/perf_event.h>
#include <sys/mman.h>
#include "trace_helpers.h"
#include <linux/limits.h>
#include <libelf.h>
#include <gelf.h>
#include "bpf/libbpf_internal.h"

#define TRACEFS_PIPE	"/sys/kernel/tracing/trace_pipe"
#define DEBUGFS_PIPE	"/sys/kernel/debug/tracing/trace_pipe"

struct ksyms {
	struct ksym *syms;
	size_t sym_cap;
	size_t sym_cnt;
};

static struct ksyms *ksyms;
static pthread_mutex_t ksyms_mutex = PTHREAD_MUTEX_INITIALIZER;

static int ksyms__add_symbol(struct ksyms *ksyms, const char *name,
			     unsigned long addr)
{
	void *tmp;

	tmp = strdup(name);
	if (!tmp)
		return -ENOMEM;
	ksyms->syms[ksyms->sym_cnt].addr = addr;
	ksyms->syms[ksyms->sym_cnt].name = tmp;
	ksyms->sym_cnt++;
	return 0;
}

void free_kallsyms_local(struct ksyms *ksyms)
{
	unsigned int i;

	if (!ksyms)
		return;

	if (!ksyms->syms) {
		free(ksyms);
		return;
	}

	for (i = 0; i < ksyms->sym_cnt; i++)
		free(ksyms->syms[i].name);
	free(ksyms->syms);
	free(ksyms);
}

static int ksym_cmp(const void *p1, const void *p2)
{
	return ((struct ksym *)p1)->addr - ((struct ksym *)p2)->addr;
}

struct ksyms *load_kallsyms_local(void)
{
	FILE *f;
	char func[256], buf[256];
	char symbol;
	void *addr;
	int ret;
	struct ksyms *ksyms;

	f = fopen("/proc/kallsyms", "r");
	if (!f)
		return NULL;

	ksyms = calloc(1, sizeof(struct ksyms));
	if (!ksyms) {
		fclose(f);
		return NULL;
	}

	while (fgets(buf, sizeof(buf), f)) {
		if (sscanf(buf, "%p %c %s", &addr, &symbol, func) != 3)
			break;
		if (!addr)
			continue;

		ret = libbpf_ensure_mem((void **) &ksyms->syms, &ksyms->sym_cap,
					sizeof(struct ksym), ksyms->sym_cnt + 1);
		if (ret)
			goto error;
		ret = ksyms__add_symbol(ksyms, func, (unsigned long)addr);
		if (ret)
			goto error;
	}
	fclose(f);
	qsort(ksyms->syms, ksyms->sym_cnt, sizeof(struct ksym), ksym_cmp);
	return ksyms;

error:
	fclose(f);
	free_kallsyms_local(ksyms);
	return NULL;
}

int load_kallsyms(void)
{
	pthread_mutex_lock(&ksyms_mutex);
	if (!ksyms)
		ksyms = load_kallsyms_local();
	pthread_mutex_unlock(&ksyms_mutex);
	return ksyms ? 0 : 1;
}
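
/*
 * Illustrative sketch, not part of the original helpers: a test that wants a
 * private symbol table instead of the shared, lazily-initialized one (for
 * example to re-read /proc/kallsyms after loading a module) can use the
 * *_local() variants directly and free the table when done.
 */
void example_with_local_ksyms(void)
{
	struct ksyms *ks = load_kallsyms_local();

	if (!ks)
		return;
	/* query ks with ksym_search_local()/ksym_get_addr_local() here */
	free_kallsyms_local(ks);
}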

struct ksym *ksym_search_local(struct ksyms *ksyms, long key)
{
	int start = 0, end = ksyms->sym_cnt;
	int result;

	/* kallsyms not loaded. return NULL */
	if (ksyms->sym_cnt <= 0)
		return NULL;

	while (start < end) {
		size_t mid = start + (end - start) / 2;

		result = key - ksyms->syms[mid].addr;
		if (result < 0)
			end = mid;
		else if (result > 0)
			start = mid + 1;
		else
			return &ksyms->syms[mid];
	}

	if (start >= 1 && ksyms->syms[start - 1].addr < key &&
	    key < ksyms->syms[start].addr)
		/* valid ksym */
		return &ksyms->syms[start - 1];

	/* out of range. return _stext */
	return &ksyms->syms[0];
}

struct ksym *ksym_search(long key)
{
	if (!ksyms)
		return NULL;
	return ksym_search_local(ksyms, key);
}
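
/*
 * Illustrative sketch, not part of the original helpers: resolving an
 * arbitrary kernel text address (e.g. one entry of a captured stack trace)
 * to the symbol containing it via the shared table.
 */
void example_print_ksym(long addr)
{
	struct ksym *sym;

	if (load_kallsyms())
		return;
	sym = ksym_search(addr);
	if (sym)
		printf("0x%lx is %s+0x%lx\n", (unsigned long)addr, sym->name,
		       (unsigned long)(addr - sym->addr));
}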

long ksym_get_addr_local(struct ksyms *ksyms, const char *name)
{
	int i;

	for (i = 0; i < ksyms->sym_cnt; i++) {
		if (strcmp(ksyms->syms[i].name, name) == 0)
			return ksyms->syms[i].addr;
	}

	return 0;
}

long ksym_get_addr(const char *name)
{
	if (!ksyms)
		return 0;
	return ksym_get_addr_local(ksyms, name);
}
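
/*
 * Illustrative sketch, not part of the original helpers: checking by name
 * whether the running kernel provides a symbol at all, e.g. before a test
 * attaches a probe to it.
 */
bool example_ksym_exists(const char *name)
{
	return load_kallsyms() == 0 && ksym_get_addr(name) != 0;
}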

/* Open /proc/kallsyms and read symbol addresses on the fly. Without caching
 * all symbols, this is faster for a one-off lookup than load + find
 * (i.e. load_kallsyms() followed by ksym_get_addr()).
 */
int kallsyms_find(const char *sym, unsigned long long *addr)
{
	char type, name[500];
	unsigned long long value;
	int err = 0;
	FILE *f;

	f = fopen("/proc/kallsyms", "r");
	if (!f)
		return -EINVAL;

	while (fscanf(f, "%llx %c %499s%*[^\n]\n", &value, &type, name) > 0) {
		if (strcmp(name, sym) == 0) {
			*addr = value;
			goto out;
		}
	}
	err = -ENOENT;

out:
	fclose(f);
	return err;
}
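
/*
 * Illustrative sketch, not part of the original helpers: a one-off lookup
 * through kallsyms_find(), which avoids caching the whole symbol table.
 * "bpf_fentry_test1" is only an example symbol name.
 */
int example_find_one_symbol(void)
{
	unsigned long long addr;
	int err;

	err = kallsyms_find("bpf_fentry_test1", &addr);
	if (err)
		return err;
	printf("bpf_fentry_test1 is at 0x%llx\n", addr);
	return 0;
}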

void read_trace_pipe(void)
{
	int trace_fd;

	if (access(TRACEFS_PIPE, F_OK) == 0)
		trace_fd = open(TRACEFS_PIPE, O_RDONLY, 0);
	else
		trace_fd = open(DEBUGFS_PIPE, O_RDONLY, 0);
	if (trace_fd < 0)
		return;

	while (1) {
		static char buf[4096];
		ssize_t sz;

		sz = read(trace_fd, buf, sizeof(buf) - 1);
		if (sz > 0) {
			buf[sz] = 0;
			puts(buf);
		}
	}
}
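
/*
 * Note on usage (sketch, not part of the original helpers): read_trace_pipe()
 * loops forever, so callers typically load and attach their BPF programs
 * first and only then hand the terminal over to it, e.g.:
 *
 *	(load skeleton, attach programs that call bpf_printk())
 *	read_trace_pipe();	(never returns; echoes the trace_pipe output)
 */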

ssize_t get_uprobe_offset(const void *addr)
{
	size_t start, end, base;
	char buf[256];
	bool found = false;
	FILE *f;

	f = fopen("/proc/self/maps", "r");
	if (!f)
		return -errno;

	while (fscanf(f, "%zx-%zx %s %zx %*[^\n]\n", &start, &end, buf, &base) == 4) {
		if (buf[2] == 'x' && (uintptr_t)addr >= start && (uintptr_t)addr < end) {
			found = true;
			break;
		}
	}

	fclose(f);

	if (!found)
		return -ESRCH;

#if defined(__powerpc64__) && defined(_CALL_ELF) && _CALL_ELF == 2

#define OP_RT_RA_MASK   0xffff0000UL
#define LIS_R2          0x3c400000UL
#define ADDIS_R2_R12    0x3c4c0000UL
#define ADDI_R2_R2      0x38420000UL

	/*
	 * A PPC64 ABIv2 function may have a local and a global entry
	 * point. We need to use the local entry point when patching
	 * functions, so identify and step over the global entry point
	 * sequence.
	 *
	 * The global entry point sequence is always of the form:
	 *
	 * addis r2,r12,XXXX
	 * addi  r2,r2,XXXX
	 *
	 * A linker optimisation may convert the addis to lis:
	 *
	 * lis   r2,XXXX
	 * addi  r2,r2,XXXX
	 */
	{
		const u32 *insn = (const u32 *)(uintptr_t)addr;

		if ((((*insn & OP_RT_RA_MASK) == ADDIS_R2_R12) ||
		     ((*insn & OP_RT_RA_MASK) == LIS_R2)) &&
		    ((*(insn + 1) & OP_RT_RA_MASK) == ADDI_R2_R2))
			return (uintptr_t)(insn + 2) - start + base;
	}
#endif
	return (uintptr_t)addr - start + base;
}
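
/*
 * Illustrative sketch, not part of the original helpers: the offset returned
 * here is relative to the start of the file backing the mapping, which is
 * what a uprobe attach API expects together with the binary path
 * (e.g. "/proc/self/exe" for functions in the test binary itself).
 */
ssize_t example_uprobe_offset(void (*target_func)(void))
{
	/* file offset at which a uprobe on target_func would be placed */
	return get_uprobe_offset((const void *)target_func);
}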

ssize_t get_rel_offset(uintptr_t addr)
{
	size_t start, end, offset;
	char buf[256];
	FILE *f;

	f = fopen("/proc/self/maps", "r");
	if (!f)
		return -errno;

	while (fscanf(f, "%zx-%zx %s %zx %*[^\n]\n", &start, &end, buf, &offset) == 4) {
		if (addr >= start && addr < end) {
			fclose(f);
			return (size_t)addr - start + offset;
		}
	}

	fclose(f);
	return -EINVAL;
}
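
/*
 * Illustrative sketch, not part of the original helpers: unlike
 * get_uprobe_offset(), get_rel_offset() does not require an executable
 * mapping, so it can also translate the address of a global data object
 * (e.g. a USDT semaphore) into its file-relative offset.
 */
ssize_t example_rel_offset(const void *obj)
{
	return get_rel_offset((uintptr_t)obj);
}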

/* Scan an ELF PT_NOTE segment for an NT_GNU_BUILD_ID note (type 3, name
 * "GNU") and copy its descriptor into *build_id*, zero-padding the buffer up
 * to BPF_BUILD_ID_SIZE. Returns the descriptor size on success, -ENOENT if
 * no build ID note is found.
 */
static int
parse_build_id_buf(const void *note_start, Elf32_Word note_size, char *build_id)
{
	Elf32_Word note_offs = 0;

	while (note_offs + sizeof(Elf32_Nhdr) < note_size) {
		Elf32_Nhdr *nhdr = (Elf32_Nhdr *)(note_start + note_offs);

		if (nhdr->n_type == 3 && nhdr->n_namesz == sizeof("GNU") &&
		    !strcmp((char *)(nhdr + 1), "GNU") && nhdr->n_descsz > 0 &&
		    nhdr->n_descsz <= BPF_BUILD_ID_SIZE) {
			memcpy(build_id, note_start + note_offs +
			       ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr), nhdr->n_descsz);
			memset(build_id + nhdr->n_descsz, 0, BPF_BUILD_ID_SIZE - nhdr->n_descsz);
			return (int) nhdr->n_descsz;
		}

		note_offs = note_offs + sizeof(Elf32_Nhdr) +
			   ALIGN(nhdr->n_namesz, 4) + ALIGN(nhdr->n_descsz, 4);
	}

	return -ENOENT;
}

/* Reads the GNU build ID of the binary at *path* and returns it in the
 * *build_id* buffer, whose *size* must be at least BPF_BUILD_ID_SIZE bytes.
 * Returns the size of the build ID on success, a negative error value
 * otherwise.
 */
int read_build_id(const char *path, char *build_id, size_t size)
{
	int fd, err = -EINVAL;
	Elf *elf = NULL;
	GElf_Ehdr ehdr;
	size_t max, i;

	if (size < BPF_BUILD_ID_SIZE)
		return -EINVAL;

	fd = open(path, O_RDONLY | O_CLOEXEC);
	if (fd < 0)
		return -errno;

	(void)elf_version(EV_CURRENT);

	elf = elf_begin(fd, ELF_C_READ_MMAP, NULL);
	if (!elf)
		goto out;
	if (elf_kind(elf) != ELF_K_ELF)
		goto out;
	if (!gelf_getehdr(elf, &ehdr))
		goto out;

	for (i = 0; i < ehdr.e_phnum; i++) {
		GElf_Phdr mem, *phdr;
		char *data;

		phdr = gelf_getphdr(elf, i, &mem);
		if (!phdr)
			goto out;
		if (phdr->p_type != PT_NOTE)
			continue;
		data = elf_rawfile(elf, &max);
		if (!data)
			goto out;
		if (phdr->p_offset + phdr->p_memsz > max)
			goto out;
		err = parse_build_id_buf(data + phdr->p_offset, phdr->p_memsz, build_id);
		if (err > 0)
			break;
	}

out:
	if (elf)
		elf_end(elf);
	close(fd);
	return err;
}
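
/*
 * Illustrative sketch, not part of the original helpers: reading the GNU
 * build ID of the running test binary through /proc/self/exe and printing it
 * as hex.
 */
int example_print_own_build_id(void)
{
	char build_id[BPF_BUILD_ID_SIZE];
	int i, len;

	len = read_build_id("/proc/self/exe", build_id, sizeof(build_id));
	if (len < 0)
		return len;
	for (i = 0; i < len; i++)
		printf("%02x", (unsigned char)build_id[i]);
	printf("\n");
	return 0;
}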