// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <inttypes.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>
#include <string.h>
#include <sys/param.h>
#include <sys/utsname.h>
#include <perf/cpumap.h>
#include <perf/evlist.h>
#include <perf/mmap.h>

#include "debug.h"
#include "dso.h"
#include "env.h"
#include "parse-events.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
#include "machine.h"
#include "map.h"
#include "symbol.h"
#include "event.h"
#include "record.h"
#include "util/mmap.h"
#include "util/string2.h"
#include "util/synthetic-events.h"
#include "util/util.h"
#include "thread.h"

#include "tests.h"

#include <linux/ctype.h>

#define BUFSZ	1024
#define READLEN	128

/* Track which kcore maps have been tested, so each is read only once */
struct state {
	u64 done[1024];
	size_t done_cnt;
};

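/*
 * Parse one chunk of hex bytes from an objdump line into '*buf'. objdump
 * prints multi-byte chunks in its display endianness, so on a little-endian
 * host the bytes of each chunk are reversed to recover memory order.
 */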
static size_t read_objdump_chunk(const char **line, unsigned char **buf,
				 size_t *buf_len)
{
	size_t bytes_read = 0;
	unsigned char *chunk_start = *buf;

	/* Read bytes */
	while (*buf_len > 0) {
		char c1, c2;

		/* Get 2 hex digits */
		c1 = *(*line)++;
		if (!isxdigit(c1))
			break;
		c2 = *(*line)++;
		if (!isxdigit(c2))
			break;

		/* Store byte and advance buf */
		**buf = (hex(c1) << 4) | hex(c2);
		(*buf)++;
		(*buf_len)--;
		bytes_read++;

		/* End of chunk? */
		if (isspace(**line))
			break;
	}

	/*
	 * objdump displays a raw instruction chunk in little-endian byte
	 * order when the code endianness is LE and bytes_per_chunk > 1.
	 * In that case reverse the chunk we just read.
	 *
	 * See disassemble_bytes() in binutils/objdump.c for details on
	 * how objdump chooses the display endianness.
	 */
	if (bytes_read > 1 && !host_is_bigendian()) {
		unsigned char *chunk_end = chunk_start + bytes_read - 1;
		unsigned char tmp;

		while (chunk_start < chunk_end) {
			tmp = *chunk_start;
			*chunk_start = *chunk_end;
			*chunk_end = tmp;
			chunk_start++;
			chunk_end--;
		}
	}

	return bytes_read;
}

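/*
 * Parse the raw instruction bytes out of one line of objdump disassembly.
 * A typical line looks something like:
 *
 *   4004f6:	48 89 e5             	mov    %rsp,%rbp
 *
 * Everything before the colon is the address; the hex chunks after it are
 * the instruction bytes. Returns the number of bytes read.
 */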
static size_t read_objdump_line(const char *line, unsigned char *buf,
				size_t buf_len)
{
	const char *p;
	size_t ret, bytes_read = 0;

	/* Skip to a colon */
	p = strchr(line, ':');
	if (!p)
		return 0;
	p++;

	/* Skip initial spaces */
	while (*p) {
		if (!isspace(*p))
			break;
		p++;
	}

	do {
		ret = read_objdump_chunk(&p, &buf, &buf_len);
		bytes_read += ret;
		p++;
	} while (ret > 0);

	/* return number of successfully read bytes */
	return bytes_read;
}

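/*
 * Read objdump output into 'buf', placing each line's bytes at the offset
 * implied by its address relative to start_addr. On return, *len holds the
 * number of trailing bytes that could not be read.
 */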
static int read_objdump_output(FILE *f, void *buf, size_t *len, u64 start_addr)
{
	char *line = NULL;
	size_t line_len = 0, off_last = 0;
	ssize_t ret;
	int err = 0;
	u64 addr, last_addr = start_addr;

	while (off_last < *len) {
		size_t off, read_bytes, written_bytes;
		unsigned char tmp[BUFSZ];

		ret = getline(&line, &line_len, f);
		if (feof(f))
			break;
		if (ret < 0) {
			pr_debug("getline failed\n");
			err = -1;
			break;
		}

		/* read objdump data into temporary buffer */
		read_bytes = read_objdump_line(line, tmp, sizeof(tmp));
		if (!read_bytes)
			continue;

		if (sscanf(line, "%"PRIx64, &addr) != 1)
			continue;
		if (addr < last_addr) {
			pr_debug("addr going backwards, read beyond section?\n");
			break;
		}
		last_addr = addr;

		/*
		 * Copy from the temporary buffer to 'buf' according to
		 * the address on the current objdump line.
		 */
		off = addr - start_addr;
		if (off >= *len)
			break;
		written_bytes = MIN(read_bytes, *len - off);
		memcpy(buf + off, tmp, written_bytes);
		off_last = off + written_bytes;
	}

	/* *len now holds the number of bytes that could not be read */
	*len -= off_last;

	free(line);

	return err;
}

/*
 * Only gets the GNU objdump version. Returns 0 for llvm-objdump. The
 * version is encoded as major * 10000 + minor * 100 + micro, so that
 * releases can be compared numerically (e.g. 2.41.0 becomes 24100).
 */
static int objdump_version(void)
{
	size_t line_len = 0;
	char cmd[PATH_MAX * 2];
	char *line = NULL, *line_start;
	const char *fmt;
	FILE *f;
	int ret;

	int version_num = 0;
	char *version = NULL, *token;

	fmt = "%s --version";
	ret = snprintf(cmd, sizeof(cmd), fmt, test_objdump_path);
	if (ret <= 0 || (size_t)ret >= sizeof(cmd))
		return -1;
	/* Ignore objdump errors */
	strcat(cmd, " 2>/dev/null");
	f = popen(cmd, "r");
	if (!f) {
		pr_debug("popen failed\n");
		return -1;
	}
	/* Get first line of objdump --version output */
	ret = getline(&line, &line_len, f);
	pclose(f);
	if (ret < 0) {
		pr_debug("getline failed\n");
		/* getline() may have allocated a buffer even on failure */
		free(line);
		return -1;
	}

	/* strsep() advances 'line', so remember the start for free() */
	line_start = line;
	token = strsep(&line, " ");
	if (token != NULL && !strcmp(token, "GNU")) {
		// version is last part of first line of objdump --version output.
		while ((token = strsep(&line, " ")))
			version = token;

		// Convert version into a format we can compare with
		token = strsep(&version, ".");
		version_num = atoi(token);
		if (version_num)
			version_num *= 10000;

		token = strsep(&version, ".");
		if (token)
			version_num += atoi(token) * 100;

		token = strsep(&version, ".");
		if (token)
			version_num += atoi(token);
	}

	free(line_start);

	return version_num;
}

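/*
 * Disassemble 'len' bytes at 'addr' in 'filename' with objdump and parse
 * the raw instruction bytes into 'buf'. Returns 0 on success, the number
 * of unread bytes if objdump produced too little output, or a negative
 * value on error.
 */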
static int read_via_objdump(const char *filename, u64 addr, void *buf,
			    size_t len)
{
	u64 stop_address = addr + len;
	struct utsname uname_buf;
	char cmd[PATH_MAX * 2];
	const char *fmt;
	FILE *f;
	int ret;

	ret = uname(&uname_buf);
	if (ret) {
		pr_debug("uname failed\n");
		return -1;
	}

	if (!strncmp(uname_buf.machine, "riscv", 5)) {
		int version = objdump_version();

		/* Default to this workaround if version parsing fails */
		if (version < 0 || version >= 24100) {
			/*
			 * Starting at riscv objdump version 2.41, dumping in
			 * the middle of an instruction is not supported. riscv
			 * instructions are aligned along 2-byte intervals and
			 * can be either 2-bytes or 4-bytes. This makes it
			 * possible that the stop-address lands in the middle of
			 * a 4-byte instruction. Increase the stop_address by
			 * two to ensure an instruction is not cut in half, but
			 * leave the len as-is so only the expected number of
			 * bytes are collected.
			 */
			stop_address += 2;
		}
	}

	fmt = "%s -z -d --start-address=0x%"PRIx64" --stop-address=0x%"PRIx64" %s";
	ret = snprintf(cmd, sizeof(cmd), fmt, test_objdump_path, addr, stop_address,
		       filename);
	if (ret <= 0 || (size_t)ret >= sizeof(cmd))
		return -1;

	pr_debug("Objdump command is: %s\n", cmd);

	/* Ignore objdump errors */
	strcat(cmd, " 2>/dev/null");

	f = popen(cmd, "r");
	if (!f) {
		pr_debug("popen failed\n");
		return -1;
	}

	ret = read_objdump_output(f, buf, &len, addr);
	if (len) {
		pr_debug("objdump read too few bytes: %zu\n", len);
		if (!ret)
			ret = len;
	}

	pclose(f);

	return ret;
}

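/* Hex-dump a buffer to the debug log, 16 bytes per line */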
static void dump_buf(unsigned char *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		pr_debug("0x%02x ", buf[i]);
		if (i % 16 == 15)
			pr_debug("\n");
	}
	pr_debug("\n");
}

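/*
 * The core of the test: read the object code at 'addr' twice, once through
 * perf's dso__data_read_offset() and once by disassembling with objdump,
 * and fail if the two reads differ.
 */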
static int read_object_code(u64 addr, size_t len, u8 cpumode,
			    struct thread *thread, struct state *state)
{
	struct addr_location al;
	unsigned char buf1[BUFSZ] = {0};
	unsigned char buf2[BUFSZ] = {0};
	size_t ret_len;
	u64 objdump_addr;
	const char *objdump_name;
	char decomp_name[KMOD_DECOMP_LEN];
	bool decomp = false;
	int ret, err = 0;
	struct dso *dso;

	pr_debug("Reading object code for memory address: %#"PRIx64"\n", addr);

	addr_location__init(&al);
	if (!thread__find_map(thread, cpumode, addr, &al) || !map__dso(al.map)) {
		if (cpumode == PERF_RECORD_MISC_HYPERVISOR) {
			pr_debug("Hypervisor address can not be resolved - skipping\n");
			goto out;
		}

		pr_debug("thread__find_map failed\n");
		err = -1;
		goto out;
	}
	dso = map__dso(al.map);
	pr_debug("File is: %s\n", dso__long_name(dso));

	if (dso__symtab_type(dso) == DSO_BINARY_TYPE__KALLSYMS && !dso__is_kcore(dso)) {
		pr_debug("Unexpected kernel address - skipping\n");
		goto out;
	}

	pr_debug("On file address is: %#"PRIx64"\n", al.addr);

	if (len > BUFSZ)
		len = BUFSZ;

	/* Do not go off the map */
	if (addr + len > map__end(al.map))
		len = map__end(al.map) - addr;

	/*
	 * Some architectures (e.g. powerpc) have stubs (trampolines) in
	 * kernel modules to manage long jumps. Check if the ip offset falls
	 * in a stub section of a kernel module, and skip module addresses
	 * that lie beyond the end of text.
	 */
	if (dso__is_kmod(dso) && al.addr > dso__text_end(dso)) {
		pr_debug("skipping the module address %#"PRIx64" after text end\n", al.addr);
		goto out;
	}

	/* Read the object code using perf */
	ret_len = dso__data_read_offset(dso, maps__machine(thread__maps(thread)),
					al.addr, buf1, len);
	if (ret_len != len) {
		pr_debug("dso__data_read_offset failed\n");
		err = -1;
		goto out;
	}

	/*
	 * Converting addresses for use by objdump requires more information.
	 * map__load() does that.  See map__rip_2objdump() for details.
	 */
	if (map__load(al.map)) {
		err = -1;
		goto out;
	}

	/* objdump struggles with kcore - try each map only once */
	if (dso__is_kcore(dso)) {
		size_t d;

		for (d = 0; d < state->done_cnt; d++) {
			if (state->done[d] == map__start(al.map)) {
				pr_debug("kcore map tested already");
				pr_debug(" - skipping\n");
				goto out;
			}
		}
		if (state->done_cnt >= ARRAY_SIZE(state->done)) {
			pr_debug("Too many kcore maps - skipping\n");
			goto out;
		}
		state->done[state->done_cnt++] = map__start(al.map);
	}

	objdump_name = dso__long_name(dso);
	if (dso__needs_decompress(dso)) {
		if (dso__decompress_kmodule_path(dso, objdump_name,
						 decomp_name,
						 sizeof(decomp_name)) < 0) {
			pr_debug("decompression failed\n");
			err = -1;
			goto out;
		}

		decomp = true;
		objdump_name = decomp_name;
	}

	/* Read the object code using objdump */
	objdump_addr = map__rip_2objdump(al.map, al.addr);
	ret = read_via_objdump(objdump_name, objdump_addr, buf2, len);

	if (decomp)
		unlink(objdump_name);

	if (ret > 0) {
		/*
		 * The kernel maps are inaccurate - assume objdump is right in
		 * that case.
		 */
		if (cpumode == PERF_RECORD_MISC_KERNEL ||
		    cpumode == PERF_RECORD_MISC_GUEST_KERNEL) {
			len -= ret;
			if (len) {
				pr_debug("Reducing len to %zu\n", len);
			} else if (dso__is_kcore(dso)) {
				/*
				 * objdump cannot handle very large segments
				 * that may be found in kcore.
				 */
				pr_debug("objdump failed for kcore");
				pr_debug(" - skipping\n");
			} else {
				err = -1;
			}
			goto out;
		}
	}
	if (ret < 0) {
		pr_debug("read_via_objdump failed\n");
		err = -1;
		goto out;
	}

	/* The results should be identical */
	if (memcmp(buf1, buf2, len)) {
		pr_debug("Bytes read differ from those read by objdump\n");
		pr_debug("buf1 (dso):\n");
		dump_buf(buf1, len);
		pr_debug("buf2 (objdump):\n");
		dump_buf(buf2, len);
		err = -1;
		goto out;
	}
	pr_debug("Bytes read match those read by objdump\n");
out:
	addr_location__exit(&al);
	return err;
}

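/* Resolve the sampled thread and check the object code at the sample ip */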
static int process_sample_event(struct machine *machine,
				struct evlist *evlist,
				union perf_event *event, struct state *state)
{
	struct perf_sample sample;
	struct thread *thread;
	int ret;

	if (evlist__parse_sample(evlist, event, &sample)) {
		pr_debug("evlist__parse_sample failed\n");
		return -1;
	}

	thread = machine__findnew_thread(machine, sample.pid, sample.tid);
	if (!thread) {
		pr_debug("machine__findnew_thread failed\n");
		return -1;
	}

	ret = read_object_code(sample.ip, READLEN, sample.cpumode, thread, state);
	thread__put(thread);
	return ret;
}

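/*
 * Samples are checked against objdump, throttle events are ignored, and
 * everything else is fed to the machine so its maps stay up to date.
 */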
static int process_event(struct machine *machine, struct evlist *evlist,
			 union perf_event *event, struct state *state)
{
	if (event->header.type == PERF_RECORD_SAMPLE)
		return process_sample_event(machine, evlist, event, state);

	if (event->header.type == PERF_RECORD_THROTTLE ||
	    event->header.type == PERF_RECORD_UNTHROTTLE)
		return 0;

	if (event->header.type < PERF_RECORD_MAX) {
		int ret;

		ret = machine__process_event(machine, event, NULL);
		if (ret < 0)
			pr_debug("machine__process_event failed, event type %u\n",
				 event->header.type);
		return ret;
	}

	return 0;
}

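/* Drain every mmapped ring buffer and process the events found there */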
static int process_events(struct machine *machine, struct evlist *evlist,
			  struct state *state)
{
	union perf_event *event;
	struct mmap *md;
	int i, ret;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		md = &evlist->mmap[i];
		if (perf_mmap__read_init(&md->core) < 0)
			continue;

		while ((event = perf_mmap__read_event(&md->core)) != NULL) {
			ret = process_event(machine, evlist, event, state);
			perf_mmap__consume(&md->core);
			if (ret < 0)
				return ret;
		}
		perf_mmap__read_done(&md->core);
	}
	return 0;
}

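/*
 * Workload generators: sorting exercises user-space code, while the pipe
 * and file operations pull in kernel code, so samples land on a mix of
 * user and kernel addresses.
 */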
static int comp(const void *a, const void *b)
{
	return *(int *)a - *(int *)b;
}

static void do_sort_something(void)
{
	int buf[40960], i;

	for (i = 0; i < (int)ARRAY_SIZE(buf); i++)
		buf[i] = ARRAY_SIZE(buf) - i - 1;

	qsort(buf, ARRAY_SIZE(buf), sizeof(int), comp);

	for (i = 0; i < (int)ARRAY_SIZE(buf); i++) {
		if (buf[i] != i) {
			pr_debug("qsort failed\n");
			break;
		}
	}
}

static void sort_something(void)
{
	int i;

	for (i = 0; i < 10; i++)
		do_sort_something();
}

static void syscall_something(void)
{
	int pipefd[2];
	int i;

	for (i = 0; i < 1000; i++) {
		if (pipe(pipefd) < 0) {
			pr_debug("pipe failed\n");
			break;
		}
		close(pipefd[1]);
		close(pipefd[0]);
	}
}

static void fs_something(void)
{
	const char *test_file_name = "temp-perf-code-reading-test-file--";
	FILE *f;
	int i;

	for (i = 0; i < 1000; i++) {
		f = fopen(test_file_name, "w+");
		if (f) {
			fclose(f);
			unlink(test_file_name);
		}
	}
}

static void do_something(void)
{
	fs_something();

	sort_something();

	syscall_something();
}

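/* Possible outcomes of one pass of the test */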
enum {
	TEST_CODE_READING_OK,
	TEST_CODE_READING_NO_VMLINUX,
	TEST_CODE_READING_NO_KCORE,
	TEST_CODE_READING_NO_ACCESS,
	TEST_CODE_READING_NO_KERNEL_OBJ,
};

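/*
 * One pass of the test: create a machine for the host, load the kernel map
 * (forcing kallsyms/kcore when 'try_kcore' is set), open the first usable
 * event, sample the workload, and compare object code at each sample ip.
 */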
static int do_test_code_reading(bool try_kcore)
{
	struct machine *machine;
	struct thread *thread;
	struct record_opts opts = {
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 500,
		.target		     = {
			.uses_mmap   = true,
		},
	};
	struct state state = {
		.done_cnt = 0,
	};
	struct perf_thread_map *threads = NULL;
	struct perf_cpu_map *cpus = NULL;
	struct evlist *evlist = NULL;
	struct evsel *evsel = NULL;
	int err = -1, ret;
	pid_t pid;
	struct map *map;
	bool have_vmlinux, have_kcore;
	struct dso *dso;
	const char *events[] = { "cycles", "cycles:u", "cpu-clock", "cpu-clock:u", NULL };
	int evidx = 0;

	pid = getpid();

	machine = machine__new_host();
	if (!machine) {
		pr_debug("machine__new_host failed\n");
		return -1;
	}
	machine->env = &perf_env;

	ret = machine__create_kernel_maps(machine);
	if (ret < 0) {
		pr_debug("machine__create_kernel_maps failed\n");
		goto out_err;
	}

	/* Force the use of kallsyms instead of vmlinux to try kcore */
	if (try_kcore)
		symbol_conf.kallsyms_name = "/proc/kallsyms";

	/* Load kernel map */
	map = machine__kernel_map(machine);
	ret = map__load(map);
	if (ret < 0) {
		pr_debug("map__load failed\n");
		goto out_err;
	}
	dso = map__dso(map);
	have_vmlinux = dso__is_vmlinux(dso);
	have_kcore = dso__is_kcore(dso);

	/* 2nd time through we just try kcore */
	if (try_kcore && !have_kcore) {
		err = TEST_CODE_READING_NO_KCORE;
		goto out_err;
	}

	/* No point getting kernel events if there is no kernel object */
	if (!have_vmlinux && !have_kcore)
		evidx++;

	threads = thread_map__new_by_tid(pid);
	if (!threads) {
		pr_debug("thread_map__new_by_tid failed\n");
		goto out_err;
	}

	ret = perf_event__synthesize_thread_map(NULL, threads,
						perf_event__process, machine,
						true, false);
	if (ret < 0) {
		pr_debug("perf_event__synthesize_thread_map failed\n");
		goto out_err;
	}

	thread = machine__findnew_thread(machine, pid, pid);
	if (!thread) {
		pr_debug("machine__findnew_thread failed\n");
		goto out_put;
	}

	cpus = perf_cpu_map__new_online_cpus();
	if (!cpus) {
		pr_debug("perf_cpu_map__new_online_cpus failed\n");
		goto out_put;
	}

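	/*
	 * Try each event in turn until one can be opened: the ":u" variants
	 * restrict sampling to user space for when kernel access is denied,
	 * and the cpu-clock events work without a hardware PMU.
	 */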
	while (events[evidx]) {
		const char *str;

		evlist = evlist__new();
		if (!evlist) {
			pr_debug("evlist__new failed\n");
			goto out_put;
		}

		perf_evlist__set_maps(&evlist->core, cpus, threads);

		str = events[evidx];
		pr_debug("Parsing event '%s'\n", str);
		ret = parse_event(evlist, str);
		if (ret < 0) {
			pr_debug("parse_events failed\n");
			goto out_put;
		}

		evlist__config(evlist, &opts, NULL);

		evlist__for_each_entry(evlist, evsel) {
			evsel->core.attr.comm = 1;
			evsel->core.attr.disabled = 1;
			evsel->core.attr.enable_on_exec = 0;
		}

		ret = evlist__open(evlist);
		if (ret < 0) {
			evidx++;

			if (events[evidx] == NULL && verbose > 0) {
				char errbuf[512];

				evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
				pr_debug("evlist__open() failed!\n%s\n", errbuf);
			}

			/*
			 * Both cpus and threads are now owned by evlist
			 * and will be freed by the following
			 * perf_evlist__set_maps() call. Get a reference
			 * to keep them alive.
			 */
			perf_cpu_map__get(cpus);
			perf_thread_map__get(threads);
			perf_evlist__set_maps(&evlist->core, NULL, NULL);
			evlist__delete(evlist);
			evlist = NULL;
			continue;
		}
		break;
	}

	if (events[evidx] == NULL)
		goto out_put;

	ret = evlist__mmap(evlist, UINT_MAX);
	if (ret < 0) {
		pr_debug("evlist__mmap failed\n");
		goto out_put;
	}

	evlist__enable(evlist);

	do_something();

	evlist__disable(evlist);

	ret = process_events(machine, evlist, &state);
	if (ret < 0)
		goto out_put;

	if (!have_vmlinux && !have_kcore && !try_kcore)
		err = TEST_CODE_READING_NO_KERNEL_OBJ;
	else if (!have_vmlinux && !try_kcore)
		err = TEST_CODE_READING_NO_VMLINUX;
	else if (strstr(events[evidx], ":u"))
		err = TEST_CODE_READING_NO_ACCESS;
	else
		err = TEST_CODE_READING_OK;
out_put:
	thread__put(thread);
out_err:
	evlist__delete(evlist);
	perf_cpu_map__put(cpus);
	perf_thread_map__put(threads);
	machine__delete(machine);

	return err;
}

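/*
 * Run the test twice: first with the default kernel object (vmlinux if
 * available), then forcing kallsyms so that kcore is exercised too.
 */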
static int test__code_reading(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
	int ret;

	ret = do_test_code_reading(false);
	if (!ret)
		ret = do_test_code_reading(true);

	switch (ret) {
	case TEST_CODE_READING_OK:
		return 0;
	case TEST_CODE_READING_NO_VMLINUX:
		pr_debug("no vmlinux\n");
		return 0;
	case TEST_CODE_READING_NO_KCORE:
		pr_debug("no kcore\n");
		return 0;
	case TEST_CODE_READING_NO_ACCESS:
		pr_debug("no access\n");
		return 0;
	case TEST_CODE_READING_NO_KERNEL_OBJ:
		pr_debug("no kernel obj\n");
		return 0;
	default:
		return -1;
	}
}

DEFINE_SUITE("Object code reading", code_reading);