// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/rbtree.h>
#include <linux/types.h>
#include <inttypes.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>
#include <string.h>
#include <sys/param.h>
#include <sys/utsname.h>
#include <perf/cpumap.h>
#include <perf/evlist.h>
#include <perf/mmap.h>

#include "debug.h"
#include "dso.h"
#include "env.h"
#include "parse-events.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
#include "machine.h"
#include "map.h"
#include "symbol.h"
#include "event.h"
#include "record.h"
#include "util/mmap.h"
#include "util/string2.h"
#include "util/synthetic-events.h"
#include "util/util.h"
#include "thread.h"

#include "tests.h"

#include <linux/ctype.h>

#define BUFSZ	1024
#define READLEN	128

struct tested_section {
	struct rb_node rb_node;
	u64 addr;
	char path[PATH_MAX];
};

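/*
 * Remember which (path, address) pairs have already been compared so the
 * same work is not redone.  Returns true if the pair was already in the
 * tree (or on allocation failure, so the caller skips instead of
 * retesting), false if it was newly inserted.
 */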
static bool tested_code_insert_or_exists(const char *path, u64 addr,
					 struct rb_root *tested_sections)
{
	struct rb_node **node = &tested_sections->rb_node;
	struct rb_node *parent = NULL;
	struct tested_section *data;

	while (*node) {
		int cmp;

		parent = *node;
		data = rb_entry(*node, struct tested_section, rb_node);
		cmp = strcmp(path, data->path);
		if (!cmp) {
			if (addr < data->addr)
				cmp = -1;
			else if (addr > data->addr)
				cmp = 1;
			else
				return true; /* already tested */
		}

		if (cmp < 0)
			node = &(*node)->rb_left;
		else
			node = &(*node)->rb_right;
	}

	data = zalloc(sizeof(*data));
	if (!data)
		return true;

	data->addr = addr;
	strlcpy(data->path, path, sizeof(data->path));
	rb_link_node(&data->rb_node, parent, node);
	rb_insert_color(&data->rb_node, tested_sections);
	return false;
}

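/* Free the rbtree built up by tested_code_insert_or_exists(). */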
static void tested_sections__free(struct rb_root *root)
{
	while (!RB_EMPTY_ROOT(root)) {
		struct rb_node *node = rb_first(root);
		struct tested_section *ts = rb_entry(node,
						     struct tested_section,
						     rb_node);

		rb_erase(node, root);
		free(ts);
	}
}

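/*
 * Decode one whitespace-delimited chunk of hex digit pairs from an
 * objdump disassembly line into *buf, advancing *line, *buf and
 * *buf_len.  Returns the number of bytes decoded.
 */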
static size_t read_objdump_chunk(const char **line, unsigned char **buf,
				 size_t *buf_len)
{
	size_t bytes_read = 0;
	unsigned char *chunk_start = *buf;

	/* Read bytes */
	while (*buf_len > 0) {
		char c1, c2;

		/* Get 2 hex digits */
		c1 = *(*line)++;
		if (!isxdigit(c1))
			break;
		c2 = *(*line)++;
		if (!isxdigit(c2))
			break;

		/* Store byte and advance buf */
		**buf = (hex(c1) << 4) | hex(c2);
		(*buf)++;
		(*buf_len)--;
		bytes_read++;

		/* End of chunk? */
		if (isspace(**line))
			break;
	}

	/*
	 * objdump displays a raw insn as LE if the code endian is LE and
	 * bytes_per_chunk > 1.  In that case reverse the chunk we just
	 * read.
	 *
	 * (See disassemble_bytes() in binutils/objdump.c for details of
	 * how objdump chooses the display endian.)
	 */
	if (bytes_read > 1 && !host_is_bigendian()) {
		unsigned char *chunk_end = chunk_start + bytes_read - 1;
		unsigned char tmp;

		while (chunk_start < chunk_end) {
			tmp = *chunk_start;
			*chunk_start = *chunk_end;
			*chunk_end = tmp;
			chunk_start++;
			chunk_end--;
		}
	}

	return bytes_read;
}

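/*
 * Decode all the hex chunks on one objdump output line of the form
 * "  <addr>:\t<hex bytes>\t<mnemonic>" into buf.  Returns the number
 * of bytes decoded, or 0 if the line carries no code bytes.
 */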
static size_t read_objdump_line(const char *line, unsigned char *buf,
				size_t buf_len)
{
	const char *p;
	size_t ret, bytes_read = 0;

	/* Skip to a colon */
	p = strchr(line, ':');
	if (!p)
		return 0;
	p++;

	/* Skip initial spaces */
	while (*p) {
		if (!isspace(*p))
			break;
		p++;
	}

	do {
		ret = read_objdump_chunk(&p, &buf, &buf_len);
		bytes_read += ret;
		p++;
	} while (ret > 0);

	/* return number of successfully read bytes */
	return bytes_read;
}

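/*
 * Read objdump output from the pipe 'f' and reassemble up to *len bytes
 * of code starting at start_addr into buf.  On return, *len holds the
 * number of bytes that could not be read.
 */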
static int read_objdump_output(FILE *f, void *buf, size_t *len, u64 start_addr)
{
	char *line = NULL;
	size_t line_len, off_last = 0;
	ssize_t ret;
	int err = 0;
	u64 addr, last_addr = start_addr;

	while (off_last < *len) {
		size_t off, read_bytes, written_bytes;
		unsigned char tmp[BUFSZ];

		ret = getline(&line, &line_len, f);
		if (feof(f))
			break;
		if (ret < 0) {
			pr_debug("getline failed\n");
			err = -1;
			break;
		}

		/* read objdump data into temporary buffer */
		read_bytes = read_objdump_line(line, tmp, sizeof(tmp));
		if (!read_bytes)
			continue;

		if (sscanf(line, "%"PRIx64, &addr) != 1)
			continue;
		if (addr < last_addr) {
			pr_debug("addr going backwards, read beyond section?\n");
			break;
		}
		last_addr = addr;

		/*
		 * Copy from the temporary buffer to 'buf' according to
		 * the address on the current objdump line.
		 */
		off = addr - start_addr;
		if (off >= *len)
			break;
		written_bytes = MIN(read_bytes, *len - off);
		memcpy(buf + off, tmp, written_bytes);
		off_last = off + written_bytes;
	}

	/* len returns number of bytes that could not be read */
	*len -= off_last;

	free(line);

	return err;
}

/*
 * Get the GNU objdump version, encoded as major * 10000 + minor * 100 +
 * patch.  Returns 0 for llvm-objdump (whose --version output does not
 * start with "GNU") and -1 if running objdump fails.
 */
static int objdump_version(void)
{
	size_t line_len;
	char cmd[PATH_MAX * 2];
	char *line = NULL, *line_start;
	const char *fmt;
	FILE *f;
	int ret;

	int version_tmp, version_num = 0;
	char *version = NULL, *token;

	fmt = "%s --version";
	ret = snprintf(cmd, sizeof(cmd), fmt, test_objdump_path);
	if (ret <= 0 || (size_t)ret >= sizeof(cmd))
		return -1;
	/* Ignore objdump errors */
	strcat(cmd, " 2>/dev/null");
	f = popen(cmd, "r");
	if (!f) {
		pr_debug("popen failed\n");
		return -1;
	}
	/* Get first line of objdump --version output */
	ret = getline(&line, &line_len, f);
	pclose(f);
	if (ret < 0) {
		pr_debug("getline failed\n");
		free(line);
		return -1;
	}

	/* strsep() advances 'line', so remember the start for free() */
	line_start = line;
	token = strsep(&line, " ");
	if (token != NULL && !strcmp(token, "GNU")) {
		/* The version is the last part of the first line. */
		while ((token = strsep(&line, " ")))
			version = token;

		/* Convert "major.minor.patch" into a comparable number */
		token = strsep(&version, ".");
		if (token)
			version_num = atoi(token) * 10000;

		token = strsep(&version, ".");
		if (token) {
			version_tmp = atoi(token);
			version_num += version_tmp * 100;
		}

		token = strsep(&version, ".");
		if (token) {
			version_tmp = atoi(token);
			version_num += version_tmp;
		}
	}
	free(line_start);

	return version_num;
}

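/*
 * Disassemble [addr, addr + len) of 'filename' with objdump and decode
 * the raw instruction bytes back into buf.  Returns 0 on success, the
 * number of bytes that could not be read if objdump produced too few,
 * or -1 on error.
 */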
static int read_via_objdump(const char *filename, u64 addr, void *buf,
			    size_t len)
{
	u64 stop_address = addr + len;
	struct utsname uname_buf;
	char cmd[PATH_MAX * 2];
	const char *fmt;
	FILE *f;
	int ret;

	ret = uname(&uname_buf);
	if (ret) {
		pr_debug("uname failed\n");
		return -1;
	}

	if (!strncmp(uname_buf.machine, "riscv", 5)) {
		int version = objdump_version();

		/* Default to this workaround if version parsing fails */
		if (version < 0 || version > 24100) {
			/*
			 * Starting at riscv objdump version 2.41, dumping in
			 * the middle of an instruction is not supported. riscv
			 * instructions are aligned along 2-byte intervals and
			 * can be either 2-bytes or 4-bytes. This makes it
			 * possible that the stop-address lands in the middle of
			 * a 4-byte instruction. Increase the stop_address by
			 * two to ensure an instruction is not cut in half, but
			 * leave the len as-is so only the expected number of
			 * bytes are collected.
			 */
			stop_address += 2;
		}
	}

	fmt = "%s -z -d --start-address=0x%"PRIx64" --stop-address=0x%"PRIx64" %s";
	ret = snprintf(cmd, sizeof(cmd), fmt, test_objdump_path, addr, stop_address,
		       filename);
	if (ret <= 0 || (size_t)ret >= sizeof(cmd))
		return -1;

	pr_debug("Objdump command is: %s\n", cmd);

	/* Ignore objdump errors */
	strcat(cmd, " 2>/dev/null");

	f = popen(cmd, "r");
	if (!f) {
		pr_debug("popen failed\n");
		return -1;
	}

	ret = read_objdump_output(f, buf, &len, addr);
	if (len) {
		pr_debug("objdump read too few bytes: %zu\n", len);
		if (!ret)
			ret = len;
	}

	pclose(f);

	return ret;
}

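/* Hex dump 'len' bytes of buf to the debug log, 16 bytes per line. */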
static void dump_buf(unsigned char *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		pr_debug("0x%02x ", buf[i]);
		if (i % 16 == 15)
			pr_debug("\n");
	}
	pr_debug("\n");
}

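/*
 * The heart of the test: read the code at 'addr' both through the dso
 * (the way perf itself reads object code) and by running objdump on the
 * object file, then check that the two byte sequences match.
 */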
static int read_object_code(u64 addr, size_t len, u8 cpumode,
			    struct thread *thread,
			    struct rb_root *tested_sections)
{
	struct addr_location al;
	unsigned char buf1[BUFSZ] = {0};
	unsigned char buf2[BUFSZ] = {0};
	size_t ret_len;
	u64 objdump_addr;
	u64 skip_addr;
	const char *objdump_name;
	char decomp_name[KMOD_DECOMP_LEN];
	bool decomp = false;
	int ret, err = 0;
	struct dso *dso;

	pr_debug("Reading object code for memory address: %#"PRIx64"\n", addr);

	addr_location__init(&al);
	if (!thread__find_map(thread, cpumode, addr, &al) || !map__dso(al.map)) {
		if (cpumode == PERF_RECORD_MISC_HYPERVISOR) {
			pr_debug("Hypervisor address can not be resolved - skipping\n");
			goto out;
		}

		pr_debug("thread__find_map failed\n");
		err = -1;
		goto out;
	}
	dso = map__dso(al.map);
	pr_debug("File is: %s\n", dso__long_name(dso));

	if (dso__symtab_type(dso) == DSO_BINARY_TYPE__KALLSYMS && !dso__is_kcore(dso)) {
		pr_debug("Unexpected kernel address - skipping\n");
		goto out;
	}

	/*
	 * Don't retest the same addresses. objdump struggles with kcore - try
	 * each map only once even if the address is different.
	 */
	skip_addr = dso__is_kcore(dso) ? map__start(al.map) : al.addr;
	if (tested_code_insert_or_exists(dso__long_name(dso), skip_addr,
					 tested_sections)) {
		pr_debug("Already tested %s @ %#"PRIx64" - skipping\n",
			 dso__long_name(dso), skip_addr);
		goto out;
	}

	pr_debug("On file address is: %#"PRIx64"\n", al.addr);

	if (len > BUFSZ)
		len = BUFSZ;

	/* Do not go off the map */
	if (addr + len > map__end(al.map))
		len = map__end(al.map) - addr;

	/*
	 * Some architectures (e.g. powerpc) have stubs (trampolines) in
	 * kernel modules to manage long jumps. Check if the ip offset
	 * falls in a stub section of a kernel module, and skip module
	 * addresses after the text end.
	 */
	if (dso__is_kmod(dso) && al.addr > dso__text_end(dso)) {
		pr_debug("skipping the module address %#"PRIx64" after text end\n", al.addr);
		goto out;
	}

	/* Read the object code using perf */
	ret_len = dso__data_read_offset(dso, maps__machine(thread__maps(thread)),
					al.addr, buf1, len);
	if (ret_len != len) {
		pr_debug("dso__data_read_offset failed\n");
		err = -1;
		goto out;
	}

	/*
	 * Converting addresses for use by objdump requires more information.
	 * map__load() does that.  See map__rip_2objdump() for details.
	 */
	if (map__load(al.map)) {
		err = -1;
		goto out;
	}

	objdump_name = dso__long_name(dso);
	if (dso__needs_decompress(dso)) {
		if (dso__decompress_kmodule_path(dso, objdump_name,
						 decomp_name,
						 sizeof(decomp_name)) < 0) {
			pr_debug("decompression failed\n");
			err = -1;
			goto out;
		}

		decomp = true;
		objdump_name = decomp_name;
	}

	/* Read the object code using objdump */
	objdump_addr = map__rip_2objdump(al.map, al.addr);
	ret = read_via_objdump(objdump_name, objdump_addr, buf2, len);

	if (decomp)
		unlink(objdump_name);

	if (ret > 0) {
		/*
		 * The kernel maps are inaccurate - assume objdump is right in
		 * that case.
		 */
		if (cpumode == PERF_RECORD_MISC_KERNEL ||
		    cpumode == PERF_RECORD_MISC_GUEST_KERNEL) {
			len -= ret;
			if (len) {
				pr_debug("Reducing len to %zu\n", len);
			} else if (dso__is_kcore(dso)) {
				/*
				 * objdump cannot handle very large segments
				 * that may be found in kcore.
				 */
				pr_debug("objdump failed for kcore - skipping\n");
			} else {
				err = -1;
			}
			goto out;
		}
	}
	if (ret < 0) {
		pr_debug("read_via_objdump failed\n");
		err = -1;
		goto out;
	}

	/* The results should be identical */
	if (memcmp(buf1, buf2, len)) {
		pr_debug("Bytes read differ from those read by objdump\n");
		pr_debug("buf1 (dso):\n");
		dump_buf(buf1, len);
		pr_debug("buf2 (objdump):\n");
		dump_buf(buf2, len);
		err = -1;
		goto out;
	}
	pr_debug("Bytes read match those read by objdump\n");
out:
	addr_location__exit(&al);
	return err;
}

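/*
 * Parse a PERF_RECORD_SAMPLE, resolve its thread and verify the object
 * code at the sampled instruction pointer.
 */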
static int process_sample_event(struct machine *machine, struct evlist *evlist,
				union perf_event *event,
				struct rb_root *tested_sections)
{
	struct perf_sample sample;
	struct thread *thread;
	int ret;

	perf_sample__init(&sample, /*all=*/false);
	ret = evlist__parse_sample(evlist, event, &sample);
	if (ret) {
		pr_debug("evlist__parse_sample failed\n");
		ret = -1;
		goto out;
	}

	thread = machine__findnew_thread(machine, sample.pid, sample.tid);
	if (!thread) {
		pr_debug("machine__findnew_thread failed\n");
		ret = -1;
		goto out;
	}

	ret = read_object_code(sample.ip, READLEN, sample.cpumode, thread,
			       tested_sections);
	thread__put(thread);
out:
	perf_sample__exit(&sample);
	return ret;
}

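/*
 * Dispatch one ring buffer event: samples get their code checked,
 * throttle events are ignored and everything else updates the machine
 * state (mmaps, comms, ...) so later samples can be resolved.
 */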
static int process_event(struct machine *machine, struct evlist *evlist,
			 union perf_event *event, struct rb_root *tested_sections)
{
	if (event->header.type == PERF_RECORD_SAMPLE)
		return process_sample_event(machine, evlist, event,
					    tested_sections);

	if (event->header.type == PERF_RECORD_THROTTLE ||
	    event->header.type == PERF_RECORD_UNTHROTTLE)
		return 0;

	if (event->header.type < PERF_RECORD_MAX) {
		int ret;

		ret = machine__process_event(machine, event, NULL);
		if (ret < 0)
			pr_debug("machine__process_event failed, event type %u\n",
				 event->header.type);
		return ret;
	}

	return 0;
}

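/* Drain all mmapped ring buffers of the evlist through process_event(). */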
static int process_events(struct machine *machine, struct evlist *evlist,
			  struct rb_root *tested_sections)
{
	union perf_event *event;
	struct mmap *md;
	int i, ret;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		md = &evlist->mmap[i];
		if (perf_mmap__read_init(&md->core) < 0)
			continue;

		while ((event = perf_mmap__read_event(&md->core)) != NULL) {
			ret = process_event(machine, evlist, event, tested_sections);
			perf_mmap__consume(&md->core);
			if (ret < 0)
				return ret;
		}
		perf_mmap__read_done(&md->core);
	}
	return 0;
}

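/*
 * The functions below just burn cycles in user space, in the kernel
 * (via syscalls) and in the filesystem, so that the enabled events
 * produce samples at a spread of addresses for the test to verify.
 */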
static int comp(const void *a, const void *b)
{
	return *(int *)a - *(int *)b;
}

static void do_sort_something(void)
{
	int buf[40960], i;

	for (i = 0; i < (int)ARRAY_SIZE(buf); i++)
		buf[i] = ARRAY_SIZE(buf) - i - 1;

	qsort(buf, ARRAY_SIZE(buf), sizeof(int), comp);

	for (i = 0; i < (int)ARRAY_SIZE(buf); i++) {
		if (buf[i] != i) {
			pr_debug("qsort failed\n");
			break;
		}
	}
}

static void sort_something(void)
{
	int i;

	for (i = 0; i < 10; i++)
		do_sort_something();
}

static void syscall_something(void)
{
	int pipefd[2];
	int i;

	for (i = 0; i < 1000; i++) {
		if (pipe(pipefd) < 0) {
			pr_debug("pipe failed\n");
			break;
		}
		close(pipefd[1]);
		close(pipefd[0]);
	}
}

static void fs_something(void)
{
	const char *test_file_name = "temp-perf-code-reading-test-file--";
	FILE *f;
	int i;

	for (i = 0; i < 1000; i++) {
		f = fopen(test_file_name, "w+");
		if (f) {
			fclose(f);
			unlink(test_file_name);
		}
	}
}

static void do_something(void)
{
	fs_something();

	sort_something();

	syscall_something();
}

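/* Outcomes of do_test_code_reading(); each maps to a pass in test__code_reading(). */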
enum {
	TEST_CODE_READING_OK,
	TEST_CODE_READING_NO_VMLINUX,
	TEST_CODE_READING_NO_KCORE,
	TEST_CODE_READING_NO_ACCESS,
	TEST_CODE_READING_NO_KERNEL_OBJ,
};

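/*
 * Open events on this process, run a workload and verify the code at
 * every sampled address.  With try_kcore the kernel map is forced to
 * come from /proc/kallsyms and kcore instead of vmlinux.
 */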
static int do_test_code_reading(bool try_kcore)
{
	struct machine *machine;
	struct thread *thread;
	struct record_opts opts = {
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 500,
		.target		     = {
			.uses_mmap   = true,
		},
	};
	struct rb_root tested_sections = RB_ROOT;
	struct perf_thread_map *threads = NULL;
	struct perf_cpu_map *cpus = NULL;
	struct evlist *evlist = NULL;
	struct evsel *evsel = NULL;
	int err = -1, ret;
	pid_t pid;
	struct map *map;
	bool have_vmlinux, have_kcore;
	struct dso *dso;
	const char *events[] = { "cycles", "cycles:u", "cpu-clock", "cpu-clock:u", NULL };
	int evidx = 0;
	struct perf_env host_env;

	pid = getpid();

	perf_env__init(&host_env);
	machine = machine__new_host(&host_env);

	ret = machine__create_kernel_maps(machine);
	if (ret < 0) {
		pr_debug("machine__create_kernel_maps failed\n");
		goto out_err;
	}

	/* Force the use of kallsyms instead of vmlinux to try kcore */
	if (try_kcore)
		symbol_conf.kallsyms_name = "/proc/kallsyms";

	/* Load kernel map */
	map = machine__kernel_map(machine);
	ret = map__load(map);
	if (ret < 0) {
		pr_debug("map__load failed\n");
		goto out_err;
	}
	dso = map__dso(map);
	have_vmlinux = dso__is_vmlinux(dso);
	have_kcore = dso__is_kcore(dso);

	/* 2nd time through we just try kcore */
	if (try_kcore && !have_kcore) {
		err = TEST_CODE_READING_NO_KCORE;
		goto out_err;
	}

	/* No point getting kernel events if there is no kernel object */
	if (!have_vmlinux && !have_kcore)
		evidx++;

	threads = thread_map__new_by_tid(pid);
	if (!threads) {
		pr_debug("thread_map__new_by_tid failed\n");
		goto out_err;
	}

	ret = perf_event__synthesize_thread_map(NULL, threads,
						perf_event__process, machine,
						true, false);
	if (ret < 0) {
		pr_debug("perf_event__synthesize_thread_map failed\n");
		goto out_err;
	}

	thread = machine__findnew_thread(machine, pid, pid);
	if (!thread) {
		pr_debug("machine__findnew_thread failed\n");
		goto out_put;
	}

	cpus = perf_cpu_map__new_online_cpus();
	if (!cpus) {
		pr_debug("perf_cpu_map__new_online_cpus failed\n");
		goto out_put;
	}

	while (events[evidx]) {
		const char *str;

		evlist = evlist__new();
		if (!evlist) {
			pr_debug("evlist__new failed\n");
			goto out_put;
		}

		perf_evlist__set_maps(&evlist->core, cpus, threads);

		str = events[evidx];
		pr_debug("Parsing event '%s'\n", str);
		ret = parse_event(evlist, str);
		if (ret < 0) {
			pr_debug("parse_event failed\n");
			goto out_put;
		}

		evlist__config(evlist, &opts, NULL);

		evlist__for_each_entry(evlist, evsel) {
			evsel->core.attr.comm = 1;
			evsel->core.attr.disabled = 1;
			evsel->core.attr.enable_on_exec = 0;
		}

		ret = evlist__open(evlist);
		if (ret < 0) {
			evidx++;

			if (events[evidx] == NULL && verbose > 0) {
				char errbuf[512];

				evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
				pr_debug("evlist__open() failed!\n%s\n", errbuf);
			}

			perf_evlist__set_maps(&evlist->core, NULL, NULL);
			evlist__delete(evlist);
			evlist = NULL;
			continue;
		}
		break;
	}

	if (events[evidx] == NULL)
		goto out_put;

	ret = evlist__mmap(evlist, UINT_MAX);
	if (ret < 0) {
		pr_debug("evlist__mmap failed\n");
		goto out_put;
	}

	evlist__enable(evlist);

	do_something();

	evlist__disable(evlist);

	ret = process_events(machine, evlist, &tested_sections);
	if (ret < 0)
		goto out_put;

	if (!have_vmlinux && !have_kcore && !try_kcore)
		err = TEST_CODE_READING_NO_KERNEL_OBJ;
	else if (!have_vmlinux && !try_kcore)
		err = TEST_CODE_READING_NO_VMLINUX;
	else if (strstr(events[evidx], ":u"))
		err = TEST_CODE_READING_NO_ACCESS;
	else
		err = TEST_CODE_READING_OK;
out_put:
	thread__put(thread);
out_err:
	evlist__delete(evlist);
	perf_cpu_map__put(cpus);
	perf_thread_map__put(threads);
	machine__delete(machine);
	perf_env__exit(&host_env);
	tested_sections__free(&tested_sections);

	return err;
}

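/* Run the test twice: first preferring vmlinux, then forcing kcore. */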
static int test__code_reading(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
	int ret;

	ret = do_test_code_reading(false);
	if (!ret)
		ret = do_test_code_reading(true);

	switch (ret) {
	case TEST_CODE_READING_OK:
		return 0;
	case TEST_CODE_READING_NO_VMLINUX:
		pr_debug("no vmlinux\n");
		return 0;
	case TEST_CODE_READING_NO_KCORE:
		pr_debug("no kcore\n");
		return 0;
	case TEST_CODE_READING_NO_ACCESS:
		pr_debug("no access\n");
		return 0;
	case TEST_CODE_READING_NO_KERNEL_OBJ:
		pr_debug("no kernel obj\n");
		return 0;
	default:
		return -1;
	}
}

DEFINE_SUITE("Object code reading", code_reading);