1 // SPDX-License-Identifier: GPL-2.0
2 #include <errno.h>
3 #include <linux/kconfig.h>
4 #include <linux/kernel.h>
5 #include <linux/rbtree.h>
6 #include <linux/types.h>
7 #include <inttypes.h>
8 #include <stdlib.h>
9 #include <unistd.h>
10 #include <stdio.h>
11 #include <string.h>
12 #include <sys/param.h>
13 #include <sys/utsname.h>
14 #include <perf/cpumap.h>
15 #include <perf/evlist.h>
16 #include <perf/mmap.h>
17
18 #include "debug.h"
19 #include "dso.h"
20 #include "env.h"
21 #include "parse-events.h"
22 #include "evlist.h"
23 #include "evsel.h"
24 #include "thread_map.h"
25 #include "machine.h"
26 #include "map.h"
27 #include "symbol.h"
28 #include "event.h"
29 #include "record.h"
30 #include "util/mmap.h"
31 #include "util/string2.h"
32 #include "util/synthetic-events.h"
33 #include "util/util.h"
34 #include "thread.h"
35
36 #include "tests.h"
37
38 #include <linux/ctype.h>
39
40 #define BUFSZ 1024
41 #define READLEN 128
42
/*
 * One (dso path, address) pair that has already been compared against
 * objdump output; kept in an rb-tree so each section is only tested once.
 */
struct tested_section {
	struct rb_node rb_node;	/* node in the tested_sections rb-tree */
	u64 addr;		/* address (or map start for kcore) already tested */
	char *path;		/* owned copy of the dso long name */
};
48
tested_code_insert_or_exists(const char * path,u64 addr,struct rb_root * tested_sections)49 static bool tested_code_insert_or_exists(const char *path, u64 addr,
50 struct rb_root *tested_sections)
51 {
52 struct rb_node **node = &tested_sections->rb_node;
53 struct rb_node *parent = NULL;
54 struct tested_section *data;
55
56 while (*node) {
57 int cmp;
58
59 parent = *node;
60 data = rb_entry(*node, struct tested_section, rb_node);
61 cmp = strcmp(path, data->path);
62 if (!cmp) {
63 if (addr < data->addr)
64 cmp = -1;
65 else if (addr > data->addr)
66 cmp = 1;
67 else
68 return true; /* already tested */
69 }
70
71 if (cmp < 0)
72 node = &(*node)->rb_left;
73 else
74 node = &(*node)->rb_right;
75 }
76
77 data = zalloc(sizeof(*data));
78 if (!data)
79 return true;
80
81 data->addr = addr;
82 data->path = strdup(path);
83 if (!data->path) {
84 free(data);
85 return true;
86 }
87 rb_link_node(&data->rb_node, parent, node);
88 rb_insert_color(&data->rb_node, tested_sections);
89 return false;
90 }
91
tested_sections__free(struct rb_root * root)92 static void tested_sections__free(struct rb_root *root)
93 {
94 while (!RB_EMPTY_ROOT(root)) {
95 struct rb_node *node = rb_first(root);
96 struct tested_section *ts = rb_entry(node,
97 struct tested_section,
98 rb_node);
99
100 rb_erase(node, root);
101 free(ts->path);
102 free(ts);
103 }
104 }
105
/*
 * Parse one hex "chunk" (consecutive hex digit pairs with no internal
 * whitespace) from an objdump disassembly line into *buf.
 *
 * On return *line points past the consumed characters, *buf past the
 * stored bytes, and *buf_len is reduced accordingly, so the caller can
 * keep calling this for further chunks on the same line.
 *
 * Returns the number of bytes stored.
 */
static size_t read_objdump_chunk(const char **line, unsigned char **buf,
				 size_t *buf_len)
{
	size_t bytes_read = 0;
	unsigned char *chunk_start = *buf;	/* remembered for the byte swap below */

	/* Read bytes */
	while (*buf_len > 0) {
		char c1, c2;

		/* Get 2 hex digits */
		c1 = *(*line)++;
		if (!isxdigit(c1))
			break;
		c2 = *(*line)++;
		if (!isxdigit(c2))
			break;

		/* Store byte and advance buf */
		**buf = (hex(c1) << 4) | hex(c2);
		(*buf)++;
		(*buf_len)--;
		bytes_read++;

		/* End of chunk? */
		if (isspace(**line))
			break;
	}

	/*
	 * objdump will display raw insn as LE if code endian
	 * is LE and bytes_per_chunk > 1. In that case reverse
	 * the chunk we just read.
	 *
	 * see disassemble_bytes() at binutils/objdump.c for details
	 * how objdump chooses display endian)
	 */
	if (bytes_read > 1 && !host_is_bigendian()) {
		unsigned char *chunk_end = chunk_start + bytes_read - 1;
		unsigned char tmp;

		/* In-place reversal of the chunk bytes. */
		while (chunk_start < chunk_end) {
			tmp = *chunk_start;
			*chunk_start = *chunk_end;
			*chunk_end = tmp;
			chunk_start++;
			chunk_end--;
		}
	}

	return bytes_read;
}
158
/*
 * Extract the raw instruction bytes from one objdump disassembly line
 * ("address: <hex chunks> <mnemonic...>") into buf.  Returns the number
 * of bytes successfully decoded.
 */
static size_t read_objdump_line(const char *line, unsigned char *buf,
				size_t buf_len)
{
	const char *p;
	size_t chunk, total = 0;

	/* The bytes follow the "address:" prefix; bail if there is none. */
	p = strchr(line, ':');
	if (!p)
		return 0;

	/* Skip the whitespace between the colon and the first hex chunk. */
	for (p = p + 1; *p && isspace(*p); p++)
		;

	/* Chunks are separated by a single whitespace character. */
	do {
		chunk = read_objdump_chunk(&p, &buf, &buf_len);
		total += chunk;
		p++;
	} while (chunk > 0);

	/* return number of successfully read bytes */
	return total;
}
187
/*
 * Read objdump's disassembly from @f and reassemble the raw instruction
 * bytes into @buf, placing each line's bytes at its address offset from
 * @start_addr.  On return *len holds the number of bytes that could NOT
 * be filled in.  Returns 0 on success, -1 on read error.
 */
static int read_objdump_output(FILE *f, void *buf, size_t *len, u64 start_addr)
{
	char *line = NULL;
	/*
	 * Fix: line_len was previously uninitialized; POSIX getline()
	 * requires *n to be meaningful, so start from 0 with line == NULL.
	 */
	size_t line_len = 0, off_last = 0;
	ssize_t ret;
	int err = 0;
	u64 addr, last_addr = start_addr;

	while (off_last < *len) {
		size_t off, read_bytes, written_bytes;
		unsigned char tmp[BUFSZ];

		ret = getline(&line, &line_len, f);
		if (feof(f))
			break;
		if (ret < 0) {
			pr_debug("getline failed\n");
			err = -1;
			break;
		}

		/* read objdump data into temporary buffer */
		read_bytes = read_objdump_line(line, tmp, sizeof(tmp));
		if (!read_bytes)
			continue;

		/* The line starts with the instruction's address. */
		if (sscanf(line, "%"PRIx64, &addr) != 1)
			continue;
		if (addr < last_addr) {
			pr_debug("addr going backwards, read beyond section?\n");
			break;
		}
		last_addr = addr;

		/* copy it from temporary buffer to 'buf' according
		 * to address on current objdump line */
		off = addr - start_addr;
		if (off >= *len)
			break;
		written_bytes = MIN(read_bytes, *len - off);
		memcpy(buf + off, tmp, written_bytes);
		off_last = off + written_bytes;
	}

	/* len returns number of bytes that could not be read */
	*len -= off_last;

	free(line);

	return err;
}
239
240 /*
241 * Only gets GNU objdump version. Returns 0 for llvm-objdump.
242 */
objdump_version(void)243 static int objdump_version(void)
244 {
245 size_t line_len;
246 char cmd[PATH_MAX * 2];
247 char *line = NULL;
248 const char *fmt;
249 FILE *f;
250 int ret;
251
252 int version_tmp, version_num = 0;
253 char *version = 0, *token;
254
255 fmt = "%s --version";
256 ret = snprintf(cmd, sizeof(cmd), fmt, test_objdump_path);
257 if (ret <= 0 || (size_t)ret >= sizeof(cmd))
258 return -1;
259 /* Ignore objdump errors */
260 strcat(cmd, " 2>/dev/null");
261 f = popen(cmd, "r");
262 if (!f) {
263 pr_debug("popen failed\n");
264 return -1;
265 }
266 /* Get first line of objdump --version output */
267 ret = getline(&line, &line_len, f);
268 pclose(f);
269 if (ret < 0) {
270 pr_debug("getline failed\n");
271 return -1;
272 }
273
274 token = strsep(&line, " ");
275 if (token != NULL && !strcmp(token, "GNU")) {
276 // version is last part of first line of objdump --version output.
277 while ((token = strsep(&line, " ")))
278 version = token;
279
280 // Convert version into a format we can compare with
281 token = strsep(&version, ".");
282 version_num = atoi(token);
283 if (version_num)
284 version_num *= 10000;
285
286 token = strsep(&version, ".");
287 version_tmp = atoi(token);
288 if (token)
289 version_num += version_tmp * 100;
290
291 token = strsep(&version, ".");
292 version_tmp = atoi(token);
293 if (token)
294 version_num += version_tmp;
295 }
296
297 return version_num;
298 }
299
/*
 * Run objdump over [addr, addr + len) of @filename and collect the raw
 * instruction bytes into @buf via read_objdump_output().
 *
 * On riscv with GNU objdump >= 2.41 (or when the version cannot be
 * determined), the stop address is bumped by 2 bytes so a trailing 4-byte
 * instruction is not cut in half; only @len bytes are still collected.
 *
 * Returns 0 on success, a positive count of bytes objdump failed to
 * provide, or -1 on error.
 */
static int read_via_objdump(const char *filename, u64 addr, void *buf,
			    size_t len)
{
	u64 stop_address = addr + len;
	struct utsname uname_buf;
	char cmd[PATH_MAX * 2];
	const char *fmt;
	FILE *f;
	int ret;

	ret = uname(&uname_buf);
	if (ret) {
		pr_debug("uname failed\n");
		return -1;
	}

	if (!strncmp(uname_buf.machine, "riscv", 5)) {
		int version = objdump_version();

		/* Default to this workaround if version parsing fails */
		if (version < 0 || version > 24100) {
			/*
			 * Starting at riscv objdump version 2.41, dumping in
			 * the middle of an instruction is not supported. riscv
			 * instructions are aligned along 2-byte intervals and
			 * can be either 2-bytes or 4-bytes. This makes it
			 * possible that the stop-address lands in the middle of
			 * a 4-byte instruction. Increase the stop_address by
			 * two to ensure an instruction is not cut in half, but
			 * leave the len as-is so only the expected number of
			 * bytes are collected.
			 */
			stop_address += 2;
		}
	}

	fmt = "%s -z -d --start-address=0x%"PRIx64" --stop-address=0x%"PRIx64" %s";
	ret = snprintf(cmd, sizeof(cmd), fmt, test_objdump_path, addr, stop_address,
		       filename);
	if (ret <= 0 || (size_t)ret >= sizeof(cmd))
		return -1;

	pr_debug("Objdump command is: %s\n", cmd);

	/* Ignore objdump errors */
	strcat(cmd, " 2>/dev/null");

	f = popen(cmd, "r");
	if (!f) {
		pr_debug("popen failed\n");
		return -1;
	}

	/* Any bytes left in len could not be reconstructed from the output. */
	ret = read_objdump_output(f, buf, &len, addr);
	if (len) {
		pr_debug("objdump read too few bytes: %zd\n", len);
		if (!ret)
			ret = len;
	}

	pclose(f);

	return ret;
}
364
/* Debug hex dump of buf, 16 bytes per output row. */
static void dump_buf(unsigned char *buf, size_t len)
{
	size_t pos;

	for (pos = 0; pos < len; pos++) {
		pr_debug("0x%02x ", buf[pos]);
		/* Break the line after every 16th byte. */
		if ((pos & 0xf) == 0xf)
			pr_debug("\n");
	}
	pr_debug("\n");
}
376
/*
 * Compare the object code at @addr: read up to @len bytes via perf's dso
 * data interface (buf1) and via objdump (buf2), and fail if they differ.
 *
 * Several situations are deliberately skipped rather than failed:
 * hypervisor samples, addresses already tested, kallsyms-only kernel
 * addresses, module addresses past text end, and kcore segments objdump
 * cannot handle.
 *
 * Returns 0 on success or skip, -1 on error/mismatch.
 */
static int read_object_code(u64 addr, size_t len, u8 cpumode,
			    struct thread *thread,
			    struct rb_root *tested_sections)
{
	struct addr_location al;
	unsigned char buf1[BUFSZ] = {0};
	unsigned char buf2[BUFSZ] = {0};
	size_t ret_len;
	u64 objdump_addr;
	u64 skip_addr;
	const char *objdump_name;
	char decomp_name[KMOD_DECOMP_LEN];
	bool decomp = false;
	int ret, err = 0;
	struct dso *dso;

	pr_debug("Reading object code for memory address: %#"PRIx64"\n", addr);

	addr_location__init(&al);
	if (!thread__find_map(thread, cpumode, addr, &al) || !map__dso(al.map)) {
		if (cpumode == PERF_RECORD_MISC_HYPERVISOR) {
			pr_debug("Hypervisor address can not be resolved - skipping\n");
			goto out;
		}

		pr_debug("thread__find_map failed\n");
		err = -1;
		goto out;
	}
	dso = map__dso(al.map);
	pr_debug("File is: %s\n", dso__long_name(dso));

	/* A kallsyms-only kernel dso has no backing object file to read. */
	if (dso__symtab_type(dso) == DSO_BINARY_TYPE__KALLSYMS && !dso__is_kcore(dso)) {
		pr_debug("Unexpected kernel address - skipping\n");
		goto out;
	}

	/*
	 * Don't retest the same addresses. objdump struggles with kcore - try
	 * each map only once even if the address is different.
	 */
	skip_addr = dso__is_kcore(dso) ? map__start(al.map) : al.addr;
	if (tested_code_insert_or_exists(dso__long_name(dso), skip_addr,
					 tested_sections)) {
		pr_debug("Already tested %s @ %#"PRIx64" - skipping\n",
			 dso__long_name(dso), skip_addr);
		goto out;
	}

	pr_debug("On file address is: %#"PRIx64"\n", al.addr);

	/* Clamp to the comparison buffers' size. */
	if (len > BUFSZ)
		len = BUFSZ;

	/* Do not go off the map */
	if (addr + len > map__end(al.map))
		len = map__end(al.map) - addr;

	/*
	 * Some architectures (ex: powerpc) have stubs (trampolines) in kernel
	 * modules to manage long jumps. Check if the ip offset falls in stubs
	 * sections for kernel modules. And skip module address after text end
	 */
	if (dso__is_kmod(dso) && al.addr > dso__text_end(dso)) {
		pr_debug("skipping the module address %#"PRIx64" after text end\n", al.addr);
		goto out;
	}

	/* Read the object code using perf */
	ret_len = dso__data_read_offset(dso, maps__machine(thread__maps(thread)),
					al.addr, buf1, len);
	if (ret_len != len) {
		pr_debug("dso__data_read_offset failed\n");
		err = -1;
		goto out;
	}

	/*
	 * Converting addresses for use by objdump requires more information.
	 * map__load() does that. See map__rip_2objdump() for details.
	 */
	if (map__load(al.map)) {
		err = -1;
		goto out;
	}

	/* objdump cannot read compressed modules - decompress to a temp file. */
	objdump_name = dso__long_name(dso);
	if (dso__needs_decompress(dso)) {
		if (dso__decompress_kmodule_path(dso, objdump_name,
						 decomp_name,
						 sizeof(decomp_name)) < 0) {
			pr_debug("decompression failed\n");
			err = -1;
			goto out;
		}

		decomp = true;
		objdump_name = decomp_name;
	}

	/* Read the object code using objdump */
	objdump_addr = map__rip_2objdump(al.map, al.addr);
	ret = read_via_objdump(objdump_name, objdump_addr, buf2, len);

	if (decomp)
		unlink(objdump_name);

	if (ret > 0) {
		/*
		 * The kernel maps are inaccurate - assume objdump is right in
		 * that case.
		 */
		if (cpumode == PERF_RECORD_MISC_KERNEL ||
		    cpumode == PERF_RECORD_MISC_GUEST_KERNEL) {
			len -= ret;
			if (len) {
				pr_debug("Reducing len to %zu\n", len);
			} else if (dso__is_kcore(dso)) {
				/*
				 * objdump cannot handle very large segments
				 * that may be found in kcore.
				 */
				pr_debug("objdump failed for kcore");
				pr_debug(" - skipping\n");
			} else {
				err = -1;
			}
			goto out;
		}
	}
	if (ret < 0) {
		pr_debug("read_via_objdump failed\n");
		err = -1;
		goto out;
	}

	/* The results should be identical */
	if (memcmp(buf1, buf2, len)) {
		pr_debug("Bytes read differ from those read by objdump\n");
		pr_debug("buf1 (dso):\n");
		dump_buf(buf1, len);
		pr_debug("buf2 (objdump):\n");
		dump_buf(buf2, len);
		err = -1;
		goto out;
	}
	pr_debug("Bytes read match those read by objdump\n");
out:
	addr_location__exit(&al);
	return err;
}
528
/*
 * Handle one PERF_RECORD_SAMPLE: parse it, resolve its thread and verify
 * READLEN bytes of object code at the sampled ip.
 *
 * Returns 0 on success, -1 on failure.
 */
static int process_sample_event(struct machine *machine, struct evlist *evlist,
				union perf_event *event,
				struct rb_root *tested_sections)
{
	struct perf_sample sample;
	struct thread *thread;
	int ret;

	/* perf_sample__init()/__exit() must bracket all uses of 'sample'. */
	perf_sample__init(&sample, /*all=*/false);
	ret = evlist__parse_sample(evlist, event, &sample);
	if (ret) {
		pr_debug("evlist__parse_sample failed\n");
		ret = -1;
		goto out;
	}

	thread = machine__findnew_thread(machine, sample.pid, sample.tid);
	if (!thread) {
		pr_debug("machine__findnew_thread failed\n");
		ret = -1;
		goto out;
	}

	ret = read_object_code(sample.ip, READLEN, sample.cpumode, thread,
			       tested_sections);
	thread__put(thread);
out:
	perf_sample__exit(&sample);
	return ret;
}
559
process_event(struct machine * machine,struct evlist * evlist,union perf_event * event,struct rb_root * tested_sections)560 static int process_event(struct machine *machine, struct evlist *evlist,
561 union perf_event *event, struct rb_root *tested_sections)
562 {
563 if (event->header.type == PERF_RECORD_SAMPLE)
564 return process_sample_event(machine, evlist, event,
565 tested_sections);
566
567 if (event->header.type == PERF_RECORD_THROTTLE ||
568 event->header.type == PERF_RECORD_UNTHROTTLE)
569 return 0;
570
571 if (event->header.type < PERF_RECORD_MAX) {
572 int ret;
573
574 ret = machine__process_event(machine, event, NULL);
575 if (ret < 0)
576 pr_debug("machine__process_event failed, event type %u\n",
577 event->header.type);
578 return ret;
579 }
580
581 return 0;
582 }
583
process_events(struct machine * machine,struct evlist * evlist,struct rb_root * tested_sections)584 static int process_events(struct machine *machine, struct evlist *evlist,
585 struct rb_root *tested_sections)
586 {
587 union perf_event *event;
588 struct mmap *md;
589 int i, ret;
590
591 for (i = 0; i < evlist->core.nr_mmaps; i++) {
592 md = &evlist->mmap[i];
593 if (perf_mmap__read_init(&md->core) < 0)
594 continue;
595
596 while ((event = perf_mmap__read_event(&md->core)) != NULL) {
597 ret = process_event(machine, evlist, event, tested_sections);
598 perf_mmap__consume(&md->core);
599 if (ret < 0)
600 return ret;
601 }
602 perf_mmap__read_done(&md->core);
603 }
604 return 0;
605 }
606
/*
 * qsort() comparator for ints.
 *
 * Fix: the original "*(int *)a - *(int *)b" overflows (undefined
 * behavior) when the difference does not fit in an int, e.g. INT_MAX vs
 * a negative value.  The (x > y) - (x < y) idiom is overflow-free.
 */
static int comp(const void *a, const void *b)
{
	int x = *(const int *)a;
	int y = *(const int *)b;

	return (x > y) - (x < y);
}
611
do_sort_something(void)612 static void do_sort_something(void)
613 {
614 int buf[40960], i;
615
616 for (i = 0; i < (int)ARRAY_SIZE(buf); i++)
617 buf[i] = ARRAY_SIZE(buf) - i - 1;
618
619 qsort(buf, ARRAY_SIZE(buf), sizeof(int), comp);
620
621 for (i = 0; i < (int)ARRAY_SIZE(buf); i++) {
622 if (buf[i] != i) {
623 pr_debug("qsort failed\n");
624 break;
625 }
626 }
627 }
628
/* Repeat the sort workload to generate plenty of user-space samples. */
static void sort_something(void)
{
	int iter;

	for (iter = 0; iter < 10; iter++)
		do_sort_something();
}
636
/* Issue many cheap syscalls so some samples land in kernel code. */
static void syscall_something(void)
{
	int fds[2];
	int iter;

	for (iter = 0; iter < 1000; iter++) {
		if (pipe(fds) < 0) {
			pr_debug("pipe failed\n");
			break;
		}
		close(fds[1]);
		close(fds[0]);
	}
}
651
/* Exercise the VFS by repeatedly creating and removing a scratch file. */
static void fs_something(void)
{
	const char *test_file_name = "temp-perf-code-reading-test-file--";
	int iter;

	for (iter = 0; iter < 1000; iter++) {
		FILE *f = fopen(test_file_name, "w+");

		if (f) {
			fclose(f);
			unlink(test_file_name);
		}
	}
}
666
/*
 * Generate activity in the filesystem, user space and the kernel so the
 * recorded samples cover a variety of code addresses.
 */
static void do_something(void)
{
	fs_something();

	sort_something();

	syscall_something();
}
675
/*
 * Result codes for do_test_code_reading(); every value except OK denotes
 * a condition the test skips gracefully rather than fails on.
 */
enum {
	TEST_CODE_READING_OK,
	TEST_CODE_READING_NO_VMLINUX,
	TEST_CODE_READING_NO_KCORE,
	TEST_CODE_READING_NO_ACCESS,
	TEST_CODE_READING_NO_KERNEL_OBJ,
};
683
/*
 * Record samples of this process while it does some work, then check that
 * the object code perf reads for each sampled address matches what objdump
 * reads from the same file.
 *
 * @try_kcore: force kallsyms/kcore for the kernel map instead of vmlinux
 *             (the caller runs the test once each way).
 *
 * Returns a TEST_CODE_READING_* code, or -1 on error.
 */
static int do_test_code_reading(bool try_kcore)
{
	struct machine *machine;
	struct thread *thread;
	struct record_opts opts = {
		.mmap_pages = UINT_MAX,
		.user_freq = UINT_MAX,
		.user_interval = ULLONG_MAX,
		.freq = 500,
		.target = {
			.uses_mmap = true,
		},
	};
	struct rb_root tested_sections = RB_ROOT;
	struct perf_thread_map *threads = NULL;
	struct perf_cpu_map *cpus = NULL;
	struct evlist *evlist = NULL;
	struct evsel *evsel = NULL;
	int err = -1, ret;
	pid_t pid;
	struct map *map;
	bool have_vmlinux, have_kcore;
	struct dso *dso;
	/* Fallback list: try kernel+user cycles first, then user-only, etc. */
	const char *events[] = { "cpu-cycles", "cpu-cycles:u", "cpu-clock", "cpu-clock:u", NULL };
	int evidx = 0;
	struct perf_env host_env;

	pid = getpid();

	perf_env__init(&host_env);
	machine = machine__new_host(&host_env);
	if (!machine) {
		/* Fix: result was previously dereferenced without a check. */
		pr_debug("machine__new_host failed\n");
		goto out_err;
	}

	ret = machine__create_kernel_maps(machine);
	if (ret < 0) {
		pr_debug("machine__create_kernel_maps failed\n");
		goto out_err;
	}

	/* Force the use of kallsyms instead of vmlinux to try kcore */
	if (try_kcore)
		symbol_conf.kallsyms_name = "/proc/kallsyms";

	/* Load kernel map */
	map = machine__kernel_map(machine);
	ret = map__load(map);
	if (ret < 0) {
		pr_debug("map__load failed\n");
		goto out_err;
	}
	dso = map__dso(map);
	have_vmlinux = dso__is_vmlinux(dso);
	have_kcore = dso__is_kcore(dso);

	/* 2nd time through we just try kcore */
	if (try_kcore && !have_kcore) {
		/*
		 * Fix: was "return TEST_CODE_READING_NO_KCORE", which leaked
		 * the machine and host_env - go through the cleanup path.
		 */
		err = TEST_CODE_READING_NO_KCORE;
		goto out_err;
	}

	/* No point getting kernel events if there is no kernel object */
	if (!have_vmlinux && !have_kcore)
		evidx++;

	threads = thread_map__new_by_tid(pid);
	if (!threads) {
		pr_debug("thread_map__new_by_tid failed\n");
		goto out_err;
	}

	ret = perf_event__synthesize_thread_map(NULL, threads,
						perf_event__process, machine,
						true, false);
	if (ret < 0) {
		pr_debug("perf_event__synthesize_thread_map failed\n");
		goto out_err;
	}

	thread = machine__findnew_thread(machine, pid, pid);
	if (!thread) {
		pr_debug("machine__findnew_thread failed\n");
		goto out_put;
	}

	cpus = perf_cpu_map__new_online_cpus();
	if (!cpus) {
		pr_debug("perf_cpu_map__new failed\n");
		goto out_put;
	}

	/* Walk the fallback event list until one opens successfully. */
	while (events[evidx]) {
		const char *str;

		evlist = evlist__new();
		if (!evlist) {
			pr_debug("evlist__new failed\n");
			goto out_put;
		}

		perf_evlist__set_maps(&evlist->core, cpus, threads);

		str = events[evidx];
		pr_debug("Parsing event '%s'\n", str);
		ret = parse_event(evlist, str);
		if (ret < 0) {
			pr_debug("parse_events failed\n");
			goto out_put;
		}

		evlist__config(evlist, &opts, NULL);

		evlist__for_each_entry(evlist, evsel) {
			evsel->core.attr.comm = 1;
			evsel->core.attr.disabled = 1;
			evsel->core.attr.enable_on_exec = 0;
		}

		ret = evlist__open(evlist);
		if (ret < 0) {
			evidx++;

			/* Only report the error once the last fallback failed. */
			if (events[evidx] == NULL && verbose > 0) {
				char errbuf[512];
				evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
				pr_debug("perf_evlist__open() failed!\n%s\n", errbuf);
			}

			perf_evlist__set_maps(&evlist->core, NULL, NULL);
			evlist__delete(evlist);
			evlist = NULL;
			continue;
		}
		break;
	}

	if (events[evidx] == NULL)
		goto out_put;

	ret = evlist__mmap(evlist, UINT_MAX);
	if (ret < 0) {
		pr_debug("evlist__mmap failed\n");
		goto out_put;
	}

	evlist__enable(evlist);

	do_something();

	evlist__disable(evlist);

	ret = process_events(machine, evlist, &tested_sections);
	if (ret < 0)
		goto out_put;

	/* Classify the outcome for test__code_reading(). */
	if (!have_vmlinux && !have_kcore && !try_kcore)
		err = TEST_CODE_READING_NO_KERNEL_OBJ;
	else if (!have_vmlinux && !try_kcore)
		err = TEST_CODE_READING_NO_VMLINUX;
	else if (strstr(events[evidx], ":u"))
		err = TEST_CODE_READING_NO_ACCESS;
	else
		err = TEST_CODE_READING_OK;
out_put:
	thread__put(thread);
out_err:
	evlist__delete(evlist);
	perf_cpu_map__put(cpus);
	perf_thread_map__put(threads);
	machine__delete(machine);
	perf_env__exit(&host_env);
	tested_sections__free(&tested_sections);

	return err;
}
855
/*
 * Test entry point: run once with vmlinux, then once forcing kcore.  Any
 * of the known "not available" conditions counts as a (skipped) pass.
 */
static int test__code_reading(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
	int ret;

	ret = do_test_code_reading(false);
	if (!ret)
		ret = do_test_code_reading(true);

	if (ret == TEST_CODE_READING_OK)
		return 0;
	if (ret == TEST_CODE_READING_NO_VMLINUX) {
		pr_debug("no vmlinux\n");
		return 0;
	}
	if (ret == TEST_CODE_READING_NO_KCORE) {
		pr_debug("no kcore\n");
		return 0;
	}
	if (ret == TEST_CODE_READING_NO_ACCESS) {
		pr_debug("no access\n");
		return 0;
	}
	if (ret == TEST_CODE_READING_NO_KERNEL_OBJ) {
		pr_debug("no kernel obj\n");
		return 0;
	}
	return -1;
}
883
/* Register the suite; its entry point is test__code_reading() above. */
DEFINE_SUITE("Object code reading", code_reading);
885