1 #include "evlist.h" 2 #include "evsel.h" 3 #include "thread_map.h" 4 #include "cpumap.h" 5 #include "tests.h" 6 7 /* 8 * This test will generate random numbers of calls to some getpid syscalls, 9 * then establish an mmap for a group of events that are created to monitor 10 * the syscalls. 11 * 12 * It will receive the events, using mmap, use its PERF_SAMPLE_ID generated 13 * sample.id field to map back to its respective perf_evsel instance. 14 * 15 * Then it checks if the number of syscalls reported as perf events by 16 * the kernel corresponds to the number of syscalls made. 17 */ 18 int test__basic_mmap(void) 19 { 20 int err = -1; 21 union perf_event *event; 22 struct thread_map *threads; 23 struct cpu_map *cpus; 24 struct perf_evlist *evlist; 25 cpu_set_t cpu_set; 26 const char *syscall_names[] = { "getsid", "getppid", "getpgid", }; 27 pid_t (*syscalls[])(void) = { (void *)getsid, getppid, (void*)getpgid }; 28 #define nsyscalls ARRAY_SIZE(syscall_names) 29 unsigned int nr_events[nsyscalls], 30 expected_nr_events[nsyscalls], i, j; 31 struct perf_evsel *evsels[nsyscalls], *evsel; 32 char sbuf[STRERR_BUFSIZE]; 33 34 threads = thread_map__new(-1, getpid(), UINT_MAX); 35 if (threads == NULL) { 36 pr_debug("thread_map__new\n"); 37 return -1; 38 } 39 40 cpus = cpu_map__new(NULL); 41 if (cpus == NULL) { 42 pr_debug("cpu_map__new\n"); 43 goto out_free_threads; 44 } 45 46 CPU_ZERO(&cpu_set); 47 CPU_SET(cpus->map[0], &cpu_set); 48 sched_setaffinity(0, sizeof(cpu_set), &cpu_set); 49 if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) { 50 pr_debug("sched_setaffinity() failed on CPU %d: %s ", 51 cpus->map[0], strerror_r(errno, sbuf, sizeof(sbuf))); 52 goto out_free_cpus; 53 } 54 55 evlist = perf_evlist__new(); 56 if (evlist == NULL) { 57 pr_debug("perf_evlist__new\n"); 58 goto out_free_cpus; 59 } 60 61 perf_evlist__set_maps(evlist, cpus, threads); 62 63 for (i = 0; i < nsyscalls; ++i) { 64 char name[64]; 65 66 snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]); 67 evsels[i] = perf_evsel__newtp("syscalls", name); 68 if (evsels[i] == NULL) { 69 pr_debug("perf_evsel__new\n"); 70 goto out_delete_evlist; 71 } 72 73 evsels[i]->attr.wakeup_events = 1; 74 perf_evsel__set_sample_id(evsels[i], false); 75 76 perf_evlist__add(evlist, evsels[i]); 77 78 if (perf_evsel__open(evsels[i], cpus, threads) < 0) { 79 pr_debug("failed to open counter: %s, " 80 "tweak /proc/sys/kernel/perf_event_paranoid?\n", 81 strerror_r(errno, sbuf, sizeof(sbuf))); 82 goto out_delete_evlist; 83 } 84 85 nr_events[i] = 0; 86 expected_nr_events[i] = 1 + rand() % 127; 87 } 88 89 if (perf_evlist__mmap(evlist, 128, true) < 0) { 90 pr_debug("failed to mmap events: %d (%s)\n", errno, 91 strerror_r(errno, sbuf, sizeof(sbuf))); 92 goto out_delete_evlist; 93 } 94 95 for (i = 0; i < nsyscalls; ++i) 96 for (j = 0; j < expected_nr_events[i]; ++j) { 97 int foo = syscalls[i](); 98 ++foo; 99 } 100 101 while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) { 102 struct perf_sample sample; 103 104 if (event->header.type != PERF_RECORD_SAMPLE) { 105 pr_debug("unexpected %s event\n", 106 perf_event__name(event->header.type)); 107 goto out_delete_evlist; 108 } 109 110 err = perf_evlist__parse_sample(evlist, event, &sample); 111 if (err) { 112 pr_err("Can't parse sample, err = %d\n", err); 113 goto out_delete_evlist; 114 } 115 116 err = -1; 117 evsel = perf_evlist__id2evsel(evlist, sample.id); 118 if (evsel == NULL) { 119 pr_debug("event with id %" PRIu64 120 " doesn't map to an evsel\n", sample.id); 121 goto out_delete_evlist; 122 } 
		nr_events[evsel->idx]++;
		perf_evlist__mmap_consume(evlist, 0);
	}

	err = 0;
	evlist__for_each(evlist, evsel) {
		if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) {
			pr_debug("expected %d %s events, got %d\n",
				 expected_nr_events[evsel->idx],
				 perf_evsel__name(evsel), nr_events[evsel->idx]);
			err = -1;
			goto out_delete_evlist;
		}
	}

out_delete_evlist:
	perf_evlist__delete(evlist);
	cpus	= NULL;
	threads = NULL;
out_free_cpus:
	cpu_map__put(cpus);
out_free_threads:
	thread_map__put(threads);
	return err;
}