// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/types.h>
#include <sys/prctl.h>
#include <perf/cpumap.h>
#include <perf/evlist.h>
#include <perf/mmap.h>

#include "debug.h"
#include "parse-events.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
#include "record.h"
#include "tsc.h"
#include "mmap.h"
#include "tests.h"
#include "pmu.h"
#include "pmu-hybrid.h"

/*
 * Except for x86_64/i386 and Arm64, other archs don't support TSC in perf.
 * Just enable the test for x86_64/i386 and Arm64 archs.
 */
#if defined(__x86_64__) || defined(__i386__) || defined(__aarch64__)
#define TSC_IS_SUPPORTED 1
#else
#define TSC_IS_SUPPORTED 0
#endif

#define CHECK__(x) {				\
	while ((x) < 0) {			\
		pr_debug(#x " failed!\n");	\
		goto out_err;			\
	}					\
}

#define CHECK_NOT_NULL__(x) {			\
	while ((x) == NULL) {			\
		pr_debug(#x " failed!\n");	\
		goto out_err;			\
	}					\
}

static int test__tsc_is_supported(struct test_suite *test __maybe_unused,
				  int subtest __maybe_unused)
{
	if (!TSC_IS_SUPPORTED) {
		pr_debug("Test not supported on this architecture\n");
		return TEST_SKIP;
	}

	return TEST_OK;
}

/**
 * test__perf_time_to_tsc - test converting perf time to TSC.
 *
 * This function implements a test that checks that the conversion of perf time
 * to and from TSC is consistent with the order of events. If the test passes,
 * %TEST_OK is returned, otherwise %TEST_FAIL is returned. If TSC conversion is
 * not supported by the running kernel, %TEST_SKIP is returned.
 */
static int test__perf_time_to_tsc(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
	struct record_opts opts = {
		.mmap_pages    = UINT_MAX,
		.user_freq     = UINT_MAX,
		.user_interval = ULLONG_MAX,
		.target        = {
			.uses_mmap = true,
		},
		.sample_time   = true,
	};
	struct perf_thread_map *threads = NULL;
	struct perf_cpu_map *cpus = NULL;
	struct evlist *evlist = NULL;
	struct evsel *evsel = NULL;
	int err = TEST_FAIL, ret, i;
	const char *comm1, *comm2;
	struct perf_tsc_conversion tc;
	struct perf_event_mmap_page *pc;
	union perf_event *event;
	u64 test_tsc, comm1_tsc, comm2_tsc;
	u64 test_time, comm1_time = 0, comm2_time = 0;
	struct mmap *md;

	threads = thread_map__new(-1, getpid(), UINT_MAX);
	CHECK_NOT_NULL__(threads);

	cpus = perf_cpu_map__new(NULL);
	CHECK_NOT_NULL__(cpus);

	evlist = evlist__new();
	CHECK_NOT_NULL__(evlist);

	perf_evlist__set_maps(&evlist->core, cpus, threads);

	CHECK__(parse_events(evlist, "cycles:u", NULL));

	evlist__config(evlist, &opts, NULL);

	evsel = evlist__first(evlist);

	evsel->core.attr.comm = 1;
	evsel->core.attr.disabled = 1;
	evsel->core.attr.enable_on_exec = 0;

	/*
	 * On hybrid systems, parsing "cycles:u" creates two events.
	 * Initialize the second evsel here as well.
	 */
	if (perf_pmu__has_hybrid() && perf_pmu__hybrid_mounted("cpu_atom")) {
		evsel = evsel__next(evsel);
		evsel->core.attr.comm = 1;
		evsel->core.attr.disabled = 1;
		evsel->core.attr.enable_on_exec = 0;
	}

	CHECK__(evlist__open(evlist));

	CHECK__(evlist__mmap(evlist, UINT_MAX));

	pc = evlist->mmap[0].core.base;
	ret = perf_read_tsc_conversion(pc, &tc);
	if (ret) {
		if (ret == -EOPNOTSUPP) {
			pr_debug("perf_read_tsc_conversion is not supported in current kernel\n");
			err = TEST_SKIP;
		}
		goto out_err;
	}

	evlist__enable(evlist);

	/* Generate two COMM events with an rdtsc() reading in between. */
	comm1 = "Test COMM 1";
	CHECK__(prctl(PR_SET_NAME, (unsigned long)comm1, 0, 0, 0));

	test_tsc = rdtsc();

	comm2 = "Test COMM 2";
	CHECK__(prctl(PR_SET_NAME, (unsigned long)comm2, 0, 0, 0));

	evlist__disable(evlist);

	/* Find the two COMM events and record their sample times. */
	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		md = &evlist->mmap[i];
		if (perf_mmap__read_init(&md->core) < 0)
			continue;

		while ((event = perf_mmap__read_event(&md->core)) != NULL) {
			struct perf_sample sample;

			if (event->header.type != PERF_RECORD_COMM ||
			    (pid_t)event->comm.pid != getpid() ||
			    (pid_t)event->comm.tid != getpid())
				goto next_event;

			if (strcmp(event->comm.comm, comm1) == 0) {
				CHECK__(evsel__parse_sample(evsel, event, &sample));
				comm1_time = sample.time;
			}
			if (strcmp(event->comm.comm, comm2) == 0) {
				CHECK__(evsel__parse_sample(evsel, event, &sample));
				comm2_time = sample.time;
			}
next_event:
			perf_mmap__consume(&md->core);
		}
		perf_mmap__read_done(&md->core);
	}

	if (!comm1_time || !comm2_time)
		goto out_err;

	test_time = tsc_to_perf_time(test_tsc, &tc);
	comm1_tsc = perf_time_to_tsc(comm1_time, &tc);
	comm2_tsc = perf_time_to_tsc(comm2_time, &tc);

	pr_debug("1st event perf time %"PRIu64" tsc %"PRIu64"\n",
		 comm1_time, comm1_tsc);
	pr_debug("rdtsc time %"PRIu64" tsc %"PRIu64"\n",
		 test_time, test_tsc);
	pr_debug("2nd event perf time %"PRIu64" tsc %"PRIu64"\n",
		 comm2_time, comm2_tsc);

	/* The converted times must preserve the order of the events. */
	if (test_time <= comm1_time ||
	    test_time >= comm2_time)
		goto out_err;

	if (test_tsc <= comm1_tsc ||
	    test_tsc >= comm2_tsc)
		goto out_err;

	err = TEST_OK;

out_err:
	evlist__delete(evlist);
	perf_cpu_map__put(cpus);
	perf_thread_map__put(threads);
	return err;
}

static struct test_case time_to_tsc_tests[] = {
	TEST_CASE_REASON("TSC support", tsc_is_supported,
			 "This architecture does not support"),
	TEST_CASE_REASON("Perf time to TSC", perf_time_to_tsc,
			 "perf_read_tsc_conversion is not supported"),
	{ .name = NULL, }
};

struct test_suite suite__perf_time_to_tsc = {
	.desc = "Convert perf time to TSC",
	.test_cases = time_to_tsc_tests,
};