// SPDX-License-Identifier: GPL-2.0
#include <linux/compiler.h>
#include <string.h>
#include <perf/cpumap.h>
#include <perf/evlist.h>
#include "metricgroup.h"
#include "tests.h"
#include "pmu-events/pmu-events.h"
#include "evlist.h"
#include "rblist.h"
#include "debug.h"
#include "expr.h"
#include "stat.h"
#include "pmu.h"

/* A fake counter value for one named event; arrays are terminated by a NULL event. */
struct value {
	const char *event;
	u64 val;
};

/* Return the fake count registered for @name, or 0 if it is not listed. */
static u64 find_value(const char *name, struct value *values)
{
	struct value *v = values;

	while (v->event) {
		if (!strcmp(name, v->event))
			return v->val;
		v++;
	}
	return 0;
}

/* Feed the fake counts into the shadow stats that metric evaluation reads. */
static void load_runtime_stat(struct runtime_stat *st, struct evlist *evlist,
			      struct value *vals)
{
	struct evsel *evsel;
	u64 count;

	perf_stat__reset_shadow_stats();
	evlist__for_each_entry(evlist, evsel) {
		count = find_value(evsel->name, vals);
		perf_stat__update_shadow_stats(evsel, count, 0, st);
		if (!strcmp(evsel->name, "duration_time"))
			update_stats(&walltime_nsecs_stats, count);
	}
}

/* Evaluate the parsed metric called @name against the loaded shadow stats. */
static double compute_single(struct rblist *metric_events, struct evlist *evlist,
			     struct runtime_stat *st, const char *name)
{
	struct metric_expr *mexp;
	struct metric_event *me;
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		me = metricgroup__lookup(metric_events, evsel, false);
		if (me != NULL) {
			list_for_each_entry (mexp, &me->head, nd) {
				if (strcmp(mexp->metric_name, name))
					continue;
				return test_generic_metric(mexp, 0, st);
			}
		}
	}
	return 0.;
}

static int __compute_metric(const char *name, struct value *vals,
			    const char *name1, double *ratio1,
			    const char *name2, double *ratio2)
{
	struct rblist metric_events = {
		.nr_entries = 0,
	};
	const struct pmu_events_table *pme_test;
	struct perf_cpu_map *cpus;
	struct runtime_stat st;
	struct evlist *evlist;
	int err;

	/*
	 * We need to prepare evlist for stat mode running on CPU 0
	 * because that's where all the stats are going to be created.
	 */
	evlist = evlist__new();
	if (!evlist)
		return -ENOMEM;

	cpus = perf_cpu_map__new("0");
	if (!cpus) {
		evlist__delete(evlist);
		return -ENOMEM;
	}

	perf_evlist__set_maps(&evlist->core, cpus, NULL);
	runtime_stat__init(&st);

	/* Parse the metric into metric_events list. */
	pme_test = find_core_events_table("testarch", "testcpu");
	err = metricgroup__parse_groups_test(evlist, pme_test, name,
					     false, false,
					     &metric_events);
	if (err)
		goto out;

	err = evlist__alloc_stats(/*config=*/NULL, evlist, /*alloc_raw=*/false);
	if (err)
		goto out;

	/* Load the runtime stats with given numbers for events. */
	load_runtime_stat(&st, evlist, vals);

	/* And execute the metric */
	if (name1 && ratio1)
		*ratio1 = compute_single(&metric_events, evlist, &st, name1);
	if (name2 && ratio2)
		*ratio2 = compute_single(&metric_events, evlist, &st, name2);

out:
	/* ... cleanup. */
	metricgroup__rblist_exit(&metric_events);
	runtime_stat__exit(&st);
	evlist__free_stats(evlist);
	perf_cpu_map__put(cpus);
	evlist__delete(evlist);
	return err;
}

static int compute_metric(const char *name, struct value *vals, double *ratio)
{
	return __compute_metric(name, vals, name, ratio, NULL, NULL);
}

static int compute_metric_group(const char *name, struct value *vals,
				const char *name1, double *ratio1,
				const char *name2, double *ratio2)
{
	return __compute_metric(name, vals, name1, ratio1, name2, ratio2);
}

static int test_ipc(void)
{
	double ratio;
	struct value vals[] = {
		{ .event = "inst_retired.any", .val = 300 },
		{ .event = "cpu_clk_unhalted.thread", .val = 200 },
		{ .event = NULL, },
	};

	TEST_ASSERT_VAL("failed to compute metric",
			compute_metric("IPC", vals, &ratio) == 0);

	TEST_ASSERT_VAL("IPC failed, wrong ratio",
			ratio == 1.5);
	return 0;
}

static int test_frontend(void)
{
	double ratio;
	struct value vals[] = {
		{ .event = "idq_uops_not_delivered.core", .val = 300 },
		{ .event = "cpu_clk_unhalted.thread", .val = 200 },
		{ .event = "cpu_clk_unhalted.one_thread_active", .val = 400 },
		{ .event = "cpu_clk_unhalted.ref_xclk", .val = 600 },
		{ .event = NULL, },
	};

	TEST_ASSERT_VAL("failed to compute metric",
			compute_metric("Frontend_Bound_SMT", vals, &ratio) == 0);

	TEST_ASSERT_VAL("Frontend_Bound_SMT failed, wrong ratio",
			ratio == 0.45);
	return 0;
}

static int test_cache_miss_cycles(void)
{
	double ratio;
	struct value vals[] = {
		{ .event = "l1d-loads-misses", .val = 300 },
		{ .event = "l1i-loads-misses", .val = 200 },
		{ .event = "inst_retired.any", .val = 400 },
		{ .event = NULL, },
	};

	TEST_ASSERT_VAL("failed to compute metric",
			compute_metric("cache_miss_cycles", vals, &ratio) == 0);

	TEST_ASSERT_VAL("cache_miss_cycles failed, wrong ratio",
			ratio == 1.25);
	return 0;
}

/*
 * DCache_L2_All_Hits = l2_rqsts.demand_data_rd_hit + l2_rqsts.pf_hit + l2_rqsts.rfo_hit
 * DCache_L2_All_Miss = max(l2_rqsts.all_demand_data_rd - l2_rqsts.demand_data_rd_hit, 0) +
 *                      l2_rqsts.pf_miss + l2_rqsts.rfo_miss
 * DCache_L2_All      = dcache_l2_all_hits + dcache_l2_all_miss
 * DCache_L2_Hits     = d_ratio(dcache_l2_all_hits, dcache_l2_all)
 * DCache_L2_Misses   = d_ratio(dcache_l2_all_miss, dcache_l2_all)
 *
 * l2_rqsts.demand_data_rd_hit = 100
 * l2_rqsts.pf_hit             = 200
 * l2_rqsts.rfo_hit            = 300
 * l2_rqsts.all_demand_data_rd = 400
 * l2_rqsts.pf_miss            = 500
 * l2_rqsts.rfo_miss           = 600
 *
 * DCache_L2_All_Hits = 600
 * DCache_L2_All_Miss = MAX(400 - 100, 0) + 500 + 600 = 1400
 * DCache_L2_All      = 600 + 1400 = 2000
 * DCache_L2_Hits     = 600 / 2000 = 0.3
 * DCache_L2_Misses   = 1400 / 2000 = 0.7
 */
static int test_dcache_l2(void)
{
	double ratio;
	struct value vals[] = {
		{ .event = "l2_rqsts.demand_data_rd_hit", .val = 100 },
		{ .event = "l2_rqsts.pf_hit", .val = 200 },
		{ .event = "l2_rqsts.rfo_hit", .val = 300 },
		{ .event = "l2_rqsts.all_demand_data_rd", .val = 400 },
		{ .event = "l2_rqsts.pf_miss", .val = 500 },
		{ .event = "l2_rqsts.rfo_miss", .val = 600 },
		{ .event = NULL, },
	};

	TEST_ASSERT_VAL("failed to compute metric",
			compute_metric("DCache_L2_Hits", vals, &ratio) == 0);

	TEST_ASSERT_VAL("DCache_L2_Hits failed, wrong ratio",
ratio", 234 ratio == 0.3); 235 236 TEST_ASSERT_VAL("failed to compute metric", 237 compute_metric("DCache_L2_Misses", vals, &ratio) == 0); 238 239 TEST_ASSERT_VAL("DCache_L2_Misses failed, wrong ratio", 240 ratio == 0.7); 241 return 0; 242 } 243 244 static int test_recursion_fail(void) 245 { 246 double ratio; 247 struct value vals[] = { 248 { .event = "inst_retired.any", .val = 300 }, 249 { .event = "cpu_clk_unhalted.thread", .val = 200 }, 250 { .event = NULL, }, 251 }; 252 253 TEST_ASSERT_VAL("failed to find recursion", 254 compute_metric("M1", vals, &ratio) == -1); 255 256 TEST_ASSERT_VAL("failed to find recursion", 257 compute_metric("M3", vals, &ratio) == -1); 258 return 0; 259 } 260 261 static int test_memory_bandwidth(void) 262 { 263 double ratio; 264 struct value vals[] = { 265 { .event = "l1d.replacement", .val = 4000000 }, 266 { .event = "duration_time", .val = 200000000 }, 267 { .event = NULL, }, 268 }; 269 270 TEST_ASSERT_VAL("failed to compute metric", 271 compute_metric("L1D_Cache_Fill_BW", vals, &ratio) == 0); 272 TEST_ASSERT_VAL("L1D_Cache_Fill_BW, wrong ratio", 273 1.28 == ratio); 274 275 return 0; 276 } 277 278 static int test_metric_group(void) 279 { 280 double ratio1, ratio2; 281 struct value vals[] = { 282 { .event = "cpu_clk_unhalted.thread", .val = 200 }, 283 { .event = "l1d-loads-misses", .val = 300 }, 284 { .event = "l1i-loads-misses", .val = 200 }, 285 { .event = "inst_retired.any", .val = 400 }, 286 { .event = NULL, }, 287 }; 288 289 TEST_ASSERT_VAL("failed to find recursion", 290 compute_metric_group("group1", vals, 291 "IPC", &ratio1, 292 "cache_miss_cycles", &ratio2) == 0); 293 294 TEST_ASSERT_VAL("group IPC failed, wrong ratio", 295 ratio1 == 2.0); 296 297 TEST_ASSERT_VAL("group cache_miss_cycles failed, wrong ratio", 298 ratio2 == 1.25); 299 return 0; 300 } 301 302 static int test__parse_metric(struct test_suite *test __maybe_unused, int subtest __maybe_unused) 303 { 304 TEST_ASSERT_VAL("IPC failed", test_ipc() == 0); 305 TEST_ASSERT_VAL("frontend failed", test_frontend() == 0); 306 TEST_ASSERT_VAL("DCache_L2 failed", test_dcache_l2() == 0); 307 TEST_ASSERT_VAL("recursion fail failed", test_recursion_fail() == 0); 308 TEST_ASSERT_VAL("Memory bandwidth", test_memory_bandwidth() == 0); 309 310 if (!perf_pmu__has_hybrid()) { 311 TEST_ASSERT_VAL("cache_miss_cycles failed", test_cache_miss_cycles() == 0); 312 TEST_ASSERT_VAL("test metric group", test_metric_group() == 0); 313 } 314 return 0; 315 } 316 317 DEFINE_SUITE("Parse and process metrics", parse_metric); 318