// SPDX-License-Identifier: GPL-2.0
#include "tests.h"
#include "debug.h"
#include "symbol.h"
#include "sort.h"
#include "evsel.h"
#include "evlist.h"
#include "machine.h"
#include "map.h"
#include "parse-events.h"
#include "thread.h"
#include "hists_common.h"
#include "util/mmap.h"
#include <errno.h>
#include <linux/kernel.h>

struct sample {
	u32 pid;
	u64 ip;
	struct thread *thread;
	struct map *map;
	struct symbol *sym;
};

/* For the numbers, see hists_common.c */
static struct sample fake_common_samples[] = {
	/* perf [kernel] schedule() */
	{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, },
	/* perf [perf] main() */
	{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, },
	/* perf [perf] cmd_record() */
	{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_CMD_RECORD, },
	/* bash [bash] xmalloc() */
	{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XMALLOC, },
	/* bash [libc] malloc() */
	{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_MALLOC, },
};

static struct sample fake_samples[][5] = {
	{
		/* perf [perf] run_command() */
		{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_RUN_COMMAND, },
		/* perf [libc] malloc() */
		{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, },
		/* perf [kernel] page_fault() */
		{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
		/* perf [kernel] sys_perf_event_open() */
		{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_SYS_PERF_EVENT_OPEN, },
		/* bash [libc] free() */
		{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_FREE, },
	},
	{
		/* perf [libc] free() */
		{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_LIBC_FREE, },
		/* bash [libc] malloc() */
		{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_MALLOC, }, /* will be merged */
		/* bash [bash] xfree() */
		{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XFREE, },
		/* bash [libc] realloc() */
		{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_REALLOC, },
		/* bash [kernel] page_fault() */
		{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
	},
};

static int add_hist_entries(struct evlist *evlist, struct machine *machine)
{
	struct evsel *evsel;
	struct addr_location al;
	struct hist_entry *he;
	struct perf_sample sample = { .period = 1, .weight = 1, };
	size_t i = 0, k;

	addr_location__init(&al);
	/*
	 * Each evsel will have 10 samples: 5 common and 5 distinct.
	 * However, the second evsel also has a collapsed entry for
	 * "bash [libc] malloc", so a total of 9 entries will be in its tree.
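	 * (That sample appears in both fake_common_samples and
	 * fake_samples[1].)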
	 */
	evlist__for_each_entry(evlist, evsel) {
		struct hists *hists = evsel__hists(evsel);

		for (k = 0; k < ARRAY_SIZE(fake_common_samples); k++) {
			sample.cpumode = PERF_RECORD_MISC_USER;
			sample.pid = fake_common_samples[k].pid;
			sample.tid = fake_common_samples[k].pid;
			sample.ip = fake_common_samples[k].ip;

			if (machine__resolve(machine, &al, &sample) < 0)
				goto out;

			he = hists__add_entry(hists, &al, NULL,
					      NULL, NULL, NULL, &sample, true);
			if (he == NULL)
				goto out;

			thread__put(fake_common_samples[k].thread);
			fake_common_samples[k].thread = thread__get(al.thread);
			map__put(fake_common_samples[k].map);
			fake_common_samples[k].map = map__get(al.map);
			fake_common_samples[k].sym = al.sym;
		}

		for (k = 0; k < ARRAY_SIZE(fake_samples[i]); k++) {
			sample.pid = fake_samples[i][k].pid;
			sample.tid = fake_samples[i][k].pid;
			sample.ip = fake_samples[i][k].ip;
			if (machine__resolve(machine, &al, &sample) < 0)
				goto out;

			he = hists__add_entry(hists, &al, NULL,
					      NULL, NULL, NULL, &sample, true);
			if (he == NULL)
				goto out;

			thread__put(fake_samples[i][k].thread);
			fake_samples[i][k].thread = thread__get(al.thread);
			map__put(fake_samples[i][k].map);
			fake_samples[i][k].map = map__get(al.map);
			fake_samples[i][k].sym = al.sym;
		}
		i++;
	}

	addr_location__exit(&al);
	return 0;
out:
	addr_location__exit(&al);
	pr_debug("Not enough memory for adding a hist entry\n");
	return -1;
}

/* Release the map references taken in add_hist_entries(). */
static void put_fake_samples(void)
{
	size_t i, j;

	for (i = 0; i < ARRAY_SIZE(fake_common_samples); i++)
		map__put(fake_common_samples[i].map);
	for (i = 0; i < ARRAY_SIZE(fake_samples); i++) {
		for (j = 0; j < ARRAY_SIZE(fake_samples[0]); j++)
			map__put(fake_samples[i][j].map);
	}
}

/* Return 1 if a sample with the given thread/map/symbol is in the array. */
static int find_sample(struct sample *samples, size_t nr_samples,
		       struct thread *t, struct map *m, struct symbol *s)
{
	while (nr_samples--) {
		if (RC_CHK_EQUAL(samples->thread, t) &&
		    RC_CHK_EQUAL(samples->map, m) &&
		    samples->sym == s)
			return 1;
		samples++;
	}
	return 0;
}

static int __validate_match(struct hists *hists)
{
	size_t count = 0;
	struct rb_root_cached *root;
	struct rb_node *node;

	/*
	 * Only entries from fake_common_samples should have a pair.
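	 * hists__match() pairs up entries that exist in both hists, and
	 * only the common samples were added to both evsels.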
	 */
	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	node = rb_first_cached(root);
	while (node) {
		struct hist_entry *he;

		he = rb_entry(node, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(he)) {
			if (find_sample(fake_common_samples,
					ARRAY_SIZE(fake_common_samples),
					he->thread, he->ms.map, he->ms.sym)) {
				count++;
			} else {
				pr_debug("Can't find the matched entry\n");
				return -1;
			}
		}

		node = rb_next(node);
	}

	if (count != ARRAY_SIZE(fake_common_samples)) {
		pr_debug("Invalid count for matched entries: %zd of %zd\n",
			 count, ARRAY_SIZE(fake_common_samples));
		return -1;
	}

	return 0;
}

static int validate_match(struct hists *leader, struct hists *other)
{
	return __validate_match(leader) || __validate_match(other);
}

static int __validate_link(struct hists *hists, int idx)
{
	size_t count = 0;
	size_t count_pair = 0;
	size_t count_dummy = 0;
	struct rb_root_cached *root;
	struct rb_node *node;

	/*
	 * The leader hists (idx = 0) will have dummy entries from the other,
	 * and some entries will have no pair.  However, every entry in the
	 * other hists should have a (dummy) pair.
	 */
	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	node = rb_first_cached(root);
	while (node) {
		struct hist_entry *he;

		he = rb_entry(node, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(he)) {
			if (!find_sample(fake_common_samples,
					 ARRAY_SIZE(fake_common_samples),
					 he->thread, he->ms.map, he->ms.sym) &&
			    !find_sample(fake_samples[idx],
					 ARRAY_SIZE(fake_samples[idx]),
					 he->thread, he->ms.map, he->ms.sym)) {
				count_dummy++;
			}
			count_pair++;
		} else if (idx) {
			pr_debug("An entry from the other hists should have a pair\n");
			return -1;
		}

		count++;
		node = rb_next(node);
	}

	/*
	 * Note that we have an entry collapsed in the other (idx = 1) hists.
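	 * Hence the leader only picks up ARRAY_SIZE(fake_samples[1]) - 1
	 * dummy entries from it.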
	 */
	if (idx == 0) {
		if (count_dummy != ARRAY_SIZE(fake_samples[1]) - 1) {
			pr_debug("Invalid count of dummy entries: %zd of %zd\n",
				 count_dummy, ARRAY_SIZE(fake_samples[1]) - 1);
			return -1;
		}
		if (count != count_pair + ARRAY_SIZE(fake_samples[0])) {
			pr_debug("Invalid count of total leader entries: %zd of %zd\n",
				 count, count_pair + ARRAY_SIZE(fake_samples[0]));
			return -1;
		}
	} else {
		if (count != count_pair) {
			pr_debug("Invalid count of total other entries: %zd of %zd\n",
				 count, count_pair);
			return -1;
		}
		if (count_dummy > 0) {
			pr_debug("Other hists should not have dummy entries: %zd\n",
				 count_dummy);
			return -1;
		}
	}

	return 0;
}

static int validate_link(struct hists *leader, struct hists *other)
{
	return __validate_link(leader, 0) || __validate_link(other, 1);
}

static int test__hists_link(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
	int err = -1;
	struct hists *hists, *first_hists;
	struct machines machines;
	struct machine *machine = NULL;
	struct evsel *evsel, *first;
	struct evlist *evlist = evlist__new();

	if (evlist == NULL)
		return -ENOMEM;

	err = parse_event(evlist, "cpu-clock");
	if (err)
		goto out;
	err = parse_event(evlist, "task-clock");
	if (err)
		goto out;

	err = TEST_FAIL;
	/* default sort order (comm,dso,sym) will be used */
	if (setup_sorting(NULL) < 0)
		goto out;

	machines__init(&machines);

	/* setup threads/dso/map/symbols also */
	machine = setup_fake_machine(&machines);
	if (!machine)
		goto out;

	if (verbose > 1)
		machine__fprintf(machine, stderr);

	/* process sample events */
	err = add_hist_entries(evlist, machine);
	if (err < 0)
		goto out;

	evlist__for_each_entry(evlist, evsel) {
		hists = evsel__hists(evsel);
		hists__collapse_resort(hists, NULL);

		if (verbose > 2)
			print_hists_in(hists);
	}

	first = evlist__first(evlist);
	evsel = evlist__last(evlist);

	first_hists = evsel__hists(first);
	hists = evsel__hists(evsel);

	/* match common entries */
	hists__match(first_hists, hists);
	err = validate_match(first_hists, hists);
	if (err)
		goto out;

	/* link common and/or dummy entries */
	hists__link(first_hists, hists);
	err = validate_link(first_hists, hists);
	if (err)
		goto out;

	err = 0;

out:
	/* tear down everything */
	evlist__delete(evlist);
	reset_output_field();
	machines__exit(&machines);
	put_fake_samples();

	return err;
}

DEFINE_SUITE("Match and link multiple hists", hists_link);