/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "evlist.h"
#include "evsel.h"
#include "util.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
}

struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
				     struct thread_map *threads)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, cpus, threads);

	return evlist;
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	free(evlist->mmap);
	free(evlist->pollfd);
	evlist->mmap = NULL;
	evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	++evlist->nr_entries;
}

int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel = perf_evsel__new(&attr, 0);

	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}

void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			for (thread = 0; thread < evlist->threads->nr; thread++)
				ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_DISABLE);
		}
	}
}
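/*
 * Example (illustrative sketch, not part of the original file): the
 * minimal life cycle of an evlist, using only helpers defined here.
 * "cpus" and "threads" are assumed to come from cpu_map__new() and
 * thread_map__new(), or from perf_evlist__create_maps() below.
 *
 *	struct perf_evlist *evlist = perf_evlist__new(cpus, threads);
 *
 *	if (evlist == NULL || perf_evlist__add_default(evlist) < 0)
 *		return -ENOMEM;
 *
 *	... open, mmap and consume events ...
 *
 *	perf_evlist__delete(evlist);
 *
 * perf_evlist__delete() also deletes each evsel on the list, via
 * perf_evlist__purge(), so the evsels must not be freed separately.
 */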
int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries;
	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}

static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

/*
 * Read back the ID the kernel assigned to this fd via the read()
 * format, so that samples can later be mapped to their evsel.
 */
static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	perf_evlist__id_add(evlist, evsel, cpu, thread, read_data[id_idx]);
	return 0;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct perf_sample_id *sid;
	int hash;

	if (evlist->nr_entries == 1)
		return list_entry(evlist->entries.next, struct perf_evsel, node);

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, pos, head, node)
		if (sid->id == id)
			return sid->evsel;
	return NULL;
}
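/*
 * Example (sketch, not from the original file): mapping a sample back
 * to the evsel that generated it.  "sample.id" stands in for the ID
 * parsed out of a PERF_RECORD_SAMPLE record when PERF_SAMPLE_ID is in
 * the sample_type; the parsing itself lives elsewhere.
 *
 *	struct perf_evsel *evsel = perf_evlist__id2evsel(evlist, sample.id);
 *
 *	if (evsel == NULL)
 *		handle_unknown_id(sample.id);	hypothetical error path
 */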
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	/* XXX Move this to perf.c, making it generally available */
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &evlist->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &evlist->event_copy;
		}

		old += size;
	}

	md->prev = old;

	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}
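/*
 * Worked example for the straddle check above (hypothetical numbers):
 * with a single 4096-byte data page, mask is 4095.  An event of size
 * 64 starting at old = 4064 wraps: (4064 & 4095) + 64 == 4128, while
 * (4064 + 64) & 4095 == 32, so the two sides differ and the event is
 * stitched together into evlist->event_copy before being returned.
 */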
void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		if (evlist->mmap[i].base != NULL) {
			munmap(evlist->mmap[i].base, evlist->mmap_len);
			evlist->mmap[i].base = NULL;
		}
	}

	free(evlist->mmap);
	evlist->mmap = NULL;
}

int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = evlist->cpus->nr;
	if (evlist->cpus->map[0] == -1)
		evlist->nr_mmaps = evlist->threads->nr;
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

static int __perf_evlist__mmap(struct perf_evlist *evlist,
			       int idx, int prot, int mask, int fd)
{
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED)
		return -1;

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}

static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int cpu, thread;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		int output = -1;

		for (thread = 0; thread < evlist->threads->nr; thread++) {
			list_for_each_entry(evsel, &evlist->entries, node) {
				int fd = FD(evsel, cpu, thread);

				if (output == -1) {
					output = fd;
					if (__perf_evlist__mmap(evlist, cpu,
								prot, mask, output) < 0)
						goto out_unmap;
				} else {
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
						goto out_unmap;
				}

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		if (evlist->mmap[cpu].base != NULL) {
			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
			evlist->mmap[cpu].base = NULL;
		}
	}
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int thread;

	for (thread = 0; thread < evlist->threads->nr; thread++) {
		int output = -1;

		list_for_each_entry(evsel, &evlist->entries, node) {
			int fd = FD(evsel, 0, thread);

			if (output == -1) {
				output = fd;
				if (__perf_evlist__mmap(evlist, thread,
							prot, mask, output) < 0)
					goto out_unmap;
			} else {
				if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
					goto out_unmap;
			}

			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (thread = 0; thread < evlist->threads->nr; thread++) {
		if (evlist->mmap[thread].base != NULL) {
			munmap(evlist->mmap[thread].base, evlist->mmap_len);
			evlist->mmap[thread].base = NULL;
		}
	}
	return -1;
}

/** perf_evlist__mmap - Create per cpu maps to receive events
 *
 * @evlist - list of events
 * @pages - map length in pages
 * @overwrite - overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 *	struct perf_mmap *m = &evlist->mmap[cpu];
 *	unsigned int head = perf_mmap__read_head(m);
 *
 *	perf_mmap__write_tail(m, head)
 *
 * Using perf_evlist__mmap_read does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite)
{
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	int mask = pages * page_size - 1;
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE);

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = (pages + 1) * page_size;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0)
			return -ENOMEM;
	}

	if (evlist->cpus->map[0] == -1)
		return perf_evlist__mmap_per_thread(evlist, prot, mask);

	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}
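/*
 * Example (sketch, assuming the events were already opened elsewhere):
 * mapping the buffers and draining the first one.  "process(event)"
 * is a hypothetical consumer.
 *
 *	union perf_event *event;
 *
 *	if (perf_evlist__mmap(evlist, 128, false) < 0)
 *		return -1;
 *
 *	while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL)
 *		process(event);
 *
 * With overwrite == false, perf_evlist__mmap_read() advances the tail
 * for us, so no explicit perf_mmap__write_tail() call is needed.
 */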
int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid,
			     pid_t target_tid, const char *cpu_list)
{
	evlist->threads = thread_map__new(target_pid, target_tid);

	if (evlist->threads == NULL)
		return -1;

	if (cpu_list == NULL && target_tid != -1)
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}

void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
}

int perf_evlist__set_filters(struct perf_evlist *evlist)
{
	const struct thread_map *threads = evlist->threads;
	const struct cpu_map *cpus = evlist->cpus;
	struct perf_evsel *evsel;
	char *filter;
	int thread;
	int cpu;
	int err;
	int fd;

	list_for_each_entry(evsel, &evlist->entries, node) {
		filter = evsel->filter;
		if (!filter)
			continue;
		for (cpu = 0; cpu < cpus->nr; cpu++) {
			for (thread = 0; thread < threads->nr; thread++) {
				fd = FD(evsel, cpu, thread);
				err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter);
				if (err)
					return err;
			}
		}
	}

	return 0;
}

bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *first;

	pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_type != pos->attr.sample_type)
			return false;
	}

	return true;
}

u64 perf_evlist__sample_type(const struct perf_evlist *evlist)
{
	struct perf_evsel *first;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);
	return first->attr.sample_type;
}

bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *first;

	pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(const struct perf_evlist *evlist)
{
	struct perf_evsel *first;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);
	return first->attr.sample_id_all;
}
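/*
 * Example (sketch, not from the original file): tools that decode
 * records for the whole evlist typically check the layout once after
 * configuring the events, before parsing anything:
 *
 *	if (!perf_evlist__valid_sample_type(evlist))
 *		report_mismatch();	hypothetical error path
 *
 *	u64 type = perf_evlist__sample_type(evlist);
 *
 * Both helpers read the first evsel only, which is why the valid_*
 * checks above must be done first.
 */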