/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "evlist.h"
#include "evsel.h"
#include "util.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
}

struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
				     struct thread_map *threads)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, cpus, threads);

	return evlist;
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	free(evlist->mmap);
	free(evlist->pollfd);
	evlist->mmap = NULL;
	evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	++evlist->nr_entries;
}

int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel = perf_evsel__new(&attr, 0);

	if (evsel == NULL)
		return -ENOMEM;

	perf_evlist__add(evlist, evsel);
	return 0;
}

int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries;
	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}
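
/*
 * The array above is sized for the worst case of one fd per
 * (event, cpu, thread) triple -- e.g. 3 events * 4 cpus * 2 threads gives
 * 24 slots -- even though fds redirected via PERF_EVENT_IOC_SET_OUTPUT in
 * perf_evlist__mmap() below are never registered here. Each fd that is
 * added gets switched to non-blocking mode and watched for POLLIN.
 */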

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}

static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	perf_evlist__id_add(evlist, evsel, cpu, thread, read_data[id_idx]);
	return 0;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct perf_sample_id *sid;
	int hash;

	if (evlist->nr_entries == 1)
		return list_entry(evlist->entries.next, struct perf_evsel, node);

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, pos, head, node)
		if (sid->id == id)
			return sid->evsel;
	return NULL;
}

union perf_event *perf_evlist__read_on_cpu(struct perf_evlist *evlist, int cpu)
{
	/* XXX Move this to perf.c, making it generally available */
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	struct perf_mmap *md = &evlist->mmap[cpu];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
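		/*
		 * Worked example (values assumed for illustration): with
		 * mask == 0xffff (a 64KiB data area), a 24 byte event
		 * starting at offset 0xfff8 occupies bytes 0xfff8-0xffff
		 * plus bytes 0x0-0xf after the wrap, so the loop below
		 * does two memcpys (8 then 16 bytes) into event_copy.
		 */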
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &evlist->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &evlist->event_copy;
		}

		old += size;
	}

	md->prev = old;

	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int cpu;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		if (evlist->mmap[cpu].base != NULL) {
			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
			evlist->mmap[cpu].base = NULL;
		}
	}
}

int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->mmap = zalloc(evlist->cpus->nr * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

static int __perf_evlist__mmap(struct perf_evlist *evlist, int cpu, int prot,
			       int mask, int fd)
{
	evlist->mmap[cpu].prev = 0;
	evlist->mmap[cpu].mask = mask;
	evlist->mmap[cpu].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[cpu].base == MAP_FAILED)
		return -1;

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}

/** perf_evlist__mmap - Create per cpu maps to receive events
 *
 * @evlist - list of events
 * @pages - map length in pages
 * @overwrite - overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 *	struct perf_mmap *m = &evlist->mmap[cpu];
 *	unsigned int head = perf_mmap__read_head(m);
 *
 *	perf_mmap__write_tail(m, head)
 *
 * Using perf_evlist__read_on_cpu does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite)
{
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	int mask = pages * page_size - 1, cpu;
	struct perf_evsel *first_evsel, *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int thread, prot = PROT_READ | (overwrite ? 0 : PROT_WRITE);

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = (pages + 1) * page_size;
	first_evsel = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0)
			return -ENOMEM;

		for (cpu = 0; cpu < cpus->nr; cpu++) {
			for (thread = 0; thread < threads->nr; thread++) {
				int fd = FD(evsel, cpu, thread);

				if (evsel->idx || thread) {
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT,
						  FD(first_evsel, cpu, 0)) != 0)
						goto out_unmap;
				} else if (__perf_evlist__mmap(evlist, cpu, prot, mask, fd) < 0)
					goto out_unmap;

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < cpus->nr; cpu++) {
		if (evlist->mmap[cpu].base != NULL) {
			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
			evlist->mmap[cpu].base = NULL;
		}
	}
	return -1;
}
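
/*
 * Typical call sequence (a sketch, not code from this file: target_pid,
 * target_tid, cpu_list and process() are placeholders, error handling is
 * omitted, and the counters themselves are opened via the perf_evsel
 * routines in evsel.c):
 *
 *	struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
 *	union perf_event *event;
 *	int cpu;
 *
 *	perf_evlist__create_maps(evlist, target_pid, target_tid, cpu_list);
 *	perf_evlist__add_default(evlist);
 *	... open each evsel on evlist->cpus/evlist->threads ...
 *	perf_evlist__mmap(evlist, 128, false);
 *
 *	while (poll(evlist->pollfd, evlist->nr_fds, -1) >= 0)
 *		for (cpu = 0; cpu < evlist->cpus->nr; cpu++)
 *			while ((event = perf_evlist__read_on_cpu(evlist, cpu)))
 *				process(event);
 */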

int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid,
			     pid_t target_tid, const char *cpu_list)
{
	evlist->threads = thread_map__new(target_pid, target_tid);

	if (evlist->threads == NULL)
		return -1;

	if (target_tid != -1)
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}

void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
}

int perf_evlist__set_filters(struct perf_evlist *evlist)
{
	const struct thread_map *threads = evlist->threads;
	const struct cpu_map *cpus = evlist->cpus;
	struct perf_evsel *evsel;
	char *filter;
	int thread;
	int cpu;
	int err;
	int fd;

	list_for_each_entry(evsel, &evlist->entries, node) {
		filter = evsel->filter;
		if (!filter)
			continue;
		for (cpu = 0; cpu < cpus->nr; cpu++) {
			for (thread = 0; thread < threads->nr; thread++) {
				fd = FD(evsel, cpu, thread);
				err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter);
				if (err)
					return err;
			}
		}
	}

	return 0;
}
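
/*
 * Manual consumption sketch for the non-overwrite case described in the
 * perf_evlist__mmap() comment above, for callers that bypass
 * perf_evlist__read_on_cpu() (illustration only; event processing elided):
 *
 *	struct perf_mmap *md = &evlist->mmap[cpu];
 *	unsigned int head = perf_mmap__read_head(md);
 *
 *	... consume events between md->prev and head ...
 *
 *	md->prev = head;
 *	perf_mmap__write_tail(md, head);
 */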