/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <byteswap.h>
#include <linux/bitops.h>
#include "asm/bug.h"
#include "debugfs.h"
#include "event-parse.h"
#include "evsel.h"
#include "evlist.h"
#include "util.h"
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include "perf_regs.h"

static struct {
	bool sample_id_all;
	bool exclude_guest;
} perf_missing_features;

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))

static int __perf_evsel__sample_size(u64 sample_type)
{
	u64 mask = sample_type & PERF_SAMPLE_MASK;
	int size = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i))
			size++;
	}

	size *= sizeof(u64);

	return size;
}

void hists__init(struct hists *hists)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
}

void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
				  enum perf_event_sample_format bit)
{
	if (!(evsel->attr.sample_type & bit)) {
		evsel->attr.sample_type |= bit;
		evsel->sample_size += sizeof(u64);
	}
}

void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
				    enum perf_event_sample_format bit)
{
	if (evsel->attr.sample_type & bit) {
		evsel->attr.sample_type &= ~bit;
		evsel->sample_size -= sizeof(u64);
	}
}

void perf_evsel__set_sample_id(struct perf_evsel *evsel)
{
	perf_evsel__set_sample_bit(evsel, ID);
	evsel->attr.read_format |= PERF_FORMAT_ID;
}

void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx)
{
	evsel->idx = idx;
	evsel->attr = *attr;
	evsel->leader = evsel;
	INIT_LIST_HEAD(&evsel->node);
	hists__init(&evsel->hists);
	evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
}

struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL)
		perf_evsel__init(evsel, attr, idx);

	return evsel;
}

struct event_format *event_format__new(const char *sys, const char *name)
{
	int fd, n;
	char *filename;
	void *bf = NULL, *nbf;
	size_t size = 0, alloc_size = 0;
	struct event_format *format = NULL;

	if (asprintf(&filename, "%s/%s/%s/format", tracing_events_path, sys, name) < 0)
		goto out;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		goto out_free_filename;

	do {
		if (size == alloc_size) {
			alloc_size += BUFSIZ;
			nbf = realloc(bf, alloc_size);
			if (nbf == NULL)
				goto out_free_bf;
			bf = nbf;
		}

		/* Only read into the space that is actually available. */
		n = read(fd, bf + size, alloc_size - size);
		if (n < 0)
			goto out_free_bf;
		size += n;
	} while (n > 0);

	pevent_parse_format(&format, bf, size, sys);

out_free_bf:
	free(bf);
	close(fd);
out_free_filename:
	free(filename);
out:
	return format;
}

struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name, int idx)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL) {
		struct perf_event_attr attr = {
			.type	     = PERF_TYPE_TRACEPOINT,
			.sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
		};

		if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
			goto out_free;

		evsel->tp_format = event_format__new(sys, name);
		if (evsel->tp_format == NULL)
			goto out_free;

		event_attr_init(&attr);
		attr.config = evsel->tp_format->id;
		attr.sample_period = 1;
		perf_evsel__init(evsel, &attr, idx);
	}

	return evsel;

out_free:
	free(evsel->name);
	free(evsel);
	return NULL;
}

const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
	"cycles",
	"instructions",
	"cache-references",
	"cache-misses",
	"branches",
	"branch-misses",
	"bus-cycles",
	"stalled-cycles-frontend",
	"stalled-cycles-backend",
	"ref-cycles",
};

static const char *__perf_evsel__hw_name(u64 config)
{
	if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
		return perf_evsel__hw_names[config];

	return "unknown-hardware";
}

static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
{
	int colon = 0, r = 0;
	struct perf_event_attr *attr = &evsel->attr;
	bool exclude_guest_default = false;

#define MOD_PRINT(context, mod) do {					\
		if (!attr->exclude_##context) {				\
			if (!colon) colon = ++r;			\
			r += scnprintf(bf + r, size - r, "%c", mod);	\
		} } while(0)

	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
		MOD_PRINT(kernel, 'k');
		MOD_PRINT(user, 'u');
		MOD_PRINT(hv, 'h');
		exclude_guest_default = true;
	}

	if (attr->precise_ip) {
		if (!colon)
			colon = ++r;
		r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
		exclude_guest_default = true;
	}

	if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
		MOD_PRINT(host, 'H');
		MOD_PRINT(guest, 'G');
	}
#undef MOD_PRINT
	if (colon)
		bf[colon - 1] = ':';
	return r;
}

static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
	"cpu-clock",
	"task-clock",
	"page-faults",
	"context-switches",
	"cpu-migrations",
	"minor-faults",
	"major-faults",
	"alignment-faults",
	"emulation-faults",
};

static const char *__perf_evsel__sw_name(u64 config)
{
	if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
		return perf_evsel__sw_names[config];
	return "unknown-software";
}

static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
{
	int r;

	r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);

	if (type & HW_BREAKPOINT_R)
		r += scnprintf(bf + r, size - r, "r");

	if (type & HW_BREAKPOINT_W)
		r += scnprintf(bf + r, size - r, "w");

	if (type & HW_BREAKPOINT_X)
		r += scnprintf(bf + r, size - r, "x");

	return r;
}

static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	struct perf_event_attr *attr = &evsel->attr;
	int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
			       [PERF_EVSEL__MAX_ALIASES] = {
	{ "L1-dcache",	"l1-d",		"l1d",		"L1-data",		},
	{ "L1-icache",	"l1-i",		"l1i",		"L1-instruction",	},
	{ "LLC",	"L2",							},
	{ "dTLB",	"d-tlb",	"Data-TLB",				},
	{ "iTLB",	"i-tlb",	"Instruction-TLB",			},
	{ "branch",	"branches",	"bpu",		"btb",		"bpc",	},
	{ "node",								},
};

const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
				  [PERF_EVSEL__MAX_ALIASES] = {
	{ "load",	"loads",	"read",					},
	{ "store",	"stores",	"write",				},
	{ "prefetch",	"prefetches",	"speculative-read", "speculative-load",	},
};

const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
				      [PERF_EVSEL__MAX_ALIASES] = {
	{ "refs",	"Reference",	"ops",		"access",		},
	{ "misses",	"miss",							},
};

#define C(x)		PERF_COUNT_HW_CACHE_##x
#define CACHE_READ	(1 << C(OP_READ))
#define CACHE_WRITE	(1 << C(OP_WRITE))
#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
#define COP(x)		(1 << x)

/*
 * Cache operation stat:
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
	[C(L1D)]  = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
	[C(L1I)]  = (CACHE_READ | CACHE_PREFETCH),
	[C(LL)]   = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
	[C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
	[C(ITLB)] = (CACHE_READ),
	[C(BPU)]  = (CACHE_READ),
	[C(NODE)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
};

bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
{
	if (perf_evsel__hw_cache_stat[type] & COP(op))
		return true;	/* valid */
	else
		return false;	/* invalid */
}

int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
					    char *bf, size_t size)
{
	if (result) {
		return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
				 perf_evsel__hw_cache_op[op][0],
				 perf_evsel__hw_cache_result[result][0]);
	}

	return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
			 perf_evsel__hw_cache_op[op][1]);
}

static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
{
	u8 op, result, type = (config >> 0) & 0xff;
	const char *err = "unknown-ext-hardware-cache-type";

	if (type >= PERF_COUNT_HW_CACHE_MAX)
		goto out_err;

	op = (config >> 8) & 0xff;
	err = "unknown-ext-hardware-cache-op";
	if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
		goto out_err;

	result = (config >> 16) & 0xff;
	err = "unknown-ext-hardware-cache-result";
	if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		goto out_err;

	err = "invalid-cache";
	if (!perf_evsel__is_cache_op_valid(type, op))
		goto out_err;

	return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
out_err:
	return scnprintf(bf, size, "%s", err);
}

static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

const char *perf_evsel__name(struct perf_evsel *evsel)
{
	char bf[128];

	if (evsel->name)
		return evsel->name;

	switch (evsel->attr.type) {
	case PERF_TYPE_RAW:
		perf_evsel__raw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HARDWARE:
		perf_evsel__hw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HW_CACHE:
		perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_SOFTWARE:
		perf_evsel__sw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_TRACEPOINT:
		scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
		break;

	case PERF_TYPE_BREAKPOINT:
		perf_evsel__bp_name(evsel, bf, sizeof(bf));
		break;

	default:
		scnprintf(bf, sizeof(bf), "unknown attr type: %d",
			  evsel->attr.type);
		break;
	}

	evsel->name = strdup(bf);

	return evsel->name ?: "unknown";
}

const char *perf_evsel__group_name(struct perf_evsel *evsel)
{
	return evsel->group_name ?: "anon group";
}

int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
{
	int ret;
	struct perf_evsel *pos;
	const char *group_name = perf_evsel__group_name(evsel);

	ret = scnprintf(buf, size, "%s", group_name);

	ret += scnprintf(buf + ret, size - ret, " { %s",
			 perf_evsel__name(evsel));

	for_each_group_member(pos, evsel)
		ret += scnprintf(buf + ret, size - ret, ", %s",
				 perf_evsel__name(pos));

	ret += scnprintf(buf + ret, size - ret, " }");

	return ret;
}

/*
 * The enable_on_exec/disabled value strategy:
 *
 *  1) For any type of traced program:
 *    - all independent events and group leaders are disabled
 *    - all group members are enabled
 *
 *     Group members are ruled by group leaders. They need to
 *     be enabled, because the group scheduling relies on that.
 *
 *  2) For traced programs executed by perf:
 *    - all independent events and group leaders have
 *      enable_on_exec set
 *    - we don't specifically enable or disable any event during
 *      the record command
 *
 *     Independent events and group leaders are initially disabled
 *     and get enabled by exec. Group members are ruled by group
 *     leaders as stated in 1).
 *
 *  3) For traced programs attached by perf (pid/tid):
 *    - we specifically enable or disable all events during
 *      the record command
 *
 *     When attaching events to an already running traced process,
 *     we enable/disable the events explicitly, as there's no
 *     initial exec call of the traced program.
 */
void perf_evsel__config(struct perf_evsel *evsel,
			struct perf_record_opts *opts)
{
	struct perf_event_attr *attr = &evsel->attr;
	int track = !evsel->idx; /* only the first counter needs these */

	attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
	attr->inherit	    = !opts->no_inherit;

	perf_evsel__set_sample_bit(evsel, IP);
	perf_evsel__set_sample_bit(evsel, TID);

	/*
	 * We default some events to a sample period of 1, but keep
	 * it a weak assumption that the user can override.
	 */
	if (!attr->sample_period || (opts->user_freq != UINT_MAX &&
				     opts->user_interval != ULLONG_MAX)) {
		if (opts->freq) {
			perf_evsel__set_sample_bit(evsel, PERIOD);
			attr->freq	  = 1;
			attr->sample_freq = opts->freq;
		} else {
			attr->sample_period = opts->default_interval;
		}
	}

	if (opts->no_samples)
		attr->sample_freq = 0;

	if (opts->inherit_stat)
		attr->inherit_stat = 1;

	if (opts->sample_address) {
		perf_evsel__set_sample_bit(evsel, ADDR);
		attr->mmap_data = track;
	}

	if (opts->call_graph) {
		perf_evsel__set_sample_bit(evsel, CALLCHAIN);

		if (opts->call_graph == CALLCHAIN_DWARF) {
			perf_evsel__set_sample_bit(evsel, REGS_USER);
			perf_evsel__set_sample_bit(evsel, STACK_USER);
			attr->sample_regs_user = PERF_REGS_MASK;
			attr->sample_stack_user = opts->stack_dump_size;
			attr->exclude_callchain_user = 1;
		}
	}

	if (perf_target__has_cpu(&opts->target))
		perf_evsel__set_sample_bit(evsel, CPU);

	if (opts->period)
		perf_evsel__set_sample_bit(evsel, PERIOD);

	if (!perf_missing_features.sample_id_all &&
	    (opts->sample_time || !opts->no_inherit ||
	     perf_target__has_cpu(&opts->target)))
		perf_evsel__set_sample_bit(evsel, TIME);

	if (opts->raw_samples) {
		perf_evsel__set_sample_bit(evsel, TIME);
		perf_evsel__set_sample_bit(evsel, RAW);
		perf_evsel__set_sample_bit(evsel, CPU);
	}

	if (opts->no_delay) {
		attr->watermark = 0;
		attr->wakeup_events = 1;
	}
	if (opts->branch_stack) {
		perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
		attr->branch_sample_type = opts->branch_stack;
	}

	attr->mmap = track;
	attr->comm = track;

	/*
	 * XXX see the function comment above
	 *
	 * Disable only independent events or group leaders,
	 * keeping group members enabled.
	 */
	if (perf_evsel__is_group_leader(evsel))
		attr->disabled = 1;

	/*
	 * Set enable_on_exec for independent events and group
	 * leaders of traced programs executed by perf.
	 */
	if (perf_target__none(&opts->target) && perf_evsel__is_group_leader(evsel))
		attr->enable_on_exec = 1;
}

int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

	if (evsel->fd) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			for (thread = 0; thread < nthreads; thread++) {
				FD(evsel, cpu, thread) = -1;
			}
		}
	}

	return evsel->fd != NULL ? 0 : -ENOMEM;
}

int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
			   const char *filter)
{
	int cpu, thread;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			int fd = FD(evsel, cpu, thread),
			    err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter);

			if (err)
				return err;
		}
	}

	return 0;
}

int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}

int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
{
	evsel->counts = zalloc((sizeof(*evsel->counts) +
				(ncpus * sizeof(struct perf_counts_values))));
	return evsel->counts != NULL ? 0 : -ENOMEM;
}

void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}

void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	free(evsel->id);
	evsel->id = NULL;
}

void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	for (cpu = 0; cpu < ncpus; cpu++)
		for (thread = 0; thread < nthreads; ++thread) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
}

void perf_evsel__free_counts(struct perf_evsel *evsel)
{
	free(evsel->counts);
}

void perf_evsel__exit(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	xyarray__delete(evsel->fd);
	xyarray__delete(evsel->sample_id);
	free(evsel->id);
}

void perf_evsel__delete(struct perf_evsel *evsel)
{
	perf_evsel__exit(evsel);
	close_cgroup(evsel->cgrp);
	free(evsel->group_name);
	if (evsel->tp_format)
		pevent_free_format(evsel->tp_format);
	free(evsel->name);
	free(evsel);
}

static inline void compute_deltas(struct perf_evsel *evsel,
				  int cpu,
				  struct perf_counts_values *count)
{
	struct perf_counts_values tmp;

	if (!evsel->prev_raw_counts)
		return;

	if (cpu == -1) {
		tmp = evsel->prev_raw_counts->aggr;
		evsel->prev_raw_counts->aggr = *count;
	} else {
		tmp = evsel->prev_raw_counts->cpu[cpu];
		evsel->prev_raw_counts->cpu[cpu] = *count;
	}

	count->val = count->val - tmp.val;
	count->ena = count->ena - tmp.ena;
	count->run = count->run - tmp.run;
}

int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
		return -errno;

	compute_deltas(evsel, cpu, &count);

	if (scale) {
		if (count.run == 0)
			count.val = 0;
		else if (count.run < count.ena)
			count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
	} else
		count.ena = count.run = 0;

	evsel->counts->cpu[cpu] = count;
	return 0;
}

int __perf_evsel__read(struct perf_evsel *evsel,
		       int ncpus, int nthreads, bool scale)
{
	size_t nv = scale ? 3 : 1;
	int cpu, thread;
	struct perf_counts_values *aggr = &evsel->counts->aggr, count;

	aggr->val = aggr->ena = aggr->run = 0;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			if (FD(evsel, cpu, thread) < 0)
				continue;

			if (readn(FD(evsel, cpu, thread),
				  &count, nv * sizeof(u64)) < 0)
				return -errno;

			aggr->val += count.val;
			if (scale) {
				aggr->ena += count.ena;
				aggr->run += count.run;
			}
		}
	}

	compute_deltas(evsel, -1, aggr);

	evsel->counts->scaled = 0;
	if (scale) {
		if (aggr->run == 0) {
			evsel->counts->scaled = -1;
			aggr->val = 0;
			return 0;
		}

		if (aggr->run < aggr->ena) {
			evsel->counts->scaled = 1;
			aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
		}
	} else
		aggr->ena = aggr->run = 0;

	return 0;
}

static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
{
	struct perf_evsel *leader = evsel->leader;
	int fd;

	if (perf_evsel__is_group_leader(evsel))
		return -1;

	/*
	 * Leader must be already processed/open,
	 * if not it's a bug.
	 */
	BUG_ON(!leader->fd);

	fd = FD(leader, cpu, thread);
	BUG_ON(fd == -1);

	return fd;
}

static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
			      struct thread_map *threads)
{
	int cpu, thread;
	unsigned long flags = 0;
	int pid = -1, err;

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
		return -ENOMEM;

	if (evsel->cgrp) {
		flags = PERF_FLAG_PID_CGROUP;
		pid = evsel->cgrp->fd;
	}

fallback_missing_features:
	if (perf_missing_features.exclude_guest)
		evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
retry_sample_id:
	if (perf_missing_features.sample_id_all)
		evsel->attr.sample_id_all = 0;

	for (cpu = 0; cpu < cpus->nr; cpu++) {

		for (thread = 0; thread < threads->nr; thread++) {
			int group_fd;

			if (!evsel->cgrp)
				pid = threads->map[thread];

			group_fd = get_group_fd(evsel, cpu, thread);

			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
								     pid,
								     cpus->map[cpu],
								     group_fd, flags);
			if (FD(evsel, cpu, thread) < 0) {
				err = -errno;
				goto try_fallback;
			}
		}
	}

	return 0;

try_fallback:
	if (err != -EINVAL || cpu > 0 || thread > 0)
		goto out_close;

	if (!perf_missing_features.exclude_guest &&
	    (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
		perf_missing_features.exclude_guest = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.sample_id_all) {
		perf_missing_features.sample_id_all = true;
		goto retry_sample_id;
	}

out_close:
	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = threads->nr;
	} while (--cpu >= 0);
	return err;
}

void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd(evsel, ncpus, nthreads);
	perf_evsel__free_fd(evsel);
	evsel->fd = NULL;
}

static struct {
	struct cpu_map map;
	int cpus[1];
} empty_cpu_map = {
	.map.nr	= 1,
	.cpus	= { -1, },
};

static struct {
	struct thread_map map;
	int threads[1];
} empty_thread_map = {
	.map.nr	 = 1,
	.threads = { -1, },
};

int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads)
{
	if (cpus == NULL) {
		/* Work around old compiler warnings about strict aliasing */
		cpus = &empty_cpu_map.map;
	}

	if (threads == NULL)
		threads = &empty_thread_map.map;

	return __perf_evsel__open(evsel, cpus, threads);
}

int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus)
{
	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map);
}

int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads)
{
	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
}

static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
				       const union perf_event *event,
				       struct perf_sample *sample)
{
	u64 type = evsel->attr.sample_type;
	const u64 *array = event->sample.array;
	bool swapped = evsel->needs_swap;
	union u64_swap u;

	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		sample->cpu = u.val32[0];
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		sample->pid = u.val32[0];
		sample->tid = u.val32[1];
	}

	return 0;
}

static bool sample_overlap(const union perf_event *event,
			   const void *offset, u64 size)
{
	const void *base = event;

	if (offset + size > base + event->header.size)
		return true;

	return false;
}

int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
			     struct perf_sample *data)
{
	u64 type = evsel->attr.sample_type;
	u64 regs_user = evsel->attr.sample_regs_user;
	bool swapped = evsel->needs_swap;
	const u64 *array;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	memset(data, 0, sizeof(*data));
	data->cpu = data->pid = data->tid = -1;
	data->stream_id = data->id = data->time = -1ULL;
	data->period = 1;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		if (!evsel->attr.sample_id_all)
			return 0;
		return perf_evsel__parse_id_sample(evsel, event, data);
	}

	array = event->sample.array;

	if (evsel->sample_size + sizeof(event->header) > event->header.size)
		return -EFAULT;

	if (type & PERF_SAMPLE_IP) {
		data->ip = event->ip.ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		data->pid = u.val32[0];
		data->tid = u.val32[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	data->addr = 0;
	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	data->id = -1ULL;
	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		data->cpu = u.val32[0];
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		fprintf(stderr, "PERF_SAMPLE_READ is unsupported for now\n");
		return -1;
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		if (sample_overlap(event, array, sizeof(data->callchain->nr)))
			return -EFAULT;

		data->callchain = (struct ip_callchain *)array;

		if (sample_overlap(event, array, data->callchain->nr))
			return -EFAULT;

		array += 1 + data->callchain->nr;
	}

	if (type & PERF_SAMPLE_RAW) {
		const u64 *pdata;

		u.val64 = *array;
		if (WARN_ONCE(swapped,
			      "Endianness of raw data not corrected!\n")) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		if (sample_overlap(event, array, sizeof(u32)))
			return -EFAULT;

		data->raw_size = u.val32[0];
		pdata = (void *) array + sizeof(u32);

		if (sample_overlap(event, pdata, data->raw_size))
			return -EFAULT;

		data->raw_data = (void *) pdata;

		array = (void *)array + data->raw_size + sizeof(u32);
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		u64 sz;

		data->branch_stack = (struct branch_stack *)array;
		array++; /* nr */

		sz = data->branch_stack->nr * sizeof(struct branch_entry);
		sz /= sizeof(u64);
		array += sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		/* First u64 tells us if we have any regs in sample. */
		u64 avail = *array++;

		if (avail) {
			data->user_regs.regs = (u64 *)array;
			array += hweight_long(regs_user);
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		u64 size = *array++;

		data->user_stack.offset = ((char *)(array - 1)
					   - (char *) event);

		if (!size) {
			data->user_stack.size = 0;
		} else {
			data->user_stack.data = (char *)array;
			array += size / sizeof(*array);
			data->user_stack.size = *array;
		}
	}

	return 0;
}

int perf_event__synthesize_sample(union perf_event *event, u64 type,
				  const struct perf_sample *sample,
				  bool swapped)
{
	u64 *array;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	array = event->sample.array;

	if (type & PERF_SAMPLE_IP) {
		event->ip.ip = sample->ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val32[0] = sample->pid;
		u.val32[1] = sample->tid;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
			u.val64 = bswap_64(u.val64);
		}

		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		*array = sample->time;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		*array = sample->addr;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		*array = sample->stream_id;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val32[0] = sample->cpu;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val64 = bswap_64(u.val64);
		}
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		*array = sample->period;
		array++;
	}

	return 0;
}

struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name)
{
	return pevent_find_field(evsel->tp_format, name);
}

void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
			 const char *name)
{
	struct format_field *field = perf_evsel__field(evsel, name);
	int offset;

	if (!field)
		return NULL;

	offset = field->offset;

	if (field->flags & FIELD_IS_DYNAMIC) {
		offset = *(int *)(sample->raw_data + field->offset);
		offset &= 0xffff;
	}

	return sample->raw_data + offset;
}

u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
		       const char *name)
{
	struct format_field *field = perf_evsel__field(evsel, name);
	void *ptr;
	u64 value;

	if (!field)
		return 0;

	ptr = sample->raw_data + field->offset;

	switch (field->size) {
	case 1:
		return *(u8 *)ptr;
	case 2:
		value = *(u16 *)ptr;
		break;
	case 4:
		value = *(u32 *)ptr;
		break;
	case 8:
		value = *(u64 *)ptr;
		break;
	default:
		return 0;
	}

	if (!evsel->needs_swap)
		return value;

	switch (field->size) {
	case 2:
		return bswap_16(value);
	case 4:
		return bswap_32(value);
	case 8:
		return bswap_64(value);
	default:
		return 0;
	}

	return 0;
}

static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...)
{
	va_list args;
	int ret = 0;

	if (!*first) {
		ret += fprintf(fp, ",");
	} else {
		ret += fprintf(fp, ":");
		*first = false;
	}

	va_start(args, fmt);
	ret += vfprintf(fp, fmt, args);
	va_end(args);
	return ret;
}

static int __if_fprintf(FILE *fp, bool *first, const char *field, u64 value)
{
	if (value == 0)
		return 0;

	return comma_fprintf(fp, first, " %s: %" PRIu64, field, value);
}

#define if_print(field) printed += __if_fprintf(fp, &first, #field, evsel->attr.field)

struct bit_names {
	int bit;
	const char *name;
};

static int bits__fprintf(FILE *fp, const char *field, u64 value,
			 struct bit_names *bits, bool *first)
{
	int i = 0, printed = comma_fprintf(fp, first, " %s: ", field);
	bool first_bit = true;

	do {
		if (value & bits[i].bit) {
			printed += fprintf(fp, "%s%s", first_bit ? "" : "|", bits[i].name);
			first_bit = false;
		}
	} while (bits[++i].name != NULL);

	return printed;
}

static int sample_type__fprintf(FILE *fp, bool *first, u64 value)
{
#define bit_name(n) { PERF_SAMPLE_##n, #n }
	struct bit_names bits[] = {
		bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR),
		bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
		bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
		bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
		{ .name = NULL, }
	};
#undef bit_name
	return bits__fprintf(fp, "sample_type", value, bits, first);
}

static int read_format__fprintf(FILE *fp, bool *first, u64 value)
{
#define bit_name(n) { PERF_FORMAT_##n, #n }
	struct bit_names bits[] = {
		bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING),
		bit_name(ID), bit_name(GROUP),
		{ .name = NULL, }
	};
#undef bit_name
	return bits__fprintf(fp, "read_format", value, bits, first);
}

int perf_evsel__fprintf(struct perf_evsel *evsel,
			struct perf_attr_details *details, FILE *fp)
{
	bool first = true;
	int printed = 0;

	if (details->event_group) {
		struct perf_evsel *pos;

		if (!perf_evsel__is_group_leader(evsel))
			return 0;

		if (evsel->nr_members > 1)
			printed += fprintf(fp, "%s{", evsel->group_name ?: "");

		printed += fprintf(fp, "%s", perf_evsel__name(evsel));
		for_each_group_member(pos, evsel)
			printed += fprintf(fp, ",%s", perf_evsel__name(pos));

		if (evsel->nr_members > 1)
			printed += fprintf(fp, "}");
		goto out;
	}

	printed += fprintf(fp, "%s", perf_evsel__name(evsel));

	if (details->verbose || details->freq) {
		printed += comma_fprintf(fp, &first, " sample_freq=%" PRIu64,
					 (u64)evsel->attr.sample_freq);
	}

	if (details->verbose) {
		if_print(type);
		if_print(config);
		if_print(config1);
		if_print(config2);
		if_print(size);
		printed += sample_type__fprintf(fp, &first, evsel->attr.sample_type);
		if (evsel->attr.read_format)
			printed += read_format__fprintf(fp, &first, evsel->attr.read_format);
		if_print(disabled);
		if_print(inherit);
		if_print(pinned);
		if_print(exclusive);
		if_print(exclude_user);
		if_print(exclude_kernel);
		if_print(exclude_hv);
		if_print(exclude_idle);
		if_print(mmap);
		if_print(comm);
		if_print(freq);
		if_print(inherit_stat);
		if_print(enable_on_exec);
		if_print(task);
		if_print(watermark);
		if_print(precise_ip);
		if_print(mmap_data);
		if_print(sample_id_all);
		if_print(exclude_host);
		if_print(exclude_guest);
		if_print(__reserved_1);
		if_print(wakeup_events);
		if_print(bp_type);
		if_print(branch_sample_type);
	}
out:
	fputc('\n', fp);
	return ++printed;
}

bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
			  char *msg, size_t msgsize)
{
	if ((err == ENOENT || err == ENXIO) &&
	    evsel->attr.type   == PERF_TYPE_HARDWARE &&
	    evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
		/*
		 * If it's cycles then fall back to hrtimer based
		 * cpu-clock-tick sw counter, which is always available even if
		 * no PMU support.
		 *
		 * PPC returns ENXIO until 2.6.37 (behavior changed with commit
		 * b0a873e).
		 */
		scnprintf(msg, msgsize, "%s",
			  "The cycles event is not supported, trying to fall back to cpu-clock-ticks");

		evsel->attr.type   = PERF_TYPE_SOFTWARE;
		evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK;

		free(evsel->name);
		evsel->name = NULL;
		return true;
	}

	return false;
}

int perf_evsel__open_strerror(struct perf_evsel *evsel,
			      struct perf_target *target,
			      int err, char *msg, size_t size)
{
	switch (err) {
	case EPERM:
	case EACCES:
		return scnprintf(msg, size,
		 "You may not have permission to collect %sstats.\n"
		 "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n"
		 " -1 - Not paranoid at all\n"
		 "  0 - Disallow raw tracepoint access for unpriv\n"
		 "  1 - Disallow cpu events for unpriv\n"
		 "  2 - Disallow kernel profiling for unpriv",
				 target->system_wide ? "system-wide " : "");
	case ENOENT:
		return scnprintf(msg, size, "The %s event is not supported.",
				 perf_evsel__name(evsel));
	case EMFILE:
		return scnprintf(msg, size, "%s",
			 "Too many events are opened.\n"
			 "Try again after reducing the number of events.");
	case ENODEV:
		if (target->cpu_list)
			return scnprintf(msg, size, "%s",
				"No such device - did you specify an out-of-range profile CPU?\n");
		break;
	case EOPNOTSUPP:
		if (evsel->attr.precise_ip)
			return scnprintf(msg, size, "%s",
				"\'precise\' request may not be supported. Try removing 'p' modifier.");
#if defined(__i386__) || defined(__x86_64__)
		if (evsel->attr.type == PERF_TYPE_HARDWARE)
			return scnprintf(msg, size, "%s",
				"No hardware sampling interrupt available.\n"
				"No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it.");
#endif
		break;
	default:
		break;
	}

	return scnprintf(msg, size,
			 "The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
			 "/bin/dmesg may provide additional information.\n"
			 "No CONFIG_PERF_EVENTS=y kernel support configured?\n",
			 err, strerror(err), perf_evsel__name(evsel));
}