// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 */
/*
 * Powerpc needs __SANE_USERSPACE_TYPES__ before <linux/types.h> to select
 * 'int-ll64.h' and avoid compile warnings when printing __u64 with %llu.
 */
#define __SANE_USERSPACE_TYPES__

#include <byteswap.h>
#include <errno.h>
#include <inttypes.h>
#include <linux/bitops.h>
#include <api/fs/fs.h>
#include <api/fs/tracing_path.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/zalloc.h>
#include <sys/ioctl.h>
#include <sys/resource.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <dirent.h>
#include <stdlib.h>
#include <perf/evsel.h>
#include "asm/bug.h"
#include "bpf_counter.h"
#include "callchain.h"
#include "cgroup.h"
#include "counts.h"
#include "event.h"
#include "evsel.h"
#include "time-utils.h"
#include "util/env.h"
#include "util/evsel_config.h"
#include "util/evsel_fprintf.h"
#include "evlist.h"
#include <perf/cpumap.h>
#include "thread_map.h"
#include "target.h"
#include "perf_regs.h"
#include "record.h"
#include "debug.h"
#include "trace-event.h"
#include "stat.h"
#include "string2.h"
#include "memswap.h"
#include "util.h"
#include "util/hashmap.h"
#include "off_cpu.h"
#include "pmu.h"
#include "pmus.h"
#include "hwmon_pmu.h"
#include "tool_pmu.h"
#include "rlimit.h"
#include "../perf-sys.h"
#include "util/parse-branch-options.h"
#include "util/bpf-filter.h"
#include "util/hist.h"
#include <internal/xyarray.h>
#include <internal/lib.h>
#include <internal/threadmap.h>
#include "util/intel-tpebs.h"

#include <linux/ctype.h>

#ifdef HAVE_LIBTRACEEVENT
#include <event-parse.h>
#endif

struct perf_missing_features perf_missing_features;

static clockid_t clockid;

static int evsel__no_extra_init(struct evsel *evsel __maybe_unused)
{
	return 0;
}

static bool test_attr__enabled(void)
{
	static bool test_attr__enabled;
	static bool test_attr__enabled_tested;

	if (!test_attr__enabled_tested) {
		char *dir = getenv("PERF_TEST_ATTR");

		test_attr__enabled = (dir != NULL);
		test_attr__enabled_tested = true;
	}
	return test_attr__enabled;
}

#define __WRITE_ASS(str, fmt, data)					\
do {									\
	if (fprintf(file, #str "=%"fmt "\n", data) < 0) {		\
		perror("test attr - failed to write event file");	\
		fclose(file);						\
		return -1;						\
	}								\
} while (0)

#define WRITE_ASS(field, fmt) __WRITE_ASS(field, fmt, attr->field)
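/*
 * e.g. WRITE_ASS(type, PRIu32) expands to fprintf(file, "type=%" PRIu32 "\n",
 * attr->type), so each field is stored as a "name=value" line.
 */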

static int store_event(struct perf_event_attr *attr, pid_t pid, struct perf_cpu cpu,
		       int fd, int group_fd, unsigned long flags)
{
	FILE *file;
	char path[PATH_MAX];
	char *dir = getenv("PERF_TEST_ATTR");

	snprintf(path, PATH_MAX, "%s/event-%d-%llu-%d", dir,
		 attr->type, attr->config, fd);
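	/* e.g. "$PERF_TEST_ATTR/event-0-0-3" for a PERF_TYPE_HARDWARE event on fd 3 */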

	file = fopen(path, "w+");
	if (!file) {
		perror("test attr - failed to open event file");
		return -1;
	}

	if (fprintf(file, "[event-%d-%llu-%d]\n",
		    attr->type, attr->config, fd) < 0) {
		perror("test attr - failed to write event file");
		fclose(file);
		return -1;
	}

	/* syscall arguments */
	__WRITE_ASS(fd, "d", fd);
	__WRITE_ASS(group_fd, "d", group_fd);
	__WRITE_ASS(cpu, "d", cpu.cpu);
	__WRITE_ASS(pid, "d", pid);
	__WRITE_ASS(flags, "lu", flags);

	/* struct perf_event_attr */
	WRITE_ASS(type, PRIu32);
	WRITE_ASS(size, PRIu32);
	WRITE_ASS(config, "llu");
	WRITE_ASS(sample_period, "llu");
	WRITE_ASS(sample_type, "llu");
	WRITE_ASS(read_format, "llu");
	WRITE_ASS(disabled, "d");
	WRITE_ASS(inherit, "d");
	WRITE_ASS(pinned, "d");
	WRITE_ASS(exclusive, "d");
	WRITE_ASS(exclude_user, "d");
	WRITE_ASS(exclude_kernel, "d");
	WRITE_ASS(exclude_hv, "d");
	WRITE_ASS(exclude_idle, "d");
	WRITE_ASS(mmap, "d");
	WRITE_ASS(comm, "d");
	WRITE_ASS(freq, "d");
	WRITE_ASS(inherit_stat, "d");
	WRITE_ASS(enable_on_exec, "d");
	WRITE_ASS(task, "d");
	WRITE_ASS(watermark, "d");
	WRITE_ASS(precise_ip, "d");
	WRITE_ASS(mmap_data, "d");
	WRITE_ASS(sample_id_all, "d");
	WRITE_ASS(exclude_host, "d");
	WRITE_ASS(exclude_guest, "d");
	WRITE_ASS(exclude_callchain_kernel, "d");
	WRITE_ASS(exclude_callchain_user, "d");
	WRITE_ASS(mmap2, "d");
	WRITE_ASS(comm_exec, "d");
	WRITE_ASS(context_switch, "d");
	WRITE_ASS(write_backward, "d");
	WRITE_ASS(namespaces, "d");
	WRITE_ASS(use_clockid, "d");
	WRITE_ASS(wakeup_events, PRIu32);
	WRITE_ASS(bp_type, PRIu32);
	WRITE_ASS(config1, "llu");
	WRITE_ASS(config2, "llu");
	WRITE_ASS(branch_sample_type, "llu");
	WRITE_ASS(sample_regs_user, "llu");
	WRITE_ASS(sample_stack_user, PRIu32);

	fclose(file);
	return 0;
}

#undef __WRITE_ASS
#undef WRITE_ASS

static void test_attr__open(struct perf_event_attr *attr, pid_t pid, struct perf_cpu cpu,
			    int fd, int group_fd, unsigned long flags)
{
	int errno_saved = errno;

	if ((fd != -1) && store_event(attr, pid, cpu, fd, group_fd, flags)) {
		pr_err("test attr FAILED");
		exit(128);
	}

	errno = errno_saved;
}

static void evsel__no_extra_fini(struct evsel *evsel __maybe_unused)
{
}

static struct {
	size_t size;
	int (*init)(struct evsel *evsel);
	void (*fini)(struct evsel *evsel);
} perf_evsel__object = {
	.size = sizeof(struct evsel),
	.init = evsel__no_extra_init,
	.fini = evsel__no_extra_fini,
};

int evsel__object_config(size_t object_size, int (*init)(struct evsel *evsel),
			 void (*fini)(struct evsel *evsel))
{
	if (object_size == 0)
		goto set_methods;

	if (perf_evsel__object.size > object_size)
		return -EINVAL;

	perf_evsel__object.size = object_size;

set_methods:
	if (init != NULL)
		perf_evsel__object.init = init;

	if (fini != NULL)
		perf_evsel__object.fini = fini;

	return 0;
}

#define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))

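/*
 * Each bit set in sample_type contributes one u64 to a sample, e.g.
 * PERF_SAMPLE_IP | PERF_SAMPLE_TID accounts for 16 bytes.
 */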
int __evsel__sample_size(u64 sample_type)
{
	u64 mask = sample_type & PERF_SAMPLE_MASK;
	int size = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i))
			size++;
	}

	size *= sizeof(u64);

	return size;
}

/**
 * __perf_evsel__calc_id_pos - calculate id_pos.
 * @sample_type: sample type
 *
 * This function returns the position of the event id (PERF_SAMPLE_ID or
 * PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of struct
 * perf_record_sample.
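 *
 * For example, with sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 * PERF_SAMPLE_ID the id is the third u64 in the sample, so id_pos is 2.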
 */
static int __perf_evsel__calc_id_pos(u64 sample_type)
{
	int idx = 0;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 0;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_IP)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TID)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TIME)
		idx += 1;

	if (sample_type & PERF_SAMPLE_ADDR)
		idx += 1;

	return idx;
}

/**
 * __perf_evsel__calc_is_pos - calculate is_pos.
 * @sample_type: sample type
 *
 * This function returns the position (counting backwards) of the event id
 * (PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if
 * sample_id_all is used there is an id sample appended to non-sample events.
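 *
 * For example, with sample_type = PERF_SAMPLE_ID | PERF_SAMPLE_CPU |
 * PERF_SAMPLE_STREAM_ID the id is the third u64 from the end of the
 * record, so is_pos is 3.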
 */
static int __perf_evsel__calc_is_pos(u64 sample_type)
{
	int idx = 1;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 1;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_CPU)
		idx += 1;

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		idx += 1;

	return idx;
}

void evsel__calc_id_pos(struct evsel *evsel)
{
	evsel->id_pos = __perf_evsel__calc_id_pos(evsel->core.attr.sample_type);
	evsel->is_pos = __perf_evsel__calc_is_pos(evsel->core.attr.sample_type);
}

void __evsel__set_sample_bit(struct evsel *evsel,
			     enum perf_event_sample_format bit)
{
	if (!(evsel->core.attr.sample_type & bit)) {
		evsel->core.attr.sample_type |= bit;
		evsel->sample_size += sizeof(u64);
		evsel__calc_id_pos(evsel);
	}
}

void __evsel__reset_sample_bit(struct evsel *evsel,
			       enum perf_event_sample_format bit)
{
	if (evsel->core.attr.sample_type & bit) {
		evsel->core.attr.sample_type &= ~bit;
		evsel->sample_size -= sizeof(u64);
		evsel__calc_id_pos(evsel);
	}
}

void evsel__set_sample_id(struct evsel *evsel,
			  bool can_sample_identifier)
{
	if (can_sample_identifier) {
		evsel__reset_sample_bit(evsel, ID);
		evsel__set_sample_bit(evsel, IDENTIFIER);
	} else {
		evsel__set_sample_bit(evsel, ID);
	}
	evsel->core.attr.read_format |= PERF_FORMAT_ID;
}

/**
 * evsel__is_function_event - Return whether given evsel is a function
 * trace event
 *
 * @evsel - evsel selector to be tested
 *
 * Return %true if event is function trace event
 */
bool evsel__is_function_event(struct evsel *evsel)
{
#define FUNCTION_EVENT "ftrace:function"

	return evsel->name &&
	       !strncmp(FUNCTION_EVENT, evsel->name, sizeof(FUNCTION_EVENT));

#undef FUNCTION_EVENT
}

void evsel__init(struct evsel *evsel,
		 struct perf_event_attr *attr, int idx)
{
	perf_evsel__init(&evsel->core, attr, idx);
	evsel->tracking = !idx;
	evsel->unit = strdup("");
	evsel->scale = 1.0;
	evsel->max_events = ULONG_MAX;
	evsel->evlist = NULL;
	evsel->bpf_obj = NULL;
	evsel->bpf_fd = -1;
	INIT_LIST_HEAD(&evsel->config_terms);
	INIT_LIST_HEAD(&evsel->bpf_counter_list);
	INIT_LIST_HEAD(&evsel->bpf_filters);
	perf_evsel__object.init(evsel);
	evsel->sample_size = __evsel__sample_size(attr->sample_type);
	evsel__calc_id_pos(evsel);
	evsel->cmdline_group_boundary = false;
	evsel->metric_events = NULL;
	evsel->per_pkg_mask = NULL;
	evsel->collect_stat = false;
	evsel->group_pmu_name = NULL;
	evsel->skippable = false;
	evsel->alternate_hw_config = PERF_COUNT_HW_MAX;
}

struct evsel *evsel__new_idx(struct perf_event_attr *attr, int idx)
{
	struct evsel *evsel = zalloc(perf_evsel__object.size);

	if (!evsel)
		return NULL;
	evsel__init(evsel, attr, idx);

	if (evsel__is_bpf_output(evsel) && !attr->sample_type) {
		evsel->core.attr.sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
						PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD);
		evsel->core.attr.sample_period = 1;
	}

	if (evsel__is_clock(evsel)) {
		free((char *)evsel->unit);
		evsel->unit = strdup("msec");
		evsel->scale = 1e-6;
	}

	return evsel;
}

int copy_config_terms(struct list_head *dst, struct list_head *src)
{
	struct evsel_config_term *pos, *tmp;

	list_for_each_entry(pos, src, list) {
		tmp = malloc(sizeof(*tmp));
		if (tmp == NULL)
			return -ENOMEM;

		*tmp = *pos;
		if (tmp->free_str) {
			tmp->val.str = strdup(pos->val.str);
			if (tmp->val.str == NULL) {
				free(tmp);
				return -ENOMEM;
			}
		}
		list_add_tail(&tmp->list, dst);
	}
	return 0;
}

static int evsel__copy_config_terms(struct evsel *dst, struct evsel *src)
{
	return copy_config_terms(&dst->config_terms, &src->config_terms);
}

/**
 * evsel__clone - create a new evsel copied from @orig
 * @orig: original evsel
 *
 * The assumption is that @orig is not configured nor opened yet.
 * So we only care about the attributes that can be set while it's parsed.
 */
struct evsel *evsel__clone(struct evsel *orig)
{
	struct evsel *evsel;

	BUG_ON(orig->core.fd);
	BUG_ON(orig->counts);
	BUG_ON(orig->priv);
	BUG_ON(orig->per_pkg_mask);

	/* cannot handle BPF objects for now */
	if (orig->bpf_obj)
		return NULL;

	evsel = evsel__new(&orig->core.attr);
	if (evsel == NULL)
		return NULL;

	evsel->core.cpus = perf_cpu_map__get(orig->core.cpus);
	evsel->core.own_cpus = perf_cpu_map__get(orig->core.own_cpus);
	evsel->core.threads = perf_thread_map__get(orig->core.threads);
	evsel->core.nr_members = orig->core.nr_members;
	evsel->core.system_wide = orig->core.system_wide;
	evsel->core.requires_cpu = orig->core.requires_cpu;
	evsel->core.is_pmu_core = orig->core.is_pmu_core;

	if (orig->name) {
		evsel->name = strdup(orig->name);
		if (evsel->name == NULL)
			goto out_err;
	}
	if (orig->group_name) {
		evsel->group_name = strdup(orig->group_name);
		if (evsel->group_name == NULL)
			goto out_err;
	}
	if (orig->group_pmu_name) {
		evsel->group_pmu_name = strdup(orig->group_pmu_name);
		if (evsel->group_pmu_name == NULL)
			goto out_err;
	}
	if (orig->filter) {
		evsel->filter = strdup(orig->filter);
		if (evsel->filter == NULL)
			goto out_err;
	}
	if (orig->metric_id) {
		evsel->metric_id = strdup(orig->metric_id);
		if (evsel->metric_id == NULL)
			goto out_err;
	}
	evsel->cgrp = cgroup__get(orig->cgrp);
#ifdef HAVE_LIBTRACEEVENT
	evsel->tp_format = orig->tp_format;
#endif
	evsel->handler = orig->handler;
	evsel->core.leader = orig->core.leader;

	evsel->max_events = orig->max_events;
	free((char *)evsel->unit);
	evsel->unit = strdup(orig->unit);
	if (evsel->unit == NULL)
		goto out_err;

	evsel->scale = orig->scale;
	evsel->snapshot = orig->snapshot;
	evsel->per_pkg = orig->per_pkg;
	evsel->percore = orig->percore;
	evsel->precise_max = orig->precise_max;
	evsel->is_libpfm_event = orig->is_libpfm_event;

	evsel->exclude_GH = orig->exclude_GH;
	evsel->sample_read = orig->sample_read;
	evsel->auto_merge_stats = orig->auto_merge_stats;
	evsel->collect_stat = orig->collect_stat;
	evsel->weak_group = orig->weak_group;
	evsel->use_config_name = orig->use_config_name;
	evsel->pmu = orig->pmu;

	if (evsel__copy_config_terms(evsel, orig) < 0)
		goto out_err;

	evsel->alternate_hw_config = orig->alternate_hw_config;

	return evsel;

out_err:
	evsel__delete(evsel);
	return NULL;
}

/*
 * Returns pointer with encoded error via <linux/err.h> interface.
 */
#ifdef HAVE_LIBTRACEEVENT
struct evsel *evsel__newtp_idx(const char *sys, const char *name, int idx, bool format)
{
	struct evsel *evsel = zalloc(perf_evsel__object.size);
	int err = -ENOMEM;

	if (evsel == NULL) {
		goto out_err;
	} else {
		struct perf_event_attr attr = {
			.type = PERF_TYPE_TRACEPOINT,
			.sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
		};

		if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
			goto out_free;

		event_attr_init(&attr);

		if (format) {
			evsel->tp_format = trace_event__tp_format(sys, name);
			if (IS_ERR(evsel->tp_format)) {
				err = PTR_ERR(evsel->tp_format);
				goto out_free;
			}
			attr.config = evsel->tp_format->id;
		} else {
			attr.config = (__u64) -1;
		}

		attr.sample_period = 1;
		evsel__init(evsel, &attr, idx);
	}

	return evsel;

out_free:
	zfree(&evsel->name);
	free(evsel);
out_err:
	return ERR_PTR(err);
}
#endif

const char *const evsel__hw_names[PERF_COUNT_HW_MAX] = {
	"cycles",
	"instructions",
	"cache-references",
	"cache-misses",
	"branches",
	"branch-misses",
	"bus-cycles",
	"stalled-cycles-frontend",
	"stalled-cycles-backend",
	"ref-cycles",
};

char *evsel__bpf_counter_events;

bool evsel__match_bpf_counter_events(const char *name)
{
	int name_len;
	bool match;
	char *ptr;

	if (!evsel__bpf_counter_events)
		return false;

	ptr = strstr(evsel__bpf_counter_events, name);
	name_len = strlen(name);

	/* check name matches a full token in evsel__bpf_counter_events */
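	/* e.g. "cycles" matches the list "cycles,instructions" but not "ref-cycles" */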
	match = (ptr != NULL) &&
		((ptr == evsel__bpf_counter_events) || (*(ptr - 1) == ',')) &&
		((*(ptr + name_len) == ',') || (*(ptr + name_len) == '\0'));

	return match;
}

static const char *__evsel__hw_name(u64 config)
{
	if (config < PERF_COUNT_HW_MAX && evsel__hw_names[config])
		return evsel__hw_names[config];

	return "unknown-hardware";
}

static int evsel__add_modifiers(struct evsel *evsel, char *bf, size_t size)
{
	int colon = 0, r = 0;
	struct perf_event_attr *attr = &evsel->core.attr;

#define MOD_PRINT(context, mod) do {					\
		if (!attr->exclude_##context) {				\
			if (!colon) colon = ++r;			\
			r += scnprintf(bf + r, size - r, "%c", mod);	\
		} } while(0)

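	/* e.g. an event with exclude_kernel and exclude_hv set prints ":u" */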
	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
		MOD_PRINT(kernel, 'k');
		MOD_PRINT(user, 'u');
		MOD_PRINT(hv, 'h');
	}

	if (attr->precise_ip) {
		if (!colon)
			colon = ++r;
		r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
	}

	if (attr->exclude_host || attr->exclude_guest) {
		MOD_PRINT(host, 'H');
		MOD_PRINT(guest, 'G');
	}
#undef MOD_PRINT
	if (colon)
		bf[colon - 1] = ':';
	return r;
}

int __weak arch_evsel__hw_name(struct evsel *evsel, char *bf, size_t size)
{
	return scnprintf(bf, size, "%s", __evsel__hw_name(evsel->core.attr.config));
}

static int evsel__hw_name(struct evsel *evsel, char *bf, size_t size)
{
	int r = arch_evsel__hw_name(evsel, bf, size);
	return r + evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *const evsel__sw_names[PERF_COUNT_SW_MAX] = {
	"cpu-clock",
	"task-clock",
	"page-faults",
	"context-switches",
	"cpu-migrations",
	"minor-faults",
	"major-faults",
	"alignment-faults",
	"emulation-faults",
	"dummy",
};

static const char *__evsel__sw_name(u64 config)
{
	if (config < PERF_COUNT_SW_MAX && evsel__sw_names[config])
		return evsel__sw_names[config];
	return "unknown-software";
}

static int evsel__sw_name(struct evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __evsel__sw_name(evsel->core.attr.config));
	return r + evsel__add_modifiers(evsel, bf + r, size - r);
}

static int __evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
{
	int r;

	r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);

	if (type & HW_BREAKPOINT_R)
		r += scnprintf(bf + r, size - r, "r");

	if (type & HW_BREAKPOINT_W)
		r += scnprintf(bf + r, size - r, "w");

	if (type & HW_BREAKPOINT_X)
		r += scnprintf(bf + r, size - r, "x");

	return r;
}

static int evsel__bp_name(struct evsel *evsel, char *bf, size_t size)
{
	struct perf_event_attr *attr = &evsel->core.attr;
	int r = __evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
	return r + evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *const evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX][EVSEL__MAX_ALIASES] = {
	{ "L1-dcache", "l1-d", "l1d", "L1-data", },
	{ "L1-icache", "l1-i", "l1i", "L1-instruction", },
	{ "LLC", "L2", },
	{ "dTLB", "d-tlb", "Data-TLB", },
	{ "iTLB", "i-tlb", "Instruction-TLB", },
	{ "branch", "branches", "bpu", "btb", "bpc", },
	{ "node", },
};

const char *const evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX][EVSEL__MAX_ALIASES] = {
	{ "load", "loads", "read", },
	{ "store", "stores", "write", },
	{ "prefetch", "prefetches", "speculative-read", "speculative-load", },
};

const char *const evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX][EVSEL__MAX_ALIASES] = {
	{ "refs", "Reference", "ops", "access", },
	{ "misses", "miss", },
};

#define C(x)		PERF_COUNT_HW_CACHE_##x
#define CACHE_READ	(1 << C(OP_READ))
#define CACHE_WRITE	(1 << C(OP_WRITE))
#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
#define COP(x)		(1 << x)

/*
 * cache operation stat
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static const unsigned long evsel__hw_cache_stat[C(MAX)] = {
	[C(L1D)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
	[C(L1I)] = (CACHE_READ | CACHE_PREFETCH),
	[C(LL)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
	[C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
	[C(ITLB)] = (CACHE_READ),
	[C(BPU)] = (CACHE_READ),
	[C(NODE)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
};

bool evsel__is_cache_op_valid(u8 type, u8 op)
{
	if (evsel__hw_cache_stat[type] & COP(op))
		return true;	/* valid */
	else
		return false;	/* invalid */
}

int __evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result, char *bf, size_t size)
{
	if (result) {
		return scnprintf(bf, size, "%s-%s-%s", evsel__hw_cache[type][0],
				 evsel__hw_cache_op[op][0],
				 evsel__hw_cache_result[result][0]);
	}

	return scnprintf(bf, size, "%s-%s", evsel__hw_cache[type][0],
			 evsel__hw_cache_op[op][1]);
}

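/*
 * The config value packs the cache type into bits 0-7, the op into bits 8-15
 * and the result into bits 16-23, so e.g. config == 0x10002 decodes to
 * "LLC-load-misses".
 */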
static int __evsel__hw_cache_name(u64 config, char *bf, size_t size)
{
	u8 op, result, type = (config >> 0) & 0xff;
	const char *err = "unknown-ext-hardware-cache-type";

	if (type >= PERF_COUNT_HW_CACHE_MAX)
		goto out_err;

	op = (config >> 8) & 0xff;
	err = "unknown-ext-hardware-cache-op";
	if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
		goto out_err;

	result = (config >> 16) & 0xff;
	err = "unknown-ext-hardware-cache-result";
	if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		goto out_err;

	err = "invalid-cache";
	if (!evsel__is_cache_op_valid(type, op))
		goto out_err;

	return __evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
out_err:
	return scnprintf(bf, size, "%s", err);
}

static int evsel__hw_cache_name(struct evsel *evsel, char *bf, size_t size)
{
	int ret = __evsel__hw_cache_name(evsel->core.attr.config, bf, size);
	return ret + evsel__add_modifiers(evsel, bf + ret, size - ret);
}

static int evsel__raw_name(struct evsel *evsel, char *bf, size_t size)
{
	int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->core.attr.config);
	return ret + evsel__add_modifiers(evsel, bf + ret, size - ret);
}

const char *evsel__name(struct evsel *evsel)
{
	char bf[128];

	if (!evsel)
		goto out_unknown;

	if (evsel->name)
		return evsel->name;

	switch (evsel->core.attr.type) {
	case PERF_TYPE_RAW:
		evsel__raw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HARDWARE:
		evsel__hw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HW_CACHE:
		evsel__hw_cache_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_SOFTWARE:
		evsel__sw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_TRACEPOINT:
		scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
		break;

	case PERF_TYPE_BREAKPOINT:
		evsel__bp_name(evsel, bf, sizeof(bf));
		break;

	case PERF_PMU_TYPE_TOOL:
		scnprintf(bf, sizeof(bf), "%s", evsel__tool_pmu_event_name(evsel));
		break;

	default:
		scnprintf(bf, sizeof(bf), "unknown attr type: %d",
			  evsel->core.attr.type);
		break;
	}

	evsel->name = strdup(bf);

	if (evsel->name)
		return evsel->name;
out_unknown:
	return "unknown";
}

bool evsel__name_is(struct evsel *evsel, const char *name)
{
	return !strcmp(evsel__name(evsel), name);
}

const char *evsel__metric_id(const struct evsel *evsel)
{
	if (evsel->metric_id)
		return evsel->metric_id;

	if (evsel__is_tool(evsel))
		return evsel__tool_pmu_event_name(evsel);

	return "unknown";
}

const char *evsel__group_name(struct evsel *evsel)
{
	return evsel->group_name ?: "anon group";
}

/*
 * Returns the group details for the specified leader,
 * with the following rules:
 *
 *  For record -e '{cycles,instructions}'
 *    'anon group { cycles:u, instructions:u }'
 *
 *  For record -e 'cycles,instructions' and report --group
 *    'cycles:u, instructions:u'
 */
int evsel__group_desc(struct evsel *evsel, char *buf, size_t size)
{
	int ret = 0;
	bool first = true;
	struct evsel *pos;
	const char *group_name = evsel__group_name(evsel);

	if (!evsel->forced_leader)
		ret = scnprintf(buf, size, "%s { ", group_name);

	for_each_group_evsel(pos, evsel) {
		if (symbol_conf.skip_empty &&
		    evsel__hists(pos)->stats.nr_samples == 0)
			continue;

		ret += scnprintf(buf + ret, size - ret, "%s%s",
				 first ? "" : ", ", evsel__name(pos));
		first = false;
	}

	if (!evsel->forced_leader)
		ret += scnprintf(buf + ret, size - ret, " }");

	return ret;
}

static void __evsel__config_callchain(struct evsel *evsel, struct record_opts *opts,
				      struct callchain_param *param)
{
	bool function = evsel__is_function_event(evsel);
	struct perf_event_attr *attr = &evsel->core.attr;

	evsel__set_sample_bit(evsel, CALLCHAIN);

	attr->sample_max_stack = param->max_stack;

	if (opts->kernel_callchains)
		attr->exclude_callchain_user = 1;
	if (opts->user_callchains)
		attr->exclude_callchain_kernel = 1;
	if (param->record_mode == CALLCHAIN_LBR) {
		if (!opts->branch_stack) {
			if (attr->exclude_user) {
				pr_warning("LBR callstack option is only available "
					   "to get user callchain information. "
					   "Falling back to framepointers.\n");
			} else {
				evsel__set_sample_bit(evsel, BRANCH_STACK);
				attr->branch_sample_type = PERF_SAMPLE_BRANCH_USER |
							   PERF_SAMPLE_BRANCH_CALL_STACK |
							   PERF_SAMPLE_BRANCH_NO_CYCLES |
							   PERF_SAMPLE_BRANCH_NO_FLAGS |
							   PERF_SAMPLE_BRANCH_HW_INDEX;
			}
		} else
			pr_warning("Cannot use LBR callstack with branch stack. "
				   "Falling back to framepointers.\n");
	}

	if (param->record_mode == CALLCHAIN_DWARF) {
		if (!function) {
			const char *arch = perf_env__arch(evsel__env(evsel));

			evsel__set_sample_bit(evsel, REGS_USER);
			evsel__set_sample_bit(evsel, STACK_USER);
			if (opts->sample_user_regs &&
			    DWARF_MINIMAL_REGS(arch) != arch__user_reg_mask()) {
				attr->sample_regs_user |= DWARF_MINIMAL_REGS(arch);
				pr_warning("WARNING: The use of --call-graph=dwarf may require all the user registers, "
					   "specifying a subset with --user-regs may render DWARF unwinding unreliable, "
					   "so the minimal registers set (IP, SP) is explicitly forced.\n");
			} else {
				attr->sample_regs_user |= arch__user_reg_mask();
			}
			attr->sample_stack_user = param->dump_size;
			attr->exclude_callchain_user = 1;
		} else {
			pr_info("Cannot use DWARF unwind for function trace event,"
				" falling back to framepointers.\n");
		}
	}

	if (function) {
		pr_info("Disabling user space callchains for function trace event.\n");
		attr->exclude_callchain_user = 1;
	}
}

void evsel__config_callchain(struct evsel *evsel, struct record_opts *opts,
			     struct callchain_param *param)
{
	if (param->enabled)
		return __evsel__config_callchain(evsel, opts, param);
}

static void evsel__reset_callgraph(struct evsel *evsel, struct callchain_param *param)
{
	struct perf_event_attr *attr = &evsel->core.attr;

	evsel__reset_sample_bit(evsel, CALLCHAIN);
	if (param->record_mode == CALLCHAIN_LBR) {
		evsel__reset_sample_bit(evsel, BRANCH_STACK);
		attr->branch_sample_type &= ~(PERF_SAMPLE_BRANCH_USER |
					      PERF_SAMPLE_BRANCH_CALL_STACK |
					      PERF_SAMPLE_BRANCH_HW_INDEX);
	}
	if (param->record_mode == CALLCHAIN_DWARF) {
		evsel__reset_sample_bit(evsel, REGS_USER);
		evsel__reset_sample_bit(evsel, STACK_USER);
	}
}

static void evsel__apply_config_terms(struct evsel *evsel,
				      struct record_opts *opts, bool track)
{
	struct evsel_config_term *term;
	struct list_head *config_terms = &evsel->config_terms;
	struct perf_event_attr *attr = &evsel->core.attr;
	/* callgraph default */
	struct callchain_param param = {
		.record_mode = callchain_param.record_mode,
	};
	u32 dump_size = 0;
	int max_stack = 0;
	const char *callgraph_buf = NULL;

	list_for_each_entry(term, config_terms, list) {
		switch (term->type) {
		case EVSEL__CONFIG_TERM_PERIOD:
			if (!(term->weak && opts->user_interval != ULLONG_MAX)) {
				attr->sample_period = term->val.period;
				attr->freq = 0;
				evsel__reset_sample_bit(evsel, PERIOD);
			}
			break;
		case EVSEL__CONFIG_TERM_FREQ:
			if (!(term->weak && opts->user_freq != UINT_MAX)) {
				attr->sample_freq = term->val.freq;
				attr->freq = 1;
				evsel__set_sample_bit(evsel, PERIOD);
			}
			break;
		case EVSEL__CONFIG_TERM_TIME:
			if (term->val.time)
				evsel__set_sample_bit(evsel, TIME);
			else
				evsel__reset_sample_bit(evsel, TIME);
			break;
		case EVSEL__CONFIG_TERM_CALLGRAPH:
			callgraph_buf = term->val.str;
			break;
		case EVSEL__CONFIG_TERM_BRANCH:
			if (term->val.str && strcmp(term->val.str, "no")) {
				evsel__set_sample_bit(evsel, BRANCH_STACK);
				parse_branch_str(term->val.str,
						 &attr->branch_sample_type);
			} else
				evsel__reset_sample_bit(evsel, BRANCH_STACK);
			break;
		case EVSEL__CONFIG_TERM_STACK_USER:
			dump_size = term->val.stack_user;
			break;
		case EVSEL__CONFIG_TERM_MAX_STACK:
			max_stack = term->val.max_stack;
			break;
		case EVSEL__CONFIG_TERM_MAX_EVENTS:
			evsel->max_events = term->val.max_events;
			break;
		case EVSEL__CONFIG_TERM_INHERIT:
			/*
			 * attr->inherit should have already been set by
			 * evsel__config. If the user explicitly set
			 * inherit via config terms, override the global
			 * opts->no_inherit setting.
			 */
			attr->inherit = term->val.inherit ? 1 : 0;
			break;
		case EVSEL__CONFIG_TERM_OVERWRITE:
			attr->write_backward = term->val.overwrite ? 1 : 0;
			break;
		case EVSEL__CONFIG_TERM_DRV_CFG:
			break;
		case EVSEL__CONFIG_TERM_PERCORE:
			break;
		case EVSEL__CONFIG_TERM_AUX_OUTPUT:
			attr->aux_output = term->val.aux_output ? 1 : 0;
			break;
		case EVSEL__CONFIG_TERM_AUX_SAMPLE_SIZE:
			/* Already applied by auxtrace */
			break;
		case EVSEL__CONFIG_TERM_CFG_CHG:
			break;
		default:
			break;
		}
	}

	/* User explicitly set per-event callgraph, clear the old setting and reset. */
	if ((callgraph_buf != NULL) || (dump_size > 0) || max_stack) {
		bool sample_address = false;

		if (max_stack) {
			param.max_stack = max_stack;
			if (callgraph_buf == NULL)
				callgraph_buf = "fp";
		}

		/* parse callgraph parameters */
		if (callgraph_buf != NULL) {
			if (!strcmp(callgraph_buf, "no")) {
				param.enabled = false;
				param.record_mode = CALLCHAIN_NONE;
			} else {
				param.enabled = true;
				if (parse_callchain_record(callgraph_buf, &param)) {
					pr_err("per-event callgraph setting for %s failed. "
					       "Apply callgraph global setting for it\n",
					       evsel->name);
					return;
				}
				if (param.record_mode == CALLCHAIN_DWARF)
					sample_address = true;
			}
		}
		if (dump_size > 0) {
			dump_size = round_up(dump_size, sizeof(u64));
			param.dump_size = dump_size;
		}

		/* If global callgraph set, clear it */
		if (callchain_param.enabled)
			evsel__reset_callgraph(evsel, &callchain_param);

		/* set perf-event callgraph */
		if (param.enabled) {
			if (sample_address) {
				evsel__set_sample_bit(evsel, ADDR);
				evsel__set_sample_bit(evsel, DATA_SRC);
				evsel->core.attr.mmap_data = track;
			}
			evsel__config_callchain(evsel, opts, &param);
		}
	}
}

struct evsel_config_term *__evsel__get_config_term(struct evsel *evsel, enum evsel_term_type type)
{
	struct evsel_config_term *term, *found_term = NULL;

	list_for_each_entry(term, &evsel->config_terms, list) {
		if (term->type == type)
			found_term = term;
	}

	return found_term;
}

void __weak arch_evsel__set_sample_weight(struct evsel *evsel)
{
	evsel__set_sample_bit(evsel, WEIGHT);
}

void __weak arch__post_evsel_config(struct evsel *evsel __maybe_unused,
				    struct perf_event_attr *attr __maybe_unused)
{
}

static void evsel__set_default_freq_period(struct record_opts *opts,
					   struct perf_event_attr *attr)
{
	if (opts->freq) {
		attr->freq = 1;
		attr->sample_freq = opts->freq;
	} else {
		attr->sample_period = opts->default_interval;
	}
}

static bool evsel__is_offcpu_event(struct evsel *evsel)
{
	return evsel__is_bpf_output(evsel) && evsel__name_is(evsel, OFFCPU_EVENT);
}

/*
 * The enable_on_exec/disabled value strategy:
 *
 *  1) For any type of traced program:
 *	- all independent events and group leaders are disabled
 *	- all group members are enabled
 *
 *     Group members are ruled by group leaders. They need to
 *     be enabled, because the group scheduling relies on that.
 *
 *  2) For traced programs executed by perf:
 *	- all independent events and group leaders have
 *	  enable_on_exec set
 *	- we don't specifically enable or disable any event during
 *	  the record command
 *
 *     Independent events and group leaders are initially disabled
 *     and get enabled by exec. Group members are ruled by group
 *     leaders as stated in 1).
 *
 *  3) For traced programs attached by perf (pid/tid):
 *	- we specifically enable or disable all events during
 *	  the record command
 *
 *     When attaching events to already running traced programs we
 *     enable/disable events specifically, as there's no
 *     initial traced exec call.
 */
void evsel__config(struct evsel *evsel, struct record_opts *opts,
		   struct callchain_param *callchain)
{
	struct evsel *leader = evsel__leader(evsel);
	struct perf_event_attr *attr = &evsel->core.attr;
	int track = evsel->tracking;
	bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread;

	attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
	attr->inherit = target__has_cpu(&opts->target) ? 0 : !opts->no_inherit;
	attr->write_backward = opts->overwrite ? 1 : 0;
	attr->read_format = PERF_FORMAT_LOST;

	evsel__set_sample_bit(evsel, IP);
	evsel__set_sample_bit(evsel, TID);

	if (evsel->sample_read) {
		evsel__set_sample_bit(evsel, READ);

		/*
		 * We need ID even in the case of a single event, because
		 * PERF_SAMPLE_READ processes ID-specific data.
		 */
		evsel__set_sample_id(evsel, false);

		/*
		 * Apply the group format only if we belong to a group
		 * with more than one member.
		 */
		if (leader->core.nr_members > 1) {
			attr->read_format |= PERF_FORMAT_GROUP;
		}

		/*
		 * Inherit + SAMPLE_READ requires SAMPLE_TID in the read_format
		 */
		if (attr->inherit) {
			evsel__set_sample_bit(evsel, TID);
			evsel->core.attr.read_format |=
				PERF_FORMAT_ID;
		}
	}

	/*
	 * We default some events to have a default interval. But keep
	 * it a weak assumption overridable by the user.
	 */
	if ((evsel->is_libpfm_event && !attr->sample_period) ||
	    (!evsel->is_libpfm_event && (!attr->sample_period ||
					 opts->user_freq != UINT_MAX ||
					 opts->user_interval != ULLONG_MAX)))
		evsel__set_default_freq_period(opts, attr);

	/*
	 * If attr->freq was set (here or earlier), ask for period
	 * to be sampled.
	 */
	if (attr->freq)
		evsel__set_sample_bit(evsel, PERIOD);

	if (opts->no_samples)
		attr->sample_freq = 0;

	if (opts->inherit_stat) {
		evsel->core.attr.read_format |=
			PERF_FORMAT_TOTAL_TIME_ENABLED |
			PERF_FORMAT_TOTAL_TIME_RUNNING |
			PERF_FORMAT_ID;
		attr->inherit_stat = 1;
	}

	if (opts->sample_address) {
		evsel__set_sample_bit(evsel, ADDR);
		attr->mmap_data = track;
	}

	/*
	 * We don't allow user space callchains for the function trace
	 * event, due to issues with page faults while tracing the page
	 * fault handler and its overall tricky nature.
	 */
	if (evsel__is_function_event(evsel))
		evsel->core.attr.exclude_callchain_user = 1;

	if (callchain && callchain->enabled && !evsel->no_aux_samples)
		evsel__config_callchain(evsel, opts, callchain);

	if (opts->sample_intr_regs && !evsel->no_aux_samples &&
	    !evsel__is_dummy_event(evsel)) {
		attr->sample_regs_intr = opts->sample_intr_regs;
		evsel__set_sample_bit(evsel, REGS_INTR);
	}

	if (opts->sample_user_regs && !evsel->no_aux_samples &&
	    !evsel__is_dummy_event(evsel)) {
		attr->sample_regs_user |= opts->sample_user_regs;
		evsel__set_sample_bit(evsel, REGS_USER);
	}

	if (target__has_cpu(&opts->target) || opts->sample_cpu)
		evsel__set_sample_bit(evsel, CPU);

	/*
	 * When the user explicitly disabled time don't force it here.
	 */
	if (opts->sample_time &&
	    (!perf_missing_features.sample_id_all &&
	    (!opts->no_inherit || target__has_cpu(&opts->target) || per_cpu ||
	     opts->sample_time_set)))
		evsel__set_sample_bit(evsel, TIME);

	if (opts->raw_samples && !evsel->no_aux_samples) {
		evsel__set_sample_bit(evsel, TIME);
		evsel__set_sample_bit(evsel, RAW);
		evsel__set_sample_bit(evsel, CPU);
	}

	if (opts->sample_address)
		evsel__set_sample_bit(evsel, DATA_SRC);

	if (opts->sample_phys_addr)
		evsel__set_sample_bit(evsel, PHYS_ADDR);

	if (opts->no_buffering) {
		attr->watermark = 0;
		attr->wakeup_events = 1;
	}
	if (opts->branch_stack && !evsel->no_aux_samples) {
		evsel__set_sample_bit(evsel, BRANCH_STACK);
		attr->branch_sample_type = opts->branch_stack;
	}

	if (opts->sample_weight)
		arch_evsel__set_sample_weight(evsel);

	attr->task = track;
	attr->mmap = track;
	attr->mmap2 = track && !perf_missing_features.mmap2;
	attr->comm = track;
	attr->build_id = track && opts->build_id;

	/*
	 * ksymbol is tracked separately with text poke because it needs to be
	 * system wide and enabled immediately.
	 */
	if (!opts->text_poke)
		attr->ksymbol = track && !perf_missing_features.ksymbol;
	attr->bpf_event = track && !opts->no_bpf_event && !perf_missing_features.bpf;

	if (opts->record_namespaces)
		attr->namespaces = track;

	if (opts->record_cgroup) {
		attr->cgroup = track && !perf_missing_features.cgroup;
		evsel__set_sample_bit(evsel, CGROUP);
	}

	if (opts->sample_data_page_size)
		evsel__set_sample_bit(evsel, DATA_PAGE_SIZE);

	if (opts->sample_code_page_size)
		evsel__set_sample_bit(evsel, CODE_PAGE_SIZE);

	if (opts->record_switch_events)
		attr->context_switch = track;

	if (opts->sample_transaction)
		evsel__set_sample_bit(evsel, TRANSACTION);

	if (opts->running_time) {
		evsel->core.attr.read_format |=
			PERF_FORMAT_TOTAL_TIME_ENABLED |
			PERF_FORMAT_TOTAL_TIME_RUNNING;
	}

	/*
	 * XXX see the function comment above
	 *
	 * Disabling only independent events or group leaders,
	 * keeping group members enabled.
	 */
	if (evsel__is_group_leader(evsel))
		attr->disabled = 1;

	/*
	 * Setting enable_on_exec for independent events and
	 * group leaders for traced programs executed by perf.
	 */
	if (target__none(&opts->target) && evsel__is_group_leader(evsel) &&
	    !opts->target.initial_delay)
		attr->enable_on_exec = 1;

	if (evsel->immediate) {
		attr->disabled = 0;
		attr->enable_on_exec = 0;
	}

	clockid = opts->clockid;
	if (opts->use_clockid) {
		attr->use_clockid = 1;
		attr->clockid = opts->clockid;
	}

	if (evsel->precise_max)
		attr->precise_ip = 3;

	if (opts->all_user) {
		attr->exclude_kernel = 1;
		attr->exclude_user = 0;
	}

	if (opts->all_kernel) {
		attr->exclude_kernel = 0;
		attr->exclude_user = 1;
	}

	if (evsel->core.own_cpus || evsel->unit)
		evsel->core.attr.read_format |= PERF_FORMAT_ID;

	/*
	 * Apply event-specific term settings; they override any
	 * global configuration.
	 */
	evsel__apply_config_terms(evsel, opts, track);

	evsel->ignore_missing_thread = opts->ignore_missing_thread;

	/* The --period option takes precedence. */
	if (opts->period_set) {
		if (opts->period)
			evsel__set_sample_bit(evsel, PERIOD);
		else
			evsel__reset_sample_bit(evsel, PERIOD);
	}

	/*
	 * A dummy event never triggers any actual counter and therefore
	 * cannot be used with branch_stack.
	 *
	 * For initial_delay, a dummy event is added implicitly.
	 * The software event will trigger -EOPNOTSUPP error out,
	 * if BRANCH_STACK bit is set.
	 */
	if (evsel__is_dummy_event(evsel))
		evsel__reset_sample_bit(evsel, BRANCH_STACK);

	if (evsel__is_offcpu_event(evsel))
		evsel->core.attr.sample_type &= OFFCPU_SAMPLE_TYPES;

	arch__post_evsel_config(evsel, attr);
}

int evsel__set_filter(struct evsel *evsel, const char *filter)
{
	char *new_filter = strdup(filter);

	if (new_filter != NULL) {
		free(evsel->filter);
		evsel->filter = new_filter;
		return 0;
	}

	return -1;
}

static int evsel__append_filter(struct evsel *evsel, const char *fmt, const char *filter)
{
	char *new_filter;

	if (evsel->filter == NULL)
		return evsel__set_filter(evsel, filter);

	if (asprintf(&new_filter, fmt, evsel->filter, filter) > 0) {
		free(evsel->filter);
		evsel->filter = new_filter;
		return 0;
	}

	return -1;
}

int evsel__append_tp_filter(struct evsel *evsel, const char *filter)
{
	return evsel__append_filter(evsel, "(%s) && (%s)", filter);
}

int evsel__append_addr_filter(struct evsel *evsel, const char *filter)
{
	return evsel__append_filter(evsel, "%s,%s", filter);
}

/* Caller has to clear disabled after going through all CPUs. */
int evsel__enable_cpu(struct evsel *evsel, int cpu_map_idx)
{
	return perf_evsel__enable_cpu(&evsel->core, cpu_map_idx);
}

int evsel__enable(struct evsel *evsel)
{
	int err = perf_evsel__enable(&evsel->core);

	if (!err)
		evsel->disabled = false;
	return err;
}

/* Caller has to set disabled after going through all CPUs. */
int evsel__disable_cpu(struct evsel *evsel, int cpu_map_idx)
{
	return perf_evsel__disable_cpu(&evsel->core, cpu_map_idx);
}

int evsel__disable(struct evsel *evsel)
{
	int err = perf_evsel__disable(&evsel->core);
	/*
	 * We mark it disabled here so that tools that disable an event can
	 * ignore events after they disable it. I.e. the ring buffer may have
	 * already a few more events queued up before the kernel got the stop
	 * request.
	 */
	if (!err)
		evsel->disabled = true;

	return err;
}

void free_config_terms(struct list_head *config_terms)
{
	struct evsel_config_term *term, *h;

	list_for_each_entry_safe(term, h, config_terms, list) {
		list_del_init(&term->list);
		if (term->free_str)
			zfree(&term->val.str);
		free(term);
	}
}

static void evsel__free_config_terms(struct evsel *evsel)
{
	free_config_terms(&evsel->config_terms);
}

void evsel__exit(struct evsel *evsel)
{
	assert(list_empty(&evsel->core.node));
	assert(evsel->evlist == NULL);
	bpf_counter__destroy(evsel);
	perf_bpf_filter__destroy(evsel);
	evsel__free_counts(evsel);
	perf_evsel__free_fd(&evsel->core);
	perf_evsel__free_id(&evsel->core);
	evsel__free_config_terms(evsel);
	cgroup__put(evsel->cgrp);
	perf_cpu_map__put(evsel->core.cpus);
	perf_cpu_map__put(evsel->core.own_cpus);
	perf_thread_map__put(evsel->core.threads);
	zfree(&evsel->group_name);
	zfree(&evsel->name);
	zfree(&evsel->filter);
	zfree(&evsel->group_pmu_name);
	zfree(&evsel->unit);
	zfree(&evsel->metric_id);
	evsel__zero_per_pkg(evsel);
	hashmap__free(evsel->per_pkg_mask);
	evsel->per_pkg_mask = NULL;
	zfree(&evsel->metric_events);
	perf_evsel__object.fini(evsel);
	if (evsel__tool_event(evsel) == TOOL_PMU__EVENT_SYSTEM_TIME ||
	    evsel__tool_event(evsel) == TOOL_PMU__EVENT_USER_TIME)
		xyarray__delete(evsel->start_times);
}

void evsel__delete(struct evsel *evsel)
{
	if (!evsel)
		return;

	evsel__exit(evsel);
	free(evsel);
}

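/*
 * Replace the saved previous raw counts with @count and turn @count into
 * the delta since that previous read, e.g. for interval-style re-reads.
 */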
void evsel__compute_deltas(struct evsel *evsel, int cpu_map_idx, int thread,
			   struct perf_counts_values *count)
{
	struct perf_counts_values tmp;

	if (!evsel->prev_raw_counts)
		return;

	tmp = *perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread);
	*perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread) = *count;

	count->val = count->val - tmp.val;
	count->ena = count->ena - tmp.ena;
	count->run = count->run - tmp.run;
}

static int evsel__read_one(struct evsel *evsel, int cpu_map_idx, int thread)
{
	struct perf_counts_values *count = perf_counts(evsel->counts, cpu_map_idx, thread);

	return perf_evsel__read(&evsel->core, cpu_map_idx, thread, count);
}

static int evsel__read_retire_lat(struct evsel *evsel, int cpu_map_idx, int thread)
{
	return tpebs_set_evsel(evsel, cpu_map_idx, thread);
}

static void evsel__set_count(struct evsel *counter, int cpu_map_idx, int thread,
			     u64 val, u64 ena, u64 run, u64 lost)
{
	struct perf_counts_values *count;

	count = perf_counts(counter->counts, cpu_map_idx, thread);

	if (counter->retire_lat) {
		evsel__read_retire_lat(counter, cpu_map_idx, thread);
		perf_counts__set_loaded(counter->counts, cpu_map_idx, thread, true);
		return;
	}

	count->val = val;
	count->ena = ena;
	count->run = run;
	count->lost = lost;

	perf_counts__set_loaded(counter->counts, cpu_map_idx, thread, true);
}

static bool evsel__group_has_tpebs(struct evsel *leader)
{
	struct evsel *evsel;

	for_each_group_evsel(evsel, leader) {
		if (evsel__is_retire_lat(evsel))
			return true;
	}
	return false;
}

static u64 evsel__group_read_nr_members(struct evsel *leader)
{
	u64 nr = leader->core.nr_members;
	struct evsel *evsel;

	for_each_group_evsel(evsel, leader) {
		if (evsel__is_retire_lat(evsel))
			nr--;
	}
	return nr;
}

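/*
 * Mirrors the kernel's PERF_FORMAT_GROUP read layout, minus retire_lat
 * (tpebs) members, which are read separately and have no kernel fd. e.g.
 * with PERF_FORMAT_GROUP | PERF_FORMAT_ID and two remaining members this
 * is 8 (nr) + 2 * 16 (value + id) = 40 bytes.
 */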
static u64 evsel__group_read_size(struct evsel *leader)
{
	u64 read_format = leader->core.attr.read_format;
	int entry = sizeof(u64); /* value */
	int size = 0;
	int nr = 1;

	if (!evsel__group_has_tpebs(leader))
		return perf_evsel__read_size(&leader->core);

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(u64);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(u64);

	if (read_format & PERF_FORMAT_ID)
		entry += sizeof(u64);

	if (read_format & PERF_FORMAT_LOST)
		entry += sizeof(u64);

	if (read_format & PERF_FORMAT_GROUP) {
		nr = evsel__group_read_nr_members(leader);
		size += sizeof(u64);
	}

	size += entry * nr;
	return size;
}

static int evsel__process_group_data(struct evsel *leader, int cpu_map_idx, int thread, u64 *data)
{
	u64 read_format = leader->core.attr.read_format;
	struct sample_read_value *v;
	u64 nr, ena = 0, run = 0, lost = 0;

	nr = *data++;

	if (nr != evsel__group_read_nr_members(leader))
		return -EINVAL;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		ena = *data++;

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		run = *data++;

	v = (void *)data;
	sample_read_group__for_each(v, nr, read_format) {
		struct evsel *counter;

		counter = evlist__id2evsel(leader->evlist, v->id);
		if (!counter)
			return -EINVAL;

		if (read_format & PERF_FORMAT_LOST)
			lost = v->lost;

		evsel__set_count(counter, cpu_map_idx, thread, v->value, ena, run, lost);
	}

	return 0;
}

static int evsel__read_group(struct evsel *leader, int cpu_map_idx, int thread)
{
	struct perf_stat_evsel *ps = leader->stats;
	u64 read_format = leader->core.attr.read_format;
	int size = evsel__group_read_size(leader);
	u64 *data = ps->group_data;

	if (!(read_format & PERF_FORMAT_ID))
		return -EINVAL;

	if (!evsel__is_group_leader(leader))
		return -EINVAL;

	if (!data) {
		data = zalloc(size);
		if (!data)
			return -ENOMEM;

		ps->group_data = data;
	}

	if (FD(leader, cpu_map_idx, thread) < 0)
		return -EINVAL;

	if (readn(FD(leader, cpu_map_idx, thread), data, size) <= 0)
		return -errno;

	return evsel__process_group_data(leader, cpu_map_idx, thread, data);
}

bool __evsel__match(const struct evsel *evsel, u32 type, u64 config)
{
	u32 e_type = evsel->core.attr.type;
	u64 e_config = evsel->core.attr.config;

	if (e_type != type) {
		return type == PERF_TYPE_HARDWARE && evsel->pmu && evsel->pmu->is_core &&
		       evsel->alternate_hw_config == config;
	}

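	/*
	 * On hybrid systems the high bits of attr.config carry the PMU type
	 * (the extended type), so mask them off before comparing configs.
	 */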
	if ((type == PERF_TYPE_HARDWARE || type == PERF_TYPE_HW_CACHE) &&
	    perf_pmus__supports_extended_type())
		e_config &= PERF_HW_EVENT_MASK;

	return e_config == config;
}

int evsel__read_counter(struct evsel *evsel, int cpu_map_idx, int thread)
{
	if (evsel__is_tool(evsel))
		return evsel__tool_pmu_read(evsel, cpu_map_idx, thread);

	if (evsel__is_hwmon(evsel))
		return evsel__hwmon_pmu_read(evsel, cpu_map_idx, thread);

	if (evsel__is_retire_lat(evsel))
		return evsel__read_retire_lat(evsel, cpu_map_idx, thread);

	if (evsel->core.attr.read_format & PERF_FORMAT_GROUP)
		return evsel__read_group(evsel, cpu_map_idx, thread);

	return evsel__read_one(evsel, cpu_map_idx, thread);
}

int __evsel__read_on_cpu(struct evsel *evsel, int cpu_map_idx, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;
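	/*
	 * nv u64s: just the value, or value + time_enabled + time_running
	 * when scaling (assuming the event was opened with the matching
	 * read_format).
	 */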
1819
1820 if (FD(evsel, cpu_map_idx, thread) < 0)
1821 return -EINVAL;
1822
1823 if (evsel->counts == NULL && evsel__alloc_counts(evsel) < 0)
1824 return -ENOMEM;
1825
1826 if (readn(FD(evsel, cpu_map_idx, thread), &count, nv * sizeof(u64)) <= 0)
1827 return -errno;
1828
1829 evsel__compute_deltas(evsel, cpu_map_idx, thread, &count);
1830 perf_counts_values__scale(&count, scale, NULL);
1831 *perf_counts(evsel->counts, cpu_map_idx, thread) = count;
1832 return 0;
1833 }
1834
static int evsel__match_other_cpu(struct evsel *evsel, struct evsel *other,
				  int cpu_map_idx)
{
	struct perf_cpu cpu;

	cpu = perf_cpu_map__cpu(evsel->core.cpus, cpu_map_idx);
	return perf_cpu_map__idx(other->core.cpus, cpu);
}

static int evsel__hybrid_group_cpu_map_idx(struct evsel *evsel, int cpu_map_idx)
{
	struct evsel *leader = evsel__leader(evsel);

	if ((evsel__is_hybrid(evsel) && !evsel__is_hybrid(leader)) ||
	    (!evsel__is_hybrid(evsel) && evsel__is_hybrid(leader))) {
		return evsel__match_other_cpu(evsel, leader, cpu_map_idx);
	}

	return cpu_map_idx;
}

static int get_group_fd(struct evsel *evsel, int cpu_map_idx, int thread)
{
	struct evsel *leader = evsel__leader(evsel);
	int fd;

	if (evsel__is_group_leader(evsel))
		return -1;

	/*
	 * The leader must already be processed/opened;
	 * if not, it's a bug.
	 */
	BUG_ON(!leader->core.fd);

	cpu_map_idx = evsel__hybrid_group_cpu_map_idx(evsel, cpu_map_idx);
	if (cpu_map_idx == -1)
		return -1;

	fd = FD(leader, cpu_map_idx, thread);
	BUG_ON(fd == -1 && !leader->skippable);

	/*
	 * When the leader has been skipped, return -2 to distinguish it from
	 * the no-group-leader case.
	 */
	return fd == -1 ? -2 : fd;
}

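/*
 * Drop the fd column for a removed thread by shifting the remaining columns
 * left. E.g. with threads {A,B,C} and thread_idx 1 (B), each CPU row goes
 * from [fdA, fdB, fdC] to [fdA, fdC, fdC]; the caller then shrinks the
 * thread map, so the stale trailing entry is never used again.
 */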
static void evsel__remove_fd(struct evsel *pos, int nr_cpus, int nr_threads, int thread_idx)
{
	for (int cpu = 0; cpu < nr_cpus; cpu++)
		for (int thread = thread_idx; thread < nr_threads - 1; thread++)
			FD(pos, cpu, thread) = FD(pos, cpu, thread + 1);
}

static int update_fds(struct evsel *evsel,
		      int nr_cpus, int cpu_map_idx,
		      int nr_threads, int thread_idx)
{
	struct evsel *pos;

	if (cpu_map_idx >= nr_cpus || thread_idx >= nr_threads)
		return -EINVAL;

	evlist__for_each_entry(evsel->evlist, pos) {
		nr_cpus = pos != evsel ? nr_cpus : cpu_map_idx;

		evsel__remove_fd(pos, nr_cpus, nr_threads, thread_idx);

		/*
		 * Since the fds for the next evsel have not been created yet,
		 * there is no need to iterate over the whole event list.
		 */
		if (pos == evsel)
			break;
	}
	return 0;
}

static bool evsel__ignore_missing_thread(struct evsel *evsel,
					 int nr_cpus, int cpu_map_idx,
					 struct perf_thread_map *threads,
					 int thread, int err)
{
	pid_t ignore_pid = perf_thread_map__pid(threads, thread);

	if (!evsel->ignore_missing_thread)
		return false;

	/* The system wide setup does not work with threads. */
	if (evsel->core.system_wide)
		return false;

	/* -ESRCH is the perf event syscall errno for PIDs that no longer exist. */
	if (err != -ESRCH)
		return false;

	/* If there's only one thread, let it fail. */
	if (threads->nr == 1)
		return false;

	/*
	 * Remove the fds for the missing thread first,
	 * because thread_map__remove() will decrease threads->nr.
	 */
	if (update_fds(evsel, nr_cpus, cpu_map_idx, threads->nr, thread))
		return false;

	if (thread_map__remove(threads, thread))
		return false;

	pr_warning("WARNING: Ignored open failure for pid %d\n",
		   ignore_pid);
	return true;
}

static int __open_attr__fprintf(FILE *fp, const char *name, const char *val,
				void *priv __maybe_unused)
{
	return fprintf(fp, " %-32s %s\n", name, val);
}

static void display_attr(struct perf_event_attr *attr)
{
	if (verbose >= 2 || debug_peo_args) {
		fprintf(stderr, "%.60s\n", graph_dotted_line);
		fprintf(stderr, "perf_event_attr:\n");
		perf_event_attr__fprintf(stderr, attr, __open_attr__fprintf, NULL);
		fprintf(stderr, "%.60s\n", graph_dotted_line);
	}
}

bool evsel__precise_ip_fallback(struct evsel *evsel)
{
	/* Do not try less precise if not requested. */
	if (!evsel->precise_max)
		return false;

	/*
	 * We tried all the precise_ip values, and it's
	 * still failing, so leave it to the standard fallback.
	 */
	if (!evsel->core.attr.precise_ip) {
		evsel->core.attr.precise_ip = evsel->precise_ip_original;
		return false;
	}

	if (!evsel->precise_ip_original)
		evsel->precise_ip_original = evsel->core.attr.precise_ip;

	evsel->core.attr.precise_ip--;
	pr_debug2_peo("decreasing precise_ip by one (%d)\n", evsel->core.attr.precise_ip);
	display_attr(&evsel->core.attr);
	return true;
}

static struct perf_cpu_map *empty_cpu_map;
static struct perf_thread_map *empty_thread_map;

static int __evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus,
				 struct perf_thread_map *threads)
{
	int ret = 0;
	int nthreads = perf_thread_map__nr(threads);

	if ((perf_missing_features.write_backward && evsel->core.attr.write_backward) ||
	    (perf_missing_features.aux_output && evsel->core.attr.aux_output))
		return -EINVAL;

	if (cpus == NULL) {
		if (empty_cpu_map == NULL) {
			empty_cpu_map = perf_cpu_map__new_any_cpu();
			if (empty_cpu_map == NULL)
				return -ENOMEM;
		}

		cpus = empty_cpu_map;
	}

	if (threads == NULL) {
		if (empty_thread_map == NULL) {
			empty_thread_map = thread_map__new_by_tid(-1);
			if (empty_thread_map == NULL)
				return -ENOMEM;
		}

		threads = empty_thread_map;
	}

	if (evsel->core.fd == NULL &&
	    perf_evsel__alloc_fd(&evsel->core, perf_cpu_map__nr(cpus), nthreads) < 0)
		return -ENOMEM;

	if (evsel__is_tool(evsel))
		ret = evsel__tool_pmu_prepare_open(evsel, cpus, nthreads);

	evsel->open_flags = PERF_FLAG_FD_CLOEXEC;
	if (evsel->cgrp)
		evsel->open_flags |= PERF_FLAG_PID_CGROUP;

	return ret;
}

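/*
 * Clear attr bits for features the running kernel is known to lack, so that
 * sys_perf_event_open() is not retried into a guaranteed failure. E.g. once
 * perf_missing_features.read_lost is set, PERF_FORMAT_LOST is masked out of
 * read_format before every open.
 */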
static void evsel__disable_missing_features(struct evsel *evsel)
{
	if (perf_missing_features.inherit_sample_read && evsel->core.attr.inherit &&
	    (evsel->core.attr.sample_type & PERF_SAMPLE_READ))
		evsel->core.attr.inherit = 0;
	if (perf_missing_features.branch_counters)
		evsel->core.attr.branch_sample_type &= ~PERF_SAMPLE_BRANCH_COUNTERS;
	if (perf_missing_features.read_lost)
		evsel->core.attr.read_format &= ~PERF_FORMAT_LOST;
	if (perf_missing_features.weight_struct) {
		evsel__set_sample_bit(evsel, WEIGHT);
		evsel__reset_sample_bit(evsel, WEIGHT_STRUCT);
	}
	if (perf_missing_features.clockid_wrong)
		evsel->core.attr.clockid = CLOCK_MONOTONIC; /* should always work */
	if (perf_missing_features.clockid) {
		evsel->core.attr.use_clockid = 0;
		evsel->core.attr.clockid = 0;
	}
	if (perf_missing_features.cloexec)
		evsel->open_flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC;
	if (perf_missing_features.mmap2)
		evsel->core.attr.mmap2 = 0;
	if (evsel->pmu && evsel->pmu->missing_features.exclude_guest)
		evsel->core.attr.exclude_guest = evsel->core.attr.exclude_host = 0;
	if (perf_missing_features.lbr_flags)
		evsel->core.attr.branch_sample_type &= ~(PERF_SAMPLE_BRANCH_NO_FLAGS |
							 PERF_SAMPLE_BRANCH_NO_CYCLES);
	if (perf_missing_features.group_read && evsel->core.attr.inherit)
		evsel->core.attr.read_format &= ~(PERF_FORMAT_GROUP|PERF_FORMAT_ID);
	if (perf_missing_features.ksymbol)
		evsel->core.attr.ksymbol = 0;
	if (perf_missing_features.bpf)
		evsel->core.attr.bpf_event = 0;
	if (perf_missing_features.branch_hw_idx)
		evsel->core.attr.branch_sample_type &= ~PERF_SAMPLE_BRANCH_HW_INDEX;
	if (perf_missing_features.sample_id_all)
		evsel->core.attr.sample_id_all = 0;
}

int evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus,
			struct perf_thread_map *threads)
{
	int err;

	err = __evsel__prepare_open(evsel, cpus, threads);
	if (err)
		return err;

	evsel__disable_missing_features(evsel);

	return err;
}

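/*
 * Probe whether the kernel accepts the given attr by actually opening the
 * event on the current process. If the plain open fails, retry with
 * exclude_kernel/exclude_hv/exclude_guest set, since perf_event_paranoid
 * restrictions (rather than the probed feature itself) may be what rejected
 * it; the exclude bits are reset before returning.
 */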
static bool has_attr_feature(struct perf_event_attr *attr, unsigned long flags)
{
	int fd = syscall(SYS_perf_event_open, attr, /*pid=*/0, /*cpu=*/-1,
			 /*group_fd=*/-1, flags);
	close(fd);

	if (fd < 0) {
		attr->exclude_kernel = 1;

		fd = syscall(SYS_perf_event_open, attr, /*pid=*/0, /*cpu=*/-1,
			     /*group_fd=*/-1, flags);
		close(fd);
	}

	if (fd < 0) {
		attr->exclude_hv = 1;

		fd = syscall(SYS_perf_event_open, attr, /*pid=*/0, /*cpu=*/-1,
			     /*group_fd=*/-1, flags);
		close(fd);
	}

	if (fd < 0) {
		attr->exclude_guest = 1;

		fd = syscall(SYS_perf_event_open, attr, /*pid=*/0, /*cpu=*/-1,
			     /*group_fd=*/-1, flags);
		close(fd);
	}

	attr->exclude_kernel = 0;
	attr->exclude_guest = 0;
	attr->exclude_hv = 0;

	return fd >= 0;
}

static void evsel__detect_missing_pmu_features(struct evsel *evsel)
{
	struct perf_event_attr attr = {
		.type = evsel->core.attr.type,
		.config = evsel->core.attr.config,
		.disabled = 1,
	};
	struct perf_pmu *pmu = evsel->pmu;
	int old_errno;

	old_errno = errno;

	if (pmu == NULL)
		pmu = evsel->pmu = evsel__find_pmu(evsel);

	if (pmu == NULL || pmu->missing_features.checked)
		goto out;

	/*
	 * Must probe features in the order they were added to the
	 * perf_event_attr interface. These are PMU-specific limitations,
	 * so we can detect them with the given hardware event and stop at
	 * the first one that succeeds.
	 */

	/* Please add new feature detection here. */

	attr.exclude_guest = 1;
	if (has_attr_feature(&attr, /*flags=*/0))
		goto found;
	pmu->missing_features.exclude_guest = true;
	pr_debug2("switching off exclude_guest for PMU %s\n", pmu->name);

found:
	pmu->missing_features.checked = true;
out:
	errno = old_errno;
}

static void evsel__detect_missing_brstack_features(struct evsel *evsel)
{
	static bool detection_done = false;
	struct perf_event_attr attr = {
		.type = evsel->core.attr.type,
		.config = evsel->core.attr.config,
		.disabled = 1,
		.sample_type = PERF_SAMPLE_BRANCH_STACK,
		.sample_period = 1000,
	};
	int old_errno;

	if (detection_done)
		return;

	old_errno = errno;

	/*
	 * Must probe features in the order they were added to the
	 * perf_event_attr interface. These are kernel core limitations, but
	 * specific to PMUs with branch stack, so we can detect them with the
	 * given hardware event and stop at the first one that succeeds.
	 */

	/* Please add new feature detection here. */

	attr.branch_sample_type = PERF_SAMPLE_BRANCH_COUNTERS;
	if (has_attr_feature(&attr, /*flags=*/0))
		goto found;
	perf_missing_features.branch_counters = true;
	pr_debug2("switching off branch counters support\n");

	attr.branch_sample_type = PERF_SAMPLE_BRANCH_HW_INDEX;
	if (has_attr_feature(&attr, /*flags=*/0))
		goto found;
	perf_missing_features.branch_hw_idx = true;
	pr_debug2("switching off branch HW index support\n");

	attr.branch_sample_type = PERF_SAMPLE_BRANCH_NO_CYCLES | PERF_SAMPLE_BRANCH_NO_FLAGS;
	if (has_attr_feature(&attr, /*flags=*/0))
		goto found;
	perf_missing_features.lbr_flags = true;
	pr_debug2_peo("switching off branch sample type no (cycles/flags)\n");

found:
	detection_done = true;
	errno = old_errno;
}

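/*
 * Returns true if the evsel still requests a feature found to be missing, in
 * which case the caller is expected to call evsel__disable_missing_features()
 * and retry the open. The kernel-core probing itself is done only once, with
 * a plain software event, and cached in perf_missing_features.
 */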
static bool evsel__detect_missing_features(struct evsel *evsel)
{
	static bool detection_done = false;
	struct perf_event_attr attr = {
		.type = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_TASK_CLOCK,
		.disabled = 1,
	};
	int old_errno;

	evsel__detect_missing_pmu_features(evsel);

	if (evsel__has_br_stack(evsel))
		evsel__detect_missing_brstack_features(evsel);

	if (detection_done)
		goto check;

	old_errno = errno;

	/*
	 * Must probe features in the order they were added to the
	 * perf_event_attr interface. These are kernel core limitations, not
	 * PMU-specific ones, so we can detect them with a software event and
	 * stop at the first one that succeeds.
	 */

	/* Please add new feature detection here. */

	attr.inherit = true;
	attr.sample_type = PERF_SAMPLE_READ;
	if (has_attr_feature(&attr, /*flags=*/0))
		goto found;
	perf_missing_features.inherit_sample_read = true;
	pr_debug2("Using PERF_SAMPLE_READ / :S modifier is not compatible with inherit, falling back to no-inherit.\n");
	attr.inherit = false;
	attr.sample_type = 0;

	attr.read_format = PERF_FORMAT_LOST;
	if (has_attr_feature(&attr, /*flags=*/0))
		goto found;
	perf_missing_features.read_lost = true;
	pr_debug2("switching off PERF_FORMAT_LOST support\n");
	attr.read_format = 0;

	attr.sample_type = PERF_SAMPLE_WEIGHT_STRUCT;
	if (has_attr_feature(&attr, /*flags=*/0))
		goto found;
	perf_missing_features.weight_struct = true;
	pr_debug2("switching off weight struct support\n");
	attr.sample_type = 0;

	attr.sample_type = PERF_SAMPLE_CODE_PAGE_SIZE;
	if (has_attr_feature(&attr, /*flags=*/0))
		goto found;
	perf_missing_features.code_page_size = true;
	pr_debug2_peo("Kernel has no PERF_SAMPLE_CODE_PAGE_SIZE support\n");
	attr.sample_type = 0;

	attr.sample_type = PERF_SAMPLE_DATA_PAGE_SIZE;
	if (has_attr_feature(&attr, /*flags=*/0))
		goto found;
	perf_missing_features.data_page_size = true;
	pr_debug2_peo("Kernel has no PERF_SAMPLE_DATA_PAGE_SIZE support\n");
	attr.sample_type = 0;

	attr.cgroup = 1;
	if (has_attr_feature(&attr, /*flags=*/0))
		goto found;
	perf_missing_features.cgroup = true;
	pr_debug2_peo("Kernel has no cgroup sampling support\n");
	attr.cgroup = 0;

	attr.aux_output = 1;
	if (has_attr_feature(&attr, /*flags=*/0))
		goto found;
	perf_missing_features.aux_output = true;
	pr_debug2_peo("Kernel has no attr.aux_output support\n");
	attr.aux_output = 0;

	attr.bpf_event = 1;
	if (has_attr_feature(&attr, /*flags=*/0))
		goto found;
	perf_missing_features.bpf = true;
	pr_debug2_peo("switching off bpf_event\n");
	attr.bpf_event = 0;

	attr.ksymbol = 1;
	if (has_attr_feature(&attr, /*flags=*/0))
		goto found;
	perf_missing_features.ksymbol = true;
	pr_debug2_peo("switching off ksymbol\n");
	attr.ksymbol = 0;

	attr.write_backward = 1;
	if (has_attr_feature(&attr, /*flags=*/0))
		goto found;
	perf_missing_features.write_backward = true;
	pr_debug2_peo("switching off write_backward\n");
	attr.write_backward = 0;

	attr.use_clockid = 1;
	attr.clockid = CLOCK_MONOTONIC;
	if (has_attr_feature(&attr, /*flags=*/0))
		goto found;
	perf_missing_features.clockid = true;
	pr_debug2_peo("switching off clockid\n");
	attr.use_clockid = 0;
	attr.clockid = 0;

	if (has_attr_feature(&attr, /*flags=*/PERF_FLAG_FD_CLOEXEC))
		goto found;
	perf_missing_features.cloexec = true;
	pr_debug2_peo("switching off cloexec flag\n");

	attr.mmap2 = 1;
	if (has_attr_feature(&attr, /*flags=*/0))
		goto found;
	perf_missing_features.mmap2 = true;
	pr_debug2_peo("switching off mmap2\n");
	attr.mmap2 = 0;

	/* set this unconditionally? */
	perf_missing_features.sample_id_all = true;
	pr_debug2_peo("switching off sample_id_all\n");

	attr.inherit = 1;
	attr.read_format = PERF_FORMAT_GROUP;
	if (has_attr_feature(&attr, /*flags=*/0))
		goto found;
	perf_missing_features.group_read = true;
	pr_debug2_peo("switching off group read\n");
	attr.inherit = 0;
	attr.read_format = 0;

found:
	detection_done = true;
	errno = old_errno;

check:
	if (evsel->core.attr.inherit &&
	    (evsel->core.attr.sample_type & PERF_SAMPLE_READ) &&
	    perf_missing_features.inherit_sample_read)
		return true;

	if ((evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS) &&
	    perf_missing_features.branch_counters)
		return true;

	if ((evsel->core.attr.read_format & PERF_FORMAT_LOST) &&
	    perf_missing_features.read_lost)
		return true;

	if ((evsel->core.attr.sample_type & PERF_SAMPLE_WEIGHT_STRUCT) &&
	    perf_missing_features.weight_struct)
		return true;

	if (evsel->core.attr.use_clockid && evsel->core.attr.clockid != CLOCK_MONOTONIC &&
	    !perf_missing_features.clockid) {
		perf_missing_features.clockid_wrong = true;
		return true;
	}

	if (evsel->core.attr.use_clockid && perf_missing_features.clockid)
		return true;

	if ((evsel->open_flags & PERF_FLAG_FD_CLOEXEC) &&
	    perf_missing_features.cloexec)
		return true;

	if (evsel->core.attr.mmap2 && perf_missing_features.mmap2)
		return true;

	if ((evsel->core.attr.branch_sample_type & (PERF_SAMPLE_BRANCH_NO_FLAGS |
						    PERF_SAMPLE_BRANCH_NO_CYCLES)) &&
	    perf_missing_features.lbr_flags)
		return true;

	if (evsel->core.attr.inherit && (evsel->core.attr.read_format & PERF_FORMAT_GROUP) &&
	    perf_missing_features.group_read)
		return true;

	if (evsel->core.attr.ksymbol && perf_missing_features.ksymbol)
		return true;

	if (evsel->core.attr.bpf_event && perf_missing_features.bpf)
		return true;

	if ((evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX) &&
	    perf_missing_features.branch_hw_idx)
		return true;

	if (evsel->core.attr.sample_id_all && perf_missing_features.sample_id_all)
		return true;

	return false;
}

static bool evsel__handle_error_quirks(struct evsel *evsel, int error)
{
	/*
	 * The AMD core PMU tries to forward events with precise_ip to the IBS
	 * PMU implicitly. But the IBS PMU has more restrictions, so it can
	 * fail even with event attributes the core PMU supports. Forward the
	 * event back to the core PMU by clearing precise_ip, but only if it
	 * came from precise_max (:P).
	 */
	if ((error == -EINVAL || error == -ENOENT) && x86__is_amd_cpu() &&
	    evsel->core.attr.precise_ip && evsel->precise_max) {
		evsel->core.attr.precise_ip = 0;
		pr_debug2_peo("removing precise_ip on AMD\n");
		display_attr(&evsel->core.attr);
		return true;
	}

	return false;
}

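/*
 * The open loop below retries in several ways before giving up: a missing
 * thread can be dropped from the thread map, the fd limit can be raised on
 * EMFILE, attr bits for features the kernel lacks can be cleared (see
 * fallback_missing_features), precise_ip can be lowered, and PMU-specific
 * quirks are applied last. Only then is the error returned and every fd
 * opened so far closed.
 */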
static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
		struct perf_thread_map *threads,
		int start_cpu_map_idx, int end_cpu_map_idx)
{
	int idx, thread, nthreads;
	int pid = -1, err, old_errno;
	enum rlimit_action set_rlimit = NO_CHANGE;

	if (evsel__is_retire_lat(evsel))
		return tpebs_start(evsel->evlist);

	err = __evsel__prepare_open(evsel, cpus, threads);
	if (err)
		return err;

	if (cpus == NULL)
		cpus = empty_cpu_map;

	if (threads == NULL)
		threads = empty_thread_map;

	nthreads = perf_thread_map__nr(threads);

	if (evsel->cgrp)
		pid = evsel->cgrp->fd;

fallback_missing_features:
	evsel__disable_missing_features(evsel);

	pr_debug3("Opening: %s\n", evsel__name(evsel));
	display_attr(&evsel->core.attr);

	if (evsel__is_tool(evsel)) {
		return evsel__tool_pmu_open(evsel, threads,
					    start_cpu_map_idx,
					    end_cpu_map_idx);
	}
	if (evsel__is_hwmon(evsel)) {
		return evsel__hwmon_pmu_open(evsel, threads,
					     start_cpu_map_idx,
					     end_cpu_map_idx);
	}

	for (idx = start_cpu_map_idx; idx < end_cpu_map_idx; idx++) {

		for (thread = 0; thread < nthreads; thread++) {
			int fd, group_fd;
retry_open:
			if (thread >= nthreads)
				break;

			if (!evsel->cgrp && !evsel->core.system_wide)
				pid = perf_thread_map__pid(threads, thread);

			group_fd = get_group_fd(evsel, idx, thread);

			if (group_fd == -2) {
				pr_debug("broken group leader for %s\n", evsel->name);
				err = -EINVAL;
				goto out_close;
			}

			/* Debug message used by test scripts */
			pr_debug2_peo("sys_perf_event_open: pid %d  cpu %d  group_fd %d  flags %#lx",
				      pid, perf_cpu_map__cpu(cpus, idx).cpu, group_fd, evsel->open_flags);

			fd = sys_perf_event_open(&evsel->core.attr, pid,
						 perf_cpu_map__cpu(cpus, idx).cpu,
						 group_fd, evsel->open_flags);

			FD(evsel, idx, thread) = fd;

			if (fd < 0) {
				err = -errno;

				pr_debug2_peo("\nsys_perf_event_open failed, error %d\n",
					      err);
				goto try_fallback;
			}

			bpf_counter__install_pe(evsel, idx, fd);

			if (unlikely(test_attr__enabled())) {
				test_attr__open(&evsel->core.attr, pid,
						perf_cpu_map__cpu(cpus, idx),
						fd, group_fd, evsel->open_flags);
			}

			/* Debug message used by test scripts */
			pr_debug2_peo(" = %d\n", fd);

			if (evsel->bpf_fd >= 0) {
				int evt_fd = fd;
				int bpf_fd = evsel->bpf_fd;

				err = ioctl(evt_fd,
					    PERF_EVENT_IOC_SET_BPF,
					    bpf_fd);
				if (err && errno != EEXIST) {
					pr_err("failed to attach bpf fd %d: %s\n",
					       bpf_fd, strerror(errno));
					err = -EINVAL;
					goto out_close;
				}
			}

			set_rlimit = NO_CHANGE;

			/*
			 * If we succeeded but had to kill clockid, fail and
			 * have evsel__open_strerror() print us a nice error.
			 */
			if (perf_missing_features.clockid ||
			    perf_missing_features.clockid_wrong) {
				err = -EINVAL;
				goto out_close;
			}
		}
	}

	return 0;

try_fallback:
	if (evsel__ignore_missing_thread(evsel, perf_cpu_map__nr(cpus),
					 idx, threads, thread, err)) {
		/* We just removed 1 thread, so lower the upper nthreads limit. */
		nthreads--;

		/* ... and pretend like nothing has happened. */
		err = 0;
		goto retry_open;
	}
	/*
	 * perf stat needs between 5 and 22 fds per CPU. When we run out
	 * of them, try to increase the limits.
	 */
	if (err == -EMFILE && rlimit__increase_nofile(&set_rlimit))
		goto retry_open;

	if (err == -EINVAL && evsel__detect_missing_features(evsel))
		goto fallback_missing_features;

	if (evsel__precise_ip_fallback(evsel))
		goto retry_open;

	if (evsel__handle_error_quirks(evsel, err))
		goto retry_open;

out_close:
	if (err)
		threads->err_thread = thread;

	old_errno = errno;
	do {
		while (--thread >= 0) {
			if (FD(evsel, idx, thread) >= 0)
				close(FD(evsel, idx, thread));
			FD(evsel, idx, thread) = -1;
		}
		thread = nthreads;
	} while (--idx >= 0);
	errno = old_errno;
	return err;
}

int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus,
		struct perf_thread_map *threads)
{
	return evsel__open_cpu(evsel, cpus, threads, 0, perf_cpu_map__nr(cpus));
}

void evsel__close(struct evsel *evsel)
{
	if (evsel__is_retire_lat(evsel))
		tpebs_delete();
	perf_evsel__close(&evsel->core);
	perf_evsel__free_id(&evsel->core);
}

int evsel__open_per_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, int cpu_map_idx)
{
	if (cpu_map_idx == -1)
		return evsel__open_cpu(evsel, cpus, NULL, 0, perf_cpu_map__nr(cpus));

	return evsel__open_cpu(evsel, cpus, NULL, cpu_map_idx, cpu_map_idx + 1);
}

int evsel__open_per_thread(struct evsel *evsel, struct perf_thread_map *threads)
{
	return evsel__open(evsel, NULL, threads);
}

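/*
 * For non-sample records with sample_id_all set, the kernel appends the
 * selected id fields at the *end* of the record, in sample_type bit order:
 * TID, TIME, ID, STREAM_ID, CPU, IDENTIFIER. Hence this parses backwards
 * from the last u64 of the event.
 */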
static int perf_evsel__parse_id_sample(const struct evsel *evsel,
				       const union perf_event *event,
				       struct perf_sample *sample)
{
	u64 type = evsel->core.attr.sample_type;
	const __u64 *array = event->sample.array;
	bool swapped = evsel->needs_swap;
	union u64_swap u;

	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		sample->cpu = u.val32[0];
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		sample->pid = u.val32[0];
		sample->tid = u.val32[1];
		array--;
	}

	return 0;
}

static inline bool overflow(const void *endp, u16 max_size, const void *offset,
			    u64 size)
{
	return size > max_size || offset + size > endp;
}

#define OVERFLOW_CHECK(offset, size, max_size)				\
	do {								\
		if (overflow(endp, (max_size), (offset), (size)))	\
			return -EFAULT;					\
	} while (0)

#define OVERFLOW_CHECK_u64(offset) \
	OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64))

static int
perf_event__check_size(union perf_event *event, unsigned int sample_size)
{
	/*
	 * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
	 * up to PERF_SAMPLE_PERIOD. After that overflow() must be used to
	 * check the format does not go past the end of the event.
	 */
	if (sample_size + sizeof(event->header) > event->header.size)
		return -EFAULT;

	return 0;
}

void __weak arch_perf_parse_sample_weight(struct perf_sample *data,
					  const __u64 *array,
					  u64 type __maybe_unused)
{
	data->weight = *array;
}

u64 evsel__bitfield_swap_branch_flags(u64 value)
{
	u64 new_val = 0;

	/*
	 * branch_flags
	 * union {
	 *	u64 values;
	 *	struct {
	 *		mispred:1	//target mispredicted
	 *		predicted:1	//target predicted
	 *		in_tx:1		//in transaction
	 *		abort:1		//transaction abort
	 *		cycles:16	//cycle count to last branch
	 *		type:4		//branch type
	 *		spec:2		//branch speculation info
	 *		new_type:4	//additional branch type
	 *		priv:3		//privilege level
	 *		reserved:31
	 *	}
	 * }
	 *
	 * Avoid bswap64()ing the entire branch_flag.value,
	 * as it has variable bit-field sizes. Instead the
	 * macro takes the bit-field position/size and
	 * swaps it based on the host endianness.
	 */
	if (host_is_bigendian()) {
		new_val = bitfield_swap(value, 0, 1);
		new_val |= bitfield_swap(value, 1, 1);
		new_val |= bitfield_swap(value, 2, 1);
		new_val |= bitfield_swap(value, 3, 1);
		new_val |= bitfield_swap(value, 4, 16);
		new_val |= bitfield_swap(value, 20, 4);
		new_val |= bitfield_swap(value, 24, 2);
		new_val |= bitfield_swap(value, 26, 4);
		new_val |= bitfield_swap(value, 30, 3);
		new_val |= bitfield_swap(value, 33, 31);
	} else {
		new_val = bitfield_swap(value, 63, 1);
		new_val |= bitfield_swap(value, 62, 1);
		new_val |= bitfield_swap(value, 61, 1);
		new_val |= bitfield_swap(value, 60, 1);
		new_val |= bitfield_swap(value, 44, 16);
		new_val |= bitfield_swap(value, 40, 4);
		new_val |= bitfield_swap(value, 38, 2);
		new_val |= bitfield_swap(value, 34, 4);
		new_val |= bitfield_swap(value, 31, 3);
		new_val |= bitfield_swap(value, 0, 31);
	}

	return new_val;
}

static inline bool evsel__has_branch_counters(const struct evsel *evsel)
{
	struct evsel *leader = evsel__leader(evsel);

	/* The branch counters feature is only supported for groups. */
	if (!leader || !evsel->evlist)
		return false;

	if (evsel->evlist->nr_br_cntr < 0)
		evlist__update_br_cntr(evsel->evlist);

	if (leader->br_cntr_nr > 0)
		return true;

	return false;
}

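/*
 * Parse a PERF_RECORD_SAMPLE payload. The fields below must be consumed in
 * exactly the order the kernel emits them, i.e. the order of the
 * PERF_SAMPLE_* bits in sample_type; variable-sized fields (callchain, raw
 * data, branch stack, ...) are bounds-checked against the record size with
 * OVERFLOW_CHECK and then referenced in place rather than copied.
 */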
int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
			struct perf_sample *data)
{
	u64 type = evsel->core.attr.sample_type;
	bool swapped = evsel->needs_swap;
	const __u64 *array;
	u16 max_size = event->header.size;
	const void *endp = (void *)event + max_size;
	u64 sz;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	memset(data, 0, sizeof(*data));
	data->cpu = data->pid = data->tid = -1;
	data->stream_id = data->id = data->time = -1ULL;
	data->period = evsel->core.attr.sample_period;
	data->cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	data->misc = event->header.misc;
	data->data_src = PERF_MEM_DATA_SRC_NONE;
	data->vcpu = -1;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		if (!evsel->core.attr.sample_id_all)
			return 0;
		return perf_evsel__parse_id_sample(evsel, event, data);
	}

	array = event->sample.array;

	if (perf_event__check_size(event, evsel->sample_size))
		return -EFAULT;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_IP) {
		data->ip = *array;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		data->pid = u.val32[0];
		data->tid = u.val32[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {

		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		data->cpu = u.val32[0];
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		u64 read_format = evsel->core.attr.read_format;

		OVERFLOW_CHECK_u64(array);
		if (read_format & PERF_FORMAT_GROUP)
			data->read.group.nr = *array;
		else
			data->read.one.value = *array;

		array++;

		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
			OVERFLOW_CHECK_u64(array);
			data->read.time_enabled = *array;
			array++;
		}

		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
			OVERFLOW_CHECK_u64(array);
			data->read.time_running = *array;
			array++;
		}

		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			const u64 max_group_nr = UINT64_MAX /
						 sizeof(struct sample_read_value);

			if (data->read.group.nr > max_group_nr)
				return -EFAULT;

			sz = data->read.group.nr * sample_read_value_size(read_format);
			OVERFLOW_CHECK(array, sz, max_size);
			data->read.group.values =
					(struct sample_read_value *)array;
			array = (void *)array + sz;
		} else {
			OVERFLOW_CHECK_u64(array);
			data->read.one.id = *array;
			array++;

			if (read_format & PERF_FORMAT_LOST) {
				OVERFLOW_CHECK_u64(array);
				data->read.one.lost = *array;
				array++;
			}
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		const u64 max_callchain_nr = UINT64_MAX / sizeof(u64);

		OVERFLOW_CHECK_u64(array);
		data->callchain = (struct ip_callchain *)array++;
		if (data->callchain->nr > max_callchain_nr)
			return -EFAULT;
		sz = data->callchain->nr * sizeof(u64);
		OVERFLOW_CHECK(array, sz, max_size);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		OVERFLOW_CHECK_u64(array);
		u.val64 = *array;

		/*
		 * Undo swap of u64, then swap on individual u32s,
		 * get the size of the raw area and undo all of the
		 * swap. The pevent interface handles endianness by
		 * itself.
		 */
		if (swapped) {
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}
		data->raw_size = u.val32[0];

		/*
		 * The raw data is aligned on 64bits including the
		 * u32 size, so it's safe to use mem_bswap_64.
		 */
		if (swapped)
			mem_bswap_64((void *) array, data->raw_size);

		array = (void *)array + sizeof(u32);

		OVERFLOW_CHECK(array, data->raw_size, max_size);
		data->raw_data = (void *)array;
		array = (void *)array + data->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		const u64 max_branch_nr = UINT64_MAX /
					  sizeof(struct branch_entry);
		struct branch_entry *e;
		unsigned int i;

		OVERFLOW_CHECK_u64(array);
		data->branch_stack = (struct branch_stack *)array++;

		if (data->branch_stack->nr > max_branch_nr)
			return -EFAULT;

		sz = data->branch_stack->nr * sizeof(struct branch_entry);
		if (evsel__has_branch_hw_idx(evsel)) {
			sz += sizeof(u64);
			e = &data->branch_stack->entries[0];
		} else {
			data->no_hw_idx = true;
			/*
			 * If PERF_SAMPLE_BRANCH_HW_INDEX is not applied,
			 * only nr and entries[] will be output by the kernel.
			 */
			e = (struct branch_entry *)&data->branch_stack->hw_idx;
		}

		if (swapped) {
			/*
			 * struct branch_flag does not have an endian-specific
			 * bit-field definition, and bswap will not resolve
			 * the issue, since these are bit fields.
			 *
			 * evsel__bitfield_swap_branch_flags() uses the
			 * bitfield_swap macro to swap the bit positions
			 * based on the host endianness.
			 */
			for (i = 0; i < data->branch_stack->nr; i++, e++)
				e->flags.value = evsel__bitfield_swap_branch_flags(e->flags.value);
		}

		OVERFLOW_CHECK(array, sz, max_size);
		array = (void *)array + sz;

		if (evsel__has_branch_counters(evsel)) {
			data->branch_stack_cntr = (u64 *)array;
			sz = data->branch_stack->nr * sizeof(u64);

			OVERFLOW_CHECK(array, sz, max_size);
			array = (void *)array + sz;
		}
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		OVERFLOW_CHECK_u64(array);
		data->user_regs.abi = *array;
		array++;

		if (data->user_regs.abi) {
			u64 mask = evsel->core.attr.sample_regs_user;

			sz = hweight64(mask) * sizeof(u64);
			OVERFLOW_CHECK(array, sz, max_size);
			data->user_regs.mask = mask;
			data->user_regs.regs = (u64 *)array;
			array = (void *)array + sz;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		OVERFLOW_CHECK_u64(array);
		sz = *array++;

		data->user_stack.offset = ((char *)(array - 1)
					   - (char *) event);

		if (!sz) {
			data->user_stack.size = 0;
		} else {
			OVERFLOW_CHECK(array, sz, max_size);
			data->user_stack.data = (char *)array;
			array = (void *)array + sz;
			OVERFLOW_CHECK_u64(array);
			data->user_stack.size = *array++;
			if (WARN_ONCE(data->user_stack.size > sz,
				      "user stack dump failure\n"))
				return -EFAULT;
		}
	}

	if (type & PERF_SAMPLE_WEIGHT_TYPE) {
		OVERFLOW_CHECK_u64(array);
		arch_perf_parse_sample_weight(data, array, type);
		array++;
	}

	if (type & PERF_SAMPLE_DATA_SRC) {
		OVERFLOW_CHECK_u64(array);
		data->data_src = *array;
		array++;
	}

	if (type & PERF_SAMPLE_TRANSACTION) {
		OVERFLOW_CHECK_u64(array);
		data->transaction = *array;
		array++;
	}

	data->intr_regs.abi = PERF_SAMPLE_REGS_ABI_NONE;
	if (type & PERF_SAMPLE_REGS_INTR) {
		OVERFLOW_CHECK_u64(array);
		data->intr_regs.abi = *array;
		array++;

		if (data->intr_regs.abi != PERF_SAMPLE_REGS_ABI_NONE) {
			u64 mask = evsel->core.attr.sample_regs_intr;

			sz = hweight64(mask) * sizeof(u64);
			OVERFLOW_CHECK(array, sz, max_size);
			data->intr_regs.mask = mask;
			data->intr_regs.regs = (u64 *)array;
			array = (void *)array + sz;
		}
	}

	data->phys_addr = 0;
	if (type & PERF_SAMPLE_PHYS_ADDR) {
		data->phys_addr = *array;
		array++;
	}

	data->cgroup = 0;
	if (type & PERF_SAMPLE_CGROUP) {
		data->cgroup = *array;
		array++;
	}

	data->data_page_size = 0;
	if (type & PERF_SAMPLE_DATA_PAGE_SIZE) {
		data->data_page_size = *array;
		array++;
	}

	data->code_page_size = 0;
	if (type & PERF_SAMPLE_CODE_PAGE_SIZE) {
		data->code_page_size = *array;
		array++;
	}

	if (type & PERF_SAMPLE_AUX) {
		OVERFLOW_CHECK_u64(array);
		sz = *array++;

		OVERFLOW_CHECK(array, sz, max_size);
		/* Undo swap of data */
		if (swapped)
			mem_bswap_64((char *)array, sz);
		data->aux_sample.size = sz;
		data->aux_sample.data = (char *)array;
		array = (void *)array + sz;
	}

	return 0;
}

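/*
 * Fast path to extract just the timestamp: PERF_SAMPLE_TIME is preceded only
 * by the fixed-size IDENTIFIER, IP and TID fields, so those can be skipped
 * without parsing the whole sample.
 */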
int evsel__parse_sample_timestamp(struct evsel *evsel, union perf_event *event,
				  u64 *timestamp)
{
	u64 type = evsel->core.attr.sample_type;
	const __u64 *array;

	if (!(type & PERF_SAMPLE_TIME))
		return -1;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		struct perf_sample data = {
			.time = -1ULL,
		};

		if (!evsel->core.attr.sample_id_all)
			return -1;
		if (perf_evsel__parse_id_sample(evsel, event, &data))
			return -1;

		*timestamp = data.time;
		return 0;
	}

	array = event->sample.array;

	if (perf_event__check_size(event, evsel->sample_size))
		return -EFAULT;

	if (type & PERF_SAMPLE_IDENTIFIER)
		array++;

	if (type & PERF_SAMPLE_IP)
		array++;

	if (type & PERF_SAMPLE_TID)
		array++;

	if (type & PERF_SAMPLE_TIME)
		*timestamp = *array;

	return 0;
}

u16 evsel__id_hdr_size(const struct evsel *evsel)
{
	u64 sample_type = evsel->core.attr.sample_type;
	u16 size = 0;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(u64);

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(u64);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(u64);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(u64);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(u64);

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(u64);

	return size;
}

#ifdef HAVE_LIBTRACEEVENT
struct tep_format_field *evsel__field(struct evsel *evsel, const char *name)
{
	return tep_find_field(evsel->tp_format, name);
}

struct tep_format_field *evsel__common_field(struct evsel *evsel, const char *name)
{
	return tep_find_common_field(evsel->tp_format, name);
}

void *evsel__rawptr(struct evsel *evsel, struct perf_sample *sample, const char *name)
{
	struct tep_format_field *field = evsel__field(evsel, name);
	int offset;

	if (!field)
		return NULL;

	offset = field->offset;

	if (field->flags & TEP_FIELD_IS_DYNAMIC) {
		offset = *(int *)(sample->raw_data + field->offset);
		offset &= 0xffff;
		if (tep_field_is_relative(field->flags))
			offset += field->offset + field->size;
	}

	return sample->raw_data + offset;
}

u64 format_field__intval(struct tep_format_field *field, struct perf_sample *sample,
			 bool needs_swap)
{
	u64 value;
	void *ptr = sample->raw_data + field->offset;

	switch (field->size) {
	case 1:
		return *(u8 *)ptr;
	case 2:
		value = *(u16 *)ptr;
		break;
	case 4:
		value = *(u32 *)ptr;
		break;
	case 8:
		memcpy(&value, ptr, sizeof(u64));
		break;
	default:
		return 0;
	}

	if (!needs_swap)
		return value;

	switch (field->size) {
	case 2:
		return bswap_16(value);
	case 4:
		return bswap_32(value);
	case 8:
		return bswap_64(value);
	default:
		return 0;
	}

	return 0;
}

u64 evsel__intval(struct evsel *evsel, struct perf_sample *sample, const char *name)
{
	struct tep_format_field *field = evsel__field(evsel, name);

	return field ? format_field__intval(field, sample, evsel->needs_swap) : 0;
}

u64 evsel__intval_common(struct evsel *evsel, struct perf_sample *sample, const char *name)
{
	struct tep_format_field *field = evsel__common_field(evsel, name);

	return field ? format_field__intval(field, sample, evsel->needs_swap) : 0;
}

char evsel__taskstate(struct evsel *evsel, struct perf_sample *sample, const char *name)
{
	static struct tep_format_field *prev_state_field;
	static const char *states;
	struct tep_format_field *field;
	unsigned long long val;
	unsigned int bit;
	char state = '?'; /* '?' denotes an unknown task state */

	field = evsel__field(evsel, name);

	if (!field)
		return state;

	if (!states || field != prev_state_field) {
		states = parse_task_states(field);
		if (!states)
			return state;
		prev_state_field = field;
	}

	/*
	 * Note that since the kernel exposes TASK_REPORT_MAX to userspace
	 * to denote the 'preempted' state, we might as well report 'R' for
	 * this case, which makes sense to users as well.
	 *
	 * We can change this if we have a good reason in the future.
	 */
	val = evsel__intval(evsel, sample, name);
	bit = val ? ffs(val) : 0;
	state = (!bit || bit > strlen(states)) ? 'R' : states[bit-1];
	return state;
}
#endif

bool evsel__fallback(struct evsel *evsel, struct target *target, int err,
		     char *msg, size_t msgsize)
{
	int paranoid;

	if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
	    evsel->core.attr.type == PERF_TYPE_HARDWARE &&
	    evsel->core.attr.config == PERF_COUNT_HW_CPU_CYCLES) {
		/*
		 * If it's cycles then fall back to the hrtimer-based cpu-clock
		 * sw counter, which is always available even without PMU
		 * support.
		 *
		 * PPC returns ENXIO until 2.6.37 (behavior changed with commit
		 * b0a873e).
		 */
		evsel->core.attr.type = PERF_TYPE_SOFTWARE;
		evsel->core.attr.config = target__has_cpu(target)
			? PERF_COUNT_SW_CPU_CLOCK
			: PERF_COUNT_SW_TASK_CLOCK;
		scnprintf(msg, msgsize,
			"The cycles event is not supported, trying to fall back to %s",
			target__has_cpu(target) ? "cpu-clock" : "task-clock");

		zfree(&evsel->name);
		return true;
	} else if (err == EACCES && !evsel->core.attr.exclude_kernel &&
		   (paranoid = perf_event_paranoid()) > 1) {
		const char *name = evsel__name(evsel);
		char *new_name;
		const char *sep = ":";

		/* If the event has exclude_user set then don't exclude the kernel. */
		if (evsel->core.attr.exclude_user)
			return false;

		/* Is the separator already in the name? */
		if (strchr(name, '/') ||
		    (strchr(name, ':') && !evsel->is_libpfm_event))
			sep = "";

		if (asprintf(&new_name, "%s%su", name, sep) < 0)
			return false;

		free(evsel->name);
		evsel->name = new_name;
		scnprintf(msg, msgsize, "kernel.perf_event_paranoid=%d, trying "
			  "to fall back to excluding kernel and hypervisor "
			  "samples", paranoid);
		evsel->core.attr.exclude_kernel = 1;
		evsel->core.attr.exclude_hv = 1;

		return true;
	} else if (err == EOPNOTSUPP && !evsel->core.attr.exclude_guest &&
		   !evsel->exclude_GH) {
		const char *name = evsel__name(evsel);
		char *new_name;
		const char *sep = ":";

		/* Is the separator already in the name? */
		if (strchr(name, '/') ||
		    (strchr(name, ':') && !evsel->is_libpfm_event))
			sep = "";

		if (asprintf(&new_name, "%s%sH", name, sep) < 0)
			return false;

		free(evsel->name);
		evsel->name = new_name;
		/* Apple M1 requires exclude_guest */
		scnprintf(msg, msgsize, "trying to fall back to excluding guest samples");
		evsel->core.attr.exclude_guest = 1;

		return true;
	}

	return false;
}

static bool find_process(const char *name)
{
	size_t len = strlen(name);
	DIR *dir;
	struct dirent *d;
	int ret = -1;

	dir = opendir(procfs__mountpoint());
	if (!dir)
		return false;

	/* Walk through the directory. */
	while (ret && (d = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		char *data;
		size_t size;

		if ((d->d_type != DT_DIR) ||
		    !strcmp(".", d->d_name) ||
		    !strcmp("..", d->d_name))
			continue;

		scnprintf(path, sizeof(path), "%s/%s/comm",
			  procfs__mountpoint(), d->d_name);

		if (filename__read_str(path, &data, &size))
			continue;

		ret = strncmp(name, data, len);
		free(data);
	}

	closedir(dir);
	return ret ? false : true;
}

int __weak arch_evsel__open_strerror(struct evsel *evsel __maybe_unused,
				     char *msg __maybe_unused,
				     size_t size __maybe_unused)
{
	return 0;
}

int evsel__open_strerror(struct evsel *evsel, struct target *target,
			 int err, char *msg, size_t size)
{
	char sbuf[STRERR_BUFSIZE];
	int printed = 0, enforced = 0;
	int ret;

	switch (err) {
	case EPERM:
	case EACCES:
		printed += scnprintf(msg + printed, size - printed,
			"Access to performance monitoring and observability operations is limited.\n");

		if (!sysfs__read_int("fs/selinux/enforce", &enforced)) {
			if (enforced) {
				printed += scnprintf(msg + printed, size - printed,
					"Enforced MAC policy settings (SELinux) can limit access to performance\n"
					"monitoring and observability operations. Inspect system audit records for\n"
					"more perf_event access control information and for adjusting the policy.\n");
			}
		}

		if (err == EPERM)
			printed += scnprintf(msg, size,
				"No permission to enable %s event.\n\n", evsel__name(evsel));

		return scnprintf(msg + printed, size - printed,
		 "Consider adjusting /proc/sys/kernel/perf_event_paranoid setting to open\n"
		 "access to performance monitoring and observability operations for processes\n"
		 "without CAP_PERFMON, CAP_SYS_PTRACE or CAP_SYS_ADMIN Linux capability.\n"
		 "More information can be found at 'Perf events and tool security' document:\n"
		 "https://www.kernel.org/doc/html/latest/admin-guide/perf-security.html\n"
		 "perf_event_paranoid setting is %d:\n"
		 "  -1: Allow use of (almost) all events by all users\n"
		 "      Ignore mlock limit after perf_event_mlock_kb without CAP_IPC_LOCK\n"
		 ">= 0: Disallow raw and ftrace function tracepoint access\n"
		 ">= 1: Disallow CPU event access\n"
		 ">= 2: Disallow kernel profiling\n"
		 "To make the adjusted perf_event_paranoid setting permanent preserve it\n"
		 "in /etc/sysctl.conf (e.g. kernel.perf_event_paranoid = <setting>)",
		 perf_event_paranoid());
	case ENOENT:
		return scnprintf(msg, size, "The %s event is not supported.", evsel__name(evsel));
	case EMFILE:
		return scnprintf(msg, size, "%s",
			 "Too many events are opened.\n"
			 "Probably the maximum number of open file descriptors has been reached.\n"
			 "Hint: Try again after reducing the number of events.\n"
			 "Hint: Try increasing the limit with 'ulimit -n <limit>'");
	case ENOMEM:
		if (evsel__has_callchain(evsel) &&
		    access("/proc/sys/kernel/perf_event_max_stack", F_OK) == 0)
			return scnprintf(msg, size,
					 "Not enough memory to setup event with callchain.\n"
					 "Hint: Try tweaking /proc/sys/kernel/perf_event_max_stack\n"
					 "Hint: Current value: %d", sysctl__max_stack());
		break;
	case ENODEV:
		if (target->cpu_list)
			return scnprintf(msg, size, "%s",
				 "No such device - did you specify an out-of-range profile CPU?");
		break;
	case EOPNOTSUPP:
		if (evsel->core.attr.sample_type & PERF_SAMPLE_BRANCH_STACK)
			return scnprintf(msg, size,
				 "%s: PMU Hardware or event type doesn't support branch stack sampling.",
					 evsel__name(evsel));
		if (evsel->core.attr.aux_output)
			return scnprintf(msg, size,
				 "%s: PMU Hardware doesn't support 'aux_output' feature",
					 evsel__name(evsel));
		if (evsel->core.attr.sample_period != 0)
			return scnprintf(msg, size,
				 "%s: PMU Hardware doesn't support sampling/overflow-interrupts. Try 'perf stat'",
					 evsel__name(evsel));
		if (evsel->core.attr.precise_ip)
			return scnprintf(msg, size, "%s",
				"'precise' request may not be supported. Try removing 'p' modifier.");
#if defined(__i386__) || defined(__x86_64__)
		if (evsel->core.attr.type == PERF_TYPE_HARDWARE)
			return scnprintf(msg, size, "%s",
				"No hardware sampling interrupt available.\n");
#endif
		break;
	case EBUSY:
		if (find_process("oprofiled"))
			return scnprintf(msg, size,
				 "The PMU counters are busy/taken by another profiler.\n"
				 "We found the oprofile daemon running, please stop it and try again.");
		break;
	case EINVAL:
		if (evsel->core.attr.sample_type & PERF_SAMPLE_CODE_PAGE_SIZE && perf_missing_features.code_page_size)
			return scnprintf(msg, size, "Asking for the code page size isn't supported by this kernel.");
		if (evsel->core.attr.sample_type & PERF_SAMPLE_DATA_PAGE_SIZE && perf_missing_features.data_page_size)
			return scnprintf(msg, size, "Asking for the data page size isn't supported by this kernel.");
		if (evsel->core.attr.write_backward && perf_missing_features.write_backward)
			return scnprintf(msg, size, "Reading from overwrite event is not supported by this kernel.");
		if (perf_missing_features.clockid)
			return scnprintf(msg, size, "clockid feature not supported.");
		if (perf_missing_features.clockid_wrong)
			return scnprintf(msg, size, "wrong clockid (%d).", clockid);
		if (perf_missing_features.aux_output)
			return scnprintf(msg, size, "The 'aux_output' feature is not supported, update the kernel.");
		if (!target__has_cpu(target))
			return scnprintf(msg, size,
				"Invalid event (%s) in per-thread mode, enable system wide with '-a'.",
					evsel__name(evsel));

		break;
	case ENODATA:
		return scnprintf(msg, size, "Cannot collect data source with the load latency event alone. "
				 "Please add an auxiliary event in front of the load latency event.");
	default:
		break;
	}

	ret = arch_evsel__open_strerror(evsel, msg, size);
	if (ret)
		return ret;

	return scnprintf(msg, size,
	"The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
	"\"dmesg | grep -i perf\" may provide additional information.\n",
			 err, str_error_r(err, sbuf, sizeof(sbuf)), evsel__name(evsel));
}

struct perf_env *evsel__env(struct evsel *evsel)
{
	if (evsel && evsel->evlist && evsel->evlist->env)
		return evsel->evlist->env;
	return &perf_env;
}

static int store_evsel_ids(struct evsel *evsel, struct evlist *evlist)
{
	int cpu_map_idx, thread;

	if (evsel__is_retire_lat(evsel))
		return 0;

	for (cpu_map_idx = 0; cpu_map_idx < xyarray__max_x(evsel->core.fd); cpu_map_idx++) {
		for (thread = 0; thread < xyarray__max_y(evsel->core.fd);
		     thread++) {
			int fd = FD(evsel, cpu_map_idx, thread);

			if (perf_evlist__id_add_fd(&evlist->core, &evsel->core,
						   cpu_map_idx, thread, fd) < 0)
				return -1;
		}
	}

	return 0;
}

int evsel__store_ids(struct evsel *evsel, struct evlist *evlist)
{
	struct perf_cpu_map *cpus = evsel->core.cpus;
	struct perf_thread_map *threads = evsel->core.threads;

	if (perf_evsel__alloc_id(&evsel->core, perf_cpu_map__nr(cpus), threads->nr))
		return -ENOMEM;

	return store_evsel_ids(evsel, evlist);
}

void evsel__zero_per_pkg(struct evsel *evsel)
{
	struct hashmap_entry *cur;
	size_t bkt;

	if (evsel->per_pkg_mask) {
		hashmap__for_each_entry(evsel->per_pkg_mask, cur, bkt)
			zfree(&cur->pkey);

		hashmap__clear(evsel->per_pkg_mask);
	}
}

/**
 * evsel__is_hybrid - does the evsel have a known PMU that is hybrid. Note, this
 *                    will be false on hybrid systems for hardware and legacy
 *                    cache events.
 */
bool evsel__is_hybrid(const struct evsel *evsel)
{
	if (perf_pmus__num_core_pmus() == 1)
		return false;

	return evsel->core.is_pmu_core;
}

struct evsel *evsel__leader(const struct evsel *evsel)
{
	return container_of(evsel->core.leader, struct evsel, core);
}

bool evsel__has_leader(struct evsel *evsel, struct evsel *leader)
{
	return evsel->core.leader == &leader->core;
}

bool evsel__is_leader(struct evsel *evsel)
{
	return evsel__has_leader(evsel, evsel);
}

void evsel__set_leader(struct evsel *evsel, struct evsel *leader)
{
	evsel->core.leader = &leader->core;
}

int evsel__source_count(const struct evsel *evsel)
{
	struct evsel *pos;
	int count = 0;

	evlist__for_each_entry(evsel->evlist, pos) {
		if (pos->metric_leader == evsel)
			count++;
	}
	return count;
}

bool __weak arch_evsel__must_be_in_group(const struct evsel *evsel __maybe_unused)
{
	return false;
}

/*
 * Remove an event from a given group (leader).
 * Some events, e.g. perf metrics Topdown events,
 * must always be grouped; ignore those.
 */
void evsel__remove_from_group(struct evsel *evsel, struct evsel *leader)
{
	if (!arch_evsel__must_be_in_group(evsel) && evsel != leader) {
		evsel__set_leader(evsel, evsel);
		evsel->core.nr_members = 0;
		leader->core.nr_members--;
	}
}