xref: /linux/tools/perf/util/evsel.c (revision 92ce4c3ea7c44e61ca2b6ef3e5682bfcea851d87)
1 /*
2  * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
3  *
4  * Parts came from builtin-{top,stat,record}.c, see those files for further
5  * copyright notes.
6  *
7  * Released under the GPL v2. (and only v2, not any later version)
8  */
9 
10 #include <byteswap.h>
11 #include <errno.h>
12 #include <inttypes.h>
13 #include <linux/bitops.h>
14 #include <api/fs/fs.h>
15 #include <api/fs/tracing_path.h>
16 #include <traceevent/event-parse.h>
17 #include <linux/hw_breakpoint.h>
18 #include <linux/perf_event.h>
19 #include <linux/compiler.h>
20 #include <linux/err.h>
21 #include <sys/ioctl.h>
22 #include <sys/resource.h>
23 #include <sys/types.h>
24 #include <dirent.h>
25 #include "asm/bug.h"
26 #include "callchain.h"
27 #include "cgroup.h"
28 #include "event.h"
29 #include "evsel.h"
30 #include "evlist.h"
31 #include "util.h"
32 #include "cpumap.h"
33 #include "thread_map.h"
34 #include "target.h"
35 #include "perf_regs.h"
36 #include "debug.h"
37 #include "trace-event.h"
38 #include "stat.h"
39 #include "util/parse-branch-options.h"
40 
41 #include "sane_ctype.h"
42 
43 static struct {
44 	bool sample_id_all;
45 	bool exclude_guest;
46 	bool mmap2;
47 	bool cloexec;
48 	bool clockid;
49 	bool clockid_wrong;
50 	bool lbr_flags;
51 	bool write_backward;
52 	bool group_read;
53 } perf_missing_features;
54 
55 static clockid_t clockid;
56 
57 static int perf_evsel__no_extra_init(struct perf_evsel *evsel __maybe_unused)
58 {
59 	return 0;
60 }
61 
62 void __weak test_attr__ready(void) { }
63 
64 static void perf_evsel__no_extra_fini(struct perf_evsel *evsel __maybe_unused)
65 {
66 }
67 
68 static struct {
69 	size_t	size;
70 	int	(*init)(struct perf_evsel *evsel);
71 	void	(*fini)(struct perf_evsel *evsel);
72 } perf_evsel__object = {
73 	.size = sizeof(struct perf_evsel),
74 	.init = perf_evsel__no_extra_init,
75 	.fini = perf_evsel__no_extra_fini,
76 };
77 
78 int perf_evsel__object_config(size_t object_size,
79 			      int (*init)(struct perf_evsel *evsel),
80 			      void (*fini)(struct perf_evsel *evsel))
81 {
82 
83 	if (object_size == 0)
84 		goto set_methods;
85 
86 	if (perf_evsel__object.size > object_size)
87 		return -EINVAL;
88 
89 	perf_evsel__object.size = object_size;
90 
91 set_methods:
92 	if (init != NULL)
93 		perf_evsel__object.init = init;
94 
95 	if (fini != NULL)
96 		perf_evsel__object.fini = fini;
97 
98 	return 0;
99 }
100 
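/*
 * Illustrative usage (hypothetical caller, not part of this file): a tool
 * that needs extra per-evsel state can register a larger object plus
 * init/fini callbacks once at startup:
 *
 *	struct my_evsel {
 *		struct perf_evsel evsel;	(must be the first member)
 *		u64 extra;
 *	};
 *
 *	perf_evsel__object_config(sizeof(struct my_evsel), my_init, my_fini);
 *
 * After this, perf_evsel__new() allocates sizeof(struct my_evsel) bytes
 * and my_init()/my_fini() wrap each evsel's lifetime.
 */
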
101 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
102 
103 int __perf_evsel__sample_size(u64 sample_type)
104 {
105 	u64 mask = sample_type & PERF_SAMPLE_MASK;
106 	int size = 0;
107 	int i;
108 
109 	for (i = 0; i < 64; i++) {
110 		if (mask & (1ULL << i))
111 			size++;
112 	}
113 
114 	size *= sizeof(u64);
115 
116 	return size;
117 }
118 
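/*
 * Worked example (illustrative): sample_type = PERF_SAMPLE_IP |
 * PERF_SAMPLE_TID | PERF_SAMPLE_TIME sets three bits within
 * PERF_SAMPLE_MASK, so the fixed portion of each sample is
 * 3 * sizeof(u64) = 24 bytes.
 */
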
119 /**
120  * __perf_evsel__calc_id_pos - calculate id_pos.
121  * @sample_type: sample type
122  *
123  * This function returns the position of the event id (PERF_SAMPLE_ID or
124  * PERF_SAMPLE_IDENTIFIER) in a sample event, i.e. in the array of struct
125  * sample_event.
126  */
127 static int __perf_evsel__calc_id_pos(u64 sample_type)
128 {
129 	int idx = 0;
130 
131 	if (sample_type & PERF_SAMPLE_IDENTIFIER)
132 		return 0;
133 
134 	if (!(sample_type & PERF_SAMPLE_ID))
135 		return -1;
136 
137 	if (sample_type & PERF_SAMPLE_IP)
138 		idx += 1;
139 
140 	if (sample_type & PERF_SAMPLE_TID)
141 		idx += 1;
142 
143 	if (sample_type & PERF_SAMPLE_TIME)
144 		idx += 1;
145 
146 	if (sample_type & PERF_SAMPLE_ADDR)
147 		idx += 1;
148 
149 	return idx;
150 }
151 
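/*
 * Worked example (illustrative): for sample_type = PERF_SAMPLE_IP |
 * PERF_SAMPLE_TID | PERF_SAMPLE_ID the id is preceded by the IP and TID
 * words, so id_pos = 2; with PERF_SAMPLE_IDENTIFIER it is always 0.
 */
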
152 /**
153  * __perf_evsel__calc_is_pos - calculate is_pos.
154  * @sample_type: sample type
155  *
156  * This function returns the position (counting backwards) of the event id
157  * (PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER) in a non-sample event, i.e. when
158  * sample_id_all is used an id sample is appended to non-sample events.
159  */
160 static int __perf_evsel__calc_is_pos(u64 sample_type)
161 {
162 	int idx = 1;
163 
164 	if (sample_type & PERF_SAMPLE_IDENTIFIER)
165 		return 1;
166 
167 	if (!(sample_type & PERF_SAMPLE_ID))
168 		return -1;
169 
170 	if (sample_type & PERF_SAMPLE_CPU)
171 		idx += 1;
172 
173 	if (sample_type & PERF_SAMPLE_STREAM_ID)
174 		idx += 1;
175 
176 	return idx;
177 }
178 
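/*
 * Worked example (illustrative): for sample_type = PERF_SAMPLE_ID |
 * PERF_SAMPLE_STREAM_ID | PERF_SAMPLE_CPU the id is followed by the
 * STREAM_ID and CPU words, so counting backwards from the end of the
 * event is_pos = 3.
 */
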
179 void perf_evsel__calc_id_pos(struct perf_evsel *evsel)
180 {
181 	evsel->id_pos = __perf_evsel__calc_id_pos(evsel->attr.sample_type);
182 	evsel->is_pos = __perf_evsel__calc_is_pos(evsel->attr.sample_type);
183 }
184 
185 void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
186 				  enum perf_event_sample_format bit)
187 {
188 	if (!(evsel->attr.sample_type & bit)) {
189 		evsel->attr.sample_type |= bit;
190 		evsel->sample_size += sizeof(u64);
191 		perf_evsel__calc_id_pos(evsel);
192 	}
193 }
194 
195 void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
196 				    enum perf_event_sample_format bit)
197 {
198 	if (evsel->attr.sample_type & bit) {
199 		evsel->attr.sample_type &= ~bit;
200 		evsel->sample_size -= sizeof(u64);
201 		perf_evsel__calc_id_pos(evsel);
202 	}
203 }
204 
205 void perf_evsel__set_sample_id(struct perf_evsel *evsel,
206 			       bool can_sample_identifier)
207 {
208 	if (can_sample_identifier) {
209 		perf_evsel__reset_sample_bit(evsel, ID);
210 		perf_evsel__set_sample_bit(evsel, IDENTIFIER);
211 	} else {
212 		perf_evsel__set_sample_bit(evsel, ID);
213 	}
214 	evsel->attr.read_format |= PERF_FORMAT_ID;
215 }
216 
217 /**
218  * perf_evsel__is_function_event - Return whether given evsel is a function
219  * trace event
220  *
221  * @evsel: evsel selector to be tested
222  *
223  * Return: %true if the event is a function trace event
224  */
225 bool perf_evsel__is_function_event(struct perf_evsel *evsel)
226 {
227 #define FUNCTION_EVENT "ftrace:function"
228 
229 	return evsel->name &&
230 	       !strncmp(FUNCTION_EVENT, evsel->name, sizeof(FUNCTION_EVENT));
231 
232 #undef FUNCTION_EVENT
233 }
234 
235 void perf_evsel__init(struct perf_evsel *evsel,
236 		      struct perf_event_attr *attr, int idx)
237 {
238 	evsel->idx	   = idx;
239 	evsel->tracking	   = !idx;
240 	evsel->attr	   = *attr;
241 	evsel->leader	   = evsel;
242 	evsel->unit	   = "";
243 	evsel->scale	   = 1.0;
244 	evsel->evlist	   = NULL;
245 	evsel->bpf_fd	   = -1;
246 	INIT_LIST_HEAD(&evsel->node);
247 	INIT_LIST_HEAD(&evsel->config_terms);
248 	perf_evsel__object.init(evsel);
249 	evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
250 	perf_evsel__calc_id_pos(evsel);
251 	evsel->cmdline_group_boundary = false;
252 	evsel->metric_expr   = NULL;
253 	evsel->metric_name   = NULL;
254 	evsel->metric_events = NULL;
255 	evsel->collect_stat  = false;
256 }
257 
258 struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
259 {
260 	struct perf_evsel *evsel = zalloc(perf_evsel__object.size);
261 
262 	if (evsel != NULL)
263 		perf_evsel__init(evsel, attr, idx);
264 
265 	if (evsel && perf_evsel__is_bpf_output(evsel)) {
266 		evsel->attr.sample_type |= (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
267 					    PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD);
268 		evsel->attr.sample_period = 1;
269 	}
270 
271 	return evsel;
272 }
273 
274 struct perf_evsel *perf_evsel__new_cycles(bool precise)
275 {
276 	struct perf_event_attr attr = {
277 		.type	= PERF_TYPE_HARDWARE,
278 		.config	= PERF_COUNT_HW_CPU_CYCLES,
279 		.exclude_kernel	= geteuid() != 0,
280 	};
281 	struct perf_evsel *evsel;
282 
283 	event_attr_init(&attr);
284 
285 	if (!precise)
286 		goto new_event;
287 	/*
288 	 * sample_period is an unnamed union member, not supported as a named
289 	 * struct-member initializer by older compilers such as gcc 4.4.7.
290 	 *
291 	 * Just for probing the precise_ip:
292 	 */
293 	attr.sample_period = 1;
294 
295 	perf_event_attr__set_max_precise_ip(&attr);
296 	/*
297 	 * Now let the usual logic that sets up the perf_event_attr defaults
298 	 * kick in when we return, before perf_evsel__open() is called.
299 	 */
300 	attr.sample_period = 0;
301 new_event:
302 	evsel = perf_evsel__new(&attr);
303 	if (evsel == NULL)
304 		goto out;
305 
306 	/* use asprintf() because perf_evsel__delete() assumes name was heap-allocated */
307 	if (asprintf(&evsel->name, "cycles%s%s%.*s",
308 		     (attr.precise_ip || attr.exclude_kernel) ? ":" : "",
309 		     attr.exclude_kernel ? "u" : "",
310 		     attr.precise_ip ? attr.precise_ip + 1 : 0, "ppp") < 0)
311 		goto error_free;
312 out:
313 	return evsel;
314 error_free:
315 	perf_evsel__delete(evsel);
316 	evsel = NULL;
317 	goto out;
318 }
319 
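/*
 * Illustrative result (assuming the probing above succeeds): for an
 * unprivileged user (exclude_kernel set) with precise_ip probed to 2,
 * the name built above ends up as "cycles:uppp"; for root with
 * precise == false it stays "cycles".
 */
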
320 /*
321  * Returns a pointer with the error encoded via the <linux/err.h> interface.
322  */
323 struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx)
324 {
325 	struct perf_evsel *evsel = zalloc(perf_evsel__object.size);
326 	int err = -ENOMEM;
327 
328 	if (evsel == NULL) {
329 		goto out_err;
330 	} else {
331 		struct perf_event_attr attr = {
332 			.type	       = PERF_TYPE_TRACEPOINT,
333 			.sample_type   = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
334 					  PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
335 		};
336 
337 		if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
338 			goto out_free;
339 
340 		evsel->tp_format = trace_event__tp_format(sys, name);
341 		if (IS_ERR(evsel->tp_format)) {
342 			err = PTR_ERR(evsel->tp_format);
343 			goto out_free;
344 		}
345 
346 		event_attr_init(&attr);
347 		attr.config = evsel->tp_format->id;
348 		attr.sample_period = 1;
349 		perf_evsel__init(evsel, &attr, idx);
350 	}
351 
352 	return evsel;
353 
354 out_free:
355 	zfree(&evsel->name);
356 	free(evsel);
357 out_err:
358 	return ERR_PTR(err);
359 }
360 
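/*
 * Illustrative usage: callers typically go through the perf_evsel__newtp()
 * wrapper (a thin helper in evsel.h) and must check for an encoded error:
 *
 *	struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch");
 *
 *	if (IS_ERR(evsel))
 *		return PTR_ERR(evsel);
 */
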
361 const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
362 	"cycles",
363 	"instructions",
364 	"cache-references",
365 	"cache-misses",
366 	"branches",
367 	"branch-misses",
368 	"bus-cycles",
369 	"stalled-cycles-frontend",
370 	"stalled-cycles-backend",
371 	"ref-cycles",
372 };
373 
374 static const char *__perf_evsel__hw_name(u64 config)
375 {
376 	if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
377 		return perf_evsel__hw_names[config];
378 
379 	return "unknown-hardware";
380 }
381 
382 static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
383 {
384 	int colon = 0, r = 0;
385 	struct perf_event_attr *attr = &evsel->attr;
386 	bool exclude_guest_default = false;
387 
388 #define MOD_PRINT(context, mod)	do {					\
389 		if (!attr->exclude_##context) {				\
390 			if (!colon) colon = ++r;			\
391 			r += scnprintf(bf + r, size - r, "%c", mod);	\
392 		} } while(0)
393 
394 	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
395 		MOD_PRINT(kernel, 'k');
396 		MOD_PRINT(user, 'u');
397 		MOD_PRINT(hv, 'h');
398 		exclude_guest_default = true;
399 	}
400 
401 	if (attr->precise_ip) {
402 		if (!colon)
403 			colon = ++r;
404 		r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
405 		exclude_guest_default = true;
406 	}
407 
408 	if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
409 		MOD_PRINT(host, 'H');
410 		MOD_PRINT(guest, 'G');
411 	}
412 #undef MOD_PRINT
413 	if (colon)
414 		bf[colon - 1] = ':';
415 	return r;
416 }
417 
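/*
 * Worked example (illustrative): with exclude_kernel = 1, exclude_hv = 1,
 * exclude_guest = 0 and precise_ip = 2, the buffer gets ":upp" appended,
 * yielding names such as "cycles:upp".
 */
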
418 static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
419 {
420 	int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));
421 	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
422 }
423 
424 const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
425 	"cpu-clock",
426 	"task-clock",
427 	"page-faults",
428 	"context-switches",
429 	"cpu-migrations",
430 	"minor-faults",
431 	"major-faults",
432 	"alignment-faults",
433 	"emulation-faults",
434 	"dummy",
435 };
436 
437 static const char *__perf_evsel__sw_name(u64 config)
438 {
439 	if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
440 		return perf_evsel__sw_names[config];
441 	return "unknown-software";
442 }
443 
444 static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)
445 {
446 	int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));
447 	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
448 }
449 
450 static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
451 {
452 	int r;
453 
454 	r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);
455 
456 	if (type & HW_BREAKPOINT_R)
457 		r += scnprintf(bf + r, size - r, "r");
458 
459 	if (type & HW_BREAKPOINT_W)
460 		r += scnprintf(bf + r, size - r, "w");
461 
462 	if (type & HW_BREAKPOINT_X)
463 		r += scnprintf(bf + r, size - r, "x");
464 
465 	return r;
466 }
467 
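/*
 * Worked example (illustrative): bp_addr = 0x1000 with
 * bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W formats as "mem:0x1000:rw".
 */
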
468 static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size)
469 {
470 	struct perf_event_attr *attr = &evsel->attr;
471 	int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
472 	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
473 }
474 
475 const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
476 				[PERF_EVSEL__MAX_ALIASES] = {
477  { "L1-dcache",	"l1-d",		"l1d",		"L1-data",		},
478  { "L1-icache",	"l1-i",		"l1i",		"L1-instruction",	},
479  { "LLC",	"L2",							},
480  { "dTLB",	"d-tlb",	"Data-TLB",				},
481  { "iTLB",	"i-tlb",	"Instruction-TLB",			},
482  { "branch",	"branches",	"bpu",		"btb",		"bpc",	},
483  { "node",								},
484 };
485 
486 const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
487 				   [PERF_EVSEL__MAX_ALIASES] = {
488  { "load",	"loads",	"read",					},
489  { "store",	"stores",	"write",				},
490  { "prefetch",	"prefetches",	"speculative-read", "speculative-load",	},
491 };
492 
493 const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
494 				       [PERF_EVSEL__MAX_ALIASES] = {
495  { "refs",	"Reference",	"ops",		"access",		},
496  { "misses",	"miss",							},
497 };
498 
499 #define C(x)		PERF_COUNT_HW_CACHE_##x
500 #define CACHE_READ	(1 << C(OP_READ))
501 #define CACHE_WRITE	(1 << C(OP_WRITE))
502 #define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
503 #define COP(x)		(1 << x)
504 
505 /*
506  * cache operation stat
507  * L1I : Read and prefetch only
508  * ITLB and BPU : Read-only
509  */
510 static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
511  [C(L1D)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
512  [C(L1I)]	= (CACHE_READ | CACHE_PREFETCH),
513  [C(LL)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
514  [C(DTLB)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
515  [C(ITLB)]	= (CACHE_READ),
516  [C(BPU)]	= (CACHE_READ),
517  [C(NODE)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
518 };
519 
520 bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
521 {
522 	if (perf_evsel__hw_cache_stat[type] & COP(op))
523 		return true;	/* valid */
524 	else
525 		return false;	/* invalid */
526 }
527 
528 int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
529 					    char *bf, size_t size)
530 {
531 	if (result) {
532 		return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
533 				 perf_evsel__hw_cache_op[op][0],
534 				 perf_evsel__hw_cache_result[result][0]);
535 	}
536 
537 	return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
538 			 perf_evsel__hw_cache_op[op][1]);
539 }
540 
541 static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
542 {
543 	u8 op, result, type = (config >>  0) & 0xff;
544 	const char *err = "unknown-ext-hardware-cache-type";
545 
546 	if (type >= PERF_COUNT_HW_CACHE_MAX)
547 		goto out_err;
548 
549 	op = (config >>  8) & 0xff;
550 	err = "unknown-ext-hardware-cache-op";
551 	if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
552 		goto out_err;
553 
554 	result = (config >> 16) & 0xff;
555 	err = "unknown-ext-hardware-cache-result";
556 	if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
557 		goto out_err;
558 
559 	err = "invalid-cache";
560 	if (!perf_evsel__is_cache_op_valid(type, op))
561 		goto out_err;
562 
563 	return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
564 out_err:
565 	return scnprintf(bf, size, "%s", err);
566 }
567 
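/*
 * Worked example (illustrative): the config encoding is
 * type | (op << 8) | (result << 16), so
 *
 *	config = PERF_COUNT_HW_CACHE_L1D |
 *		 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *		 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
 *
 * formats as "L1-dcache-load-misses".
 */
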
568 static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)
569 {
570 	int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);
571 	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
572 }
573 
574 static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
575 {
576 	int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
577 	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
578 }
579 
580 const char *perf_evsel__name(struct perf_evsel *evsel)
581 {
582 	char bf[128];
583 
584 	if (evsel->name)
585 		return evsel->name;
586 
587 	switch (evsel->attr.type) {
588 	case PERF_TYPE_RAW:
589 		perf_evsel__raw_name(evsel, bf, sizeof(bf));
590 		break;
591 
592 	case PERF_TYPE_HARDWARE:
593 		perf_evsel__hw_name(evsel, bf, sizeof(bf));
594 		break;
595 
596 	case PERF_TYPE_HW_CACHE:
597 		perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
598 		break;
599 
600 	case PERF_TYPE_SOFTWARE:
601 		perf_evsel__sw_name(evsel, bf, sizeof(bf));
602 		break;
603 
604 	case PERF_TYPE_TRACEPOINT:
605 		scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
606 		break;
607 
608 	case PERF_TYPE_BREAKPOINT:
609 		perf_evsel__bp_name(evsel, bf, sizeof(bf));
610 		break;
611 
612 	default:
613 		scnprintf(bf, sizeof(bf), "unknown attr type: %d",
614 			  evsel->attr.type);
615 		break;
616 	}
617 
618 	evsel->name = strdup(bf);
619 
620 	return evsel->name ?: "unknown";
621 }
622 
623 const char *perf_evsel__group_name(struct perf_evsel *evsel)
624 {
625 	return evsel->group_name ?: "anon group";
626 }
627 
628 int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
629 {
630 	int ret;
631 	struct perf_evsel *pos;
632 	const char *group_name = perf_evsel__group_name(evsel);
633 
634 	ret = scnprintf(buf, size, "%s", group_name);
635 
636 	ret += scnprintf(buf + ret, size - ret, " { %s",
637 			 perf_evsel__name(evsel));
638 
639 	for_each_group_member(pos, evsel)
640 		ret += scnprintf(buf + ret, size - ret, ", %s",
641 				 perf_evsel__name(pos));
642 
643 	ret += scnprintf(buf + ret, size - ret, " }");
644 
645 	return ret;
646 }
647 
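/*
 * Illustrative output: for a leader named "cycles" grouped with
 * "instructions" and no explicit group name, the buffer reads
 * "anon group { cycles, instructions }".
 */
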
648 void perf_evsel__config_callchain(struct perf_evsel *evsel,
649 				  struct record_opts *opts,
650 				  struct callchain_param *param)
651 {
652 	bool function = perf_evsel__is_function_event(evsel);
653 	struct perf_event_attr *attr = &evsel->attr;
654 
655 	perf_evsel__set_sample_bit(evsel, CALLCHAIN);
656 
657 	attr->sample_max_stack = param->max_stack;
658 
659 	if (param->record_mode == CALLCHAIN_LBR) {
660 		if (!opts->branch_stack) {
661 			if (attr->exclude_user) {
662 				pr_warning("LBR callstack option is only available "
663 					   "to get user callchain information. "
664 					   "Falling back to framepointers.\n");
665 			} else {
666 				perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
667 				attr->branch_sample_type = PERF_SAMPLE_BRANCH_USER |
668 							PERF_SAMPLE_BRANCH_CALL_STACK |
669 							PERF_SAMPLE_BRANCH_NO_CYCLES |
670 							PERF_SAMPLE_BRANCH_NO_FLAGS;
671 			}
672 		} else
673 			pr_warning("Cannot use LBR callstack with branch stack. "
674 				   "Falling back to framepointers.\n");
675 	}
676 
677 	if (param->record_mode == CALLCHAIN_DWARF) {
678 		if (!function) {
679 			perf_evsel__set_sample_bit(evsel, REGS_USER);
680 			perf_evsel__set_sample_bit(evsel, STACK_USER);
681 			attr->sample_regs_user = PERF_REGS_MASK;
682 			attr->sample_stack_user = param->dump_size;
683 			attr->exclude_callchain_user = 1;
684 		} else {
685 			pr_info("Cannot use DWARF unwind for function trace event,"
686 				" falling back to framepointers.\n");
687 		}
688 	}
689 
690 	if (function) {
691 		pr_info("Disabling user space callchains for function trace event.\n");
692 		attr->exclude_callchain_user = 1;
693 	}
694 }
695 
696 static void
697 perf_evsel__reset_callgraph(struct perf_evsel *evsel,
698 			    struct callchain_param *param)
699 {
700 	struct perf_event_attr *attr = &evsel->attr;
701 
702 	perf_evsel__reset_sample_bit(evsel, CALLCHAIN);
703 	if (param->record_mode == CALLCHAIN_LBR) {
704 		perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
705 		attr->branch_sample_type &= ~(PERF_SAMPLE_BRANCH_USER |
706 					      PERF_SAMPLE_BRANCH_CALL_STACK);
707 	}
708 	if (param->record_mode == CALLCHAIN_DWARF) {
709 		perf_evsel__reset_sample_bit(evsel, REGS_USER);
710 		perf_evsel__reset_sample_bit(evsel, STACK_USER);
711 	}
712 }
713 
714 static void apply_config_terms(struct perf_evsel *evsel,
715 			       struct record_opts *opts)
716 {
717 	struct perf_evsel_config_term *term;
718 	struct list_head *config_terms = &evsel->config_terms;
719 	struct perf_event_attr *attr = &evsel->attr;
720 	struct callchain_param param;
721 	u32 dump_size = 0;
722 	int max_stack = 0;
723 	const char *callgraph_buf = NULL;
724 
725 	/* callgraph default */
726 	param.record_mode = callchain_param.record_mode;
727 
728 	list_for_each_entry(term, config_terms, list) {
729 		switch (term->type) {
730 		case PERF_EVSEL__CONFIG_TERM_PERIOD:
731 			attr->sample_period = term->val.period;
732 			attr->freq = 0;
733 			break;
734 		case PERF_EVSEL__CONFIG_TERM_FREQ:
735 			attr->sample_freq = term->val.freq;
736 			attr->freq = 1;
737 			break;
738 		case PERF_EVSEL__CONFIG_TERM_TIME:
739 			if (term->val.time)
740 				perf_evsel__set_sample_bit(evsel, TIME);
741 			else
742 				perf_evsel__reset_sample_bit(evsel, TIME);
743 			break;
744 		case PERF_EVSEL__CONFIG_TERM_CALLGRAPH:
745 			callgraph_buf = term->val.callgraph;
746 			break;
747 		case PERF_EVSEL__CONFIG_TERM_BRANCH:
748 			if (term->val.branch && strcmp(term->val.branch, "no")) {
749 				perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
750 				parse_branch_str(term->val.branch,
751 						 &attr->branch_sample_type);
752 			} else
753 				perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
754 			break;
755 		case PERF_EVSEL__CONFIG_TERM_STACK_USER:
756 			dump_size = term->val.stack_user;
757 			break;
758 		case PERF_EVSEL__CONFIG_TERM_MAX_STACK:
759 			max_stack = term->val.max_stack;
760 			break;
761 		case PERF_EVSEL__CONFIG_TERM_INHERIT:
762 			/*
763 			 * attr->inherit should have already been set by
764 			 * perf_evsel__config. If the user explicitly set
765 			 * inherit using config terms, it overrides the
766 			 * global opts->no_inherit setting.
767 			 */
768 			attr->inherit = term->val.inherit ? 1 : 0;
769 			break;
770 		case PERF_EVSEL__CONFIG_TERM_OVERWRITE:
771 			attr->write_backward = term->val.overwrite ? 1 : 0;
772 			break;
773 		default:
774 			break;
775 		}
776 	}
777 
778 	/* The user explicitly set a per-event callgraph, so clear the old setting and reset. */
779 	if ((callgraph_buf != NULL) || (dump_size > 0) || max_stack) {
780 		if (max_stack) {
781 			param.max_stack = max_stack;
782 			if (callgraph_buf == NULL)
783 				callgraph_buf = "fp";
784 		}
785 
786 		/* parse callgraph parameters */
787 		if (callgraph_buf != NULL) {
788 			if (!strcmp(callgraph_buf, "no")) {
789 				param.enabled = false;
790 				param.record_mode = CALLCHAIN_NONE;
791 			} else {
792 				param.enabled = true;
793 				if (parse_callchain_record(callgraph_buf, &param)) {
794 					pr_err("per-event callgraph setting for %s failed, "
795 					       "falling back to the global callgraph setting\n",
796 					       evsel->name);
797 					return;
798 				}
799 			}
800 		}
801 		if (dump_size > 0) {
802 			dump_size = round_up(dump_size, sizeof(u64));
803 			param.dump_size = dump_size;
804 		}
805 
806 		/* If a global callgraph is set, clear it */
807 		if (callchain_param.enabled)
808 			perf_evsel__reset_callgraph(evsel, &callchain_param);
809 
810 		/* set the per-event callgraph */
811 		if (param.enabled)
812 			perf_evsel__config_callchain(evsel, opts, &param);
813 	}
814 }
815 
816 /*
817  * The enable_on_exec/disabled value strategy:
818  *
819  *  1) For any type of traced program:
820  *    - all independent events and group leaders are disabled
821  *    - all group members are enabled
822  *
823  *     Group members are ruled by group leaders. They need to
824  *     be enabled, because the group scheduling relies on that.
825  *
826  *  2) For traced programs executed by perf:
827  *     - all independent events and group leaders have
828  *       enable_on_exec set
829  *     - we don't specifically enable or disable any event during
830  *       the record command
831  *
832  *     Independent events and group leaders are initially disabled
833  *     and get enabled by exec. Group members are ruled by group
834  *     leaders as stated in 1).
835  *
836  *  3) For traced programs attached by perf (pid/tid):
837  *     - we specifically enable or disable all events during
838  *       the record command
839  *
840  *     When attaching events to an already running traced process
841  *     we enable/disable events explicitly, as there's no
842  *     initial exec call in the traced process.
843  */
844 void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
845 			struct callchain_param *callchain)
846 {
847 	struct perf_evsel *leader = evsel->leader;
848 	struct perf_event_attr *attr = &evsel->attr;
849 	int track = evsel->tracking;
850 	bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread;
851 
852 	attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
853 	attr->inherit	    = !opts->no_inherit;
854 	attr->write_backward = opts->overwrite ? 1 : 0;
855 
856 	perf_evsel__set_sample_bit(evsel, IP);
857 	perf_evsel__set_sample_bit(evsel, TID);
858 
859 	if (evsel->sample_read) {
860 		perf_evsel__set_sample_bit(evsel, READ);
861 
862 		/*
863 		 * We need ID even in the case of a single event, because
864 		 * PERF_SAMPLE_READ processes ID-specific data.
865 		 */
866 		perf_evsel__set_sample_id(evsel, false);
867 
868 		/*
869 		 * Apply the group format only if we belong to a group
870 		 * with more than one member.
871 		 */
872 		if (leader->nr_members > 1) {
873 			attr->read_format |= PERF_FORMAT_GROUP;
874 			attr->inherit = 0;
875 		}
876 	}
877 
878 	/*
879 	 * We default some events to have a default interval, but keep
880 	 * it a weak assumption that the user can override.
881 	 */
882 	if (!attr->sample_period || (opts->user_freq != UINT_MAX ||
883 				     opts->user_interval != ULLONG_MAX)) {
884 		if (opts->freq) {
885 			perf_evsel__set_sample_bit(evsel, PERIOD);
886 			attr->freq		= 1;
887 			attr->sample_freq	= opts->freq;
888 		} else {
889 			attr->sample_period = opts->default_interval;
890 		}
891 	}
892 
893 	/*
894 	 * Disable sampling for all group members other than
895 	 * the leader, in case the leader 'leads' the sampling.
896 	 */
897 	if ((leader != evsel) && leader->sample_read) {
898 		attr->sample_freq   = 0;
899 		attr->sample_period = 0;
900 	}
901 
902 	if (opts->no_samples)
903 		attr->sample_freq = 0;
904 
905 	if (opts->inherit_stat) {
906 		evsel->attr.read_format |=
907 			PERF_FORMAT_TOTAL_TIME_ENABLED |
908 			PERF_FORMAT_TOTAL_TIME_RUNNING |
909 			PERF_FORMAT_ID;
910 		attr->inherit_stat = 1;
911 	}
912 
913 	if (opts->sample_address) {
914 		perf_evsel__set_sample_bit(evsel, ADDR);
915 		attr->mmap_data = track;
916 	}
917 
918 	/*
919 	 * We don't allow user space callchains for the function trace
920 	 * event, due to issues with page faults while tracing the page
921 	 * fault handler, and its overall tricky nature.
922 	 */
923 	if (perf_evsel__is_function_event(evsel))
924 		evsel->attr.exclude_callchain_user = 1;
925 
926 	if (callchain && callchain->enabled && !evsel->no_aux_samples)
927 		perf_evsel__config_callchain(evsel, opts, callchain);
928 
929 	if (opts->sample_intr_regs) {
930 		attr->sample_regs_intr = opts->sample_intr_regs;
931 		perf_evsel__set_sample_bit(evsel, REGS_INTR);
932 	}
933 
934 	if (target__has_cpu(&opts->target) || opts->sample_cpu)
935 		perf_evsel__set_sample_bit(evsel, CPU);
936 
937 	if (opts->period)
938 		perf_evsel__set_sample_bit(evsel, PERIOD);
939 
940 	/*
941 	 * When the user has explicitly disabled time, don't force it here.
942 	 */
943 	if (opts->sample_time &&
944 	    (!perf_missing_features.sample_id_all &&
945 	    (!opts->no_inherit || target__has_cpu(&opts->target) || per_cpu ||
946 	     opts->sample_time_set)))
947 		perf_evsel__set_sample_bit(evsel, TIME);
948 
949 	if (opts->raw_samples && !evsel->no_aux_samples) {
950 		perf_evsel__set_sample_bit(evsel, TIME);
951 		perf_evsel__set_sample_bit(evsel, RAW);
952 		perf_evsel__set_sample_bit(evsel, CPU);
953 	}
954 
955 	if (opts->sample_address)
956 		perf_evsel__set_sample_bit(evsel, DATA_SRC);
957 
958 	if (opts->no_buffering) {
959 		attr->watermark = 0;
960 		attr->wakeup_events = 1;
961 	}
962 	if (opts->branch_stack && !evsel->no_aux_samples) {
963 		perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
964 		attr->branch_sample_type = opts->branch_stack;
965 	}
966 
967 	if (opts->sample_weight)
968 		perf_evsel__set_sample_bit(evsel, WEIGHT);
969 
970 	attr->task  = track;
971 	attr->mmap  = track;
972 	attr->mmap2 = track && !perf_missing_features.mmap2;
973 	attr->comm  = track;
974 
975 	if (opts->record_namespaces)
976 		attr->namespaces  = track;
977 
978 	if (opts->record_switch_events)
979 		attr->context_switch = track;
980 
981 	if (opts->sample_transaction)
982 		perf_evsel__set_sample_bit(evsel, TRANSACTION);
983 
984 	if (opts->running_time) {
985 		evsel->attr.read_format |=
986 			PERF_FORMAT_TOTAL_TIME_ENABLED |
987 			PERF_FORMAT_TOTAL_TIME_RUNNING;
988 	}
989 
990 	/*
991 	 * XXX see the function comment above
992 	 *
993 	 * Disable only independent events or group leaders,
994 	 * keeping group members enabled.
995 	 */
996 	if (perf_evsel__is_group_leader(evsel))
997 		attr->disabled = 1;
998 
999 	/*
1000 	 * Setting enable_on_exec for independent events and
1001 	 * group leaders for traced programs executed by perf.
1002 	 */
1003 	if (target__none(&opts->target) && perf_evsel__is_group_leader(evsel) &&
1004 		!opts->initial_delay)
1005 		attr->enable_on_exec = 1;
1006 
1007 	if (evsel->immediate) {
1008 		attr->disabled = 0;
1009 		attr->enable_on_exec = 0;
1010 	}
1011 
1012 	clockid = opts->clockid;
1013 	if (opts->use_clockid) {
1014 		attr->use_clockid = 1;
1015 		attr->clockid = opts->clockid;
1016 	}
1017 
1018 	if (evsel->precise_max)
1019 		perf_event_attr__set_max_precise_ip(attr);
1020 
1021 	if (opts->all_user) {
1022 		attr->exclude_kernel = 1;
1023 		attr->exclude_user   = 0;
1024 	}
1025 
1026 	if (opts->all_kernel) {
1027 		attr->exclude_kernel = 0;
1028 		attr->exclude_user   = 1;
1029 	}
1030 
1031 	/*
1032 	 * Apply event-specific term settings;
1033 	 * they override any global configuration.
1034 	 */
1035 	apply_config_terms(evsel, opts);
1036 
1037 	evsel->ignore_missing_thread = opts->ignore_missing_thread;
1038 }
1039 
1040 static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
1041 {
1042 	if (evsel->system_wide)
1043 		nthreads = 1;
1044 
1045 	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
1046 
1047 	if (evsel->fd) {
1048 		int cpu, thread;
1049 		for (cpu = 0; cpu < ncpus; cpu++) {
1050 			for (thread = 0; thread < nthreads; thread++) {
1051 				FD(evsel, cpu, thread) = -1;
1052 			}
1053 		}
1054 	}
1055 
1056 	return evsel->fd != NULL ? 0 : -ENOMEM;
1057 }
1058 
1059 static int perf_evsel__run_ioctl(struct perf_evsel *evsel,
1060 			  int ioc,  void *arg)
1061 {
1062 	int cpu, thread;
1063 
1064 	for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
1065 		for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
1066 			int fd = FD(evsel, cpu, thread),
1067 			    err = ioctl(fd, ioc, arg);
1068 
1069 			if (err)
1070 				return err;
1071 		}
1072 	}
1073 
1074 	return 0;
1075 }
1076 
1077 int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter)
1078 {
1079 	return perf_evsel__run_ioctl(evsel,
1080 				     PERF_EVENT_IOC_SET_FILTER,
1081 				     (void *)filter);
1082 }
1083 
1084 int perf_evsel__set_filter(struct perf_evsel *evsel, const char *filter)
1085 {
1086 	char *new_filter = strdup(filter);
1087 
1088 	if (new_filter != NULL) {
1089 		free(evsel->filter);
1090 		evsel->filter = new_filter;
1091 		return 0;
1092 	}
1093 
1094 	return -1;
1095 }
1096 
1097 static int perf_evsel__append_filter(struct perf_evsel *evsel,
1098 				     const char *fmt, const char *filter)
1099 {
1100 	char *new_filter;
1101 
1102 	if (evsel->filter == NULL)
1103 		return perf_evsel__set_filter(evsel, filter);
1104 
1105 	if (asprintf(&new_filter, fmt, evsel->filter, filter) > 0) {
1106 		free(evsel->filter);
1107 		evsel->filter = new_filter;
1108 		return 0;
1109 	}
1110 
1111 	return -1;
1112 }
1113 
1114 int perf_evsel__append_tp_filter(struct perf_evsel *evsel, const char *filter)
1115 {
1116 	return perf_evsel__append_filter(evsel, "(%s) && (%s)", filter);
1117 }
1118 
1119 int perf_evsel__append_addr_filter(struct perf_evsel *evsel, const char *filter)
1120 {
1121 	return perf_evsel__append_filter(evsel, "%s,%s", filter);
1122 }
1123 
1124 int perf_evsel__enable(struct perf_evsel *evsel)
1125 {
1126 	return perf_evsel__run_ioctl(evsel,
1127 				     PERF_EVENT_IOC_ENABLE,
1128 				     0);
1129 }
1130 
1131 int perf_evsel__disable(struct perf_evsel *evsel)
1132 {
1133 	return perf_evsel__run_ioctl(evsel,
1134 				     PERF_EVENT_IOC_DISABLE,
1135 				     0);
1136 }
1137 
1138 int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
1139 {
1140 	if (ncpus == 0 || nthreads == 0)
1141 		return 0;
1142 
1143 	if (evsel->system_wide)
1144 		nthreads = 1;
1145 
1146 	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
1147 	if (evsel->sample_id == NULL)
1148 		return -ENOMEM;
1149 
1150 	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
1151 	if (evsel->id == NULL) {
1152 		xyarray__delete(evsel->sample_id);
1153 		evsel->sample_id = NULL;
1154 		return -ENOMEM;
1155 	}
1156 
1157 	return 0;
1158 }
1159 
1160 static void perf_evsel__free_fd(struct perf_evsel *evsel)
1161 {
1162 	xyarray__delete(evsel->fd);
1163 	evsel->fd = NULL;
1164 }
1165 
1166 static void perf_evsel__free_id(struct perf_evsel *evsel)
1167 {
1168 	xyarray__delete(evsel->sample_id);
1169 	evsel->sample_id = NULL;
1170 	zfree(&evsel->id);
1171 }
1172 
1173 static void perf_evsel__free_config_terms(struct perf_evsel *evsel)
1174 {
1175 	struct perf_evsel_config_term *term, *h;
1176 
1177 	list_for_each_entry_safe(term, h, &evsel->config_terms, list) {
1178 		list_del(&term->list);
1179 		free(term);
1180 	}
1181 }
1182 
1183 void perf_evsel__close_fd(struct perf_evsel *evsel)
1184 {
1185 	int cpu, thread;
1186 
1187 	for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++)
1188 		for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) {
1189 			close(FD(evsel, cpu, thread));
1190 			FD(evsel, cpu, thread) = -1;
1191 		}
1192 }
1193 
1194 void perf_evsel__exit(struct perf_evsel *evsel)
1195 {
1196 	assert(list_empty(&evsel->node));
1197 	assert(evsel->evlist == NULL);
1198 	perf_evsel__free_fd(evsel);
1199 	perf_evsel__free_id(evsel);
1200 	perf_evsel__free_config_terms(evsel);
1201 	close_cgroup(evsel->cgrp);
1202 	cpu_map__put(evsel->cpus);
1203 	cpu_map__put(evsel->own_cpus);
1204 	thread_map__put(evsel->threads);
1205 	zfree(&evsel->group_name);
1206 	zfree(&evsel->name);
1207 	perf_evsel__object.fini(evsel);
1208 }
1209 
1210 void perf_evsel__delete(struct perf_evsel *evsel)
1211 {
1212 	perf_evsel__exit(evsel);
1213 	free(evsel);
1214 }
1215 
1216 void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu, int thread,
1217 				struct perf_counts_values *count)
1218 {
1219 	struct perf_counts_values tmp;
1220 
1221 	if (!evsel->prev_raw_counts)
1222 		return;
1223 
1224 	if (cpu == -1) {
1225 		tmp = evsel->prev_raw_counts->aggr;
1226 		evsel->prev_raw_counts->aggr = *count;
1227 	} else {
1228 		tmp = *perf_counts(evsel->prev_raw_counts, cpu, thread);
1229 		*perf_counts(evsel->prev_raw_counts, cpu, thread) = *count;
1230 	}
1231 
1232 	count->val = count->val - tmp.val;
1233 	count->ena = count->ena - tmp.ena;
1234 	count->run = count->run - tmp.run;
1235 }
1236 
1237 void perf_counts_values__scale(struct perf_counts_values *count,
1238 			       bool scale, s8 *pscaled)
1239 {
1240 	s8 scaled = 0;
1241 
1242 	if (scale) {
1243 		if (count->run == 0) {
1244 			scaled = -1;
1245 			count->val = 0;
1246 		} else if (count->run < count->ena) {
1247 			scaled = 1;
1248 			count->val = (u64)((double) count->val * count->ena / count->run + 0.5);
1249 		}
1250 	} else
1251 		count->ena = count->run = 0;
1252 
1253 	if (pscaled)
1254 		*pscaled = scaled;
1255 }
1256 
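/*
 * Worked example (illustrative): a multiplexed counter with val = 1000,
 * ena = 200 and run = 100 scales to val = 1000 * 200 / 100 + 0.5 = 2000,
 * and *pscaled is set to 1 to flag the estimated value.
 */
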
1257 static int perf_evsel__read_size(struct perf_evsel *evsel)
1258 {
1259 	u64 read_format = evsel->attr.read_format;
1260 	int entry = sizeof(u64); /* value */
1261 	int size = 0;
1262 	int nr = 1;
1263 
1264 	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1265 		size += sizeof(u64);
1266 
1267 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1268 		size += sizeof(u64);
1269 
1270 	if (read_format & PERF_FORMAT_ID)
1271 		entry += sizeof(u64);
1272 
1273 	if (read_format & PERF_FORMAT_GROUP) {
1274 		nr = evsel->nr_members;
1275 		size += sizeof(u64);
1276 	}
1277 
1278 	size += entry * nr;
1279 	return size;
1280 }
1281 
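/*
 * Worked example (illustrative): read_format = PERF_FORMAT_GROUP |
 * PERF_FORMAT_ID | PERF_FORMAT_TOTAL_TIME_ENABLED |
 * PERF_FORMAT_TOTAL_TIME_RUNNING with nr_members == 2 gives
 * 8 (nr) + 8 (enabled) + 8 (running) + 2 * 16 (value + id) = 56 bytes.
 */
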
1282 int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
1283 		     struct perf_counts_values *count)
1284 {
1285 	size_t size = perf_evsel__read_size(evsel);
1286 
1287 	memset(count, 0, sizeof(*count));
1288 
1289 	if (FD(evsel, cpu, thread) < 0)
1290 		return -EINVAL;
1291 
1292 	if (readn(FD(evsel, cpu, thread), count->values, size) <= 0)
1293 		return -errno;
1294 
1295 	return 0;
1296 }
1297 
1298 static int
1299 perf_evsel__read_one(struct perf_evsel *evsel, int cpu, int thread)
1300 {
1301 	struct perf_counts_values *count = perf_counts(evsel->counts, cpu, thread);
1302 
1303 	return perf_evsel__read(evsel, cpu, thread, count);
1304 }
1305 
1306 static void
1307 perf_evsel__set_count(struct perf_evsel *counter, int cpu, int thread,
1308 		      u64 val, u64 ena, u64 run)
1309 {
1310 	struct perf_counts_values *count;
1311 
1312 	count = perf_counts(counter->counts, cpu, thread);
1313 
1314 	count->val    = val;
1315 	count->ena    = ena;
1316 	count->run    = run;
1317 	count->loaded = true;
1318 }
1319 
1320 static int
1321 perf_evsel__process_group_data(struct perf_evsel *leader,
1322 			       int cpu, int thread, u64 *data)
1323 {
1324 	u64 read_format = leader->attr.read_format;
1325 	struct sample_read_value *v;
1326 	u64 nr, ena = 0, run = 0, i;
1327 
1328 	nr = *data++;
1329 
1330 	if (nr != (u64) leader->nr_members)
1331 		return -EINVAL;
1332 
1333 	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1334 		ena = *data++;
1335 
1336 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1337 		run = *data++;
1338 
1339 	v = (struct sample_read_value *) data;
1340 
1341 	perf_evsel__set_count(leader, cpu, thread,
1342 			      v[0].value, ena, run);
1343 
1344 	for (i = 1; i < nr; i++) {
1345 		struct perf_evsel *counter;
1346 
1347 		counter = perf_evlist__id2evsel(leader->evlist, v[i].id);
1348 		if (!counter)
1349 			return -EINVAL;
1350 
1351 		perf_evsel__set_count(counter, cpu, thread,
1352 				      v[i].value, ena, run);
1353 	}
1354 
1355 	return 0;
1356 }
1357 
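/*
 * For reference, the buffer parsed above follows the PERF_FORMAT_GROUP
 * read layout from the perf_event_open(2) ABI:
 *
 *	struct read_format {
 *		u64 nr;
 *		u64 time_enabled;	(if PERF_FORMAT_TOTAL_TIME_ENABLED)
 *		u64 time_running;	(if PERF_FORMAT_TOTAL_TIME_RUNNING)
 *		struct { u64 value; u64 id; } cntr[nr];
 *	};
 */
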
1358 static int
1359 perf_evsel__read_group(struct perf_evsel *leader, int cpu, int thread)
1360 {
1361 	struct perf_stat_evsel *ps = leader->priv;
1362 	u64 read_format = leader->attr.read_format;
1363 	int size = perf_evsel__read_size(leader);
1364 	u64 *data = ps->group_data;
1365 
1366 	if (!(read_format & PERF_FORMAT_ID))
1367 		return -EINVAL;
1368 
1369 	if (!perf_evsel__is_group_leader(leader))
1370 		return -EINVAL;
1371 
1372 	if (!data) {
1373 		data = zalloc(size);
1374 		if (!data)
1375 			return -ENOMEM;
1376 
1377 		ps->group_data = data;
1378 	}
1379 
1380 	if (FD(leader, cpu, thread) < 0)
1381 		return -EINVAL;
1382 
1383 	if (readn(FD(leader, cpu, thread), data, size) <= 0)
1384 		return -errno;
1385 
1386 	return perf_evsel__process_group_data(leader, cpu, thread, data);
1387 }
1388 
1389 int perf_evsel__read_counter(struct perf_evsel *evsel, int cpu, int thread)
1390 {
1391 	u64 read_format = evsel->attr.read_format;
1392 
1393 	if (read_format & PERF_FORMAT_GROUP)
1394 		return perf_evsel__read_group(evsel, cpu, thread);
1395 	else
1396 		return perf_evsel__read_one(evsel, cpu, thread);
1397 }
1398 
1399 int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
1400 			      int cpu, int thread, bool scale)
1401 {
1402 	struct perf_counts_values count;
1403 	size_t nv = scale ? 3 : 1;
1404 
1405 	if (FD(evsel, cpu, thread) < 0)
1406 		return -EINVAL;
1407 
1408 	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1, thread + 1) < 0)
1409 		return -ENOMEM;
1410 
1411 	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) <= 0)
1412 		return -errno;
1413 
1414 	perf_evsel__compute_deltas(evsel, cpu, thread, &count);
1415 	perf_counts_values__scale(&count, scale, NULL);
1416 	*perf_counts(evsel->counts, cpu, thread) = count;
1417 	return 0;
1418 }
1419 
1420 static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
1421 {
1422 	struct perf_evsel *leader = evsel->leader;
1423 	int fd;
1424 
1425 	if (perf_evsel__is_group_leader(evsel))
1426 		return -1;
1427 
1428 	/*
1429 	 * The leader must already be processed/open;
1430 	 * if not, it's a bug.
1431 	 */
1432 	BUG_ON(!leader->fd);
1433 
1434 	fd = FD(leader, cpu, thread);
1435 	BUG_ON(fd == -1);
1436 
1437 	return fd;
1438 }
1439 
1440 struct bit_names {
1441 	int bit;
1442 	const char *name;
1443 };
1444 
1445 static void __p_bits(char *buf, size_t size, u64 value, struct bit_names *bits)
1446 {
1447 	bool first_bit = true;
1448 	int i = 0;
1449 
1450 	do {
1451 		if (value & bits[i].bit) {
1452 			buf += scnprintf(buf, size, "%s%s", first_bit ? "" : "|", bits[i].name);
1453 			first_bit = false;
1454 		}
1455 	} while (bits[++i].name != NULL);
1456 }
1457 
1458 static void __p_sample_type(char *buf, size_t size, u64 value)
1459 {
1460 #define bit_name(n) { PERF_SAMPLE_##n, #n }
1461 	struct bit_names bits[] = {
1462 		bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR),
1463 		bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
1464 		bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
1465 		bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
1466 		bit_name(IDENTIFIER), bit_name(REGS_INTR), bit_name(DATA_SRC),
1467 		bit_name(WEIGHT),
1468 		{ .name = NULL, }
1469 	};
1470 #undef bit_name
1471 	__p_bits(buf, size, value, bits);
1472 }
1473 
1474 static void __p_branch_sample_type(char *buf, size_t size, u64 value)
1475 {
1476 #define bit_name(n) { PERF_SAMPLE_BRANCH_##n, #n }
1477 	struct bit_names bits[] = {
1478 		bit_name(USER), bit_name(KERNEL), bit_name(HV), bit_name(ANY),
1479 		bit_name(ANY_CALL), bit_name(ANY_RETURN), bit_name(IND_CALL),
1480 		bit_name(ABORT_TX), bit_name(IN_TX), bit_name(NO_TX),
1481 		bit_name(COND), bit_name(CALL_STACK), bit_name(IND_JUMP),
1482 		bit_name(CALL), bit_name(NO_FLAGS), bit_name(NO_CYCLES),
1483 		{ .name = NULL, }
1484 	};
1485 #undef bit_name
1486 	__p_bits(buf, size, value, bits);
1487 }
1488 
1489 static void __p_read_format(char *buf, size_t size, u64 value)
1490 {
1491 #define bit_name(n) { PERF_FORMAT_##n, #n }
1492 	struct bit_names bits[] = {
1493 		bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING),
1494 		bit_name(ID), bit_name(GROUP),
1495 		{ .name = NULL, }
1496 	};
1497 #undef bit_name
1498 	__p_bits(buf, size, value, bits);
1499 }
1500 
1501 #define BUF_SIZE		1024
1502 
1503 #define p_hex(val)		snprintf(buf, BUF_SIZE, "%#"PRIx64, (uint64_t)(val))
1504 #define p_unsigned(val)		snprintf(buf, BUF_SIZE, "%"PRIu64, (uint64_t)(val))
1505 #define p_signed(val)		snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)(val))
1506 #define p_sample_type(val)	__p_sample_type(buf, BUF_SIZE, val)
1507 #define p_branch_sample_type(val) __p_branch_sample_type(buf, BUF_SIZE, val)
1508 #define p_read_format(val)	__p_read_format(buf, BUF_SIZE, val)
1509 
1510 #define PRINT_ATTRn(_n, _f, _p)				\
1511 do {							\
1512 	if (attr->_f) {					\
1513 		_p(attr->_f);				\
1514 		ret += attr__fprintf(fp, _n, buf, priv);\
1515 	}						\
1516 } while (0)
1517 
1518 #define PRINT_ATTRf(_f, _p)	PRINT_ATTRn(#_f, _f, _p)
1519 
1520 int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
1521 			     attr__fprintf_f attr__fprintf, void *priv)
1522 {
1523 	char buf[BUF_SIZE];
1524 	int ret = 0;
1525 
1526 	PRINT_ATTRf(type, p_unsigned);
1527 	PRINT_ATTRf(size, p_unsigned);
1528 	PRINT_ATTRf(config, p_hex);
1529 	PRINT_ATTRn("{ sample_period, sample_freq }", sample_period, p_unsigned);
1530 	PRINT_ATTRf(sample_type, p_sample_type);
1531 	PRINT_ATTRf(read_format, p_read_format);
1532 
1533 	PRINT_ATTRf(disabled, p_unsigned);
1534 	PRINT_ATTRf(inherit, p_unsigned);
1535 	PRINT_ATTRf(pinned, p_unsigned);
1536 	PRINT_ATTRf(exclusive, p_unsigned);
1537 	PRINT_ATTRf(exclude_user, p_unsigned);
1538 	PRINT_ATTRf(exclude_kernel, p_unsigned);
1539 	PRINT_ATTRf(exclude_hv, p_unsigned);
1540 	PRINT_ATTRf(exclude_idle, p_unsigned);
1541 	PRINT_ATTRf(mmap, p_unsigned);
1542 	PRINT_ATTRf(comm, p_unsigned);
1543 	PRINT_ATTRf(freq, p_unsigned);
1544 	PRINT_ATTRf(inherit_stat, p_unsigned);
1545 	PRINT_ATTRf(enable_on_exec, p_unsigned);
1546 	PRINT_ATTRf(task, p_unsigned);
1547 	PRINT_ATTRf(watermark, p_unsigned);
1548 	PRINT_ATTRf(precise_ip, p_unsigned);
1549 	PRINT_ATTRf(mmap_data, p_unsigned);
1550 	PRINT_ATTRf(sample_id_all, p_unsigned);
1551 	PRINT_ATTRf(exclude_host, p_unsigned);
1552 	PRINT_ATTRf(exclude_guest, p_unsigned);
1553 	PRINT_ATTRf(exclude_callchain_kernel, p_unsigned);
1554 	PRINT_ATTRf(exclude_callchain_user, p_unsigned);
1555 	PRINT_ATTRf(mmap2, p_unsigned);
1556 	PRINT_ATTRf(comm_exec, p_unsigned);
1557 	PRINT_ATTRf(use_clockid, p_unsigned);
1558 	PRINT_ATTRf(context_switch, p_unsigned);
1559 	PRINT_ATTRf(write_backward, p_unsigned);
1560 
1561 	PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned);
1562 	PRINT_ATTRf(bp_type, p_unsigned);
1563 	PRINT_ATTRn("{ bp_addr, config1 }", bp_addr, p_hex);
1564 	PRINT_ATTRn("{ bp_len, config2 }", bp_len, p_hex);
1565 	PRINT_ATTRf(branch_sample_type, p_branch_sample_type);
1566 	PRINT_ATTRf(sample_regs_user, p_hex);
1567 	PRINT_ATTRf(sample_stack_user, p_unsigned);
1568 	PRINT_ATTRf(clockid, p_signed);
1569 	PRINT_ATTRf(sample_regs_intr, p_hex);
1570 	PRINT_ATTRf(aux_watermark, p_unsigned);
1571 	PRINT_ATTRf(sample_max_stack, p_unsigned);
1572 
1573 	return ret;
1574 }
1575 
1576 static int __open_attr__fprintf(FILE *fp, const char *name, const char *val,
1577 				void *priv __maybe_unused)
1578 {
1579 	return fprintf(fp, "  %-32s %s\n", name, val);
1580 }
1581 
1582 static bool ignore_missing_thread(struct perf_evsel *evsel,
1583 				  struct thread_map *threads,
1584 				  int thread, int err)
1585 {
1586 	if (!evsel->ignore_missing_thread)
1587 		return false;
1588 
1589 	/* The system-wide setup does not work with threads. */
1590 	if (evsel->system_wide)
1591 		return false;
1592 
1593 	/* -ESRCH is the perf_event_open() errno for pids that are not found. */
1594 	if (err != -ESRCH)
1595 		return false;
1596 
1597 	/* If there's only one thread, let it fail. */
1598 	if (threads->nr == 1)
1599 		return false;
1600 
1601 	if (thread_map__remove(threads, thread))
1602 		return false;
1603 
1604 	pr_warning("WARNING: Ignored open failure for pid %d\n",
1605 		   thread_map__pid(threads, thread));
1606 	return true;
1607 }
1608 
1609 int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
1610 		     struct thread_map *threads)
1611 {
1612 	int cpu, thread, nthreads;
1613 	unsigned long flags = PERF_FLAG_FD_CLOEXEC;
1614 	int pid = -1, err;
1615 	enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;
1616 
1617 	if (perf_missing_features.write_backward && evsel->attr.write_backward)
1618 		return -EINVAL;
1619 
1620 	if (cpus == NULL) {
1621 		static struct cpu_map *empty_cpu_map;
1622 
1623 		if (empty_cpu_map == NULL) {
1624 			empty_cpu_map = cpu_map__dummy_new();
1625 			if (empty_cpu_map == NULL)
1626 				return -ENOMEM;
1627 		}
1628 
1629 		cpus = empty_cpu_map;
1630 	}
1631 
1632 	if (threads == NULL) {
1633 		static struct thread_map *empty_thread_map;
1634 
1635 		if (empty_thread_map == NULL) {
1636 			empty_thread_map = thread_map__new_by_tid(-1);
1637 			if (empty_thread_map == NULL)
1638 				return -ENOMEM;
1639 		}
1640 
1641 		threads = empty_thread_map;
1642 	}
1643 
1644 	if (evsel->system_wide)
1645 		nthreads = 1;
1646 	else
1647 		nthreads = threads->nr;
1648 
1649 	if (evsel->fd == NULL &&
1650 	    perf_evsel__alloc_fd(evsel, cpus->nr, nthreads) < 0)
1651 		return -ENOMEM;
1652 
1653 	if (evsel->cgrp) {
1654 		flags |= PERF_FLAG_PID_CGROUP;
1655 		pid = evsel->cgrp->fd;
1656 	}
1657 
1658 fallback_missing_features:
1659 	if (perf_missing_features.clockid_wrong)
1660 		evsel->attr.clockid = CLOCK_MONOTONIC; /* should always work */
1661 	if (perf_missing_features.clockid) {
1662 		evsel->attr.use_clockid = 0;
1663 		evsel->attr.clockid = 0;
1664 	}
1665 	if (perf_missing_features.cloexec)
1666 		flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC;
1667 	if (perf_missing_features.mmap2)
1668 		evsel->attr.mmap2 = 0;
1669 	if (perf_missing_features.exclude_guest)
1670 		evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
1671 	if (perf_missing_features.lbr_flags)
1672 		evsel->attr.branch_sample_type &= ~(PERF_SAMPLE_BRANCH_NO_FLAGS |
1673 				     PERF_SAMPLE_BRANCH_NO_CYCLES);
1674 	if (perf_missing_features.group_read && evsel->attr.inherit)
1675 		evsel->attr.read_format &= ~(PERF_FORMAT_GROUP|PERF_FORMAT_ID);
1676 retry_sample_id:
1677 	if (perf_missing_features.sample_id_all)
1678 		evsel->attr.sample_id_all = 0;
1679 
1680 	if (verbose >= 2) {
1681 		fprintf(stderr, "%.60s\n", graph_dotted_line);
1682 		fprintf(stderr, "perf_event_attr:\n");
1683 		perf_event_attr__fprintf(stderr, &evsel->attr, __open_attr__fprintf, NULL);
1684 		fprintf(stderr, "%.60s\n", graph_dotted_line);
1685 	}
1686 
1687 	for (cpu = 0; cpu < cpus->nr; cpu++) {
1688 
1689 		for (thread = 0; thread < nthreads; thread++) {
1690 			int fd, group_fd;
1691 
1692 			if (!evsel->cgrp && !evsel->system_wide)
1693 				pid = thread_map__pid(threads, thread);
1694 
1695 			group_fd = get_group_fd(evsel, cpu, thread);
1696 retry_open:
1697 			pr_debug2("sys_perf_event_open: pid %d  cpu %d  group_fd %d  flags %#lx",
1698 				  pid, cpus->map[cpu], group_fd, flags);
1699 
1700 			test_attr__ready();
1701 
1702 			fd = sys_perf_event_open(&evsel->attr, pid, cpus->map[cpu],
1703 						 group_fd, flags);
1704 
1705 			FD(evsel, cpu, thread) = fd;
1706 
1707 			if (fd < 0) {
1708 				err = -errno;
1709 
1710 				if (ignore_missing_thread(evsel, threads, thread, err)) {
1711 					/*
1712 					 * We just removed 1 thread, so take a step
1713 					 * back on thread index and lower the upper
1714 					 * nthreads limit.
1715 					 */
1716 					nthreads--;
1717 					thread--;
1718 
1719 					/* ... and pretend like nothing has happened. */
1720 					err = 0;
1721 					continue;
1722 				}
1723 
1724 				pr_debug2("\nsys_perf_event_open failed, error %d\n",
1725 					  err);
1726 				goto try_fallback;
1727 			}
1728 
1729 			pr_debug2(" = %d\n", fd);
1730 
1731 			if (evsel->bpf_fd >= 0) {
1732 				int evt_fd = fd;
1733 				int bpf_fd = evsel->bpf_fd;
1734 
1735 				err = ioctl(evt_fd,
1736 					    PERF_EVENT_IOC_SET_BPF,
1737 					    bpf_fd);
1738 				if (err && errno != EEXIST) {
1739 					pr_err("failed to attach bpf fd %d: %s\n",
1740 					       bpf_fd, strerror(errno));
1741 					err = -EINVAL;
1742 					goto out_close;
1743 				}
1744 			}
1745 
1746 			set_rlimit = NO_CHANGE;
1747 
1748 			/*
1749 			 * If we succeeded but had to kill clockid, fail and
1750 			 * have perf_evsel__open_strerror() print us a nice
1751 			 * error.
1752 			 */
1753 			if (perf_missing_features.clockid ||
1754 			    perf_missing_features.clockid_wrong) {
1755 				err = -EINVAL;
1756 				goto out_close;
1757 			}
1758 		}
1759 	}
1760 
1761 	return 0;
1762 
1763 try_fallback:
1764 	/*
1765 	 * perf stat needs between 5 and 22 fds per CPU. When we run out
1766 	 * of them, try to increase the limits.
1767 	 */
1768 	if (err == -EMFILE && set_rlimit < INCREASED_MAX) {
1769 		struct rlimit l;
1770 		int old_errno = errno;
1771 
1772 		if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
1773 			if (set_rlimit == NO_CHANGE)
1774 				l.rlim_cur = l.rlim_max;
1775 			else {
1776 				l.rlim_cur = l.rlim_max + 1000;
1777 				l.rlim_max = l.rlim_cur;
1778 			}
1779 			if (setrlimit(RLIMIT_NOFILE, &l) == 0) {
1780 				set_rlimit++;
1781 				errno = old_errno;
1782 				goto retry_open;
1783 			}
1784 		}
1785 		errno = old_errno;
1786 	}
1787 
1788 	if (err != -EINVAL || cpu > 0 || thread > 0)
1789 		goto out_close;
1790 
1791 	/*
1792 	 * Must probe features in the order they were added to the
1793 	 * perf_event_attr interface.
1794 	 */
1795 	if (!perf_missing_features.write_backward && evsel->attr.write_backward) {
1796 		perf_missing_features.write_backward = true;
1797 		pr_debug2("switching off write_backward\n");
1798 		goto out_close;
1799 	} else if (!perf_missing_features.clockid_wrong && evsel->attr.use_clockid) {
1800 		perf_missing_features.clockid_wrong = true;
1801 		pr_debug2("switching off clockid\n");
1802 		goto fallback_missing_features;
1803 	} else if (!perf_missing_features.clockid && evsel->attr.use_clockid) {
1804 		perf_missing_features.clockid = true;
1805 		pr_debug2("switching off use_clockid\n");
1806 		goto fallback_missing_features;
1807 	} else if (!perf_missing_features.cloexec && (flags & PERF_FLAG_FD_CLOEXEC)) {
1808 		perf_missing_features.cloexec = true;
1809 		pr_debug2("switching off cloexec flag\n");
1810 		goto fallback_missing_features;
1811 	} else if (!perf_missing_features.mmap2 && evsel->attr.mmap2) {
1812 		perf_missing_features.mmap2 = true;
1813 		pr_debug2("switching off mmap2\n");
1814 		goto fallback_missing_features;
1815 	} else if (!perf_missing_features.exclude_guest &&
1816 		   (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
1817 		perf_missing_features.exclude_guest = true;
1818 		pr_debug2("switching off exclude_guest, exclude_host\n");
1819 		goto fallback_missing_features;
1820 	} else if (!perf_missing_features.sample_id_all) {
1821 		perf_missing_features.sample_id_all = true;
1822 		pr_debug2("switching off sample_id_all\n");
1823 		goto retry_sample_id;
1824 	} else if (!perf_missing_features.lbr_flags &&
1825 			(evsel->attr.branch_sample_type &
1826 			 (PERF_SAMPLE_BRANCH_NO_CYCLES |
1827 			  PERF_SAMPLE_BRANCH_NO_FLAGS))) {
1828 		perf_missing_features.lbr_flags = true;
1829 		pr_debug2("switching off branch sample type no (cycles/flags)\n");
1830 		goto fallback_missing_features;
1831 	} else if (!perf_missing_features.group_read &&
1832 		    evsel->attr.inherit &&
1833 		   (evsel->attr.read_format & PERF_FORMAT_GROUP)) {
1834 		perf_missing_features.group_read = true;
1835 		pr_debug2("switching off group read\n");
1836 		goto fallback_missing_features;
1837 	}
1838 out_close:
1839 	do {
1840 		while (--thread >= 0) {
1841 			close(FD(evsel, cpu, thread));
1842 			FD(evsel, cpu, thread) = -1;
1843 		}
1844 		thread = nthreads;
1845 	} while (--cpu >= 0);
1846 	return err;
1847 }
1848 
1849 void perf_evsel__close(struct perf_evsel *evsel)
1850 {
1851 	if (evsel->fd == NULL)
1852 		return;
1853 
1854 	perf_evsel__close_fd(evsel);
1855 	perf_evsel__free_fd(evsel);
1856 }
1857 
1858 int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
1859 			     struct cpu_map *cpus)
1860 {
1861 	return perf_evsel__open(evsel, cpus, NULL);
1862 }
1863 
1864 int perf_evsel__open_per_thread(struct perf_evsel *evsel,
1865 				struct thread_map *threads)
1866 {
1867 	return perf_evsel__open(evsel, NULL, threads);
1868 }
1869 
1870 static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
1871 				       const union perf_event *event,
1872 				       struct perf_sample *sample)
1873 {
1874 	u64 type = evsel->attr.sample_type;
1875 	const u64 *array = event->sample.array;
1876 	bool swapped = evsel->needs_swap;
1877 	union u64_swap u;
1878 
1879 	array += ((event->header.size -
1880 		   sizeof(event->header)) / sizeof(u64)) - 1;
1881 
1882 	if (type & PERF_SAMPLE_IDENTIFIER) {
1883 		sample->id = *array;
1884 		array--;
1885 	}
1886 
1887 	if (type & PERF_SAMPLE_CPU) {
1888 		u.val64 = *array;
1889 		if (swapped) {
1890 			/* undo swap of u64, then swap on individual u32s */
1891 			u.val64 = bswap_64(u.val64);
1892 			u.val32[0] = bswap_32(u.val32[0]);
1893 		}
1894 
1895 		sample->cpu = u.val32[0];
1896 		array--;
1897 	}
1898 
1899 	if (type & PERF_SAMPLE_STREAM_ID) {
1900 		sample->stream_id = *array;
1901 		array--;
1902 	}
1903 
1904 	if (type & PERF_SAMPLE_ID) {
1905 		sample->id = *array;
1906 		array--;
1907 	}
1908 
1909 	if (type & PERF_SAMPLE_TIME) {
1910 		sample->time = *array;
1911 		array--;
1912 	}
1913 
1914 	if (type & PERF_SAMPLE_TID) {
1915 		u.val64 = *array;
1916 		if (swapped) {
1917 			/* undo swap of u64, then swap on individual u32s */
1918 			u.val64 = bswap_64(u.val64);
1919 			u.val32[0] = bswap_32(u.val32[0]);
1920 			u.val32[1] = bswap_32(u.val32[1]);
1921 		}
1922 
1923 		sample->pid = u.val32[0];
1924 		sample->tid = u.val32[1];
1925 		array--;
1926 	}
1927 
1928 	return 0;
1929 }
1930 
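/*
 * Does a field of @size bytes at @offset run past @endp?  The @max_size
 * comparison comes first so that an absurd @size from a corrupt event
 * cannot wrap the @offset + @size pointer arithmetic: @max_size is the
 * u16 size from the event header.
 */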
1931 static inline bool overflow(const void *endp, u16 max_size, const void *offset,
1932 			    u64 size)
1933 {
1934 	return size > max_size || offset + size > endp;
1935 }
1936 
1937 #define OVERFLOW_CHECK(offset, size, max_size)				\
1938 	do {								\
1939 		if (overflow(endp, (max_size), (offset), (size)))	\
1940 			return -EFAULT;					\
1941 	} while (0)
1942 
1943 #define OVERFLOW_CHECK_u64(offset) \
1944 	OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64))
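
/*
 * Typical use, as in perf_evsel__parse_sample() below: validate a
 * variable-sized region against the end of the event, then step over it:
 *
 *	OVERFLOW_CHECK(array, sz, max_size);
 *	array = (void *)array + sz;
 */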
1945 
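/*
 * perf_evsel__parse_sample - decode a raw record into a struct perf_sample.
 *
 * Fields are consumed front to back in the fixed PERF_SAMPLE_* order the
 * kernel writes them; every variable-sized field is bounds-checked with
 * OVERFLOW_CHECK() so a truncated or corrupt record fails with -EFAULT
 * instead of reading past the end of the event.
 *
 * Example (sketch, hypothetical caller):
 *
 *	struct perf_sample sample;
 *
 *	if (perf_evsel__parse_sample(evsel, event, &sample) < 0)
 *		return -1;
 *	pr_debug("ip %#" PRIx64 " pid %u\n", sample.ip, sample.pid);
 */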
1946 int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
1947 			     struct perf_sample *data)
1948 {
1949 	u64 type = evsel->attr.sample_type;
1950 	bool swapped = evsel->needs_swap;
1951 	const u64 *array;
1952 	u16 max_size = event->header.size;
1953 	const void *endp = (void *)event + max_size;
1954 	u64 sz;
1955 
1956 	/*
1957 	 * used for cross-endian analysis. See git commit 65014ab3
1958 	 * for why this goofiness is needed.
1959 	 */
1960 	union u64_swap u;
1961 
1962 	memset(data, 0, sizeof(*data));
1963 	data->cpu = data->pid = data->tid = -1;
1964 	data->stream_id = data->id = data->time = -1ULL;
1965 	data->period = evsel->attr.sample_period;
1966 	data->cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
1967 
1968 	if (event->header.type != PERF_RECORD_SAMPLE) {
1969 		if (!evsel->attr.sample_id_all)
1970 			return 0;
1971 		return perf_evsel__parse_id_sample(evsel, event, data);
1972 	}
1973 
1974 	array = event->sample.array;
1975 
1976 	/*
1977 	 * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
1978 	 * up to PERF_SAMPLE_PERIOD.  After that overflow() must be used to
1979 	 * check the format does not go past the end of the event.
1980 	 */
1981 	if (evsel->sample_size + sizeof(event->header) > event->header.size)
1982 		return -EFAULT;
1983 
1984 	data->id = -1ULL;
1985 	if (type & PERF_SAMPLE_IDENTIFIER) {
1986 		data->id = *array;
1987 		array++;
1988 	}
1989 
1990 	if (type & PERF_SAMPLE_IP) {
1991 		data->ip = *array;
1992 		array++;
1993 	}
1994 
1995 	if (type & PERF_SAMPLE_TID) {
1996 		u.val64 = *array;
1997 		if (swapped) {
1998 			/* undo swap of u64, then swap on individual u32s */
1999 			u.val64 = bswap_64(u.val64);
2000 			u.val32[0] = bswap_32(u.val32[0]);
2001 			u.val32[1] = bswap_32(u.val32[1]);
2002 		}
2003 
2004 		data->pid = u.val32[0];
2005 		data->tid = u.val32[1];
2006 		array++;
2007 	}
2008 
2009 	if (type & PERF_SAMPLE_TIME) {
2010 		data->time = *array;
2011 		array++;
2012 	}
2013 
2014 	data->addr = 0;
2015 	if (type & PERF_SAMPLE_ADDR) {
2016 		data->addr = *array;
2017 		array++;
2018 	}
2019 
2020 	if (type & PERF_SAMPLE_ID) {
2021 		data->id = *array;
2022 		array++;
2023 	}
2024 
2025 	if (type & PERF_SAMPLE_STREAM_ID) {
2026 		data->stream_id = *array;
2027 		array++;
2028 	}
2029 
2030 	if (type & PERF_SAMPLE_CPU) {
2032 		u.val64 = *array;
2033 		if (swapped) {
2034 			/* undo swap of u64, then swap on individual u32s */
2035 			u.val64 = bswap_64(u.val64);
2036 			u.val32[0] = bswap_32(u.val32[0]);
2037 		}
2038 
2039 		data->cpu = u.val32[0];
2040 		array++;
2041 	}
2042 
2043 	if (type & PERF_SAMPLE_PERIOD) {
2044 		data->period = *array;
2045 		array++;
2046 	}
2047 
2048 	if (type & PERF_SAMPLE_READ) {
2049 		u64 read_format = evsel->attr.read_format;
2050 
2051 		OVERFLOW_CHECK_u64(array);
2052 		if (read_format & PERF_FORMAT_GROUP)
2053 			data->read.group.nr = *array;
2054 		else
2055 			data->read.one.value = *array;
2056 
2057 		array++;
2058 
2059 		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
2060 			OVERFLOW_CHECK_u64(array);
2061 			data->read.time_enabled = *array;
2062 			array++;
2063 		}
2064 
2065 		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
2066 			OVERFLOW_CHECK_u64(array);
2067 			data->read.time_running = *array;
2068 			array++;
2069 		}
2070 
2071 		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
2072 		if (read_format & PERF_FORMAT_GROUP) {
2073 			const u64 max_group_nr = UINT64_MAX /
2074 					sizeof(struct sample_read_value);
2075 
2076 			if (data->read.group.nr > max_group_nr)
2077 				return -EFAULT;
2078 			sz = data->read.group.nr *
2079 			     sizeof(struct sample_read_value);
2080 			OVERFLOW_CHECK(array, sz, max_size);
2081 			data->read.group.values =
2082 					(struct sample_read_value *)array;
2083 			array = (void *)array + sz;
2084 		} else {
2085 			OVERFLOW_CHECK_u64(array);
2086 			data->read.one.id = *array;
2087 			array++;
2088 		}
2089 	}
2090 
2091 	if (type & PERF_SAMPLE_CALLCHAIN) {
2092 		const u64 max_callchain_nr = UINT64_MAX / sizeof(u64);
2093 
2094 		OVERFLOW_CHECK_u64(array);
2095 		data->callchain = (struct ip_callchain *)array++;
2096 		if (data->callchain->nr > max_callchain_nr)
2097 			return -EFAULT;
2098 		sz = data->callchain->nr * sizeof(u64);
2099 		OVERFLOW_CHECK(array, sz, max_size);
2100 		array = (void *)array + sz;
2101 	}
2102 
2103 	if (type & PERF_SAMPLE_RAW) {
2104 		OVERFLOW_CHECK_u64(array);
2105 		u.val64 = *array;
2106 		if (WARN_ONCE(swapped,
2107 			      "Endianness of raw data not corrected!\n")) {
2108 			/* undo swap of u64, then swap on individual u32s */
2109 			u.val64 = bswap_64(u.val64);
2110 			u.val32[0] = bswap_32(u.val32[0]);
2111 			u.val32[1] = bswap_32(u.val32[1]);
2112 		}
2113 		data->raw_size = u.val32[0];
2114 		array = (void *)array + sizeof(u32);
2115 
2116 		OVERFLOW_CHECK(array, data->raw_size, max_size);
2117 		data->raw_data = (void *)array;
2118 		array = (void *)array + data->raw_size;
2119 	}
2120 
2121 	if (type & PERF_SAMPLE_BRANCH_STACK) {
2122 		const u64 max_branch_nr = UINT64_MAX /
2123 					  sizeof(struct branch_entry);
2124 
2125 		OVERFLOW_CHECK_u64(array);
2126 		data->branch_stack = (struct branch_stack *)array++;
2127 
2128 		if (data->branch_stack->nr > max_branch_nr)
2129 			return -EFAULT;
2130 		sz = data->branch_stack->nr * sizeof(struct branch_entry);
2131 		OVERFLOW_CHECK(array, sz, max_size);
2132 		array = (void *)array + sz;
2133 	}
2134 
2135 	if (type & PERF_SAMPLE_REGS_USER) {
2136 		OVERFLOW_CHECK_u64(array);
2137 		data->user_regs.abi = *array;
2138 		array++;
2139 
2140 		if (data->user_regs.abi) {
2141 			u64 mask = evsel->attr.sample_regs_user;
2142 
2143 			sz = hweight_long(mask) * sizeof(u64);
2144 			OVERFLOW_CHECK(array, sz, max_size);
2145 			data->user_regs.mask = mask;
2146 			data->user_regs.regs = (u64 *)array;
2147 			array = (void *)array + sz;
2148 		}
2149 	}
2150 
2151 	if (type & PERF_SAMPLE_STACK_USER) {
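		/*
		 * Layout: a u64 with the dump size that was requested, the
		 * stack bytes themselves, then a u64 with the size the
		 * kernel actually dumped (<= the requested size).
		 */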
2152 		OVERFLOW_CHECK_u64(array);
2153 		sz = *array++;
2154 
2155 		data->user_stack.offset = ((char *)(array - 1)
2156 					  - (char *) event);
2157 
2158 		if (!sz) {
2159 			data->user_stack.size = 0;
2160 		} else {
2161 			OVERFLOW_CHECK(array, sz, max_size);
2162 			data->user_stack.data = (char *)array;
2163 			array = (void *)array + sz;
2164 			OVERFLOW_CHECK_u64(array);
2165 			data->user_stack.size = *array++;
2166 			if (WARN_ONCE(data->user_stack.size > sz,
2167 				      "user stack dump failure\n"))
2168 				return -EFAULT;
2169 		}
2170 	}
2171 
2172 	if (type & PERF_SAMPLE_WEIGHT) {
2173 		OVERFLOW_CHECK_u64(array);
2174 		data->weight = *array;
2175 		array++;
2176 	}
2177 
2178 	data->data_src = PERF_MEM_DATA_SRC_NONE;
2179 	if (type & PERF_SAMPLE_DATA_SRC) {
2180 		OVERFLOW_CHECK_u64(array);
2181 		data->data_src = *array;
2182 		array++;
2183 	}
2184 
2185 	data->transaction = 0;
2186 	if (type & PERF_SAMPLE_TRANSACTION) {
2187 		OVERFLOW_CHECK_u64(array);
2188 		data->transaction = *array;
2189 		array++;
2190 	}
2191 
2192 	data->intr_regs.abi = PERF_SAMPLE_REGS_ABI_NONE;
2193 	if (type & PERF_SAMPLE_REGS_INTR) {
2194 		OVERFLOW_CHECK_u64(array);
2195 		data->intr_regs.abi = *array;
2196 		array++;
2197 
2198 		if (data->intr_regs.abi != PERF_SAMPLE_REGS_ABI_NONE) {
2199 			u64 mask = evsel->attr.sample_regs_intr;
2200 
2201 			sz = hweight_long(mask) * sizeof(u64);
2202 			OVERFLOW_CHECK(array, sz, max_size);
2203 			data->intr_regs.mask = mask;
2204 			data->intr_regs.regs = (u64 *)array;
2205 			array = (void *)array + sz;
2206 		}
2207 	}
2208 
2209 	return 0;
2210 }
2211 
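/*
 * perf_event__sample_event_size - bytes needed to synthesize @sample.
 *
 * Must stay in lockstep with perf_evsel__parse_sample() above and
 * perf_event__synthesize_sample() below: each PERF_SAMPLE_* bit accounts
 * for exactly the bytes those two consume/produce for that field.
 */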
2212 size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
2213 				     u64 read_format)
2214 {
2215 	size_t sz, result = sizeof(struct sample_event);
2216 
2217 	if (type & PERF_SAMPLE_IDENTIFIER)
2218 		result += sizeof(u64);
2219 
2220 	if (type & PERF_SAMPLE_IP)
2221 		result += sizeof(u64);
2222 
2223 	if (type & PERF_SAMPLE_TID)
2224 		result += sizeof(u64);
2225 
2226 	if (type & PERF_SAMPLE_TIME)
2227 		result += sizeof(u64);
2228 
2229 	if (type & PERF_SAMPLE_ADDR)
2230 		result += sizeof(u64);
2231 
2232 	if (type & PERF_SAMPLE_ID)
2233 		result += sizeof(u64);
2234 
2235 	if (type & PERF_SAMPLE_STREAM_ID)
2236 		result += sizeof(u64);
2237 
2238 	if (type & PERF_SAMPLE_CPU)
2239 		result += sizeof(u64);
2240 
2241 	if (type & PERF_SAMPLE_PERIOD)
2242 		result += sizeof(u64);
2243 
2244 	if (type & PERF_SAMPLE_READ) {
2245 		result += sizeof(u64);
2246 		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
2247 			result += sizeof(u64);
2248 		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
2249 			result += sizeof(u64);
2250 		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
2251 		if (read_format & PERF_FORMAT_GROUP) {
2252 			sz = sample->read.group.nr *
2253 			     sizeof(struct sample_read_value);
2254 			result += sz;
2255 		} else {
2256 			result += sizeof(u64);
2257 		}
2258 	}
2259 
2260 	if (type & PERF_SAMPLE_CALLCHAIN) {
2261 		sz = (sample->callchain->nr + 1) * sizeof(u64);
2262 		result += sz;
2263 	}
2264 
2265 	if (type & PERF_SAMPLE_RAW) {
2266 		result += sizeof(u32);
2267 		result += sample->raw_size;
2268 	}
2269 
2270 	if (type & PERF_SAMPLE_BRANCH_STACK) {
2271 		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
2272 		sz += sizeof(u64);
2273 		result += sz;
2274 	}
2275 
2276 	if (type & PERF_SAMPLE_REGS_USER) {
2277 		if (sample->user_regs.abi) {
2278 			result += sizeof(u64);
2279 			sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
2280 			result += sz;
2281 		} else {
2282 			result += sizeof(u64);
2283 		}
2284 	}
2285 
2286 	if (type & PERF_SAMPLE_STACK_USER) {
2287 		sz = sample->user_stack.size;
2288 		result += sizeof(u64);
2289 		if (sz) {
2290 			result += sz;
2291 			result += sizeof(u64);
2292 		}
2293 	}
2294 
2295 	if (type & PERF_SAMPLE_WEIGHT)
2296 		result += sizeof(u64);
2297 
2298 	if (type & PERF_SAMPLE_DATA_SRC)
2299 		result += sizeof(u64);
2300 
2301 	if (type & PERF_SAMPLE_TRANSACTION)
2302 		result += sizeof(u64);
2303 
2304 	if (type & PERF_SAMPLE_REGS_INTR) {
2305 		if (sample->intr_regs.abi) {
2306 			result += sizeof(u64);
2307 			sz = hweight_long(sample->intr_regs.mask) * sizeof(u64);
2308 			result += sz;
2309 		} else {
2310 			result += sizeof(u64);
2311 		}
2312 	}
2313 
2314 	return result;
2315 }
2316 
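/*
 * The inverse of perf_evsel__parse_sample(): lay @sample back out after
 * the event header in kernel field order.  Callers are expected to have
 * sized the buffer with perf_event__sample_event_size() and filled in
 * event->header beforehand.
 */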
2317 int perf_event__synthesize_sample(union perf_event *event, u64 type,
2318 				  u64 read_format,
2319 				  const struct perf_sample *sample,
2320 				  bool swapped)
2321 {
2322 	u64 *array;
2323 	size_t sz;
2324 	/*
2325 	 * used for cross-endian analysis. See git commit 65014ab3
2326 	 * for why this goofiness is needed.
2327 	 */
2328 	union u64_swap u;
2329 
2330 	array = event->sample.array;
2331 
2332 	if (type & PERF_SAMPLE_IDENTIFIER) {
2333 		*array = sample->id;
2334 		array++;
2335 	}
2336 
2337 	if (type & PERF_SAMPLE_IP) {
2338 		*array = sample->ip;
2339 		array++;
2340 	}
2341 
2342 	if (type & PERF_SAMPLE_TID) {
2343 		u.val32[0] = sample->pid;
2344 		u.val32[1] = sample->tid;
2345 		if (swapped) {
2346 			/*
2347 			 * Inverse of what is done in perf_evsel__parse_sample
2348 			 */
2349 			u.val32[0] = bswap_32(u.val32[0]);
2350 			u.val32[1] = bswap_32(u.val32[1]);
2351 			u.val64 = bswap_64(u.val64);
2352 		}
2353 
2354 		*array = u.val64;
2355 		array++;
2356 	}
2357 
2358 	if (type & PERF_SAMPLE_TIME) {
2359 		*array = sample->time;
2360 		array++;
2361 	}
2362 
2363 	if (type & PERF_SAMPLE_ADDR) {
2364 		*array = sample->addr;
2365 		array++;
2366 	}
2367 
2368 	if (type & PERF_SAMPLE_ID) {
2369 		*array = sample->id;
2370 		array++;
2371 	}
2372 
2373 	if (type & PERF_SAMPLE_STREAM_ID) {
2374 		*array = sample->stream_id;
2375 		array++;
2376 	}
2377 
2378 	if (type & PERF_SAMPLE_CPU) {
2379 		u.val32[0] = sample->cpu;
2380 		if (swapped) {
2381 			/*
2382 			 * Inverse of what is done in perf_evsel__parse_sample
2383 			 */
2384 			u.val32[0] = bswap_32(u.val32[0]);
2385 			u.val64 = bswap_64(u.val64);
2386 		}
2387 		*array = u.val64;
2388 		array++;
2389 	}
2390 
2391 	if (type & PERF_SAMPLE_PERIOD) {
2392 		*array = sample->period;
2393 		array++;
2394 	}
2395 
2396 	if (type & PERF_SAMPLE_READ) {
2397 		if (read_format & PERF_FORMAT_GROUP)
2398 			*array = sample->read.group.nr;
2399 		else
2400 			*array = sample->read.one.value;
2401 		array++;
2402 
2403 		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
2404 			*array = sample->read.time_enabled;
2405 			array++;
2406 		}
2407 
2408 		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
2409 			*array = sample->read.time_running;
2410 			array++;
2411 		}
2412 
2413 		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
2414 		if (read_format & PERF_FORMAT_GROUP) {
2415 			sz = sample->read.group.nr *
2416 			     sizeof(struct sample_read_value);
2417 			memcpy(array, sample->read.group.values, sz);
2418 			array = (void *)array + sz;
2419 		} else {
2420 			*array = sample->read.one.id;
2421 			array++;
2422 		}
2423 	}
2424 
2425 	if (type & PERF_SAMPLE_CALLCHAIN) {
2426 		sz = (sample->callchain->nr + 1) * sizeof(u64);
2427 		memcpy(array, sample->callchain, sz);
2428 		array = (void *)array + sz;
2429 	}
2430 
2431 	if (type & PERF_SAMPLE_RAW) {
2432 		u.val32[0] = sample->raw_size;
2433 		if (WARN_ONCE(swapped,
2434 			      "Endianness of raw data not corrected!\n")) {
2435 			/*
2436 			 * Inverse of what is done in perf_evsel__parse_sample
2437 			 */
2438 			u.val32[0] = bswap_32(u.val32[0]);
2439 			u.val32[1] = bswap_32(u.val32[1]);
2440 			u.val64 = bswap_64(u.val64);
2441 		}
2442 		*array = u.val64;
2443 		array = (void *)array + sizeof(u32);
2444 
2445 		memcpy(array, sample->raw_data, sample->raw_size);
2446 		array = (void *)array + sample->raw_size;
2447 	}
2448 
2449 	if (type & PERF_SAMPLE_BRANCH_STACK) {
2450 		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
2451 		sz += sizeof(u64);
2452 		memcpy(array, sample->branch_stack, sz);
2453 		array = (void *)array + sz;
2454 	}
2455 
2456 	if (type & PERF_SAMPLE_REGS_USER) {
2457 		if (sample->user_regs.abi) {
2458 			*array++ = sample->user_regs.abi;
2459 			sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
2460 			memcpy(array, sample->user_regs.regs, sz);
2461 			array = (void *)array + sz;
2462 		} else {
2463 			*array++ = 0;
2464 		}
2465 	}
2466 
2467 	if (type & PERF_SAMPLE_STACK_USER) {
2468 		sz = sample->user_stack.size;
2469 		*array++ = sz;
2470 		if (sz) {
2471 			memcpy(array, sample->user_stack.data, sz);
2472 			array = (void *)array + sz;
2473 			*array++ = sz;
2474 		}
2475 	}
2476 
2477 	if (type & PERF_SAMPLE_WEIGHT) {
2478 		*array = sample->weight;
2479 		array++;
2480 	}
2481 
2482 	if (type & PERF_SAMPLE_DATA_SRC) {
2483 		*array = sample->data_src;
2484 		array++;
2485 	}
2486 
2487 	if (type & PERF_SAMPLE_TRANSACTION) {
2488 		*array = sample->transaction;
2489 		array++;
2490 	}
2491 
2492 	if (type & PERF_SAMPLE_REGS_INTR) {
2493 		if (sample->intr_regs.abi) {
2494 			*array++ = sample->intr_regs.abi;
2495 			sz = hweight_long(sample->intr_regs.mask) * sizeof(u64);
2496 			memcpy(array, sample->intr_regs.regs, sz);
2497 			array = (void *)array + sz;
2498 		} else {
2499 			*array++ = 0;
2500 		}
2501 	}
2502 
2503 	return 0;
2504 }
2505 
2506 struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name)
2507 {
2508 	return pevent_find_field(evsel->tp_format, name);
2509 }
2510 
2511 void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
2512 			 const char *name)
2513 {
2514 	struct format_field *field = perf_evsel__field(evsel, name);
2515 	int offset;
2516 
2517 	if (!field)
2518 		return NULL;
2519 
2520 	offset = field->offset;
2521 
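	/*
	 * Dynamic (__data_loc) tracepoint fields store a 32-bit descriptor
	 * at the field offset: the low 16 bits are the payload's offset
	 * inside the raw data, the high 16 bits its length.  Only the
	 * offset matters when handing back a pointer.
	 */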
2522 	if (field->flags & FIELD_IS_DYNAMIC) {
2523 		offset = *(int *)(sample->raw_data + field->offset);
2524 		offset &= 0xffff;
2525 	}
2526 
2527 	return sample->raw_data + offset;
2528 }
2529 
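/*
 * Read an integer field out of the raw data, swapping it when the file
 * was recorded on a machine of the opposite endianness.  The 8-byte case
 * goes through memcpy() because the raw data is not guaranteed to be
 * 8-byte aligned.
 */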
2530 u64 format_field__intval(struct format_field *field, struct perf_sample *sample,
2531 			 bool needs_swap)
2532 {
2533 	u64 value;
2534 	void *ptr = sample->raw_data + field->offset;
2535 
2536 	switch (field->size) {
2537 	case 1:
2538 		return *(u8 *)ptr;
2539 	case 2:
2540 		value = *(u16 *)ptr;
2541 		break;
2542 	case 4:
2543 		value = *(u32 *)ptr;
2544 		break;
2545 	case 8:
2546 		memcpy(&value, ptr, sizeof(u64));
2547 		break;
2548 	default:
2549 		return 0;
2550 	}
2551 
2552 	if (!needs_swap)
2553 		return value;
2554 
2555 	switch (field->size) {
2556 	case 2:
2557 		return bswap_16(value);
2558 	case 4:
2559 		return bswap_32(value);
2560 	case 8:
2561 		return bswap_64(value);
2562 	default:
2563 		return 0;
2564 	}
2567 }
2568 
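/*
 * Example (sketch, hypothetical tracepoint): for sched:sched_switch the
 * outgoing pid could be read with
 *
 *	u64 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid");
 *
 * which yields 0 when the event's format has no such field.
 */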
2569 u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
2570 		       const char *name)
2571 {
2572 	struct format_field *field = perf_evsel__field(evsel, name);
2573 
2574 	if (!field)
2575 		return 0;
2576 
2577 	return format_field__intval(field, sample, evsel->needs_swap);
2578 }
2579 
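/*
 * Try to rewrite evsel->attr after a failed open: cycles falls back to
 * the hrtimer-based cpu-clock software event, and EACCES under
 * perf_event_paranoid > 1 falls back to excluding kernel samples.
 * Returns true when the caller should retry the open, with @msg carrying
 * a user-visible notice.
 */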
2580 bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
2581 			  char *msg, size_t msgsize)
2582 {
2583 	int paranoid;
2584 
2585 	if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
2586 	    evsel->attr.type   == PERF_TYPE_HARDWARE &&
2587 	    evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
2588 		/*
2589 		 * If it's cycles then fall back to hrtimer based
2590 		 * cpu-clock-tick sw counter, which is always available even if
2591 		 * no PMU support.
2592 		 *
2593 		 * PPC returns ENXIO until 2.6.37 (behavior changed with commit
2594 		 * b0a873e).
2595 		 */
2596 		scnprintf(msg, msgsize, "%s",
2597 "The cycles event is not supported, trying to fall back to cpu-clock-ticks");
2598 
2599 		evsel->attr.type   = PERF_TYPE_SOFTWARE;
2600 		evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK;
2601 
2602 		zfree(&evsel->name);
2603 		return true;
2604 	} else if (err == EACCES && !evsel->attr.exclude_kernel &&
2605 		   (paranoid = perf_event_paranoid()) > 1) {
2606 		const char *name = perf_evsel__name(evsel);
2607 		char *new_name;
2608 
2609 		if (asprintf(&new_name, "%s%su", name, strchr(name, ':') ? "" : ":") < 0)
2610 			return false;
2611 
2612 		free(evsel->name);
2614 		evsel->name = new_name;
2615 		scnprintf(msg, msgsize,
2616 "kernel.perf_event_paranoid=%d, trying to fall back to excluding kernel samples", paranoid);
2617 		evsel->attr.exclude_kernel = 1;
2618 
2619 		return true;
2620 	}
2621 
2622 	return false;
2623 }
2624 
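/*
 * Scan /proc/<pid>/comm entries for a process whose name starts with
 * @name; used below to detect an oprofile daemon holding the PMU.
 */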
2625 static bool find_process(const char *name)
2626 {
2627 	size_t len = strlen(name);
2628 	DIR *dir;
2629 	struct dirent *d;
2630 	int ret = -1;
2631 
2632 	dir = opendir(procfs__mountpoint());
2633 	if (!dir)
2634 		return false;
2635 
2636 	/* Walk through the directory. */
2637 	while (ret && (d = readdir(dir)) != NULL) {
2638 		char path[PATH_MAX];
2639 		char *data;
2640 		size_t size;
2641 
2642 		if ((d->d_type != DT_DIR) ||
2643 		     !strcmp(".", d->d_name) ||
2644 		     !strcmp("..", d->d_name))
2645 			continue;
2646 
2647 		scnprintf(path, sizeof(path), "%s/%s/comm",
2648 			  procfs__mountpoint(), d->d_name);
2649 
2650 		if (filename__read_str(path, &data, &size))
2651 			continue;
2652 
2653 		ret = strncmp(name, data, len);
2654 		free(data);
2655 	}
2656 
2657 	closedir(dir);
2658 	return ret == 0;
2659 }
2660 
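/*
 * Turn an errno from a failed perf_evsel__open() into actionable text.
 * Example (sketch, hypothetical caller):
 *
 *	char msg[512];
 *
 *	if (perf_evsel__open(evsel, cpus, threads) < 0) {
 *		perf_evsel__open_strerror(evsel, &opts->target, errno,
 *					  msg, sizeof(msg));
 *		ui__error("%s\n", msg);
 *	}
 */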
2661 int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
2662 			      int err, char *msg, size_t size)
2663 {
2664 	char sbuf[STRERR_BUFSIZE];
2665 	int printed = 0;
2666 
2667 	switch (err) {
2668 	case EPERM:
2669 	case EACCES:
2670 		if (err == EPERM)
2671 			printed = scnprintf(msg, size,
2672 				"No permission to enable %s event.\n\n",
2673 				perf_evsel__name(evsel));
2674 
2675 		return scnprintf(msg + printed, size - printed,
2676 		 "You may not have permission to collect %sstats.\n\n"
2677 		 "Consider tweaking /proc/sys/kernel/perf_event_paranoid,\n"
2678 		 "which controls use of the performance events system by\n"
2679 		 "unprivileged users (without CAP_SYS_ADMIN).\n\n"
2680 		 "The current value is %d:\n\n"
2681 		 "  -1: Allow use of (almost) all events by all users\n"
2682 		 "      Ignore mlock limit after perf_event_mlock_kb without CAP_IPC_LOCK\n"
2683 		 ">= 0: Disallow ftrace function tracepoint access by users without CAP_SYS_ADMIN\n"
2684 		 "      Disallow raw tracepoint access by users without CAP_SYS_ADMIN\n"
2685 		 ">= 1: Disallow CPU event access by users without CAP_SYS_ADMIN\n"
2686 		 ">= 2: Disallow kernel profiling by users without CAP_SYS_ADMIN\n\n"
2687 		 "To make this setting permanent, edit /etc/sysctl.conf too, e.g.:\n\n"
2688 		 "	kernel.perf_event_paranoid = -1\n",
2689 				 target->system_wide ? "system-wide " : "",
2690 				 perf_event_paranoid());
2691 	case ENOENT:
2692 		return scnprintf(msg, size, "The %s event is not supported.",
2693 				 perf_evsel__name(evsel));
2694 	case EMFILE:
2695 		return scnprintf(msg, size, "%s",
2696 			 "Too many events are open.\n"
2697 			 "Probably the maximum number of open file descriptors has been reached.\n"
2698 			 "Hint: Try again after reducing the number of events.\n"
2699 			 "Hint: Try increasing the limit with 'ulimit -n <limit>'");
2700 	case ENOMEM:
2701 		if ((evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN) != 0 &&
2702 		    access("/proc/sys/kernel/perf_event_max_stack", F_OK) == 0)
2703 			return scnprintf(msg, size,
2704 					 "Not enough memory to set up the event with a callchain.\n"
2705 					 "Hint: Try tweaking /proc/sys/kernel/perf_event_max_stack\n"
2706 					 "Hint: Current value: %d", sysctl_perf_event_max_stack);
2707 		break;
2708 	case ENODEV:
2709 		if (target->cpu_list)
2710 			return scnprintf(msg, size, "%s",
2711 	 "No such device - did you specify an out-of-range profile CPU?");
2712 		break;
2713 	case EOPNOTSUPP:
2714 		if (evsel->attr.sample_period != 0)
2715 			return scnprintf(msg, size, "%s",
2716 	"PMU hardware doesn't support sampling/overflow-interrupts.");
2717 		if (evsel->attr.precise_ip)
2718 			return scnprintf(msg, size, "%s",
2719 	"'precise' request may not be supported. Try removing the 'p' modifier.");
2720 #if defined(__i386__) || defined(__x86_64__)
2721 		if (evsel->attr.type == PERF_TYPE_HARDWARE)
2722 			return scnprintf(msg, size, "%s",
2723 	"No hardware sampling interrupt available.\n"
2724 	"No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it.");
2725 #endif
2726 		break;
2727 	case EBUSY:
2728 		if (find_process("oprofiled"))
2729 			return scnprintf(msg, size,
2730 	"The PMU counters are busy/taken by another profiler.\n"
2731 	"We found the oprofile daemon running; please stop it and try again.");
2732 		break;
2733 	case EINVAL:
2734 		if (evsel->attr.write_backward && perf_missing_features.write_backward)
2735 			return scnprintf(msg, size, "Reading from an overwrite event is not supported by this kernel.");
2736 		if (perf_missing_features.clockid)
2737 			return scnprintf(msg, size, "clockid feature not supported.");
2738 		if (perf_missing_features.clockid_wrong)
2739 			return scnprintf(msg, size, "wrong clockid (%d).", clockid);
2740 		break;
2741 	default:
2742 		break;
2743 	}
2744 
2745 	return scnprintf(msg, size,
2746 	"The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
2747 	"/bin/dmesg may provide additional information.\n"
2748 	"No CONFIG_PERF_EVENTS=y kernel support configured?",
2749 			 err, str_error_r(err, sbuf, sizeof(sbuf)),
2750 			 perf_evsel__name(evsel));
2751 }
2752 
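/*
 * NULL-safe accessors for the session environment: usable even on an
 * evsel that is not (yet) attached to an evlist.
 */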
2753 char *perf_evsel__env_arch(struct perf_evsel *evsel)
2754 {
2755 	if (evsel && evsel->evlist && evsel->evlist->env)
2756 		return evsel->evlist->env->arch;
2757 	return NULL;
2758 }
2759 
2760 char *perf_evsel__env_cpuid(struct perf_evsel *evsel)
2761 {
2762 	if (evsel && evsel->evlist && evsel->evlist->env)
2763 		return evsel->evlist->env->cpuid;
2764 	return NULL;
2765 }
2766