xref: /linux/tools/perf/util/evsel.c (revision cf2f33a4e54096f90652cca3511fd6a456ea5abe)
1 /*
2  * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
3  *
4  * Parts came from builtin-{top,stat,record}.c, see those files for further
5  * copyright notes.
6  *
7  * Released under the GPL v2. (and only v2, not any later version)
8  */
9 
10 #include <byteswap.h>
11 #include <linux/bitops.h>
12 #include <api/fs/debugfs.h>
13 #include <traceevent/event-parse.h>
14 #include <linux/hw_breakpoint.h>
15 #include <linux/perf_event.h>
16 #include <sys/resource.h>
17 #include "asm/bug.h"
18 #include "callchain.h"
19 #include "cgroup.h"
20 #include "evsel.h"
21 #include "evlist.h"
22 #include "util.h"
23 #include "cpumap.h"
24 #include "thread_map.h"
25 #include "target.h"
26 #include "perf_regs.h"
27 #include "debug.h"
28 #include "trace-event.h"
29 #include "stat.h"
30 
31 static struct {
32 	bool sample_id_all;
33 	bool exclude_guest;
34 	bool mmap2;
35 	bool cloexec;
36 	bool clockid;
37 	bool clockid_wrong;
38 } perf_missing_features;
39 
40 static clockid_t clockid;
41 
42 static int perf_evsel__no_extra_init(struct perf_evsel *evsel __maybe_unused)
43 {
44 	return 0;
45 }
46 
47 static void perf_evsel__no_extra_fini(struct perf_evsel *evsel __maybe_unused)
48 {
49 }
50 
51 static struct {
52 	size_t	size;
53 	int	(*init)(struct perf_evsel *evsel);
54 	void	(*fini)(struct perf_evsel *evsel);
55 } perf_evsel__object = {
56 	.size = sizeof(struct perf_evsel),
57 	.init = perf_evsel__no_extra_init,
58 	.fini = perf_evsel__no_extra_fini,
59 };
60 
61 int perf_evsel__object_config(size_t object_size,
62 			      int (*init)(struct perf_evsel *evsel),
63 			      void (*fini)(struct perf_evsel *evsel))
64 {
65 
66 	if (object_size == 0)
67 		goto set_methods;
68 
69 	if (perf_evsel__object.size > object_size)
70 		return -EINVAL;
71 
72 	perf_evsel__object.size = object_size;
73 
74 set_methods:
75 	if (init != NULL)
76 		perf_evsel__object.init = init;
77 
78 	if (fini != NULL)
79 		perf_evsel__object.fini = fini;
80 
81 	return 0;
82 }
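
/*
 * Usage sketch (illustrative, names hypothetical): a tool that embeds
 * extra per-evsel state registers its container size and hooks once at
 * startup, e.g.:
 *
 *	struct my_evsel { struct perf_evsel evsel; int my_state; };
 *
 *	perf_evsel__object_config(sizeof(struct my_evsel),
 *				  my_evsel__init, my_evsel__fini);
 */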
83 
84 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
85 
86 int __perf_evsel__sample_size(u64 sample_type)
87 {
88 	u64 mask = sample_type & PERF_SAMPLE_MASK;
89 	int size = 0;
90 	int i;
91 
92 	for (i = 0; i < 64; i++) {
93 		if (mask & (1ULL << i))
94 			size++;
95 	}
96 
97 	size *= sizeof(u64);
98 
99 	return size;
100 }
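
/*
 * Example (illustrative): for
 * sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME,
 * three bits of PERF_SAMPLE_MASK are set, so the fixed portion of a
 * sample is 3 * sizeof(u64) = 24 bytes.
 */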
101 
102 /**
103  * __perf_evsel__calc_id_pos - calculate id_pos.
104  * @sample_type: sample type
105  *
106  * This function returns the position of the event id (PERF_SAMPLE_ID or
107  * PERF_SAMPLE_IDENTIFIER) in a sample event, i.e. in the array of struct
108  * sample_event.
109  */
110 static int __perf_evsel__calc_id_pos(u64 sample_type)
111 {
112 	int idx = 0;
113 
114 	if (sample_type & PERF_SAMPLE_IDENTIFIER)
115 		return 0;
116 
117 	if (!(sample_type & PERF_SAMPLE_ID))
118 		return -1;
119 
120 	if (sample_type & PERF_SAMPLE_IP)
121 		idx += 1;
122 
123 	if (sample_type & PERF_SAMPLE_TID)
124 		idx += 1;
125 
126 	if (sample_type & PERF_SAMPLE_TIME)
127 		idx += 1;
128 
129 	if (sample_type & PERF_SAMPLE_ADDR)
130 		idx += 1;
131 
132 	return idx;
133 }
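
/*
 * Example (illustrative): with
 * sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ID,
 * the id is preceded by IP and TID, so id_pos is 2, i.e. the id lives
 * in sample.array[2].  With PERF_SAMPLE_IDENTIFIER it is always
 * sample.array[0]; without ID or IDENTIFIER the result is -1.
 */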
134 
135 /**
136  * __perf_evsel__calc_is_pos - calculate is_pos.
137  * @sample_type: sample type
138  *
139  * This function returns the position (counting backwards) of the event id
140  * (PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if
141  * sample_id_all is used there is an id sample appended to non-sample events.
142  */
143 static int __perf_evsel__calc_is_pos(u64 sample_type)
144 {
145 	int idx = 1;
146 
147 	if (sample_type & PERF_SAMPLE_IDENTIFIER)
148 		return 1;
149 
150 	if (!(sample_type & PERF_SAMPLE_ID))
151 		return -1;
152 
153 	if (sample_type & PERF_SAMPLE_CPU)
154 		idx += 1;
155 
156 	if (sample_type & PERF_SAMPLE_STREAM_ID)
157 		idx += 1;
158 
159 	return idx;
160 }
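
/*
 * Example (illustrative): with
 * sample_type = PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID,
 * is_pos is 3: counting backwards from the end of a non-sample event,
 * the id sits behind the trailing CPU and STREAM_ID values.
 */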
161 
162 void perf_evsel__calc_id_pos(struct perf_evsel *evsel)
163 {
164 	evsel->id_pos = __perf_evsel__calc_id_pos(evsel->attr.sample_type);
165 	evsel->is_pos = __perf_evsel__calc_is_pos(evsel->attr.sample_type);
166 }
167 
168 void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
169 				  enum perf_event_sample_format bit)
170 {
171 	if (!(evsel->attr.sample_type & bit)) {
172 		evsel->attr.sample_type |= bit;
173 		evsel->sample_size += sizeof(u64);
174 		perf_evsel__calc_id_pos(evsel);
175 	}
176 }
177 
178 void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
179 				    enum perf_event_sample_format bit)
180 {
181 	if (evsel->attr.sample_type & bit) {
182 		evsel->attr.sample_type &= ~bit;
183 		evsel->sample_size -= sizeof(u64);
184 		perf_evsel__calc_id_pos(evsel);
185 	}
186 }
187 
188 void perf_evsel__set_sample_id(struct perf_evsel *evsel,
189 			       bool can_sample_identifier)
190 {
191 	if (can_sample_identifier) {
192 		perf_evsel__reset_sample_bit(evsel, ID);
193 		perf_evsel__set_sample_bit(evsel, IDENTIFIER);
194 	} else {
195 		perf_evsel__set_sample_bit(evsel, ID);
196 	}
197 	evsel->attr.read_format |= PERF_FORMAT_ID;
198 }
199 
200 void perf_evsel__init(struct perf_evsel *evsel,
201 		      struct perf_event_attr *attr, int idx)
202 {
203 	evsel->idx	   = idx;
204 	evsel->tracking	   = !idx;
205 	evsel->attr	   = *attr;
206 	evsel->leader	   = evsel;
207 	evsel->unit	   = "";
208 	evsel->scale	   = 1.0;
209 	evsel->evlist	   = NULL;
210 	INIT_LIST_HEAD(&evsel->node);
211 	INIT_LIST_HEAD(&evsel->config_terms);
212 	perf_evsel__object.init(evsel);
213 	evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
214 	perf_evsel__calc_id_pos(evsel);
215 	evsel->cmdline_group_boundary = false;
216 }
217 
218 struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
219 {
220 	struct perf_evsel *evsel = zalloc(perf_evsel__object.size);
221 
222 	if (evsel != NULL)
223 		perf_evsel__init(evsel, attr, idx);
224 
225 	return evsel;
226 }
227 
228 struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx)
229 {
230 	struct perf_evsel *evsel = zalloc(perf_evsel__object.size);
231 
232 	if (evsel != NULL) {
233 		struct perf_event_attr attr = {
234 			.type	       = PERF_TYPE_TRACEPOINT,
235 			.sample_type   = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
236 					  PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
237 		};
238 
239 		if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
240 			goto out_free;
241 
242 		evsel->tp_format = trace_event__tp_format(sys, name);
243 		if (evsel->tp_format == NULL)
244 			goto out_free;
245 
246 		event_attr_init(&attr);
247 		attr.config = evsel->tp_format->id;
248 		attr.sample_period = 1;
249 		perf_evsel__init(evsel, &attr, idx);
250 	}
251 
252 	return evsel;
253 
254 out_free:
255 	zfree(&evsel->name);
256 	free(evsel);
257 	return NULL;
258 }
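
/*
 * Usage sketch (illustrative): the perf_evsel__newtp() wrapper in
 * evsel.h builds a tracepoint evsel by subsystem and event name, e.g.:
 *
 *	struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch");
 *
 *	if (evsel == NULL)
 *		return -ENOMEM;
 */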
259 
260 const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
261 	"cycles",
262 	"instructions",
263 	"cache-references",
264 	"cache-misses",
265 	"branches",
266 	"branch-misses",
267 	"bus-cycles",
268 	"stalled-cycles-frontend",
269 	"stalled-cycles-backend",
270 	"ref-cycles",
271 };
272 
273 static const char *__perf_evsel__hw_name(u64 config)
274 {
275 	if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
276 		return perf_evsel__hw_names[config];
277 
278 	return "unknown-hardware";
279 }
280 
281 static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
282 {
283 	int colon = 0, r = 0;
284 	struct perf_event_attr *attr = &evsel->attr;
285 	bool exclude_guest_default = false;
286 
287 #define MOD_PRINT(context, mod)	do {					\
288 		if (!attr->exclude_##context) {				\
289 			if (!colon) colon = ++r;			\
290 			r += scnprintf(bf + r, size - r, "%c", mod);	\
291 		} } while (0)
292 
293 	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
294 		MOD_PRINT(kernel, 'k');
295 		MOD_PRINT(user, 'u');
296 		MOD_PRINT(hv, 'h');
297 		exclude_guest_default = true;
298 	}
299 
300 	if (attr->precise_ip) {
301 		if (!colon)
302 			colon = ++r;
303 		r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
304 		exclude_guest_default = true;
305 	}
306 
307 	if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
308 		MOD_PRINT(host, 'H');
309 		MOD_PRINT(guest, 'G');
310 	}
311 #undef MOD_PRINT
312 	if (colon)
313 		bf[colon - 1] = ':';
314 	return r;
315 }
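
/*
 * Example (illustrative): for a hardware "cycles" event with
 * attr.exclude_kernel = attr.exclude_hv = 1, only the 'u' modifier
 * survives MOD_PRINT(), so perf_evsel__hw_name() yields "cycles:u".
 */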
316 
317 static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
318 {
319 	int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));
320 	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
321 }
322 
323 const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
324 	"cpu-clock",
325 	"task-clock",
326 	"page-faults",
327 	"context-switches",
328 	"cpu-migrations",
329 	"minor-faults",
330 	"major-faults",
331 	"alignment-faults",
332 	"emulation-faults",
333 	"dummy",
334 };
335 
336 static const char *__perf_evsel__sw_name(u64 config)
337 {
338 	if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
339 		return perf_evsel__sw_names[config];
340 	return "unknown-software";
341 }
342 
343 static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)
344 {
345 	int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));
346 	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
347 }
348 
349 static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
350 {
351 	int r;
352 
353 	r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);
354 
355 	if (type & HW_BREAKPOINT_R)
356 		r += scnprintf(bf + r, size - r, "r");
357 
358 	if (type & HW_BREAKPOINT_W)
359 		r += scnprintf(bf + r, size - r, "w");
360 
361 	if (type & HW_BREAKPOINT_X)
362 		r += scnprintf(bf + r, size - r, "x");
363 
364 	return r;
365 }
366 
367 static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size)
368 {
369 	struct perf_event_attr *attr = &evsel->attr;
370 	int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
371 	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
372 }
373 
374 const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
375 				[PERF_EVSEL__MAX_ALIASES] = {
376  { "L1-dcache",	"l1-d",		"l1d",		"L1-data",		},
377  { "L1-icache",	"l1-i",		"l1i",		"L1-instruction",	},
378  { "LLC",	"L2",							},
379  { "dTLB",	"d-tlb",	"Data-TLB",				},
380  { "iTLB",	"i-tlb",	"Instruction-TLB",			},
381  { "branch",	"branches",	"bpu",		"btb",		"bpc",	},
382  { "node",								},
383 };
384 
385 const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
386 				   [PERF_EVSEL__MAX_ALIASES] = {
387  { "load",	"loads",	"read",					},
388  { "store",	"stores",	"write",				},
389  { "prefetch",	"prefetches",	"speculative-read", "speculative-load",	},
390 };
391 
392 const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
393 				       [PERF_EVSEL__MAX_ALIASES] = {
394  { "refs",	"Reference",	"ops",		"access",		},
395  { "misses",	"miss",							},
396 };
397 
398 #define C(x)		PERF_COUNT_HW_CACHE_##x
399 #define CACHE_READ	(1 << C(OP_READ))
400 #define CACHE_WRITE	(1 << C(OP_WRITE))
401 #define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
402 #define COP(x)		(1 << x)
403 
404 /*
405  * cache operation stat
406  * L1I : Read and prefetch only
407  * ITLB and BPU : Read-only
408  */
409 static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
410  [C(L1D)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
411  [C(L1I)]	= (CACHE_READ | CACHE_PREFETCH),
412  [C(LL)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
413  [C(DTLB)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
414  [C(ITLB)]	= (CACHE_READ),
415  [C(BPU)]	= (CACHE_READ),
416  [C(NODE)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
417 };
418 
419 bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
420 {
421 	if (perf_evsel__hw_cache_stat[type] & COP(op))
422 		return true;	/* valid */
423 	else
424 		return false;	/* invalid */
425 }
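
/*
 * Example (illustrative): perf_evsel__is_cache_op_valid(C(L1I), C(OP_WRITE))
 * returns false, since the L1I row above allows only READ and PREFETCH.
 */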
426 
427 int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
428 					    char *bf, size_t size)
429 {
430 	if (result) {
431 		return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
432 				 perf_evsel__hw_cache_op[op][0],
433 				 perf_evsel__hw_cache_result[result][0]);
434 	}
435 
436 	return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
437 			 perf_evsel__hw_cache_op[op][1]);
438 }
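
/*
 * Example (illustrative): type = PERF_COUNT_HW_CACHE_L1D,
 * op = PERF_COUNT_HW_CACHE_OP_READ and result = PERF_COUNT_HW_CACHE_RESULT_MISS
 * produce "L1-dcache-load-misses"; with result == 0 (ACCESS) the op
 * alias is used instead, producing "L1-dcache-loads".
 */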
439 
440 static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
441 {
442 	u8 op, result, type = (config >>  0) & 0xff;
443 	const char *err = "unknown-ext-hardware-cache-type";
444 
445 	if (type >= PERF_COUNT_HW_CACHE_MAX)
446 		goto out_err;
447 
448 	op = (config >>  8) & 0xff;
449 	err = "unknown-ext-hardware-cache-op";
450 	if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
451 		goto out_err;
452 
453 	result = (config >> 16) & 0xff;
454 	err = "unknown-ext-hardware-cache-result";
455 	if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
456 		goto out_err;
457 
458 	err = "invalid-cache";
459 	if (!perf_evsel__is_cache_op_valid(type, op))
460 		goto out_err;
461 
462 	return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
463 out_err:
464 	return scnprintf(bf, size, "%s", err);
465 }
466 
467 static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)
468 {
469 	int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);
470 	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
471 }
472 
473 static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
474 {
475 	int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
476 	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
477 }
478 
479 const char *perf_evsel__name(struct perf_evsel *evsel)
480 {
481 	char bf[128];
482 
483 	if (evsel->name)
484 		return evsel->name;
485 
486 	switch (evsel->attr.type) {
487 	case PERF_TYPE_RAW:
488 		perf_evsel__raw_name(evsel, bf, sizeof(bf));
489 		break;
490 
491 	case PERF_TYPE_HARDWARE:
492 		perf_evsel__hw_name(evsel, bf, sizeof(bf));
493 		break;
494 
495 	case PERF_TYPE_HW_CACHE:
496 		perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
497 		break;
498 
499 	case PERF_TYPE_SOFTWARE:
500 		perf_evsel__sw_name(evsel, bf, sizeof(bf));
501 		break;
502 
503 	case PERF_TYPE_TRACEPOINT:
504 		scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
505 		break;
506 
507 	case PERF_TYPE_BREAKPOINT:
508 		perf_evsel__bp_name(evsel, bf, sizeof(bf));
509 		break;
510 
511 	default:
512 		scnprintf(bf, sizeof(bf), "unknown attr type: %d",
513 			  evsel->attr.type);
514 		break;
515 	}
516 
517 	evsel->name = strdup(bf);
518 
519 	return evsel->name ?: "unknown";
520 }
521 
522 const char *perf_evsel__group_name(struct perf_evsel *evsel)
523 {
524 	return evsel->group_name ?: "anon group";
525 }
526 
527 int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
528 {
529 	int ret;
530 	struct perf_evsel *pos;
531 	const char *group_name = perf_evsel__group_name(evsel);
532 
533 	ret = scnprintf(buf, size, "%s", group_name);
534 
535 	ret += scnprintf(buf + ret, size - ret, " { %s",
536 			 perf_evsel__name(evsel));
537 
538 	for_each_group_member(pos, evsel)
539 		ret += scnprintf(buf + ret, size - ret, ", %s",
540 				 perf_evsel__name(pos));
541 
542 	ret += scnprintf(buf + ret, size - ret, " }");
543 
544 	return ret;
545 }
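
/*
 * Example (illustrative): for a leader "cycles" with one member
 * "instructions" and no explicit group name, this renders:
 *
 *	anon group { cycles, instructions }
 */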
546 
547 static void
548 perf_evsel__config_callgraph(struct perf_evsel *evsel,
549 			     struct record_opts *opts,
550 			     struct callchain_param *param)
551 {
552 	bool function = perf_evsel__is_function_event(evsel);
553 	struct perf_event_attr *attr = &evsel->attr;
554 
555 	perf_evsel__set_sample_bit(evsel, CALLCHAIN);
556 
557 	if (param->record_mode == CALLCHAIN_LBR) {
558 		if (!opts->branch_stack) {
559 			if (attr->exclude_user) {
560 				pr_warning("LBR callstack option is only available "
561 					   "to get user callchain information. "
562 					   "Falling back to frame pointers.\n");
563 			} else {
564 				perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
565 				attr->branch_sample_type = PERF_SAMPLE_BRANCH_USER |
566 							PERF_SAMPLE_BRANCH_CALL_STACK;
567 			}
568 		} else
569 			 pr_warning("Cannot use LBR callstack with branch stack. "
570 				    "Falling back to framepointers.\n");
571 				    "Falling back to frame pointers.\n");
572 
573 	if (param->record_mode == CALLCHAIN_DWARF) {
574 		if (!function) {
575 			perf_evsel__set_sample_bit(evsel, REGS_USER);
576 			perf_evsel__set_sample_bit(evsel, STACK_USER);
577 			attr->sample_regs_user = PERF_REGS_MASK;
578 			attr->sample_stack_user = param->dump_size;
579 			attr->exclude_callchain_user = 1;
580 		} else {
581 			pr_info("Cannot use DWARF unwind for function trace event,"
582 				" falling back to framepointers.\n");
583 		}
584 	}
585 
586 	if (function) {
587 		pr_info("Disabling user space callchains for function trace event.\n");
588 		attr->exclude_callchain_user = 1;
589 	}
590 }
591 
592 static void
593 perf_evsel__reset_callgraph(struct perf_evsel *evsel,
594 			    struct callchain_param *param)
595 {
596 	struct perf_event_attr *attr = &evsel->attr;
597 
598 	perf_evsel__reset_sample_bit(evsel, CALLCHAIN);
599 	if (param->record_mode == CALLCHAIN_LBR) {
600 		perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
601 		attr->branch_sample_type &= ~(PERF_SAMPLE_BRANCH_USER |
602 					      PERF_SAMPLE_BRANCH_CALL_STACK);
603 	}
604 	if (param->record_mode == CALLCHAIN_DWARF) {
605 		perf_evsel__reset_sample_bit(evsel, REGS_USER);
606 		perf_evsel__reset_sample_bit(evsel, STACK_USER);
607 	}
608 }
609 
610 static void apply_config_terms(struct perf_evsel *evsel,
611 			       struct record_opts *opts)
612 {
613 	struct perf_evsel_config_term *term;
614 	struct list_head *config_terms = &evsel->config_terms;
615 	struct perf_event_attr *attr = &evsel->attr;
616 	struct callchain_param param;
617 	u32 dump_size = 0;
618 	char *callgraph_buf = NULL;
619 
620 	/* callgraph default */
621 	param.record_mode = callchain_param.record_mode;
622 
623 	list_for_each_entry(term, config_terms, list) {
624 		switch (term->type) {
625 		case PERF_EVSEL__CONFIG_TERM_PERIOD:
626 			attr->sample_period = term->val.period;
627 			attr->freq = 0;
628 			break;
629 		case PERF_EVSEL__CONFIG_TERM_FREQ:
630 			attr->sample_freq = term->val.freq;
631 			attr->freq = 1;
632 			break;
633 		case PERF_EVSEL__CONFIG_TERM_TIME:
634 			if (term->val.time)
635 				perf_evsel__set_sample_bit(evsel, TIME);
636 			else
637 				perf_evsel__reset_sample_bit(evsel, TIME);
638 			break;
639 		case PERF_EVSEL__CONFIG_TERM_CALLGRAPH:
640 			callgraph_buf = term->val.callgraph;
641 			break;
642 		case PERF_EVSEL__CONFIG_TERM_STACK_USER:
643 			dump_size = term->val.stack_user;
644 			break;
645 		default:
646 			break;
647 		}
648 	}
649 
650 	/* The user explicitly set a per-event callgraph: clear the old setting and reset. */
651 	if ((callgraph_buf != NULL) || (dump_size > 0)) {
652 
653 		/* parse callgraph parameters */
654 		if (callgraph_buf != NULL) {
655 			if (!strcmp(callgraph_buf, "no")) {
656 				param.enabled = false;
657 				param.record_mode = CALLCHAIN_NONE;
658 			} else {
659 				param.enabled = true;
660 				if (parse_callchain_record(callgraph_buf, &param)) {
661 					pr_err("per-event callgraph setting for %s failed. "
662 					       "Falling back to the global callgraph setting.\n",
663 					       evsel->name);
664 					return;
665 				}
666 			}
667 		}
668 		if (dump_size > 0) {
669 			dump_size = round_up(dump_size, sizeof(u64));
670 			param.dump_size = dump_size;
671 		}
672 
673 		/* If global callgraph set, clear it */
674 		if (callchain_param.enabled)
675 			perf_evsel__reset_callgraph(evsel, &callchain_param);
676 
677 		/* set the per-event callgraph */
678 		if (param.enabled)
679 			perf_evsel__config_callgraph(evsel, opts, &param);
680 	}
681 }
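
/*
 * Example (illustrative): per-event terms such as
 *
 *	perf record -e 'cycles/period=100000,call-graph=dwarf,stack-size=4096/' ...
 *
 * arrive here as PERF_EVSEL__CONFIG_TERM_{PERIOD,CALLGRAPH,STACK_USER}
 * entries and override the corresponding global settings for this
 * evsel only.
 */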
682 
683 /*
684  * The enable_on_exec/disabled value strategy:
685  *
686  *  1) For any type of traced program:
687  *    - all independent events and group leaders are disabled
688  *    - all group members are enabled
689  *
690  *     Group members are ruled by group leaders. They need to
691  *     be enabled, because the group scheduling relies on that.
692  *
693  *  2) For traced programs executed by perf:
694  *     - all independent events and group leaders have
695  *       enable_on_exec set
696  *     - we don't specifically enable or disable any event during
697  *       the record command
698  *
699  *     Independent events and group leaders are initially disabled
700  *     and get enabled by exec. Group members are ruled by group
701  *     leaders as stated in 1).
702  *
703  *  3) For traced programs attached by perf (pid/tid):
704  *     - we specifically enable or disable all events during
705  *       the record command
706  *
707  *     When attaching events to an already running traced process
708  *     we enable/disable events specifically, as there's no
709  *     initial traced exec call.
710  */
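
/*
 * Example (illustrative): for "perf record -e '{cycles,instructions}' ./prog",
 * case 2) above applies: the leader "cycles" gets disabled = 1 and
 * enable_on_exec = 1, while the member "instructions" stays enabled and
 * follows its leader's scheduling.
 */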
711 void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
712 {
713 	struct perf_evsel *leader = evsel->leader;
714 	struct perf_event_attr *attr = &evsel->attr;
715 	int track = evsel->tracking;
716 	bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread;
717 
718 	attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
719 	attr->inherit	    = !opts->no_inherit;
720 
721 	perf_evsel__set_sample_bit(evsel, IP);
722 	perf_evsel__set_sample_bit(evsel, TID);
723 
724 	if (evsel->sample_read) {
725 		perf_evsel__set_sample_bit(evsel, READ);
726 
727 		/*
728 		 * We need ID even in the case of a single event, because
729 		 * PERF_SAMPLE_READ processes ID-specific data.
730 		 */
731 		perf_evsel__set_sample_id(evsel, false);
732 
733 		/*
734 		 * Apply the group format only if we belong to a group
735 		 * with more than one member.
736 		 */
737 		if (leader->nr_members > 1) {
738 			attr->read_format |= PERF_FORMAT_GROUP;
739 			attr->inherit = 0;
740 		}
741 	}
742 
743 	/*
744 	 * We give some events a default sample interval, but keep
745 	 * it a weak assumption that the user can override.
746 	 */
747 	if (!attr->sample_period || (opts->user_freq != UINT_MAX ||
748 				     opts->user_interval != ULLONG_MAX)) {
749 		if (opts->freq) {
750 			perf_evsel__set_sample_bit(evsel, PERIOD);
751 			attr->freq		= 1;
752 			attr->sample_freq	= opts->freq;
753 		} else {
754 			attr->sample_period = opts->default_interval;
755 		}
756 	}
757 
758 	/*
759 	 * Disable sampling for all group members other
760 	 * than leader in case leader 'leads' the sampling.
761 	 */
762 	if ((leader != evsel) && leader->sample_read) {
763 		attr->sample_freq   = 0;
764 		attr->sample_period = 0;
765 	}
766 
767 	if (opts->no_samples)
768 		attr->sample_freq = 0;
769 
770 	if (opts->inherit_stat)
771 		attr->inherit_stat = 1;
772 
773 	if (opts->sample_address) {
774 		perf_evsel__set_sample_bit(evsel, ADDR);
775 		attr->mmap_data = track;
776 	}
777 
778 	/*
779 	 * We don't allow user space callchains for the function trace
780 	 * event, due to issues with page faults while tracing the page
781 	 * fault handler, and the overall trickiness of the approach.
782 	 */
783 	if (perf_evsel__is_function_event(evsel))
784 		evsel->attr.exclude_callchain_user = 1;
785 
786 	if (callchain_param.enabled && !evsel->no_aux_samples)
787 		perf_evsel__config_callgraph(evsel, opts, &callchain_param);
788 
789 	if (opts->sample_intr_regs) {
790 		attr->sample_regs_intr = opts->sample_intr_regs;
791 		perf_evsel__set_sample_bit(evsel, REGS_INTR);
792 	}
793 
794 	if (target__has_cpu(&opts->target))
795 		perf_evsel__set_sample_bit(evsel, CPU);
796 
797 	if (opts->period)
798 		perf_evsel__set_sample_bit(evsel, PERIOD);
799 
800 	/*
801 	 * When the user explicitly disabled time, don't force it here.
802 	 */
803 	if (opts->sample_time &&
804 	    (!perf_missing_features.sample_id_all &&
805 	    (!opts->no_inherit || target__has_cpu(&opts->target) || per_cpu ||
806 	     opts->sample_time_set)))
807 		perf_evsel__set_sample_bit(evsel, TIME);
808 
809 	if (opts->raw_samples && !evsel->no_aux_samples) {
810 		perf_evsel__set_sample_bit(evsel, TIME);
811 		perf_evsel__set_sample_bit(evsel, RAW);
812 		perf_evsel__set_sample_bit(evsel, CPU);
813 	}
814 
815 	if (opts->sample_address)
816 		perf_evsel__set_sample_bit(evsel, DATA_SRC);
817 
818 	if (opts->no_buffering) {
819 		attr->watermark = 0;
820 		attr->wakeup_events = 1;
821 	}
822 	if (opts->branch_stack && !evsel->no_aux_samples) {
823 		perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
824 		attr->branch_sample_type = opts->branch_stack;
825 	}
826 
827 	if (opts->sample_weight)
828 		perf_evsel__set_sample_bit(evsel, WEIGHT);
829 
830 	attr->task  = track;
831 	attr->mmap  = track;
832 	attr->mmap2 = track && !perf_missing_features.mmap2;
833 	attr->comm  = track;
834 
835 	if (opts->record_switch_events)
836 		attr->context_switch = track;
837 
838 	if (opts->sample_transaction)
839 		perf_evsel__set_sample_bit(evsel, TRANSACTION);
840 
841 	if (opts->running_time) {
842 		evsel->attr.read_format |=
843 			PERF_FORMAT_TOTAL_TIME_ENABLED |
844 			PERF_FORMAT_TOTAL_TIME_RUNNING;
845 	}
846 
847 	/*
848 	 * XXX see the function comment above
849 	 *
850 	 * Disabling only independent events or group leaders,
851 	 * keeping group members enabled.
852 	 */
853 	if (perf_evsel__is_group_leader(evsel))
854 		attr->disabled = 1;
855 
856 	/*
857 	 * Setting enable_on_exec for independent events and
858 	 * group leaders for traced programs executed by perf.
859 	 */
860 	if (target__none(&opts->target) && perf_evsel__is_group_leader(evsel) &&
861 		!opts->initial_delay)
862 		attr->enable_on_exec = 1;
863 
864 	if (evsel->immediate) {
865 		attr->disabled = 0;
866 		attr->enable_on_exec = 0;
867 	}
868 
869 	clockid = opts->clockid;
870 	if (opts->use_clockid) {
871 		attr->use_clockid = 1;
872 		attr->clockid = opts->clockid;
873 	}
874 
875 	/*
876 	 * Apply event-specific term settings;
877 	 * these override any global configuration.
878 	 */
879 	apply_config_terms(evsel, opts);
880 }
881 
882 static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
883 {
884 	int cpu, thread;
885 
886 	if (evsel->system_wide)
887 		nthreads = 1;
888 
889 	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
890 
891 	if (evsel->fd) {
892 		for (cpu = 0; cpu < ncpus; cpu++) {
893 			for (thread = 0; thread < nthreads; thread++) {
894 				FD(evsel, cpu, thread) = -1;
895 			}
896 		}
897 	}
898 
899 	return evsel->fd != NULL ? 0 : -ENOMEM;
900 }
901 
902 static int perf_evsel__run_ioctl(struct perf_evsel *evsel, int ncpus, int nthreads,
903 			  int ioc,  void *arg)
904 {
905 	int cpu, thread;
906 
907 	if (evsel->system_wide)
908 		nthreads = 1;
909 
910 	for (cpu = 0; cpu < ncpus; cpu++) {
911 		for (thread = 0; thread < nthreads; thread++) {
912 			int fd = FD(evsel, cpu, thread),
913 			    err = ioctl(fd, ioc, arg);
914 
915 			if (err)
916 				return err;
917 		}
918 	}
919 
920 	return 0;
921 }
922 
923 int perf_evsel__apply_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
924 			     const char *filter)
925 {
926 	return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
927 				     PERF_EVENT_IOC_SET_FILTER,
928 				     (void *)filter);
929 }
930 
931 int perf_evsel__set_filter(struct perf_evsel *evsel, const char *filter)
932 {
933 	char *new_filter = strdup(filter);
934 
935 	if (new_filter != NULL) {
936 		free(evsel->filter);
937 		evsel->filter = new_filter;
938 		return 0;
939 	}
940 
941 	return -1;
942 }
943 
944 int perf_evsel__append_filter(struct perf_evsel *evsel,
945 			      const char *op, const char *filter)
946 {
947 	char *new_filter;
948 
949 	if (evsel->filter == NULL)
950 		return perf_evsel__set_filter(evsel, filter);
951 
952 	if (asprintf(&new_filter, "(%s) %s (%s)", evsel->filter, op, filter) > 0) {
953 		free(evsel->filter);
954 		evsel->filter = new_filter;
955 		return 0;
956 	}
957 
958 	return -1;
959 }
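
/*
 * Example (illustrative): starting from an existing filter "pid==1",
 * perf_evsel__append_filter(evsel, "&&", "prev_comm!=swapper") leaves
 * evsel->filter as "(pid==1) && (prev_comm!=swapper)".
 */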
960 
961 int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads)
962 {
963 	return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
964 				     PERF_EVENT_IOC_ENABLE,
965 				     0);
966 }
967 
968 int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
969 {
970 	if (ncpus == 0 || nthreads == 0)
971 		return 0;
972 
973 	if (evsel->system_wide)
974 		nthreads = 1;
975 
976 	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
977 	if (evsel->sample_id == NULL)
978 		return -ENOMEM;
979 
980 	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
981 	if (evsel->id == NULL) {
982 		xyarray__delete(evsel->sample_id);
983 		evsel->sample_id = NULL;
984 		return -ENOMEM;
985 	}
986 
987 	return 0;
988 }
989 
990 static void perf_evsel__free_fd(struct perf_evsel *evsel)
991 {
992 	xyarray__delete(evsel->fd);
993 	evsel->fd = NULL;
994 }
995 
996 static void perf_evsel__free_id(struct perf_evsel *evsel)
997 {
998 	xyarray__delete(evsel->sample_id);
999 	evsel->sample_id = NULL;
1000 	zfree(&evsel->id);
1001 }
1002 
1003 static void perf_evsel__free_config_terms(struct perf_evsel *evsel)
1004 {
1005 	struct perf_evsel_config_term *term, *h;
1006 
1007 	list_for_each_entry_safe(term, h, &evsel->config_terms, list) {
1008 		list_del(&term->list);
1009 		free(term);
1010 	}
1011 }
1012 
1013 void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
1014 {
1015 	int cpu, thread;
1016 
1017 	if (evsel->system_wide)
1018 		nthreads = 1;
1019 
1020 	for (cpu = 0; cpu < ncpus; cpu++)
1021 		for (thread = 0; thread < nthreads; ++thread) {
1022 			close(FD(evsel, cpu, thread));
1023 			FD(evsel, cpu, thread) = -1;
1024 		}
1025 }
1026 
1027 void perf_evsel__exit(struct perf_evsel *evsel)
1028 {
1029 	assert(list_empty(&evsel->node));
1030 	assert(evsel->evlist == NULL);
1031 	perf_evsel__free_fd(evsel);
1032 	perf_evsel__free_id(evsel);
1033 	perf_evsel__free_config_terms(evsel);
1034 	close_cgroup(evsel->cgrp);
1035 	cpu_map__put(evsel->cpus);
1036 	thread_map__put(evsel->threads);
1037 	zfree(&evsel->group_name);
1038 	zfree(&evsel->name);
1039 	perf_evsel__object.fini(evsel);
1040 }
1041 
1042 void perf_evsel__delete(struct perf_evsel *evsel)
1043 {
1044 	perf_evsel__exit(evsel);
1045 	free(evsel);
1046 }
1047 
1048 void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu, int thread,
1049 				struct perf_counts_values *count)
1050 {
1051 	struct perf_counts_values tmp;
1052 
1053 	if (!evsel->prev_raw_counts)
1054 		return;
1055 
1056 	if (cpu == -1) {
1057 		tmp = evsel->prev_raw_counts->aggr;
1058 		evsel->prev_raw_counts->aggr = *count;
1059 	} else {
1060 		tmp = *perf_counts(evsel->prev_raw_counts, cpu, thread);
1061 		*perf_counts(evsel->prev_raw_counts, cpu, thread) = *count;
1062 	}
1063 
1064 	count->val = count->val - tmp.val;
1065 	count->ena = count->ena - tmp.ena;
1066 	count->run = count->run - tmp.run;
1067 }
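
/*
 * Example (illustrative): if the previously saved raw count was 1000
 * and the new reading is 1500, the saved value becomes 1500 and *count
 * is rewritten to the delta, 500 (likewise for the ena/run times).
 */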
1068 
1069 void perf_counts_values__scale(struct perf_counts_values *count,
1070 			       bool scale, s8 *pscaled)
1071 {
1072 	s8 scaled = 0;
1073 
1074 	if (scale) {
1075 		if (count->run == 0) {
1076 			scaled = -1;
1077 			count->val = 0;
1078 		} else if (count->run < count->ena) {
1079 			scaled = 1;
1080 			count->val = (u64)((double) count->val * count->ena / count->run + 0.5);
1081 		}
1082 	} else
1083 		count->ena = count->run = 0;
1084 
1085 	if (pscaled)
1086 		*pscaled = scaled;
1087 }
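
/*
 * Worked example (illustrative): for a counter that was multiplexed
 * half of the time, say val = 100, ena = 1000 and run = 500, scaling
 * yields val = 100 * 1000 / 500 + 0.5 -> 200 with *pscaled = 1;
 * run == 0 zeroes the value and reports -1 instead.
 */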
1088 
1089 int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
1090 		     struct perf_counts_values *count)
1091 {
1092 	memset(count, 0, sizeof(*count));
1093 
1094 	if (FD(evsel, cpu, thread) < 0)
1095 		return -EINVAL;
1096 
1097 	if (readn(FD(evsel, cpu, thread), count, sizeof(*count)) < 0)
1098 		return -errno;
1099 
1100 	return 0;
1101 }
1102 
1103 int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
1104 			      int cpu, int thread, bool scale)
1105 {
1106 	struct perf_counts_values count;
1107 	size_t nv = scale ? 3 : 1;
1108 
1109 	if (FD(evsel, cpu, thread) < 0)
1110 		return -EINVAL;
1111 
1112 	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1, thread + 1) < 0)
1113 		return -ENOMEM;
1114 
1115 	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
1116 		return -errno;
1117 
1118 	perf_evsel__compute_deltas(evsel, cpu, thread, &count);
1119 	perf_counts_values__scale(&count, scale, NULL);
1120 	*perf_counts(evsel->counts, cpu, thread) = count;
1121 	return 0;
1122 }
1123 
1124 static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
1125 {
1126 	struct perf_evsel *leader = evsel->leader;
1127 	int fd;
1128 
1129 	if (perf_evsel__is_group_leader(evsel))
1130 		return -1;
1131 
1132 	/*
1133 	 * The leader must already be processed/open;
1134 	 * if not, it's a bug.
1135 	 */
1136 	BUG_ON(!leader->fd);
1137 
1138 	fd = FD(leader, cpu, thread);
1139 	BUG_ON(fd == -1);
1140 
1141 	return fd;
1142 }
1143 
1144 struct bit_names {
1145 	int bit;
1146 	const char *name;
1147 };
1148 
1149 static void __p_bits(char *buf, size_t size, u64 value, struct bit_names *bits)
1150 {
1151 	bool first_bit = true;
1152 	int i = 0;
1153 
1154 	do {
1155 		if (value & bits[i].bit) {
1156 			buf += scnprintf(buf, size, "%s%s", first_bit ? "" : "|", bits[i].name);
1157 			first_bit = false;
1158 		}
1159 	} while (bits[++i].name != NULL);
1160 }
1161 
1162 static void __p_sample_type(char *buf, size_t size, u64 value)
1163 {
1164 #define bit_name(n) { PERF_SAMPLE_##n, #n }
1165 	struct bit_names bits[] = {
1166 		bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR),
1167 		bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
1168 		bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
1169 		bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
1170 		bit_name(IDENTIFIER), bit_name(REGS_INTR),
1171 		{ .name = NULL, }
1172 	};
1173 #undef bit_name
1174 	__p_bits(buf, size, value, bits);
1175 }
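
/*
 * Example (illustrative): value = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 * PERF_SAMPLE_PERIOD formats as "IP|TID|PERIOD".
 */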
1176 
1177 static void __p_read_format(char *buf, size_t size, u64 value)
1178 {
1179 #define bit_name(n) { PERF_FORMAT_##n, #n }
1180 	struct bit_names bits[] = {
1181 		bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING),
1182 		bit_name(ID), bit_name(GROUP),
1183 		{ .name = NULL, }
1184 	};
1185 #undef bit_name
1186 	__p_bits(buf, size, value, bits);
1187 }
1188 
1189 #define BUF_SIZE		1024
1190 
1191 #define p_hex(val)		snprintf(buf, BUF_SIZE, "%#"PRIx64, (uint64_t)(val))
1192 #define p_unsigned(val)		snprintf(buf, BUF_SIZE, "%"PRIu64, (uint64_t)(val))
1193 #define p_signed(val)		snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)(val))
1194 #define p_sample_type(val)	__p_sample_type(buf, BUF_SIZE, val)
1195 #define p_read_format(val)	__p_read_format(buf, BUF_SIZE, val)
1196 
1197 #define PRINT_ATTRn(_n, _f, _p)				\
1198 do {							\
1199 	if (attr->_f) {					\
1200 		_p(attr->_f);				\
1201 		ret += attr__fprintf(fp, _n, buf, priv);\
1202 	}						\
1203 } while (0)
1204 
1205 #define PRINT_ATTRf(_f, _p)	PRINT_ATTRn(#_f, _f, _p)
1206 
1207 int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
1208 			     attr__fprintf_f attr__fprintf, void *priv)
1209 {
1210 	char buf[BUF_SIZE];
1211 	int ret = 0;
1212 
1213 	PRINT_ATTRf(type, p_unsigned);
1214 	PRINT_ATTRf(size, p_unsigned);
1215 	PRINT_ATTRf(config, p_hex);
1216 	PRINT_ATTRn("{ sample_period, sample_freq }", sample_period, p_unsigned);
1217 	PRINT_ATTRf(sample_type, p_sample_type);
1218 	PRINT_ATTRf(read_format, p_read_format);
1219 
1220 	PRINT_ATTRf(disabled, p_unsigned);
1221 	PRINT_ATTRf(inherit, p_unsigned);
1222 	PRINT_ATTRf(pinned, p_unsigned);
1223 	PRINT_ATTRf(exclusive, p_unsigned);
1224 	PRINT_ATTRf(exclude_user, p_unsigned);
1225 	PRINT_ATTRf(exclude_kernel, p_unsigned);
1226 	PRINT_ATTRf(exclude_hv, p_unsigned);
1227 	PRINT_ATTRf(exclude_idle, p_unsigned);
1228 	PRINT_ATTRf(mmap, p_unsigned);
1229 	PRINT_ATTRf(comm, p_unsigned);
1230 	PRINT_ATTRf(freq, p_unsigned);
1231 	PRINT_ATTRf(inherit_stat, p_unsigned);
1232 	PRINT_ATTRf(enable_on_exec, p_unsigned);
1233 	PRINT_ATTRf(task, p_unsigned);
1234 	PRINT_ATTRf(watermark, p_unsigned);
1235 	PRINT_ATTRf(precise_ip, p_unsigned);
1236 	PRINT_ATTRf(mmap_data, p_unsigned);
1237 	PRINT_ATTRf(sample_id_all, p_unsigned);
1238 	PRINT_ATTRf(exclude_host, p_unsigned);
1239 	PRINT_ATTRf(exclude_guest, p_unsigned);
1240 	PRINT_ATTRf(exclude_callchain_kernel, p_unsigned);
1241 	PRINT_ATTRf(exclude_callchain_user, p_unsigned);
1242 	PRINT_ATTRf(mmap2, p_unsigned);
1243 	PRINT_ATTRf(comm_exec, p_unsigned);
1244 	PRINT_ATTRf(use_clockid, p_unsigned);
1245 	PRINT_ATTRf(context_switch, p_unsigned);
1246 
1247 	PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned);
1248 	PRINT_ATTRf(bp_type, p_unsigned);
1249 	PRINT_ATTRn("{ bp_addr, config1 }", bp_addr, p_hex);
1250 	PRINT_ATTRn("{ bp_len, config2 }", bp_len, p_hex);
1251 	PRINT_ATTRf(sample_regs_user, p_hex);
1252 	PRINT_ATTRf(sample_stack_user, p_unsigned);
1253 	PRINT_ATTRf(clockid, p_signed);
1254 	PRINT_ATTRf(sample_regs_intr, p_hex);
1255 	PRINT_ATTRf(aux_watermark, p_unsigned);
1256 
1257 	return ret;
1258 }
1259 
1260 static int __open_attr__fprintf(FILE *fp, const char *name, const char *val,
1261 				void *priv __attribute__((unused)))
1262 {
1263 	return fprintf(fp, "  %-32s %s\n", name, val);
1264 }
1265 
1266 static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
1267 			      struct thread_map *threads)
1268 {
1269 	int cpu, thread, nthreads;
1270 	unsigned long flags = PERF_FLAG_FD_CLOEXEC;
1271 	int pid = -1, err;
1272 	enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;
1273 
1274 	if (evsel->system_wide)
1275 		nthreads = 1;
1276 	else
1277 		nthreads = threads->nr;
1278 
1279 	if (evsel->fd == NULL &&
1280 	    perf_evsel__alloc_fd(evsel, cpus->nr, nthreads) < 0)
1281 		return -ENOMEM;
1282 
1283 	if (evsel->cgrp) {
1284 		flags |= PERF_FLAG_PID_CGROUP;
1285 		pid = evsel->cgrp->fd;
1286 	}
1287 
1288 fallback_missing_features:
1289 	if (perf_missing_features.clockid_wrong)
1290 		evsel->attr.clockid = CLOCK_MONOTONIC; /* should always work */
1291 	if (perf_missing_features.clockid) {
1292 		evsel->attr.use_clockid = 0;
1293 		evsel->attr.clockid = 0;
1294 	}
1295 	if (perf_missing_features.cloexec)
1296 		flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC;
1297 	if (perf_missing_features.mmap2)
1298 		evsel->attr.mmap2 = 0;
1299 	if (perf_missing_features.exclude_guest)
1300 		evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
1301 retry_sample_id:
1302 	if (perf_missing_features.sample_id_all)
1303 		evsel->attr.sample_id_all = 0;
1304 
1305 	if (verbose >= 2) {
1306 		fprintf(stderr, "%.60s\n", graph_dotted_line);
1307 		fprintf(stderr, "perf_event_attr:\n");
1308 		perf_event_attr__fprintf(stderr, &evsel->attr, __open_attr__fprintf, NULL);
1309 		fprintf(stderr, "%.60s\n", graph_dotted_line);
1310 	}
1311 
1312 	for (cpu = 0; cpu < cpus->nr; cpu++) {
1313 
1314 		for (thread = 0; thread < nthreads; thread++) {
1315 			int group_fd;
1316 
1317 			if (!evsel->cgrp && !evsel->system_wide)
1318 				pid = thread_map__pid(threads, thread);
1319 
1320 			group_fd = get_group_fd(evsel, cpu, thread);
1321 retry_open:
1322 			pr_debug2("sys_perf_event_open: pid %d  cpu %d  group_fd %d  flags %#lx\n",
1323 				  pid, cpus->map[cpu], group_fd, flags);
1324 
1325 			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
1326 								     pid,
1327 								     cpus->map[cpu],
1328 								     group_fd, flags);
1329 			if (FD(evsel, cpu, thread) < 0) {
1330 				err = -errno;
1331 				pr_debug2("sys_perf_event_open failed, error %d\n",
1332 					  err);
1333 				goto try_fallback;
1334 			}
1335 			set_rlimit = NO_CHANGE;
1336 
1337 			/*
1338 			 * If we succeeded but had to kill clockid, fail and
1339 			 * have perf_evsel__open_strerror() print us a nice
1340 			 * error.
1341 			 */
1342 			if (perf_missing_features.clockid ||
1343 			    perf_missing_features.clockid_wrong) {
1344 				err = -EINVAL;
1345 				goto out_close;
1346 			}
1347 		}
1348 	}
1349 
1350 	return 0;
1351 
1352 try_fallback:
1353 	/*
1354 	 * perf stat needs between 5 and 22 fds per CPU. When we run out
1355 	 * of them, try to increase the limits.
1356 	 */
1357 	if (err == -EMFILE && set_rlimit < INCREASED_MAX) {
1358 		struct rlimit l;
1359 		int old_errno = errno;
1360 
1361 		if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
1362 			if (set_rlimit == NO_CHANGE)
1363 				l.rlim_cur = l.rlim_max;
1364 			else {
1365 				l.rlim_cur = l.rlim_max + 1000;
1366 				l.rlim_max = l.rlim_cur;
1367 			}
1368 			if (setrlimit(RLIMIT_NOFILE, &l) == 0) {
1369 				set_rlimit++;
1370 				errno = old_errno;
1371 				goto retry_open;
1372 			}
1373 		}
1374 		errno = old_errno;
1375 	}
1376 
1377 	if (err != -EINVAL || cpu > 0 || thread > 0)
1378 		goto out_close;
1379 
1380 	/*
1381 	 * Must probe features in the order they were added to the
1382 	 * perf_event_attr interface.
1383 	 */
1384 	if (!perf_missing_features.clockid_wrong && evsel->attr.use_clockid) {
1385 		perf_missing_features.clockid_wrong = true;
1386 		goto fallback_missing_features;
1387 	} else if (!perf_missing_features.clockid && evsel->attr.use_clockid) {
1388 		perf_missing_features.clockid = true;
1389 		goto fallback_missing_features;
1390 	} else if (!perf_missing_features.cloexec && (flags & PERF_FLAG_FD_CLOEXEC)) {
1391 		perf_missing_features.cloexec = true;
1392 		goto fallback_missing_features;
1393 	} else if (!perf_missing_features.mmap2 && evsel->attr.mmap2) {
1394 		perf_missing_features.mmap2 = true;
1395 		goto fallback_missing_features;
1396 	} else if (!perf_missing_features.exclude_guest &&
1397 		   (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
1398 		perf_missing_features.exclude_guest = true;
1399 		goto fallback_missing_features;
1400 	} else if (!perf_missing_features.sample_id_all) {
1401 		perf_missing_features.sample_id_all = true;
1402 		goto retry_sample_id;
1403 	}
1404 
1405 out_close:
1406 	do {
1407 		while (--thread >= 0) {
1408 			close(FD(evsel, cpu, thread));
1409 			FD(evsel, cpu, thread) = -1;
1410 		}
1411 		thread = nthreads;
1412 	} while (--cpu >= 0);
1413 	return err;
1414 }
1415 
1416 void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
1417 {
1418 	if (evsel->fd == NULL)
1419 		return;
1420 
1421 	perf_evsel__close_fd(evsel, ncpus, nthreads);
1422 	perf_evsel__free_fd(evsel);
1423 }
1424 
1425 static struct {
1426 	struct cpu_map map;
1427 	int cpus[1];
1428 } empty_cpu_map = {
1429 	.map.nr	= 1,
1430 	.cpus	= { -1, },
1431 };
1432 
1433 static struct {
1434 	struct thread_map map;
1435 	int threads[1];
1436 } empty_thread_map = {
1437 	.map.nr	 = 1,
1438 	.threads = { -1, },
1439 };
1440 
1441 int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
1442 		     struct thread_map *threads)
1443 {
1444 	if (cpus == NULL) {
1445 		/* Work around old compiler warnings about strict aliasing */
1446 		cpus = &empty_cpu_map.map;
1447 	}
1448 
1449 	if (threads == NULL)
1450 		threads = &empty_thread_map.map;
1451 
1452 	return __perf_evsel__open(evsel, cpus, threads);
1453 }
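
/*
 * Usage sketch (illustrative, error handling elided):
 *
 *	struct perf_event_attr attr = { .type = PERF_TYPE_HARDWARE,
 *					.config = PERF_COUNT_HW_CPU_CYCLES, };
 *	struct perf_evsel *evsel = perf_evsel__new(&attr);
 *
 *	if (perf_evsel__open(evsel, cpus, threads) < 0)
 *		pr_err("failed to open %s\n", perf_evsel__name(evsel));
 */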
1454 
1455 int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
1456 			     struct cpu_map *cpus)
1457 {
1458 	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map);
1459 }
1460 
1461 int perf_evsel__open_per_thread(struct perf_evsel *evsel,
1462 				struct thread_map *threads)
1463 {
1464 	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
1465 }
1466 
1467 static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
1468 				       const union perf_event *event,
1469 				       struct perf_sample *sample)
1470 {
1471 	u64 type = evsel->attr.sample_type;
1472 	const u64 *array = event->sample.array;
1473 	bool swapped = evsel->needs_swap;
1474 	union u64_swap u;
1475 
1476 	array += ((event->header.size -
1477 		   sizeof(event->header)) / sizeof(u64)) - 1;
1478 
1479 	if (type & PERF_SAMPLE_IDENTIFIER) {
1480 		sample->id = *array;
1481 		array--;
1482 	}
1483 
1484 	if (type & PERF_SAMPLE_CPU) {
1485 		u.val64 = *array;
1486 		if (swapped) {
1487 			/* undo swap of u64, then swap on individual u32s */
1488 			u.val64 = bswap_64(u.val64);
1489 			u.val32[0] = bswap_32(u.val32[0]);
1490 		}
1491 
1492 		sample->cpu = u.val32[0];
1493 		array--;
1494 	}
1495 
1496 	if (type & PERF_SAMPLE_STREAM_ID) {
1497 		sample->stream_id = *array;
1498 		array--;
1499 	}
1500 
1501 	if (type & PERF_SAMPLE_ID) {
1502 		sample->id = *array;
1503 		array--;
1504 	}
1505 
1506 	if (type & PERF_SAMPLE_TIME) {
1507 		sample->time = *array;
1508 		array--;
1509 	}
1510 
1511 	if (type & PERF_SAMPLE_TID) {
1512 		u.val64 = *array;
1513 		if (swapped) {
1514 			/* undo swap of u64, then swap on individual u32s */
1515 			u.val64 = bswap_64(u.val64);
1516 			u.val32[0] = bswap_32(u.val32[0]);
1517 			u.val32[1] = bswap_32(u.val32[1]);
1518 		}
1519 
1520 		sample->pid = u.val32[0];
1521 		sample->tid = u.val32[1];
1522 		array--;
1523 	}
1524 
1525 	return 0;
1526 }
1527 
1528 static inline bool overflow(const void *endp, u16 max_size, const void *offset,
1529 			    u64 size)
1530 {
1531 	return size > max_size || offset + size > endp;
1532 }
1533 
1534 #define OVERFLOW_CHECK(offset, size, max_size)				\
1535 	do {								\
1536 		if (overflow(endp, (max_size), (offset), (size)))	\
1537 			return -EFAULT;					\
1538 	} while (0)
1539 
1540 #define OVERFLOW_CHECK_u64(offset) \
1541 	OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64))
1542 
1543 int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
1544 			     struct perf_sample *data)
1545 {
1546 	u64 type = evsel->attr.sample_type;
1547 	bool swapped = evsel->needs_swap;
1548 	const u64 *array;
1549 	u16 max_size = event->header.size;
1550 	const void *endp = (void *)event + max_size;
1551 	u64 sz;
1552 
1553 	/*
1554 	 * used for cross-endian analysis. See git commit 65014ab3
1555 	 * for why this goofiness is needed.
1556 	 */
1557 	union u64_swap u;
1558 
1559 	memset(data, 0, sizeof(*data));
1560 	data->cpu = data->pid = data->tid = -1;
1561 	data->stream_id = data->id = data->time = -1ULL;
1562 	data->period = evsel->attr.sample_period;
1563 	data->weight = 0;
1564 
1565 	if (event->header.type != PERF_RECORD_SAMPLE) {
1566 		if (!evsel->attr.sample_id_all)
1567 			return 0;
1568 		return perf_evsel__parse_id_sample(evsel, event, data);
1569 	}
1570 
1571 	array = event->sample.array;
1572 
1573 	/*
1574 	 * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
1575 	 * up to PERF_SAMPLE_PERIOD.  After that overflow() must be used to
1576 	 * check the format does not go past the end of the event.
1577 	 */
1578 	if (evsel->sample_size + sizeof(event->header) > event->header.size)
1579 		return -EFAULT;
1580 
1581 	data->id = -1ULL;
1582 	if (type & PERF_SAMPLE_IDENTIFIER) {
1583 		data->id = *array;
1584 		array++;
1585 	}
1586 
1587 	if (type & PERF_SAMPLE_IP) {
1588 		data->ip = *array;
1589 		array++;
1590 	}
1591 
1592 	if (type & PERF_SAMPLE_TID) {
1593 		u.val64 = *array;
1594 		if (swapped) {
1595 			/* undo swap of u64, then swap on individual u32s */
1596 			u.val64 = bswap_64(u.val64);
1597 			u.val32[0] = bswap_32(u.val32[0]);
1598 			u.val32[1] = bswap_32(u.val32[1]);
1599 		}
1600 
1601 		data->pid = u.val32[0];
1602 		data->tid = u.val32[1];
1603 		array++;
1604 	}
1605 
1606 	if (type & PERF_SAMPLE_TIME) {
1607 		data->time = *array;
1608 		array++;
1609 	}
1610 
1611 	data->addr = 0;
1612 	if (type & PERF_SAMPLE_ADDR) {
1613 		data->addr = *array;
1614 		array++;
1615 	}
1616 
1617 	if (type & PERF_SAMPLE_ID) {
1618 		data->id = *array;
1619 		array++;
1620 	}
1621 
1622 	if (type & PERF_SAMPLE_STREAM_ID) {
1623 		data->stream_id = *array;
1624 		array++;
1625 	}
1626 
1627 	if (type & PERF_SAMPLE_CPU) {
1628 
1629 		u.val64 = *array;
1630 		if (swapped) {
1631 			/* undo swap of u64, then swap on individual u32s */
1632 			u.val64 = bswap_64(u.val64);
1633 			u.val32[0] = bswap_32(u.val32[0]);
1634 		}
1635 
1636 		data->cpu = u.val32[0];
1637 		array++;
1638 	}
1639 
1640 	if (type & PERF_SAMPLE_PERIOD) {
1641 		data->period = *array;
1642 		array++;
1643 	}
1644 
1645 	if (type & PERF_SAMPLE_READ) {
1646 		u64 read_format = evsel->attr.read_format;
1647 
1648 		OVERFLOW_CHECK_u64(array);
1649 		if (read_format & PERF_FORMAT_GROUP)
1650 			data->read.group.nr = *array;
1651 		else
1652 			data->read.one.value = *array;
1653 
1654 		array++;
1655 
1656 		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
1657 			OVERFLOW_CHECK_u64(array);
1658 			data->read.time_enabled = *array;
1659 			array++;
1660 		}
1661 
1662 		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
1663 			OVERFLOW_CHECK_u64(array);
1664 			data->read.time_running = *array;
1665 			array++;
1666 		}
1667 
1668 		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
1669 		if (read_format & PERF_FORMAT_GROUP) {
1670 			const u64 max_group_nr = UINT64_MAX /
1671 					sizeof(struct sample_read_value);
1672 
1673 			if (data->read.group.nr > max_group_nr)
1674 				return -EFAULT;
1675 			sz = data->read.group.nr *
1676 			     sizeof(struct sample_read_value);
1677 			OVERFLOW_CHECK(array, sz, max_size);
1678 			data->read.group.values =
1679 					(struct sample_read_value *)array;
1680 			array = (void *)array + sz;
1681 		} else {
1682 			OVERFLOW_CHECK_u64(array);
1683 			data->read.one.id = *array;
1684 			array++;
1685 		}
1686 	}
1687 
1688 	if (type & PERF_SAMPLE_CALLCHAIN) {
1689 		const u64 max_callchain_nr = UINT64_MAX / sizeof(u64);
1690 
1691 		OVERFLOW_CHECK_u64(array);
1692 		data->callchain = (struct ip_callchain *)array++;
1693 		if (data->callchain->nr > max_callchain_nr)
1694 			return -EFAULT;
1695 		sz = data->callchain->nr * sizeof(u64);
1696 		OVERFLOW_CHECK(array, sz, max_size);
1697 		array = (void *)array + sz;
1698 	}
1699 
1700 	if (type & PERF_SAMPLE_RAW) {
1701 		OVERFLOW_CHECK_u64(array);
1702 		u.val64 = *array;
1703 		if (WARN_ONCE(swapped,
1704 			      "Endianness of raw data not corrected!\n")) {
1705 			/* undo swap of u64, then swap on individual u32s */
1706 			u.val64 = bswap_64(u.val64);
1707 			u.val32[0] = bswap_32(u.val32[0]);
1708 			u.val32[1] = bswap_32(u.val32[1]);
1709 		}
1710 		data->raw_size = u.val32[0];
1711 		array = (void *)array + sizeof(u32);
1712 
1713 		OVERFLOW_CHECK(array, data->raw_size, max_size);
1714 		data->raw_data = (void *)array;
1715 		array = (void *)array + data->raw_size;
1716 	}
1717 
1718 	if (type & PERF_SAMPLE_BRANCH_STACK) {
1719 		const u64 max_branch_nr = UINT64_MAX /
1720 					  sizeof(struct branch_entry);
1721 
1722 		OVERFLOW_CHECK_u64(array);
1723 		data->branch_stack = (struct branch_stack *)array++;
1724 
1725 		if (data->branch_stack->nr > max_branch_nr)
1726 			return -EFAULT;
1727 		sz = data->branch_stack->nr * sizeof(struct branch_entry);
1728 		OVERFLOW_CHECK(array, sz, max_size);
1729 		array = (void *)array + sz;
1730 	}
1731 
1732 	if (type & PERF_SAMPLE_REGS_USER) {
1733 		OVERFLOW_CHECK_u64(array);
1734 		data->user_regs.abi = *array;
1735 		array++;
1736 
1737 		if (data->user_regs.abi) {
1738 			u64 mask = evsel->attr.sample_regs_user;
1739 
1740 			sz = hweight_long(mask) * sizeof(u64);
1741 			OVERFLOW_CHECK(array, sz, max_size);
1742 			data->user_regs.mask = mask;
1743 			data->user_regs.regs = (u64 *)array;
1744 			array = (void *)array + sz;
1745 		}
1746 	}
1747 
1748 	if (type & PERF_SAMPLE_STACK_USER) {
1749 		OVERFLOW_CHECK_u64(array);
1750 		sz = *array++;
1751 
1752 		data->user_stack.offset = ((char *)(array - 1)
1753 					  - (char *) event);
1754 
1755 		if (!sz) {
1756 			data->user_stack.size = 0;
1757 		} else {
1758 			OVERFLOW_CHECK(array, sz, max_size);
1759 			data->user_stack.data = (char *)array;
1760 			array = (void *)array + sz;
1761 			OVERFLOW_CHECK_u64(array);
1762 			data->user_stack.size = *array++;
1763 			if (WARN_ONCE(data->user_stack.size > sz,
1764 				      "user stack dump failure\n"))
1765 				return -EFAULT;
1766 		}
1767 	}
1768 
1769 	data->weight = 0;
1770 	if (type & PERF_SAMPLE_WEIGHT) {
1771 		OVERFLOW_CHECK_u64(array);
1772 		data->weight = *array;
1773 		array++;
1774 	}
1775 
1776 	data->data_src = PERF_MEM_DATA_SRC_NONE;
1777 	if (type & PERF_SAMPLE_DATA_SRC) {
1778 		OVERFLOW_CHECK_u64(array);
1779 		data->data_src = *array;
1780 		array++;
1781 	}
1782 
1783 	data->transaction = 0;
1784 	if (type & PERF_SAMPLE_TRANSACTION) {
1785 		OVERFLOW_CHECK_u64(array);
1786 		data->transaction = *array;
1787 		array++;
1788 	}
1789 
1790 	data->intr_regs.abi = PERF_SAMPLE_REGS_ABI_NONE;
1791 	if (type & PERF_SAMPLE_REGS_INTR) {
1792 		OVERFLOW_CHECK_u64(array);
1793 		data->intr_regs.abi = *array;
1794 		array++;
1795 
1796 		if (data->intr_regs.abi != PERF_SAMPLE_REGS_ABI_NONE) {
1797 			u64 mask = evsel->attr.sample_regs_intr;
1798 
1799 			sz = hweight_long(mask) * sizeof(u64);
1800 			OVERFLOW_CHECK(array, sz, max_size);
1801 			data->intr_regs.mask = mask;
1802 			data->intr_regs.regs = (u64 *)array;
1803 			array = (void *)array + sz;
1804 		}
1805 	}
1806 
1807 	return 0;
1808 }
1809 
1810 size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
1811 				     u64 read_format)
1812 {
1813 	size_t sz, result = sizeof(struct sample_event);
1814 
1815 	if (type & PERF_SAMPLE_IDENTIFIER)
1816 		result += sizeof(u64);
1817 
1818 	if (type & PERF_SAMPLE_IP)
1819 		result += sizeof(u64);
1820 
1821 	if (type & PERF_SAMPLE_TID)
1822 		result += sizeof(u64);
1823 
1824 	if (type & PERF_SAMPLE_TIME)
1825 		result += sizeof(u64);
1826 
1827 	if (type & PERF_SAMPLE_ADDR)
1828 		result += sizeof(u64);
1829 
1830 	if (type & PERF_SAMPLE_ID)
1831 		result += sizeof(u64);
1832 
1833 	if (type & PERF_SAMPLE_STREAM_ID)
1834 		result += sizeof(u64);
1835 
1836 	if (type & PERF_SAMPLE_CPU)
1837 		result += sizeof(u64);
1838 
1839 	if (type & PERF_SAMPLE_PERIOD)
1840 		result += sizeof(u64);
1841 
1842 	if (type & PERF_SAMPLE_READ) {
1843 		result += sizeof(u64);
1844 		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1845 			result += sizeof(u64);
1846 		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1847 			result += sizeof(u64);
1848 		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
1849 		if (read_format & PERF_FORMAT_GROUP) {
1850 			sz = sample->read.group.nr *
1851 			     sizeof(struct sample_read_value);
1852 			result += sz;
1853 		} else {
1854 			result += sizeof(u64);
1855 		}
1856 	}
1857 
1858 	if (type & PERF_SAMPLE_CALLCHAIN) {
1859 		sz = (sample->callchain->nr + 1) * sizeof(u64);
1860 		result += sz;
1861 	}
1862 
1863 	if (type & PERF_SAMPLE_RAW) {
1864 		result += sizeof(u32);
1865 		result += sample->raw_size;
1866 	}
1867 
1868 	if (type & PERF_SAMPLE_BRANCH_STACK) {
1869 		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
1870 		sz += sizeof(u64);
1871 		result += sz;
1872 	}
1873 
1874 	if (type & PERF_SAMPLE_REGS_USER) {
1875 		if (sample->user_regs.abi) {
1876 			result += sizeof(u64);
1877 			sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
1878 			result += sz;
1879 		} else {
1880 			result += sizeof(u64);
1881 		}
1882 	}
1883 
1884 	if (type & PERF_SAMPLE_STACK_USER) {
1885 		sz = sample->user_stack.size;
1886 		result += sizeof(u64);
1887 		if (sz) {
1888 			result += sz;
1889 			result += sizeof(u64);
1890 		}
1891 	}
1892 
1893 	if (type & PERF_SAMPLE_WEIGHT)
1894 		result += sizeof(u64);
1895 
1896 	if (type & PERF_SAMPLE_DATA_SRC)
1897 		result += sizeof(u64);
1898 
1899 	if (type & PERF_SAMPLE_TRANSACTION)
1900 		result += sizeof(u64);
1901 
1902 	if (type & PERF_SAMPLE_REGS_INTR) {
1903 		if (sample->intr_regs.abi) {
1904 			result += sizeof(u64);
1905 			sz = hweight_long(sample->intr_regs.mask) * sizeof(u64);
1906 			result += sz;
1907 		} else {
1908 			result += sizeof(u64);
1909 		}
1910 	}
1911 
1912 	return result;
1913 }
1914 
1915 int perf_event__synthesize_sample(union perf_event *event, u64 type,
1916 				  u64 read_format,
1917 				  const struct perf_sample *sample,
1918 				  bool swapped)
1919 {
1920 	u64 *array;
1921 	size_t sz;
1922 	/*
1923 	 * used for cross-endian analysis. See git commit 65014ab3
1924 	 * for why this goofiness is needed.
1925 	 */
1926 	union u64_swap u;
1927 
1928 	array = event->sample.array;
1929 
1930 	if (type & PERF_SAMPLE_IDENTIFIER) {
1931 		*array = sample->id;
1932 		array++;
1933 	}
1934 
1935 	if (type & PERF_SAMPLE_IP) {
1936 		*array = sample->ip;
1937 		array++;
1938 	}
1939 
1940 	if (type & PERF_SAMPLE_TID) {
1941 		u.val32[0] = sample->pid;
1942 		u.val32[1] = sample->tid;
1943 		if (swapped) {
1944 			/*
1945 			 * Inverse of what is done in perf_evsel__parse_sample
1946 			 */
1947 			u.val32[0] = bswap_32(u.val32[0]);
1948 			u.val32[1] = bswap_32(u.val32[1]);
1949 			u.val64 = bswap_64(u.val64);
1950 		}
1951 
1952 		*array = u.val64;
1953 		array++;
1954 	}
1955 
1956 	if (type & PERF_SAMPLE_TIME) {
1957 		*array = sample->time;
1958 		array++;
1959 	}
1960 
1961 	if (type & PERF_SAMPLE_ADDR) {
1962 		*array = sample->addr;
1963 		array++;
1964 	}
1965 
1966 	if (type & PERF_SAMPLE_ID) {
1967 		*array = sample->id;
1968 		array++;
1969 	}
1970 
1971 	if (type & PERF_SAMPLE_STREAM_ID) {
1972 		*array = sample->stream_id;
1973 		array++;
1974 	}
1975 
1976 	if (type & PERF_SAMPLE_CPU) {
1977 		u.val32[0] = sample->cpu;
1978 		if (swapped) {
1979 			/*
1980 			 * Inverse of what is done in perf_evsel__parse_sample
1981 			 */
1982 			u.val32[0] = bswap_32(u.val32[0]);
1983 			u.val64 = bswap_64(u.val64);
1984 		}
1985 		*array = u.val64;
1986 		array++;
1987 	}
1988 
1989 	if (type & PERF_SAMPLE_PERIOD) {
1990 		*array = sample->period;
1991 		array++;
1992 	}
1993 
1994 	if (type & PERF_SAMPLE_READ) {
1995 		if (read_format & PERF_FORMAT_GROUP)
1996 			*array = sample->read.group.nr;
1997 		else
1998 			*array = sample->read.one.value;
1999 		array++;
2000 
2001 		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
2002 			*array = sample->read.time_enabled;
2003 			array++;
2004 		}
2005 
2006 		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
2007 			*array = sample->read.time_running;
2008 			array++;
2009 		}
2010 
2011 		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
2012 		if (read_format & PERF_FORMAT_GROUP) {
2013 			sz = sample->read.group.nr *
2014 			     sizeof(struct sample_read_value);
2015 			memcpy(array, sample->read.group.values, sz);
2016 			array = (void *)array + sz;
2017 		} else {
2018 			*array = sample->read.one.id;
2019 			array++;
2020 		}
2021 	}
2022 
2023 	if (type & PERF_SAMPLE_CALLCHAIN) {
2024 		sz = (sample->callchain->nr + 1) * sizeof(u64);
2025 		memcpy(array, sample->callchain, sz);
2026 		array = (void *)array + sz;
2027 	}
2028 
2029 	if (type & PERF_SAMPLE_RAW) {
2030 		u.val32[0] = sample->raw_size;
2031 		if (WARN_ONCE(swapped,
2032 			      "Endianness of raw data not corrected!\n")) {
2033 			/*
2034 			 * Inverse of what is done in perf_evsel__parse_sample
2035 			 */
2036 			u.val32[0] = bswap_32(u.val32[0]);
2037 			u.val32[1] = bswap_32(u.val32[1]);
2038 			u.val64 = bswap_64(u.val64);
2039 		}
2040 		*array = u.val64;
2041 		array = (void *)array + sizeof(u32);
2042 
2043 		memcpy(array, sample->raw_data, sample->raw_size);
2044 		array = (void *)array + sample->raw_size;
2045 	}
2046 
2047 	if (type & PERF_SAMPLE_BRANCH_STACK) {
2048 		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
2049 		sz += sizeof(u64);
2050 		memcpy(array, sample->branch_stack, sz);
2051 		array = (void *)array + sz;
2052 	}
2053 
2054 	if (type & PERF_SAMPLE_REGS_USER) {
2055 		if (sample->user_regs.abi) {
2056 			*array++ = sample->user_regs.abi;
2057 			sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
2058 			memcpy(array, sample->user_regs.regs, sz);
2059 			array = (void *)array + sz;
2060 		} else {
2061 			*array++ = 0;
2062 		}
2063 	}
2064 
2065 	if (type & PERF_SAMPLE_STACK_USER) {
2066 		sz = sample->user_stack.size;
2067 		*array++ = sz;
2068 		if (sz) {
2069 			memcpy(array, sample->user_stack.data, sz);
2070 			array = (void *)array + sz;
2071 			*array++ = sz;
2072 		}
2073 	}
2074 
2075 	if (type & PERF_SAMPLE_WEIGHT) {
2076 		*array = sample->weight;
2077 		array++;
2078 	}
2079 
2080 	if (type & PERF_SAMPLE_DATA_SRC) {
2081 		*array = sample->data_src;
2082 		array++;
2083 	}
2084 
2085 	if (type & PERF_SAMPLE_TRANSACTION) {
2086 		*array = sample->transaction;
2087 		array++;
2088 	}
2089 
2090 	if (type & PERF_SAMPLE_REGS_INTR) {
2091 		if (sample->intr_regs.abi) {
2092 			*array++ = sample->intr_regs.abi;
2093 			sz = hweight_long(sample->intr_regs.mask) * sizeof(u64);
2094 			memcpy(array, sample->intr_regs.regs, sz);
2095 			array = (void *)array + sz;
2096 		} else {
2097 			*array++ = 0;
2098 		}
2099 	}
2100 
2101 	return 0;
2102 }
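/*
 * Illustrative sketch, not part of this file: callers typically pair
 * perf_event__sample_event_size() with perf_event__synthesize_sample(),
 * sizing the buffer first, then filling it.  Assumes a parsed 'sample'
 * plus the evsel's sample_type/read_format in 'type'/'read_format':
 *
 *	size_t size = perf_event__sample_event_size(sample, type, read_format);
 *	union perf_event *event = zalloc(size);
 *	int err;
 *
 *	if (event == NULL)
 *		return -ENOMEM;
 *	event->header.type = PERF_RECORD_SAMPLE;
 *	event->header.size = size;
 *	err = perf_event__synthesize_sample(event, type, read_format,
 *					    sample, false);
 */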
2103 
2104 struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name)
2105 {
2106 	return pevent_find_field(evsel->tp_format, name);
2107 }
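/*
 * Illustrative sketch, not part of this file: probing for a field
 * before using it ("prev_pid" is just an example name, from
 * sched:sched_switch):
 *
 *	if (perf_evsel__field(evsel, "prev_pid") == NULL)
 *		pr_debug("field not present in this tracepoint format\n");
 */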
2108 
2109 void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
2110 			 const char *name)
2111 {
2112 	struct format_field *field = perf_evsel__field(evsel, name);
2113 	int offset;
2114 
2115 	if (!field)
2116 		return NULL;
2117 
2118 	offset = field->offset;
2119 
2120 	if (field->flags & FIELD_IS_DYNAMIC) {
2121 		offset = *(int *)(sample->raw_data + field->offset);
2122 		offset &= 0xffff;	/* __data_loc: offset lives in the low 16 bits */
2123 	}
2124 
2125 	return sample->raw_data + offset;
2126 }
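/*
 * Illustrative sketch, not part of this file: string and other
 * dynamic (__data_loc) fields are fetched via the raw pointer; field
 * names depend on the tracepoint, e.g. for sched:sched_switch:
 *
 *	const char *prev_comm = perf_evsel__rawptr(evsel, sample,
 *						   "prev_comm");
 */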
2127 
2128 u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
2129 		       const char *name)
2130 {
2131 	struct format_field *field = perf_evsel__field(evsel, name);
2132 	void *ptr;
2133 	u64 value;
2134 
2135 	if (!field)
2136 		return 0;
2137 
2138 	ptr = sample->raw_data + field->offset;
2139 
2140 	switch (field->size) {
2141 	case 1:
2142 		return *(u8 *)ptr;
2143 	case 2:
2144 		value = *(u16 *)ptr;
2145 		break;
2146 	case 4:
2147 		value = *(u32 *)ptr;
2148 		break;
2149 	case 8:
2150 		memcpy(&value, ptr, sizeof(u64));
2151 		break;
2152 	default:
2153 		return 0;
2154 	}
2155 
2156 	if (!evsel->needs_swap)
2157 		return value;
2158 
2159 	switch (field->size) {
2160 	case 2:
2161 		return bswap_16(value);
2162 	case 4:
2163 		return bswap_32(value);
2164 	case 8:
2165 		return bswap_64(value);
2166 	default:
2167 		return 0;
2168 	}
2171 }
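/*
 * Illustrative sketch, not part of this file: integer fields of any
 * of the widths handled above come back as a host-endian u64, e.g.:
 *
 *	u64 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid");
 *
 * Unknown fields and unsupported sizes read as 0.
 */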
2172 
2173 static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...)
2174 {
2175 	va_list args;
2176 	int ret = 0;
2177 
2178 	if (!*first) {
2179 		ret += fprintf(fp, ",");
2180 	} else {
2181 		ret += fprintf(fp, ":");
2182 		*first = false;
2183 	}
2184 
2185 	va_start(args, fmt);
2186 	ret += vfprintf(fp, fmt, args);
2187 	va_end(args);
2188 	return ret;
2189 }
2190 
2191 static int __print_attr__fprintf(FILE *fp, const char *name, const char *val, void *priv)
2192 {
2193 	return comma_fprintf(fp, (bool *)priv, " %s: %s", name, val);
2194 }
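/*
 * Note: the "comma" helper prints ":" before the first item and ","
 * before every later one, so repeated calls from perf_evsel__fprintf()
 * below yield "event-name: a, b, c" style output.
 */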
2195 
2196 int perf_evsel__fprintf(struct perf_evsel *evsel,
2197 			struct perf_attr_details *details, FILE *fp)
2198 {
2199 	bool first = true;
2200 	int printed = 0;
2201 
2202 	if (details->event_group) {
2203 		struct perf_evsel *pos;
2204 
2205 		if (!perf_evsel__is_group_leader(evsel))
2206 			return 0;
2207 
2208 		if (evsel->nr_members > 1)
2209 			printed += fprintf(fp, "%s{", evsel->group_name ?: "");
2210 
2211 		printed += fprintf(fp, "%s", perf_evsel__name(evsel));
2212 		for_each_group_member(pos, evsel)
2213 			printed += fprintf(fp, ",%s", perf_evsel__name(pos));
2214 
2215 		if (evsel->nr_members > 1)
2216 			printed += fprintf(fp, "}");
2217 		goto out;
2218 	}
2219 
2220 	printed += fprintf(fp, "%s", perf_evsel__name(evsel));
2221 
2222 	if (details->verbose) {
2223 		printed += perf_event_attr__fprintf(fp, &evsel->attr,
2224 						    __print_attr__fprintf, &first);
2225 	} else if (details->freq) {
2226 		const char *term = "sample_freq";
2227 
2228 		if (!evsel->attr.freq)
2229 			term = "sample_period";
2230 
2231 		printed += comma_fprintf(fp, &first, " %s=%" PRIu64,
2232 					 term, (u64)evsel->attr.sample_freq);
2233 	}
2234 out:
2235 	fputc('\n', fp);
2236 	return ++printed;
2237 }
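/*
 * Illustrative sketch, not part of this file: dumping an evsel with
 * full attr details, roughly what a verbose evlist listing does:
 *
 *	struct perf_attr_details details = { .verbose = true, };
 *
 *	perf_evsel__fprintf(evsel, &details, stdout);
 */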
2238 
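/**
 * perf_evsel__fallback - switch to a more widely available event after
 * a failed open.
 * @evsel: event that failed to open
 * @err: errno from sys_perf_event_open()
 * @msg: buffer for a message describing the fallback
 * @msgsize: size of @msg
 *
 * Currently only falls back from the hardware cycles event to the
 * software cpu-clock event.  Returns true if the caller should retry
 * the open with the modified attr.
 */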
2239 bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
2240 			  char *msg, size_t msgsize)
2241 {
2242 	if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
2243 	    evsel->attr.type   == PERF_TYPE_HARDWARE &&
2244 	    evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
2245 		/*
2246 		 * If it's cycles then fall back to the hrtimer-based
2247 		 * cpu-clock-tick sw counter, which is always available even
2248 		 * when there is no PMU support.
2249 		 *
2250 		 * PPC returns ENXIO until 2.6.37 (behavior changed with commit
2251 		 * b0a873e).
2252 		 */
2253 		scnprintf(msg, msgsize, "%s",
2254 "The cycles event is not supported, trying to fall back to cpu-clock-ticks");
2255 
2256 		evsel->attr.type   = PERF_TYPE_SOFTWARE;
2257 		evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK;
2258 
2259 		zfree(&evsel->name);
2260 		return true;
2261 	}
2262 
2263 	return false;
2264 }
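/*
 * Illustrative sketch, not part of this file: the builtin tools retry
 * the open after a successful fallback, along these lines ('cpus' and
 * 'threads' as set up by the caller):
 *
 *	char msg[BUFSIZ];
 *
 *	while (perf_evsel__open(evsel, cpus, threads) < 0) {
 *		if (perf_evsel__fallback(evsel, errno, msg, sizeof(msg))) {
 *			pr_debug("%s\n", msg);
 *			continue;
 *		}
 *		break;
 *	}
 */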
2265 
2266 int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
2267 			      int err, char *msg, size_t size)
2268 {
2269 	char sbuf[STRERR_BUFSIZE];
2270 
2271 	switch (err) {
2272 	case EPERM:
2273 	case EACCES:
2274 		return scnprintf(msg, size,
2275 		 "You may not have permission to collect %sstats.\n"
2276 		 "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n"
2277 		 " -1 - Not paranoid at all\n"
2278 		 "  0 - Disallow raw tracepoint access for unpriv\n"
2279 		 "  1 - Disallow cpu events for unpriv\n"
2280 		 "  2 - Disallow kernel profiling for unpriv",
2281 				 target->system_wide ? "system-wide " : "");
2282 	case ENOENT:
2283 		return scnprintf(msg, size, "The %s event is not supported.",
2284 				 perf_evsel__name(evsel));
2285 	case EMFILE:
2286 		return scnprintf(msg, size, "%s",
2287 			 "Too many events are open.\n"
2288 			 "Probably the maximum number of open file descriptors has been reached.\n"
2289 			 "Hint: Try again after reducing the number of events.\n"
2290 			 "Hint: Try increasing the limit with 'ulimit -n <limit>'");
2291 	case ENODEV:
2292 		if (target->cpu_list)
2293 			return scnprintf(msg, size, "%s",
2294 	 "No such device - did you specify an out-of-range profile CPU?");
2295 		break;
2296 	case EOPNOTSUPP:
2297 		if (evsel->attr.precise_ip)
2298 			return scnprintf(msg, size, "%s",
2299 	"'precise' request may not be supported. Try removing the 'p' modifier.");
2300 #if defined(__i386__) || defined(__x86_64__)
2301 		if (evsel->attr.type == PERF_TYPE_HARDWARE)
2302 			return scnprintf(msg, size, "%s",
2303 	"No hardware sampling interrupt available.\n"
2304 	"No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it.");
2305 #endif
2306 		break;
2307 	case EBUSY:
2308 		if (find_process("oprofiled"))
2309 			return scnprintf(msg, size,
2310 	"The PMU counters are busy/taken by another profiler.\n"
2311 	"We found the oprofile daemon running; please stop it and try again.");
2312 		break;
2313 	case EINVAL:
2314 		if (perf_missing_features.clockid)
2315 			return scnprintf(msg, size, "clockid feature not supported.");
2316 		if (perf_missing_features.clockid_wrong)
2317 			return scnprintf(msg, size, "wrong clockid (%d).", clockid);
2318 		break;
2319 	default:
2320 		break;
2321 	}
2322 
2323 	return scnprintf(msg, size,
2324 	"The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
2325 	"/bin/dmesg may provide additional information.\n"
2326 	"Was the kernel compiled without CONFIG_PERF_EVENTS=y support?\n",
2327 			 err, strerror_r(err, sbuf, sizeof(sbuf)),
2328 			 perf_evsel__name(evsel));
2329 }
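/*
 * Illustrative sketch, not part of this file: turning a failed open
 * into a user-facing hint ('target', 'cpus' and 'threads' as set up
 * by the calling tool):
 *
 *	if (perf_evsel__open(evsel, cpus, threads) < 0) {
 *		char errbuf[BUFSIZ];
 *
 *		perf_evsel__open_strerror(evsel, &target, errno,
 *					  errbuf, sizeof(errbuf));
 *		ui__error("%s\n", errbuf);
 *	}
 */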
2330