xref: /linux/tools/perf/util/intel-bts.c (revision e9f0878c4b2004ac19581274c1ae4c61ae3ca70e)
1 /*
2  * intel-bts.c: Intel Branch Trace Store (BTS) support
3  * Copyright (c) 2013-2015, Intel Corporation.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  */
15 
16 #include <endian.h>
17 #include <errno.h>
18 #include <byteswap.h>
19 #include <inttypes.h>
20 #include <linux/kernel.h>
21 #include <linux/types.h>
22 #include <linux/bitops.h>
23 #include <linux/log2.h>
24 
25 #include "cpumap.h"
26 #include "color.h"
27 #include "evsel.h"
28 #include "evlist.h"
29 #include "machine.h"
30 #include "session.h"
31 #include "util.h"
32 #include "thread.h"
33 #include "thread-stack.h"
34 #include "debug.h"
35 #include "tsc.h"
36 #include "auxtrace.h"
37 #include "intel-pt-decoder/intel-pt-insn-decoder.h"
38 #include "intel-bts.h"
39 
40 #define MAX_TIMESTAMP (~0ULL)
41 
42 #define INTEL_BTS_ERR_NOINSN  5
43 #define INTEL_BTS_ERR_LOST    9
44 
45 #if __BYTE_ORDER == __BIG_ENDIAN
46 #define le64_to_cpu bswap_64
47 #else
48 #define le64_to_cpu
49 #endif
50 
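/*
 * Per-session state for processing BTS data: the auxtrace queues holding the
 * trace, the heap used to process those queues in timestamp order, and the
 * options and sample IDs used when synthesizing branch samples.
 */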
51 struct intel_bts {
52 	struct auxtrace			auxtrace;
53 	struct auxtrace_queues		queues;
54 	struct auxtrace_heap		heap;
55 	u32				auxtrace_type;
56 	struct perf_session		*session;
57 	struct machine			*machine;
58 	bool				sampling_mode;
59 	bool				snapshot_mode;
60 	bool				data_queued;
61 	u32				pmu_type;
62 	struct perf_tsc_conversion	tc;
63 	bool				cap_user_time_zero;
64 	struct itrace_synth_opts	synth_opts;
65 	bool				sample_branches;
66 	u32				branches_filter;
67 	u64				branches_sample_type;
68 	u64				branches_id;
69 	size_t				branches_event_size;
70 	unsigned long			num_events;
71 };
72 
73 struct intel_bts_queue {
74 	struct intel_bts	*bts;
75 	unsigned int		queue_nr;
76 	struct auxtrace_buffer	*buffer;
77 	bool			on_heap;
78 	bool			done;
79 	pid_t			pid;
80 	pid_t			tid;
81 	int			cpu;
82 	u64			time;
83 	struct intel_pt_insn	intel_pt_insn;
84 	u32			sample_flags;
85 };
86 
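/*
 * Layout of one BTS record: the branch 'from' and 'to' addresses followed by
 * a flags word, all little-endian u64 values. The dump code below treats
 * bit 4 of the flags as the 'predicted' bit.
 */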
87 struct branch {
88 	u64 from;
89 	u64 to;
90 	u64 misc;
91 };
92 
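/* Hex dump the raw trace, one BTS record per line (used when dump_trace is set) */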
93 static void intel_bts_dump(struct intel_bts *bts __maybe_unused,
94 			   unsigned char *buf, size_t len)
95 {
96 	struct branch *branch;
97 	size_t i, pos = 0, br_sz = sizeof(struct branch), sz;
98 	const char *color = PERF_COLOR_BLUE;
99 
100 	color_fprintf(stdout, color,
101 		      ". ... Intel BTS data: size %zu bytes\n",
102 		      len);
103 
104 	while (len) {
105 		if (len >= br_sz)
106 			sz = br_sz;
107 		else
108 			sz = len;
109 		printf(".");
110 		color_fprintf(stdout, color, "  %08zx: ", pos);
111 		for (i = 0; i < sz; i++)
112 			color_fprintf(stdout, color, " %02x", buf[i]);
113 		for (; i < br_sz; i++)
114 			color_fprintf(stdout, color, "   ");
115 		if (len >= br_sz) {
116 			branch = (struct branch *)buf;
117 			color_fprintf(stdout, color, " %"PRIx64" -> %"PRIx64" %s\n",
118 				      le64_to_cpu(branch->from),
119 				      le64_to_cpu(branch->to),
120 				      le64_to_cpu(branch->misc) & 0x10 ?
121 							"pred" : "miss");
122 		} else {
123 			color_fprintf(stdout, color, " Bad record!\n");
124 		}
125 		pos += sz;
126 		buf += sz;
127 		len -= sz;
128 	}
129 }
130 
131 static void intel_bts_dump_event(struct intel_bts *bts, unsigned char *buf,
132 				 size_t len)
133 {
134 	printf(".\n");
135 	intel_bts_dump(bts, buf, len);
136 }
137 
138 static int intel_bts_lost(struct intel_bts *bts, struct perf_sample *sample)
139 {
140 	union perf_event event;
141 	int err;
142 
143 	auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
144 			     INTEL_BTS_ERR_LOST, sample->cpu, sample->pid,
145 			     sample->tid, 0, "Lost trace data");
146 
147 	err = perf_session__deliver_synth_event(bts->session, &event, NULL);
148 	if (err)
149 		pr_err("Intel BTS: failed to deliver error event, error %d\n",
150 		       err);
151 
152 	return err;
153 }
154 
155 static struct intel_bts_queue *intel_bts_alloc_queue(struct intel_bts *bts,
156 						     unsigned int queue_nr)
157 {
158 	struct intel_bts_queue *btsq;
159 
160 	btsq = zalloc(sizeof(struct intel_bts_queue));
161 	if (!btsq)
162 		return NULL;
163 
164 	btsq->bts = bts;
165 	btsq->queue_nr = queue_nr;
166 	btsq->pid = -1;
167 	btsq->tid = -1;
168 	btsq->cpu = -1;
169 
170 	return btsq;
171 }
172 
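/*
 * Create the per-queue state on first use and, when not in sampling mode,
 * add the queue to the heap keyed by the timestamp of its first buffer so
 * that queues are processed in time order.
 */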
173 static int intel_bts_setup_queue(struct intel_bts *bts,
174 				 struct auxtrace_queue *queue,
175 				 unsigned int queue_nr)
176 {
177 	struct intel_bts_queue *btsq = queue->priv;
178 
179 	if (list_empty(&queue->head))
180 		return 0;
181 
182 	if (!btsq) {
183 		btsq = intel_bts_alloc_queue(bts, queue_nr);
184 		if (!btsq)
185 			return -ENOMEM;
186 		queue->priv = btsq;
187 
188 		if (queue->cpu != -1)
189 			btsq->cpu = queue->cpu;
190 		btsq->tid = queue->tid;
191 	}
192 
193 	if (bts->sampling_mode)
194 		return 0;
195 
196 	if (!btsq->on_heap && !btsq->buffer) {
197 		int ret;
198 
199 		btsq->buffer = auxtrace_buffer__next(queue, NULL);
200 		if (!btsq->buffer)
201 			return 0;
202 
203 		ret = auxtrace_heap__add(&bts->heap, queue_nr,
204 					 btsq->buffer->reference);
205 		if (ret)
206 			return ret;
207 		btsq->on_heap = true;
208 	}
209 
210 	return 0;
211 }
212 
213 static int intel_bts_setup_queues(struct intel_bts *bts)
214 {
215 	unsigned int i;
216 	int ret;
217 
218 	for (i = 0; i < bts->queues.nr_queues; i++) {
219 		ret = intel_bts_setup_queue(bts, &bts->queues.queue_array[i],
220 					    i);
221 		if (ret)
222 			return ret;
223 	}
224 	return 0;
225 }
226 
227 static inline int intel_bts_update_queues(struct intel_bts *bts)
228 {
229 	if (bts->queues.new_data) {
230 		bts->queues.new_data = false;
231 		return intel_bts_setup_queues(bts);
232 	}
233 	return 0;
234 }
235 
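/*
 * Successive snapshots can overlap. Compare the tail of the previous buffer
 * (buf_a) with the start of the current one (buf_b) and return a pointer to
 * the first byte of buf_b that is not a duplicate. For example, if buf_a
 * ends with records {A,B,C} and buf_b starts with {B,C,D}, the pointer
 * returned is to D. If no overlap is found, all of buf_b is new data.
 */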
236 static unsigned char *intel_bts_find_overlap(unsigned char *buf_a, size_t len_a,
237 					     unsigned char *buf_b, size_t len_b)
238 {
239 	size_t offs, len;
240 
241 	if (len_a > len_b)
242 		offs = len_a - len_b;
243 	else
244 		offs = 0;
245 
246 	for (; offs < len_a; offs += sizeof(struct branch)) {
247 		len = len_a - offs;
248 		if (!memcmp(buf_a + offs, buf_b, len))
249 			return buf_b + len;
250 	}
251 
252 	return buf_b;
253 }
254 
255 static int intel_bts_do_fix_overlap(struct auxtrace_queue *queue,
256 				    struct auxtrace_buffer *b)
257 {
258 	struct auxtrace_buffer *a;
259 	void *start;
260 
261 	if (b->list.prev == &queue->head)
262 		return 0;
263 	a = list_entry(b->list.prev, struct auxtrace_buffer, list);
264 	start = intel_bts_find_overlap(a->data, a->size, b->data, b->size);
265 	if (!start)
266 		return -EINVAL;
267 	b->use_size = b->data + b->size - start;
268 	b->use_data = start;
269 	return 0;
270 }
271 
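/*
 * Synthesize a PERF_RECORD_SAMPLE for one branch record and deliver it to the
 * session. When the itrace 'inject' option is used, the sample is also packed
 * into the event itself so that 'perf inject' can write it out directly.
 */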
272 static int intel_bts_synth_branch_sample(struct intel_bts_queue *btsq,
273 					 struct branch *branch)
274 {
275 	int ret;
276 	struct intel_bts *bts = btsq->bts;
277 	union perf_event event;
278 	struct perf_sample sample = { .ip = 0, };
279 
280 	if (bts->synth_opts.initial_skip &&
281 	    bts->num_events++ <= bts->synth_opts.initial_skip)
282 		return 0;
283 
284 	event.sample.header.type = PERF_RECORD_SAMPLE;
285 	event.sample.header.misc = PERF_RECORD_MISC_USER;
286 	event.sample.header.size = sizeof(struct perf_event_header);
287 
288 	sample.cpumode = PERF_RECORD_MISC_USER;
289 	sample.ip = le64_to_cpu(branch->from);
290 	sample.pid = btsq->pid;
291 	sample.tid = btsq->tid;
292 	sample.addr = le64_to_cpu(branch->to);
293 	sample.id = btsq->bts->branches_id;
294 	sample.stream_id = btsq->bts->branches_id;
295 	sample.period = 1;
296 	sample.cpu = btsq->cpu;
297 	sample.flags = btsq->sample_flags;
298 	sample.insn_len = btsq->intel_pt_insn.length;
299 	memcpy(sample.insn, btsq->intel_pt_insn.buf, INTEL_PT_INSN_BUF_SZ);
300 
301 	if (bts->synth_opts.inject) {
302 		event.sample.header.size = bts->branches_event_size;
303 		ret = perf_event__synthesize_sample(&event,
304 						    bts->branches_sample_type,
305 						    0, &sample);
306 		if (ret)
307 			return ret;
308 	}
309 
310 	ret = perf_session__deliver_synth_event(bts->session, &event, &sample);
311 	if (ret)
312 		pr_err("Intel BTS: failed to deliver branch event, error %d\n",
313 		       ret);
314 
315 	return ret;
316 }
317 
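/*
 * Read the instruction at the branch 'from' address from the thread's DSO and
 * decode it with the Intel PT instruction decoder, filling in
 * btsq->intel_pt_insn (instruction length, opcode bytes and branch type).
 */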
318 static int intel_bts_get_next_insn(struct intel_bts_queue *btsq, u64 ip)
319 {
320 	struct machine *machine = btsq->bts->machine;
321 	struct thread *thread;
322 	struct addr_location al;
323 	unsigned char buf[INTEL_PT_INSN_BUF_SZ];
324 	ssize_t len;
325 	int x86_64;
326 	uint8_t cpumode;
327 	int err = -1;
328 
329 	if (machine__kernel_ip(machine, ip))
330 		cpumode = PERF_RECORD_MISC_KERNEL;
331 	else
332 		cpumode = PERF_RECORD_MISC_USER;
333 
334 	thread = machine__find_thread(machine, -1, btsq->tid);
335 	if (!thread)
336 		return -1;
337 
338 	if (!thread__find_map(thread, cpumode, ip, &al) || !al.map->dso)
339 		goto out_put;
340 
341 	len = dso__data_read_addr(al.map->dso, al.map, machine, ip, buf,
342 				  INTEL_PT_INSN_BUF_SZ);
343 	if (len <= 0)
344 		goto out_put;
345 
346 	/* Load maps to ensure dso->is_64_bit has been updated */
347 	map__load(al.map);
348 
349 	x86_64 = al.map->dso->is_64_bit;
350 
351 	if (intel_pt_get_insn(buf, len, x86_64, &btsq->intel_pt_insn))
352 		goto out_put;
353 
354 	err = 0;
355 out_put:
356 	thread__put(thread);
357 	return err;
358 }
359 
360 static int intel_bts_synth_error(struct intel_bts *bts, int cpu, pid_t pid,
361 				 pid_t tid, u64 ip)
362 {
363 	union perf_event event;
364 	int err;
365 
366 	auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
367 			     INTEL_BTS_ERR_NOINSN, cpu, pid, tid, ip,
368 			     "Failed to get instruction");
369 
370 	err = perf_session__deliver_synth_event(bts->session, &event, NULL);
371 	if (err)
372 		pr_err("Intel BTS: failed to deliver error event, error %d\n",
373 		       err);
374 
375 	return err;
376 }
377 
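/*
 * Work out the sample flags for a branch record: a zero 'from' address marks
 * the start of tracing, a zero 'to' address marks the end, otherwise the type
 * comes from decoding the branch instruction. A branch from user space into
 * the kernel that is not a syscall is assumed to be an asynchronous branch
 * (e.g. an interrupt).
 */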
378 static int intel_bts_get_branch_type(struct intel_bts_queue *btsq,
379 				     struct branch *branch)
380 {
381 	int err;
382 
383 	if (!branch->from) {
384 		if (branch->to)
385 			btsq->sample_flags = PERF_IP_FLAG_BRANCH |
386 					     PERF_IP_FLAG_TRACE_BEGIN;
387 		else
388 			btsq->sample_flags = 0;
389 		btsq->intel_pt_insn.length = 0;
390 	} else if (!branch->to) {
391 		btsq->sample_flags = PERF_IP_FLAG_BRANCH |
392 				     PERF_IP_FLAG_TRACE_END;
393 		btsq->intel_pt_insn.length = 0;
394 	} else {
395 		err = intel_bts_get_next_insn(btsq, branch->from);
396 		if (err) {
397 			btsq->sample_flags = 0;
398 			btsq->intel_pt_insn.length = 0;
399 			if (!btsq->bts->synth_opts.errors)
400 				return 0;
401 			err = intel_bts_synth_error(btsq->bts, btsq->cpu,
402 						    btsq->pid, btsq->tid,
403 						    branch->from);
404 			return err;
405 		}
406 		btsq->sample_flags = intel_pt_insn_type(btsq->intel_pt_insn.op);
407 		/* Check for an async branch into the kernel */
408 		if (!machine__kernel_ip(btsq->bts->machine, branch->from) &&
409 		    machine__kernel_ip(btsq->bts->machine, branch->to) &&
410 		    btsq->sample_flags != (PERF_IP_FLAG_BRANCH |
411 					   PERF_IP_FLAG_CALL |
412 					   PERF_IP_FLAG_SYSCALLRET))
413 			btsq->sample_flags = PERF_IP_FLAG_BRANCH |
414 					     PERF_IP_FLAG_CALL |
415 					     PERF_IP_FLAG_ASYNC |
416 					     PERF_IP_FLAG_INTERRUPT;
417 	}
418 
419 	return 0;
420 }
421 
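/*
 * Walk the branch records in a buffer: update the thread stack, apply the
 * branch filter derived from the itrace options, and synthesize a branch
 * sample for each record that passes.
 */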
422 static int intel_bts_process_buffer(struct intel_bts_queue *btsq,
423 				    struct auxtrace_buffer *buffer,
424 				    struct thread *thread)
425 {
426 	struct branch *branch;
427 	size_t sz, bsz = sizeof(struct branch);
428 	u32 filter = btsq->bts->branches_filter;
429 	int err = 0;
430 
431 	if (buffer->use_data) {
432 		sz = buffer->use_size;
433 		branch = buffer->use_data;
434 	} else {
435 		sz = buffer->size;
436 		branch = buffer->data;
437 	}
438 
439 	if (!btsq->bts->sample_branches)
440 		return 0;
441 
442 	for (; sz > bsz; branch += 1, sz -= bsz) {
443 		if (!branch->from && !branch->to)
444 			continue;
445 		intel_bts_get_branch_type(btsq, branch);
446 		if (btsq->bts->synth_opts.thread_stack)
447 			thread_stack__event(thread, btsq->sample_flags,
448 					    le64_to_cpu(branch->from),
449 					    le64_to_cpu(branch->to),
450 					    btsq->intel_pt_insn.length,
451 					    buffer->buffer_nr + 1);
452 		if (filter && !(filter & btsq->sample_flags))
453 			continue;
454 		err = intel_bts_synth_branch_sample(btsq, branch);
455 		if (err)
456 			break;
457 	}
458 	return err;
459 }
460 
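/*
 * Process the next buffer of a queue. Returns 1 when the queue has no more
 * data, a negative error code on failure, and 0 otherwise; if another buffer
 * is queued, *timestamp is set from it so the caller can re-order the queue
 * on the heap.
 */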
461 static int intel_bts_process_queue(struct intel_bts_queue *btsq, u64 *timestamp)
462 {
463 	struct auxtrace_buffer *buffer = btsq->buffer, *old_buffer = buffer;
464 	struct auxtrace_queue *queue;
465 	struct thread *thread;
466 	int err;
467 
468 	if (btsq->done)
469 		return 1;
470 
471 	if (btsq->pid == -1) {
472 		thread = machine__find_thread(btsq->bts->machine, -1,
473 					      btsq->tid);
474 		if (thread)
475 			btsq->pid = thread->pid_;
476 	} else {
477 		thread = machine__findnew_thread(btsq->bts->machine, btsq->pid,
478 						 btsq->tid);
479 	}
480 
481 	queue = &btsq->bts->queues.queue_array[btsq->queue_nr];
482 
483 	if (!buffer)
484 		buffer = auxtrace_buffer__next(queue, NULL);
485 
486 	if (!buffer) {
487 		if (!btsq->bts->sampling_mode)
488 			btsq->done = 1;
489 		err = 1;
490 		goto out_put;
491 	}
492 
493 	/* Currently there is no support for split buffers */
494 	if (buffer->consecutive) {
495 		err = -EINVAL;
496 		goto out_put;
497 	}
498 
499 	if (!buffer->data) {
500 		int fd = perf_data__fd(btsq->bts->session->data);
501 
502 		buffer->data = auxtrace_buffer__get_data(buffer, fd);
503 		if (!buffer->data) {
504 			err = -ENOMEM;
505 			goto out_put;
506 		}
507 	}
508 
509 	if (btsq->bts->snapshot_mode && !buffer->consecutive &&
510 	    intel_bts_do_fix_overlap(queue, buffer)) {
511 		err = -ENOMEM;
512 		goto out_put;
513 	}
514 
515 	if (!btsq->bts->synth_opts.callchain &&
516 	    !btsq->bts->synth_opts.thread_stack && thread &&
517 	    (!old_buffer || btsq->bts->sampling_mode ||
518 	     (btsq->bts->snapshot_mode && !buffer->consecutive)))
519 		thread_stack__set_trace_nr(thread, buffer->buffer_nr + 1);
520 
521 	err = intel_bts_process_buffer(btsq, buffer, thread);
522 
523 	auxtrace_buffer__drop_data(buffer);
524 
525 	btsq->buffer = auxtrace_buffer__next(queue, buffer);
526 	if (btsq->buffer) {
527 		if (timestamp)
528 			*timestamp = btsq->buffer->reference;
529 	} else {
530 		if (!btsq->bts->sampling_mode)
531 			btsq->done = 1;
532 	}
533 out_put:
534 	thread__put(thread);
535 	return err;
536 }
537 
538 static int intel_bts_flush_queue(struct intel_bts_queue *btsq)
539 {
540 	u64 ts = 0;
541 	int ret;
542 
543 	while (1) {
544 		ret = intel_bts_process_queue(btsq, &ts);
545 		if (ret < 0)
546 			return ret;
547 		if (ret)
548 			break;
549 	}
550 	return 0;
551 }
552 
553 static int intel_bts_process_tid_exit(struct intel_bts *bts, pid_t tid)
554 {
555 	struct auxtrace_queues *queues = &bts->queues;
556 	unsigned int i;
557 
558 	for (i = 0; i < queues->nr_queues; i++) {
559 		struct auxtrace_queue *queue = &bts->queues.queue_array[i];
560 		struct intel_bts_queue *btsq = queue->priv;
561 
562 		if (btsq && btsq->tid == tid)
563 			return intel_bts_flush_queue(btsq);
564 	}
565 	return 0;
566 }
567 
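/*
 * Repeatedly take the queue with the oldest data off the heap and process one
 * of its buffers, stopping once the oldest queued data is newer than
 * 'timestamp' or the heap is empty.
 */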
568 static int intel_bts_process_queues(struct intel_bts *bts, u64 timestamp)
569 {
570 	while (1) {
571 		unsigned int queue_nr;
572 		struct auxtrace_queue *queue;
573 		struct intel_bts_queue *btsq;
574 		u64 ts = 0;
575 		int ret;
576 
577 		if (!bts->heap.heap_cnt)
578 			return 0;
579 
580 		if (bts->heap.heap_array[0].ordinal > timestamp)
581 			return 0;
582 
583 		queue_nr = bts->heap.heap_array[0].queue_nr;
584 		queue = &bts->queues.queue_array[queue_nr];
585 		btsq = queue->priv;
586 
587 		auxtrace_heap__pop(&bts->heap);
588 
589 		ret = intel_bts_process_queue(btsq, &ts);
590 		if (ret < 0) {
591 			auxtrace_heap__add(&bts->heap, queue_nr, ts);
592 			return ret;
593 		}
594 
595 		if (!ret) {
596 			ret = auxtrace_heap__add(&bts->heap, queue_nr, ts);
597 			if (ret < 0)
598 				return ret;
599 		} else {
600 			btsq->on_heap = false;
601 		}
602 	}
603 
604 	return 0;
605 }
606 
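/*
 * Main event hook: convert the sample time to a TSC value, process queued
 * trace data up to that point, flush the relevant queue when a thread exits,
 * and report lost trace data when an AUX record is flagged as truncated.
 */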
607 static int intel_bts_process_event(struct perf_session *session,
608 				   union perf_event *event,
609 				   struct perf_sample *sample,
610 				   struct perf_tool *tool)
611 {
612 	struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
613 					     auxtrace);
614 	u64 timestamp;
615 	int err;
616 
617 	if (dump_trace)
618 		return 0;
619 
620 	if (!tool->ordered_events) {
621 		pr_err("Intel BTS requires ordered events\n");
622 		return -EINVAL;
623 	}
624 
625 	if (sample->time && sample->time != (u64)-1)
626 		timestamp = perf_time_to_tsc(sample->time, &bts->tc);
627 	else
628 		timestamp = 0;
629 
630 	err = intel_bts_update_queues(bts);
631 	if (err)
632 		return err;
633 
634 	err = intel_bts_process_queues(bts, timestamp);
635 	if (err)
636 		return err;
637 	if (event->header.type == PERF_RECORD_EXIT) {
638 		err = intel_bts_process_tid_exit(bts, event->fork.tid);
639 		if (err)
640 			return err;
641 	}
642 
643 	if (event->header.type == PERF_RECORD_AUX &&
644 	    (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) &&
645 	    bts->synth_opts.errors)
646 		err = intel_bts_lost(bts, sample);
647 
648 	return err;
649 }
650 
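/*
 * Queue the trace data described by a PERF_RECORD_AUXTRACE event. For piped
 * input the data follows the event and is copied; otherwise only the file
 * offset is recorded. With dump_trace set the data is dumped immediately.
 */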
651 static int intel_bts_process_auxtrace_event(struct perf_session *session,
652 					    union perf_event *event,
653 					    struct perf_tool *tool __maybe_unused)
654 {
655 	struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
656 					     auxtrace);
657 
658 	if (bts->sampling_mode)
659 		return 0;
660 
661 	if (!bts->data_queued) {
662 		struct auxtrace_buffer *buffer;
663 		off_t data_offset;
664 		int fd = perf_data__fd(session->data);
665 		int err;
666 
667 		if (perf_data__is_pipe(session->data)) {
668 			data_offset = 0;
669 		} else {
670 			data_offset = lseek(fd, 0, SEEK_CUR);
671 			if (data_offset == -1)
672 				return -errno;
673 		}
674 
675 		err = auxtrace_queues__add_event(&bts->queues, session, event,
676 						 data_offset, &buffer);
677 		if (err)
678 			return err;
679 
680 		/* Dump here now we have copied a piped trace out of the pipe */
681 		if (dump_trace) {
682 			if (auxtrace_buffer__get_data(buffer, fd)) {
683 				intel_bts_dump_event(bts, buffer->data,
684 						     buffer->size);
685 				auxtrace_buffer__put_data(buffer);
686 			}
687 		}
688 	}
689 
690 	return 0;
691 }
692 
693 static int intel_bts_flush(struct perf_session *session,
694 			   struct perf_tool *tool __maybe_unused)
695 {
696 	struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
697 					     auxtrace);
698 	int ret;
699 
700 	if (dump_trace || bts->sampling_mode)
701 		return 0;
702 
703 	if (!tool->ordered_events)
704 		return -EINVAL;
705 
706 	ret = intel_bts_update_queues(bts);
707 	if (ret < 0)
708 		return ret;
709 
710 	return intel_bts_process_queues(bts, MAX_TIMESTAMP);
711 }
712 
713 static void intel_bts_free_queue(void *priv)
714 {
715 	struct intel_bts_queue *btsq = priv;
716 
717 	if (!btsq)
718 		return;
719 	free(btsq);
720 }
721 
722 static void intel_bts_free_events(struct perf_session *session)
723 {
724 	struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
725 					     auxtrace);
726 	struct auxtrace_queues *queues = &bts->queues;
727 	unsigned int i;
728 
729 	for (i = 0; i < queues->nr_queues; i++) {
730 		intel_bts_free_queue(queues->queue_array[i].priv);
731 		queues->queue_array[i].priv = NULL;
732 	}
733 	auxtrace_queues__free(queues);
734 }
735 
736 static void intel_bts_free(struct perf_session *session)
737 {
738 	struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
739 					     auxtrace);
740 
741 	auxtrace_heap__free(&bts->heap);
742 	intel_bts_free_events(session);
743 	session->auxtrace = NULL;
744 	free(bts);
745 }
746 
747 struct intel_bts_synth {
748 	struct perf_tool dummy_tool;
749 	struct perf_session *session;
750 };
751 
752 static int intel_bts_event_synth(struct perf_tool *tool,
753 				 union perf_event *event,
754 				 struct perf_sample *sample __maybe_unused,
755 				 struct machine *machine __maybe_unused)
756 {
757 	struct intel_bts_synth *intel_bts_synth =
758 			container_of(tool, struct intel_bts_synth, dummy_tool);
759 
760 	return perf_session__deliver_synth_event(intel_bts_synth->session,
761 						 event, NULL);
762 }
763 
764 static int intel_bts_synth_event(struct perf_session *session,
765 				 struct perf_event_attr *attr, u64 id)
766 {
767 	struct intel_bts_synth intel_bts_synth;
768 
769 	memset(&intel_bts_synth, 0, sizeof(struct intel_bts_synth));
770 	intel_bts_synth.session = session;
771 
772 	return perf_event__synthesize_attr(&intel_bts_synth.dummy_tool, attr, 1,
773 					   &id, intel_bts_event_synth);
774 }
775 
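/*
 * Set up synthesis of 'branches' events: copy relevant attributes from the
 * BTS evsel, pick a sample id that should not collide with existing ones, and
 * emit a synthesized attr event so that the new sample type is known to the
 * session.
 */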
776 static int intel_bts_synth_events(struct intel_bts *bts,
777 				  struct perf_session *session)
778 {
779 	struct perf_evlist *evlist = session->evlist;
780 	struct perf_evsel *evsel;
781 	struct perf_event_attr attr;
782 	bool found = false;
783 	u64 id;
784 	int err;
785 
786 	evlist__for_each_entry(evlist, evsel) {
787 		if (evsel->attr.type == bts->pmu_type && evsel->ids) {
788 			found = true;
789 			break;
790 		}
791 	}
792 
793 	if (!found) {
794 		pr_debug("There are no selected events with Intel BTS data\n");
795 		return 0;
796 	}
797 
798 	memset(&attr, 0, sizeof(struct perf_event_attr));
799 	attr.size = sizeof(struct perf_event_attr);
800 	attr.type = PERF_TYPE_HARDWARE;
801 	attr.sample_type = evsel->attr.sample_type & PERF_SAMPLE_MASK;
802 	attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
803 			    PERF_SAMPLE_PERIOD;
804 	attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
805 	attr.sample_type &= ~(u64)PERF_SAMPLE_CPU;
806 	attr.exclude_user = evsel->attr.exclude_user;
807 	attr.exclude_kernel = evsel->attr.exclude_kernel;
808 	attr.exclude_hv = evsel->attr.exclude_hv;
809 	attr.exclude_host = evsel->attr.exclude_host;
810 	attr.exclude_guest = evsel->attr.exclude_guest;
811 	attr.sample_id_all = evsel->attr.sample_id_all;
812 	attr.read_format = evsel->attr.read_format;
813 
814 	id = evsel->id[0] + 1000000000;
815 	if (!id)
816 		id = 1;
817 
818 	if (bts->synth_opts.branches) {
819 		attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
820 		attr.sample_period = 1;
821 		attr.sample_type |= PERF_SAMPLE_ADDR;
822 		pr_debug("Synthesizing 'branches' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
823 			 id, (u64)attr.sample_type);
824 		err = intel_bts_synth_event(session, &attr, id);
825 		if (err) {
826 			pr_err("%s: failed to synthesize 'branches' event type\n",
827 			       __func__);
828 			return err;
829 		}
830 		bts->sample_branches = true;
831 		bts->branches_sample_type = attr.sample_type;
832 		bts->branches_id = id;
833 		/*
834 		 * We only use sample types from PERF_SAMPLE_MASK so we can use
835 		 * __perf_evsel__sample_size() here.
836 		 */
837 		bts->branches_event_size = sizeof(struct sample_event) +
838 				__perf_evsel__sample_size(attr.sample_type);
839 	}
840 
841 	return 0;
842 }
843 
844 static const char * const intel_bts_info_fmts[] = {
845 	[INTEL_BTS_PMU_TYPE]		= "  PMU Type           %"PRId64"\n",
846 	[INTEL_BTS_TIME_SHIFT]		= "  Time Shift         %"PRIu64"\n",
847 	[INTEL_BTS_TIME_MULT]		= "  Time Multiplier    %"PRIu64"\n",
848 	[INTEL_BTS_TIME_ZERO]		= "  Time Zero          %"PRIu64"\n",
849 	[INTEL_BTS_CAP_USER_TIME_ZERO]	= "  Cap Time Zero      %"PRId64"\n",
850 	[INTEL_BTS_SNAPSHOT_MODE]	= "  Snapshot mode      %"PRId64"\n",
851 };
852 
853 static void intel_bts_print_info(u64 *arr, int start, int finish)
854 {
855 	int i;
856 
857 	if (!dump_trace)
858 		return;
859 
860 	for (i = start; i <= finish; i++)
861 		fprintf(stdout, intel_bts_info_fmts[i], arr[i]);
862 }
863 
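/*
 * Set up BTS decoding from a PERF_RECORD_AUXTRACE_INFO event: read the PMU
 * type, TSC conversion parameters and snapshot mode from the private data,
 * hook the auxtrace callbacks into the session, apply the itrace options and
 * queue up any indexed trace data.
 */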
864 int intel_bts_process_auxtrace_info(union perf_event *event,
865 				    struct perf_session *session)
866 {
867 	struct auxtrace_info_event *auxtrace_info = &event->auxtrace_info;
868 	size_t min_sz = sizeof(u64) * INTEL_BTS_SNAPSHOT_MODE;
869 	struct intel_bts *bts;
870 	int err;
871 
872 	if (auxtrace_info->header.size < sizeof(struct auxtrace_info_event) +
873 					min_sz)
874 		return -EINVAL;
875 
876 	bts = zalloc(sizeof(struct intel_bts));
877 	if (!bts)
878 		return -ENOMEM;
879 
880 	err = auxtrace_queues__init(&bts->queues);
881 	if (err)
882 		goto err_free;
883 
884 	bts->session = session;
885 	bts->machine = &session->machines.host; /* No kvm support */
886 	bts->auxtrace_type = auxtrace_info->type;
887 	bts->pmu_type = auxtrace_info->priv[INTEL_BTS_PMU_TYPE];
888 	bts->tc.time_shift = auxtrace_info->priv[INTEL_BTS_TIME_SHIFT];
889 	bts->tc.time_mult = auxtrace_info->priv[INTEL_BTS_TIME_MULT];
890 	bts->tc.time_zero = auxtrace_info->priv[INTEL_BTS_TIME_ZERO];
891 	bts->cap_user_time_zero =
892 			auxtrace_info->priv[INTEL_BTS_CAP_USER_TIME_ZERO];
893 	bts->snapshot_mode = auxtrace_info->priv[INTEL_BTS_SNAPSHOT_MODE];
894 
895 	bts->sampling_mode = false;
896 
897 	bts->auxtrace.process_event = intel_bts_process_event;
898 	bts->auxtrace.process_auxtrace_event = intel_bts_process_auxtrace_event;
899 	bts->auxtrace.flush_events = intel_bts_flush;
900 	bts->auxtrace.free_events = intel_bts_free_events;
901 	bts->auxtrace.free = intel_bts_free;
902 	session->auxtrace = &bts->auxtrace;
903 
904 	intel_bts_print_info(&auxtrace_info->priv[0], INTEL_BTS_PMU_TYPE,
905 			     INTEL_BTS_SNAPSHOT_MODE);
906 
907 	if (dump_trace)
908 		return 0;
909 
910 	if (session->itrace_synth_opts && session->itrace_synth_opts->set) {
911 		bts->synth_opts = *session->itrace_synth_opts;
912 	} else {
913 		itrace_synth_opts__set_default(&bts->synth_opts);
914 		if (session->itrace_synth_opts)
915 			bts->synth_opts.thread_stack =
916 				session->itrace_synth_opts->thread_stack;
917 	}
918 
919 	if (bts->synth_opts.calls)
920 		bts->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
921 					PERF_IP_FLAG_TRACE_END;
922 	if (bts->synth_opts.returns)
923 		bts->branches_filter |= PERF_IP_FLAG_RETURN |
924 					PERF_IP_FLAG_TRACE_BEGIN;
925 
926 	err = intel_bts_synth_events(bts, session);
927 	if (err)
928 		goto err_free_queues;
929 
930 	err = auxtrace_queues__process_index(&bts->queues, session);
931 	if (err)
932 		goto err_free_queues;
933 
934 	if (bts->queues.populated)
935 		bts->data_queued = true;
936 
937 	return 0;
938 
939 err_free_queues:
940 	auxtrace_queues__free(&bts->queues);
941 	session->auxtrace = NULL;
942 err_free:
943 	free(bts);
944 	return err;
945 }
946