xref: /linux/tools/perf/util/data-convert-bt.c (revision fbfb858552fb9a4c869e22f3303c7c7365367509)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * CTF writing support via babeltrace.
4  *
5  * Copyright (C) 2014, Jiri Olsa <jolsa@redhat.com>
6  * Copyright (C) 2014, Sebastian Andrzej Siewior <bigeasy@linutronix.de>
7  */
8 
9 #include <errno.h>
10 #include <inttypes.h>
11 #include <linux/compiler.h>
12 #include <linux/kernel.h>
13 #include <linux/zalloc.h>
14 #include <babeltrace/ctf-writer/writer.h>
15 #include <babeltrace/ctf-writer/clock.h>
16 #include <babeltrace/ctf-writer/stream.h>
17 #include <babeltrace/ctf-writer/event.h>
18 #include <babeltrace/ctf-writer/event-types.h>
19 #include <babeltrace/ctf-writer/event-fields.h>
20 #include <babeltrace/ctf-ir/utils.h>
21 #include <babeltrace/ctf/events.h>
22 #include "asm/bug.h"
23 #include "data-convert.h"
24 #include "session.h"
25 #include "debug.h"
26 #include "tool.h"
27 #include "evlist.h"
28 #include "evsel.h"
29 #include "machine.h"
30 #include "config.h"
31 #include <linux/ctype.h>
32 #include <linux/err.h>
33 #include <linux/time64.h>
34 #include "util.h"
35 #include "clockid.h"
36 #include "util/sample.h"
37 #include "util/time-utils.h"
38 #include "header.h"
39 
40 #ifdef HAVE_LIBTRACEEVENT
41 #include <event-parse.h>
42 #endif
43 
/*
 * Debug printing helpers: pr_N() routes through the 'data-convert'
 * debug variable; pr()/pr2() print at verbosity level 1/2 and
 * pr_time2() prefixes the message with a timestamp at level 2.
 */
#define pr_N(n, fmt, ...) \
	eprintf(n, debug_data_convert, fmt, ##__VA_ARGS__)

#define pr(fmt, ...)  pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)
#define pr2(fmt, ...) pr_N(2, pr_fmt(fmt), ##__VA_ARGS__)

#define pr_time2(t, fmt, ...) pr_time_N(2, debug_data_convert, t, pr_fmt(fmt), ##__VA_ARGS__)

/* Per-evsel private data: the CTF event class created for that evsel. */
struct evsel_priv {
	struct bt_ctf_event_class *event_class;
};

#define MAX_CPUS	4096

/* One CTF output stream per CPU. */
struct ctf_stream {
	struct bt_ctf_stream *stream;
	int cpu;	/* CPU this stream carries events for */
	u32 count;	/* events appended since the last flush */
};
63 
/*
 * State for the babeltrace CTF writer: the writer itself, one stream
 * per CPU, the shared CTF field types, and the event classes used for
 * the non-sample (side-band) events.
 */
struct ctf_writer {
	/* writer primitives */
	struct bt_ctf_writer		 *writer;
	struct ctf_stream		**stream;
	int				  stream_cnt;
	struct bt_ctf_stream_class	 *stream_class;
	struct bt_ctf_clock		 *clock;

	/* data types */
	union {
		struct {
			struct bt_ctf_field_type	*s64;
			struct bt_ctf_field_type	*u64;
			struct bt_ctf_field_type	*s32;
			struct bt_ctf_field_type	*u32;
			struct bt_ctf_field_type	*string;
			struct bt_ctf_field_type	*u32_hex;
			struct bt_ctf_field_type	*u64_hex;
		};
		/*
		 * Must cover every named member above so that code
		 * iterating data.array visits (and releases) all of
		 * them; the previous size of 6 left u64_hex out of
		 * such loops.  Does not change the union's size.
		 */
		struct bt_ctf_field_type *array[7];
	} data;
	struct bt_ctf_event_class	*comm_class;
	struct bt_ctf_event_class	*exit_class;
	struct bt_ctf_event_class	*fork_class;
	struct bt_ctf_event_class	*mmap_class;
	struct bt_ctf_event_class	*mmap2_class;
};
91 
/*
 * Conversion context: the perf tool callbacks plus the CTF writer
 * state and bookkeeping for the statistics reported at the end.
 */
struct convert {
	struct perf_tool	tool;
	struct ctf_writer	writer;

	/* --time filter ranges; samples outside them are skipped. */
	struct perf_time_interval *ptime_range;
	int range_size;
	int range_num;

	u64			events_size;	/* bytes of events processed */
	u64			events_count;	/* sample events converted */
	u64			non_sample_count;	/* side-band events converted */
	u64			skipped;	/* samples dropped by the time filter */

	/* Ordered events configured queue size. */
	u64			queue_size;
};
108 
109 static int value_set(struct bt_ctf_field_type *type,
110 		     struct bt_ctf_event *event,
111 		     const char *name, u64 val)
112 {
113 	struct bt_ctf_field *field;
114 	bool sign = bt_ctf_field_type_integer_get_signed(type);
115 	int ret;
116 
117 	field = bt_ctf_field_create(type);
118 	if (!field) {
119 		pr_err("failed to create a field %s\n", name);
120 		return -1;
121 	}
122 
123 	if (sign) {
124 		ret = bt_ctf_field_signed_integer_set_value(field, val);
125 		if (ret) {
126 			pr_err("failed to set field value %s\n", name);
127 			goto err;
128 		}
129 	} else {
130 		ret = bt_ctf_field_unsigned_integer_set_value(field, val);
131 		if (ret) {
132 			pr_err("failed to set field value %s\n", name);
133 			goto err;
134 		}
135 	}
136 
137 	ret = bt_ctf_event_set_payload(event, name, field);
138 	if (ret) {
139 		pr_err("failed to set payload %s\n", name);
140 		goto err;
141 	}
142 
143 	pr2("  SET [%s = %" PRIu64 "]\n", name, val);
144 
145 err:
146 	bt_ctf_field_put(field);
147 	return ret;
148 }
149 
/*
 * Generate value_set_<type>() wrappers that pick the matching CTF
 * field type from cw->data and forward to value_set().
 */
#define __FUNC_VALUE_SET(_name, _val_type)				\
static __maybe_unused int value_set_##_name(struct ctf_writer *cw,	\
			     struct bt_ctf_event *event,		\
			     const char *name,				\
			     _val_type val)				\
{									\
	struct bt_ctf_field_type *type = cw->data._name;		\
	return value_set(type, event, name, (u64) val);			\
}

#define FUNC_VALUE_SET(_name) __FUNC_VALUE_SET(_name, _name)

FUNC_VALUE_SET(s32)
FUNC_VALUE_SET(u32)
FUNC_VALUE_SET(s64)
FUNC_VALUE_SET(u64)
/* u64_hex shares the u64 C type but renders as hexadecimal in CTF. */
__FUNC_VALUE_SET(u64_hex, u64)
167 
168 static int string_set_value(struct bt_ctf_field *field, const char *string);
169 static __maybe_unused int
170 value_set_string(struct ctf_writer *cw, struct bt_ctf_event *event,
171 		 const char *name, const char *string)
172 {
173 	struct bt_ctf_field_type *type = cw->data.string;
174 	struct bt_ctf_field *field;
175 	int ret = 0;
176 
177 	field = bt_ctf_field_create(type);
178 	if (!field) {
179 		pr_err("failed to create a field %s\n", name);
180 		return -1;
181 	}
182 
183 	ret = string_set_value(field, string);
184 	if (ret) {
185 		pr_err("failed to set value %s\n", name);
186 		goto err_put_field;
187 	}
188 
189 	ret = bt_ctf_event_set_payload(event, name, field);
190 	if (ret)
191 		pr_err("failed to set payload %s\n", name);
192 
193 err_put_field:
194 	bt_ctf_field_put(field);
195 	return ret;
196 }
197 
198 static struct bt_ctf_field_type*
199 get_tracepoint_field_type(struct ctf_writer *cw, struct tep_format_field *field)
200 {
201 	unsigned long flags = field->flags;
202 
203 	if (flags & TEP_FIELD_IS_STRING)
204 		return cw->data.string;
205 
206 	if (!(flags & TEP_FIELD_IS_SIGNED)) {
207 		/* unsigned long are mostly pointers */
208 		if (flags & TEP_FIELD_IS_LONG || flags & TEP_FIELD_IS_POINTER)
209 			return cw->data.u64_hex;
210 	}
211 
212 	if (flags & TEP_FIELD_IS_SIGNED) {
213 		if (field->size == 8)
214 			return cw->data.s64;
215 		else
216 			return cw->data.s32;
217 	}
218 
219 	if (field->size == 8)
220 		return cw->data.u64;
221 	else
222 		return cw->data.u32;
223 }
224 
/*
 * Sign-extend a 'size'-byte integer read into an unsigned long long:
 * if the value's sign bit is set, fill the upper bits with ones so it
 * represents the same negative number as a 64-bit value.  Values of
 * 8 bytes (or unexpected sizes) are returned unchanged.
 */
static unsigned long long adjust_signedness(unsigned long long value_int, int size)
{
	unsigned long long mask;

	/*
	 * mask = (1 << (size * 8 - 1)) - 1, i.e. all value bits below
	 * the sign bit, spelled out per size for readability.
	 */
	if (size == 1)
		mask = 0x7fULL;
	else if (size == 2)
		mask = 0x7fffULL;
	else if (size == 4)
		mask = 0x7fffffffULL;
	else
		/* 64-bit values need no extension; anything else is a bug. */
		return value_int;

	/* Sign bit (and everything above it) clear: positive, keep as is. */
	if (!(value_int & ~mask))
		return value_int;

	/* Negative: set every bit from the sign bit upwards. */
	return value_int | ~mask;
}
261 
/*
 * Store 'string' into a CTF string field, replacing each unprintable
 * byte with a "\xNN" escape.  A scratch buffer is only allocated once
 * the first unprintable byte is found; fully printable strings are
 * passed through untouched.
 */
static int string_set_value(struct bt_ctf_field *field, const char *string)
{
	char *buffer = NULL;
	size_t len = strlen(string), i, p;
	int err;

	/* i indexes the input, p the (possibly longer) escaped output. */
	for (i = p = 0; i < len; i++, p++) {
		if (isprint(string[i])) {
			if (!buffer)
				continue;
			buffer[p] = string[i];
		} else {
			char numstr[5];

			snprintf(numstr, sizeof(numstr), "\\x%02x",
				 (unsigned int)(string[i]) & 0xff);

			if (!buffer) {
				/*
				 * Worst case: every remaining byte expands to
				 * 4 chars ("\xNN"), plus room for the NUL.
				 */
				buffer = zalloc(i + (len - i) * 4 + 2);
				if (!buffer) {
					pr_err("failed to set unprintable string '%s'\n", string);
					return bt_ctf_field_string_set_value(field, "UNPRINTABLE-STRING");
				}
				/* Copy the printable prefix seen so far. */
				if (i > 0)
					strncpy(buffer, string, i);
			}
			/* 4 escape chars replace 1 input char: p advances 3 extra. */
			memcpy(buffer + p, numstr, 4);
			p += 3;
		}
	}

	if (!buffer)
		return bt_ctf_field_string_set_value(field, string);
	err = bt_ctf_field_string_set_value(field, buffer);
	free(buffer);
	return err;
}
299 
300 static int add_tracepoint_field_value(struct ctf_writer *cw,
301 				      struct bt_ctf_event_class *event_class,
302 				      struct bt_ctf_event *event,
303 				      struct perf_sample *sample,
304 				      struct tep_format_field *fmtf)
305 {
306 	struct bt_ctf_field_type *type;
307 	struct bt_ctf_field *array_field;
308 	struct bt_ctf_field *field;
309 	const char *name = fmtf->name;
310 	void *data = sample->raw_data;
311 	unsigned long flags = fmtf->flags;
312 	unsigned int n_items;
313 	unsigned int i;
314 	unsigned int offset;
315 	unsigned int len;
316 	int ret;
317 
318 	name = fmtf->alias;
319 	offset = fmtf->offset;
320 	len = fmtf->size;
321 	if (flags & TEP_FIELD_IS_STRING)
322 		flags &= ~TEP_FIELD_IS_ARRAY;
323 
324 	if (flags & TEP_FIELD_IS_DYNAMIC) {
325 		unsigned long long tmp_val;
326 
327 		tmp_val = tep_read_number(fmtf->event->tep,
328 					  data + offset, len);
329 		offset = tmp_val;
330 		len = offset >> 16;
331 		offset &= 0xffff;
332 		if (tep_field_is_relative(flags))
333 			offset += fmtf->offset + fmtf->size;
334 	}
335 
336 	if (flags & TEP_FIELD_IS_ARRAY) {
337 
338 		type = bt_ctf_event_class_get_field_by_name(
339 				event_class, name);
340 		array_field = bt_ctf_field_create(type);
341 		bt_ctf_field_type_put(type);
342 		if (!array_field) {
343 			pr_err("Failed to create array type %s\n", name);
344 			return -1;
345 		}
346 
347 		len = fmtf->size / fmtf->arraylen;
348 		n_items = fmtf->arraylen;
349 	} else {
350 		n_items = 1;
351 		array_field = NULL;
352 	}
353 
354 	type = get_tracepoint_field_type(cw, fmtf);
355 
356 	for (i = 0; i < n_items; i++) {
357 		if (flags & TEP_FIELD_IS_ARRAY)
358 			field = bt_ctf_field_array_get_field(array_field, i);
359 		else
360 			field = bt_ctf_field_create(type);
361 
362 		if (!field) {
363 			pr_err("failed to create a field %s\n", name);
364 			return -1;
365 		}
366 
367 		if (flags & TEP_FIELD_IS_STRING)
368 			ret = string_set_value(field, data + offset + i * len);
369 		else {
370 			unsigned long long value_int;
371 
372 			value_int = tep_read_number(
373 					fmtf->event->tep,
374 					data + offset + i * len, len);
375 
376 			if (!(flags & TEP_FIELD_IS_SIGNED))
377 				ret = bt_ctf_field_unsigned_integer_set_value(
378 						field, value_int);
379 			else
380 				ret = bt_ctf_field_signed_integer_set_value(
381 						field, adjust_signedness(value_int, len));
382 		}
383 
384 		if (ret) {
385 			pr_err("failed to set file value %s\n", name);
386 			goto err_put_field;
387 		}
388 		if (!(flags & TEP_FIELD_IS_ARRAY)) {
389 			ret = bt_ctf_event_set_payload(event, name, field);
390 			if (ret) {
391 				pr_err("failed to set payload %s\n", name);
392 				goto err_put_field;
393 			}
394 		}
395 		bt_ctf_field_put(field);
396 	}
397 	if (flags & TEP_FIELD_IS_ARRAY) {
398 		ret = bt_ctf_event_set_payload(event, name, array_field);
399 		if (ret) {
400 			pr_err("Failed add payload array %s\n", name);
401 			return -1;
402 		}
403 		bt_ctf_field_put(array_field);
404 	}
405 	return 0;
406 
407 err_put_field:
408 	bt_ctf_field_put(field);
409 	return -1;
410 }
411 
412 static int add_tracepoint_fields_values(struct ctf_writer *cw,
413 					struct bt_ctf_event_class *event_class,
414 					struct bt_ctf_event *event,
415 					struct tep_format_field *fields,
416 					struct perf_sample *sample)
417 {
418 	struct tep_format_field *field;
419 	int ret;
420 
421 	for (field = fields; field; field = field->next) {
422 		ret = add_tracepoint_field_value(cw, event_class, event, sample,
423 				field);
424 		if (ret)
425 			return -1;
426 	}
427 	return 0;
428 }
429 
430 static int add_tracepoint_values(struct ctf_writer *cw,
431 				 struct bt_ctf_event_class *event_class,
432 				 struct bt_ctf_event *event,
433 				 struct evsel *evsel,
434 				 struct perf_sample *sample)
435 {
436 	const struct tep_event *tp_format = evsel__tp_format(evsel);
437 	struct tep_format_field *common_fields = tp_format->format.common_fields;
438 	struct tep_format_field *fields        = tp_format->format.fields;
439 	int ret;
440 
441 	ret = add_tracepoint_fields_values(cw, event_class, event,
442 					   common_fields, sample);
443 	if (!ret)
444 		ret = add_tracepoint_fields_values(cw, event_class, event,
445 						   fields, sample);
446 
447 	return ret;
448 }
449 
450 static int
451 add_bpf_output_values(struct bt_ctf_event_class *event_class,
452 		      struct bt_ctf_event *event,
453 		      struct perf_sample *sample)
454 {
455 	struct bt_ctf_field_type *len_type, *seq_type;
456 	struct bt_ctf_field *len_field, *seq_field;
457 	unsigned int raw_size = sample->raw_size;
458 	unsigned int nr_elements = raw_size / sizeof(u32);
459 	unsigned int i;
460 	int ret;
461 
462 	if (nr_elements * sizeof(u32) != raw_size)
463 		pr_warning("Incorrect raw_size (%u) in bpf output event, skip %zu bytes\n",
464 			   raw_size, nr_elements * sizeof(u32) - raw_size);
465 
466 	len_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_len");
467 	len_field = bt_ctf_field_create(len_type);
468 	if (!len_field) {
469 		pr_err("failed to create 'raw_len' for bpf output event\n");
470 		ret = -1;
471 		goto put_len_type;
472 	}
473 
474 	ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements);
475 	if (ret) {
476 		pr_err("failed to set field value for raw_len\n");
477 		goto put_len_field;
478 	}
479 	ret = bt_ctf_event_set_payload(event, "raw_len", len_field);
480 	if (ret) {
481 		pr_err("failed to set payload to raw_len\n");
482 		goto put_len_field;
483 	}
484 
485 	seq_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_data");
486 	seq_field = bt_ctf_field_create(seq_type);
487 	if (!seq_field) {
488 		pr_err("failed to create 'raw_data' for bpf output event\n");
489 		ret = -1;
490 		goto put_seq_type;
491 	}
492 
493 	ret = bt_ctf_field_sequence_set_length(seq_field, len_field);
494 	if (ret) {
495 		pr_err("failed to set length of 'raw_data'\n");
496 		goto put_seq_field;
497 	}
498 
499 	for (i = 0; i < nr_elements; i++) {
500 		struct bt_ctf_field *elem_field =
501 			bt_ctf_field_sequence_get_field(seq_field, i);
502 
503 		ret = bt_ctf_field_unsigned_integer_set_value(elem_field,
504 				((u32 *)(sample->raw_data))[i]);
505 
506 		bt_ctf_field_put(elem_field);
507 		if (ret) {
508 			pr_err("failed to set raw_data[%d]\n", i);
509 			goto put_seq_field;
510 		}
511 	}
512 
513 	ret = bt_ctf_event_set_payload(event, "raw_data", seq_field);
514 	if (ret)
515 		pr_err("failed to set payload for raw_data\n");
516 
517 put_seq_field:
518 	bt_ctf_field_put(seq_field);
519 put_seq_type:
520 	bt_ctf_field_type_put(seq_type);
521 put_len_field:
522 	bt_ctf_field_put(len_field);
523 put_len_type:
524 	bt_ctf_field_type_put(len_type);
525 	return ret;
526 }
527 
528 static int
529 add_callchain_output_values(struct bt_ctf_event_class *event_class,
530 		      struct bt_ctf_event *event,
531 		      struct ip_callchain *callchain)
532 {
533 	struct bt_ctf_field_type *len_type, *seq_type;
534 	struct bt_ctf_field *len_field, *seq_field;
535 	unsigned int nr_elements = callchain->nr;
536 	unsigned int i;
537 	int ret;
538 
539 	len_type = bt_ctf_event_class_get_field_by_name(
540 			event_class, "perf_callchain_size");
541 	len_field = bt_ctf_field_create(len_type);
542 	if (!len_field) {
543 		pr_err("failed to create 'perf_callchain_size' for callchain output event\n");
544 		ret = -1;
545 		goto put_len_type;
546 	}
547 
548 	ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements);
549 	if (ret) {
550 		pr_err("failed to set field value for perf_callchain_size\n");
551 		goto put_len_field;
552 	}
553 	ret = bt_ctf_event_set_payload(event, "perf_callchain_size", len_field);
554 	if (ret) {
555 		pr_err("failed to set payload to perf_callchain_size\n");
556 		goto put_len_field;
557 	}
558 
559 	seq_type = bt_ctf_event_class_get_field_by_name(
560 			event_class, "perf_callchain");
561 	seq_field = bt_ctf_field_create(seq_type);
562 	if (!seq_field) {
563 		pr_err("failed to create 'perf_callchain' for callchain output event\n");
564 		ret = -1;
565 		goto put_seq_type;
566 	}
567 
568 	ret = bt_ctf_field_sequence_set_length(seq_field, len_field);
569 	if (ret) {
570 		pr_err("failed to set length of 'perf_callchain'\n");
571 		goto put_seq_field;
572 	}
573 
574 	for (i = 0; i < nr_elements; i++) {
575 		struct bt_ctf_field *elem_field =
576 			bt_ctf_field_sequence_get_field(seq_field, i);
577 
578 		ret = bt_ctf_field_unsigned_integer_set_value(elem_field,
579 				((u64 *)(callchain->ips))[i]);
580 
581 		bt_ctf_field_put(elem_field);
582 		if (ret) {
583 			pr_err("failed to set callchain[%d]\n", i);
584 			goto put_seq_field;
585 		}
586 	}
587 
588 	ret = bt_ctf_event_set_payload(event, "perf_callchain", seq_field);
589 	if (ret)
590 		pr_err("failed to set payload for raw_data\n");
591 
592 put_seq_field:
593 	bt_ctf_field_put(seq_field);
594 put_seq_type:
595 	bt_ctf_field_type_put(seq_type);
596 put_len_field:
597 	bt_ctf_field_put(len_field);
598 put_len_type:
599 	bt_ctf_field_type_put(len_type);
600 	return ret;
601 }
602 
603 static int add_generic_values(struct ctf_writer *cw,
604 			      struct bt_ctf_event *event,
605 			      struct evsel *evsel,
606 			      struct perf_sample *sample)
607 {
608 	u64 type = evsel->core.attr.sample_type;
609 	int ret;
610 
611 	/*
612 	 * missing:
613 	 *   PERF_SAMPLE_TIME         - not needed as we have it in
614 	 *                              ctf event header
615 	 *   PERF_SAMPLE_READ         - TODO
616 	 *   PERF_SAMPLE_RAW          - tracepoint fields are handled separately
617 	 *   PERF_SAMPLE_BRANCH_STACK - TODO
618 	 *   PERF_SAMPLE_REGS_USER    - TODO
619 	 *   PERF_SAMPLE_STACK_USER   - TODO
620 	 */
621 
622 	if (type & PERF_SAMPLE_IP) {
623 		ret = value_set_u64_hex(cw, event, "perf_ip", sample->ip);
624 		if (ret)
625 			return -1;
626 	}
627 
628 	if (type & PERF_SAMPLE_TID) {
629 		ret = value_set_s32(cw, event, "perf_tid", sample->tid);
630 		if (ret)
631 			return -1;
632 
633 		ret = value_set_s32(cw, event, "perf_pid", sample->pid);
634 		if (ret)
635 			return -1;
636 	}
637 
638 	if ((type & PERF_SAMPLE_ID) ||
639 	    (type & PERF_SAMPLE_IDENTIFIER)) {
640 		ret = value_set_u64(cw, event, "perf_id", sample->id);
641 		if (ret)
642 			return -1;
643 	}
644 
645 	if (type & PERF_SAMPLE_STREAM_ID) {
646 		ret = value_set_u64(cw, event, "perf_stream_id", sample->stream_id);
647 		if (ret)
648 			return -1;
649 	}
650 
651 	if (type & PERF_SAMPLE_PERIOD) {
652 		ret = value_set_u64(cw, event, "perf_period", sample->period);
653 		if (ret)
654 			return -1;
655 	}
656 
657 	if (type & PERF_SAMPLE_WEIGHT) {
658 		ret = value_set_u64(cw, event, "perf_weight", sample->weight);
659 		if (ret)
660 			return -1;
661 	}
662 
663 	if (type & PERF_SAMPLE_DATA_SRC) {
664 		ret = value_set_u64(cw, event, "perf_data_src",
665 				sample->data_src);
666 		if (ret)
667 			return -1;
668 	}
669 
670 	if (type & PERF_SAMPLE_TRANSACTION) {
671 		ret = value_set_u64(cw, event, "perf_transaction",
672 				sample->transaction);
673 		if (ret)
674 			return -1;
675 	}
676 
677 	return 0;
678 }
679 
680 static int ctf_stream__flush(struct ctf_stream *cs)
681 {
682 	int err = 0;
683 
684 	if (cs) {
685 		err = bt_ctf_stream_flush(cs->stream);
686 		if (err)
687 			pr_err("CTF stream %d flush failed\n", cs->cpu);
688 
689 		pr("Flush stream for cpu %d (%u samples)\n",
690 		   cs->cpu, cs->count);
691 
692 		cs->count = 0;
693 	}
694 
695 	return err;
696 }
697 
/*
 * Create a per-CPU CTF stream and record the CPU number in its packet
 * context's "cpu_id" field.  Returns the new stream, or NULL on any
 * failure (all partially acquired babeltrace objects are released).
 */
static struct ctf_stream *ctf_stream__create(struct ctf_writer *cw, int cpu)
{
	struct ctf_stream *cs;
	struct bt_ctf_field *pkt_ctx   = NULL;
	struct bt_ctf_field *cpu_field = NULL;
	struct bt_ctf_stream *stream   = NULL;
	int ret;

	cs = zalloc(sizeof(*cs));
	if (!cs) {
		pr_err("Failed to allocate ctf stream\n");
		return NULL;
	}

	stream = bt_ctf_writer_create_stream(cw->writer, cw->stream_class);
	if (!stream) {
		pr_err("Failed to create CTF stream\n");
		goto out;
	}

	pkt_ctx = bt_ctf_stream_get_packet_context(stream);
	if (!pkt_ctx) {
		pr_err("Failed to obtain packet context\n");
		goto out;
	}

	/* Only the "cpu_id" member is needed; drop the context right away. */
	cpu_field = bt_ctf_field_structure_get_field(pkt_ctx, "cpu_id");
	bt_ctf_field_put(pkt_ctx);
	if (!cpu_field) {
		pr_err("Failed to obtain cpu field\n");
		goto out;
	}

	ret = bt_ctf_field_unsigned_integer_set_value(cpu_field, (u32) cpu);
	if (ret) {
		pr_err("Failed to update CPU number\n");
		goto out;
	}

	bt_ctf_field_put(cpu_field);

	cs->cpu    = cpu;
	cs->stream = stream;
	return cs;

out:
	/* Release whatever was acquired before the failure. */
	if (cpu_field)
		bt_ctf_field_put(cpu_field);
	if (stream)
		bt_ctf_stream_put(stream);

	free(cs);
	return NULL;
}
752 
753 static void ctf_stream__delete(struct ctf_stream *cs)
754 {
755 	if (cs) {
756 		bt_ctf_stream_put(cs->stream);
757 		free(cs);
758 	}
759 }
760 
761 static struct ctf_stream *ctf_stream(struct ctf_writer *cw, int cpu)
762 {
763 	struct ctf_stream *cs = cw->stream[cpu];
764 
765 	if (!cs) {
766 		cs = ctf_stream__create(cw, cpu);
767 		cw->stream[cpu] = cs;
768 	}
769 
770 	return cs;
771 }
772 
773 static int get_sample_cpu(struct ctf_writer *cw, struct perf_sample *sample,
774 			  struct evsel *evsel)
775 {
776 	int cpu = 0;
777 
778 	if (evsel->core.attr.sample_type & PERF_SAMPLE_CPU)
779 		cpu = sample->cpu;
780 
781 	if (cpu > cw->stream_cnt) {
782 		pr_err("Event was recorded for CPU %d, limit is at %d.\n",
783 			cpu, cw->stream_cnt);
784 		cpu = 0;
785 	}
786 
787 	return cpu;
788 }
789 
#define STREAM_FLUSH_COUNT 100000

/*
 * Currently we have no other way to determine the
 * time for the stream flush other than keep track
 * of the number of events and check it against
 * threshold.
 */
static bool is_flush_needed(struct ctf_stream *cs)
{
	/* Flush once STREAM_FLUSH_COUNT events have been appended. */
	return cs->count >= STREAM_FLUSH_COUNT;
}
802 
803 static int process_sample_event(const struct perf_tool *tool,
804 				union perf_event *_event,
805 				struct perf_sample *sample,
806 				struct evsel *evsel,
807 				struct machine *machine __maybe_unused)
808 {
809 	struct convert *c = container_of(tool, struct convert, tool);
810 	struct evsel_priv *priv = evsel->priv;
811 	struct ctf_writer *cw = &c->writer;
812 	struct ctf_stream *cs;
813 	struct bt_ctf_event_class *event_class;
814 	struct bt_ctf_event *event;
815 	int ret;
816 	unsigned long type = evsel->core.attr.sample_type;
817 
818 	if (WARN_ONCE(!priv, "Failed to setup all events.\n"))
819 		return 0;
820 
821 	if (perf_time__ranges_skip_sample(c->ptime_range, c->range_num, sample->time)) {
822 		++c->skipped;
823 		return 0;
824 	}
825 
826 	event_class = priv->event_class;
827 
828 	/* update stats */
829 	c->events_count++;
830 	c->events_size += _event->header.size;
831 
832 	pr_time2(sample->time, "sample %" PRIu64 "\n", c->events_count);
833 
834 	event = bt_ctf_event_create(event_class);
835 	if (!event) {
836 		pr_err("Failed to create an CTF event\n");
837 		return -1;
838 	}
839 
840 	bt_ctf_clock_set_time(cw->clock, sample->time);
841 
842 	ret = add_generic_values(cw, event, evsel, sample);
843 	if (ret)
844 		return -1;
845 
846 	if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
847 		ret = add_tracepoint_values(cw, event_class, event,
848 					    evsel, sample);
849 		if (ret)
850 			return -1;
851 	}
852 
853 	if (type & PERF_SAMPLE_CALLCHAIN) {
854 		ret = add_callchain_output_values(event_class,
855 				event, sample->callchain);
856 		if (ret)
857 			return -1;
858 	}
859 
860 	if (evsel__is_bpf_output(evsel)) {
861 		ret = add_bpf_output_values(event_class, event, sample);
862 		if (ret)
863 			return -1;
864 	}
865 
866 	cs = ctf_stream(cw, get_sample_cpu(cw, sample, evsel));
867 	if (cs) {
868 		if (is_flush_needed(cs))
869 			ctf_stream__flush(cs);
870 
871 		cs->count++;
872 		bt_ctf_stream_append_event(cs->stream, event);
873 	}
874 
875 	bt_ctf_event_put(event);
876 	return cs ? 0 : -1;
877 }
878 
/*
 * Set one member of the raw perf event record as a payload field,
 * dispatching to the matching value_set_<type>() helper.
 */
#define __NON_SAMPLE_SET_FIELD(_name, _type, _field) 	\
do {							\
	ret = value_set_##_type(cw, event, #_field, _event->_name._field);\
	if (ret)					\
		return -1;				\
} while(0)

/*
 * Generate process_<name>_event() handlers for side-band (non-sample)
 * events: create a CTF event from cw-><name>_class, fill its payload
 * via 'body', append it to stream 0, then chain to perf's default
 * handler for the record type.
 *
 * NOTE(review): the 'return -1' inside __NON_SAMPLE_SET_FIELD (and any
 * failing statement in 'body') returns without bt_ctf_event_put(event),
 * leaking the event reference on failure.
 */
#define __FUNC_PROCESS_NON_SAMPLE(_name, body) 	\
static int process_##_name##_event(const struct perf_tool *tool,	\
				   union perf_event *_event,	\
				   struct perf_sample *sample,	\
				   struct machine *machine)	\
{								\
	struct convert *c = container_of(tool, struct convert, tool);\
	struct ctf_writer *cw = &c->writer;			\
	struct bt_ctf_event_class *event_class = cw->_name##_class;\
	struct bt_ctf_event *event;				\
	struct ctf_stream *cs;					\
	int ret;						\
								\
	c->non_sample_count++;					\
	c->events_size += _event->header.size;			\
	event = bt_ctf_event_create(event_class);		\
	if (!event) {						\
		pr_err("Failed to create an CTF event\n");	\
		return -1;					\
	}							\
								\
	bt_ctf_clock_set_time(cw->clock, sample->time);		\
	body							\
	cs = ctf_stream(cw, 0);					\
	if (cs) {						\
		if (is_flush_needed(cs))			\
			ctf_stream__flush(cs);			\
								\
		cs->count++;					\
		bt_ctf_stream_append_event(cs->stream, event);	\
	}							\
	bt_ctf_event_put(event);				\
								\
	return perf_event__process_##_name(tool, _event, sample, machine);\
}

__FUNC_PROCESS_NON_SAMPLE(comm,
	__NON_SAMPLE_SET_FIELD(comm, u32, pid);
	__NON_SAMPLE_SET_FIELD(comm, u32, tid);
	__NON_SAMPLE_SET_FIELD(comm, string, comm);
)
__FUNC_PROCESS_NON_SAMPLE(fork,
	__NON_SAMPLE_SET_FIELD(fork, u32, pid);
	__NON_SAMPLE_SET_FIELD(fork, u32, ppid);
	__NON_SAMPLE_SET_FIELD(fork, u32, tid);
	__NON_SAMPLE_SET_FIELD(fork, u32, ptid);
	__NON_SAMPLE_SET_FIELD(fork, u64, time);
)

/* PERF_RECORD_EXIT reuses the fork record layout, hence 'fork' here. */
__FUNC_PROCESS_NON_SAMPLE(exit,
	__NON_SAMPLE_SET_FIELD(fork, u32, pid);
	__NON_SAMPLE_SET_FIELD(fork, u32, ppid);
	__NON_SAMPLE_SET_FIELD(fork, u32, tid);
	__NON_SAMPLE_SET_FIELD(fork, u32, ptid);
	__NON_SAMPLE_SET_FIELD(fork, u64, time);
)
__FUNC_PROCESS_NON_SAMPLE(mmap,
	__NON_SAMPLE_SET_FIELD(mmap, u32, pid);
	__NON_SAMPLE_SET_FIELD(mmap, u32, tid);
	__NON_SAMPLE_SET_FIELD(mmap, u64_hex, start);
	__NON_SAMPLE_SET_FIELD(mmap, string, filename);
)
__FUNC_PROCESS_NON_SAMPLE(mmap2,
	__NON_SAMPLE_SET_FIELD(mmap2, u32, pid);
	__NON_SAMPLE_SET_FIELD(mmap2, u32, tid);
	__NON_SAMPLE_SET_FIELD(mmap2, u64_hex, start);
	__NON_SAMPLE_SET_FIELD(mmap2, string, filename);
)
#undef __NON_SAMPLE_SET_FIELD
#undef __FUNC_PROCESS_NON_SAMPLE
956 
/*
 * If dup < 0, add a prefix. Else, add _dupl_X suffix.
 *
 * Returns a freshly malloc'ed name (caller owns it), or NULL when
 * dup >= 10 or allocation fails.  If 'name' differs from 'orig_name'
 * it was itself produced here earlier, so it is freed before return.
 */
static char *change_name(char *name, char *orig_name, int dup)
{
	char *fixed = NULL;
	size_t sz;

	if (!name)
		name = orig_name;

	if (dup >= 10)
		goto out;

	/*
	 * Add '_' prefix to potential keywork.  According to
	 * Mathieu Desnoyers (https://lore.kernel.org/lkml/1074266107.40857.1422045946295.JavaMail.zimbra@efficios.com),
	 * further CTF spec updating may require us to use '$'.
	 */
	sz = (dup < 0) ? strlen(name) + sizeof("_") :
			 strlen(orig_name) + sizeof("_dupl_X");

	fixed = malloc(sz);
	if (!fixed)
		goto out;

	if (dup < 0)
		snprintf(fixed, sz, "_%s", name);
	else
		snprintf(fixed, sz, "%s_dupl_%d", orig_name, dup);

out:
	if (name != orig_name)
		free(name);
	return fixed;
}
992 
993 static int event_class_add_field(struct bt_ctf_event_class *event_class,
994 		struct bt_ctf_field_type *type,
995 		struct tep_format_field *field)
996 {
997 	struct bt_ctf_field_type *t = NULL;
998 	char *name;
999 	int dup = 1;
1000 	int ret;
1001 
1002 	/* alias was already assigned */
1003 	if (field->alias != field->name)
1004 		return bt_ctf_event_class_add_field(event_class, type,
1005 				(char *)field->alias);
1006 
1007 	name = field->name;
1008 
1009 	/* If 'name' is a keywork, add prefix. */
1010 	if (bt_ctf_validate_identifier(name))
1011 		name = change_name(name, field->name, -1);
1012 
1013 	if (!name) {
1014 		pr_err("Failed to fix invalid identifier.");
1015 		return -1;
1016 	}
1017 	while ((t = bt_ctf_event_class_get_field_by_name(event_class, name))) {
1018 		bt_ctf_field_type_put(t);
1019 		name = change_name(name, field->name, dup++);
1020 		if (!name) {
1021 			pr_err("Failed to create dup name for '%s'\n", field->name);
1022 			return -1;
1023 		}
1024 	}
1025 
1026 	ret = bt_ctf_event_class_add_field(event_class, type, name);
1027 	if (!ret)
1028 		field->alias = name;
1029 
1030 	return ret;
1031 }
1032 
/*
 * Declare CTF field types for a linked list of tracepoint format
 * fields on the given event class.  Returns 0 on success, -1 on the
 * first field that cannot be added.
 */
static int add_tracepoint_fields_types(struct ctf_writer *cw,
				       struct tep_format_field *fields,
				       struct bt_ctf_event_class *event_class)
{
	struct tep_format_field *field;
	int ret;

	for (field = fields; field; field = field->next) {
		struct bt_ctf_field_type *type;
		unsigned long flags = field->flags;

		pr2("  field '%s'\n", field->name);

		type = get_tracepoint_field_type(cw, field);
		if (!type)
			return -1;

		/*
		 * A string is an array of chars. For this we use the string
		 * type and don't care that it is an array. What we don't
		 * support is an array of strings.
		 */
		if (flags & TEP_FIELD_IS_STRING)
			flags &= ~TEP_FIELD_IS_ARRAY;

		if (flags & TEP_FIELD_IS_ARRAY)
			type = bt_ctf_field_type_array_create(type, field->arraylen);

		ret = event_class_add_field(event_class, type, field);

		/* The array wrapper was created above; drop our reference. */
		if (flags & TEP_FIELD_IS_ARRAY)
			bt_ctf_field_type_put(type);

		if (ret) {
			pr_err("Failed to add field '%s': %d\n",
					field->name, ret);
			return -1;
		}
	}

	return 0;
}
1075 
1076 static int add_tracepoint_types(struct ctf_writer *cw,
1077 				struct evsel *evsel,
1078 				struct bt_ctf_event_class *class)
1079 {
1080 	const struct tep_event *tp_format = evsel__tp_format(evsel);
1081 	struct tep_format_field *common_fields = tp_format ? tp_format->format.common_fields : NULL;
1082 	struct tep_format_field *fields        = tp_format ? tp_format->format.fields : NULL;
1083 	int ret;
1084 
1085 	ret = add_tracepoint_fields_types(cw, common_fields, class);
1086 	if (!ret)
1087 		ret = add_tracepoint_fields_types(cw, fields, class);
1088 
1089 	return ret;
1090 }
1091 
1092 static int add_bpf_output_types(struct ctf_writer *cw,
1093 				struct bt_ctf_event_class *class)
1094 {
1095 	struct bt_ctf_field_type *len_type = cw->data.u32;
1096 	struct bt_ctf_field_type *seq_base_type = cw->data.u32_hex;
1097 	struct bt_ctf_field_type *seq_type;
1098 	int ret;
1099 
1100 	ret = bt_ctf_event_class_add_field(class, len_type, "raw_len");
1101 	if (ret)
1102 		return ret;
1103 
1104 	seq_type = bt_ctf_field_type_sequence_create(seq_base_type, "raw_len");
1105 	if (!seq_type)
1106 		return -1;
1107 
1108 	return bt_ctf_event_class_add_field(class, seq_type, "raw_data");
1109 }
1110 
1111 static int add_generic_types(struct ctf_writer *cw, struct evsel *evsel,
1112 			     struct bt_ctf_event_class *event_class)
1113 {
1114 	u64 type = evsel->core.attr.sample_type;
1115 
1116 	/*
1117 	 * missing:
1118 	 *   PERF_SAMPLE_TIME         - not needed as we have it in
1119 	 *                              ctf event header
1120 	 *   PERF_SAMPLE_READ         - TODO
1121 	 *   PERF_SAMPLE_CALLCHAIN    - TODO
1122 	 *   PERF_SAMPLE_RAW          - tracepoint fields and BPF output
1123 	 *                              are handled separately
1124 	 *   PERF_SAMPLE_BRANCH_STACK - TODO
1125 	 *   PERF_SAMPLE_REGS_USER    - TODO
1126 	 *   PERF_SAMPLE_STACK_USER   - TODO
1127 	 */
1128 
1129 #define ADD_FIELD(cl, t, n)						\
1130 	do {								\
1131 		pr2("  field '%s'\n", n);				\
1132 		if (bt_ctf_event_class_add_field(cl, t, n)) {		\
1133 			pr_err("Failed to add field '%s';\n", n);	\
1134 			return -1;					\
1135 		}							\
1136 	} while (0)
1137 
1138 	if (type & PERF_SAMPLE_IP)
1139 		ADD_FIELD(event_class, cw->data.u64_hex, "perf_ip");
1140 
1141 	if (type & PERF_SAMPLE_TID) {
1142 		ADD_FIELD(event_class, cw->data.s32, "perf_tid");
1143 		ADD_FIELD(event_class, cw->data.s32, "perf_pid");
1144 	}
1145 
1146 	if ((type & PERF_SAMPLE_ID) ||
1147 	    (type & PERF_SAMPLE_IDENTIFIER))
1148 		ADD_FIELD(event_class, cw->data.u64, "perf_id");
1149 
1150 	if (type & PERF_SAMPLE_STREAM_ID)
1151 		ADD_FIELD(event_class, cw->data.u64, "perf_stream_id");
1152 
1153 	if (type & PERF_SAMPLE_PERIOD)
1154 		ADD_FIELD(event_class, cw->data.u64, "perf_period");
1155 
1156 	if (type & PERF_SAMPLE_WEIGHT)
1157 		ADD_FIELD(event_class, cw->data.u64, "perf_weight");
1158 
1159 	if (type & PERF_SAMPLE_DATA_SRC)
1160 		ADD_FIELD(event_class, cw->data.u64, "perf_data_src");
1161 
1162 	if (type & PERF_SAMPLE_TRANSACTION)
1163 		ADD_FIELD(event_class, cw->data.u64, "perf_transaction");
1164 
1165 	if (type & PERF_SAMPLE_CALLCHAIN) {
1166 		ADD_FIELD(event_class, cw->data.u32, "perf_callchain_size");
1167 		ADD_FIELD(event_class,
1168 			bt_ctf_field_type_sequence_create(
1169 				cw->data.u64_hex, "perf_callchain_size"),
1170 			"perf_callchain");
1171 	}
1172 
1173 #undef ADD_FIELD
1174 	return 0;
1175 }
1176 
1177 static int add_event(struct ctf_writer *cw, struct evsel *evsel)
1178 {
1179 	struct bt_ctf_event_class *event_class;
1180 	struct evsel_priv *priv;
1181 	const char *name = evsel__name(evsel);
1182 	int ret;
1183 
1184 	if (evsel->priv) {
1185 		pr_err("Error: attempt to add already added event %s\n", name);
1186 		return -1;
1187 	}
1188 	pr("Adding event '%s' (type %d)\n", name, evsel->core.attr.type);
1189 
1190 	event_class = bt_ctf_event_class_create(name);
1191 	if (!event_class)
1192 		return -1;
1193 
1194 	ret = add_generic_types(cw, evsel, event_class);
1195 	if (ret)
1196 		goto err;
1197 
1198 	if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
1199 		ret = add_tracepoint_types(cw, evsel, event_class);
1200 		if (ret)
1201 			goto err;
1202 	}
1203 
1204 	if (evsel__is_bpf_output(evsel)) {
1205 		ret = add_bpf_output_types(cw, event_class);
1206 		if (ret)
1207 			goto err;
1208 	}
1209 
1210 	ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);
1211 	if (ret) {
1212 		pr("Failed to add event class into stream.\n");
1213 		goto err;
1214 	}
1215 
1216 	priv = malloc(sizeof(*priv));
1217 	if (!priv)
1218 		goto err;
1219 
1220 	priv->event_class = event_class;
1221 	evsel->priv       = priv;
1222 	return 0;
1223 
1224 err:
1225 	bt_ctf_event_class_put(event_class);
1226 	pr_err("Failed to add event '%s'.\n", name);
1227 	return -1;
1228 }
1229 
/* Selects which subset of the evlist setup_events() should register. */
enum setup_events_type {
	SETUP_EVENTS_ALL,		/* every event in the evlist */
	SETUP_EVENTS_NOT_TRACEPOINT,	/* skip tracepoint events */
	SETUP_EVENTS_TRACEPOINT_ONLY,	/* only tracepoint events */
};
1235 
1236 static int setup_events(struct ctf_writer *cw, struct perf_session *session,
1237 			enum setup_events_type type)
1238 {
1239 	struct evlist *evlist = session->evlist;
1240 	struct evsel *evsel;
1241 	int ret;
1242 
1243 	evlist__for_each_entry(evlist, evsel) {
1244 		bool is_tracepoint = evsel->core.attr.type == PERF_TYPE_TRACEPOINT;
1245 
1246 		if (is_tracepoint && type == SETUP_EVENTS_NOT_TRACEPOINT)
1247 			continue;
1248 
1249 		if (!is_tracepoint && type == SETUP_EVENTS_TRACEPOINT_ONLY)
1250 			continue;
1251 
1252 		ret = add_event(cw, evsel);
1253 		if (ret)
1254 			return ret;
1255 	}
1256 	return 0;
1257 }
1258 
1259 #define __NON_SAMPLE_ADD_FIELD(t, n)						\
1260 	do {							\
1261 		pr2("  field '%s'\n", #n);			\
1262 		if (bt_ctf_event_class_add_field(event_class, cw->data.t, #n)) {\
1263 			pr_err("Failed to add field '%s';\n", #n);\
1264 			return -1;				\
1265 		}						\
1266 	} while(0)
1267 
1268 #define __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(_name, body) 		\
1269 static int add_##_name##_event(struct ctf_writer *cw)		\
1270 {								\
1271 	struct bt_ctf_event_class *event_class;			\
1272 	int ret;						\
1273 								\
1274 	pr("Adding "#_name" event\n");				\
1275 	event_class = bt_ctf_event_class_create("perf_" #_name);\
1276 	if (!event_class)					\
1277 		return -1;					\
1278 	body							\
1279 								\
1280 	ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);\
1281 	if (ret) {						\
1282 		pr("Failed to add event class '"#_name"' into stream.\n");\
1283 		return ret;					\
1284 	}							\
1285 								\
1286 	cw->_name##_class = event_class;			\
1287 	bt_ctf_event_class_put(event_class);			\
1288 	return 0;						\
1289 }
1290 
1291 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(comm,
1292 	__NON_SAMPLE_ADD_FIELD(u32, pid);
1293 	__NON_SAMPLE_ADD_FIELD(u32, tid);
1294 	__NON_SAMPLE_ADD_FIELD(string, comm);
1295 )
1296 
1297 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(fork,
1298 	__NON_SAMPLE_ADD_FIELD(u32, pid);
1299 	__NON_SAMPLE_ADD_FIELD(u32, ppid);
1300 	__NON_SAMPLE_ADD_FIELD(u32, tid);
1301 	__NON_SAMPLE_ADD_FIELD(u32, ptid);
1302 	__NON_SAMPLE_ADD_FIELD(u64, time);
1303 )
1304 
1305 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(exit,
1306 	__NON_SAMPLE_ADD_FIELD(u32, pid);
1307 	__NON_SAMPLE_ADD_FIELD(u32, ppid);
1308 	__NON_SAMPLE_ADD_FIELD(u32, tid);
1309 	__NON_SAMPLE_ADD_FIELD(u32, ptid);
1310 	__NON_SAMPLE_ADD_FIELD(u64, time);
1311 )
1312 
1313 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(mmap,
1314 	__NON_SAMPLE_ADD_FIELD(u32, pid);
1315 	__NON_SAMPLE_ADD_FIELD(u32, tid);
1316 	__NON_SAMPLE_ADD_FIELD(u64_hex, start);
1317 	__NON_SAMPLE_ADD_FIELD(string, filename);
1318 )
1319 
1320 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(mmap2,
1321 	__NON_SAMPLE_ADD_FIELD(u32, pid);
1322 	__NON_SAMPLE_ADD_FIELD(u32, tid);
1323 	__NON_SAMPLE_ADD_FIELD(u64_hex, start);
1324 	__NON_SAMPLE_ADD_FIELD(string, filename);
1325 )
1326 #undef __NON_SAMPLE_ADD_FIELD
1327 #undef __FUNC_ADD_NON_SAMPLE_EVENT_CLASS
1328 
1329 static int setup_non_sample_events(struct ctf_writer *cw,
1330 				   struct perf_session *session __maybe_unused)
1331 {
1332 	int ret;
1333 
1334 	ret = add_comm_event(cw);
1335 	if (ret)
1336 		return ret;
1337 	ret = add_exit_event(cw);
1338 	if (ret)
1339 		return ret;
1340 	ret = add_fork_event(cw);
1341 	if (ret)
1342 		return ret;
1343 	ret = add_mmap_event(cw);
1344 	if (ret)
1345 		return ret;
1346 	ret = add_mmap2_event(cw);
1347 	if (ret)
1348 		return ret;
1349 	return 0;
1350 }
1351 
1352 static void cleanup_events(struct perf_session *session)
1353 {
1354 	struct evlist *evlist = session->evlist;
1355 	struct evsel *evsel;
1356 
1357 	evlist__for_each_entry(evlist, evsel) {
1358 		struct evsel_priv *priv;
1359 
1360 		priv = evsel->priv;
1361 		if (priv)
1362 			bt_ctf_event_class_put(priv->event_class);
1363 		zfree(&evsel->priv);
1364 	}
1365 
1366 	evlist__delete(evlist);
1367 	session->evlist = NULL;
1368 }
1369 
1370 static int setup_streams(struct ctf_writer *cw, struct perf_session *session)
1371 {
1372 	struct ctf_stream **stream;
1373 	struct perf_env *env = perf_session__env(session);
1374 	int ncpus;
1375 
1376 	/*
1377 	 * Try to get the number of cpus used in the data file,
1378 	 * if not present fallback to the MAX_CPUS.
1379 	 */
1380 	ncpus = env->nr_cpus_avail ?: MAX_CPUS;
1381 
1382 	stream = calloc(ncpus, sizeof(*stream));
1383 	if (!stream) {
1384 		pr_err("Failed to allocate streams.\n");
1385 		return -ENOMEM;
1386 	}
1387 
1388 	cw->stream     = stream;
1389 	cw->stream_cnt = ncpus;
1390 	return 0;
1391 }
1392 
1393 static void free_streams(struct ctf_writer *cw)
1394 {
1395 	int cpu;
1396 
1397 	for (cpu = 0; cpu < cw->stream_cnt; cpu++)
1398 		ctf_stream__delete(cw->stream[cpu]);
1399 
1400 	zfree(&cw->stream);
1401 }
1402 
1403 static int ctf_writer__setup_env(struct ctf_writer *cw,
1404 				 struct perf_session *session)
1405 {
1406 	struct perf_env *env = perf_session__env(session);
1407 	struct bt_ctf_writer *writer = cw->writer;
1408 
1409 #define ADD(__n, __v)							\
1410 do {									\
1411 	if (__v && bt_ctf_writer_add_environment_field(writer, __n, __v))	\
1412 		return -1;						\
1413 } while (0)
1414 
1415 	ADD("host",    env->hostname);
1416 	ADD("sysname", "Linux");
1417 	ADD("release", env->os_release);
1418 	ADD("version", env->version);
1419 	ADD("machine", env->arch);
1420 	ADD("domain", "kernel");
1421 	ADD("tracer_name", "perf");
1422 
1423 #undef ADD
1424 	return 0;
1425 }
1426 
1427 static int process_feature_event(const struct perf_tool *tool,
1428 				 struct perf_session *session,
1429 				 union perf_event *event)
1430 {
1431 	struct convert *c = container_of(tool, struct convert, tool);
1432 	struct ctf_writer *cw = &c->writer;
1433 	struct perf_record_header_feature *fe = &event->feat;
1434 	int ret = perf_event__process_feature(tool, session, event);
1435 
1436 	if (ret)
1437 		return ret;
1438 
1439 	switch (fe->feat_id) {
1440 	case HEADER_EVENT_DESC:
1441 		/*
1442 		 * In non-pipe mode (not here) the evsels combine the desc with
1443 		 * the perf_event_attr when it is parsed. In pipe mode the
1444 		 * perf_event_attr events appear first and then the event desc
1445 		 * feature events that set the names appear after. Once we have
1446 		 * the full evsel data we can generate the babeltrace
1447 		 * events. For tracepoint events we still don't have the tracing
1448 		 * data and so need to wait until the tracing data event to add
1449 		 * those events to babeltrace.
1450 		 */
1451 		return setup_events(cw, session, SETUP_EVENTS_NOT_TRACEPOINT);
1452 	case HEADER_HOSTNAME:
1453 		if (session->header.env.hostname) {
1454 			return bt_ctf_writer_add_environment_field(cw->writer, "host",
1455 								   session->header.env.hostname);
1456 		}
1457 		break;
1458 	case HEADER_OSRELEASE:
1459 		if (session->header.env.os_release) {
1460 			return bt_ctf_writer_add_environment_field(cw->writer, "release",
1461 								   session->header.env.os_release);
1462 		}
1463 		break;
1464 	case HEADER_VERSION:
1465 		if (session->header.env.version) {
1466 			return bt_ctf_writer_add_environment_field(cw->writer, "version",
1467 								   session->header.env.version);
1468 		}
1469 		break;
1470 	case HEADER_ARCH:
1471 		if (session->header.env.arch) {
1472 			return bt_ctf_writer_add_environment_field(cw->writer, "machine",
1473 								   session->header.env.arch);
1474 		}
1475 		break;
1476 	default:
1477 		break;
1478 	}
1479 	return 0;
1480 }
1481 
1482 static int process_tracing_data(const struct perf_tool *tool,
1483 				struct perf_session *session,
1484 				union perf_event *event)
1485 {
1486 	struct convert *c = container_of(tool, struct convert, tool);
1487 	struct ctf_writer *cw = &c->writer;
1488 	int ret;
1489 
1490 	ret = perf_event__process_tracing_data(tool, session, event);
1491 	if (ret < 0)
1492 		return ret;
1493 
1494 	/*
1495 	 * Now the attr was set up by the attr event, the name by the feature
1496 	 * event desc event and the tracepoint data set up above, the tracepoint
1497 	 * babeltrace events can be added.
1498 	 */
1499 	return setup_events(cw, session, SETUP_EVENTS_TRACEPOINT_ONLY);
1500 }
1501 
/*
 * Configure cw->clock.  With 'tod' the clock is described by the
 * clockid recorded in the session and offset so that timestamps map to
 * time of day; otherwise it is a plain "perf clock" with zero offset.
 */
static int ctf_writer__setup_clock(struct ctf_writer *cw,
				   struct perf_session *session,
				   bool tod)
{
	struct bt_ctf_clock *clock = cw->clock;
	const char *desc = "perf clock";
	int64_t offset = 0;

	if (tod) {
		struct perf_env *env = perf_session__env(session);

		if (!env->clock.enabled) {
			pr_err("Can't provide --tod time, missing clock data. "
			       "Please record with -k/--clockid option.\n");
			return -1;
		}

		desc   = clockid_name(env->clock.clockid);
		/* shift from the recorded clockid timebase to wall-clock time */
		offset = env->clock.tod_ns - env->clock.clockid_ns;
	}

/* Apply one clock property; bail out of the function on the first failure. */
#define SET(__n, __v)				\
do {						\
	if (bt_ctf_clock_set_##__n(clock, __v))	\
		return -1;			\
} while (0)

	/* perf timestamps are in nanoseconds */
	SET(frequency,   1000000000);
	SET(offset,      offset);
	SET(description, desc);
	SET(precision,   10);
	SET(is_absolute, 0);

#undef SET
	return 0;
}
1538 
1539 static struct bt_ctf_field_type *create_int_type(int size, bool sign, bool hex)
1540 {
1541 	struct bt_ctf_field_type *type;
1542 
1543 	type = bt_ctf_field_type_integer_create(size);
1544 	if (!type)
1545 		return NULL;
1546 
1547 	if (sign &&
1548 	    bt_ctf_field_type_integer_set_signed(type, 1))
1549 		goto err;
1550 
1551 	if (hex &&
1552 	    bt_ctf_field_type_integer_set_base(type, BT_CTF_INTEGER_BASE_HEXADECIMAL))
1553 		goto err;
1554 
1555 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
1556 	bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_BIG_ENDIAN);
1557 #else
1558 	bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_LITTLE_ENDIAN);
1559 #endif
1560 
1561 	pr2("Created type: INTEGER %d-bit %ssigned %s\n",
1562 	    size, sign ? "un" : "", hex ? "hex" : "");
1563 	return type;
1564 
1565 err:
1566 	bt_ctf_field_type_put(type);
1567 	return NULL;
1568 }
1569 
1570 static void ctf_writer__cleanup_data(struct ctf_writer *cw)
1571 {
1572 	unsigned int i;
1573 
1574 	for (i = 0; i < ARRAY_SIZE(cw->data.array); i++)
1575 		bt_ctf_field_type_put(cw->data.array[i]);
1576 }
1577 
/*
 * Create the basic CTF field types cached in cw->data (signed/unsigned
 * 32/64-bit integers, hex variants, string).  On any failure every
 * already-created type is released via ctf_writer__cleanup_data().
 */
static int ctf_writer__init_data(struct ctf_writer *cw)
{
/* Create one integer type; jump to the common error path on failure. */
#define CREATE_INT_TYPE(type, size, sign, hex)		\
do {							\
	(type) = create_int_type(size, sign, hex);	\
	if (!(type))					\
		goto err;				\
} while (0)

	CREATE_INT_TYPE(cw->data.s64, 64, true,  false);
	CREATE_INT_TYPE(cw->data.u64, 64, false, false);
	CREATE_INT_TYPE(cw->data.s32, 32, true,  false);
	CREATE_INT_TYPE(cw->data.u32, 32, false, false);
	CREATE_INT_TYPE(cw->data.u32_hex, 32, false, true);
	CREATE_INT_TYPE(cw->data.u64_hex, 64, false, true);

	cw->data.string  = bt_ctf_field_type_string_create();
	if (cw->data.string)
		return 0;

err:
	ctf_writer__cleanup_data(cw);
	pr_err("Failed to create data types.\n");
	return -1;
}
1603 
/*
 * Release every resource owned by the writer (field types, clock,
 * streams, stream class, the writer itself) and zero the struct so a
 * repeated cleanup is harmless.
 */
static void ctf_writer__cleanup(struct ctf_writer *cw)
{
	ctf_writer__cleanup_data(cw);

	bt_ctf_clock_put(cw->clock);
	free_streams(cw);
	bt_ctf_stream_class_put(cw->stream_class);
	bt_ctf_writer_put(cw->writer);

	/* and NULL all the pointers */
	memset(cw, 0, sizeof(*cw));
}
1616 
/*
 * Initialize the CTF writer: create the babeltrace writer for @path,
 * the clock, the stream class (with a cpu_id packet context field) and
 * the cached field types.  On failure everything created so far is
 * torn down via ctf_writer__cleanup().
 */
static int ctf_writer__init(struct ctf_writer *cw, const char *path,
			    struct perf_session *session, bool tod)
{
	struct bt_ctf_writer		*writer;
	struct bt_ctf_stream_class	*stream_class;
	struct bt_ctf_clock		*clock;
	struct bt_ctf_field_type	*pkt_ctx_type;
	int				ret;

	/* CTF writer */
	writer = bt_ctf_writer_create(path);
	if (!writer)
		goto err;

	cw->writer = writer;

	/* CTF clock */
	clock = bt_ctf_clock_create("perf_clock");
	if (!clock) {
		pr("Failed to create CTF clock.\n");
		goto err_cleanup;
	}

	cw->clock = clock;

	if (ctf_writer__setup_clock(cw, session, tod)) {
		pr("Failed to setup CTF clock.\n");
		goto err_cleanup;
	}

	/* CTF stream class */
	stream_class = bt_ctf_stream_class_create("perf_stream");
	if (!stream_class) {
		pr("Failed to create CTF stream class.\n");
		goto err_cleanup;
	}

	cw->stream_class = stream_class;

	/* CTF clock stream setup */
	if (bt_ctf_stream_class_set_clock(stream_class, clock)) {
		pr("Failed to assign CTF clock to stream class.\n");
		goto err_cleanup;
	}

	if (ctf_writer__init_data(cw))
		goto err_cleanup;

	/* Add cpu_id for packet context */
	pkt_ctx_type = bt_ctf_stream_class_get_packet_context_type(stream_class);
	if (!pkt_ctx_type)
		goto err_cleanup;

	/* the stream class keeps its own reference to the context type */
	ret = bt_ctf_field_type_structure_add_field(pkt_ctx_type, cw->data.u32, "cpu_id");
	bt_ctf_field_type_put(pkt_ctx_type);
	if (ret)
		goto err_cleanup;

	/* CTF clock writer setup */
	if (bt_ctf_writer_add_clock(writer, clock)) {
		pr("Failed to assign CTF clock to writer.\n");
		goto err_cleanup;
	}

	return 0;

err_cleanup:
	ctf_writer__cleanup(cw);
err:
	pr_err("Failed to setup CTF writer.\n");
	return -1;
}
1689 
1690 static int ctf_writer__flush_streams(struct ctf_writer *cw)
1691 {
1692 	int cpu, ret = 0;
1693 
1694 	for (cpu = 0; cpu < cw->stream_cnt && !ret; cpu++)
1695 		ret = ctf_stream__flush(cw->stream[cpu]);
1696 
1697 	return ret;
1698 }
1699 
1700 static int convert__config(const char *var, const char *value, void *cb)
1701 {
1702 	struct convert *c = cb;
1703 
1704 	if (!strcmp(var, "convert.queue-size"))
1705 		return perf_config_u64(&c->queue_size, var, value);
1706 
1707 	return 0;
1708 }
1709 
/*
 * Convert the perf.data file @input into a CTF trace at @path.
 * Sets up the tool callbacks, creates the session and the CTF writer,
 * processes all events (samples become CTF events; with opts->all the
 * comm/exit/fork/mmap/mmap2 events are converted too), flushes the
 * streams and prints a summary.  Returns 0 on success, negative on
 * error.
 */
int bt_convert__perf2ctf(const char *input, const char *path,
			 struct perf_data_convert_opts *opts)
{
	struct perf_session *session;
	struct perf_data data = {
		.path	   = input,
		.mode      = PERF_DATA_MODE_READ,
		.force     = opts->force,
	};
	struct convert c = {};
	struct ctf_writer *cw = &c.writer;
	int err;

	perf_tool__init(&c.tool, /*ordered_events=*/true);
	c.tool.sample          = process_sample_event;
	c.tool.mmap            = perf_event__process_mmap;
	c.tool.mmap2           = perf_event__process_mmap2;
	c.tool.comm            = perf_event__process_comm;
	c.tool.exit            = perf_event__process_exit;
	c.tool.fork            = perf_event__process_fork;
	c.tool.lost            = perf_event__process_lost;
	c.tool.tracing_data    = process_tracing_data;
	c.tool.build_id        = perf_event__process_build_id;
	c.tool.namespaces      = perf_event__process_namespaces;
	c.tool.finished_round  = perf_event__process_finished_round;
	c.tool.attr            = perf_event__process_attr;
	c.tool.feature         = process_feature_event;
	c.tool.ordering_requires_timestamps = true;

	/* with --all, also emit CTF events for the non-sample records */
	if (opts->all) {
		c.tool.comm = process_comm_event;
		c.tool.exit = process_exit_event;
		c.tool.fork = process_fork_event;
		c.tool.mmap = process_mmap_event;
		c.tool.mmap2 = process_mmap2_event;
	}

	err = perf_config(convert__config, &c);
	if (err)
		return err;

	err = -1;
	/* perf.data session */
	session = perf_session__new(&data, &c.tool);
	if (IS_ERR(session))
		return PTR_ERR(session);

	/* optional --time filtering of the samples to convert */
	if (opts->time_str) {
		err = perf_time__parse_for_ranges(opts->time_str, session,
						  &c.ptime_range,
						  &c.range_size,
						  &c.range_num);
		if (err < 0)
			goto free_session;
	}

	/* CTF writer */
	if (ctf_writer__init(cw, path, session, opts->tod))
		goto free_session;

	if (c.queue_size) {
		ordered_events__set_alloc_size(&session->ordered_events,
					       c.queue_size);
	}

	/* CTF writer env/clock setup  */
	if (ctf_writer__setup_env(cw, session))
		goto free_writer;

	/*
	 * CTF events setup. Note, in pipe mode no events exist yet (they come
	 * in via header feature events) and so this does nothing.
	 */
	if (setup_events(cw, session, SETUP_EVENTS_ALL))
		goto free_writer;

	if (opts->all && setup_non_sample_events(cw, session))
		goto free_writer;

	if (setup_streams(cw, session))
		goto free_writer;

	err = perf_session__process_events(session);
	if (!err)
		err = ctf_writer__flush_streams(cw);
	else
		pr_err("Error during conversion.\n");

	fprintf(stderr,	"[ perf data convert: Converted '%s' into CTF data '%s' ]\n",
		data.path, path);

	fprintf(stderr,	"[ perf data convert: Converted and wrote %.3f MB (%" PRIu64 " samples",
		(double) c.events_size / 1024.0 / 1024.0,
		c.events_count);

	if (!c.non_sample_count)
		fprintf(stderr, ") ]\n");
	else
		fprintf(stderr, ", %" PRIu64 " non-samples) ]\n", c.non_sample_count);

	if (c.skipped) {
		fprintf(stderr,	"[ perf data convert: Skipped %" PRIu64 " samples ]\n",
			c.skipped);
	}

	if (c.ptime_range)
		zfree(&c.ptime_range);

	cleanup_events(session);
	perf_session__delete(session);
	ctf_writer__cleanup(cw);

	return err;

free_writer:
	ctf_writer__cleanup(cw);
free_session:
	if (c.ptime_range)
		zfree(&c.ptime_range);

	perf_session__delete(session);
	pr_err("Error during conversion setup.\n");
	return err;
}
1834