xref: /linux/tools/perf/util/data-convert-bt.c (revision 1672f3707a6ef4b386c30bb76df2f62e58a39430)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * CTF writing support via babeltrace.
4  *
5  * Copyright (C) 2014, Jiri Olsa <jolsa@redhat.com>
6  * Copyright (C) 2014, Sebastian Andrzej Siewior <bigeasy@linutronix.de>
7  */
8 
9 #include <errno.h>
10 #include <inttypes.h>
11 #include <linux/compiler.h>
12 #include <linux/kernel.h>
13 #include <linux/zalloc.h>
14 #include <babeltrace/ctf-writer/writer.h>
15 #include <babeltrace/ctf-writer/clock.h>
16 #include <babeltrace/ctf-writer/stream.h>
17 #include <babeltrace/ctf-writer/event.h>
18 #include <babeltrace/ctf-writer/event-types.h>
19 #include <babeltrace/ctf-writer/event-fields.h>
20 #include <babeltrace/ctf-ir/utils.h>
21 #include <babeltrace/ctf/events.h>
22 #include "asm/bug.h"
23 #include "data-convert.h"
24 #include "session.h"
25 #include "debug.h"
26 #include "tool.h"
27 #include "evlist.h"
28 #include "evsel.h"
29 #include "machine.h"
30 #include "config.h"
31 #include <linux/ctype.h>
32 #include <linux/err.h>
33 #include <linux/time64.h>
34 #include "util.h"
35 #include "clockid.h"
36 #include "util/sample.h"
37 #include "util/time-utils.h"
38 
39 #ifdef HAVE_LIBTRACEEVENT
40 #include <event-parse.h>
41 #endif
42 
/*
 * Debug print helpers: pr_N() routes through eprintf() at verbosity level
 * 'n' gated by the debug_data_convert variable.  pr()/pr2() are the level
 * 1/2 shorthands; pr_time2() additionally prints the timestamp 't'.
 */
#define pr_N(n, fmt, ...) \
	eprintf(n, debug_data_convert, fmt, ##__VA_ARGS__)

#define pr(fmt, ...)  pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)
#define pr2(fmt, ...) pr_N(2, pr_fmt(fmt), ##__VA_ARGS__)

#define pr_time2(t, fmt, ...) pr_time_N(2, debug_data_convert, t, pr_fmt(fmt), ##__VA_ARGS__)
50 
/* Per-evsel private data: the CTF event class created for this evsel. */
struct evsel_priv {
	struct bt_ctf_event_class *event_class;
};

/* Upper bound on the number of per-CPU CTF streams. */
#define MAX_CPUS	4096

/* One CTF output stream, bound to a single CPU. */
struct ctf_stream {
	struct bt_ctf_stream *stream;
	int cpu;	/* CPU whose events this stream carries */
	u32 count;	/* events appended since the last flush */
};
62 
/* Top-level CTF writer state: babeltrace handles, per-CPU streams and
 * the pre-created field types shared by all event classes. */
struct ctf_writer {
	/* writer primitives */
	struct bt_ctf_writer		 *writer;
	struct ctf_stream		**stream;	/* per-CPU, stream_cnt entries */
	int				  stream_cnt;
	struct bt_ctf_stream_class	 *stream_class;
	struct bt_ctf_clock		 *clock;

	/* data types */
	union {
		struct {
			struct bt_ctf_field_type	*s64;
			struct bt_ctf_field_type	*u64;
			struct bt_ctf_field_type	*s32;
			struct bt_ctf_field_type	*u32;
			struct bt_ctf_field_type	*string;
			struct bt_ctf_field_type	*u32_hex;
			struct bt_ctf_field_type	*u64_hex;
		};
		/*
		 * Array view over the named members above; there are seven
		 * of them, so size the array to match.  The previous [6]
		 * left u64_hex invisible to any ARRAY_SIZE()-based walk
		 * over cw->data.array (e.g. type cleanup).
		 */
		struct bt_ctf_field_type *array[7];
	} data;
	/* event classes for the synthesized non-sample records */
	struct bt_ctf_event_class	*comm_class;
	struct bt_ctf_event_class	*exit_class;
	struct bt_ctf_event_class	*fork_class;
	struct bt_ctf_event_class	*mmap_class;
	struct bt_ctf_event_class	*mmap2_class;
};
90 
/* State for one perf.data -> CTF conversion run. */
struct convert {
	struct perf_tool	tool;	/* event-processing callbacks */
	struct ctf_writer	writer;

	/* time ranges used to skip samples (see process_sample_event()) */
	struct perf_time_interval *ptime_range;
	int range_size;
	int range_num;

	/* statistics accumulated during the conversion */
	u64			events_size;	/* total bytes of processed records */
	u64			events_count;	/* samples converted */
	u64			non_sample_count; /* non-sample records converted */
	u64			skipped;	/* samples dropped by time filtering */

	/* Ordered events configured queue size. */
	u64			queue_size;
};
107 
108 static int value_set(struct bt_ctf_field_type *type,
109 		     struct bt_ctf_event *event,
110 		     const char *name, u64 val)
111 {
112 	struct bt_ctf_field *field;
113 	bool sign = bt_ctf_field_type_integer_get_signed(type);
114 	int ret;
115 
116 	field = bt_ctf_field_create(type);
117 	if (!field) {
118 		pr_err("failed to create a field %s\n", name);
119 		return -1;
120 	}
121 
122 	if (sign) {
123 		ret = bt_ctf_field_signed_integer_set_value(field, val);
124 		if (ret) {
125 			pr_err("failed to set field value %s\n", name);
126 			goto err;
127 		}
128 	} else {
129 		ret = bt_ctf_field_unsigned_integer_set_value(field, val);
130 		if (ret) {
131 			pr_err("failed to set field value %s\n", name);
132 			goto err;
133 		}
134 	}
135 
136 	ret = bt_ctf_event_set_payload(event, name, field);
137 	if (ret) {
138 		pr_err("failed to set payload %s\n", name);
139 		goto err;
140 	}
141 
142 	pr2("  SET [%s = %" PRIu64 "]\n", name, val);
143 
144 err:
145 	bt_ctf_field_put(field);
146 	return ret;
147 }
148 
/*
 * Generate value_set_<name>() wrappers binding value_set() to the matching
 * pre-created field type in cw->data.  Marked __maybe_unused since not
 * every generated wrapper is referenced in all configurations.
 */
#define __FUNC_VALUE_SET(_name, _val_type)				\
static __maybe_unused int value_set_##_name(struct ctf_writer *cw,	\
			     struct bt_ctf_event *event,		\
			     const char *name,				\
			     _val_type val)				\
{									\
	struct bt_ctf_field_type *type = cw->data._name;		\
	return value_set(type, event, name, (u64) val);			\
}

/* Shorthand for wrappers whose C type name matches the cw->data member. */
#define FUNC_VALUE_SET(_name) __FUNC_VALUE_SET(_name, _name)

FUNC_VALUE_SET(s32)
FUNC_VALUE_SET(u32)
FUNC_VALUE_SET(s64)
FUNC_VALUE_SET(u64)
__FUNC_VALUE_SET(u64_hex, u64)
166 
167 static int string_set_value(struct bt_ctf_field *field, const char *string);
168 static __maybe_unused int
169 value_set_string(struct ctf_writer *cw, struct bt_ctf_event *event,
170 		 const char *name, const char *string)
171 {
172 	struct bt_ctf_field_type *type = cw->data.string;
173 	struct bt_ctf_field *field;
174 	int ret = 0;
175 
176 	field = bt_ctf_field_create(type);
177 	if (!field) {
178 		pr_err("failed to create a field %s\n", name);
179 		return -1;
180 	}
181 
182 	ret = string_set_value(field, string);
183 	if (ret) {
184 		pr_err("failed to set value %s\n", name);
185 		goto err_put_field;
186 	}
187 
188 	ret = bt_ctf_event_set_payload(event, name, field);
189 	if (ret)
190 		pr_err("failed to set payload %s\n", name);
191 
192 err_put_field:
193 	bt_ctf_field_put(field);
194 	return ret;
195 }
196 
197 static struct bt_ctf_field_type*
198 get_tracepoint_field_type(struct ctf_writer *cw, struct tep_format_field *field)
199 {
200 	unsigned long flags = field->flags;
201 
202 	if (flags & TEP_FIELD_IS_STRING)
203 		return cw->data.string;
204 
205 	if (!(flags & TEP_FIELD_IS_SIGNED)) {
206 		/* unsigned long are mostly pointers */
207 		if (flags & TEP_FIELD_IS_LONG || flags & TEP_FIELD_IS_POINTER)
208 			return cw->data.u64_hex;
209 	}
210 
211 	if (flags & TEP_FIELD_IS_SIGNED) {
212 		if (field->size == 8)
213 			return cw->data.s64;
214 		else
215 			return cw->data.s32;
216 	}
217 
218 	if (field->size == 8)
219 		return cw->data.u64;
220 	else
221 		return cw->data.u32;
222 }
223 
/*
 * Sign-extend a 'size'-byte value read into an unsigned long long so that
 * negative tracepoint values survive the round trip.  Sizes other than
 * 1/2/4 are returned untouched (8 needs no extension; anything else is a
 * bug in the caller).
 */
static unsigned long long adjust_signedness(unsigned long long value_int, int size)
{
	unsigned long long value_mask;

	/*
	 * value_mask = (1 << (size * 8 - 1)) - 1, spelled out per size
	 * for code readers.
	 */
	switch (size) {
	case 1:
		value_mask = 0x7fULL;
		break;
	case 2:
		value_mask = 0x7fffULL;
		break;
	case 4:
		value_mask = 0x7fffffffULL;
		break;
	default:
		return value_int;
	}

	/* Sign bit (and everything above it) clear: positive, no change. */
	if (!(value_int & ~value_mask))
		return value_int;

	/* Negative: set all the upper bits to extend the sign. */
	return value_int | ~value_mask;
}
260 
/*
 * Store 'string' into a CTF string field, escaping every non-printable
 * character as "\xNN".  A printable string is passed through untouched;
 * otherwise an escaped copy is built on the heap and freed afterwards.
 */
static int string_set_value(struct bt_ctf_field *field, const char *string)
{
	char *buffer = NULL;	/* allocated lazily, on first unprintable char */
	size_t len = strlen(string), i, p;	/* i: source index, p: dest index */
	int err;

	for (i = p = 0; i < len; i++, p++) {
		if (isprint(string[i])) {
			if (!buffer)
				continue;
			buffer[p] = string[i];
		} else {
			char numstr[5];	/* "\xNN" + NUL */

			snprintf(numstr, sizeof(numstr), "\\x%02x",
				 (unsigned int)(string[i]) & 0xff);

			if (!buffer) {
				/*
				 * Worst case: every remaining character
				 * expands to 4 bytes, plus terminator room.
				 */
				buffer = zalloc(i + (len - i) * 4 + 2);
				if (!buffer) {
					pr_err("failed to set unprintable string '%s'\n", string);
					return bt_ctf_field_string_set_value(field, "UNPRINTABLE-STRING");
				}
				if (i > 0)
					strncpy(buffer, string, i);	/* printable prefix; zalloc keeps it terminated */
			}
			memcpy(buffer + p, numstr, 4);
			p += 3;	/* the loop's p++ covers the 4th escape byte */
		}
	}

	if (!buffer)
		return bt_ctf_field_string_set_value(field, string);
	err = bt_ctf_field_string_set_value(field, buffer);
	free(buffer);
	return err;
}
298 
299 static int add_tracepoint_field_value(struct ctf_writer *cw,
300 				      struct bt_ctf_event_class *event_class,
301 				      struct bt_ctf_event *event,
302 				      struct perf_sample *sample,
303 				      struct tep_format_field *fmtf)
304 {
305 	struct bt_ctf_field_type *type;
306 	struct bt_ctf_field *array_field;
307 	struct bt_ctf_field *field;
308 	const char *name = fmtf->name;
309 	void *data = sample->raw_data;
310 	unsigned long flags = fmtf->flags;
311 	unsigned int n_items;
312 	unsigned int i;
313 	unsigned int offset;
314 	unsigned int len;
315 	int ret;
316 
317 	name = fmtf->alias;
318 	offset = fmtf->offset;
319 	len = fmtf->size;
320 	if (flags & TEP_FIELD_IS_STRING)
321 		flags &= ~TEP_FIELD_IS_ARRAY;
322 
323 	if (flags & TEP_FIELD_IS_DYNAMIC) {
324 		unsigned long long tmp_val;
325 
326 		tmp_val = tep_read_number(fmtf->event->tep,
327 					  data + offset, len);
328 		offset = tmp_val;
329 		len = offset >> 16;
330 		offset &= 0xffff;
331 		if (tep_field_is_relative(flags))
332 			offset += fmtf->offset + fmtf->size;
333 	}
334 
335 	if (flags & TEP_FIELD_IS_ARRAY) {
336 
337 		type = bt_ctf_event_class_get_field_by_name(
338 				event_class, name);
339 		array_field = bt_ctf_field_create(type);
340 		bt_ctf_field_type_put(type);
341 		if (!array_field) {
342 			pr_err("Failed to create array type %s\n", name);
343 			return -1;
344 		}
345 
346 		len = fmtf->size / fmtf->arraylen;
347 		n_items = fmtf->arraylen;
348 	} else {
349 		n_items = 1;
350 		array_field = NULL;
351 	}
352 
353 	type = get_tracepoint_field_type(cw, fmtf);
354 
355 	for (i = 0; i < n_items; i++) {
356 		if (flags & TEP_FIELD_IS_ARRAY)
357 			field = bt_ctf_field_array_get_field(array_field, i);
358 		else
359 			field = bt_ctf_field_create(type);
360 
361 		if (!field) {
362 			pr_err("failed to create a field %s\n", name);
363 			return -1;
364 		}
365 
366 		if (flags & TEP_FIELD_IS_STRING)
367 			ret = string_set_value(field, data + offset + i * len);
368 		else {
369 			unsigned long long value_int;
370 
371 			value_int = tep_read_number(
372 					fmtf->event->tep,
373 					data + offset + i * len, len);
374 
375 			if (!(flags & TEP_FIELD_IS_SIGNED))
376 				ret = bt_ctf_field_unsigned_integer_set_value(
377 						field, value_int);
378 			else
379 				ret = bt_ctf_field_signed_integer_set_value(
380 						field, adjust_signedness(value_int, len));
381 		}
382 
383 		if (ret) {
384 			pr_err("failed to set file value %s\n", name);
385 			goto err_put_field;
386 		}
387 		if (!(flags & TEP_FIELD_IS_ARRAY)) {
388 			ret = bt_ctf_event_set_payload(event, name, field);
389 			if (ret) {
390 				pr_err("failed to set payload %s\n", name);
391 				goto err_put_field;
392 			}
393 		}
394 		bt_ctf_field_put(field);
395 	}
396 	if (flags & TEP_FIELD_IS_ARRAY) {
397 		ret = bt_ctf_event_set_payload(event, name, array_field);
398 		if (ret) {
399 			pr_err("Failed add payload array %s\n", name);
400 			return -1;
401 		}
402 		bt_ctf_field_put(array_field);
403 	}
404 	return 0;
405 
406 err_put_field:
407 	bt_ctf_field_put(field);
408 	return -1;
409 }
410 
411 static int add_tracepoint_fields_values(struct ctf_writer *cw,
412 					struct bt_ctf_event_class *event_class,
413 					struct bt_ctf_event *event,
414 					struct tep_format_field *fields,
415 					struct perf_sample *sample)
416 {
417 	struct tep_format_field *field;
418 	int ret;
419 
420 	for (field = fields; field; field = field->next) {
421 		ret = add_tracepoint_field_value(cw, event_class, event, sample,
422 				field);
423 		if (ret)
424 			return -1;
425 	}
426 	return 0;
427 }
428 
429 static int add_tracepoint_values(struct ctf_writer *cw,
430 				 struct bt_ctf_event_class *event_class,
431 				 struct bt_ctf_event *event,
432 				 struct evsel *evsel,
433 				 struct perf_sample *sample)
434 {
435 	const struct tep_event *tp_format = evsel__tp_format(evsel);
436 	struct tep_format_field *common_fields = tp_format->format.common_fields;
437 	struct tep_format_field *fields        = tp_format->format.fields;
438 	int ret;
439 
440 	ret = add_tracepoint_fields_values(cw, event_class, event,
441 					   common_fields, sample);
442 	if (!ret)
443 		ret = add_tracepoint_fields_values(cw, event_class, event,
444 						   fields, sample);
445 
446 	return ret;
447 }
448 
449 static int
450 add_bpf_output_values(struct bt_ctf_event_class *event_class,
451 		      struct bt_ctf_event *event,
452 		      struct perf_sample *sample)
453 {
454 	struct bt_ctf_field_type *len_type, *seq_type;
455 	struct bt_ctf_field *len_field, *seq_field;
456 	unsigned int raw_size = sample->raw_size;
457 	unsigned int nr_elements = raw_size / sizeof(u32);
458 	unsigned int i;
459 	int ret;
460 
461 	if (nr_elements * sizeof(u32) != raw_size)
462 		pr_warning("Incorrect raw_size (%u) in bpf output event, skip %zu bytes\n",
463 			   raw_size, nr_elements * sizeof(u32) - raw_size);
464 
465 	len_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_len");
466 	len_field = bt_ctf_field_create(len_type);
467 	if (!len_field) {
468 		pr_err("failed to create 'raw_len' for bpf output event\n");
469 		ret = -1;
470 		goto put_len_type;
471 	}
472 
473 	ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements);
474 	if (ret) {
475 		pr_err("failed to set field value for raw_len\n");
476 		goto put_len_field;
477 	}
478 	ret = bt_ctf_event_set_payload(event, "raw_len", len_field);
479 	if (ret) {
480 		pr_err("failed to set payload to raw_len\n");
481 		goto put_len_field;
482 	}
483 
484 	seq_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_data");
485 	seq_field = bt_ctf_field_create(seq_type);
486 	if (!seq_field) {
487 		pr_err("failed to create 'raw_data' for bpf output event\n");
488 		ret = -1;
489 		goto put_seq_type;
490 	}
491 
492 	ret = bt_ctf_field_sequence_set_length(seq_field, len_field);
493 	if (ret) {
494 		pr_err("failed to set length of 'raw_data'\n");
495 		goto put_seq_field;
496 	}
497 
498 	for (i = 0; i < nr_elements; i++) {
499 		struct bt_ctf_field *elem_field =
500 			bt_ctf_field_sequence_get_field(seq_field, i);
501 
502 		ret = bt_ctf_field_unsigned_integer_set_value(elem_field,
503 				((u32 *)(sample->raw_data))[i]);
504 
505 		bt_ctf_field_put(elem_field);
506 		if (ret) {
507 			pr_err("failed to set raw_data[%d]\n", i);
508 			goto put_seq_field;
509 		}
510 	}
511 
512 	ret = bt_ctf_event_set_payload(event, "raw_data", seq_field);
513 	if (ret)
514 		pr_err("failed to set payload for raw_data\n");
515 
516 put_seq_field:
517 	bt_ctf_field_put(seq_field);
518 put_seq_type:
519 	bt_ctf_field_type_put(seq_type);
520 put_len_field:
521 	bt_ctf_field_put(len_field);
522 put_len_type:
523 	bt_ctf_field_type_put(len_type);
524 	return ret;
525 }
526 
527 static int
528 add_callchain_output_values(struct bt_ctf_event_class *event_class,
529 		      struct bt_ctf_event *event,
530 		      struct ip_callchain *callchain)
531 {
532 	struct bt_ctf_field_type *len_type, *seq_type;
533 	struct bt_ctf_field *len_field, *seq_field;
534 	unsigned int nr_elements = callchain->nr;
535 	unsigned int i;
536 	int ret;
537 
538 	len_type = bt_ctf_event_class_get_field_by_name(
539 			event_class, "perf_callchain_size");
540 	len_field = bt_ctf_field_create(len_type);
541 	if (!len_field) {
542 		pr_err("failed to create 'perf_callchain_size' for callchain output event\n");
543 		ret = -1;
544 		goto put_len_type;
545 	}
546 
547 	ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements);
548 	if (ret) {
549 		pr_err("failed to set field value for perf_callchain_size\n");
550 		goto put_len_field;
551 	}
552 	ret = bt_ctf_event_set_payload(event, "perf_callchain_size", len_field);
553 	if (ret) {
554 		pr_err("failed to set payload to perf_callchain_size\n");
555 		goto put_len_field;
556 	}
557 
558 	seq_type = bt_ctf_event_class_get_field_by_name(
559 			event_class, "perf_callchain");
560 	seq_field = bt_ctf_field_create(seq_type);
561 	if (!seq_field) {
562 		pr_err("failed to create 'perf_callchain' for callchain output event\n");
563 		ret = -1;
564 		goto put_seq_type;
565 	}
566 
567 	ret = bt_ctf_field_sequence_set_length(seq_field, len_field);
568 	if (ret) {
569 		pr_err("failed to set length of 'perf_callchain'\n");
570 		goto put_seq_field;
571 	}
572 
573 	for (i = 0; i < nr_elements; i++) {
574 		struct bt_ctf_field *elem_field =
575 			bt_ctf_field_sequence_get_field(seq_field, i);
576 
577 		ret = bt_ctf_field_unsigned_integer_set_value(elem_field,
578 				((u64 *)(callchain->ips))[i]);
579 
580 		bt_ctf_field_put(elem_field);
581 		if (ret) {
582 			pr_err("failed to set callchain[%d]\n", i);
583 			goto put_seq_field;
584 		}
585 	}
586 
587 	ret = bt_ctf_event_set_payload(event, "perf_callchain", seq_field);
588 	if (ret)
589 		pr_err("failed to set payload for raw_data\n");
590 
591 put_seq_field:
592 	bt_ctf_field_put(seq_field);
593 put_seq_type:
594 	bt_ctf_field_type_put(seq_type);
595 put_len_field:
596 	bt_ctf_field_put(len_field);
597 put_len_type:
598 	bt_ctf_field_type_put(len_type);
599 	return ret;
600 }
601 
602 static int add_generic_values(struct ctf_writer *cw,
603 			      struct bt_ctf_event *event,
604 			      struct evsel *evsel,
605 			      struct perf_sample *sample)
606 {
607 	u64 type = evsel->core.attr.sample_type;
608 	int ret;
609 
610 	/*
611 	 * missing:
612 	 *   PERF_SAMPLE_TIME         - not needed as we have it in
613 	 *                              ctf event header
614 	 *   PERF_SAMPLE_READ         - TODO
615 	 *   PERF_SAMPLE_RAW          - tracepoint fields are handled separately
616 	 *   PERF_SAMPLE_BRANCH_STACK - TODO
617 	 *   PERF_SAMPLE_REGS_USER    - TODO
618 	 *   PERF_SAMPLE_STACK_USER   - TODO
619 	 */
620 
621 	if (type & PERF_SAMPLE_IP) {
622 		ret = value_set_u64_hex(cw, event, "perf_ip", sample->ip);
623 		if (ret)
624 			return -1;
625 	}
626 
627 	if (type & PERF_SAMPLE_TID) {
628 		ret = value_set_s32(cw, event, "perf_tid", sample->tid);
629 		if (ret)
630 			return -1;
631 
632 		ret = value_set_s32(cw, event, "perf_pid", sample->pid);
633 		if (ret)
634 			return -1;
635 	}
636 
637 	if ((type & PERF_SAMPLE_ID) ||
638 	    (type & PERF_SAMPLE_IDENTIFIER)) {
639 		ret = value_set_u64(cw, event, "perf_id", sample->id);
640 		if (ret)
641 			return -1;
642 	}
643 
644 	if (type & PERF_SAMPLE_STREAM_ID) {
645 		ret = value_set_u64(cw, event, "perf_stream_id", sample->stream_id);
646 		if (ret)
647 			return -1;
648 	}
649 
650 	if (type & PERF_SAMPLE_PERIOD) {
651 		ret = value_set_u64(cw, event, "perf_period", sample->period);
652 		if (ret)
653 			return -1;
654 	}
655 
656 	if (type & PERF_SAMPLE_WEIGHT) {
657 		ret = value_set_u64(cw, event, "perf_weight", sample->weight);
658 		if (ret)
659 			return -1;
660 	}
661 
662 	if (type & PERF_SAMPLE_DATA_SRC) {
663 		ret = value_set_u64(cw, event, "perf_data_src",
664 				sample->data_src);
665 		if (ret)
666 			return -1;
667 	}
668 
669 	if (type & PERF_SAMPLE_TRANSACTION) {
670 		ret = value_set_u64(cw, event, "perf_transaction",
671 				sample->transaction);
672 		if (ret)
673 			return -1;
674 	}
675 
676 	return 0;
677 }
678 
679 static int ctf_stream__flush(struct ctf_stream *cs)
680 {
681 	int err = 0;
682 
683 	if (cs) {
684 		err = bt_ctf_stream_flush(cs->stream);
685 		if (err)
686 			pr_err("CTF stream %d flush failed\n", cs->cpu);
687 
688 		pr("Flush stream for cpu %d (%u samples)\n",
689 		   cs->cpu, cs->count);
690 
691 		cs->count = 0;
692 	}
693 
694 	return err;
695 }
696 
/*
 * Create the CTF stream for 'cpu': allocate the wrapper, create the
 * babeltrace stream, and record the CPU number in the packet context's
 * "cpu_id" field.  Returns NULL on any failure.
 */
static struct ctf_stream *ctf_stream__create(struct ctf_writer *cw, int cpu)
{
	struct ctf_stream *cs;
	struct bt_ctf_field *pkt_ctx   = NULL;
	struct bt_ctf_field *cpu_field = NULL;
	struct bt_ctf_stream *stream   = NULL;
	int ret;

	cs = zalloc(sizeof(*cs));
	if (!cs) {
		pr_err("Failed to allocate ctf stream\n");
		return NULL;
	}

	stream = bt_ctf_writer_create_stream(cw->writer, cw->stream_class);
	if (!stream) {
		pr_err("Failed to create CTF stream\n");
		goto out;
	}

	pkt_ctx = bt_ctf_stream_get_packet_context(stream);
	if (!pkt_ctx) {
		pr_err("Failed to obtain packet context\n");
		goto out;
	}

	/* pkt_ctx is no longer needed once cpu_field is extracted. */
	cpu_field = bt_ctf_field_structure_get_field(pkt_ctx, "cpu_id");
	bt_ctf_field_put(pkt_ctx);
	if (!cpu_field) {
		pr_err("Failed to obtain cpu field\n");
		goto out;
	}

	ret = bt_ctf_field_unsigned_integer_set_value(cpu_field, (u32) cpu);
	if (ret) {
		pr_err("Failed to update CPU number\n");
		goto out;
	}

	bt_ctf_field_put(cpu_field);

	cs->cpu    = cpu;
	cs->stream = stream;
	return cs;

out:
	/* drop whatever references were taken before the failure */
	if (cpu_field)
		bt_ctf_field_put(cpu_field);
	if (stream)
		bt_ctf_stream_put(stream);

	free(cs);
	return NULL;
}
751 
752 static void ctf_stream__delete(struct ctf_stream *cs)
753 {
754 	if (cs) {
755 		bt_ctf_stream_put(cs->stream);
756 		free(cs);
757 	}
758 }
759 
760 static struct ctf_stream *ctf_stream(struct ctf_writer *cw, int cpu)
761 {
762 	struct ctf_stream *cs = cw->stream[cpu];
763 
764 	if (!cs) {
765 		cs = ctf_stream__create(cw, cpu);
766 		cw->stream[cpu] = cs;
767 	}
768 
769 	return cs;
770 }
771 
772 static int get_sample_cpu(struct ctf_writer *cw, struct perf_sample *sample,
773 			  struct evsel *evsel)
774 {
775 	int cpu = 0;
776 
777 	if (evsel->core.attr.sample_type & PERF_SAMPLE_CPU)
778 		cpu = sample->cpu;
779 
780 	if (cpu > cw->stream_cnt) {
781 		pr_err("Event was recorded for CPU %d, limit is at %d.\n",
782 			cpu, cw->stream_cnt);
783 		cpu = 0;
784 	}
785 
786 	return cpu;
787 }
788 
/* Number of buffered events after which a stream is flushed. */
#define STREAM_FLUSH_COUNT 100000

/*
 * Currently we have no other way to determine the
 * time for the stream flush other than keep track
 * of the number of events and check it against
 * threshold.
 */
static bool is_flush_needed(struct ctf_stream *cs)
{
	return cs->count >= STREAM_FLUSH_COUNT;
}
801 
802 static int process_sample_event(const struct perf_tool *tool,
803 				union perf_event *_event,
804 				struct perf_sample *sample,
805 				struct evsel *evsel,
806 				struct machine *machine __maybe_unused)
807 {
808 	struct convert *c = container_of(tool, struct convert, tool);
809 	struct evsel_priv *priv = evsel->priv;
810 	struct ctf_writer *cw = &c->writer;
811 	struct ctf_stream *cs;
812 	struct bt_ctf_event_class *event_class;
813 	struct bt_ctf_event *event;
814 	int ret;
815 	unsigned long type = evsel->core.attr.sample_type;
816 
817 	if (WARN_ONCE(!priv, "Failed to setup all events.\n"))
818 		return 0;
819 
820 	if (perf_time__ranges_skip_sample(c->ptime_range, c->range_num, sample->time)) {
821 		++c->skipped;
822 		return 0;
823 	}
824 
825 	event_class = priv->event_class;
826 
827 	/* update stats */
828 	c->events_count++;
829 	c->events_size += _event->header.size;
830 
831 	pr_time2(sample->time, "sample %" PRIu64 "\n", c->events_count);
832 
833 	event = bt_ctf_event_create(event_class);
834 	if (!event) {
835 		pr_err("Failed to create an CTF event\n");
836 		return -1;
837 	}
838 
839 	bt_ctf_clock_set_time(cw->clock, sample->time);
840 
841 	ret = add_generic_values(cw, event, evsel, sample);
842 	if (ret)
843 		return -1;
844 
845 	if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
846 		ret = add_tracepoint_values(cw, event_class, event,
847 					    evsel, sample);
848 		if (ret)
849 			return -1;
850 	}
851 
852 	if (type & PERF_SAMPLE_CALLCHAIN) {
853 		ret = add_callchain_output_values(event_class,
854 				event, sample->callchain);
855 		if (ret)
856 			return -1;
857 	}
858 
859 	if (evsel__is_bpf_output(evsel)) {
860 		ret = add_bpf_output_values(event_class, event, sample);
861 		if (ret)
862 			return -1;
863 	}
864 
865 	cs = ctf_stream(cw, get_sample_cpu(cw, sample, evsel));
866 	if (cs) {
867 		if (is_flush_needed(cs))
868 			ctf_stream__flush(cs);
869 
870 		cs->count++;
871 		bt_ctf_stream_append_event(cs->stream, event);
872 	}
873 
874 	bt_ctf_event_put(event);
875 	return cs ? 0 : -1;
876 }
877 
/*
 * Copy one member of the perf_event record (_event->_name._field) into the
 * CTF event payload under the member's own name, via the matching
 * value_set_<type>() helper.  Expands inside the function body generated
 * by __FUNC_PROCESS_NON_SAMPLE() below, where 'cw', 'event', '_event'
 * and 'ret' are in scope.
 */
#define __NON_SAMPLE_SET_FIELD(_name, _type, _field) 	\
do {							\
	ret = value_set_##_type(cw, event, #_field, _event->_name._field);\
	if (ret)					\
		return -1;				\
} while(0)

/*
 * Generate a process_<name>_event() tool callback that converts a
 * non-sample record into a CTF event on stream 0, then chains to the
 * stock perf_event__process_<name>() handler.
 *
 * NOTE(review): when 'body' fails it returns -1 without putting the
 * reference taken by bt_ctf_event_create() — looks like an event leak on
 * that error path; confirm against babeltrace refcounting rules.
 */
#define __FUNC_PROCESS_NON_SAMPLE(_name, body) 	\
static int process_##_name##_event(const struct perf_tool *tool,	\
				   union perf_event *_event,	\
				   struct perf_sample *sample,	\
				   struct machine *machine)	\
{								\
	struct convert *c = container_of(tool, struct convert, tool);\
	struct ctf_writer *cw = &c->writer;			\
	struct bt_ctf_event_class *event_class = cw->_name##_class;\
	struct bt_ctf_event *event;				\
	struct ctf_stream *cs;					\
	int ret;						\
								\
	c->non_sample_count++;					\
	c->events_size += _event->header.size;			\
	event = bt_ctf_event_create(event_class);		\
	if (!event) {						\
		pr_err("Failed to create an CTF event\n");	\
		return -1;					\
	}							\
								\
	bt_ctf_clock_set_time(cw->clock, sample->time);		\
	body							\
	cs = ctf_stream(cw, 0);					\
	if (cs) {						\
		if (is_flush_needed(cs))			\
			ctf_stream__flush(cs);			\
								\
		cs->count++;					\
		bt_ctf_stream_append_event(cs->stream, event);	\
	}							\
	bt_ctf_event_put(event);				\
								\
	return perf_event__process_##_name(tool, _event, sample, machine);\
}
920 
/* PERF_RECORD_COMM -> CTF "perf_comm" style event. */
__FUNC_PROCESS_NON_SAMPLE(comm,
	__NON_SAMPLE_SET_FIELD(comm, u32, pid);
	__NON_SAMPLE_SET_FIELD(comm, u32, tid);
	__NON_SAMPLE_SET_FIELD(comm, string, comm);
)
/* PERF_RECORD_FORK. */
__FUNC_PROCESS_NON_SAMPLE(fork,
	__NON_SAMPLE_SET_FIELD(fork, u32, pid);
	__NON_SAMPLE_SET_FIELD(fork, u32, ppid);
	__NON_SAMPLE_SET_FIELD(fork, u32, tid);
	__NON_SAMPLE_SET_FIELD(fork, u32, ptid);
	__NON_SAMPLE_SET_FIELD(fork, u64, time);
)

/*
 * PERF_RECORD_EXIT: the code reads _event->fork here as well —
 * presumably exit records share the fork member's layout in
 * union perf_event; verify against the perf event ABI.
 */
__FUNC_PROCESS_NON_SAMPLE(exit,
	__NON_SAMPLE_SET_FIELD(fork, u32, pid);
	__NON_SAMPLE_SET_FIELD(fork, u32, ppid);
	__NON_SAMPLE_SET_FIELD(fork, u32, tid);
	__NON_SAMPLE_SET_FIELD(fork, u32, ptid);
	__NON_SAMPLE_SET_FIELD(fork, u64, time);
)
/* PERF_RECORD_MMAP. */
__FUNC_PROCESS_NON_SAMPLE(mmap,
	__NON_SAMPLE_SET_FIELD(mmap, u32, pid);
	__NON_SAMPLE_SET_FIELD(mmap, u32, tid);
	__NON_SAMPLE_SET_FIELD(mmap, u64_hex, start);
	__NON_SAMPLE_SET_FIELD(mmap, string, filename);
)
/* PERF_RECORD_MMAP2. */
__FUNC_PROCESS_NON_SAMPLE(mmap2,
	__NON_SAMPLE_SET_FIELD(mmap2, u32, pid);
	__NON_SAMPLE_SET_FIELD(mmap2, u32, tid);
	__NON_SAMPLE_SET_FIELD(mmap2, u64_hex, start);
	__NON_SAMPLE_SET_FIELD(mmap2, string, filename);
)
#undef __NON_SAMPLE_SET_FIELD
#undef __FUNC_PROCESS_NON_SAMPLE
955 
/* If dup < 0, add a prefix. Else, add _dupl_X suffix. */
static char *change_name(char *name, char *orig_name, int dup)
{
	char *fixed = NULL;
	size_t sz;

	if (!name)
		name = orig_name;

	/* Only single-digit duplicate suffixes are supported. */
	if (dup >= 10)
		goto out;

	/*
	 * Add '_' prefix to potential keywork.  According to
	 * Mathieu Desnoyers (https://lore.kernel.org/lkml/1074266107.40857.1422045946295.JavaMail.zimbra@efficios.com),
	 * further CTF spec updating may require us to use '$'.
	 */
	sz = (dup < 0) ? strlen(name) + sizeof("_")
		       : strlen(orig_name) + sizeof("_dupl_X");

	fixed = malloc(sz);
	if (!fixed)
		goto out;

	if (dup < 0)
		snprintf(fixed, sz, "_%s", name);
	else
		snprintf(fixed, sz, "%s_dupl_%d", orig_name, dup);

out:
	/* Any intermediate name passed in is consumed by this function. */
	if (name != orig_name)
		free(name);
	return fixed;
}
991 
992 static int event_class_add_field(struct bt_ctf_event_class *event_class,
993 		struct bt_ctf_field_type *type,
994 		struct tep_format_field *field)
995 {
996 	struct bt_ctf_field_type *t = NULL;
997 	char *name;
998 	int dup = 1;
999 	int ret;
1000 
1001 	/* alias was already assigned */
1002 	if (field->alias != field->name)
1003 		return bt_ctf_event_class_add_field(event_class, type,
1004 				(char *)field->alias);
1005 
1006 	name = field->name;
1007 
1008 	/* If 'name' is a keywork, add prefix. */
1009 	if (bt_ctf_validate_identifier(name))
1010 		name = change_name(name, field->name, -1);
1011 
1012 	if (!name) {
1013 		pr_err("Failed to fix invalid identifier.");
1014 		return -1;
1015 	}
1016 	while ((t = bt_ctf_event_class_get_field_by_name(event_class, name))) {
1017 		bt_ctf_field_type_put(t);
1018 		name = change_name(name, field->name, dup++);
1019 		if (!name) {
1020 			pr_err("Failed to create dup name for '%s'\n", field->name);
1021 			return -1;
1022 		}
1023 	}
1024 
1025 	ret = bt_ctf_event_class_add_field(event_class, type, name);
1026 	if (!ret)
1027 		field->alias = name;
1028 
1029 	return ret;
1030 }
1031 
/*
 * Declare every field of a tracepoint format list on the CTF event class,
 * wrapping array fields in a CTF array type.  Returns 0 on success.
 */
static int add_tracepoint_fields_types(struct ctf_writer *cw,
				       struct tep_format_field *fields,
				       struct bt_ctf_event_class *event_class)
{
	struct tep_format_field *field;
	int ret;

	for (field = fields; field; field = field->next) {
		struct bt_ctf_field_type *type;
		unsigned long flags = field->flags;

		pr2("  field '%s'\n", field->name);

		type = get_tracepoint_field_type(cw, field);
		if (!type)
			return -1;

		/*
		 * A string is an array of chars. For this we use the string
		 * type and don't care that it is an array. What we don't
		 * support is an array of strings.
		 */
		if (flags & TEP_FIELD_IS_STRING)
			flags &= ~TEP_FIELD_IS_ARRAY;

		if (flags & TEP_FIELD_IS_ARRAY)
			type = bt_ctf_field_type_array_create(type, field->arraylen);

		ret = event_class_add_field(event_class, type, field);

		/* the array type was created here; drop our reference */
		if (flags & TEP_FIELD_IS_ARRAY)
			bt_ctf_field_type_put(type);

		if (ret) {
			pr_err("Failed to add field '%s': %d\n",
					field->name, ret);
			return -1;
		}
	}

	return 0;
}
1074 
1075 static int add_tracepoint_types(struct ctf_writer *cw,
1076 				struct evsel *evsel,
1077 				struct bt_ctf_event_class *class)
1078 {
1079 	const struct tep_event *tp_format = evsel__tp_format(evsel);
1080 	struct tep_format_field *common_fields = tp_format ? tp_format->format.common_fields : NULL;
1081 	struct tep_format_field *fields        = tp_format ? tp_format->format.fields : NULL;
1082 	int ret;
1083 
1084 	ret = add_tracepoint_fields_types(cw, common_fields, class);
1085 	if (!ret)
1086 		ret = add_tracepoint_fields_types(cw, fields, class);
1087 
1088 	return ret;
1089 }
1090 
1091 static int add_bpf_output_types(struct ctf_writer *cw,
1092 				struct bt_ctf_event_class *class)
1093 {
1094 	struct bt_ctf_field_type *len_type = cw->data.u32;
1095 	struct bt_ctf_field_type *seq_base_type = cw->data.u32_hex;
1096 	struct bt_ctf_field_type *seq_type;
1097 	int ret;
1098 
1099 	ret = bt_ctf_event_class_add_field(class, len_type, "raw_len");
1100 	if (ret)
1101 		return ret;
1102 
1103 	seq_type = bt_ctf_field_type_sequence_create(seq_base_type, "raw_len");
1104 	if (!seq_type)
1105 		return -1;
1106 
1107 	return bt_ctf_event_class_add_field(class, seq_type, "raw_data");
1108 }
1109 
/*
 * Add the generic perf_* fields selected by the evsel's sample_type
 * mask to 'event_class'.  Only the bits handled below are translated;
 * the rest are listed in the comment that follows.
 *
 * Returns 0 on success, -1 on failure.
 */
static int add_generic_types(struct ctf_writer *cw, struct evsel *evsel,
			     struct bt_ctf_event_class *event_class)
{
	u64 type = evsel->core.attr.sample_type;

	/*
	 * missing:
	 *   PERF_SAMPLE_TIME         - not needed as we have it in
	 *                              ctf event header
	 *   PERF_SAMPLE_READ         - TODO
	 *   PERF_SAMPLE_RAW          - tracepoint fields and BPF output
	 *                              are handled separately
	 *   PERF_SAMPLE_BRANCH_STACK - TODO
	 *   PERF_SAMPLE_REGS_USER    - TODO
	 *   PERF_SAMPLE_STACK_USER   - TODO
	 *
	 * (PERF_SAMPLE_CALLCHAIN is handled below.)
	 */

/* Add one field to 'cl' or bail out of the enclosing function. */
#define ADD_FIELD(cl, t, n)						\
	do {								\
		pr2("  field '%s'\n", n);				\
		if (bt_ctf_event_class_add_field(cl, t, n)) {		\
			pr_err("Failed to add field '%s';\n", n);	\
			return -1;					\
		}							\
	} while (0)

	if (type & PERF_SAMPLE_IP)
		ADD_FIELD(event_class, cw->data.u64_hex, "perf_ip");

	if (type & PERF_SAMPLE_TID) {
		ADD_FIELD(event_class, cw->data.s32, "perf_tid");
		ADD_FIELD(event_class, cw->data.s32, "perf_pid");
	}

	/* Both ID and IDENTIFIER map onto the single perf_id field. */
	if ((type & PERF_SAMPLE_ID) ||
	    (type & PERF_SAMPLE_IDENTIFIER))
		ADD_FIELD(event_class, cw->data.u64, "perf_id");

	if (type & PERF_SAMPLE_STREAM_ID)
		ADD_FIELD(event_class, cw->data.u64, "perf_stream_id");

	if (type & PERF_SAMPLE_PERIOD)
		ADD_FIELD(event_class, cw->data.u64, "perf_period");

	if (type & PERF_SAMPLE_WEIGHT)
		ADD_FIELD(event_class, cw->data.u64, "perf_weight");

	if (type & PERF_SAMPLE_DATA_SRC)
		ADD_FIELD(event_class, cw->data.u64, "perf_data_src");

	if (type & PERF_SAMPLE_TRANSACTION)
		ADD_FIELD(event_class, cw->data.u64, "perf_transaction");

	/*
	 * The callchain is stored as a u32 size followed by a sequence of
	 * hex addresses whose length refers back to perf_callchain_size.
	 */
	if (type & PERF_SAMPLE_CALLCHAIN) {
		ADD_FIELD(event_class, cw->data.u32, "perf_callchain_size");
		ADD_FIELD(event_class,
			bt_ctf_field_type_sequence_create(
				cw->data.u64_hex, "perf_callchain_size"),
			"perf_callchain");
	}

#undef ADD_FIELD
	return 0;
}
1175 
/*
 * Create a CTF event class named after the evsel, populate it with the
 * generic perf fields plus tracepoint or BPF-output fields where
 * applicable, register it on the writer's stream class and remember it
 * in evsel->priv for later lookup by the sample-processing code.
 *
 * Returns 0 on success, -1 on failure.
 */
static int add_event(struct ctf_writer *cw, struct evsel *evsel)
{
	struct bt_ctf_event_class *event_class;
	struct evsel_priv *priv;
	const char *name = evsel__name(evsel);
	int ret;

	pr("Adding event '%s' (type %d)\n", name, evsel->core.attr.type);

	event_class = bt_ctf_event_class_create(name);
	if (!event_class)
		return -1;

	ret = add_generic_types(cw, evsel, event_class);
	if (ret)
		goto err;

	if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
		ret = add_tracepoint_types(cw, evsel, event_class);
		if (ret)
			goto err;
	}

	if (evsel__is_bpf_output(evsel)) {
		ret = add_bpf_output_types(cw, event_class);
		if (ret)
			goto err;
	}

	ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);
	if (ret) {
		pr("Failed to add event class into stream.\n");
		goto err;
	}

	/*
	 * Keep our reference in evsel->priv; it is released by
	 * cleanup_events() once the conversion is done.
	 */
	priv = malloc(sizeof(*priv));
	if (!priv)
		goto err;

	priv->event_class = event_class;
	evsel->priv       = priv;
	return 0;

err:
	/* Drop our reference on the class on any failure. */
	bt_ctf_event_class_put(event_class);
	pr_err("Failed to add event '%s'.\n", name);
	return -1;
}
1224 
1225 static int setup_events(struct ctf_writer *cw, struct perf_session *session)
1226 {
1227 	struct evlist *evlist = session->evlist;
1228 	struct evsel *evsel;
1229 	int ret;
1230 
1231 	evlist__for_each_entry(evlist, evsel) {
1232 		ret = add_event(cw, evsel);
1233 		if (ret)
1234 			return ret;
1235 	}
1236 	return 0;
1237 }
1238 
1239 #define __NON_SAMPLE_ADD_FIELD(t, n)						\
1240 	do {							\
1241 		pr2("  field '%s'\n", #n);			\
1242 		if (bt_ctf_event_class_add_field(event_class, cw->data.t, #n)) {\
1243 			pr_err("Failed to add field '%s';\n", #n);\
1244 			return -1;				\
1245 		}						\
1246 	} while(0)
1247 
1248 #define __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(_name, body) 		\
1249 static int add_##_name##_event(struct ctf_writer *cw)		\
1250 {								\
1251 	struct bt_ctf_event_class *event_class;			\
1252 	int ret;						\
1253 								\
1254 	pr("Adding "#_name" event\n");				\
1255 	event_class = bt_ctf_event_class_create("perf_" #_name);\
1256 	if (!event_class)					\
1257 		return -1;					\
1258 	body							\
1259 								\
1260 	ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);\
1261 	if (ret) {						\
1262 		pr("Failed to add event class '"#_name"' into stream.\n");\
1263 		return ret;					\
1264 	}							\
1265 								\
1266 	cw->_name##_class = event_class;			\
1267 	bt_ctf_event_class_put(event_class);			\
1268 	return 0;						\
1269 }
1270 
1271 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(comm,
1272 	__NON_SAMPLE_ADD_FIELD(u32, pid);
1273 	__NON_SAMPLE_ADD_FIELD(u32, tid);
1274 	__NON_SAMPLE_ADD_FIELD(string, comm);
1275 )
1276 
1277 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(fork,
1278 	__NON_SAMPLE_ADD_FIELD(u32, pid);
1279 	__NON_SAMPLE_ADD_FIELD(u32, ppid);
1280 	__NON_SAMPLE_ADD_FIELD(u32, tid);
1281 	__NON_SAMPLE_ADD_FIELD(u32, ptid);
1282 	__NON_SAMPLE_ADD_FIELD(u64, time);
1283 )
1284 
1285 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(exit,
1286 	__NON_SAMPLE_ADD_FIELD(u32, pid);
1287 	__NON_SAMPLE_ADD_FIELD(u32, ppid);
1288 	__NON_SAMPLE_ADD_FIELD(u32, tid);
1289 	__NON_SAMPLE_ADD_FIELD(u32, ptid);
1290 	__NON_SAMPLE_ADD_FIELD(u64, time);
1291 )
1292 
1293 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(mmap,
1294 	__NON_SAMPLE_ADD_FIELD(u32, pid);
1295 	__NON_SAMPLE_ADD_FIELD(u32, tid);
1296 	__NON_SAMPLE_ADD_FIELD(u64_hex, start);
1297 	__NON_SAMPLE_ADD_FIELD(string, filename);
1298 )
1299 
1300 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(mmap2,
1301 	__NON_SAMPLE_ADD_FIELD(u32, pid);
1302 	__NON_SAMPLE_ADD_FIELD(u32, tid);
1303 	__NON_SAMPLE_ADD_FIELD(u64_hex, start);
1304 	__NON_SAMPLE_ADD_FIELD(string, filename);
1305 )
1306 #undef __NON_SAMPLE_ADD_FIELD
1307 #undef __FUNC_ADD_NON_SAMPLE_EVENT_CLASS
1308 
1309 static int setup_non_sample_events(struct ctf_writer *cw,
1310 				   struct perf_session *session __maybe_unused)
1311 {
1312 	int ret;
1313 
1314 	ret = add_comm_event(cw);
1315 	if (ret)
1316 		return ret;
1317 	ret = add_exit_event(cw);
1318 	if (ret)
1319 		return ret;
1320 	ret = add_fork_event(cw);
1321 	if (ret)
1322 		return ret;
1323 	ret = add_mmap_event(cw);
1324 	if (ret)
1325 		return ret;
1326 	ret = add_mmap2_event(cw);
1327 	if (ret)
1328 		return ret;
1329 	return 0;
1330 }
1331 
1332 static void cleanup_events(struct perf_session *session)
1333 {
1334 	struct evlist *evlist = session->evlist;
1335 	struct evsel *evsel;
1336 
1337 	evlist__for_each_entry(evlist, evsel) {
1338 		struct evsel_priv *priv;
1339 
1340 		priv = evsel->priv;
1341 		bt_ctf_event_class_put(priv->event_class);
1342 		zfree(&evsel->priv);
1343 	}
1344 
1345 	evlist__delete(evlist);
1346 	session->evlist = NULL;
1347 }
1348 
1349 static int setup_streams(struct ctf_writer *cw, struct perf_session *session)
1350 {
1351 	struct ctf_stream **stream;
1352 	struct perf_env *env = perf_session__env(session);
1353 	int ncpus;
1354 
1355 	/*
1356 	 * Try to get the number of cpus used in the data file,
1357 	 * if not present fallback to the MAX_CPUS.
1358 	 */
1359 	ncpus = env->nr_cpus_avail ?: MAX_CPUS;
1360 
1361 	stream = zalloc(sizeof(*stream) * ncpus);
1362 	if (!stream) {
1363 		pr_err("Failed to allocate streams.\n");
1364 		return -ENOMEM;
1365 	}
1366 
1367 	cw->stream     = stream;
1368 	cw->stream_cnt = ncpus;
1369 	return 0;
1370 }
1371 
1372 static void free_streams(struct ctf_writer *cw)
1373 {
1374 	int cpu;
1375 
1376 	for (cpu = 0; cpu < cw->stream_cnt; cpu++)
1377 		ctf_stream__delete(cw->stream[cpu]);
1378 
1379 	zfree(&cw->stream);
1380 }
1381 
/*
 * Export basic system information from the perf session header into
 * the CTF trace's environment section.  Returns 0 on success, -1 as
 * soon as one field fails to be added.
 */
static int ctf_writer__setup_env(struct ctf_writer *cw,
				 struct perf_session *session)
{
	struct perf_env *env = perf_session__env(session);
	struct bt_ctf_writer *writer = cw->writer;

/* Add one environment field or bail out of the enclosing function. */
#define ADD(__n, __v)							\
do {									\
	if (bt_ctf_writer_add_environment_field(writer, __n, __v))	\
		return -1;						\
} while (0)

	ADD("host",    env->hostname);
	ADD("sysname", "Linux");
	ADD("release", env->os_release);
	ADD("version", env->version);
	ADD("machine", env->arch);
	ADD("domain", "kernel");
	ADD("tracer_name", "perf");

#undef ADD
	return 0;
}
1405 
/*
 * Configure the writer's clock: 1 GHz frequency, i.e. perf timestamps
 * are interpreted as nanoseconds.  With 'tod' set, the clock is offset
 * so timestamps map to wall-clock (time-of-day) values, which requires
 * clock data recorded via perf record -k/--clockid.
 *
 * Returns 0 on success, -1 on failure.
 */
static int ctf_writer__setup_clock(struct ctf_writer *cw,
				   struct perf_session *session,
				   bool tod)
{
	struct bt_ctf_clock *clock = cw->clock;
	const char *desc = "perf clock";
	int64_t offset = 0;

	if (tod) {
		struct perf_env *env = perf_session__env(session);

		if (!env->clock.enabled) {
			pr_err("Can't provide --tod time, missing clock data. "
			       "Please record with -k/--clockid option.\n");
			return -1;
		}

		/* Describe the clock by the recorded clockid's name. */
		desc   = clockid_name(env->clock.clockid);
		/* Shift perf timestamps onto the wall-clock timeline. */
		offset = env->clock.tod_ns - env->clock.clockid_ns;
	}

/* Apply one clock property or bail out of the enclosing function. */
#define SET(__n, __v)				\
do {						\
	if (bt_ctf_clock_set_##__n(clock, __v))	\
		return -1;			\
} while (0)

	SET(frequency,   1000000000);
	SET(offset,      offset);
	SET(description, desc);
	SET(precision,   10);
	SET(is_absolute, 0);

#undef SET
	return 0;
}
1442 
1443 static struct bt_ctf_field_type *create_int_type(int size, bool sign, bool hex)
1444 {
1445 	struct bt_ctf_field_type *type;
1446 
1447 	type = bt_ctf_field_type_integer_create(size);
1448 	if (!type)
1449 		return NULL;
1450 
1451 	if (sign &&
1452 	    bt_ctf_field_type_integer_set_signed(type, 1))
1453 		goto err;
1454 
1455 	if (hex &&
1456 	    bt_ctf_field_type_integer_set_base(type, BT_CTF_INTEGER_BASE_HEXADECIMAL))
1457 		goto err;
1458 
1459 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
1460 	bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_BIG_ENDIAN);
1461 #else
1462 	bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_LITTLE_ENDIAN);
1463 #endif
1464 
1465 	pr2("Created type: INTEGER %d-bit %ssigned %s\n",
1466 	    size, sign ? "un" : "", hex ? "hex" : "");
1467 	return type;
1468 
1469 err:
1470 	bt_ctf_field_type_put(type);
1471 	return NULL;
1472 }
1473 
1474 static void ctf_writer__cleanup_data(struct ctf_writer *cw)
1475 {
1476 	unsigned int i;
1477 
1478 	for (i = 0; i < ARRAY_SIZE(cw->data.array); i++)
1479 		bt_ctf_field_type_put(cw->data.array[i]);
1480 }
1481 
/*
 * Pre-create the integer and string field types shared by all event
 * classes.  On any failure, ctf_writer__cleanup_data() puts the whole
 * data.array; entries not yet created are presumably still NULL from
 * the caller's zero-initialized writer — NOTE(review): assumes
 * bt_ctf_field_type_put(NULL) is a harmless no-op, confirm against the
 * babeltrace version in use.
 *
 * Returns 0 on success, -1 on failure.
 */
static int ctf_writer__init_data(struct ctf_writer *cw)
{
/* Create one integer type or jump to the common error path. */
#define CREATE_INT_TYPE(type, size, sign, hex)		\
do {							\
	(type) = create_int_type(size, sign, hex);	\
	if (!(type))					\
		goto err;				\
} while (0)

	CREATE_INT_TYPE(cw->data.s64, 64, true,  false);
	CREATE_INT_TYPE(cw->data.u64, 64, false, false);
	CREATE_INT_TYPE(cw->data.s32, 32, true,  false);
	CREATE_INT_TYPE(cw->data.u32, 32, false, false);
	CREATE_INT_TYPE(cw->data.u32_hex, 32, false, true);
	CREATE_INT_TYPE(cw->data.u64_hex, 64, false, true);

	cw->data.string  = bt_ctf_field_type_string_create();
	if (cw->data.string)
		return 0;

err:
	ctf_writer__cleanup_data(cw);
	pr_err("Failed to create data types.\n");
	return -1;
}
1507 
/*
 * Release every babeltrace object owned by the writer, then zero the
 * struct so that its pointers cannot be reused and a second cleanup
 * (e.g. after a partially failed init) stays harmless.
 */
static void ctf_writer__cleanup(struct ctf_writer *cw)
{
	ctf_writer__cleanup_data(cw);

	bt_ctf_clock_put(cw->clock);
	free_streams(cw);
	bt_ctf_stream_class_put(cw->stream_class);
	bt_ctf_writer_put(cw->writer);

	/* and NULL all the pointers */
	memset(cw, 0, sizeof(*cw));
}
1520 
/*
 * Set up the whole CTF writer: writer object rooted at 'path', the
 * perf clock (optionally time-of-day based), the stream class, the
 * shared field types and the cpu_id packet-context field.
 *
 * On failure, everything acquired so far is torn down via
 * ctf_writer__cleanup() and -1 is returned; 0 on success.
 */
static int ctf_writer__init(struct ctf_writer *cw, const char *path,
			    struct perf_session *session, bool tod)
{
	struct bt_ctf_writer		*writer;
	struct bt_ctf_stream_class	*stream_class;
	struct bt_ctf_clock		*clock;
	struct bt_ctf_field_type	*pkt_ctx_type;
	int				ret;

	/* CTF writer */
	writer = bt_ctf_writer_create(path);
	if (!writer)
		goto err;

	/* Stored immediately so cleanup can release partial state. */
	cw->writer = writer;

	/* CTF clock */
	clock = bt_ctf_clock_create("perf_clock");
	if (!clock) {
		pr("Failed to create CTF clock.\n");
		goto err_cleanup;
	}

	cw->clock = clock;

	if (ctf_writer__setup_clock(cw, session, tod)) {
		pr("Failed to setup CTF clock.\n");
		goto err_cleanup;
	}

	/* CTF stream class */
	stream_class = bt_ctf_stream_class_create("perf_stream");
	if (!stream_class) {
		pr("Failed to create CTF stream class.\n");
		goto err_cleanup;
	}

	cw->stream_class = stream_class;

	/* CTF clock stream setup */
	if (bt_ctf_stream_class_set_clock(stream_class, clock)) {
		pr("Failed to assign CTF clock to stream class.\n");
		goto err_cleanup;
	}

	if (ctf_writer__init_data(cw))
		goto err_cleanup;

	/* Add cpu_id for packet context */
	pkt_ctx_type = bt_ctf_stream_class_get_packet_context_type(stream_class);
	if (!pkt_ctx_type)
		goto err_cleanup;

	/* The context keeps its own reference to u32; drop ours on the type. */
	ret = bt_ctf_field_type_structure_add_field(pkt_ctx_type, cw->data.u32, "cpu_id");
	bt_ctf_field_type_put(pkt_ctx_type);
	if (ret)
		goto err_cleanup;

	/* CTF clock writer setup */
	if (bt_ctf_writer_add_clock(writer, clock)) {
		pr("Failed to assign CTF clock to writer.\n");
		goto err_cleanup;
	}

	return 0;

err_cleanup:
	ctf_writer__cleanup(cw);
err:
	pr_err("Failed to setup CTF writer.\n");
	return -1;
}
1593 
1594 static int ctf_writer__flush_streams(struct ctf_writer *cw)
1595 {
1596 	int cpu, ret = 0;
1597 
1598 	for (cpu = 0; cpu < cw->stream_cnt && !ret; cpu++)
1599 		ret = ctf_stream__flush(cw->stream[cpu]);
1600 
1601 	return ret;
1602 }
1603 
1604 static int convert__config(const char *var, const char *value, void *cb)
1605 {
1606 	struct convert *c = cb;
1607 
1608 	if (!strcmp(var, "convert.queue-size"))
1609 		return perf_config_u64(&c->queue_size, var, value);
1610 
1611 	return 0;
1612 }
1613 
/*
 * Convert the perf.data file 'input' into a CTF trace under 'path'.
 *
 * opts->all additionally converts non-sample events (comm, fork, exit,
 * mmap, mmap2); opts->tod maps timestamps onto wall-clock time;
 * opts->force and opts->time_str control session opening and time-range
 * filtering respectively.
 *
 * Returns 0 on success, a negative value otherwise.
 */
int bt_convert__perf2ctf(const char *input, const char *path,
			 struct perf_data_convert_opts *opts)
{
	struct perf_session *session;
	struct perf_data data = {
		.path	   = input,
		.mode      = PERF_DATA_MODE_READ,
		.force     = opts->force,
	};
	struct convert c = {};
	struct ctf_writer *cw = &c.writer;
	int err;

	perf_tool__init(&c.tool, /*ordered_events=*/true);
	c.tool.sample          = process_sample_event;
	c.tool.mmap            = perf_event__process_mmap;
	c.tool.mmap2           = perf_event__process_mmap2;
	c.tool.comm            = perf_event__process_comm;
	c.tool.exit            = perf_event__process_exit;
	c.tool.fork            = perf_event__process_fork;
	c.tool.lost            = perf_event__process_lost;
	c.tool.tracing_data    = perf_event__process_tracing_data;
	c.tool.build_id        = perf_event__process_build_id;
	c.tool.namespaces      = perf_event__process_namespaces;
	c.tool.ordering_requires_timestamps = true;

	/* With --all, emit non-sample events as CTF events too. */
	if (opts->all) {
		c.tool.comm = process_comm_event;
		c.tool.exit = process_exit_event;
		c.tool.fork = process_fork_event;
		c.tool.mmap = process_mmap_event;
		c.tool.mmap2 = process_mmap2_event;
	}

	err = perf_config(convert__config, &c);
	if (err)
		return err;

	err = -1;
	/* perf.data session */
	session = perf_session__new(&data, &c.tool);
	if (IS_ERR(session))
		return PTR_ERR(session);

	/* Optional --time filtering of the samples to convert. */
	if (opts->time_str) {
		err = perf_time__parse_for_ranges(opts->time_str, session,
						  &c.ptime_range,
						  &c.range_size,
						  &c.range_num);
		if (err < 0)
			goto free_session;
	}

	/* CTF writer */
	if (ctf_writer__init(cw, path, session, opts->tod))
		goto free_session;

	/* Honor the configured convert.queue-size, if any. */
	if (c.queue_size) {
		ordered_events__set_alloc_size(&session->ordered_events,
					       c.queue_size);
	}

	/* CTF writer env/clock setup  */
	if (ctf_writer__setup_env(cw, session))
		goto free_writer;

	/* CTF events setup */
	if (setup_events(cw, session))
		goto free_writer;

	if (opts->all && setup_non_sample_events(cw, session))
		goto free_writer;

	if (setup_streams(cw, session))
		goto free_writer;

	/* Run the conversion, then push everything out to disk. */
	err = perf_session__process_events(session);
	if (!err)
		err = ctf_writer__flush_streams(cw);
	else
		pr_err("Error during conversion.\n");

	fprintf(stderr,	"[ perf data convert: Converted '%s' into CTF data '%s' ]\n",
		data.path, path);

	fprintf(stderr,	"[ perf data convert: Converted and wrote %.3f MB (%" PRIu64 " samples",
		(double) c.events_size / 1024.0 / 1024.0,
		c.events_count);

	if (!c.non_sample_count)
		fprintf(stderr, ") ]\n");
	else
		fprintf(stderr, ", %" PRIu64 " non-samples) ]\n", c.non_sample_count);

	if (c.skipped) {
		fprintf(stderr,	"[ perf data convert: Skipped %" PRIu64 " samples ]\n",
			c.skipped);
	}

	if (c.ptime_range)
		zfree(&c.ptime_range);

	cleanup_events(session);
	perf_session__delete(session);
	ctf_writer__cleanup(cw);

	return err;

free_writer:
	ctf_writer__cleanup(cw);
free_session:
	if (c.ptime_range)
		zfree(&c.ptime_range);

	perf_session__delete(session);
	pr_err("Error during conversion setup.\n");
	return err;
}
1732