xref: /linux/tools/perf/util/data-convert-bt.c (revision 9f2c9170934eace462499ba0bfe042cc72900173)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * CTF writing support via babeltrace.
4  *
5  * Copyright (C) 2014, Jiri Olsa <jolsa@redhat.com>
6  * Copyright (C) 2014, Sebastian Andrzej Siewior <bigeasy@linutronix.de>
7  */
8 
9 #include <errno.h>
10 #include <inttypes.h>
11 #include <linux/compiler.h>
12 #include <linux/kernel.h>
13 #include <linux/zalloc.h>
14 #include <babeltrace/ctf-writer/writer.h>
15 #include <babeltrace/ctf-writer/clock.h>
16 #include <babeltrace/ctf-writer/stream.h>
17 #include <babeltrace/ctf-writer/event.h>
18 #include <babeltrace/ctf-writer/event-types.h>
19 #include <babeltrace/ctf-writer/event-fields.h>
20 #include <babeltrace/ctf-ir/utils.h>
21 #include <babeltrace/ctf/events.h>
22 #include "asm/bug.h"
23 #include "data-convert.h"
24 #include "session.h"
25 #include "debug.h"
26 #include "tool.h"
27 #include "evlist.h"
28 #include "evsel.h"
29 #include "machine.h"
30 #include "config.h"
31 #include <linux/ctype.h>
32 #include <linux/err.h>
33 #include <linux/time64.h>
34 #include "util.h"
35 #include "clockid.h"
36 #include "util/sample.h"
37 
38 #ifdef HAVE_LIBTRACEEVENT
39 #include <traceevent/event-parse.h>
40 #endif
41 
42 #define pr_N(n, fmt, ...) \
43 	eprintf(n, debug_data_convert, fmt, ##__VA_ARGS__)
44 
45 #define pr(fmt, ...)  pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)
46 #define pr2(fmt, ...) pr_N(2, pr_fmt(fmt), ##__VA_ARGS__)
47 
48 #define pr_time2(t, fmt, ...) pr_time_N(2, debug_data_convert, t, pr_fmt(fmt), ##__VA_ARGS__)
49 
50 struct evsel_priv {
51 	struct bt_ctf_event_class *event_class;
52 };
53 
54 #define MAX_CPUS	4096
55 
56 struct ctf_stream {
57 	struct bt_ctf_stream *stream;
58 	int cpu;
59 	u32 count;
60 };
61 
62 struct ctf_writer {
63 	/* writer primitives */
64 	struct bt_ctf_writer		 *writer;
65 	struct ctf_stream		**stream;
66 	int				  stream_cnt;
67 	struct bt_ctf_stream_class	 *stream_class;
68 	struct bt_ctf_clock		 *clock;
69 
70 	/* data types */
71 	union {
72 		struct {
73 			struct bt_ctf_field_type	*s64;
74 			struct bt_ctf_field_type	*u64;
75 			struct bt_ctf_field_type	*s32;
76 			struct bt_ctf_field_type	*u32;
77 			struct bt_ctf_field_type	*string;
78 			struct bt_ctf_field_type	*u32_hex;
79 			struct bt_ctf_field_type	*u64_hex;
80 		};
81 		struct bt_ctf_field_type *array[7]; /* must cover every named member above */
82 	} data;
83 	struct bt_ctf_event_class	*comm_class;
84 	struct bt_ctf_event_class	*exit_class;
85 	struct bt_ctf_event_class	*fork_class;
86 	struct bt_ctf_event_class	*mmap_class;
87 	struct bt_ctf_event_class	*mmap2_class;
88 };
89 
90 struct convert {
91 	struct perf_tool	tool;
92 	struct ctf_writer	writer;
93 
94 	u64			events_size;
95 	u64			events_count;
96 	u64			non_sample_count;
97 
98 	/* Ordered events configured queue size. */
99 	u64			queue_size;
100 };
101 
102 static int value_set(struct bt_ctf_field_type *type,
103 		     struct bt_ctf_event *event,
104 		     const char *name, u64 val)
105 {
106 	struct bt_ctf_field *field;
107 	bool sign = bt_ctf_field_type_integer_get_signed(type);
108 	int ret;
109 
110 	field = bt_ctf_field_create(type);
111 	if (!field) {
112 		pr_err("failed to create a field %s\n", name);
113 		return -1;
114 	}
115 
116 	if (sign) {
117 		ret = bt_ctf_field_signed_integer_set_value(field, val);
118 		if (ret) {
119 			pr_err("failed to set field value %s\n", name);
120 			goto err;
121 		}
122 	} else {
123 		ret = bt_ctf_field_unsigned_integer_set_value(field, val);
124 		if (ret) {
125 			pr_err("failed to set field value %s\n", name);
126 			goto err;
127 		}
128 	}
129 
130 	ret = bt_ctf_event_set_payload(event, name, field);
131 	if (ret) {
132 		pr_err("failed to set payload %s\n", name);
133 		goto err;
134 	}
135 
136 	pr2("  SET [%s = %" PRIu64 "]\n", name, val);
137 
138 err:
139 	bt_ctf_field_put(field);
140 	return ret;
141 }
142 
143 #define __FUNC_VALUE_SET(_name, _val_type)				\
144 static __maybe_unused int value_set_##_name(struct ctf_writer *cw,	\
145 			     struct bt_ctf_event *event,		\
146 			     const char *name,				\
147 			     _val_type val)				\
148 {									\
149 	struct bt_ctf_field_type *type = cw->data._name;		\
150 	return value_set(type, event, name, (u64) val);			\
151 }
152 
153 #define FUNC_VALUE_SET(_name) __FUNC_VALUE_SET(_name, _name)
154 
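/*
 * Instantiate typed setters: value_set_s32(), value_set_u32(), value_set_s64(),
 * value_set_u64() and value_set_u64_hex(), each writing one payload field of
 * the matching CTF integer type from cw->data.
 */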
155 FUNC_VALUE_SET(s32)
156 FUNC_VALUE_SET(u32)
157 FUNC_VALUE_SET(s64)
158 FUNC_VALUE_SET(u64)
159 __FUNC_VALUE_SET(u64_hex, u64)
160 
161 static int string_set_value(struct bt_ctf_field *field, const char *string);
162 static __maybe_unused int
163 value_set_string(struct ctf_writer *cw, struct bt_ctf_event *event,
164 		 const char *name, const char *string)
165 {
166 	struct bt_ctf_field_type *type = cw->data.string;
167 	struct bt_ctf_field *field;
168 	int ret = 0;
169 
170 	field = bt_ctf_field_create(type);
171 	if (!field) {
172 		pr_err("failed to create a field %s\n", name);
173 		return -1;
174 	}
175 
176 	ret = string_set_value(field, string);
177 	if (ret) {
178 		pr_err("failed to set value %s\n", name);
179 		goto err_put_field;
180 	}
181 
182 	ret = bt_ctf_event_set_payload(event, name, field);
183 	if (ret)
184 		pr_err("failed to set payload %s\n", name);
185 
186 err_put_field:
187 	bt_ctf_field_put(field);
188 	return ret;
189 }
190 
191 static struct bt_ctf_field_type*
192 get_tracepoint_field_type(struct ctf_writer *cw, struct tep_format_field *field)
193 {
194 	unsigned long flags = field->flags;
195 
196 	if (flags & TEP_FIELD_IS_STRING)
197 		return cw->data.string;
198 
199 	if (!(flags & TEP_FIELD_IS_SIGNED)) {
200 		/* unsigned longs are mostly pointers */
201 		if (flags & TEP_FIELD_IS_LONG || flags & TEP_FIELD_IS_POINTER)
202 			return cw->data.u64_hex;
203 	}
204 
205 	if (flags & TEP_FIELD_IS_SIGNED) {
206 		if (field->size == 8)
207 			return cw->data.s64;
208 		else
209 			return cw->data.s32;
210 	}
211 
212 	if (field->size == 8)
213 		return cw->data.u64;
214 	else
215 		return cw->data.u32;
216 }
217 
218 static unsigned long long adjust_signedness(unsigned long long value_int, int size)
219 {
220 	unsigned long long value_mask;
221 
222 	/*
223 	 * value_mask = (1 << (size * 8 - 1)) - 1.
224 	 * Directly set value_mask for code readers.
225 	 */
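	/*
	 * For example, with a 1-byte field the raw value 0xfe (i.e. -2)
	 * becomes 0xfffffffffffffffeULL, while 0x7f stays 0x7f.
	 */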
226 	switch (size) {
227 	case 1:
228 		value_mask = 0x7fULL;
229 		break;
230 	case 2:
231 		value_mask = 0x7fffULL;
232 		break;
233 	case 4:
234 		value_mask = 0x7fffffffULL;
235 		break;
236 	case 8:
237 		/*
238 		 * For a 64-bit value, return it as-is; there are no
239 		 * upper bits to fill.
240 		 */
241 		/* Fall through */
242 	default:
243 		/* BUG! */
244 		return value_int;
245 	}
246 
247 	/* If it is a positive value, don't adjust. */
248 	if ((value_int & (~0ULL - value_mask)) == 0)
249 		return value_int;
250 
251 	/* Fill the upper bits of value_int with 1s to make it a negative long long. */
252 	return (value_int & value_mask) | ~value_mask;
253 }
254 
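/*
 * Store 'string' into a CTF string field, escaping any non-printable bytes
 * as "\xNN" so the resulting trace data stays valid. If the temporary buffer
 * cannot be allocated, the literal "UNPRINTABLE-STRING" is written instead.
 */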
255 static int string_set_value(struct bt_ctf_field *field, const char *string)
256 {
257 	char *buffer = NULL;
258 	size_t len = strlen(string), i, p;
259 	int err;
260 
261 	for (i = p = 0; i < len; i++, p++) {
262 		if (isprint(string[i])) {
263 			if (!buffer)
264 				continue;
265 			buffer[p] = string[i];
266 		} else {
267 			char numstr[5];
268 
269 			snprintf(numstr, sizeof(numstr), "\\x%02x",
270 				 (unsigned int)(string[i]) & 0xff);
271 
272 			if (!buffer) {
273 				buffer = zalloc(i + (len - i) * 4 + 2);
274 				if (!buffer) {
275 					pr_err("failed to set unprintable string '%s'\n", string);
276 					return bt_ctf_field_string_set_value(field, "UNPRINTABLE-STRING");
277 				}
278 				if (i > 0)
279 					strncpy(buffer, string, i);
280 			}
281 			memcpy(buffer + p, numstr, 4);
282 			p += 3;
283 		}
284 	}
285 
286 	if (!buffer)
287 		return bt_ctf_field_string_set_value(field, string);
288 	err = bt_ctf_field_string_set_value(field, buffer);
289 	free(buffer);
290 	return err;
291 }
292 
293 static int add_tracepoint_field_value(struct ctf_writer *cw,
294 				      struct bt_ctf_event_class *event_class,
295 				      struct bt_ctf_event *event,
296 				      struct perf_sample *sample,
297 				      struct tep_format_field *fmtf)
298 {
299 	struct bt_ctf_field_type *type;
300 	struct bt_ctf_field *array_field;
301 	struct bt_ctf_field *field;
302 	const char *name = fmtf->name;
303 	void *data = sample->raw_data;
304 	unsigned long flags = fmtf->flags;
305 	unsigned int n_items;
306 	unsigned int i;
307 	unsigned int offset;
308 	unsigned int len;
309 	int ret;
310 
311 	name = fmtf->alias;
312 	offset = fmtf->offset;
313 	len = fmtf->size;
314 	if (flags & TEP_FIELD_IS_STRING)
315 		flags &= ~TEP_FIELD_IS_ARRAY;
316 
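	/*
	 * A dynamic (__data_loc) field stores a 32-bit descriptor at the
	 * field's offset: the payload length lives in the upper 16 bits and
	 * the real payload offset in the lower 16 bits.
	 */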
317 	if (flags & TEP_FIELD_IS_DYNAMIC) {
318 		unsigned long long tmp_val;
319 
320 		tmp_val = tep_read_number(fmtf->event->tep,
321 					  data + offset, len);
322 		offset = tmp_val;
323 		len = offset >> 16;
324 		offset &= 0xffff;
325 #ifdef HAVE_LIBTRACEEVENT_TEP_FIELD_IS_RELATIVE
326 		if (flags & TEP_FIELD_IS_RELATIVE)
327 			offset += fmtf->offset + fmtf->size;
328 #endif
329 	}
330 
331 	if (flags & TEP_FIELD_IS_ARRAY) {
332 
333 		type = bt_ctf_event_class_get_field_by_name(
334 				event_class, name);
335 		array_field = bt_ctf_field_create(type);
336 		bt_ctf_field_type_put(type);
337 		if (!array_field) {
338 			pr_err("Failed to create array type %s\n", name);
339 			return -1;
340 		}
341 
342 		len = fmtf->size / fmtf->arraylen;
343 		n_items = fmtf->arraylen;
344 	} else {
345 		n_items = 1;
346 		array_field = NULL;
347 	}
348 
349 	type = get_tracepoint_field_type(cw, fmtf);
350 
351 	for (i = 0; i < n_items; i++) {
352 		if (flags & TEP_FIELD_IS_ARRAY)
353 			field = bt_ctf_field_array_get_field(array_field, i);
354 		else
355 			field = bt_ctf_field_create(type);
356 
357 		if (!field) {
358 			pr_err("failed to create a field %s\n", name);
359 			return -1;
360 		}
361 
362 		if (flags & TEP_FIELD_IS_STRING)
363 			ret = string_set_value(field, data + offset + i * len);
364 		else {
365 			unsigned long long value_int;
366 
367 			value_int = tep_read_number(
368 					fmtf->event->tep,
369 					data + offset + i * len, len);
370 
371 			if (!(flags & TEP_FIELD_IS_SIGNED))
372 				ret = bt_ctf_field_unsigned_integer_set_value(
373 						field, value_int);
374 			else
375 				ret = bt_ctf_field_signed_integer_set_value(
376 						field, adjust_signedness(value_int, len));
377 		}
378 
379 		if (ret) {
380 			pr_err("failed to set field value %s\n", name);
381 			goto err_put_field;
382 		}
383 		if (!(flags & TEP_FIELD_IS_ARRAY)) {
384 			ret = bt_ctf_event_set_payload(event, name, field);
385 			if (ret) {
386 				pr_err("failed to set payload %s\n", name);
387 				goto err_put_field;
388 			}
389 		}
390 		bt_ctf_field_put(field);
391 	}
392 	if (flags & TEP_FIELD_IS_ARRAY) {
393 		ret = bt_ctf_event_set_payload(event, name, array_field);
394 		if (ret) {
395 			pr_err("Failed to add payload array %s\n", name);
396 			return -1;
397 		}
398 		bt_ctf_field_put(array_field);
399 	}
400 	return 0;
401 
402 err_put_field:
403 	bt_ctf_field_put(field);
404 	return -1;
405 }
406 
407 static int add_tracepoint_fields_values(struct ctf_writer *cw,
408 					struct bt_ctf_event_class *event_class,
409 					struct bt_ctf_event *event,
410 					struct tep_format_field *fields,
411 					struct perf_sample *sample)
412 {
413 	struct tep_format_field *field;
414 	int ret;
415 
416 	for (field = fields; field; field = field->next) {
417 		ret = add_tracepoint_field_value(cw, event_class, event, sample,
418 				field);
419 		if (ret)
420 			return -1;
421 	}
422 	return 0;
423 }
424 
425 static int add_tracepoint_values(struct ctf_writer *cw,
426 				 struct bt_ctf_event_class *event_class,
427 				 struct bt_ctf_event *event,
428 				 struct evsel *evsel,
429 				 struct perf_sample *sample)
430 {
431 	struct tep_format_field *common_fields = evsel->tp_format->format.common_fields;
432 	struct tep_format_field *fields        = evsel->tp_format->format.fields;
433 	int ret;
434 
435 	ret = add_tracepoint_fields_values(cw, event_class, event,
436 					   common_fields, sample);
437 	if (!ret)
438 		ret = add_tracepoint_fields_values(cw, event_class, event,
439 						   fields, sample);
440 
441 	return ret;
442 }
443 
444 static int
445 add_bpf_output_values(struct bt_ctf_event_class *event_class,
446 		      struct bt_ctf_event *event,
447 		      struct perf_sample *sample)
448 {
449 	struct bt_ctf_field_type *len_type, *seq_type;
450 	struct bt_ctf_field *len_field, *seq_field;
451 	unsigned int raw_size = sample->raw_size;
452 	unsigned int nr_elements = raw_size / sizeof(u32);
453 	unsigned int i;
454 	int ret;
455 
456 	if (nr_elements * sizeof(u32) != raw_size)
457 		pr_warning("Incorrect raw_size (%u) in bpf output event, skip %zu bytes\n",
458 			   raw_size, raw_size - nr_elements * sizeof(u32));
459 
460 	len_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_len");
461 	len_field = bt_ctf_field_create(len_type);
462 	if (!len_field) {
463 		pr_err("failed to create 'raw_len' for bpf output event\n");
464 		ret = -1;
465 		goto put_len_type;
466 	}
467 
468 	ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements);
469 	if (ret) {
470 		pr_err("failed to set field value for raw_len\n");
471 		goto put_len_field;
472 	}
473 	ret = bt_ctf_event_set_payload(event, "raw_len", len_field);
474 	if (ret) {
475 		pr_err("failed to set payload to raw_len\n");
476 		goto put_len_field;
477 	}
478 
479 	seq_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_data");
480 	seq_field = bt_ctf_field_create(seq_type);
481 	if (!seq_field) {
482 		pr_err("failed to create 'raw_data' for bpf output event\n");
483 		ret = -1;
484 		goto put_seq_type;
485 	}
486 
487 	ret = bt_ctf_field_sequence_set_length(seq_field, len_field);
488 	if (ret) {
489 		pr_err("failed to set length of 'raw_data'\n");
490 		goto put_seq_field;
491 	}
492 
493 	for (i = 0; i < nr_elements; i++) {
494 		struct bt_ctf_field *elem_field =
495 			bt_ctf_field_sequence_get_field(seq_field, i);
496 
497 		ret = bt_ctf_field_unsigned_integer_set_value(elem_field,
498 				((u32 *)(sample->raw_data))[i]);
499 
500 		bt_ctf_field_put(elem_field);
501 		if (ret) {
502 			pr_err("failed to set raw_data[%d]\n", i);
503 			goto put_seq_field;
504 		}
505 	}
506 
507 	ret = bt_ctf_event_set_payload(event, "raw_data", seq_field);
508 	if (ret)
509 		pr_err("failed to set payload for raw_data\n");
510 
511 put_seq_field:
512 	bt_ctf_field_put(seq_field);
513 put_seq_type:
514 	bt_ctf_field_type_put(seq_type);
515 put_len_field:
516 	bt_ctf_field_put(len_field);
517 put_len_type:
518 	bt_ctf_field_type_put(len_type);
519 	return ret;
520 }
521 
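/*
 * Callchains are exported as a 'perf_callchain_size' count followed by a
 * 'perf_callchain' sequence of u64 instruction pointers; the matching field
 * types are declared in add_generic_types().
 */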
522 static int
523 add_callchain_output_values(struct bt_ctf_event_class *event_class,
524 		      struct bt_ctf_event *event,
525 		      struct ip_callchain *callchain)
526 {
527 	struct bt_ctf_field_type *len_type, *seq_type;
528 	struct bt_ctf_field *len_field, *seq_field;
529 	unsigned int nr_elements = callchain->nr;
530 	unsigned int i;
531 	int ret;
532 
533 	len_type = bt_ctf_event_class_get_field_by_name(
534 			event_class, "perf_callchain_size");
535 	len_field = bt_ctf_field_create(len_type);
536 	if (!len_field) {
537 		pr_err("failed to create 'perf_callchain_size' for callchain output event\n");
538 		ret = -1;
539 		goto put_len_type;
540 	}
541 
542 	ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements);
543 	if (ret) {
544 		pr_err("failed to set field value for perf_callchain_size\n");
545 		goto put_len_field;
546 	}
547 	ret = bt_ctf_event_set_payload(event, "perf_callchain_size", len_field);
548 	if (ret) {
549 		pr_err("failed to set payload to perf_callchain_size\n");
550 		goto put_len_field;
551 	}
552 
553 	seq_type = bt_ctf_event_class_get_field_by_name(
554 			event_class, "perf_callchain");
555 	seq_field = bt_ctf_field_create(seq_type);
556 	if (!seq_field) {
557 		pr_err("failed to create 'perf_callchain' for callchain output event\n");
558 		ret = -1;
559 		goto put_seq_type;
560 	}
561 
562 	ret = bt_ctf_field_sequence_set_length(seq_field, len_field);
563 	if (ret) {
564 		pr_err("failed to set length of 'perf_callchain'\n");
565 		goto put_seq_field;
566 	}
567 
568 	for (i = 0; i < nr_elements; i++) {
569 		struct bt_ctf_field *elem_field =
570 			bt_ctf_field_sequence_get_field(seq_field, i);
571 
572 		ret = bt_ctf_field_unsigned_integer_set_value(elem_field,
573 				((u64 *)(callchain->ips))[i]);
574 
575 		bt_ctf_field_put(elem_field);
576 		if (ret) {
577 			pr_err("failed to set callchain[%d]\n", i);
578 			goto put_seq_field;
579 		}
580 	}
581 
582 	ret = bt_ctf_event_set_payload(event, "perf_callchain", seq_field);
583 	if (ret)
584 		pr_err("failed to set payload for perf_callchain\n");
585 
586 put_seq_field:
587 	bt_ctf_field_put(seq_field);
588 put_seq_type:
589 	bt_ctf_field_type_put(seq_type);
590 put_len_field:
591 	bt_ctf_field_put(len_field);
592 put_len_type:
593 	bt_ctf_field_type_put(len_type);
594 	return ret;
595 }
596 
597 static int add_generic_values(struct ctf_writer *cw,
598 			      struct bt_ctf_event *event,
599 			      struct evsel *evsel,
600 			      struct perf_sample *sample)
601 {
602 	u64 type = evsel->core.attr.sample_type;
603 	int ret;
604 
605 	/*
606 	 * missing:
607 	 *   PERF_SAMPLE_TIME         - not needed as we have it in
608 	 *                              ctf event header
609 	 *   PERF_SAMPLE_READ         - TODO
610 	 *   PERF_SAMPLE_RAW          - tracepoint fields are handled separately
611 	 *   PERF_SAMPLE_BRANCH_STACK - TODO
612 	 *   PERF_SAMPLE_REGS_USER    - TODO
613 	 *   PERF_SAMPLE_STACK_USER   - TODO
614 	 */
615 
616 	if (type & PERF_SAMPLE_IP) {
617 		ret = value_set_u64_hex(cw, event, "perf_ip", sample->ip);
618 		if (ret)
619 			return -1;
620 	}
621 
622 	if (type & PERF_SAMPLE_TID) {
623 		ret = value_set_s32(cw, event, "perf_tid", sample->tid);
624 		if (ret)
625 			return -1;
626 
627 		ret = value_set_s32(cw, event, "perf_pid", sample->pid);
628 		if (ret)
629 			return -1;
630 	}
631 
632 	if ((type & PERF_SAMPLE_ID) ||
633 	    (type & PERF_SAMPLE_IDENTIFIER)) {
634 		ret = value_set_u64(cw, event, "perf_id", sample->id);
635 		if (ret)
636 			return -1;
637 	}
638 
639 	if (type & PERF_SAMPLE_STREAM_ID) {
640 		ret = value_set_u64(cw, event, "perf_stream_id", sample->stream_id);
641 		if (ret)
642 			return -1;
643 	}
644 
645 	if (type & PERF_SAMPLE_PERIOD) {
646 		ret = value_set_u64(cw, event, "perf_period", sample->period);
647 		if (ret)
648 			return -1;
649 	}
650 
651 	if (type & PERF_SAMPLE_WEIGHT) {
652 		ret = value_set_u64(cw, event, "perf_weight", sample->weight);
653 		if (ret)
654 			return -1;
655 	}
656 
657 	if (type & PERF_SAMPLE_DATA_SRC) {
658 		ret = value_set_u64(cw, event, "perf_data_src",
659 				sample->data_src);
660 		if (ret)
661 			return -1;
662 	}
663 
664 	if (type & PERF_SAMPLE_TRANSACTION) {
665 		ret = value_set_u64(cw, event, "perf_transaction",
666 				sample->transaction);
667 		if (ret)
668 			return -1;
669 	}
670 
671 	return 0;
672 }
673 
674 static int ctf_stream__flush(struct ctf_stream *cs)
675 {
676 	int err = 0;
677 
678 	if (cs) {
679 		err = bt_ctf_stream_flush(cs->stream);
680 		if (err)
681 			pr_err("CTF stream %d flush failed\n", cs->cpu);
682 
683 		pr("Flush stream for cpu %d (%u samples)\n",
684 		   cs->cpu, cs->count);
685 
686 		cs->count = 0;
687 	}
688 
689 	return err;
690 }
691 
692 static struct ctf_stream *ctf_stream__create(struct ctf_writer *cw, int cpu)
693 {
694 	struct ctf_stream *cs;
695 	struct bt_ctf_field *pkt_ctx   = NULL;
696 	struct bt_ctf_field *cpu_field = NULL;
697 	struct bt_ctf_stream *stream   = NULL;
698 	int ret;
699 
700 	cs = zalloc(sizeof(*cs));
701 	if (!cs) {
702 		pr_err("Failed to allocate ctf stream\n");
703 		return NULL;
704 	}
705 
706 	stream = bt_ctf_writer_create_stream(cw->writer, cw->stream_class);
707 	if (!stream) {
708 		pr_err("Failed to create CTF stream\n");
709 		goto out;
710 	}
711 
712 	pkt_ctx = bt_ctf_stream_get_packet_context(stream);
713 	if (!pkt_ctx) {
714 		pr_err("Failed to obtain packet context\n");
715 		goto out;
716 	}
717 
718 	cpu_field = bt_ctf_field_structure_get_field(pkt_ctx, "cpu_id");
719 	bt_ctf_field_put(pkt_ctx);
720 	if (!cpu_field) {
721 		pr_err("Failed to obtain cpu field\n");
722 		goto out;
723 	}
724 
725 	ret = bt_ctf_field_unsigned_integer_set_value(cpu_field, (u32) cpu);
726 	if (ret) {
727 		pr_err("Failed to update CPU number\n");
728 		goto out;
729 	}
730 
731 	bt_ctf_field_put(cpu_field);
732 
733 	cs->cpu    = cpu;
734 	cs->stream = stream;
735 	return cs;
736 
737 out:
738 	if (cpu_field)
739 		bt_ctf_field_put(cpu_field);
740 	if (stream)
741 		bt_ctf_stream_put(stream);
742 
743 	free(cs);
744 	return NULL;
745 }
746 
747 static void ctf_stream__delete(struct ctf_stream *cs)
748 {
749 	if (cs) {
750 		bt_ctf_stream_put(cs->stream);
751 		free(cs);
752 	}
753 }
754 
755 static struct ctf_stream *ctf_stream(struct ctf_writer *cw, int cpu)
756 {
757 	struct ctf_stream *cs = cw->stream[cpu];
758 
759 	if (!cs) {
760 		cs = ctf_stream__create(cw, cpu);
761 		cw->stream[cpu] = cs;
762 	}
763 
764 	return cs;
765 }
766 
767 static int get_sample_cpu(struct ctf_writer *cw, struct perf_sample *sample,
768 			  struct evsel *evsel)
769 {
770 	int cpu = 0;
771 
772 	if (evsel->core.attr.sample_type & PERF_SAMPLE_CPU)
773 		cpu = sample->cpu;
774 
775 	if (cpu >= cw->stream_cnt) {
776 		pr_err("Event was recorded for CPU %d, limit is at %d.\n",
777 			cpu, cw->stream_cnt);
778 		cpu = 0;
779 	}
780 
781 	return cpu;
782 }
783 
784 #define STREAM_FLUSH_COUNT 100000
785 
786 /*
787  * Currently we have no other way to determine the
788  * time for a stream flush other than keeping track
789  * of the number of events and checking it against
790  * a threshold.
791  */
792 static bool is_flush_needed(struct ctf_stream *cs)
793 {
794 	return cs->count >= STREAM_FLUSH_COUNT;
795 }
796 
797 static int process_sample_event(struct perf_tool *tool,
798 				union perf_event *_event,
799 				struct perf_sample *sample,
800 				struct evsel *evsel,
801 				struct machine *machine __maybe_unused)
802 {
803 	struct convert *c = container_of(tool, struct convert, tool);
804 	struct evsel_priv *priv = evsel->priv;
805 	struct ctf_writer *cw = &c->writer;
806 	struct ctf_stream *cs;
807 	struct bt_ctf_event_class *event_class;
808 	struct bt_ctf_event *event;
809 	int ret;
810 	unsigned long type = evsel->core.attr.sample_type;
811 
812 	if (WARN_ONCE(!priv, "Failed to setup all events.\n"))
813 		return 0;
814 
815 	event_class = priv->event_class;
816 
817 	/* update stats */
818 	c->events_count++;
819 	c->events_size += _event->header.size;
820 
821 	pr_time2(sample->time, "sample %" PRIu64 "\n", c->events_count);
822 
823 	event = bt_ctf_event_create(event_class);
824 	if (!event) {
825 		pr_err("Failed to create a CTF event\n");
826 		return -1;
827 	}
828 
829 	bt_ctf_clock_set_time(cw->clock, sample->time);
830 
831 	ret = add_generic_values(cw, event, evsel, sample);
832 	if (ret)
833 		return -1;
834 
835 	if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
836 		ret = add_tracepoint_values(cw, event_class, event,
837 					    evsel, sample);
838 		if (ret)
839 			return -1;
840 	}
841 
842 	if (type & PERF_SAMPLE_CALLCHAIN) {
843 		ret = add_callchain_output_values(event_class,
844 				event, sample->callchain);
845 		if (ret)
846 			return -1;
847 	}
848 
849 	if (evsel__is_bpf_output(evsel)) {
850 		ret = add_bpf_output_values(event_class, event, sample);
851 		if (ret)
852 			return -1;
853 	}
854 
855 	cs = ctf_stream(cw, get_sample_cpu(cw, sample, evsel));
856 	if (cs) {
857 		if (is_flush_needed(cs))
858 			ctf_stream__flush(cs);
859 
860 		cs->count++;
861 		bt_ctf_stream_append_event(cs->stream, event);
862 	}
863 
864 	bt_ctf_event_put(event);
865 	return cs ? 0 : -1;
866 }
867 
868 #define __NON_SAMPLE_SET_FIELD(_name, _type, _field) 	\
869 do {							\
870 	ret = value_set_##_type(cw, event, #_field, _event->_name._field);\
871 	if (ret)					\
872 		return -1;				\
873 } while(0)
874 
875 #define __FUNC_PROCESS_NON_SAMPLE(_name, body) 	\
876 static int process_##_name##_event(struct perf_tool *tool,	\
877 				   union perf_event *_event,	\
878 				   struct perf_sample *sample,	\
879 				   struct machine *machine)	\
880 {								\
881 	struct convert *c = container_of(tool, struct convert, tool);\
882 	struct ctf_writer *cw = &c->writer;			\
883 	struct bt_ctf_event_class *event_class = cw->_name##_class;\
884 	struct bt_ctf_event *event;				\
885 	struct ctf_stream *cs;					\
886 	int ret;						\
887 								\
888 	c->non_sample_count++;					\
889 	c->events_size += _event->header.size;			\
890 	event = bt_ctf_event_create(event_class);		\
891 	if (!event) {						\
892 		pr_err("Failed to create a CTF event\n");	\
893 		return -1;					\
894 	}							\
895 								\
896 	bt_ctf_clock_set_time(cw->clock, sample->time);		\
897 	body							\
898 	cs = ctf_stream(cw, 0);					\
899 	if (cs) {						\
900 		if (is_flush_needed(cs))			\
901 			ctf_stream__flush(cs);			\
902 								\
903 		cs->count++;					\
904 		bt_ctf_stream_append_event(cs->stream, event);	\
905 	}							\
906 	bt_ctf_event_put(event);				\
907 								\
908 	return perf_event__process_##_name(tool, _event, sample, machine);\
909 }
910 
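/*
 * Generate process_comm_event(), process_fork_event(), process_exit_event(),
 * process_mmap_event() and process_mmap2_event(). Each converts the side-band
 * event into a CTF event on stream 0 and then chains to the corresponding
 * perf_event__process_*() handler so normal session bookkeeping still runs.
 */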
911 __FUNC_PROCESS_NON_SAMPLE(comm,
912 	__NON_SAMPLE_SET_FIELD(comm, u32, pid);
913 	__NON_SAMPLE_SET_FIELD(comm, u32, tid);
914 	__NON_SAMPLE_SET_FIELD(comm, string, comm);
915 )
916 __FUNC_PROCESS_NON_SAMPLE(fork,
917 	__NON_SAMPLE_SET_FIELD(fork, u32, pid);
918 	__NON_SAMPLE_SET_FIELD(fork, u32, ppid);
919 	__NON_SAMPLE_SET_FIELD(fork, u32, tid);
920 	__NON_SAMPLE_SET_FIELD(fork, u32, ptid);
921 	__NON_SAMPLE_SET_FIELD(fork, u64, time);
922 )
923 
924 __FUNC_PROCESS_NON_SAMPLE(exit,
925 	__NON_SAMPLE_SET_FIELD(fork, u32, pid);
926 	__NON_SAMPLE_SET_FIELD(fork, u32, ppid);
927 	__NON_SAMPLE_SET_FIELD(fork, u32, tid);
928 	__NON_SAMPLE_SET_FIELD(fork, u32, ptid);
929 	__NON_SAMPLE_SET_FIELD(fork, u64, time);
930 )
931 __FUNC_PROCESS_NON_SAMPLE(mmap,
932 	__NON_SAMPLE_SET_FIELD(mmap, u32, pid);
933 	__NON_SAMPLE_SET_FIELD(mmap, u32, tid);
934 	__NON_SAMPLE_SET_FIELD(mmap, u64_hex, start);
935 	__NON_SAMPLE_SET_FIELD(mmap, string, filename);
936 )
937 __FUNC_PROCESS_NON_SAMPLE(mmap2,
938 	__NON_SAMPLE_SET_FIELD(mmap2, u32, pid);
939 	__NON_SAMPLE_SET_FIELD(mmap2, u32, tid);
940 	__NON_SAMPLE_SET_FIELD(mmap2, u64_hex, start);
941 	__NON_SAMPLE_SET_FIELD(mmap2, string, filename);
942 )
943 #undef __NON_SAMPLE_SET_FIELD
944 #undef __FUNC_PROCESS_NON_SAMPLE
945 
946 /* If dup < 0, add a prefix. Else, add _dupl_X suffix. */
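/*
 * e.g. change_name(NULL, "int", -1) returns "_int" (escape a CTF keyword),
 * while change_name(NULL, "comm", 1) returns "comm_dupl_1" (resolve a clash
 * with an already-added field).
 */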
947 static char *change_name(char *name, char *orig_name, int dup)
948 {
949 	char *new_name = NULL;
950 	size_t len;
951 
952 	if (!name)
953 		name = orig_name;
954 
955 	if (dup >= 10)
956 		goto out;
957 	/*
958 	 * Add '_' prefix to potential keywork.  According to
959 	 * Add '_' prefix to a potential keyword.  According to
960 	 * further CTF spec updating may require us to use '$'.
961 	 */
962 	if (dup < 0)
963 		len = strlen(name) + sizeof("_");
964 	else
965 		len = strlen(orig_name) + sizeof("_dupl_X");
966 
967 	new_name = malloc(len);
968 	if (!new_name)
969 		goto out;
970 
971 	if (dup < 0)
972 		snprintf(new_name, len, "_%s", name);
973 	else
974 		snprintf(new_name, len, "%s_dupl_%d", orig_name, dup);
975 
976 out:
977 	if (name != orig_name)
978 		free(name);
979 	return new_name;
980 }
981 
982 static int event_class_add_field(struct bt_ctf_event_class *event_class,
983 		struct bt_ctf_field_type *type,
984 		struct tep_format_field *field)
985 {
986 	struct bt_ctf_field_type *t = NULL;
987 	char *name;
988 	int dup = 1;
989 	int ret;
990 
991 	/* alias was already assigned */
992 	if (field->alias != field->name)
993 		return bt_ctf_event_class_add_field(event_class, type,
994 				(char *)field->alias);
995 
996 	name = field->name;
997 
998 	/* If 'name' is a keyword, add a prefix. */
999 	if (bt_ctf_validate_identifier(name))
1000 		name = change_name(name, field->name, -1);
1001 
1002 	if (!name) {
1003 		pr_err("Failed to fix invalid identifier.\n");
1004 		return -1;
1005 	}
1006 	while ((t = bt_ctf_event_class_get_field_by_name(event_class, name))) {
1007 		bt_ctf_field_type_put(t);
1008 		name = change_name(name, field->name, dup++);
1009 		if (!name) {
1010 			pr_err("Failed to create dup name for '%s'\n", field->name);
1011 			return -1;
1012 		}
1013 	}
1014 
1015 	ret = bt_ctf_event_class_add_field(event_class, type, name);
1016 	if (!ret)
1017 		field->alias = name;
1018 
1019 	return ret;
1020 }
1021 
1022 static int add_tracepoint_fields_types(struct ctf_writer *cw,
1023 				       struct tep_format_field *fields,
1024 				       struct bt_ctf_event_class *event_class)
1025 {
1026 	struct tep_format_field *field;
1027 	int ret;
1028 
1029 	for (field = fields; field; field = field->next) {
1030 		struct bt_ctf_field_type *type;
1031 		unsigned long flags = field->flags;
1032 
1033 		pr2("  field '%s'\n", field->name);
1034 
1035 		type = get_tracepoint_field_type(cw, field);
1036 		if (!type)
1037 			return -1;
1038 
1039 		/*
1040 		 * A string is an array of chars. For this we use the string
1041 		 * type and don't care that it is an array. What we don't
1042 		 * support is an array of strings.
1043 		 */
1044 		if (flags & TEP_FIELD_IS_STRING)
1045 			flags &= ~TEP_FIELD_IS_ARRAY;
1046 
1047 		if (flags & TEP_FIELD_IS_ARRAY)
1048 			type = bt_ctf_field_type_array_create(type, field->arraylen);
1049 
1050 		ret = event_class_add_field(event_class, type, field);
1051 
1052 		if (flags & TEP_FIELD_IS_ARRAY)
1053 			bt_ctf_field_type_put(type);
1054 
1055 		if (ret) {
1056 			pr_err("Failed to add field '%s': %d\n",
1057 					field->name, ret);
1058 			return -1;
1059 		}
1060 	}
1061 
1062 	return 0;
1063 }
1064 
1065 static int add_tracepoint_types(struct ctf_writer *cw,
1066 				struct evsel *evsel,
1067 				struct bt_ctf_event_class *class)
1068 {
1069 	struct tep_format_field *common_fields = evsel->tp_format->format.common_fields;
1070 	struct tep_format_field *fields        = evsel->tp_format->format.fields;
1071 	int ret;
1072 
1073 	ret = add_tracepoint_fields_types(cw, common_fields, class);
1074 	if (!ret)
1075 		ret = add_tracepoint_fields_types(cw, fields, class);
1076 
1077 	return ret;
1078 }
1079 
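/*
 * BPF output events carry an opaque byte payload. It is modelled as a u32
 * 'raw_len' count plus a 'raw_data' sequence of hex u32 words, mirroring
 * what add_bpf_output_values() fills in for each sample.
 */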
1080 static int add_bpf_output_types(struct ctf_writer *cw,
1081 				struct bt_ctf_event_class *class)
1082 {
1083 	struct bt_ctf_field_type *len_type = cw->data.u32;
1084 	struct bt_ctf_field_type *seq_base_type = cw->data.u32_hex;
1085 	struct bt_ctf_field_type *seq_type;
1086 	int ret;
1087 
1088 	ret = bt_ctf_event_class_add_field(class, len_type, "raw_len");
1089 	if (ret)
1090 		return ret;
1091 
1092 	seq_type = bt_ctf_field_type_sequence_create(seq_base_type, "raw_len");
1093 	if (!seq_type)
1094 		return -1;
1095 
1096 	return bt_ctf_event_class_add_field(class, seq_type, "raw_data");
1097 }
1098 
1099 static int add_generic_types(struct ctf_writer *cw, struct evsel *evsel,
1100 			     struct bt_ctf_event_class *event_class)
1101 {
1102 	u64 type = evsel->core.attr.sample_type;
1103 
1104 	/*
1105 	 * missing:
1106 	 *   PERF_SAMPLE_TIME         - not needed as we have it in
1107 	 *                              ctf event header
1108 	 *   PERF_SAMPLE_READ         - TODO
1109 	 *   PERF_SAMPLE_CALLCHAIN    - TODO
1110 	 *   PERF_SAMPLE_RAW          - tracepoint fields and BPF output
1111 	 *                              are handled separately
1112 	 *   PERF_SAMPLE_BRANCH_STACK - TODO
1113 	 *   PERF_SAMPLE_REGS_USER    - TODO
1114 	 *   PERF_SAMPLE_STACK_USER   - TODO
1115 	 */
1116 
1117 #define ADD_FIELD(cl, t, n)						\
1118 	do {								\
1119 		pr2("  field '%s'\n", n);				\
1120 		if (bt_ctf_event_class_add_field(cl, t, n)) {		\
1121 			pr_err("Failed to add field '%s'\n", n);	\
1122 			return -1;					\
1123 		}							\
1124 	} while (0)
1125 
1126 	if (type & PERF_SAMPLE_IP)
1127 		ADD_FIELD(event_class, cw->data.u64_hex, "perf_ip");
1128 
1129 	if (type & PERF_SAMPLE_TID) {
1130 		ADD_FIELD(event_class, cw->data.s32, "perf_tid");
1131 		ADD_FIELD(event_class, cw->data.s32, "perf_pid");
1132 	}
1133 
1134 	if ((type & PERF_SAMPLE_ID) ||
1135 	    (type & PERF_SAMPLE_IDENTIFIER))
1136 		ADD_FIELD(event_class, cw->data.u64, "perf_id");
1137 
1138 	if (type & PERF_SAMPLE_STREAM_ID)
1139 		ADD_FIELD(event_class, cw->data.u64, "perf_stream_id");
1140 
1141 	if (type & PERF_SAMPLE_PERIOD)
1142 		ADD_FIELD(event_class, cw->data.u64, "perf_period");
1143 
1144 	if (type & PERF_SAMPLE_WEIGHT)
1145 		ADD_FIELD(event_class, cw->data.u64, "perf_weight");
1146 
1147 	if (type & PERF_SAMPLE_DATA_SRC)
1148 		ADD_FIELD(event_class, cw->data.u64, "perf_data_src");
1149 
1150 	if (type & PERF_SAMPLE_TRANSACTION)
1151 		ADD_FIELD(event_class, cw->data.u64, "perf_transaction");
1152 
1153 	if (type & PERF_SAMPLE_CALLCHAIN) {
1154 		ADD_FIELD(event_class, cw->data.u32, "perf_callchain_size");
1155 		ADD_FIELD(event_class,
1156 			bt_ctf_field_type_sequence_create(
1157 				cw->data.u64_hex, "perf_callchain_size"),
1158 			"perf_callchain");
1159 	}
1160 
1161 #undef ADD_FIELD
1162 	return 0;
1163 }
1164 
1165 static int add_event(struct ctf_writer *cw, struct evsel *evsel)
1166 {
1167 	struct bt_ctf_event_class *event_class;
1168 	struct evsel_priv *priv;
1169 	const char *name = evsel__name(evsel);
1170 	int ret;
1171 
1172 	pr("Adding event '%s' (type %d)\n", name, evsel->core.attr.type);
1173 
1174 	event_class = bt_ctf_event_class_create(name);
1175 	if (!event_class)
1176 		return -1;
1177 
1178 	ret = add_generic_types(cw, evsel, event_class);
1179 	if (ret)
1180 		goto err;
1181 
1182 	if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
1183 		ret = add_tracepoint_types(cw, evsel, event_class);
1184 		if (ret)
1185 			goto err;
1186 	}
1187 
1188 	if (evsel__is_bpf_output(evsel)) {
1189 		ret = add_bpf_output_types(cw, event_class);
1190 		if (ret)
1191 			goto err;
1192 	}
1193 
1194 	ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);
1195 	if (ret) {
1196 		pr("Failed to add event class into stream.\n");
1197 		goto err;
1198 	}
1199 
1200 	priv = malloc(sizeof(*priv));
1201 	if (!priv)
1202 		goto err;
1203 
1204 	priv->event_class = event_class;
1205 	evsel->priv       = priv;
1206 	return 0;
1207 
1208 err:
1209 	bt_ctf_event_class_put(event_class);
1210 	pr_err("Failed to add event '%s'.\n", name);
1211 	return -1;
1212 }
1213 
1214 static int setup_events(struct ctf_writer *cw, struct perf_session *session)
1215 {
1216 	struct evlist *evlist = session->evlist;
1217 	struct evsel *evsel;
1218 	int ret;
1219 
1220 	evlist__for_each_entry(evlist, evsel) {
1221 		ret = add_event(cw, evsel);
1222 		if (ret)
1223 			return ret;
1224 	}
1225 	return 0;
1226 }
1227 
1228 #define __NON_SAMPLE_ADD_FIELD(t, n)						\
1229 	do {							\
1230 		pr2("  field '%s'\n", #n);			\
1231 		if (bt_ctf_event_class_add_field(event_class, cw->data.t, #n)) {\
1232 			pr_err("Failed to add field '%s'\n", #n);\
1233 			return -1;				\
1234 		}						\
1235 	} while(0)
1236 
1237 #define __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(_name, body) 		\
1238 static int add_##_name##_event(struct ctf_writer *cw)		\
1239 {								\
1240 	struct bt_ctf_event_class *event_class;			\
1241 	int ret;						\
1242 								\
1243 	pr("Adding "#_name" event\n");				\
1244 	event_class = bt_ctf_event_class_create("perf_" #_name);\
1245 	if (!event_class)					\
1246 		return -1;					\
1247 	body							\
1248 								\
1249 	ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);\
1250 	if (ret) {						\
1251 		pr("Failed to add event class '"#_name"' into stream.\n");\
1252 		return ret;					\
1253 	}							\
1254 								\
1255 	cw->_name##_class = event_class;			\
1256 	bt_ctf_event_class_put(event_class);			\
1257 	return 0;						\
1258 }
1259 
1260 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(comm,
1261 	__NON_SAMPLE_ADD_FIELD(u32, pid);
1262 	__NON_SAMPLE_ADD_FIELD(u32, tid);
1263 	__NON_SAMPLE_ADD_FIELD(string, comm);
1264 )
1265 
1266 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(fork,
1267 	__NON_SAMPLE_ADD_FIELD(u32, pid);
1268 	__NON_SAMPLE_ADD_FIELD(u32, ppid);
1269 	__NON_SAMPLE_ADD_FIELD(u32, tid);
1270 	__NON_SAMPLE_ADD_FIELD(u32, ptid);
1271 	__NON_SAMPLE_ADD_FIELD(u64, time);
1272 )
1273 
1274 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(exit,
1275 	__NON_SAMPLE_ADD_FIELD(u32, pid);
1276 	__NON_SAMPLE_ADD_FIELD(u32, ppid);
1277 	__NON_SAMPLE_ADD_FIELD(u32, tid);
1278 	__NON_SAMPLE_ADD_FIELD(u32, ptid);
1279 	__NON_SAMPLE_ADD_FIELD(u64, time);
1280 )
1281 
1282 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(mmap,
1283 	__NON_SAMPLE_ADD_FIELD(u32, pid);
1284 	__NON_SAMPLE_ADD_FIELD(u32, tid);
1285 	__NON_SAMPLE_ADD_FIELD(u64_hex, start);
1286 	__NON_SAMPLE_ADD_FIELD(string, filename);
1287 )
1288 
1289 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(mmap2,
1290 	__NON_SAMPLE_ADD_FIELD(u32, pid);
1291 	__NON_SAMPLE_ADD_FIELD(u32, tid);
1292 	__NON_SAMPLE_ADD_FIELD(u64_hex, start);
1293 	__NON_SAMPLE_ADD_FIELD(string, filename);
1294 )
1295 #undef __NON_SAMPLE_ADD_FIELD
1296 #undef __FUNC_ADD_NON_SAMPLE_EVENT_CLASS
1297 
1298 static int setup_non_sample_events(struct ctf_writer *cw,
1299 				   struct perf_session *session __maybe_unused)
1300 {
1301 	int ret;
1302 
1303 	ret = add_comm_event(cw);
1304 	if (ret)
1305 		return ret;
1306 	ret = add_exit_event(cw);
1307 	if (ret)
1308 		return ret;
1309 	ret = add_fork_event(cw);
1310 	if (ret)
1311 		return ret;
1312 	ret = add_mmap_event(cw);
1313 	if (ret)
1314 		return ret;
1315 	ret = add_mmap2_event(cw);
1316 	if (ret)
1317 		return ret;
1318 	return 0;
1319 }
1320 
1321 static void cleanup_events(struct perf_session *session)
1322 {
1323 	struct evlist *evlist = session->evlist;
1324 	struct evsel *evsel;
1325 
1326 	evlist__for_each_entry(evlist, evsel) {
1327 		struct evsel_priv *priv;
1328 
1329 		priv = evsel->priv;
1330 		bt_ctf_event_class_put(priv->event_class);
1331 		zfree(&evsel->priv);
1332 	}
1333 
1334 	evlist__delete(evlist);
1335 	session->evlist = NULL;
1336 }
1337 
1338 static int setup_streams(struct ctf_writer *cw, struct perf_session *session)
1339 {
1340 	struct ctf_stream **stream;
1341 	struct perf_header *ph = &session->header;
1342 	int ncpus;
1343 
1344 	/*
1345 	 * Try to get the number of cpus used in the data file,
1346 	 * if not present, fall back to MAX_CPUS.
1347 	 */
1348 	ncpus = ph->env.nr_cpus_avail ?: MAX_CPUS;
1349 
1350 	stream = zalloc(sizeof(*stream) * ncpus);
1351 	if (!stream) {
1352 		pr_err("Failed to allocate streams.\n");
1353 		return -ENOMEM;
1354 	}
1355 
1356 	cw->stream     = stream;
1357 	cw->stream_cnt = ncpus;
1358 	return 0;
1359 }
1360 
1361 static void free_streams(struct ctf_writer *cw)
1362 {
1363 	int cpu;
1364 
1365 	for (cpu = 0; cpu < cw->stream_cnt; cpu++)
1366 		ctf_stream__delete(cw->stream[cpu]);
1367 
1368 	zfree(&cw->stream);
1369 }
1370 
1371 static int ctf_writer__setup_env(struct ctf_writer *cw,
1372 				 struct perf_session *session)
1373 {
1374 	struct perf_header *header = &session->header;
1375 	struct bt_ctf_writer *writer = cw->writer;
1376 
1377 #define ADD(__n, __v)							\
1378 do {									\
1379 	if (bt_ctf_writer_add_environment_field(writer, __n, __v))	\
1380 		return -1;						\
1381 } while (0)
1382 
1383 	ADD("host",    header->env.hostname);
1384 	ADD("sysname", "Linux");
1385 	ADD("release", header->env.os_release);
1386 	ADD("version", header->env.version);
1387 	ADD("machine", header->env.arch);
1388 	ADD("domain", "kernel");
1389 	ADD("tracer_name", "perf");
1390 
1391 #undef ADD
1392 	return 0;
1393 }
1394 
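/*
 * The CTF clock is declared with 1 ns resolution (1 GHz). With --tod the
 * description becomes the recording clockid and the offset is set to
 * (tod_ns - clockid_ns) captured at record time, so converted timestamps
 * line up with wall-clock time; otherwise raw perf_clock values are kept.
 */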
1395 static int ctf_writer__setup_clock(struct ctf_writer *cw,
1396 				   struct perf_session *session,
1397 				   bool tod)
1398 {
1399 	struct bt_ctf_clock *clock = cw->clock;
1400 	const char *desc = "perf clock";
1401 	int64_t offset = 0;
1402 
1403 	if (tod) {
1404 		struct perf_env *env = &session->header.env;
1405 
1406 		if (!env->clock.enabled) {
1407 			pr_err("Can't provide --tod time, missing clock data. "
1408 			       "Please record with -k/--clockid option.\n");
1409 			return -1;
1410 		}
1411 
1412 		desc   = clockid_name(env->clock.clockid);
1413 		offset = env->clock.tod_ns - env->clock.clockid_ns;
1414 	}
1415 
1416 #define SET(__n, __v)				\
1417 do {						\
1418 	if (bt_ctf_clock_set_##__n(clock, __v))	\
1419 		return -1;			\
1420 } while (0)
1421 
1422 	SET(frequency,   1000000000);
1423 	SET(offset,      offset);
1424 	SET(description, desc);
1425 	SET(precision,   10);
1426 	SET(is_absolute, 0);
1427 
1428 #undef SET
1429 	return 0;
1430 }
1431 
1432 static struct bt_ctf_field_type *create_int_type(int size, bool sign, bool hex)
1433 {
1434 	struct bt_ctf_field_type *type;
1435 
1436 	type = bt_ctf_field_type_integer_create(size);
1437 	if (!type)
1438 		return NULL;
1439 
1440 	if (sign &&
1441 	    bt_ctf_field_type_integer_set_signed(type, 1))
1442 		goto err;
1443 
1444 	if (hex &&
1445 	    bt_ctf_field_type_integer_set_base(type, BT_CTF_INTEGER_BASE_HEXADECIMAL))
1446 		goto err;
1447 
1448 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
1449 	bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_BIG_ENDIAN);
1450 #else
1451 	bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_LITTLE_ENDIAN);
1452 #endif
1453 
1454 	pr2("Created type: INTEGER %d-bit %ssigned %s\n",
1455 	    size, sign ? "" : "un", hex ? "hex" : "");
1456 	return type;
1457 
1458 err:
1459 	bt_ctf_field_type_put(type);
1460 	return NULL;
1461 }
1462 
1463 static void ctf_writer__cleanup_data(struct ctf_writer *cw)
1464 {
1465 	unsigned int i;
1466 
1467 	for (i = 0; i < ARRAY_SIZE(cw->data.array); i++)
1468 		bt_ctf_field_type_put(cw->data.array[i]);
1469 }
1470 
1471 static int ctf_writer__init_data(struct ctf_writer *cw)
1472 {
1473 #define CREATE_INT_TYPE(type, size, sign, hex)		\
1474 do {							\
1475 	(type) = create_int_type(size, sign, hex);	\
1476 	if (!(type))					\
1477 		goto err;				\
1478 } while (0)
1479 
1480 	CREATE_INT_TYPE(cw->data.s64, 64, true,  false);
1481 	CREATE_INT_TYPE(cw->data.u64, 64, false, false);
1482 	CREATE_INT_TYPE(cw->data.s32, 32, true,  false);
1483 	CREATE_INT_TYPE(cw->data.u32, 32, false, false);
1484 	CREATE_INT_TYPE(cw->data.u32_hex, 32, false, true);
1485 	CREATE_INT_TYPE(cw->data.u64_hex, 64, false, true);
1486 
1487 	cw->data.string  = bt_ctf_field_type_string_create();
1488 	if (cw->data.string)
1489 		return 0;
1490 
1491 err:
1492 	ctf_writer__cleanup_data(cw);
1493 	pr_err("Failed to create data types.\n");
1494 	return -1;
1495 }
1496 
1497 static void ctf_writer__cleanup(struct ctf_writer *cw)
1498 {
1499 	ctf_writer__cleanup_data(cw);
1500 
1501 	bt_ctf_clock_put(cw->clock);
1502 	free_streams(cw);
1503 	bt_ctf_stream_class_put(cw->stream_class);
1504 	bt_ctf_writer_put(cw->writer);
1505 
1506 	/* and NULL all the pointers */
1507 	memset(cw, 0, sizeof(*cw));
1508 }
1509 
1510 static int ctf_writer__init(struct ctf_writer *cw, const char *path,
1511 			    struct perf_session *session, bool tod)
1512 {
1513 	struct bt_ctf_writer		*writer;
1514 	struct bt_ctf_stream_class	*stream_class;
1515 	struct bt_ctf_clock		*clock;
1516 	struct bt_ctf_field_type	*pkt_ctx_type;
1517 	int				ret;
1518 
1519 	/* CTF writer */
1520 	writer = bt_ctf_writer_create(path);
1521 	if (!writer)
1522 		goto err;
1523 
1524 	cw->writer = writer;
1525 
1526 	/* CTF clock */
1527 	clock = bt_ctf_clock_create("perf_clock");
1528 	if (!clock) {
1529 		pr("Failed to create CTF clock.\n");
1530 		goto err_cleanup;
1531 	}
1532 
1533 	cw->clock = clock;
1534 
1535 	if (ctf_writer__setup_clock(cw, session, tod)) {
1536 		pr("Failed to setup CTF clock.\n");
1537 		goto err_cleanup;
1538 	}
1539 
1540 	/* CTF stream class */
1541 	stream_class = bt_ctf_stream_class_create("perf_stream");
1542 	if (!stream_class) {
1543 		pr("Failed to create CTF stream class.\n");
1544 		goto err_cleanup;
1545 	}
1546 
1547 	cw->stream_class = stream_class;
1548 
1549 	/* CTF clock stream setup */
1550 	if (bt_ctf_stream_class_set_clock(stream_class, clock)) {
1551 		pr("Failed to assign CTF clock to stream class.\n");
1552 		goto err_cleanup;
1553 	}
1554 
1555 	if (ctf_writer__init_data(cw))
1556 		goto err_cleanup;
1557 
1558 	/* Add cpu_id for packet context */
1559 	pkt_ctx_type = bt_ctf_stream_class_get_packet_context_type(stream_class);
1560 	if (!pkt_ctx_type)
1561 		goto err_cleanup;
1562 
1563 	ret = bt_ctf_field_type_structure_add_field(pkt_ctx_type, cw->data.u32, "cpu_id");
1564 	bt_ctf_field_type_put(pkt_ctx_type);
1565 	if (ret)
1566 		goto err_cleanup;
1567 
1568 	/* CTF clock writer setup */
1569 	if (bt_ctf_writer_add_clock(writer, clock)) {
1570 		pr("Failed to assign CTF clock to writer.\n");
1571 		goto err_cleanup;
1572 	}
1573 
1574 	return 0;
1575 
1576 err_cleanup:
1577 	ctf_writer__cleanup(cw);
1578 err:
1579 	pr_err("Failed to setup CTF writer.\n");
1580 	return -1;
1581 }
1582 
1583 static int ctf_writer__flush_streams(struct ctf_writer *cw)
1584 {
1585 	int cpu, ret = 0;
1586 
1587 	for (cpu = 0; cpu < cw->stream_cnt && !ret; cpu++)
1588 		ret = ctf_stream__flush(cw->stream[cpu]);
1589 
1590 	return ret;
1591 }
1592 
1593 static int convert__config(const char *var, const char *value, void *cb)
1594 {
1595 	struct convert *c = cb;
1596 
1597 	if (!strcmp(var, "convert.queue-size"))
1598 		return perf_config_u64(&c->queue_size, var, value);
1599 
1600 	return 0;
1601 }
1602 
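/*
 * Entry point for 'perf data convert --to-ctf <path>': read the perf.data
 * file given by 'input' and write a CTF trace under 'path'. opts->all also
 * converts the non-sample (comm/fork/exit/mmap/mmap2) events, and opts->tod
 * requests wall-clock timestamps (see ctf_writer__setup_clock()).
 */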
1603 int bt_convert__perf2ctf(const char *input, const char *path,
1604 			 struct perf_data_convert_opts *opts)
1605 {
1606 	struct perf_session *session;
1607 	struct perf_data data = {
1608 		.path	   = input,
1609 		.mode      = PERF_DATA_MODE_READ,
1610 		.force     = opts->force,
1611 	};
1612 	struct convert c = {
1613 		.tool = {
1614 			.sample          = process_sample_event,
1615 			.mmap            = perf_event__process_mmap,
1616 			.mmap2           = perf_event__process_mmap2,
1617 			.comm            = perf_event__process_comm,
1618 			.exit            = perf_event__process_exit,
1619 			.fork            = perf_event__process_fork,
1620 			.lost            = perf_event__process_lost,
1621 			.tracing_data    = perf_event__process_tracing_data,
1622 			.build_id        = perf_event__process_build_id,
1623 			.namespaces      = perf_event__process_namespaces,
1624 			.ordered_events  = true,
1625 			.ordering_requires_timestamps = true,
1626 		},
1627 	};
1628 	struct ctf_writer *cw = &c.writer;
1629 	int err;
1630 
1631 	if (opts->all) {
1632 		c.tool.comm = process_comm_event;
1633 		c.tool.exit = process_exit_event;
1634 		c.tool.fork = process_fork_event;
1635 		c.tool.mmap = process_mmap_event;
1636 		c.tool.mmap2 = process_mmap2_event;
1637 	}
1638 
1639 	err = perf_config(convert__config, &c);
1640 	if (err)
1641 		return err;
1642 
1643 	err = -1;
1644 	/* perf.data session */
1645 	session = perf_session__new(&data, &c.tool);
1646 	if (IS_ERR(session))
1647 		return PTR_ERR(session);
1648 
1649 	/* CTF writer */
1650 	if (ctf_writer__init(cw, path, session, opts->tod))
1651 		goto free_session;
1652 
1653 	if (c.queue_size) {
1654 		ordered_events__set_alloc_size(&session->ordered_events,
1655 					       c.queue_size);
1656 	}
1657 
1658 	/* CTF writer env/clock setup  */
1659 	if (ctf_writer__setup_env(cw, session))
1660 		goto free_writer;
1661 
1662 	/* CTF events setup */
1663 	if (setup_events(cw, session))
1664 		goto free_writer;
1665 
1666 	if (opts->all && setup_non_sample_events(cw, session))
1667 		goto free_writer;
1668 
1669 	if (setup_streams(cw, session))
1670 		goto free_writer;
1671 
1672 	err = perf_session__process_events(session);
1673 	if (!err)
1674 		err = ctf_writer__flush_streams(cw);
1675 	else
1676 		pr_err("Error during conversion.\n");
1677 
1678 	fprintf(stderr,
1679 		"[ perf data convert: Converted '%s' into CTF data '%s' ]\n",
1680 		data.path, path);
1681 
1682 	fprintf(stderr,
1683 		"[ perf data convert: Converted and wrote %.3f MB (%" PRIu64 " samples",
1684 		(double) c.events_size / 1024.0 / 1024.0,
1685 		c.events_count);
1686 
1687 	if (!c.non_sample_count)
1688 		fprintf(stderr, ") ]\n");
1689 	else
1690 		fprintf(stderr, ", %" PRIu64 " non-samples) ]\n", c.non_sample_count);
1691 
1692 	cleanup_events(session);
1693 	perf_session__delete(session);
1694 	ctf_writer__cleanup(cw);
1695 
1696 	return err;
1697 
1698 free_writer:
1699 	ctf_writer__cleanup(cw);
1700 free_session:
1701 	perf_session__delete(session);
1702 	pr_err("Error during conversion setup.\n");
1703 	return err;
1704 }
1705