xref: /linux/tools/perf/util/data-convert-bt.c (revision bf4afc53b77aeaa48b5409da5c8da6bb4eff7f43)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * CTF writing support via babeltrace.
4  *
5  * Copyright (C) 2014, Jiri Olsa <jolsa@redhat.com>
6  * Copyright (C) 2014, Sebastian Andrzej Siewior <bigeasy@linutronix.de>
7  */
8 
9 #include <errno.h>
10 #include <inttypes.h>
11 #include <linux/compiler.h>
12 #include <linux/kernel.h>
13 #include <linux/zalloc.h>
14 #include <babeltrace/ctf-writer/writer.h>
15 #include <babeltrace/ctf-writer/clock.h>
16 #include <babeltrace/ctf-writer/stream.h>
17 #include <babeltrace/ctf-writer/event.h>
18 #include <babeltrace/ctf-writer/event-types.h>
19 #include <babeltrace/ctf-writer/event-fields.h>
20 #include <babeltrace/ctf-ir/utils.h>
21 #include <babeltrace/ctf/events.h>
22 #include "asm/bug.h"
23 #include "data-convert.h"
24 #include "session.h"
25 #include "debug.h"
26 #include "tool.h"
27 #include "evlist.h"
28 #include "evsel.h"
29 #include "machine.h"
30 #include "config.h"
31 #include <linux/ctype.h>
32 #include <linux/err.h>
33 #include <linux/time64.h>
34 #include "util.h"
35 #include "clockid.h"
36 #include "util/sample.h"
37 #include "util/time-utils.h"
38 #include "header.h"
39 
40 #ifdef HAVE_LIBTRACEEVENT
41 #include <event-parse.h>
42 #endif
43 
/* Debug printing helpers gated on the 'data-convert' debug variable. */
#define pr_N(n, fmt, ...) \
	eprintf(n, debug_data_convert, fmt, ##__VA_ARGS__)

/* Level 1 and level 2 debug messages. */
#define pr(fmt, ...)  pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)
#define pr2(fmt, ...) pr_N(2, pr_fmt(fmt), ##__VA_ARGS__)

/* Level 2 debug message prefixed with timestamp 't'. */
#define pr_time2(t, fmt, ...) pr_time_N(2, debug_data_convert, t, pr_fmt(fmt), ##__VA_ARGS__)
51 
/* Per-evsel private data: the CTF event class created for this event. */
struct evsel_priv {
	struct bt_ctf_event_class *event_class;
};
55 
/* Upper bound on the number of per-CPU CTF streams. */
#define MAX_CPUS	4096

/* One CTF output stream, bound to a single CPU. */
struct ctf_stream {
	struct bt_ctf_stream *stream;
	int cpu;	/* CPU this stream carries events for */
	u32 count;	/* events appended since the last flush */
};
63 
/*
 * State for writing one CTF trace: the babeltrace writer object, the
 * per-CPU streams, the shared scalar field types and the event
 * classes for the non-sample (comm/exit/fork/mmap/mmap2) events.
 */
struct ctf_writer {
	/* writer primitives */
	struct bt_ctf_writer		 *writer;
	struct ctf_stream		**stream;
	int				  stream_cnt;
	struct bt_ctf_stream_class	 *stream_class;
	struct bt_ctf_clock		 *clock;

	/* data types */
	union {
		struct {
			struct bt_ctf_field_type	*s64;
			struct bt_ctf_field_type	*u64;
			struct bt_ctf_field_type	*s32;
			struct bt_ctf_field_type	*u32;
			struct bt_ctf_field_type	*string;
			struct bt_ctf_field_type	*u32_hex;
			struct bt_ctf_field_type	*u64_hex;
		};
		/*
		 * Must mirror the member count of the struct above so
		 * code iterating over 'array' (e.g. type cleanup)
		 * covers every type; the old size of 6 left u64_hex
		 * out and leaked it.
		 */
		struct bt_ctf_field_type *array[7];
	} data;
	struct bt_ctf_event_class	*comm_class;
	struct bt_ctf_event_class	*exit_class;
	struct bt_ctf_event_class	*fork_class;
	struct bt_ctf_event_class	*mmap_class;
	struct bt_ctf_event_class	*mmap2_class;
};
91 
/*
 * Top level conversion state: the perf tool callbacks, the CTF writer
 * and statistics gathered while processing the perf.data file.
 */
struct convert {
	struct perf_tool	tool;
	struct ctf_writer	writer;

	/* --time filter ranges (see perf_time__ranges_skip_sample()). */
	struct perf_time_interval *ptime_range;
	int range_size;
	int range_num;

	u64			events_size;	/* bytes of converted events */
	u64			events_count;	/* converted sample events */
	u64			non_sample_count;	/* converted non-sample events */
	u64			skipped;	/* samples skipped by time filter */

	/* Ordered events configured queue size. */
	u64			queue_size;
};
108 
109 static int value_set(struct bt_ctf_field_type *type,
110 		     struct bt_ctf_event *event,
111 		     const char *name, u64 val)
112 {
113 	struct bt_ctf_field *field;
114 	bool sign = bt_ctf_field_type_integer_get_signed(type);
115 	int ret;
116 
117 	field = bt_ctf_field_create(type);
118 	if (!field) {
119 		pr_err("failed to create a field %s\n", name);
120 		return -1;
121 	}
122 
123 	if (sign) {
124 		ret = bt_ctf_field_signed_integer_set_value(field, val);
125 		if (ret) {
126 			pr_err("failed to set field value %s\n", name);
127 			goto err;
128 		}
129 	} else {
130 		ret = bt_ctf_field_unsigned_integer_set_value(field, val);
131 		if (ret) {
132 			pr_err("failed to set field value %s\n", name);
133 			goto err;
134 		}
135 	}
136 
137 	ret = bt_ctf_event_set_payload(event, name, field);
138 	if (ret) {
139 		pr_err("failed to set payload %s\n", name);
140 		goto err;
141 	}
142 
143 	pr2("  SET [%s = %" PRIu64 "]\n", name, val);
144 
145 err:
146 	bt_ctf_field_put(field);
147 	return ret;
148 }
149 
150 #define __FUNC_VALUE_SET(_name, _val_type)				\
151 static __maybe_unused int value_set_##_name(struct ctf_writer *cw,	\
152 			     struct bt_ctf_event *event,		\
153 			     const char *name,				\
154 			     _val_type val)				\
155 {									\
156 	struct bt_ctf_field_type *type = cw->data._name;		\
157 	return value_set(type, event, name, (u64) val);			\
158 }
159 
160 #define FUNC_VALUE_SET(_name) __FUNC_VALUE_SET(_name, _name)
161 
162 FUNC_VALUE_SET(s32)
163 FUNC_VALUE_SET(u32)
164 FUNC_VALUE_SET(s64)
165 FUNC_VALUE_SET(u64)
166 __FUNC_VALUE_SET(u64_hex, u64)
167 
168 static int string_set_value(struct bt_ctf_field *field, const char *string);
169 static __maybe_unused int
170 value_set_string(struct ctf_writer *cw, struct bt_ctf_event *event,
171 		 const char *name, const char *string)
172 {
173 	struct bt_ctf_field_type *type = cw->data.string;
174 	struct bt_ctf_field *field;
175 	int ret = 0;
176 
177 	field = bt_ctf_field_create(type);
178 	if (!field) {
179 		pr_err("failed to create a field %s\n", name);
180 		return -1;
181 	}
182 
183 	ret = string_set_value(field, string);
184 	if (ret) {
185 		pr_err("failed to set value %s\n", name);
186 		goto err_put_field;
187 	}
188 
189 	ret = bt_ctf_event_set_payload(event, name, field);
190 	if (ret)
191 		pr_err("failed to set payload %s\n", name);
192 
193 err_put_field:
194 	bt_ctf_field_put(field);
195 	return ret;
196 }
197 
198 static struct bt_ctf_field_type*
199 get_tracepoint_field_type(struct ctf_writer *cw, struct tep_format_field *field)
200 {
201 	unsigned long flags = field->flags;
202 
203 	if (flags & TEP_FIELD_IS_STRING)
204 		return cw->data.string;
205 
206 	if (!(flags & TEP_FIELD_IS_SIGNED)) {
207 		/* unsigned long are mostly pointers */
208 		if (flags & TEP_FIELD_IS_LONG || flags & TEP_FIELD_IS_POINTER)
209 			return cw->data.u64_hex;
210 	}
211 
212 	if (flags & TEP_FIELD_IS_SIGNED) {
213 		if (field->size == 8)
214 			return cw->data.s64;
215 		else
216 			return cw->data.s32;
217 	}
218 
219 	if (field->size == 8)
220 		return cw->data.u64;
221 	else
222 		return cw->data.u32;
223 }
224 
/*
 * Sign-extend 'value_int', read as a 'size'-byte signed integer, to a
 * full 64-bit value. Values with the sign bit clear, 8-byte values
 * and unexpected sizes are returned unchanged.
 */
static unsigned long long adjust_signedness(unsigned long long value_int, int size)
{
	unsigned long long sign_bits;

	/* sign_bits = complement of ((1 << (size * 8 - 1)) - 1). */
	switch (size) {
	case 1:
		sign_bits = ~0x7fULL;
		break;
	case 2:
		sign_bits = ~0x7fffULL;
		break;
	case 4:
		sign_bits = ~0x7fffffffULL;
		break;
	case 8:
		/* Already 64 bits wide: nothing to extend. */
	default:
		/* Unexpected size (BUG!): leave the value alone. */
		return value_int;
	}

	/* Sign bit clear means a non-negative value: no adjustment. */
	if (!(value_int & sign_bits))
		return value_int;

	/* Fill the upper bits with 1s to form a negative long long. */
	return value_int | sign_bits;
}
261 
/*
 * Store 'string' into a CTF string field, escaping every unprintable
 * byte as "\xHH". The common all-printable case avoids any allocation
 * and passes the string through untouched.
 */
static int string_set_value(struct bt_ctf_field *field, const char *string)
{
	char *buffer = NULL;
	size_t len = strlen(string), i, p;
	int err;

	/* 'i' indexes the input, 'p' the (possibly longer) escaped output. */
	for (i = p = 0; i < len; i++, p++) {
		if (isprint(string[i])) {
			if (!buffer)
				continue;
			buffer[p] = string[i];
		} else {
			char numstr[5];

			snprintf(numstr, sizeof(numstr), "\\x%02x",
				 (unsigned int)(string[i]) & 0xff);

			if (!buffer) {
				/*
				 * First unprintable byte found: allocate for
				 * the worst case (each remaining byte expands
				 * to 4 chars) and copy the printable prefix.
				 */
				buffer = zalloc(i + (len - i) * 4 + 2);
				if (!buffer) {
					pr_err("failed to set unprintable string '%s'\n", string);
					return bt_ctf_field_string_set_value(field, "UNPRINTABLE-STRING");
				}
				if (i > 0)
					strncpy(buffer, string, i);
			}
			/* Copy the 4-char escape; numstr's NUL is dropped. */
			memcpy(buffer + p, numstr, 4);
			p += 3;
		}
	}

	/* No escaping was needed: set the original string directly. */
	if (!buffer)
		return bt_ctf_field_string_set_value(field, string);
	err = bt_ctf_field_string_set_value(field, buffer);
	free(buffer);
	return err;
}
299 
300 static int add_tracepoint_field_value(struct ctf_writer *cw,
301 				      struct bt_ctf_event_class *event_class,
302 				      struct bt_ctf_event *event,
303 				      struct perf_sample *sample,
304 				      struct tep_format_field *fmtf)
305 {
306 	struct bt_ctf_field_type *type;
307 	struct bt_ctf_field *array_field;
308 	struct bt_ctf_field *field;
309 	const char *name = fmtf->name;
310 	void *data = sample->raw_data;
311 	unsigned long flags = fmtf->flags;
312 	unsigned int n_items;
313 	unsigned int i;
314 	unsigned int offset;
315 	unsigned int len;
316 	int ret;
317 
318 	name = fmtf->alias;
319 	offset = fmtf->offset;
320 	len = fmtf->size;
321 	if (flags & TEP_FIELD_IS_STRING)
322 		flags &= ~TEP_FIELD_IS_ARRAY;
323 
324 	if (flags & TEP_FIELD_IS_DYNAMIC) {
325 		unsigned long long tmp_val;
326 
327 		tmp_val = tep_read_number(fmtf->event->tep,
328 					  data + offset, len);
329 		offset = tmp_val;
330 		len = offset >> 16;
331 		offset &= 0xffff;
332 		if (tep_field_is_relative(flags))
333 			offset += fmtf->offset + fmtf->size;
334 	}
335 
336 	if (flags & TEP_FIELD_IS_ARRAY) {
337 
338 		type = bt_ctf_event_class_get_field_by_name(
339 				event_class, name);
340 		array_field = bt_ctf_field_create(type);
341 		bt_ctf_field_type_put(type);
342 		if (!array_field) {
343 			pr_err("Failed to create array type %s\n", name);
344 			return -1;
345 		}
346 
347 		len = fmtf->size / fmtf->arraylen;
348 		n_items = fmtf->arraylen;
349 	} else {
350 		n_items = 1;
351 		array_field = NULL;
352 	}
353 
354 	type = get_tracepoint_field_type(cw, fmtf);
355 
356 	for (i = 0; i < n_items; i++) {
357 		if (flags & TEP_FIELD_IS_ARRAY)
358 			field = bt_ctf_field_array_get_field(array_field, i);
359 		else
360 			field = bt_ctf_field_create(type);
361 
362 		if (!field) {
363 			pr_err("failed to create a field %s\n", name);
364 			return -1;
365 		}
366 
367 		if (flags & TEP_FIELD_IS_STRING)
368 			ret = string_set_value(field, data + offset + i * len);
369 		else {
370 			unsigned long long value_int;
371 
372 			value_int = tep_read_number(
373 					fmtf->event->tep,
374 					data + offset + i * len, len);
375 
376 			if (!(flags & TEP_FIELD_IS_SIGNED))
377 				ret = bt_ctf_field_unsigned_integer_set_value(
378 						field, value_int);
379 			else
380 				ret = bt_ctf_field_signed_integer_set_value(
381 						field, adjust_signedness(value_int, len));
382 		}
383 
384 		if (ret) {
385 			pr_err("failed to set file value %s\n", name);
386 			goto err_put_field;
387 		}
388 		if (!(flags & TEP_FIELD_IS_ARRAY)) {
389 			ret = bt_ctf_event_set_payload(event, name, field);
390 			if (ret) {
391 				pr_err("failed to set payload %s\n", name);
392 				goto err_put_field;
393 			}
394 		}
395 		bt_ctf_field_put(field);
396 	}
397 	if (flags & TEP_FIELD_IS_ARRAY) {
398 		ret = bt_ctf_event_set_payload(event, name, array_field);
399 		if (ret) {
400 			pr_err("Failed add payload array %s\n", name);
401 			return -1;
402 		}
403 		bt_ctf_field_put(array_field);
404 	}
405 	return 0;
406 
407 err_put_field:
408 	bt_ctf_field_put(field);
409 	return -1;
410 }
411 
412 static int add_tracepoint_fields_values(struct ctf_writer *cw,
413 					struct bt_ctf_event_class *event_class,
414 					struct bt_ctf_event *event,
415 					struct tep_format_field *fields,
416 					struct perf_sample *sample)
417 {
418 	struct tep_format_field *field;
419 	int ret;
420 
421 	for (field = fields; field; field = field->next) {
422 		ret = add_tracepoint_field_value(cw, event_class, event, sample,
423 				field);
424 		if (ret)
425 			return -1;
426 	}
427 	return 0;
428 }
429 
430 static int add_tracepoint_values(struct ctf_writer *cw,
431 				 struct bt_ctf_event_class *event_class,
432 				 struct bt_ctf_event *event,
433 				 struct evsel *evsel,
434 				 struct perf_sample *sample)
435 {
436 	const struct tep_event *tp_format = evsel__tp_format(evsel);
437 	struct tep_format_field *common_fields = tp_format->format.common_fields;
438 	struct tep_format_field *fields        = tp_format->format.fields;
439 	int ret;
440 
441 	ret = add_tracepoint_fields_values(cw, event_class, event,
442 					   common_fields, sample);
443 	if (!ret)
444 		ret = add_tracepoint_fields_values(cw, event_class, event,
445 						   fields, sample);
446 
447 	return ret;
448 }
449 
450 static int
451 add_bpf_output_values(struct bt_ctf_event_class *event_class,
452 		      struct bt_ctf_event *event,
453 		      struct perf_sample *sample)
454 {
455 	struct bt_ctf_field_type *len_type, *seq_type;
456 	struct bt_ctf_field *len_field, *seq_field;
457 	unsigned int raw_size = sample->raw_size;
458 	unsigned int nr_elements = raw_size / sizeof(u32);
459 	unsigned int i;
460 	int ret;
461 
462 	if (nr_elements * sizeof(u32) != raw_size)
463 		pr_warning("Incorrect raw_size (%u) in bpf output event, skip %zu bytes\n",
464 			   raw_size, nr_elements * sizeof(u32) - raw_size);
465 
466 	len_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_len");
467 	len_field = bt_ctf_field_create(len_type);
468 	if (!len_field) {
469 		pr_err("failed to create 'raw_len' for bpf output event\n");
470 		ret = -1;
471 		goto put_len_type;
472 	}
473 
474 	ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements);
475 	if (ret) {
476 		pr_err("failed to set field value for raw_len\n");
477 		goto put_len_field;
478 	}
479 	ret = bt_ctf_event_set_payload(event, "raw_len", len_field);
480 	if (ret) {
481 		pr_err("failed to set payload to raw_len\n");
482 		goto put_len_field;
483 	}
484 
485 	seq_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_data");
486 	seq_field = bt_ctf_field_create(seq_type);
487 	if (!seq_field) {
488 		pr_err("failed to create 'raw_data' for bpf output event\n");
489 		ret = -1;
490 		goto put_seq_type;
491 	}
492 
493 	ret = bt_ctf_field_sequence_set_length(seq_field, len_field);
494 	if (ret) {
495 		pr_err("failed to set length of 'raw_data'\n");
496 		goto put_seq_field;
497 	}
498 
499 	for (i = 0; i < nr_elements; i++) {
500 		struct bt_ctf_field *elem_field =
501 			bt_ctf_field_sequence_get_field(seq_field, i);
502 
503 		ret = bt_ctf_field_unsigned_integer_set_value(elem_field,
504 				((u32 *)(sample->raw_data))[i]);
505 
506 		bt_ctf_field_put(elem_field);
507 		if (ret) {
508 			pr_err("failed to set raw_data[%d]\n", i);
509 			goto put_seq_field;
510 		}
511 	}
512 
513 	ret = bt_ctf_event_set_payload(event, "raw_data", seq_field);
514 	if (ret)
515 		pr_err("failed to set payload for raw_data\n");
516 
517 put_seq_field:
518 	bt_ctf_field_put(seq_field);
519 put_seq_type:
520 	bt_ctf_field_type_put(seq_type);
521 put_len_field:
522 	bt_ctf_field_put(len_field);
523 put_len_type:
524 	bt_ctf_field_type_put(len_type);
525 	return ret;
526 }
527 
528 static int
529 add_callchain_output_values(struct bt_ctf_event_class *event_class,
530 		      struct bt_ctf_event *event,
531 		      struct ip_callchain *callchain)
532 {
533 	struct bt_ctf_field_type *len_type, *seq_type;
534 	struct bt_ctf_field *len_field, *seq_field;
535 	unsigned int nr_elements = callchain->nr;
536 	unsigned int i;
537 	int ret;
538 
539 	len_type = bt_ctf_event_class_get_field_by_name(
540 			event_class, "perf_callchain_size");
541 	len_field = bt_ctf_field_create(len_type);
542 	if (!len_field) {
543 		pr_err("failed to create 'perf_callchain_size' for callchain output event\n");
544 		ret = -1;
545 		goto put_len_type;
546 	}
547 
548 	ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements);
549 	if (ret) {
550 		pr_err("failed to set field value for perf_callchain_size\n");
551 		goto put_len_field;
552 	}
553 	ret = bt_ctf_event_set_payload(event, "perf_callchain_size", len_field);
554 	if (ret) {
555 		pr_err("failed to set payload to perf_callchain_size\n");
556 		goto put_len_field;
557 	}
558 
559 	seq_type = bt_ctf_event_class_get_field_by_name(
560 			event_class, "perf_callchain");
561 	seq_field = bt_ctf_field_create(seq_type);
562 	if (!seq_field) {
563 		pr_err("failed to create 'perf_callchain' for callchain output event\n");
564 		ret = -1;
565 		goto put_seq_type;
566 	}
567 
568 	ret = bt_ctf_field_sequence_set_length(seq_field, len_field);
569 	if (ret) {
570 		pr_err("failed to set length of 'perf_callchain'\n");
571 		goto put_seq_field;
572 	}
573 
574 	for (i = 0; i < nr_elements; i++) {
575 		struct bt_ctf_field *elem_field =
576 			bt_ctf_field_sequence_get_field(seq_field, i);
577 
578 		ret = bt_ctf_field_unsigned_integer_set_value(elem_field,
579 				((u64 *)(callchain->ips))[i]);
580 
581 		bt_ctf_field_put(elem_field);
582 		if (ret) {
583 			pr_err("failed to set callchain[%d]\n", i);
584 			goto put_seq_field;
585 		}
586 	}
587 
588 	ret = bt_ctf_event_set_payload(event, "perf_callchain", seq_field);
589 	if (ret)
590 		pr_err("failed to set payload for raw_data\n");
591 
592 put_seq_field:
593 	bt_ctf_field_put(seq_field);
594 put_seq_type:
595 	bt_ctf_field_type_put(seq_type);
596 put_len_field:
597 	bt_ctf_field_put(len_field);
598 put_len_type:
599 	bt_ctf_field_type_put(len_type);
600 	return ret;
601 }
602 
/*
 * Write the generic (non-tracepoint) sample values into 'event'. Only
 * values present in the evsel's sample_type are emitted; the field
 * names match those declared by add_generic_types().
 */
static int add_generic_values(struct ctf_writer *cw,
			      struct bt_ctf_event *event,
			      struct evsel *evsel,
			      struct perf_sample *sample)
{
	u64 type = evsel->core.attr.sample_type;
	int ret;

	/*
	 * missing:
	 *   PERF_SAMPLE_TIME         - not needed as we have it in
	 *                              ctf event header
	 *   PERF_SAMPLE_READ         - TODO
	 *   PERF_SAMPLE_RAW          - tracepoint fields are handled separately
	 *   PERF_SAMPLE_BRANCH_STACK - TODO
	 *   PERF_SAMPLE_REGS_USER    - TODO
	 *   PERF_SAMPLE_STACK_USER   - TODO
	 */

	if (type & PERF_SAMPLE_IP) {
		ret = value_set_u64_hex(cw, event, "perf_ip", sample->ip);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_TID) {
		ret = value_set_s32(cw, event, "perf_tid", sample->tid);
		if (ret)
			return -1;

		ret = value_set_s32(cw, event, "perf_pid", sample->pid);
		if (ret)
			return -1;
	}

	if ((type & PERF_SAMPLE_ID) ||
	    (type & PERF_SAMPLE_IDENTIFIER)) {
		ret = value_set_u64(cw, event, "perf_id", sample->id);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		ret = value_set_u64(cw, event, "perf_stream_id", sample->stream_id);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		ret = value_set_u64(cw, event, "perf_period", sample->period);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_WEIGHT) {
		ret = value_set_u64(cw, event, "perf_weight", sample->weight);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_DATA_SRC) {
		ret = value_set_u64(cw, event, "perf_data_src",
				sample->data_src);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_TRANSACTION) {
		ret = value_set_u64(cw, event, "perf_transaction",
				sample->transaction);
		if (ret)
			return -1;
	}

	return 0;
}
679 
680 static int ctf_stream__flush(struct ctf_stream *cs)
681 {
682 	int err = 0;
683 
684 	if (cs) {
685 		err = bt_ctf_stream_flush(cs->stream);
686 		if (err)
687 			pr_err("CTF stream %d flush failed\n", cs->cpu);
688 
689 		pr("Flush stream for cpu %d (%u samples)\n",
690 		   cs->cpu, cs->count);
691 
692 		cs->count = 0;
693 	}
694 
695 	return err;
696 }
697 
/*
 * Allocate and initialize a ctf_stream for 'cpu': create a babeltrace
 * stream from the writer's stream class and record the CPU number in
 * the packet context's "cpu_id" field. Returns NULL on any failure.
 */
static struct ctf_stream *ctf_stream__create(struct ctf_writer *cw, int cpu)
{
	struct ctf_stream *cs;
	struct bt_ctf_field *pkt_ctx   = NULL;
	struct bt_ctf_field *cpu_field = NULL;
	struct bt_ctf_stream *stream   = NULL;
	int ret;

	cs = zalloc(sizeof(*cs));
	if (!cs) {
		pr_err("Failed to allocate ctf stream\n");
		return NULL;
	}

	stream = bt_ctf_writer_create_stream(cw->writer, cw->stream_class);
	if (!stream) {
		pr_err("Failed to create CTF stream\n");
		goto out;
	}

	pkt_ctx = bt_ctf_stream_get_packet_context(stream);
	if (!pkt_ctx) {
		pr_err("Failed to obtain packet context\n");
		goto out;
	}

	/* Only the "cpu_id" member is needed; drop the context ref early. */
	cpu_field = bt_ctf_field_structure_get_field(pkt_ctx, "cpu_id");
	bt_ctf_field_put(pkt_ctx);
	if (!cpu_field) {
		pr_err("Failed to obtain cpu field\n");
		goto out;
	}

	ret = bt_ctf_field_unsigned_integer_set_value(cpu_field, (u32) cpu);
	if (ret) {
		pr_err("Failed to update CPU number\n");
		goto out;
	}

	bt_ctf_field_put(cpu_field);

	cs->cpu    = cpu;
	cs->stream = stream;
	return cs;

out:
	if (cpu_field)
		bt_ctf_field_put(cpu_field);
	if (stream)
		bt_ctf_stream_put(stream);

	free(cs);
	return NULL;
}
752 
753 static void ctf_stream__delete(struct ctf_stream *cs)
754 {
755 	if (cs) {
756 		bt_ctf_stream_put(cs->stream);
757 		free(cs);
758 	}
759 }
760 
761 static struct ctf_stream *ctf_stream(struct ctf_writer *cw, int cpu)
762 {
763 	struct ctf_stream *cs = cw->stream[cpu];
764 
765 	if (!cs) {
766 		cs = ctf_stream__create(cw, cpu);
767 		cw->stream[cpu] = cs;
768 	}
769 
770 	return cs;
771 }
772 
773 static int get_sample_cpu(struct ctf_writer *cw, struct perf_sample *sample,
774 			  struct evsel *evsel)
775 {
776 	int cpu = 0;
777 
778 	if (evsel->core.attr.sample_type & PERF_SAMPLE_CPU)
779 		cpu = sample->cpu;
780 
781 	if (cpu > cw->stream_cnt) {
782 		pr_err("Event was recorded for CPU %d, limit is at %d.\n",
783 			cpu, cw->stream_cnt);
784 		cpu = 0;
785 	}
786 
787 	return cpu;
788 }
789 
790 #define STREAM_FLUSH_COUNT 100000
791 
792 /*
793  * Currently we have no other way to determine the
794  * time for the stream flush other than keep track
795  * of the number of events and check it against
796  * threshold.
797  */
798 static bool is_flush_needed(struct ctf_stream *cs)
799 {
800 	return cs->count >= STREAM_FLUSH_COUNT;
801 }
802 
803 static int process_sample_event(const struct perf_tool *tool,
804 				union perf_event *_event,
805 				struct perf_sample *sample,
806 				struct evsel *evsel,
807 				struct machine *machine __maybe_unused)
808 {
809 	struct convert *c = container_of(tool, struct convert, tool);
810 	struct evsel_priv *priv = evsel->priv;
811 	struct ctf_writer *cw = &c->writer;
812 	struct ctf_stream *cs;
813 	struct bt_ctf_event_class *event_class;
814 	struct bt_ctf_event *event;
815 	int ret;
816 	unsigned long type = evsel->core.attr.sample_type;
817 
818 	if (WARN_ONCE(!priv, "Failed to setup all events.\n"))
819 		return 0;
820 
821 	if (perf_time__ranges_skip_sample(c->ptime_range, c->range_num, sample->time)) {
822 		++c->skipped;
823 		return 0;
824 	}
825 
826 	event_class = priv->event_class;
827 
828 	/* update stats */
829 	c->events_count++;
830 	c->events_size += _event->header.size;
831 
832 	pr_time2(sample->time, "sample %" PRIu64 "\n", c->events_count);
833 
834 	event = bt_ctf_event_create(event_class);
835 	if (!event) {
836 		pr_err("Failed to create an CTF event\n");
837 		return -1;
838 	}
839 
840 	bt_ctf_clock_set_time(cw->clock, sample->time);
841 
842 	ret = add_generic_values(cw, event, evsel, sample);
843 	if (ret)
844 		return -1;
845 
846 	if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
847 		ret = add_tracepoint_values(cw, event_class, event,
848 					    evsel, sample);
849 		if (ret)
850 			return -1;
851 	}
852 
853 	if (type & PERF_SAMPLE_CALLCHAIN) {
854 		ret = add_callchain_output_values(event_class,
855 				event, sample->callchain);
856 		if (ret)
857 			return -1;
858 	}
859 
860 	if (evsel__is_bpf_output(evsel)) {
861 		ret = add_bpf_output_values(event_class, event, sample);
862 		if (ret)
863 			return -1;
864 	}
865 
866 	cs = ctf_stream(cw, get_sample_cpu(cw, sample, evsel));
867 	if (cs) {
868 		if (is_flush_needed(cs))
869 			ctf_stream__flush(cs);
870 
871 		cs->count++;
872 		bt_ctf_stream_append_event(cs->stream, event);
873 	}
874 
875 	bt_ctf_event_put(event);
876 	return cs ? 0 : -1;
877 }
878 
/*
 * Copy one member of a non-sample perf_event record into the CTF
 * event payload using the matching value_set_<type> setter.
 */
#define __NON_SAMPLE_SET_FIELD(_name, _type, _field) 	\
do {							\
	ret = value_set_##_type(cw, event, #_field, _event->_name._field);\
	if (ret)					\
		return -1;				\
} while(0)

/*
 * Generate a process_<name>_event() tool callback that mirrors a
 * non-sample event (comm/fork/exit/mmap/mmap2) into the CTF trace,
 * then chains to the stock perf_event__process_<name>() handler.
 */
#define __FUNC_PROCESS_NON_SAMPLE(_name, body) 	\
static int process_##_name##_event(const struct perf_tool *tool,	\
				   union perf_event *_event,	\
				   struct perf_sample *sample,	\
				   struct machine *machine)	\
{								\
	struct convert *c = container_of(tool, struct convert, tool);\
	struct ctf_writer *cw = &c->writer;			\
	struct bt_ctf_event_class *event_class = cw->_name##_class;\
	struct bt_ctf_event *event;				\
	struct ctf_stream *cs;					\
	int ret;						\
								\
	c->non_sample_count++;					\
	c->events_size += _event->header.size;			\
	event = bt_ctf_event_create(event_class);		\
	if (!event) {						\
		pr_err("Failed to create an CTF event\n");	\
		return -1;					\
	}							\
								\
	bt_ctf_clock_set_time(cw->clock, sample->time);		\
	body							\
	cs = ctf_stream(cw, 0);					\
	if (cs) {						\
		if (is_flush_needed(cs))			\
			ctf_stream__flush(cs);			\
								\
		cs->count++;					\
		bt_ctf_stream_append_event(cs->stream, event);	\
	}							\
	bt_ctf_event_put(event);				\
								\
	return perf_event__process_##_name(tool, _event, sample, machine);\
}

__FUNC_PROCESS_NON_SAMPLE(comm,
	__NON_SAMPLE_SET_FIELD(comm, u32, pid);
	__NON_SAMPLE_SET_FIELD(comm, u32, tid);
	__NON_SAMPLE_SET_FIELD(comm, string, comm);
)
__FUNC_PROCESS_NON_SAMPLE(fork,
	__NON_SAMPLE_SET_FIELD(fork, u32, pid);
	__NON_SAMPLE_SET_FIELD(fork, u32, ppid);
	__NON_SAMPLE_SET_FIELD(fork, u32, tid);
	__NON_SAMPLE_SET_FIELD(fork, u32, ptid);
	__NON_SAMPLE_SET_FIELD(fork, u64, time);
)

/* Exit records are accessed through the 'fork' member of union perf_event. */
__FUNC_PROCESS_NON_SAMPLE(exit,
	__NON_SAMPLE_SET_FIELD(fork, u32, pid);
	__NON_SAMPLE_SET_FIELD(fork, u32, ppid);
	__NON_SAMPLE_SET_FIELD(fork, u32, tid);
	__NON_SAMPLE_SET_FIELD(fork, u32, ptid);
	__NON_SAMPLE_SET_FIELD(fork, u64, time);
)
__FUNC_PROCESS_NON_SAMPLE(mmap,
	__NON_SAMPLE_SET_FIELD(mmap, u32, pid);
	__NON_SAMPLE_SET_FIELD(mmap, u32, tid);
	__NON_SAMPLE_SET_FIELD(mmap, u64_hex, start);
	__NON_SAMPLE_SET_FIELD(mmap, string, filename);
)
__FUNC_PROCESS_NON_SAMPLE(mmap2,
	__NON_SAMPLE_SET_FIELD(mmap2, u32, pid);
	__NON_SAMPLE_SET_FIELD(mmap2, u32, tid);
	__NON_SAMPLE_SET_FIELD(mmap2, u64_hex, start);
	__NON_SAMPLE_SET_FIELD(mmap2, string, filename);
)
#undef __NON_SAMPLE_SET_FIELD
#undef __FUNC_PROCESS_NON_SAMPLE
956 
/*
 * Build a replacement for a CTF identifier. With dup < 0, prefix
 * 'name' with '_'; with dup >= 0, append "_dupl_<dup>" to
 * 'orig_name'. At most 10 duplicates (dup 0..9) are supported; beyond
 * that NULL is returned. 'name' is freed unless it aliases
 * 'orig_name'; the caller owns the returned string.
 */
static char *change_name(char *name, char *orig_name, int dup)
{
	char *new_name = NULL;
	size_t sz;

	if (name == NULL)
		name = orig_name;

	if (dup < 10) {
		/*
		 * Add '_' prefix to potential keywork.  According to
		 * Mathieu Desnoyers (https://lore.kernel.org/lkml/1074266107.40857.1422045946295.JavaMail.zimbra@efficios.com),
		 * further CTF spec updating may require us to use '$'.
		 */
		sz = (dup < 0) ? strlen(name) + sizeof("_")
			       : strlen(orig_name) + sizeof("_dupl_X");

		new_name = malloc(sz);
		if (new_name) {
			if (dup < 0)
				snprintf(new_name, sz, "_%s", name);
			else
				snprintf(new_name, sz, "%s_dupl_%d",
					 orig_name, dup);
		}
	}

	if (name != orig_name)
		free(name);

	return new_name;
}
992 
993 static int event_class_add_field(struct bt_ctf_event_class *event_class,
994 		struct bt_ctf_field_type *type,
995 		struct tep_format_field *field)
996 {
997 	struct bt_ctf_field_type *t = NULL;
998 	char *name;
999 	int dup = 1;
1000 	int ret;
1001 
1002 	/* alias was already assigned */
1003 	if (field->alias != field->name)
1004 		return bt_ctf_event_class_add_field(event_class, type,
1005 				(char *)field->alias);
1006 
1007 	name = field->name;
1008 
1009 	/* If 'name' is a keywork, add prefix. */
1010 	if (bt_ctf_validate_identifier(name))
1011 		name = change_name(name, field->name, -1);
1012 
1013 	if (!name) {
1014 		pr_err("Failed to fix invalid identifier.");
1015 		return -1;
1016 	}
1017 	while ((t = bt_ctf_event_class_get_field_by_name(event_class, name))) {
1018 		bt_ctf_field_type_put(t);
1019 		name = change_name(name, field->name, dup++);
1020 		if (!name) {
1021 			pr_err("Failed to create dup name for '%s'\n", field->name);
1022 			return -1;
1023 		}
1024 	}
1025 
1026 	ret = bt_ctf_event_class_add_field(event_class, type, name);
1027 	if (!ret)
1028 		field->alias = name;
1029 
1030 	return ret;
1031 }
1032 
/*
 * Declare a CTF field on 'event_class' for every tracepoint format
 * field in the 'fields' list, choosing the matching CTF type (and an
 * array wrapper where needed). Returns 0 on success, -1 on the first
 * failure.
 */
static int add_tracepoint_fields_types(struct ctf_writer *cw,
				       struct tep_format_field *fields,
				       struct bt_ctf_event_class *event_class)
{
	struct tep_format_field *field;
	int ret;

	for (field = fields; field; field = field->next) {
		struct bt_ctf_field_type *type;
		unsigned long flags = field->flags;

		pr2("  field '%s'\n", field->name);

		type = get_tracepoint_field_type(cw, field);
		if (!type)
			return -1;

		/*
		 * A string is an array of chars. For this we use the string
		 * type and don't care that it is an array. What we don't
		 * support is an array of strings.
		 */
		if (flags & TEP_FIELD_IS_STRING)
			flags &= ~TEP_FIELD_IS_ARRAY;

		if (flags & TEP_FIELD_IS_ARRAY)
			type = bt_ctf_field_type_array_create(type, field->arraylen);

		ret = event_class_add_field(event_class, type, field);

		/* The array wrapper was created above; drop our reference. */
		if (flags & TEP_FIELD_IS_ARRAY)
			bt_ctf_field_type_put(type);

		if (ret) {
			pr_err("Failed to add field '%s': %d\n",
					field->name, ret);
			return -1;
		}
	}

	return 0;
}
1075 
1076 static int add_tracepoint_types(struct ctf_writer *cw,
1077 				struct evsel *evsel,
1078 				struct bt_ctf_event_class *class)
1079 {
1080 	const struct tep_event *tp_format = evsel__tp_format(evsel);
1081 	struct tep_format_field *common_fields = tp_format ? tp_format->format.common_fields : NULL;
1082 	struct tep_format_field *fields        = tp_format ? tp_format->format.fields : NULL;
1083 	int ret;
1084 
1085 	ret = add_tracepoint_fields_types(cw, common_fields, class);
1086 	if (!ret)
1087 		ret = add_tracepoint_fields_types(cw, fields, class);
1088 
1089 	return ret;
1090 }
1091 
1092 static int add_bpf_output_types(struct ctf_writer *cw,
1093 				struct bt_ctf_event_class *class)
1094 {
1095 	struct bt_ctf_field_type *len_type = cw->data.u32;
1096 	struct bt_ctf_field_type *seq_base_type = cw->data.u32_hex;
1097 	struct bt_ctf_field_type *seq_type;
1098 	int ret;
1099 
1100 	ret = bt_ctf_event_class_add_field(class, len_type, "raw_len");
1101 	if (ret)
1102 		return ret;
1103 
1104 	seq_type = bt_ctf_field_type_sequence_create(seq_base_type, "raw_len");
1105 	if (!seq_type)
1106 		return -1;
1107 
1108 	return bt_ctf_event_class_add_field(class, seq_type, "raw_data");
1109 }
1110 
/*
 * Declare CTF fields on 'event_class' for each generic perf sample
 * value enabled in the evsel's sample_type. Mirrors the values that
 * add_generic_values() writes per sample.
 */
static int add_generic_types(struct ctf_writer *cw, struct evsel *evsel,
			     struct bt_ctf_event_class *event_class)
{
	u64 type = evsel->core.attr.sample_type;

	/*
	 * missing:
	 *   PERF_SAMPLE_TIME         - not needed as we have it in
	 *                              ctf event header
	 *   PERF_SAMPLE_READ         - TODO
	 *   PERF_SAMPLE_CALLCHAIN    - TODO
	 *   PERF_SAMPLE_RAW          - tracepoint fields and BPF output
	 *                              are handled separately
	 *   PERF_SAMPLE_BRANCH_STACK - TODO
	 *   PERF_SAMPLE_REGS_USER    - TODO
	 *   PERF_SAMPLE_STACK_USER   - TODO
	 */

#define ADD_FIELD(cl, t, n)						\
	do {								\
		pr2("  field '%s'\n", n);				\
		if (bt_ctf_event_class_add_field(cl, t, n)) {		\
			pr_err("Failed to add field '%s';\n", n);	\
			return -1;					\
		}							\
	} while (0)

	if (type & PERF_SAMPLE_IP)
		ADD_FIELD(event_class, cw->data.u64_hex, "perf_ip");

	if (type & PERF_SAMPLE_TID) {
		ADD_FIELD(event_class, cw->data.s32, "perf_tid");
		ADD_FIELD(event_class, cw->data.s32, "perf_pid");
	}

	if ((type & PERF_SAMPLE_ID) ||
	    (type & PERF_SAMPLE_IDENTIFIER))
		ADD_FIELD(event_class, cw->data.u64, "perf_id");

	if (type & PERF_SAMPLE_STREAM_ID)
		ADD_FIELD(event_class, cw->data.u64, "perf_stream_id");

	if (type & PERF_SAMPLE_PERIOD)
		ADD_FIELD(event_class, cw->data.u64, "perf_period");

	if (type & PERF_SAMPLE_WEIGHT)
		ADD_FIELD(event_class, cw->data.u64, "perf_weight");

	if (type & PERF_SAMPLE_DATA_SRC)
		ADD_FIELD(event_class, cw->data.u64, "perf_data_src");

	if (type & PERF_SAMPLE_TRANSACTION)
		ADD_FIELD(event_class, cw->data.u64, "perf_transaction");

	/* Callchain: a u32 size plus a u64 sequence bound to it by name. */
	if (type & PERF_SAMPLE_CALLCHAIN) {
		ADD_FIELD(event_class, cw->data.u32, "perf_callchain_size");
		ADD_FIELD(event_class,
			bt_ctf_field_type_sequence_create(
				cw->data.u64_hex, "perf_callchain_size"),
			"perf_callchain");
	}

#undef ADD_FIELD
	return 0;
}
1176 
1177 static int add_event(struct ctf_writer *cw, struct evsel *evsel)
1178 {
1179 	struct bt_ctf_event_class *event_class;
1180 	struct evsel_priv *priv;
1181 	const char *name = evsel__name(evsel);
1182 	int ret;
1183 
1184 	pr("Adding event '%s' (type %d)\n", name, evsel->core.attr.type);
1185 
1186 	event_class = bt_ctf_event_class_create(name);
1187 	if (!event_class)
1188 		return -1;
1189 
1190 	ret = add_generic_types(cw, evsel, event_class);
1191 	if (ret)
1192 		goto err;
1193 
1194 	if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
1195 		ret = add_tracepoint_types(cw, evsel, event_class);
1196 		if (ret)
1197 			goto err;
1198 	}
1199 
1200 	if (evsel__is_bpf_output(evsel)) {
1201 		ret = add_bpf_output_types(cw, event_class);
1202 		if (ret)
1203 			goto err;
1204 	}
1205 
1206 	ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);
1207 	if (ret) {
1208 		pr("Failed to add event class into stream.\n");
1209 		goto err;
1210 	}
1211 
1212 	priv = malloc(sizeof(*priv));
1213 	if (!priv)
1214 		goto err;
1215 
1216 	priv->event_class = event_class;
1217 	evsel->priv       = priv;
1218 	return 0;
1219 
1220 err:
1221 	bt_ctf_event_class_put(event_class);
1222 	pr_err("Failed to add event '%s'.\n", name);
1223 	return -1;
1224 }
1225 
1226 static int setup_events(struct ctf_writer *cw, struct perf_session *session)
1227 {
1228 	struct evlist *evlist = session->evlist;
1229 	struct evsel *evsel;
1230 	int ret;
1231 
1232 	evlist__for_each_entry(evlist, evsel) {
1233 		ret = add_event(cw, evsel);
1234 		if (ret)
1235 			return ret;
1236 	}
1237 	return 0;
1238 }
1239 
/*
 * Add one field named #n, of CTF type cw->data.t, to the event class being
 * built.  Expands inside the functions generated by
 * __FUNC_ADD_NON_SAMPLE_EVENT_CLASS below, where 'cw' and 'event_class'
 * are in scope; on failure it returns -1 from the *enclosing* function.
 *
 * NOTE(review): the failure path returns without putting 'event_class',
 * so the creation reference appears to be leaked there — confirm against
 * the babeltrace refcounting rules and fix alongside the stream-add path.
 */
#define __NON_SAMPLE_ADD_FIELD(t, n)						\
	do {							\
		pr2("  field '%s'\n", #n);			\
		if (bt_ctf_event_class_add_field(event_class, cw->data.t, #n)) {\
			pr_err("Failed to add field '%s';\n", #n);\
			return -1;				\
		}						\
	} while(0)
1248 
/*
 * Generate add_<_name>_event(): create the "perf_<_name>" event class,
 * populate it via 'body' (a list of __NON_SAMPLE_ADD_FIELD() uses),
 * register it with the stream class and cache it in cw-><_name>_class.
 *
 * On success the stream class holds its own reference, so the creation
 * reference is dropped and the cached pointer stays valid for the
 * writer's lifetime.  The creation reference is now also dropped when
 * bt_ctf_stream_class_add_event_class() fails (previously leaked).
 */
#define __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(_name, body) 		\
static int add_##_name##_event(struct ctf_writer *cw)		\
{								\
	struct bt_ctf_event_class *event_class;			\
	int ret;						\
								\
	pr("Adding "#_name" event\n");				\
	event_class = bt_ctf_event_class_create("perf_" #_name);\
	if (!event_class)					\
		return -1;					\
	body							\
								\
	ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);\
	if (ret) {						\
		pr("Failed to add event class '"#_name"' into stream.\n");\
		bt_ctf_event_class_put(event_class);		\
		return ret;					\
	}							\
								\
	cw->_name##_class = event_class;			\
	bt_ctf_event_class_put(event_class);			\
	return 0;						\
}
1271 
/* PERF_RECORD_COMM: task command-name change. */
__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(comm,
	__NON_SAMPLE_ADD_FIELD(u32, pid);
	__NON_SAMPLE_ADD_FIELD(u32, tid);
	__NON_SAMPLE_ADD_FIELD(string, comm);
)

/* PERF_RECORD_FORK: task creation. */
__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(fork,
	__NON_SAMPLE_ADD_FIELD(u32, pid);
	__NON_SAMPLE_ADD_FIELD(u32, ppid);
	__NON_SAMPLE_ADD_FIELD(u32, tid);
	__NON_SAMPLE_ADD_FIELD(u32, ptid);
	__NON_SAMPLE_ADD_FIELD(u64, time);
)

/* PERF_RECORD_EXIT: task exit (same payload layout as fork). */
__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(exit,
	__NON_SAMPLE_ADD_FIELD(u32, pid);
	__NON_SAMPLE_ADD_FIELD(u32, ppid);
	__NON_SAMPLE_ADD_FIELD(u32, tid);
	__NON_SAMPLE_ADD_FIELD(u32, ptid);
	__NON_SAMPLE_ADD_FIELD(u64, time);
)

/* PERF_RECORD_MMAP: memory mapping event (start address + file name). */
__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(mmap,
	__NON_SAMPLE_ADD_FIELD(u32, pid);
	__NON_SAMPLE_ADD_FIELD(u32, tid);
	__NON_SAMPLE_ADD_FIELD(u64_hex, start);
	__NON_SAMPLE_ADD_FIELD(string, filename);
)

/* PERF_RECORD_MMAP2: like mmap; only the common subset is converted here. */
__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(mmap2,
	__NON_SAMPLE_ADD_FIELD(u32, pid);
	__NON_SAMPLE_ADD_FIELD(u32, tid);
	__NON_SAMPLE_ADD_FIELD(u64_hex, start);
	__NON_SAMPLE_ADD_FIELD(string, filename);
)
#undef __NON_SAMPLE_ADD_FIELD
#undef __FUNC_ADD_NON_SAMPLE_EVENT_CLASS
1309 
1310 static int setup_non_sample_events(struct ctf_writer *cw,
1311 				   struct perf_session *session __maybe_unused)
1312 {
1313 	int ret;
1314 
1315 	ret = add_comm_event(cw);
1316 	if (ret)
1317 		return ret;
1318 	ret = add_exit_event(cw);
1319 	if (ret)
1320 		return ret;
1321 	ret = add_fork_event(cw);
1322 	if (ret)
1323 		return ret;
1324 	ret = add_mmap_event(cw);
1325 	if (ret)
1326 		return ret;
1327 	ret = add_mmap2_event(cw);
1328 	if (ret)
1329 		return ret;
1330 	return 0;
1331 }
1332 
1333 static void cleanup_events(struct perf_session *session)
1334 {
1335 	struct evlist *evlist = session->evlist;
1336 	struct evsel *evsel;
1337 
1338 	evlist__for_each_entry(evlist, evsel) {
1339 		struct evsel_priv *priv;
1340 
1341 		priv = evsel->priv;
1342 		if (priv)
1343 			bt_ctf_event_class_put(priv->event_class);
1344 		zfree(&evsel->priv);
1345 	}
1346 
1347 	evlist__delete(evlist);
1348 	session->evlist = NULL;
1349 }
1350 
1351 static int setup_streams(struct ctf_writer *cw, struct perf_session *session)
1352 {
1353 	struct ctf_stream **stream;
1354 	struct perf_env *env = perf_session__env(session);
1355 	int ncpus;
1356 
1357 	/*
1358 	 * Try to get the number of cpus used in the data file,
1359 	 * if not present fallback to the MAX_CPUS.
1360 	 */
1361 	ncpus = env->nr_cpus_avail ?: MAX_CPUS;
1362 
1363 	stream = zalloc(sizeof(*stream) * ncpus);
1364 	if (!stream) {
1365 		pr_err("Failed to allocate streams.\n");
1366 		return -ENOMEM;
1367 	}
1368 
1369 	cw->stream     = stream;
1370 	cw->stream_cnt = ncpus;
1371 	return 0;
1372 }
1373 
1374 static void free_streams(struct ctf_writer *cw)
1375 {
1376 	int cpu;
1377 
1378 	for (cpu = 0; cpu < cw->stream_cnt; cpu++)
1379 		ctf_stream__delete(cw->stream[cpu]);
1380 
1381 	zfree(&cw->stream);
1382 }
1383 
1384 static int ctf_writer__setup_env(struct ctf_writer *cw,
1385 				 struct perf_session *session)
1386 {
1387 	struct perf_env *env = perf_session__env(session);
1388 	struct bt_ctf_writer *writer = cw->writer;
1389 
1390 #define ADD(__n, __v)							\
1391 do {									\
1392 	if (__v && bt_ctf_writer_add_environment_field(writer, __n, __v))	\
1393 		return -1;						\
1394 } while (0)
1395 
1396 	ADD("host",    env->hostname);
1397 	ADD("sysname", "Linux");
1398 	ADD("release", env->os_release);
1399 	ADD("version", env->version);
1400 	ADD("machine", env->arch);
1401 	ADD("domain", "kernel");
1402 	ADD("tracer_name", "perf");
1403 
1404 #undef ADD
1405 	return 0;
1406 }
1407 
1408 static int process_feature_event(const struct perf_tool *tool,
1409 				 struct perf_session *session,
1410 				 union perf_event *event)
1411 {
1412 	struct convert *c = container_of(tool, struct convert, tool);
1413 	struct ctf_writer *cw = &c->writer;
1414 	struct perf_record_header_feature *fe = &event->feat;
1415 
1416 	if (event->feat.feat_id < HEADER_LAST_FEATURE) {
1417 		int ret = perf_event__process_feature(session, event);
1418 
1419 		if (ret)
1420 			return ret;
1421 	}
1422 
1423 	switch (fe->feat_id) {
1424 	case HEADER_HOSTNAME:
1425 		if (session->header.env.hostname) {
1426 			return bt_ctf_writer_add_environment_field(cw->writer, "host",
1427 								   session->header.env.hostname);
1428 		}
1429 		break;
1430 	case HEADER_OSRELEASE:
1431 		if (session->header.env.os_release) {
1432 			return bt_ctf_writer_add_environment_field(cw->writer, "release",
1433 								   session->header.env.os_release);
1434 		}
1435 		break;
1436 	case HEADER_VERSION:
1437 		if (session->header.env.version) {
1438 			return bt_ctf_writer_add_environment_field(cw->writer, "version",
1439 								   session->header.env.version);
1440 		}
1441 		break;
1442 	case HEADER_ARCH:
1443 		if (session->header.env.arch) {
1444 			return bt_ctf_writer_add_environment_field(cw->writer, "machine",
1445 								   session->header.env.arch);
1446 		}
1447 		break;
1448 	default:
1449 		break;
1450 	}
1451 	return 0;
1452 }
1453 
1454 static int ctf_writer__setup_clock(struct ctf_writer *cw,
1455 				   struct perf_session *session,
1456 				   bool tod)
1457 {
1458 	struct bt_ctf_clock *clock = cw->clock;
1459 	const char *desc = "perf clock";
1460 	int64_t offset = 0;
1461 
1462 	if (tod) {
1463 		struct perf_env *env = perf_session__env(session);
1464 
1465 		if (!env->clock.enabled) {
1466 			pr_err("Can't provide --tod time, missing clock data. "
1467 			       "Please record with -k/--clockid option.\n");
1468 			return -1;
1469 		}
1470 
1471 		desc   = clockid_name(env->clock.clockid);
1472 		offset = env->clock.tod_ns - env->clock.clockid_ns;
1473 	}
1474 
1475 #define SET(__n, __v)				\
1476 do {						\
1477 	if (bt_ctf_clock_set_##__n(clock, __v))	\
1478 		return -1;			\
1479 } while (0)
1480 
1481 	SET(frequency,   1000000000);
1482 	SET(offset,      offset);
1483 	SET(description, desc);
1484 	SET(precision,   10);
1485 	SET(is_absolute, 0);
1486 
1487 #undef SET
1488 	return 0;
1489 }
1490 
1491 static struct bt_ctf_field_type *create_int_type(int size, bool sign, bool hex)
1492 {
1493 	struct bt_ctf_field_type *type;
1494 
1495 	type = bt_ctf_field_type_integer_create(size);
1496 	if (!type)
1497 		return NULL;
1498 
1499 	if (sign &&
1500 	    bt_ctf_field_type_integer_set_signed(type, 1))
1501 		goto err;
1502 
1503 	if (hex &&
1504 	    bt_ctf_field_type_integer_set_base(type, BT_CTF_INTEGER_BASE_HEXADECIMAL))
1505 		goto err;
1506 
1507 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
1508 	bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_BIG_ENDIAN);
1509 #else
1510 	bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_LITTLE_ENDIAN);
1511 #endif
1512 
1513 	pr2("Created type: INTEGER %d-bit %ssigned %s\n",
1514 	    size, sign ? "un" : "", hex ? "hex" : "");
1515 	return type;
1516 
1517 err:
1518 	bt_ctf_field_type_put(type);
1519 	return NULL;
1520 }
1521 
1522 static void ctf_writer__cleanup_data(struct ctf_writer *cw)
1523 {
1524 	unsigned int i;
1525 
1526 	for (i = 0; i < ARRAY_SIZE(cw->data.array); i++)
1527 		bt_ctf_field_type_put(cw->data.array[i]);
1528 }
1529 
/*
 * Create the basic CTF field types (signed/unsigned 32/64-bit integers,
 * hex variants and a string type) shared by all event classes.
 * Returns 0 on success, -1 on failure (partially created types are
 * released via ctf_writer__cleanup_data()).
 */
static int ctf_writer__init_data(struct ctf_writer *cw)
{
/* Create one integer type; jump to the common error path on failure. */
#define CREATE_INT_TYPE(type, size, sign, hex)		\
do {							\
	(type) = create_int_type(size, sign, hex);	\
	if (!(type))					\
		goto err;				\
} while (0)

	CREATE_INT_TYPE(cw->data.s64, 64, true,  false);
	CREATE_INT_TYPE(cw->data.u64, 64, false, false);
	CREATE_INT_TYPE(cw->data.s32, 32, true,  false);
	CREATE_INT_TYPE(cw->data.u32, 32, false, false);
	CREATE_INT_TYPE(cw->data.u32_hex, 32, false, true);
	CREATE_INT_TYPE(cw->data.u64_hex, 64, false, true);

	cw->data.string  = bt_ctf_field_type_string_create();
	if (cw->data.string)
		return 0;

err:
	/*
	 * cleanup walks the whole cw->data.array; entries not yet created
	 * are NULL, which bt_ctf_field_type_put() tolerates.
	 */
	ctf_writer__cleanup_data(cw);
	pr_err("Failed to create data types.\n");
	return -1;
}
1555 
/*
 * Tear down everything ctf_writer__init() built: field types, clock,
 * streams, stream class and the writer itself, then zero the struct so
 * a later cleanup of the same writer is harmless.
 */
static void ctf_writer__cleanup(struct ctf_writer *cw)
{
	ctf_writer__cleanup_data(cw);

	bt_ctf_clock_put(cw->clock);
	free_streams(cw);
	bt_ctf_stream_class_put(cw->stream_class);
	bt_ctf_writer_put(cw->writer);

	/* and NULL all the pointers */
	memset(cw, 0, sizeof(*cw));
}
1568 
/*
 * Initialize the CTF writer: create the babeltrace writer for @path,
 * the perf clock, the stream class and the basic field types, then wire
 * them together (clock -> stream class, cpu_id -> packet context,
 * clock -> writer).  With @tod the clock is offset to wall-clock time.
 *
 * Each handle is stored in *cw as soon as it is created so that the
 * single error path, ctf_writer__cleanup(), can release whatever part
 * was already built.  Returns 0 on success, -1 on failure.
 */
static int ctf_writer__init(struct ctf_writer *cw, const char *path,
			    struct perf_session *session, bool tod)
{
	struct bt_ctf_writer		*writer;
	struct bt_ctf_stream_class	*stream_class;
	struct bt_ctf_clock		*clock;
	struct bt_ctf_field_type	*pkt_ctx_type;
	int				ret;

	/* CTF writer */
	writer = bt_ctf_writer_create(path);
	if (!writer)
		goto err;

	cw->writer = writer;

	/* CTF clock */
	clock = bt_ctf_clock_create("perf_clock");
	if (!clock) {
		pr("Failed to create CTF clock.\n");
		goto err_cleanup;
	}

	cw->clock = clock;

	if (ctf_writer__setup_clock(cw, session, tod)) {
		pr("Failed to setup CTF clock.\n");
		goto err_cleanup;
	}

	/* CTF stream class */
	stream_class = bt_ctf_stream_class_create("perf_stream");
	if (!stream_class) {
		pr("Failed to create CTF stream class.\n");
		goto err_cleanup;
	}

	cw->stream_class = stream_class;

	/* CTF clock stream setup */
	if (bt_ctf_stream_class_set_clock(stream_class, clock)) {
		pr("Failed to assign CTF clock to stream class.\n");
		goto err_cleanup;
	}

	if (ctf_writer__init_data(cw))
		goto err_cleanup;

	/* Add cpu_id for packet context */
	pkt_ctx_type = bt_ctf_stream_class_get_packet_context_type(stream_class);
	if (!pkt_ctx_type)
		goto err_cleanup;

	/* The structure holds its own reference; drop ours either way. */
	ret = bt_ctf_field_type_structure_add_field(pkt_ctx_type, cw->data.u32, "cpu_id");
	bt_ctf_field_type_put(pkt_ctx_type);
	if (ret)
		goto err_cleanup;

	/* CTF clock writer setup */
	if (bt_ctf_writer_add_clock(writer, clock)) {
		pr("Failed to assign CTF clock to writer.\n");
		goto err_cleanup;
	}

	return 0;

err_cleanup:
	ctf_writer__cleanup(cw);
err:
	pr_err("Failed to setup CTF writer.\n");
	return -1;
}
1641 
1642 static int ctf_writer__flush_streams(struct ctf_writer *cw)
1643 {
1644 	int cpu, ret = 0;
1645 
1646 	for (cpu = 0; cpu < cw->stream_cnt && !ret; cpu++)
1647 		ret = ctf_stream__flush(cw->stream[cpu]);
1648 
1649 	return ret;
1650 }
1651 
1652 static int convert__config(const char *var, const char *value, void *cb)
1653 {
1654 	struct convert *c = cb;
1655 
1656 	if (!strcmp(var, "convert.queue-size"))
1657 		return perf_config_u64(&c->queue_size, var, value);
1658 
1659 	return 0;
1660 }
1661 
/*
 * Convert the perf.data file @input into a CTF trace at @path.
 *
 * Opens the session, sets up the CTF writer (environment, clock, event
 * classes, per-cpu streams), replays all events through the tool
 * callbacks, flushes the streams and prints a summary.
 *
 * @opts controls forced reads, --all (convert non-sample records too),
 * --tod clock translation and time-range filtering.
 * Returns 0 on success or a negative error code.
 */
int bt_convert__perf2ctf(const char *input, const char *path,
			 struct perf_data_convert_opts *opts)
{
	struct perf_session *session;
	struct perf_data data = {
		.path	   = input,
		.mode      = PERF_DATA_MODE_READ,
		.force     = opts->force,
	};
	struct convert c = {};
	struct ctf_writer *cw = &c.writer;
	int err;

	/* Default handlers; samples are converted, the rest bookkeeps. */
	perf_tool__init(&c.tool, /*ordered_events=*/true);
	c.tool.sample          = process_sample_event;
	c.tool.mmap            = perf_event__process_mmap;
	c.tool.mmap2           = perf_event__process_mmap2;
	c.tool.comm            = perf_event__process_comm;
	c.tool.exit            = perf_event__process_exit;
	c.tool.fork            = perf_event__process_fork;
	c.tool.lost            = perf_event__process_lost;
	c.tool.tracing_data    = perf_event__process_tracing_data;
	c.tool.build_id        = perf_event__process_build_id;
	c.tool.namespaces      = perf_event__process_namespaces;
	c.tool.attr            = perf_event__process_attr;
	c.tool.feature         = process_feature_event;
	c.tool.ordering_requires_timestamps = true;

	/* With --all, also emit CTF events for these non-sample records. */
	if (opts->all) {
		c.tool.comm = process_comm_event;
		c.tool.exit = process_exit_event;
		c.tool.fork = process_fork_event;
		c.tool.mmap = process_mmap_event;
		c.tool.mmap2 = process_mmap2_event;
	}

	err = perf_config(convert__config, &c);
	if (err)
		return err;

	err = -1;
	/* perf.data session */
	session = perf_session__new(&data, &c.tool);
	if (IS_ERR(session))
		return PTR_ERR(session);

	/* Parse --time ranges before any event is processed. */
	if (opts->time_str) {
		err = perf_time__parse_for_ranges(opts->time_str, session,
						  &c.ptime_range,
						  &c.range_size,
						  &c.range_num);
		if (err < 0)
			goto free_session;
	}

	/* CTF writer */
	if (ctf_writer__init(cw, path, session, opts->tod))
		goto free_session;

	if (c.queue_size) {
		ordered_events__set_alloc_size(&session->ordered_events,
					       c.queue_size);
	}

	/* CTF writer env/clock setup  */
	if (ctf_writer__setup_env(cw, session))
		goto free_writer;

	/* CTF events setup */
	if (setup_events(cw, session))
		goto free_writer;

	if (opts->all && setup_non_sample_events(cw, session))
		goto free_writer;

	if (setup_streams(cw, session))
		goto free_writer;

	/* Replay the data file; the callbacks fill the CTF streams. */
	err = perf_session__process_events(session);
	if (!err)
		err = ctf_writer__flush_streams(cw);
	else
		pr_err("Error during conversion.\n");

	fprintf(stderr,	"[ perf data convert: Converted '%s' into CTF data '%s' ]\n",
		data.path, path);

	fprintf(stderr,	"[ perf data convert: Converted and wrote %.3f MB (%" PRIu64 " samples",
		(double) c.events_size / 1024.0 / 1024.0,
		c.events_count);

	if (!c.non_sample_count)
		fprintf(stderr, ") ]\n");
	else
		fprintf(stderr, ", %" PRIu64 " non-samples) ]\n", c.non_sample_count);

	if (c.skipped) {
		fprintf(stderr,	"[ perf data convert: Skipped %" PRIu64 " samples ]\n",
			c.skipped);
	}

	if (c.ptime_range)
		zfree(&c.ptime_range);

	cleanup_events(session);
	perf_session__delete(session);
	ctf_writer__cleanup(cw);

	return err;

free_writer:
	ctf_writer__cleanup(cw);
free_session:
	if (c.ptime_range)
		zfree(&c.ptime_range);

	perf_session__delete(session);
	pr_err("Error during conversion setup.\n");
	return err;
}
1782