1 /* 2 * CTF writing support via babeltrace. 3 * 4 * Copyright (C) 2014, Jiri Olsa <jolsa@redhat.com> 5 * Copyright (C) 2014, Sebastian Andrzej Siewior <bigeasy@linutronix.de> 6 * 7 * Released under the GPL v2. (and only v2, not any later version) 8 */ 9 10 #include <inttypes.h> 11 #include <linux/compiler.h> 12 #include <linux/kernel.h> 13 #include <babeltrace/ctf-writer/writer.h> 14 #include <babeltrace/ctf-writer/clock.h> 15 #include <babeltrace/ctf-writer/stream.h> 16 #include <babeltrace/ctf-writer/event.h> 17 #include <babeltrace/ctf-writer/event-types.h> 18 #include <babeltrace/ctf-writer/event-fields.h> 19 #include <babeltrace/ctf-ir/utils.h> 20 #include <babeltrace/ctf/events.h> 21 #include <traceevent/event-parse.h> 22 #include "asm/bug.h" 23 #include "data-convert-bt.h" 24 #include "session.h" 25 #include "util.h" 26 #include "debug.h" 27 #include "tool.h" 28 #include "evlist.h" 29 #include "evsel.h" 30 #include "machine.h" 31 #include "config.h" 32 #include "sane_ctype.h" 33 34 #define pr_N(n, fmt, ...) \ 35 eprintf(n, debug_data_convert, fmt, ##__VA_ARGS__) 36 37 #define pr(fmt, ...) pr_N(1, pr_fmt(fmt), ##__VA_ARGS__) 38 #define pr2(fmt, ...) pr_N(2, pr_fmt(fmt), ##__VA_ARGS__) 39 40 #define pr_time2(t, fmt, ...) 
pr_time_N(2, debug_data_convert, t, pr_fmt(fmt), ##__VA_ARGS__) 41 42 struct evsel_priv { 43 struct bt_ctf_event_class *event_class; 44 }; 45 46 #define MAX_CPUS 4096 47 48 struct ctf_stream { 49 struct bt_ctf_stream *stream; 50 int cpu; 51 u32 count; 52 }; 53 54 struct ctf_writer { 55 /* writer primitives */ 56 struct bt_ctf_writer *writer; 57 struct ctf_stream **stream; 58 int stream_cnt; 59 struct bt_ctf_stream_class *stream_class; 60 struct bt_ctf_clock *clock; 61 62 /* data types */ 63 union { 64 struct { 65 struct bt_ctf_field_type *s64; 66 struct bt_ctf_field_type *u64; 67 struct bt_ctf_field_type *s32; 68 struct bt_ctf_field_type *u32; 69 struct bt_ctf_field_type *string; 70 struct bt_ctf_field_type *u32_hex; 71 struct bt_ctf_field_type *u64_hex; 72 }; 73 struct bt_ctf_field_type *array[6]; 74 } data; 75 struct bt_ctf_event_class *comm_class; 76 struct bt_ctf_event_class *exit_class; 77 struct bt_ctf_event_class *fork_class; 78 }; 79 80 struct convert { 81 struct perf_tool tool; 82 struct ctf_writer writer; 83 84 u64 events_size; 85 u64 events_count; 86 u64 non_sample_count; 87 88 /* Ordered events configured queue size. 
*/ 89 u64 queue_size; 90 }; 91 92 static int value_set(struct bt_ctf_field_type *type, 93 struct bt_ctf_event *event, 94 const char *name, u64 val) 95 { 96 struct bt_ctf_field *field; 97 bool sign = bt_ctf_field_type_integer_get_signed(type); 98 int ret; 99 100 field = bt_ctf_field_create(type); 101 if (!field) { 102 pr_err("failed to create a field %s\n", name); 103 return -1; 104 } 105 106 if (sign) { 107 ret = bt_ctf_field_signed_integer_set_value(field, val); 108 if (ret) { 109 pr_err("failed to set field value %s\n", name); 110 goto err; 111 } 112 } else { 113 ret = bt_ctf_field_unsigned_integer_set_value(field, val); 114 if (ret) { 115 pr_err("failed to set field value %s\n", name); 116 goto err; 117 } 118 } 119 120 ret = bt_ctf_event_set_payload(event, name, field); 121 if (ret) { 122 pr_err("failed to set payload %s\n", name); 123 goto err; 124 } 125 126 pr2(" SET [%s = %" PRIu64 "]\n", name, val); 127 128 err: 129 bt_ctf_field_put(field); 130 return ret; 131 } 132 133 #define __FUNC_VALUE_SET(_name, _val_type) \ 134 static __maybe_unused int value_set_##_name(struct ctf_writer *cw, \ 135 struct bt_ctf_event *event, \ 136 const char *name, \ 137 _val_type val) \ 138 { \ 139 struct bt_ctf_field_type *type = cw->data._name; \ 140 return value_set(type, event, name, (u64) val); \ 141 } 142 143 #define FUNC_VALUE_SET(_name) __FUNC_VALUE_SET(_name, _name) 144 145 FUNC_VALUE_SET(s32) 146 FUNC_VALUE_SET(u32) 147 FUNC_VALUE_SET(s64) 148 FUNC_VALUE_SET(u64) 149 __FUNC_VALUE_SET(u64_hex, u64) 150 151 static int string_set_value(struct bt_ctf_field *field, const char *string); 152 static __maybe_unused int 153 value_set_string(struct ctf_writer *cw, struct bt_ctf_event *event, 154 const char *name, const char *string) 155 { 156 struct bt_ctf_field_type *type = cw->data.string; 157 struct bt_ctf_field *field; 158 int ret = 0; 159 160 field = bt_ctf_field_create(type); 161 if (!field) { 162 pr_err("failed to create a field %s\n", name); 163 return -1; 164 } 165 166 ret 
= string_set_value(field, string); 167 if (ret) { 168 pr_err("failed to set value %s\n", name); 169 goto err_put_field; 170 } 171 172 ret = bt_ctf_event_set_payload(event, name, field); 173 if (ret) 174 pr_err("failed to set payload %s\n", name); 175 176 err_put_field: 177 bt_ctf_field_put(field); 178 return ret; 179 } 180 181 static struct bt_ctf_field_type* 182 get_tracepoint_field_type(struct ctf_writer *cw, struct format_field *field) 183 { 184 unsigned long flags = field->flags; 185 186 if (flags & FIELD_IS_STRING) 187 return cw->data.string; 188 189 if (!(flags & FIELD_IS_SIGNED)) { 190 /* unsigned long are mostly pointers */ 191 if (flags & FIELD_IS_LONG || flags & FIELD_IS_POINTER) 192 return cw->data.u64_hex; 193 } 194 195 if (flags & FIELD_IS_SIGNED) { 196 if (field->size == 8) 197 return cw->data.s64; 198 else 199 return cw->data.s32; 200 } 201 202 if (field->size == 8) 203 return cw->data.u64; 204 else 205 return cw->data.u32; 206 } 207 208 static unsigned long long adjust_signedness(unsigned long long value_int, int size) 209 { 210 unsigned long long value_mask; 211 212 /* 213 * value_mask = (1 << (size * 8 - 1)) - 1. 214 * Directly set value_mask for code readers. 215 */ 216 switch (size) { 217 case 1: 218 value_mask = 0x7fULL; 219 break; 220 case 2: 221 value_mask = 0x7fffULL; 222 break; 223 case 4: 224 value_mask = 0x7fffffffULL; 225 break; 226 case 8: 227 /* 228 * For 64 bit value, return it self. There is no need 229 * to fill high bit. 230 */ 231 /* Fall through */ 232 default: 233 /* BUG! */ 234 return value_int; 235 } 236 237 /* If it is a positive value, don't adjust. */ 238 if ((value_int & (~0ULL - value_mask)) == 0) 239 return value_int; 240 241 /* Fill upper part of value_int with 1 to make it a negative long long. 
*/ 242 return (value_int & value_mask) | ~value_mask; 243 } 244 245 static int string_set_value(struct bt_ctf_field *field, const char *string) 246 { 247 char *buffer = NULL; 248 size_t len = strlen(string), i, p; 249 int err; 250 251 for (i = p = 0; i < len; i++, p++) { 252 if (isprint(string[i])) { 253 if (!buffer) 254 continue; 255 buffer[p] = string[i]; 256 } else { 257 char numstr[5]; 258 259 snprintf(numstr, sizeof(numstr), "\\x%02x", 260 (unsigned int)(string[i]) & 0xff); 261 262 if (!buffer) { 263 buffer = zalloc(i + (len - i) * 4 + 2); 264 if (!buffer) { 265 pr_err("failed to set unprintable string '%s'\n", string); 266 return bt_ctf_field_string_set_value(field, "UNPRINTABLE-STRING"); 267 } 268 if (i > 0) 269 strncpy(buffer, string, i); 270 } 271 strncat(buffer + p, numstr, 4); 272 p += 3; 273 } 274 } 275 276 if (!buffer) 277 return bt_ctf_field_string_set_value(field, string); 278 err = bt_ctf_field_string_set_value(field, buffer); 279 free(buffer); 280 return err; 281 } 282 283 static int add_tracepoint_field_value(struct ctf_writer *cw, 284 struct bt_ctf_event_class *event_class, 285 struct bt_ctf_event *event, 286 struct perf_sample *sample, 287 struct format_field *fmtf) 288 { 289 struct bt_ctf_field_type *type; 290 struct bt_ctf_field *array_field; 291 struct bt_ctf_field *field; 292 const char *name = fmtf->name; 293 void *data = sample->raw_data; 294 unsigned long flags = fmtf->flags; 295 unsigned int n_items; 296 unsigned int i; 297 unsigned int offset; 298 unsigned int len; 299 int ret; 300 301 name = fmtf->alias; 302 offset = fmtf->offset; 303 len = fmtf->size; 304 if (flags & FIELD_IS_STRING) 305 flags &= ~FIELD_IS_ARRAY; 306 307 if (flags & FIELD_IS_DYNAMIC) { 308 unsigned long long tmp_val; 309 310 tmp_val = pevent_read_number(fmtf->event->pevent, 311 data + offset, len); 312 offset = tmp_val; 313 len = offset >> 16; 314 offset &= 0xffff; 315 } 316 317 if (flags & FIELD_IS_ARRAY) { 318 319 type = bt_ctf_event_class_get_field_by_name( 320 
event_class, name); 321 array_field = bt_ctf_field_create(type); 322 bt_ctf_field_type_put(type); 323 if (!array_field) { 324 pr_err("Failed to create array type %s\n", name); 325 return -1; 326 } 327 328 len = fmtf->size / fmtf->arraylen; 329 n_items = fmtf->arraylen; 330 } else { 331 n_items = 1; 332 array_field = NULL; 333 } 334 335 type = get_tracepoint_field_type(cw, fmtf); 336 337 for (i = 0; i < n_items; i++) { 338 if (flags & FIELD_IS_ARRAY) 339 field = bt_ctf_field_array_get_field(array_field, i); 340 else 341 field = bt_ctf_field_create(type); 342 343 if (!field) { 344 pr_err("failed to create a field %s\n", name); 345 return -1; 346 } 347 348 if (flags & FIELD_IS_STRING) 349 ret = string_set_value(field, data + offset + i * len); 350 else { 351 unsigned long long value_int; 352 353 value_int = pevent_read_number( 354 fmtf->event->pevent, 355 data + offset + i * len, len); 356 357 if (!(flags & FIELD_IS_SIGNED)) 358 ret = bt_ctf_field_unsigned_integer_set_value( 359 field, value_int); 360 else 361 ret = bt_ctf_field_signed_integer_set_value( 362 field, adjust_signedness(value_int, len)); 363 } 364 365 if (ret) { 366 pr_err("failed to set file value %s\n", name); 367 goto err_put_field; 368 } 369 if (!(flags & FIELD_IS_ARRAY)) { 370 ret = bt_ctf_event_set_payload(event, name, field); 371 if (ret) { 372 pr_err("failed to set payload %s\n", name); 373 goto err_put_field; 374 } 375 } 376 bt_ctf_field_put(field); 377 } 378 if (flags & FIELD_IS_ARRAY) { 379 ret = bt_ctf_event_set_payload(event, name, array_field); 380 if (ret) { 381 pr_err("Failed add payload array %s\n", name); 382 return -1; 383 } 384 bt_ctf_field_put(array_field); 385 } 386 return 0; 387 388 err_put_field: 389 bt_ctf_field_put(field); 390 return -1; 391 } 392 393 static int add_tracepoint_fields_values(struct ctf_writer *cw, 394 struct bt_ctf_event_class *event_class, 395 struct bt_ctf_event *event, 396 struct format_field *fields, 397 struct perf_sample *sample) 398 { 399 struct 
format_field *field; 400 int ret; 401 402 for (field = fields; field; field = field->next) { 403 ret = add_tracepoint_field_value(cw, event_class, event, sample, 404 field); 405 if (ret) 406 return -1; 407 } 408 return 0; 409 } 410 411 static int add_tracepoint_values(struct ctf_writer *cw, 412 struct bt_ctf_event_class *event_class, 413 struct bt_ctf_event *event, 414 struct perf_evsel *evsel, 415 struct perf_sample *sample) 416 { 417 struct format_field *common_fields = evsel->tp_format->format.common_fields; 418 struct format_field *fields = evsel->tp_format->format.fields; 419 int ret; 420 421 ret = add_tracepoint_fields_values(cw, event_class, event, 422 common_fields, sample); 423 if (!ret) 424 ret = add_tracepoint_fields_values(cw, event_class, event, 425 fields, sample); 426 427 return ret; 428 } 429 430 static int 431 add_bpf_output_values(struct bt_ctf_event_class *event_class, 432 struct bt_ctf_event *event, 433 struct perf_sample *sample) 434 { 435 struct bt_ctf_field_type *len_type, *seq_type; 436 struct bt_ctf_field *len_field, *seq_field; 437 unsigned int raw_size = sample->raw_size; 438 unsigned int nr_elements = raw_size / sizeof(u32); 439 unsigned int i; 440 int ret; 441 442 if (nr_elements * sizeof(u32) != raw_size) 443 pr_warning("Incorrect raw_size (%u) in bpf output event, skip %zu bytes\n", 444 raw_size, nr_elements * sizeof(u32) - raw_size); 445 446 len_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_len"); 447 len_field = bt_ctf_field_create(len_type); 448 if (!len_field) { 449 pr_err("failed to create 'raw_len' for bpf output event\n"); 450 ret = -1; 451 goto put_len_type; 452 } 453 454 ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements); 455 if (ret) { 456 pr_err("failed to set field value for raw_len\n"); 457 goto put_len_field; 458 } 459 ret = bt_ctf_event_set_payload(event, "raw_len", len_field); 460 if (ret) { 461 pr_err("failed to set payload to raw_len\n"); 462 goto put_len_field; 463 } 464 465 
seq_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_data"); 466 seq_field = bt_ctf_field_create(seq_type); 467 if (!seq_field) { 468 pr_err("failed to create 'raw_data' for bpf output event\n"); 469 ret = -1; 470 goto put_seq_type; 471 } 472 473 ret = bt_ctf_field_sequence_set_length(seq_field, len_field); 474 if (ret) { 475 pr_err("failed to set length of 'raw_data'\n"); 476 goto put_seq_field; 477 } 478 479 for (i = 0; i < nr_elements; i++) { 480 struct bt_ctf_field *elem_field = 481 bt_ctf_field_sequence_get_field(seq_field, i); 482 483 ret = bt_ctf_field_unsigned_integer_set_value(elem_field, 484 ((u32 *)(sample->raw_data))[i]); 485 486 bt_ctf_field_put(elem_field); 487 if (ret) { 488 pr_err("failed to set raw_data[%d]\n", i); 489 goto put_seq_field; 490 } 491 } 492 493 ret = bt_ctf_event_set_payload(event, "raw_data", seq_field); 494 if (ret) 495 pr_err("failed to set payload for raw_data\n"); 496 497 put_seq_field: 498 bt_ctf_field_put(seq_field); 499 put_seq_type: 500 bt_ctf_field_type_put(seq_type); 501 put_len_field: 502 bt_ctf_field_put(len_field); 503 put_len_type: 504 bt_ctf_field_type_put(len_type); 505 return ret; 506 } 507 508 static int add_generic_values(struct ctf_writer *cw, 509 struct bt_ctf_event *event, 510 struct perf_evsel *evsel, 511 struct perf_sample *sample) 512 { 513 u64 type = evsel->attr.sample_type; 514 int ret; 515 516 /* 517 * missing: 518 * PERF_SAMPLE_TIME - not needed as we have it in 519 * ctf event header 520 * PERF_SAMPLE_READ - TODO 521 * PERF_SAMPLE_CALLCHAIN - TODO 522 * PERF_SAMPLE_RAW - tracepoint fields are handled separately 523 * PERF_SAMPLE_BRANCH_STACK - TODO 524 * PERF_SAMPLE_REGS_USER - TODO 525 * PERF_SAMPLE_STACK_USER - TODO 526 */ 527 528 if (type & PERF_SAMPLE_IP) { 529 ret = value_set_u64_hex(cw, event, "perf_ip", sample->ip); 530 if (ret) 531 return -1; 532 } 533 534 if (type & PERF_SAMPLE_TID) { 535 ret = value_set_s32(cw, event, "perf_tid", sample->tid); 536 if (ret) 537 return -1; 538 539 
ret = value_set_s32(cw, event, "perf_pid", sample->pid); 540 if (ret) 541 return -1; 542 } 543 544 if ((type & PERF_SAMPLE_ID) || 545 (type & PERF_SAMPLE_IDENTIFIER)) { 546 ret = value_set_u64(cw, event, "perf_id", sample->id); 547 if (ret) 548 return -1; 549 } 550 551 if (type & PERF_SAMPLE_STREAM_ID) { 552 ret = value_set_u64(cw, event, "perf_stream_id", sample->stream_id); 553 if (ret) 554 return -1; 555 } 556 557 if (type & PERF_SAMPLE_PERIOD) { 558 ret = value_set_u64(cw, event, "perf_period", sample->period); 559 if (ret) 560 return -1; 561 } 562 563 if (type & PERF_SAMPLE_WEIGHT) { 564 ret = value_set_u64(cw, event, "perf_weight", sample->weight); 565 if (ret) 566 return -1; 567 } 568 569 if (type & PERF_SAMPLE_DATA_SRC) { 570 ret = value_set_u64(cw, event, "perf_data_src", 571 sample->data_src); 572 if (ret) 573 return -1; 574 } 575 576 if (type & PERF_SAMPLE_TRANSACTION) { 577 ret = value_set_u64(cw, event, "perf_transaction", 578 sample->transaction); 579 if (ret) 580 return -1; 581 } 582 583 return 0; 584 } 585 586 static int ctf_stream__flush(struct ctf_stream *cs) 587 { 588 int err = 0; 589 590 if (cs) { 591 err = bt_ctf_stream_flush(cs->stream); 592 if (err) 593 pr_err("CTF stream %d flush failed\n", cs->cpu); 594 595 pr("Flush stream for cpu %d (%u samples)\n", 596 cs->cpu, cs->count); 597 598 cs->count = 0; 599 } 600 601 return err; 602 } 603 604 static struct ctf_stream *ctf_stream__create(struct ctf_writer *cw, int cpu) 605 { 606 struct ctf_stream *cs; 607 struct bt_ctf_field *pkt_ctx = NULL; 608 struct bt_ctf_field *cpu_field = NULL; 609 struct bt_ctf_stream *stream = NULL; 610 int ret; 611 612 cs = zalloc(sizeof(*cs)); 613 if (!cs) { 614 pr_err("Failed to allocate ctf stream\n"); 615 return NULL; 616 } 617 618 stream = bt_ctf_writer_create_stream(cw->writer, cw->stream_class); 619 if (!stream) { 620 pr_err("Failed to create CTF stream\n"); 621 goto out; 622 } 623 624 pkt_ctx = bt_ctf_stream_get_packet_context(stream); 625 if (!pkt_ctx) { 626 
pr_err("Failed to obtain packet context\n"); 627 goto out; 628 } 629 630 cpu_field = bt_ctf_field_structure_get_field(pkt_ctx, "cpu_id"); 631 bt_ctf_field_put(pkt_ctx); 632 if (!cpu_field) { 633 pr_err("Failed to obtain cpu field\n"); 634 goto out; 635 } 636 637 ret = bt_ctf_field_unsigned_integer_set_value(cpu_field, (u32) cpu); 638 if (ret) { 639 pr_err("Failed to update CPU number\n"); 640 goto out; 641 } 642 643 bt_ctf_field_put(cpu_field); 644 645 cs->cpu = cpu; 646 cs->stream = stream; 647 return cs; 648 649 out: 650 if (cpu_field) 651 bt_ctf_field_put(cpu_field); 652 if (stream) 653 bt_ctf_stream_put(stream); 654 655 free(cs); 656 return NULL; 657 } 658 659 static void ctf_stream__delete(struct ctf_stream *cs) 660 { 661 if (cs) { 662 bt_ctf_stream_put(cs->stream); 663 free(cs); 664 } 665 } 666 667 static struct ctf_stream *ctf_stream(struct ctf_writer *cw, int cpu) 668 { 669 struct ctf_stream *cs = cw->stream[cpu]; 670 671 if (!cs) { 672 cs = ctf_stream__create(cw, cpu); 673 cw->stream[cpu] = cs; 674 } 675 676 return cs; 677 } 678 679 static int get_sample_cpu(struct ctf_writer *cw, struct perf_sample *sample, 680 struct perf_evsel *evsel) 681 { 682 int cpu = 0; 683 684 if (evsel->attr.sample_type & PERF_SAMPLE_CPU) 685 cpu = sample->cpu; 686 687 if (cpu > cw->stream_cnt) { 688 pr_err("Event was recorded for CPU %d, limit is at %d.\n", 689 cpu, cw->stream_cnt); 690 cpu = 0; 691 } 692 693 return cpu; 694 } 695 696 #define STREAM_FLUSH_COUNT 100000 697 698 /* 699 * Currently we have no other way to determine the 700 * time for the stream flush other than keep track 701 * of the number of events and check it against 702 * threshold. 
703 */ 704 static bool is_flush_needed(struct ctf_stream *cs) 705 { 706 return cs->count >= STREAM_FLUSH_COUNT; 707 } 708 709 static int process_sample_event(struct perf_tool *tool, 710 union perf_event *_event, 711 struct perf_sample *sample, 712 struct perf_evsel *evsel, 713 struct machine *machine __maybe_unused) 714 { 715 struct convert *c = container_of(tool, struct convert, tool); 716 struct evsel_priv *priv = evsel->priv; 717 struct ctf_writer *cw = &c->writer; 718 struct ctf_stream *cs; 719 struct bt_ctf_event_class *event_class; 720 struct bt_ctf_event *event; 721 int ret; 722 723 if (WARN_ONCE(!priv, "Failed to setup all events.\n")) 724 return 0; 725 726 event_class = priv->event_class; 727 728 /* update stats */ 729 c->events_count++; 730 c->events_size += _event->header.size; 731 732 pr_time2(sample->time, "sample %" PRIu64 "\n", c->events_count); 733 734 event = bt_ctf_event_create(event_class); 735 if (!event) { 736 pr_err("Failed to create an CTF event\n"); 737 return -1; 738 } 739 740 bt_ctf_clock_set_time(cw->clock, sample->time); 741 742 ret = add_generic_values(cw, event, evsel, sample); 743 if (ret) 744 return -1; 745 746 if (evsel->attr.type == PERF_TYPE_TRACEPOINT) { 747 ret = add_tracepoint_values(cw, event_class, event, 748 evsel, sample); 749 if (ret) 750 return -1; 751 } 752 753 if (perf_evsel__is_bpf_output(evsel)) { 754 ret = add_bpf_output_values(event_class, event, sample); 755 if (ret) 756 return -1; 757 } 758 759 cs = ctf_stream(cw, get_sample_cpu(cw, sample, evsel)); 760 if (cs) { 761 if (is_flush_needed(cs)) 762 ctf_stream__flush(cs); 763 764 cs->count++; 765 bt_ctf_stream_append_event(cs->stream, event); 766 } 767 768 bt_ctf_event_put(event); 769 return cs ? 
0 : -1; 770 } 771 772 #define __NON_SAMPLE_SET_FIELD(_name, _type, _field) \ 773 do { \ 774 ret = value_set_##_type(cw, event, #_field, _event->_name._field);\ 775 if (ret) \ 776 return -1; \ 777 } while(0) 778 779 #define __FUNC_PROCESS_NON_SAMPLE(_name, body) \ 780 static int process_##_name##_event(struct perf_tool *tool, \ 781 union perf_event *_event, \ 782 struct perf_sample *sample, \ 783 struct machine *machine) \ 784 { \ 785 struct convert *c = container_of(tool, struct convert, tool);\ 786 struct ctf_writer *cw = &c->writer; \ 787 struct bt_ctf_event_class *event_class = cw->_name##_class;\ 788 struct bt_ctf_event *event; \ 789 struct ctf_stream *cs; \ 790 int ret; \ 791 \ 792 c->non_sample_count++; \ 793 c->events_size += _event->header.size; \ 794 event = bt_ctf_event_create(event_class); \ 795 if (!event) { \ 796 pr_err("Failed to create an CTF event\n"); \ 797 return -1; \ 798 } \ 799 \ 800 bt_ctf_clock_set_time(cw->clock, sample->time); \ 801 body \ 802 cs = ctf_stream(cw, 0); \ 803 if (cs) { \ 804 if (is_flush_needed(cs)) \ 805 ctf_stream__flush(cs); \ 806 \ 807 cs->count++; \ 808 bt_ctf_stream_append_event(cs->stream, event); \ 809 } \ 810 bt_ctf_event_put(event); \ 811 \ 812 return perf_event__process_##_name(tool, _event, sample, machine);\ 813 } 814 815 __FUNC_PROCESS_NON_SAMPLE(comm, 816 __NON_SAMPLE_SET_FIELD(comm, u32, pid); 817 __NON_SAMPLE_SET_FIELD(comm, u32, tid); 818 __NON_SAMPLE_SET_FIELD(comm, string, comm); 819 ) 820 __FUNC_PROCESS_NON_SAMPLE(fork, 821 __NON_SAMPLE_SET_FIELD(fork, u32, pid); 822 __NON_SAMPLE_SET_FIELD(fork, u32, ppid); 823 __NON_SAMPLE_SET_FIELD(fork, u32, tid); 824 __NON_SAMPLE_SET_FIELD(fork, u32, ptid); 825 __NON_SAMPLE_SET_FIELD(fork, u64, time); 826 ) 827 828 __FUNC_PROCESS_NON_SAMPLE(exit, 829 __NON_SAMPLE_SET_FIELD(fork, u32, pid); 830 __NON_SAMPLE_SET_FIELD(fork, u32, ppid); 831 __NON_SAMPLE_SET_FIELD(fork, u32, tid); 832 __NON_SAMPLE_SET_FIELD(fork, u32, ptid); 833 __NON_SAMPLE_SET_FIELD(fork, u64, time); 834 
)
#undef __NON_SAMPLE_SET_FIELD
#undef __FUNC_PROCESS_NON_SAMPLE

/*
 * Produce a CTF-safe variant of a field name.
 * If dup < 0, add a prefix. Else, add _dupl_X suffix.
 * Consumes @name (frees it unless it is @orig_name); returns a newly
 * allocated string, or NULL on failure or when 10 duplicates were tried.
 */
static char *change_name(char *name, char *orig_name, int dup)
{
    char *new_name = NULL;
    size_t len;

    if (!name)
        name = orig_name;

    if (dup >= 10)
        goto out;
    /*
     * Add '_' prefix to potential keyword. According to
     * Mathieu Desnoyers (https://lkml.org/lkml/2015/1/23/652),
     * further CTF spec updating may require us to use '$'.
     */
    if (dup < 0)
        len = strlen(name) + sizeof("_");
    else
        len = strlen(orig_name) + sizeof("_dupl_X");

    new_name = malloc(len);
    if (!new_name)
        goto out;

    if (dup < 0)
        snprintf(new_name, len, "_%s", name);
    else
        snprintf(new_name, len, "%s_dupl_%d", orig_name, dup);

out:
    /* the caller-supplied name is consumed unless it aliases orig_name */
    if (name != orig_name)
        free(name);
    return new_name;
}

/*
 * Add @field to @event_class under a CTF-valid, unique name, renaming
 * invalid identifiers and de-duplicating clashes. Caches the final name
 * in field->alias for later payload lookups.
 */
static int event_class_add_field(struct bt_ctf_event_class *event_class,
                 struct bt_ctf_field_type *type,
                 struct format_field *field)
{
    struct bt_ctf_field_type *t = NULL;
    char *name;
    int dup = 1;
    int ret;

    /* alias was already assigned */
    if (field->alias != field->name)
        return bt_ctf_event_class_add_field(event_class, type,
                (char *)field->alias);

    name = field->name;

    /* If 'name' is a keyword, add prefix. */
    if (bt_ctf_validate_identifier(name))
        name = change_name(name, field->name, -1);

    if (!name) {
        pr_err("Failed to fix invalid identifier.");
        return -1;
    }
    /* keep appending _dupl_<n> until the name is free in this class */
    while ((t = bt_ctf_event_class_get_field_by_name(event_class, name))) {
        bt_ctf_field_type_put(t);
        name = change_name(name, field->name, dup++);
        if (!name) {
            pr_err("Failed to create dup name for '%s'\n", field->name);
            return -1;
        }
    }

    ret = bt_ctf_event_class_add_field(event_class, type, name);
    if (!ret)
        field->alias = name;

    return ret;
}

/*
 * Declare a CTF field (scalar, string or array type) in @event_class
 * for every tracepoint format field in the @fields list.
 */
static int add_tracepoint_fields_types(struct ctf_writer *cw,
                       struct format_field *fields,
                       struct bt_ctf_event_class *event_class)
{
    struct format_field *field;
    int ret;

    for (field = fields; field; field = field->next) {
        struct bt_ctf_field_type *type;
        unsigned long flags = field->flags;

        pr2("  field '%s'\n", field->name);

        type = get_tracepoint_field_type(cw, field);
        if (!type)
            return -1;

        /*
         * A string is an array of chars. For this we use the string
         * type and don't care that it is an array. What we don't
         * support is an array of strings.
         */
        if (flags & FIELD_IS_STRING)
            flags &= ~FIELD_IS_ARRAY;

        if (flags & FIELD_IS_ARRAY)
            type = bt_ctf_field_type_array_create(type, field->arraylen);

        ret = event_class_add_field(event_class, type, field);

        /* only the locally created array wrapper is owned here */
        if (flags & FIELD_IS_ARRAY)
            bt_ctf_field_type_put(type);

        if (ret) {
            pr_err("Failed to add field '%s': %d\n",
                   field->name, ret);
            return -1;
        }
    }

    return 0;
}

/* Declare both common and event-specific tracepoint fields in @class. */
static int add_tracepoint_types(struct ctf_writer *cw,
                struct perf_evsel *evsel,
                struct bt_ctf_event_class *class)
{
    struct format_field *common_fields = evsel->tp_format->format.common_fields;
    struct format_field *fields        = evsel->tp_format->format.fields;
    int ret;

    ret = add_tracepoint_fields_types(cw, common_fields, class);
    if (!ret)
        ret = add_tracepoint_fields_types(cw, fields, class);

    return ret;
}

/*
 * Declare the raw_len / raw_data (sequence of u32, length raw_len)
 * field pair used by BPF output events.
 */
static int add_bpf_output_types(struct ctf_writer *cw,
                struct bt_ctf_event_class *class)
{
    struct bt_ctf_field_type *len_type = cw->data.u32;
    struct bt_ctf_field_type *seq_base_type = cw->data.u32_hex;
    struct bt_ctf_field_type *seq_type;
    int ret;

    ret = bt_ctf_event_class_add_field(class, len_type, "raw_len");
    if (ret)
        return ret;

    seq_type = bt_ctf_field_type_sequence_create(seq_base_type, "raw_len");
    if (!seq_type)
        return -1;

    return bt_ctf_event_class_add_field(class, seq_type, "raw_data");
}

/*
 * Declare the generic perf_* fields selected by the evsel's sample_type
 * bitmask in @event_class (mirrors add_generic_values()).
 */
static int add_generic_types(struct ctf_writer *cw, struct perf_evsel *evsel,
                 struct bt_ctf_event_class *event_class)
{
    u64 type = evsel->attr.sample_type;

    /*
     * missing:
     *   PERF_SAMPLE_TIME         - not needed as we have it in
     *                              ctf event header
     *   PERF_SAMPLE_READ         - TODO
     *   PERF_SAMPLE_CALLCHAIN    - TODO
     *   PERF_SAMPLE_RAW          - tracepoint fields and BPF output
     *                              are handled separately
     *   PERF_SAMPLE_BRANCH_STACK - TODO
     *   PERF_SAMPLE_REGS_USER    - TODO
     *   PERF_SAMPLE_STACK_USER   - TODO
     */

#define ADD_FIELD(cl, t, n)                     \
    do {                            \
        pr2("  field '%s'\n", n);           \
        if (bt_ctf_event_class_add_field(cl, t, n)) {   \
            pr_err("Failed to add field '%s';\n", n);\
            return -1;              \
        }                       \
    } while (0)

    if (type & PERF_SAMPLE_IP)
        ADD_FIELD(event_class, cw->data.u64_hex, "perf_ip");

    if (type & PERF_SAMPLE_TID) {
        ADD_FIELD(event_class, cw->data.s32, "perf_tid");
        ADD_FIELD(event_class, cw->data.s32, "perf_pid");
    }

    if ((type & PERF_SAMPLE_ID) ||
        (type & PERF_SAMPLE_IDENTIFIER))
        ADD_FIELD(event_class, cw->data.u64, "perf_id");

    if (type & PERF_SAMPLE_STREAM_ID)
        ADD_FIELD(event_class, cw->data.u64, "perf_stream_id");

    if (type & PERF_SAMPLE_PERIOD)
        ADD_FIELD(event_class, cw->data.u64, "perf_period");

    if (type & PERF_SAMPLE_WEIGHT)
        ADD_FIELD(event_class, cw->data.u64, "perf_weight");

    if (type & PERF_SAMPLE_DATA_SRC)
        ADD_FIELD(event_class, cw->data.u64, "perf_data_src");

    if (type & PERF_SAMPLE_TRANSACTION)
        ADD_FIELD(event_class, cw->data.u64, "perf_transaction");

#undef ADD_FIELD
    return 0;
}

/*
 * Create the CTF event class for @evsel, declare its fields, register
 * it with the stream class and stash it in evsel->priv.
 */
static int add_event(struct ctf_writer *cw, struct perf_evsel *evsel)
{
    struct bt_ctf_event_class *event_class;
    struct evsel_priv *priv;
    const char *name = perf_evsel__name(evsel);
    int ret;

    pr("Adding event '%s' (type %d)\n", name, evsel->attr.type);

    event_class = bt_ctf_event_class_create(name);
    if (!event_class)
        return -1;

    ret = add_generic_types(cw, evsel, event_class);
    if (ret)
        goto err;

    if (evsel->attr.type == PERF_TYPE_TRACEPOINT) {
        ret = add_tracepoint_types(cw, evsel, event_class);
        if (ret)
            goto err;
    }

    if (perf_evsel__is_bpf_output(evsel)) {
        ret = add_bpf_output_types(cw, event_class);
        if (ret)
            goto err;
    }

    ret =
bt_ctf_stream_class_add_event_class(cw->stream_class, event_class); 1079 if (ret) { 1080 pr("Failed to add event class into stream.\n"); 1081 goto err; 1082 } 1083 1084 priv = malloc(sizeof(*priv)); 1085 if (!priv) 1086 goto err; 1087 1088 priv->event_class = event_class; 1089 evsel->priv = priv; 1090 return 0; 1091 1092 err: 1093 bt_ctf_event_class_put(event_class); 1094 pr_err("Failed to add event '%s'.\n", name); 1095 return -1; 1096 } 1097 1098 static int setup_events(struct ctf_writer *cw, struct perf_session *session) 1099 { 1100 struct perf_evlist *evlist = session->evlist; 1101 struct perf_evsel *evsel; 1102 int ret; 1103 1104 evlist__for_each_entry(evlist, evsel) { 1105 ret = add_event(cw, evsel); 1106 if (ret) 1107 return ret; 1108 } 1109 return 0; 1110 } 1111 1112 #define __NON_SAMPLE_ADD_FIELD(t, n) \ 1113 do { \ 1114 pr2(" field '%s'\n", #n); \ 1115 if (bt_ctf_event_class_add_field(event_class, cw->data.t, #n)) {\ 1116 pr_err("Failed to add field '%s';\n", #n);\ 1117 return -1; \ 1118 } \ 1119 } while(0) 1120 1121 #define __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(_name, body) \ 1122 static int add_##_name##_event(struct ctf_writer *cw) \ 1123 { \ 1124 struct bt_ctf_event_class *event_class; \ 1125 int ret; \ 1126 \ 1127 pr("Adding "#_name" event\n"); \ 1128 event_class = bt_ctf_event_class_create("perf_" #_name);\ 1129 if (!event_class) \ 1130 return -1; \ 1131 body \ 1132 \ 1133 ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);\ 1134 if (ret) { \ 1135 pr("Failed to add event class '"#_name"' into stream.\n");\ 1136 return ret; \ 1137 } \ 1138 \ 1139 cw->_name##_class = event_class; \ 1140 bt_ctf_event_class_put(event_class); \ 1141 return 0; \ 1142 } 1143 1144 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(comm, 1145 __NON_SAMPLE_ADD_FIELD(u32, pid); 1146 __NON_SAMPLE_ADD_FIELD(u32, tid); 1147 __NON_SAMPLE_ADD_FIELD(string, comm); 1148 ) 1149 1150 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(fork, 1151 __NON_SAMPLE_ADD_FIELD(u32, pid); 1152 
		/* Tail of a __FUNC_ADD_NON_SAMPLE_EVENT_CLASS() invocation whose
		 * opening lines precede this chunk. */
		__NON_SAMPLE_ADD_FIELD(u32, ppid);
		__NON_SAMPLE_ADD_FIELD(u32, tid);
		__NON_SAMPLE_ADD_FIELD(u32, ptid);
		__NON_SAMPLE_ADD_FIELD(u64, time);
)

/* Declare the event class for PERF_RECORD_EXIT non-sample events. */
__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(exit,
	__NON_SAMPLE_ADD_FIELD(u32, pid);
	__NON_SAMPLE_ADD_FIELD(u32, ppid);
	__NON_SAMPLE_ADD_FIELD(u32, tid);
	__NON_SAMPLE_ADD_FIELD(u32, ptid);
	__NON_SAMPLE_ADD_FIELD(u64, time);
)

#undef __NON_SAMPLE_ADD_FIELD
#undef __FUNC_ADD_NON_SAMPLE_EVENT_CLASS

/*
 * Register the CTF event classes for the non-sample perf events
 * (comm/exit/fork) that are converted when the user asks for "all"
 * events.  Returns 0 on success, or the first failing add_*_event()
 * return value.
 */
static int setup_non_sample_events(struct ctf_writer *cw,
				   struct perf_session *session __maybe_unused)
{
	int ret;

	ret = add_comm_event(cw);
	if (ret)
		return ret;
	ret = add_exit_event(cw);
	if (ret)
		return ret;
	ret = add_fork_event(cw);
	if (ret)
		return ret;
	return 0;
}

/*
 * Drop the per-evsel CTF event class references stored in evsel->priv
 * (set up during event-class creation), then delete the session's
 * evlist and clear the pointer so it is not freed twice.
 */
static void cleanup_events(struct perf_session *session)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		struct evsel_priv *priv;

		priv = evsel->priv;
		bt_ctf_event_class_put(priv->event_class);
		zfree(&evsel->priv);
	}

	perf_evlist__delete(evlist);
	session->evlist = NULL;
}

/*
 * Allocate the per-cpu array of ctf_stream pointers.  The streams
 * themselves are created lazily elsewhere; this only sizes and zeroes
 * the pointer array.  Returns 0 or -ENOMEM.
 */
static int setup_streams(struct ctf_writer *cw, struct perf_session *session)
{
	struct ctf_stream **stream;
	struct perf_header *ph = &session->header;
	int ncpus;

	/*
	 * Try to get the number of cpus used in the data file,
	 * if not present fallback to the MAX_CPUS.
	 */
	ncpus = ph->env.nr_cpus_avail ?: MAX_CPUS;

	stream = zalloc(sizeof(*stream) * ncpus);
	if (!stream) {
		pr_err("Failed to allocate streams.\n");
		return -ENOMEM;
	}

	cw->stream = stream;
	cw->stream_cnt = ncpus;
	return 0;
}

/*
 * Delete every per-cpu stream (ctf_stream__delete() is assumed to
 * tolerate the NULL slots left by the zalloc'ed array) and free the
 * pointer array itself.
 */
static void free_streams(struct ctf_writer *cw)
{
	int cpu;

	for (cpu = 0; cpu < cw->stream_cnt; cpu++)
		ctf_stream__delete(cw->stream[cpu]);

	free(cw->stream);
}

/*
 * Record the perf.data header environment (host, OS release, arch, ...)
 * as CTF environment fields on the writer.  Returns 0, or -1 on the
 * first bt_ctf_writer_add_environment_field() failure (the ADD() macro
 * returns straight out of the function).
 */
static int ctf_writer__setup_env(struct ctf_writer *cw,
				 struct perf_session *session)
{
	struct perf_header *header = &session->header;
	struct bt_ctf_writer *writer = cw->writer;

#define ADD(__n, __v)							\
do {									\
	if (bt_ctf_writer_add_environment_field(writer, __n, __v))	\
		return -1;						\
} while (0)

	ADD("host",    header->env.hostname);
	ADD("sysname", "Linux");
	ADD("release", header->env.os_release);
	ADD("version", header->env.version);
	ADD("machine", header->env.arch);
	ADD("domain", "kernel");
	ADD("tracer_name", "perf");

#undef ADD
	return 0;
}

/*
 * Configure the CTF clock to model the perf clock: 1 GHz frequency
 * (nanosecond resolution), zero offset, non-absolute.  Each SET()
 * returns -1 from the function on the first failing setter.
 */
static int ctf_writer__setup_clock(struct ctf_writer *cw)
{
	struct bt_ctf_clock *clock = cw->clock;

	bt_ctf_clock_set_description(clock, "perf clock");

#define SET(__n, __v)				\
do {						\
	if (bt_ctf_clock_set_##__n(clock, __v))	\
		return -1;			\
} while (0)

	SET(frequency,   1000000000);
	SET(offset_s,    0);
	SET(offset,      0);
	SET(precision,  10);
	SET(is_absolute, 0);

#undef SET
	return 0;
}

/*
 * Create a CTF integer field type of the given bit @size, optionally
 * signed and/or displayed in hexadecimal, with byte order matching the
 * host.  Returns a new reference, or NULL on failure (the partially
 * built type is released via the err: path).
 */
static struct bt_ctf_field_type *create_int_type(int size, bool sign, bool hex)
{
	struct bt_ctf_field_type *type;

	type = bt_ctf_field_type_integer_create(size);
	if (!type)
		return NULL;

	if (sign &&
	    bt_ctf_field_type_integer_set_signed(type, 1))
		goto err;

	if (hex &&
	    bt_ctf_field_type_integer_set_base(type, BT_CTF_INTEGER_BASE_HEXADECIMAL))
		goto err;

#if __BYTE_ORDER == __BIG_ENDIAN
	bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_BIG_ENDIAN);
#else
	bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_LITTLE_ENDIAN);
#endif

	/*
	 * NOTE(review): debug-only message; "un" is printed when @sign is
	 * true, so signed types are reported as "unsigned" — looks
	 * inverted, worth confirming upstream.
	 */
	pr2("Created type: INTEGER %d-bit %ssigned %s\n",
	    size, sign ? "un" : "", hex ? "hex" : "");
	return type;

err:
	bt_ctf_field_type_put(type);
	return NULL;
}

/*
 * Drop the references to the shared field types created by
 * ctf_writer__init_data().  Safe to call on a partially initialized set
 * (bt_ctf_field_type_put() on the remaining zeroed slots).
 *
 * NOTE(review): the union in struct ctf_writer declares array[6] but
 * overlays seven named pointers (s64..u64_hex), so this loop cannot
 * reach the last member — verify the array size against the named
 * fields; as declared, u64_hex would never be released here.
 */
static void ctf_writer__cleanup_data(struct ctf_writer *cw)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(cw->data.array); i++)
		bt_ctf_field_type_put(cw->data.array[i]);
}

/*
 * Create the shared field types used by all event classes: signed and
 * unsigned 32/64-bit integers, their hex variants, and a string type.
 * On any failure everything created so far is released and -1 is
 * returned.
 */
static int ctf_writer__init_data(struct ctf_writer *cw)
{
#define CREATE_INT_TYPE(type, size, sign, hex)		\
do {							\
	(type) = create_int_type(size, sign, hex);	\
	if (!(type))					\
		goto err;				\
} while (0)

	CREATE_INT_TYPE(cw->data.s64, 64, true,  false);
	CREATE_INT_TYPE(cw->data.u64, 64, false, false);
	CREATE_INT_TYPE(cw->data.s32, 32, true,  false);
	CREATE_INT_TYPE(cw->data.u32, 32, false, false);
	CREATE_INT_TYPE(cw->data.u32_hex, 32, false, true);
	CREATE_INT_TYPE(cw->data.u64_hex, 64, false, true);

	cw->data.string = bt_ctf_field_type_string_create();
	if (cw->data.string)
		return 0;

err:
	ctf_writer__cleanup_data(cw);
	pr_err("Failed to create data types.\n");
	return -1;
}

/*
 * Tear down everything ctf_writer__init() (and later setup steps) built:
 * field types, clock, per-cpu streams, stream class and the writer
 * itself, then zero the whole struct so a repeated cleanup is harmless.
 */
static void ctf_writer__cleanup(struct ctf_writer *cw)
{
	ctf_writer__cleanup_data(cw);

	bt_ctf_clock_put(cw->clock);
	free_streams(cw);
	bt_ctf_stream_class_put(cw->stream_class);
	bt_ctf_writer_put(cw->writer);

	/* and NULL all the pointers */
	memset(cw, 0, sizeof(*cw));
}

/*
 * Build the CTF writer for @path: writer, clock ("perf_clock"), stream
 * class, shared field types, a cpu_id field in the packet context, and
 * finally attach the clock to the writer.  Partial state is stored in
 * @cw as it is created so that the single err_cleanup path can release
 * it all via ctf_writer__cleanup().  Returns 0 or -1.
 */
static int ctf_writer__init(struct ctf_writer *cw, const char *path)
{
	struct bt_ctf_writer		*writer;
	struct bt_ctf_stream_class	*stream_class;
	struct bt_ctf_clock		*clock;
	struct bt_ctf_field_type	*pkt_ctx_type;
	int ret;

	/* CTF writer */
	writer = bt_ctf_writer_create(path);
	if (!writer)
		goto err;

	cw->writer = writer;

	/* CTF clock */
	clock = bt_ctf_clock_create("perf_clock");
	if (!clock) {
		pr("Failed to create CTF clock.\n");
		goto err_cleanup;
	}

	cw->clock = clock;

	if (ctf_writer__setup_clock(cw)) {
		pr("Failed to setup CTF clock.\n");
		goto err_cleanup;
	}

	/* CTF stream class */
	stream_class = bt_ctf_stream_class_create("perf_stream");
	if (!stream_class) {
		pr("Failed to create CTF stream class.\n");
		goto err_cleanup;
	}

	cw->stream_class = stream_class;

	/* CTF clock stream setup */
	if (bt_ctf_stream_class_set_clock(stream_class, clock)) {
		pr("Failed to assign CTF clock to stream class.\n");
		goto err_cleanup;
	}

	if (ctf_writer__init_data(cw))
		goto err_cleanup;

	/* Add cpu_id for packet context */
	pkt_ctx_type = bt_ctf_stream_class_get_packet_context_type(stream_class);
	if (!pkt_ctx_type)
		goto err_cleanup;

	/* Drop the getter's reference regardless of add_field success. */
	ret = bt_ctf_field_type_structure_add_field(pkt_ctx_type, cw->data.u32, "cpu_id");
	bt_ctf_field_type_put(pkt_ctx_type);
	if (ret)
		goto err_cleanup;

	/* CTF clock writer setup */
	if (bt_ctf_writer_add_clock(writer, clock)) {
		pr("Failed to assign CTF clock to writer.\n");
		goto err_cleanup;
	}

	return 0;

err_cleanup:
	ctf_writer__cleanup(cw);
err:
	pr_err("Failed to setup CTF writer.\n");
	return -1;
}

/*
 * Flush every per-cpu stream; stop at the first stream whose flush
 * fails and propagate that error (0 on full success).
 */
static int ctf_writer__flush_streams(struct ctf_writer *cw)
{
	int cpu, ret = 0;

	for (cpu = 0; cpu < cw->stream_cnt && !ret; cpu++)
		ret = ctf_stream__flush(cw->stream[cpu]);

	return ret;
}

/*
 * perf_config() callback: pick up the "convert.queue-size" setting for
 * the ordered-events queue; all other variables are ignored.  Always
 * returns 0 so unrelated config keys do not abort config parsing.
 */
static int convert__config(const char *var, const char *value, void *cb)
{
	struct convert *c = cb;

	if (!strcmp(var, "convert.queue-size")) {
		c->queue_size = perf_config_u64(var, value);
		return 0;
	}

	return 0;
}

/*
 * Entry point: convert the perf.data file @input into a CTF trace at
 * @path.  Sets up the perf_tool callbacks (non-sample conversion only
 * when opts->all), initializes the CTF writer, replays the session
 * through the conversion callbacks, flushes the streams and prints a
 * summary.  Returns 0 on success, a negative value on failure; the
 * labelled exits unwind session and writer in reverse setup order.
 */
int bt_convert__perf2ctf(const char *input, const char *path,
			 struct perf_data_convert_opts *opts)
{
	struct perf_session *session;
	struct perf_data_file file = {
		.path = input,
		.mode = PERF_DATA_MODE_READ,
		.force = opts->force,
	};
	struct convert c = {
		.tool = {
			.sample          = process_sample_event,
			.mmap            = perf_event__process_mmap,
			.mmap2           = perf_event__process_mmap2,
			.comm            = perf_event__process_comm,
			.exit            = perf_event__process_exit,
			.fork            = perf_event__process_fork,
			.lost            = perf_event__process_lost,
			.tracing_data    = perf_event__process_tracing_data,
			.build_id        = perf_event__process_build_id,
			.namespaces      = perf_event__process_namespaces,
			.ordered_events  = true,
			.ordering_requires_timestamps = true,
		},
	};
	struct ctf_writer *cw = &c.writer;
	int err;

	/* With --all, also emit comm/exit/fork as CTF events. */
	if (opts->all) {
		c.tool.comm = process_comm_event;
		c.tool.exit = process_exit_event;
		c.tool.fork = process_fork_event;
	}

	err = perf_config(convert__config, &c);
	if (err)
		return err;

	/* CTF writer */
	if (ctf_writer__init(cw, path))
		return -1;

	err = -1;
	/* perf.data session */
	session = perf_session__new(&file, 0, &c.tool);
	if (!session)
		goto free_writer;

	if (c.queue_size) {
		ordered_events__set_alloc_size(&session->ordered_events,
					       c.queue_size);
	}

	/* CTF writer env/clock setup */
	if (ctf_writer__setup_env(cw, session))
		goto free_session;

	/* CTF events setup */
	if (setup_events(cw, session))
		goto free_session;

	if (opts->all && setup_non_sample_events(cw, session))
		goto free_session;

	if (setup_streams(cw, session))
		goto free_session;

	err = perf_session__process_events(session);
	if (!err)
		err = ctf_writer__flush_streams(cw);
	else
		pr_err("Error during conversion.\n");

	/* Summary is printed even when processing reported an error. */
	fprintf(stderr,
		"[ perf data convert: Converted '%s' into CTF data '%s' ]\n",
		file.path, path);

	fprintf(stderr,
		"[ perf data convert: Converted and wrote %.3f MB (%" PRIu64 " samples",
		(double) c.events_size / 1024.0 / 1024.0,
		c.events_count);

	if (!c.non_sample_count)
		fprintf(stderr, ") ]\n");
	else
		fprintf(stderr, ", %" PRIu64 " non-samples) ]\n", c.non_sample_count);

	cleanup_events(session);
	perf_session__delete(session);
	ctf_writer__cleanup(cw);

	return err;

free_session:
	perf_session__delete(session);
free_writer:
	ctf_writer__cleanup(cw);
	pr_err("Error during conversion setup.\n");
	return err;
}