1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * CTF writing support via babeltrace. 4 * 5 * Copyright (C) 2014, Jiri Olsa <jolsa@redhat.com> 6 * Copyright (C) 2014, Sebastian Andrzej Siewior <bigeasy@linutronix.de> 7 */ 8 9 #include <errno.h> 10 #include <inttypes.h> 11 #include <linux/compiler.h> 12 #include <linux/kernel.h> 13 #include <linux/zalloc.h> 14 #include <babeltrace/ctf-writer/writer.h> 15 #include <babeltrace/ctf-writer/clock.h> 16 #include <babeltrace/ctf-writer/stream.h> 17 #include <babeltrace/ctf-writer/event.h> 18 #include <babeltrace/ctf-writer/event-types.h> 19 #include <babeltrace/ctf-writer/event-fields.h> 20 #include <babeltrace/ctf-ir/utils.h> 21 #include <babeltrace/ctf/events.h> 22 #include <traceevent/event-parse.h> 23 #include "asm/bug.h" 24 #include "data-convert.h" 25 #include "session.h" 26 #include "debug.h" 27 #include "tool.h" 28 #include "evlist.h" 29 #include "evsel.h" 30 #include "machine.h" 31 #include "config.h" 32 #include <linux/ctype.h> 33 #include <linux/err.h> 34 #include <linux/time64.h> 35 #include "util.h" 36 #include "clockid.h" 37 #include "util/sample.h" 38 39 #define pr_N(n, fmt, ...) \ 40 eprintf(n, debug_data_convert, fmt, ##__VA_ARGS__) 41 42 #define pr(fmt, ...) pr_N(1, pr_fmt(fmt), ##__VA_ARGS__) 43 #define pr2(fmt, ...) pr_N(2, pr_fmt(fmt), ##__VA_ARGS__) 44 45 #define pr_time2(t, fmt, ...) pr_time_N(2, debug_data_convert, t, pr_fmt(fmt), ##__VA_ARGS__) 46 47 struct evsel_priv { 48 struct bt_ctf_event_class *event_class; 49 }; 50 51 #define MAX_CPUS 4096 52 53 struct ctf_stream { 54 struct bt_ctf_stream *stream; 55 int cpu; 56 u32 count; 57 }; 58 59 struct ctf_writer { 60 /* writer primitives */ 61 struct bt_ctf_writer *writer; 62 struct ctf_stream **stream; 63 int stream_cnt; 64 struct bt_ctf_stream_class *stream_class; 65 struct bt_ctf_clock *clock; 66 67 /* data types */ 68 union { 69 struct { 70 struct bt_ctf_field_type *s64; 71 struct bt_ctf_field_type *u64; 72 struct bt_ctf_field_type *s32; 73 struct bt_ctf_field_type *u32; 74 struct bt_ctf_field_type *string; 75 struct bt_ctf_field_type *u32_hex; 76 struct bt_ctf_field_type *u64_hex; 77 }; 78 struct bt_ctf_field_type *array[6]; 79 } data; 80 struct bt_ctf_event_class *comm_class; 81 struct bt_ctf_event_class *exit_class; 82 struct bt_ctf_event_class *fork_class; 83 struct bt_ctf_event_class *mmap_class; 84 struct bt_ctf_event_class *mmap2_class; 85 }; 86 87 struct convert { 88 struct perf_tool tool; 89 struct ctf_writer writer; 90 91 u64 events_size; 92 u64 events_count; 93 u64 non_sample_count; 94 95 /* Ordered events configured queue size. */ 96 u64 queue_size; 97 }; 98 99 static int value_set(struct bt_ctf_field_type *type, 100 struct bt_ctf_event *event, 101 const char *name, u64 val) 102 { 103 struct bt_ctf_field *field; 104 bool sign = bt_ctf_field_type_integer_get_signed(type); 105 int ret; 106 107 field = bt_ctf_field_create(type); 108 if (!field) { 109 pr_err("failed to create a field %s\n", name); 110 return -1; 111 } 112 113 if (sign) { 114 ret = bt_ctf_field_signed_integer_set_value(field, val); 115 if (ret) { 116 pr_err("failed to set field value %s\n", name); 117 goto err; 118 } 119 } else { 120 ret = bt_ctf_field_unsigned_integer_set_value(field, val); 121 if (ret) { 122 pr_err("failed to set field value %s\n", name); 123 goto err; 124 } 125 } 126 127 ret = bt_ctf_event_set_payload(event, name, field); 128 if (ret) { 129 pr_err("failed to set payload %s\n", name); 130 goto err; 131 } 132 133 pr2(" SET [%s = %" PRIu64 "]\n", name, val); 134 135 err: 136 bt_ctf_field_put(field); 137 return ret; 138 } 139 140 #define __FUNC_VALUE_SET(_name, _val_type) \ 141 static __maybe_unused int value_set_##_name(struct ctf_writer *cw, \ 142 struct bt_ctf_event *event, \ 143 const char *name, \ 144 _val_type val) \ 145 { \ 146 struct bt_ctf_field_type *type = cw->data._name; \ 147 return value_set(type, event, name, (u64) val); \ 148 } 149 150 #define FUNC_VALUE_SET(_name) __FUNC_VALUE_SET(_name, _name) 151 152 FUNC_VALUE_SET(s32) 153 FUNC_VALUE_SET(u32) 154 FUNC_VALUE_SET(s64) 155 FUNC_VALUE_SET(u64) 156 __FUNC_VALUE_SET(u64_hex, u64) 157 158 static int string_set_value(struct bt_ctf_field *field, const char *string); 159 static __maybe_unused int 160 value_set_string(struct ctf_writer *cw, struct bt_ctf_event *event, 161 const char *name, const char *string) 162 { 163 struct bt_ctf_field_type *type = cw->data.string; 164 struct bt_ctf_field *field; 165 int ret = 0; 166 167 field = bt_ctf_field_create(type); 168 if (!field) { 169 pr_err("failed to create a field %s\n", name); 170 return -1; 171 } 172 173 ret = string_set_value(field, string); 174 if (ret) { 175 pr_err("failed to set value %s\n", name); 176 goto err_put_field; 177 } 178 179 ret = bt_ctf_event_set_payload(event, name, field); 180 if (ret) 181 pr_err("failed to set payload %s\n", name); 182 183 err_put_field: 184 bt_ctf_field_put(field); 185 return ret; 186 } 187 188 static struct bt_ctf_field_type* 189 get_tracepoint_field_type(struct ctf_writer *cw, struct tep_format_field *field) 190 { 191 unsigned long flags = field->flags; 192 193 if (flags & TEP_FIELD_IS_STRING) 194 return cw->data.string; 195 196 if (!(flags & TEP_FIELD_IS_SIGNED)) { 197 /* unsigned long are mostly pointers */ 198 if (flags & TEP_FIELD_IS_LONG || flags & TEP_FIELD_IS_POINTER) 199 return cw->data.u64_hex; 200 } 201 202 if (flags & TEP_FIELD_IS_SIGNED) { 203 if (field->size == 8) 204 return cw->data.s64; 205 else 206 return cw->data.s32; 207 } 208 209 if (field->size == 8) 210 return cw->data.u64; 211 else 212 return cw->data.u32; 213 } 214 215 static unsigned long long adjust_signedness(unsigned long long value_int, int size) 216 { 217 unsigned long long value_mask; 218 219 /* 220 * value_mask = (1 << (size * 8 - 1)) - 1. 221 * Directly set value_mask for code readers. 222 */ 223 switch (size) { 224 case 1: 225 value_mask = 0x7fULL; 226 break; 227 case 2: 228 value_mask = 0x7fffULL; 229 break; 230 case 4: 231 value_mask = 0x7fffffffULL; 232 break; 233 case 8: 234 /* 235 * For 64 bit value, return it self. There is no need 236 * to fill high bit. 237 */ 238 /* Fall through */ 239 default: 240 /* BUG! */ 241 return value_int; 242 } 243 244 /* If it is a positive value, don't adjust. */ 245 if ((value_int & (~0ULL - value_mask)) == 0) 246 return value_int; 247 248 /* Fill upper part of value_int with 1 to make it a negative long long. */ 249 return (value_int & value_mask) | ~value_mask; 250 } 251 252 static int string_set_value(struct bt_ctf_field *field, const char *string) 253 { 254 char *buffer = NULL; 255 size_t len = strlen(string), i, p; 256 int err; 257 258 for (i = p = 0; i < len; i++, p++) { 259 if (isprint(string[i])) { 260 if (!buffer) 261 continue; 262 buffer[p] = string[i]; 263 } else { 264 char numstr[5]; 265 266 snprintf(numstr, sizeof(numstr), "\\x%02x", 267 (unsigned int)(string[i]) & 0xff); 268 269 if (!buffer) { 270 buffer = zalloc(i + (len - i) * 4 + 2); 271 if (!buffer) { 272 pr_err("failed to set unprintable string '%s'\n", string); 273 return bt_ctf_field_string_set_value(field, "UNPRINTABLE-STRING"); 274 } 275 if (i > 0) 276 strncpy(buffer, string, i); 277 } 278 memcpy(buffer + p, numstr, 4); 279 p += 3; 280 } 281 } 282 283 if (!buffer) 284 return bt_ctf_field_string_set_value(field, string); 285 err = bt_ctf_field_string_set_value(field, buffer); 286 free(buffer); 287 return err; 288 } 289 290 static int add_tracepoint_field_value(struct ctf_writer *cw, 291 struct bt_ctf_event_class *event_class, 292 struct bt_ctf_event *event, 293 struct perf_sample *sample, 294 struct tep_format_field *fmtf) 295 { 296 struct bt_ctf_field_type *type; 297 struct bt_ctf_field *array_field; 298 struct bt_ctf_field *field; 299 const char *name = fmtf->name; 300 void *data = sample->raw_data; 301 unsigned long flags = fmtf->flags; 302 unsigned int n_items; 303 unsigned int i; 304 unsigned int offset; 305 unsigned int len; 306 int ret; 307 308 name = fmtf->alias; 309 offset = fmtf->offset; 310 len = fmtf->size; 311 if (flags & TEP_FIELD_IS_STRING) 312 flags &= ~TEP_FIELD_IS_ARRAY; 313 314 if (flags & TEP_FIELD_IS_DYNAMIC) { 315 unsigned long long tmp_val; 316 317 tmp_val = tep_read_number(fmtf->event->tep, 318 data + offset, len); 319 offset = tmp_val; 320 len = offset >> 16; 321 offset &= 0xffff; 322 if (flags & TEP_FIELD_IS_RELATIVE) 323 offset += fmtf->offset + fmtf->size; 324 } 325 326 if (flags & TEP_FIELD_IS_ARRAY) { 327 328 type = bt_ctf_event_class_get_field_by_name( 329 event_class, name); 330 array_field = bt_ctf_field_create(type); 331 bt_ctf_field_type_put(type); 332 if (!array_field) { 333 pr_err("Failed to create array type %s\n", name); 334 return -1; 335 } 336 337 len = fmtf->size / fmtf->arraylen; 338 n_items = fmtf->arraylen; 339 } else { 340 n_items = 1; 341 array_field = NULL; 342 } 343 344 type = get_tracepoint_field_type(cw, fmtf); 345 346 for (i = 0; i < n_items; i++) { 347 if (flags & TEP_FIELD_IS_ARRAY) 348 field = bt_ctf_field_array_get_field(array_field, i); 349 else 350 field = bt_ctf_field_create(type); 351 352 if (!field) { 353 pr_err("failed to create a field %s\n", name); 354 return -1; 355 } 356 357 if (flags & TEP_FIELD_IS_STRING) 358 ret = string_set_value(field, data + offset + i * len); 359 else { 360 unsigned long long value_int; 361 362 value_int = tep_read_number( 363 fmtf->event->tep, 364 data + offset + i * len, len); 365 366 if (!(flags & TEP_FIELD_IS_SIGNED)) 367 ret = bt_ctf_field_unsigned_integer_set_value( 368 field, value_int); 369 else 370 ret = bt_ctf_field_signed_integer_set_value( 371 field, adjust_signedness(value_int, len)); 372 } 373 374 if (ret) { 375 pr_err("failed to set file value %s\n", name); 376 goto err_put_field; 377 } 378 if (!(flags & TEP_FIELD_IS_ARRAY)) { 379 ret = bt_ctf_event_set_payload(event, name, field); 380 if (ret) { 381 pr_err("failed to set payload %s\n", name); 382 goto err_put_field; 383 } 384 } 385 bt_ctf_field_put(field); 386 } 387 if (flags & TEP_FIELD_IS_ARRAY) { 388 ret = bt_ctf_event_set_payload(event, name, array_field); 389 if (ret) { 390 pr_err("Failed add payload array %s\n", name); 391 return -1; 392 } 393 bt_ctf_field_put(array_field); 394 } 395 return 0; 396 397 err_put_field: 398 bt_ctf_field_put(field); 399 return -1; 400 } 401 402 static int add_tracepoint_fields_values(struct ctf_writer *cw, 403 struct bt_ctf_event_class *event_class, 404 struct bt_ctf_event *event, 405 struct tep_format_field *fields, 406 struct perf_sample *sample) 407 { 408 struct tep_format_field *field; 409 int ret; 410 411 for (field = fields; field; field = field->next) { 412 ret = add_tracepoint_field_value(cw, event_class, event, sample, 413 field); 414 if (ret) 415 return -1; 416 } 417 return 0; 418 } 419 420 static int add_tracepoint_values(struct ctf_writer *cw, 421 struct bt_ctf_event_class *event_class, 422 struct bt_ctf_event *event, 423 struct evsel *evsel, 424 struct perf_sample *sample) 425 { 426 struct tep_format_field *common_fields = evsel->tp_format->format.common_fields; 427 struct tep_format_field *fields = evsel->tp_format->format.fields; 428 int ret; 429 430 ret = add_tracepoint_fields_values(cw, event_class, event, 431 common_fields, sample); 432 if (!ret) 433 ret = add_tracepoint_fields_values(cw, event_class, event, 434 fields, sample); 435 436 return ret; 437 } 438 439 static int 440 add_bpf_output_values(struct bt_ctf_event_class *event_class, 441 struct bt_ctf_event *event, 442 struct perf_sample *sample) 443 { 444 struct bt_ctf_field_type *len_type, *seq_type; 445 struct bt_ctf_field *len_field, *seq_field; 446 unsigned int raw_size = sample->raw_size; 447 unsigned int nr_elements = raw_size / sizeof(u32); 448 unsigned int i; 449 int ret; 450 451 if (nr_elements * sizeof(u32) != raw_size) 452 pr_warning("Incorrect raw_size (%u) in bpf output event, skip %zu bytes\n", 453 raw_size, nr_elements * sizeof(u32) - raw_size); 454 455 len_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_len"); 456 len_field = bt_ctf_field_create(len_type); 457 if (!len_field) { 458 pr_err("failed to create 'raw_len' for bpf output event\n"); 459 ret = -1; 460 goto put_len_type; 461 } 462 463 ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements); 464 if (ret) { 465 pr_err("failed to set field value for raw_len\n"); 466 goto put_len_field; 467 } 468 ret = bt_ctf_event_set_payload(event, "raw_len", len_field); 469 if (ret) { 470 pr_err("failed to set payload to raw_len\n"); 471 goto put_len_field; 472 } 473 474 seq_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_data"); 475 seq_field = bt_ctf_field_create(seq_type); 476 if (!seq_field) { 477 pr_err("failed to create 'raw_data' for bpf output event\n"); 478 ret = -1; 479 goto put_seq_type; 480 } 481 482 ret = bt_ctf_field_sequence_set_length(seq_field, len_field); 483 if (ret) { 484 pr_err("failed to set length of 'raw_data'\n"); 485 goto put_seq_field; 486 } 487 488 for (i = 0; i < nr_elements; i++) { 489 struct bt_ctf_field *elem_field = 490 bt_ctf_field_sequence_get_field(seq_field, i); 491 492 ret = bt_ctf_field_unsigned_integer_set_value(elem_field, 493 ((u32 *)(sample->raw_data))[i]); 494 495 bt_ctf_field_put(elem_field); 496 if (ret) { 497 pr_err("failed to set raw_data[%d]\n", i); 498 goto put_seq_field; 499 } 500 } 501 502 ret = bt_ctf_event_set_payload(event, "raw_data", seq_field); 503 if (ret) 504 pr_err("failed to set payload for raw_data\n"); 505 506 put_seq_field: 507 bt_ctf_field_put(seq_field); 508 put_seq_type: 509 bt_ctf_field_type_put(seq_type); 510 put_len_field: 511 bt_ctf_field_put(len_field); 512 put_len_type: 513 bt_ctf_field_type_put(len_type); 514 return ret; 515 } 516 517 static int 518 add_callchain_output_values(struct bt_ctf_event_class *event_class, 519 struct bt_ctf_event *event, 520 struct ip_callchain *callchain) 521 { 522 struct bt_ctf_field_type *len_type, *seq_type; 523 struct bt_ctf_field *len_field, *seq_field; 524 unsigned int nr_elements = callchain->nr; 525 unsigned int i; 526 int ret; 527 528 len_type = bt_ctf_event_class_get_field_by_name( 529 event_class, "perf_callchain_size"); 530 len_field = bt_ctf_field_create(len_type); 531 if (!len_field) { 532 pr_err("failed to create 'perf_callchain_size' for callchain output event\n"); 533 ret = -1; 534 goto put_len_type; 535 } 536 537 ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements); 538 if (ret) { 539 pr_err("failed to set field value for perf_callchain_size\n"); 540 goto put_len_field; 541 } 542 ret = bt_ctf_event_set_payload(event, "perf_callchain_size", len_field); 543 if (ret) { 544 pr_err("failed to set payload to perf_callchain_size\n"); 545 goto put_len_field; 546 } 547 548 seq_type = bt_ctf_event_class_get_field_by_name( 549 event_class, "perf_callchain"); 550 seq_field = bt_ctf_field_create(seq_type); 551 if (!seq_field) { 552 pr_err("failed to create 'perf_callchain' for callchain output event\n"); 553 ret = -1; 554 goto put_seq_type; 555 } 556 557 ret = bt_ctf_field_sequence_set_length(seq_field, len_field); 558 if (ret) { 559 pr_err("failed to set length of 'perf_callchain'\n"); 560 goto put_seq_field; 561 } 562 563 for (i = 0; i < nr_elements; i++) { 564 struct bt_ctf_field *elem_field = 565 bt_ctf_field_sequence_get_field(seq_field, i); 566 567 ret = bt_ctf_field_unsigned_integer_set_value(elem_field, 568 ((u64 *)(callchain->ips))[i]); 569 570 bt_ctf_field_put(elem_field); 571 if (ret) { 572 pr_err("failed to set callchain[%d]\n", i); 573 goto put_seq_field; 574 } 575 } 576 577 ret = bt_ctf_event_set_payload(event, "perf_callchain", seq_field); 578 if (ret) 579 pr_err("failed to set payload for raw_data\n"); 580 581 put_seq_field: 582 bt_ctf_field_put(seq_field); 583 put_seq_type: 584 bt_ctf_field_type_put(seq_type); 585 put_len_field: 586 bt_ctf_field_put(len_field); 587 put_len_type: 588 bt_ctf_field_type_put(len_type); 589 return ret; 590 } 591 592 static int add_generic_values(struct ctf_writer *cw, 593 struct bt_ctf_event *event, 594 struct evsel *evsel, 595 struct perf_sample *sample) 596 { 597 u64 type = evsel->core.attr.sample_type; 598 int ret; 599 600 /* 601 * missing: 602 * PERF_SAMPLE_TIME - not needed as we have it in 603 * ctf event header 604 * PERF_SAMPLE_READ - TODO 605 * PERF_SAMPLE_RAW - tracepoint fields are handled separately 606 * PERF_SAMPLE_BRANCH_STACK - TODO 607 * PERF_SAMPLE_REGS_USER - TODO 608 * PERF_SAMPLE_STACK_USER - TODO 609 */ 610 611 if (type & PERF_SAMPLE_IP) { 612 ret = value_set_u64_hex(cw, event, "perf_ip", sample->ip); 613 if (ret) 614 return -1; 615 } 616 617 if (type & PERF_SAMPLE_TID) { 618 ret = value_set_s32(cw, event, "perf_tid", sample->tid); 619 if (ret) 620 return -1; 621 622 ret = value_set_s32(cw, event, "perf_pid", sample->pid); 623 if (ret) 624 return -1; 625 } 626 627 if ((type & PERF_SAMPLE_ID) || 628 (type & PERF_SAMPLE_IDENTIFIER)) { 629 ret = value_set_u64(cw, event, "perf_id", sample->id); 630 if (ret) 631 return -1; 632 } 633 634 if (type & PERF_SAMPLE_STREAM_ID) { 635 ret = value_set_u64(cw, event, "perf_stream_id", sample->stream_id); 636 if (ret) 637 return -1; 638 } 639 640 if (type & PERF_SAMPLE_PERIOD) { 641 ret = value_set_u64(cw, event, "perf_period", sample->period); 642 if (ret) 643 return -1; 644 } 645 646 if (type & PERF_SAMPLE_WEIGHT) { 647 ret = value_set_u64(cw, event, "perf_weight", sample->weight); 648 if (ret) 649 return -1; 650 } 651 652 if (type & PERF_SAMPLE_DATA_SRC) { 653 ret = value_set_u64(cw, event, "perf_data_src", 654 sample->data_src); 655 if (ret) 656 return -1; 657 } 658 659 if (type & PERF_SAMPLE_TRANSACTION) { 660 ret = value_set_u64(cw, event, "perf_transaction", 661 sample->transaction); 662 if (ret) 663 return -1; 664 } 665 666 return 0; 667 } 668 669 static int ctf_stream__flush(struct ctf_stream *cs) 670 { 671 int err = 0; 672 673 if (cs) { 674 err = bt_ctf_stream_flush(cs->stream); 675 if (err) 676 pr_err("CTF stream %d flush failed\n", cs->cpu); 677 678 pr("Flush stream for cpu %d (%u samples)\n", 679 cs->cpu, cs->count); 680 681 cs->count = 0; 682 } 683 684 return err; 685 } 686 687 static struct ctf_stream *ctf_stream__create(struct ctf_writer *cw, int cpu) 688 { 689 struct ctf_stream *cs; 690 struct bt_ctf_field *pkt_ctx = NULL; 691 struct bt_ctf_field *cpu_field = NULL; 692 struct bt_ctf_stream *stream = NULL; 693 int ret; 694 695 cs = zalloc(sizeof(*cs)); 696 if (!cs) { 697 pr_err("Failed to allocate ctf stream\n"); 698 return NULL; 699 } 700 701 stream = bt_ctf_writer_create_stream(cw->writer, cw->stream_class); 702 if (!stream) { 703 pr_err("Failed to create CTF stream\n"); 704 goto out; 705 } 706 707 pkt_ctx = bt_ctf_stream_get_packet_context(stream); 708 if (!pkt_ctx) { 709 pr_err("Failed to obtain packet context\n"); 710 goto out; 711 } 712 713 cpu_field = bt_ctf_field_structure_get_field(pkt_ctx, "cpu_id"); 714 bt_ctf_field_put(pkt_ctx); 715 if (!cpu_field) { 716 pr_err("Failed to obtain cpu field\n"); 717 goto out; 718 } 719 720 ret = bt_ctf_field_unsigned_integer_set_value(cpu_field, (u32) cpu); 721 if (ret) { 722 pr_err("Failed to update CPU number\n"); 723 goto out; 724 } 725 726 bt_ctf_field_put(cpu_field); 727 728 cs->cpu = cpu; 729 cs->stream = stream; 730 return cs; 731 732 out: 733 if (cpu_field) 734 bt_ctf_field_put(cpu_field); 735 if (stream) 736 bt_ctf_stream_put(stream); 737 738 free(cs); 739 return NULL; 740 } 741 742 static void ctf_stream__delete(struct ctf_stream *cs) 743 { 744 if (cs) { 745 bt_ctf_stream_put(cs->stream); 746 free(cs); 747 } 748 } 749 750 static struct ctf_stream *ctf_stream(struct ctf_writer *cw, int cpu) 751 { 752 struct ctf_stream *cs = cw->stream[cpu]; 753 754 if (!cs) { 755 cs = ctf_stream__create(cw, cpu); 756 cw->stream[cpu] = cs; 757 } 758 759 return cs; 760 } 761 762 static int get_sample_cpu(struct ctf_writer *cw, struct perf_sample *sample, 763 struct evsel *evsel) 764 { 765 int cpu = 0; 766 767 if (evsel->core.attr.sample_type & PERF_SAMPLE_CPU) 768 cpu = sample->cpu; 769 770 if (cpu > cw->stream_cnt) { 771 pr_err("Event was recorded for CPU %d, limit is at %d.\n", 772 cpu, cw->stream_cnt); 773 cpu = 0; 774 } 775 776 return cpu; 777 } 778 779 #define STREAM_FLUSH_COUNT 100000 780 781 /* 782 * Currently we have no other way to determine the 783 * time for the stream flush other than keep track 784 * of the number of events and check it against 785 * threshold. 786 */ 787 static bool is_flush_needed(struct ctf_stream *cs) 788 { 789 return cs->count >= STREAM_FLUSH_COUNT; 790 } 791 792 static int process_sample_event(struct perf_tool *tool, 793 union perf_event *_event, 794 struct perf_sample *sample, 795 struct evsel *evsel, 796 struct machine *machine __maybe_unused) 797 { 798 struct convert *c = container_of(tool, struct convert, tool); 799 struct evsel_priv *priv = evsel->priv; 800 struct ctf_writer *cw = &c->writer; 801 struct ctf_stream *cs; 802 struct bt_ctf_event_class *event_class; 803 struct bt_ctf_event *event; 804 int ret; 805 unsigned long type = evsel->core.attr.sample_type; 806 807 if (WARN_ONCE(!priv, "Failed to setup all events.\n")) 808 return 0; 809 810 event_class = priv->event_class; 811 812 /* update stats */ 813 c->events_count++; 814 c->events_size += _event->header.size; 815 816 pr_time2(sample->time, "sample %" PRIu64 "\n", c->events_count); 817 818 event = bt_ctf_event_create(event_class); 819 if (!event) { 820 pr_err("Failed to create an CTF event\n"); 821 return -1; 822 } 823 824 bt_ctf_clock_set_time(cw->clock, sample->time); 825 826 ret = add_generic_values(cw, event, evsel, sample); 827 if (ret) 828 return -1; 829 830 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) { 831 ret = add_tracepoint_values(cw, event_class, event, 832 evsel, sample); 833 if (ret) 834 return -1; 835 } 836 837 if (type & PERF_SAMPLE_CALLCHAIN) { 838 ret = add_callchain_output_values(event_class, 839 event, sample->callchain); 840 if (ret) 841 return -1; 842 } 843 844 if (evsel__is_bpf_output(evsel)) { 845 ret = add_bpf_output_values(event_class, event, sample); 846 if (ret) 847 return -1; 848 } 849 850 cs = ctf_stream(cw, get_sample_cpu(cw, sample, evsel)); 851 if (cs) { 852 if (is_flush_needed(cs)) 853 ctf_stream__flush(cs); 854 855 cs->count++; 856 bt_ctf_stream_append_event(cs->stream, event); 857 } 858 859 bt_ctf_event_put(event); 860 return cs ? 0 : -1; 861 } 862 863 #define __NON_SAMPLE_SET_FIELD(_name, _type, _field) \ 864 do { \ 865 ret = value_set_##_type(cw, event, #_field, _event->_name._field);\ 866 if (ret) \ 867 return -1; \ 868 } while(0) 869 870 #define __FUNC_PROCESS_NON_SAMPLE(_name, body) \ 871 static int process_##_name##_event(struct perf_tool *tool, \ 872 union perf_event *_event, \ 873 struct perf_sample *sample, \ 874 struct machine *machine) \ 875 { \ 876 struct convert *c = container_of(tool, struct convert, tool);\ 877 struct ctf_writer *cw = &c->writer; \ 878 struct bt_ctf_event_class *event_class = cw->_name##_class;\ 879 struct bt_ctf_event *event; \ 880 struct ctf_stream *cs; \ 881 int ret; \ 882 \ 883 c->non_sample_count++; \ 884 c->events_size += _event->header.size; \ 885 event = bt_ctf_event_create(event_class); \ 886 if (!event) { \ 887 pr_err("Failed to create an CTF event\n"); \ 888 return -1; \ 889 } \ 890 \ 891 bt_ctf_clock_set_time(cw->clock, sample->time); \ 892 body \ 893 cs = ctf_stream(cw, 0); \ 894 if (cs) { \ 895 if (is_flush_needed(cs)) \ 896 ctf_stream__flush(cs); \ 897 \ 898 cs->count++; \ 899 bt_ctf_stream_append_event(cs->stream, event); \ 900 } \ 901 bt_ctf_event_put(event); \ 902 \ 903 return perf_event__process_##_name(tool, _event, sample, machine);\ 904 } 905 906 __FUNC_PROCESS_NON_SAMPLE(comm, 907 __NON_SAMPLE_SET_FIELD(comm, u32, pid); 908 __NON_SAMPLE_SET_FIELD(comm, u32, tid); 909 __NON_SAMPLE_SET_FIELD(comm, string, comm); 910 ) 911 __FUNC_PROCESS_NON_SAMPLE(fork, 912 __NON_SAMPLE_SET_FIELD(fork, u32, pid); 913 __NON_SAMPLE_SET_FIELD(fork, u32, ppid); 914 __NON_SAMPLE_SET_FIELD(fork, u32, tid); 915 __NON_SAMPLE_SET_FIELD(fork, u32, ptid); 916 __NON_SAMPLE_SET_FIELD(fork, u64, time); 917 ) 918 919 __FUNC_PROCESS_NON_SAMPLE(exit, 920 __NON_SAMPLE_SET_FIELD(fork, u32, pid); 921 __NON_SAMPLE_SET_FIELD(fork, u32, ppid); 922 __NON_SAMPLE_SET_FIELD(fork, u32, tid); 923 __NON_SAMPLE_SET_FIELD(fork, u32, ptid); 924 __NON_SAMPLE_SET_FIELD(fork, u64, time); 925 ) 926 __FUNC_PROCESS_NON_SAMPLE(mmap, 927 __NON_SAMPLE_SET_FIELD(mmap, u32, pid); 928 __NON_SAMPLE_SET_FIELD(mmap, u32, tid); 929 __NON_SAMPLE_SET_FIELD(mmap, u64_hex, start); 930 __NON_SAMPLE_SET_FIELD(mmap, string, filename); 931 ) 932 __FUNC_PROCESS_NON_SAMPLE(mmap2, 933 __NON_SAMPLE_SET_FIELD(mmap2, u32, pid); 934 __NON_SAMPLE_SET_FIELD(mmap2, u32, tid); 935 __NON_SAMPLE_SET_FIELD(mmap2, u64_hex, start); 936 __NON_SAMPLE_SET_FIELD(mmap2, string, filename); 937 ) 938 #undef __NON_SAMPLE_SET_FIELD 939 #undef __FUNC_PROCESS_NON_SAMPLE 940 941 /* If dup < 0, add a prefix. Else, add _dupl_X suffix. */ 942 static char *change_name(char *name, char *orig_name, int dup) 943 { 944 char *new_name = NULL; 945 size_t len; 946 947 if (!name) 948 name = orig_name; 949 950 if (dup >= 10) 951 goto out; 952 /* 953 * Add '_' prefix to potential keywork. According to 954 * Mathieu Desnoyers (https://lore.kernel.org/lkml/1074266107.40857.1422045946295.JavaMail.zimbra@efficios.com), 955 * further CTF spec updating may require us to use '$'. 956 */ 957 if (dup < 0) 958 len = strlen(name) + sizeof("_"); 959 else 960 len = strlen(orig_name) + sizeof("_dupl_X"); 961 962 new_name = malloc(len); 963 if (!new_name) 964 goto out; 965 966 if (dup < 0) 967 snprintf(new_name, len, "_%s", name); 968 else 969 snprintf(new_name, len, "%s_dupl_%d", orig_name, dup); 970 971 out: 972 if (name != orig_name) 973 free(name); 974 return new_name; 975 } 976 977 static int event_class_add_field(struct bt_ctf_event_class *event_class, 978 struct bt_ctf_field_type *type, 979 struct tep_format_field *field) 980 { 981 struct bt_ctf_field_type *t = NULL; 982 char *name; 983 int dup = 1; 984 int ret; 985 986 /* alias was already assigned */ 987 if (field->alias != field->name) 988 return bt_ctf_event_class_add_field(event_class, type, 989 (char *)field->alias); 990 991 name = field->name; 992 993 /* If 'name' is a keywork, add prefix. */ 994 if (bt_ctf_validate_identifier(name)) 995 name = change_name(name, field->name, -1); 996 997 if (!name) { 998 pr_err("Failed to fix invalid identifier."); 999 return -1; 1000 } 1001 while ((t = bt_ctf_event_class_get_field_by_name(event_class, name))) { 1002 bt_ctf_field_type_put(t); 1003 name = change_name(name, field->name, dup++); 1004 if (!name) { 1005 pr_err("Failed to create dup name for '%s'\n", field->name); 1006 return -1; 1007 } 1008 } 1009 1010 ret = bt_ctf_event_class_add_field(event_class, type, name); 1011 if (!ret) 1012 field->alias = name; 1013 1014 return ret; 1015 } 1016 1017 static int add_tracepoint_fields_types(struct ctf_writer *cw, 1018 struct tep_format_field *fields, 1019 struct bt_ctf_event_class *event_class) 1020 { 1021 struct tep_format_field *field; 1022 int ret; 1023 1024 for (field = fields; field; field = field->next) { 1025 struct bt_ctf_field_type *type; 1026 unsigned long flags = field->flags; 1027 1028 pr2(" field '%s'\n", field->name); 1029 1030 type = get_tracepoint_field_type(cw, field); 1031 if (!type) 1032 return -1; 1033 1034 /* 1035 * A string is an array of chars. For this we use the string 1036 * type and don't care that it is an array. What we don't 1037 * support is an array of strings. 1038 */ 1039 if (flags & TEP_FIELD_IS_STRING) 1040 flags &= ~TEP_FIELD_IS_ARRAY; 1041 1042 if (flags & TEP_FIELD_IS_ARRAY) 1043 type = bt_ctf_field_type_array_create(type, field->arraylen); 1044 1045 ret = event_class_add_field(event_class, type, field); 1046 1047 if (flags & TEP_FIELD_IS_ARRAY) 1048 bt_ctf_field_type_put(type); 1049 1050 if (ret) { 1051 pr_err("Failed to add field '%s': %d\n", 1052 field->name, ret); 1053 return -1; 1054 } 1055 } 1056 1057 return 0; 1058 } 1059 1060 static int add_tracepoint_types(struct ctf_writer *cw, 1061 struct evsel *evsel, 1062 struct bt_ctf_event_class *class) 1063 { 1064 struct tep_format_field *common_fields = evsel->tp_format->format.common_fields; 1065 struct tep_format_field *fields = evsel->tp_format->format.fields; 1066 int ret; 1067 1068 ret = add_tracepoint_fields_types(cw, common_fields, class); 1069 if (!ret) 1070 ret = add_tracepoint_fields_types(cw, fields, class); 1071 1072 return ret; 1073 } 1074 1075 static int add_bpf_output_types(struct ctf_writer *cw, 1076 struct bt_ctf_event_class *class) 1077 { 1078 struct bt_ctf_field_type *len_type = cw->data.u32; 1079 struct bt_ctf_field_type *seq_base_type = cw->data.u32_hex; 1080 struct bt_ctf_field_type *seq_type; 1081 int ret; 1082 1083 ret = bt_ctf_event_class_add_field(class, len_type, "raw_len"); 1084 if (ret) 1085 return ret; 1086 1087 seq_type = bt_ctf_field_type_sequence_create(seq_base_type, "raw_len"); 1088 if (!seq_type) 1089 return -1; 1090 1091 return bt_ctf_event_class_add_field(class, seq_type, "raw_data"); 1092 } 1093 1094 static int add_generic_types(struct ctf_writer *cw, struct evsel *evsel, 1095 struct bt_ctf_event_class *event_class) 1096 { 1097 u64 type = evsel->core.attr.sample_type; 1098 1099 /* 1100 * missing: 1101 * PERF_SAMPLE_TIME - not needed as we have it in 1102 * ctf event header 1103 * PERF_SAMPLE_READ - TODO 1104 * PERF_SAMPLE_CALLCHAIN - TODO 1105 * PERF_SAMPLE_RAW - tracepoint fields and BPF output 1106 * are handled separately 1107 * PERF_SAMPLE_BRANCH_STACK - TODO 1108 * PERF_SAMPLE_REGS_USER - TODO 1109 * PERF_SAMPLE_STACK_USER - TODO 1110 */ 1111 1112 #define ADD_FIELD(cl, t, n) \ 1113 do { \ 1114 pr2(" field '%s'\n", n); \ 1115 if (bt_ctf_event_class_add_field(cl, t, n)) { \ 1116 pr_err("Failed to add field '%s';\n", n); \ 1117 return -1; \ 1118 } \ 1119 } while (0) 1120 1121 if (type & PERF_SAMPLE_IP) 1122 ADD_FIELD(event_class, cw->data.u64_hex, "perf_ip"); 1123 1124 if (type & PERF_SAMPLE_TID) { 1125 ADD_FIELD(event_class, cw->data.s32, "perf_tid"); 1126 ADD_FIELD(event_class, cw->data.s32, "perf_pid"); 1127 } 1128 1129 if ((type & PERF_SAMPLE_ID) || 1130 (type & PERF_SAMPLE_IDENTIFIER)) 1131 ADD_FIELD(event_class, cw->data.u64, "perf_id"); 1132 1133 if (type & PERF_SAMPLE_STREAM_ID) 1134 ADD_FIELD(event_class, cw->data.u64, "perf_stream_id"); 1135 1136 if (type & PERF_SAMPLE_PERIOD) 1137 ADD_FIELD(event_class, cw->data.u64, "perf_period"); 1138 1139 if (type & PERF_SAMPLE_WEIGHT) 1140 ADD_FIELD(event_class, cw->data.u64, "perf_weight"); 1141 1142 if (type & PERF_SAMPLE_DATA_SRC) 1143 ADD_FIELD(event_class, cw->data.u64, "perf_data_src"); 1144 1145 if (type & PERF_SAMPLE_TRANSACTION) 1146 ADD_FIELD(event_class, cw->data.u64, "perf_transaction"); 1147 1148 if (type & PERF_SAMPLE_CALLCHAIN) { 1149 ADD_FIELD(event_class, cw->data.u32, "perf_callchain_size"); 1150 ADD_FIELD(event_class, 1151 bt_ctf_field_type_sequence_create( 1152 cw->data.u64_hex, "perf_callchain_size"), 1153 "perf_callchain"); 1154 } 1155 1156 #undef ADD_FIELD 1157 return 0; 1158 } 1159 1160 static int add_event(struct ctf_writer *cw, struct evsel *evsel) 1161 { 1162 struct bt_ctf_event_class *event_class; 1163 struct evsel_priv *priv; 1164 const char *name = evsel__name(evsel); 1165 int ret; 1166 1167 pr("Adding event '%s' (type %d)\n", name, evsel->core.attr.type); 1168 1169 event_class = bt_ctf_event_class_create(name); 1170 if (!event_class) 1171 return -1; 1172 1173 ret = add_generic_types(cw, evsel, event_class); 1174 if (ret) 1175 goto err; 1176 1177 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) { 1178 ret = add_tracepoint_types(cw, evsel, event_class); 1179 if (ret) 1180 goto err; 1181 } 1182 1183 if (evsel__is_bpf_output(evsel)) { 1184 ret = add_bpf_output_types(cw, event_class); 1185 if (ret) 1186 goto err; 1187 } 1188 1189 ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class); 1190 if (ret) { 1191 pr("Failed to add event class into stream.\n"); 1192 goto err; 1193 } 1194 1195 priv = malloc(sizeof(*priv)); 1196 if (!priv) 1197 goto err; 1198 1199 priv->event_class = event_class; 1200 evsel->priv = priv; 1201 return 0; 1202 1203 err: 1204 bt_ctf_event_class_put(event_class); 1205 pr_err("Failed to add event '%s'.\n", name); 1206 return -1; 1207 } 1208 1209 static int setup_events(struct ctf_writer *cw, struct perf_session *session) 1210 { 1211 struct evlist *evlist = session->evlist; 1212 struct evsel *evsel; 1213 int ret; 1214 1215 evlist__for_each_entry(evlist, evsel) { 1216 ret = add_event(cw, evsel); 1217 if (ret) 1218 return ret; 1219 } 1220 return 0; 1221 } 1222 1223 #define __NON_SAMPLE_ADD_FIELD(t, n) \ 1224 do { \ 1225 pr2(" field '%s'\n", #n); \ 1226 if (bt_ctf_event_class_add_field(event_class, cw->data.t, #n)) {\ 1227 pr_err("Failed to add field '%s';\n", #n);\ 1228 return -1; \ 1229 } \ 1230 } while(0) 1231 1232 #define __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(_name, body) \ 1233 static int add_##_name##_event(struct ctf_writer *cw) \ 1234 { \ 1235 struct bt_ctf_event_class *event_class; \ 1236 int ret; \ 1237 \ 1238 pr("Adding "#_name" event\n"); \ 1239 event_class = bt_ctf_event_class_create("perf_" #_name);\ 1240 if (!event_class) \ 1241 return -1; \ 1242 body \ 1243 \ 1244 ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);\ 1245 if (ret) { \ 1246 pr("Failed to add event class '"#_name"' into stream.\n");\ 1247 return ret; \ 1248 } \ 1249 \ 1250 cw->_name##_class = event_class; \ 1251 bt_ctf_event_class_put(event_class); \ 1252 return 0; \ 1253 } 1254 1255 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(comm, 1256 __NON_SAMPLE_ADD_FIELD(u32, pid); 1257 __NON_SAMPLE_ADD_FIELD(u32, tid); 1258 __NON_SAMPLE_ADD_FIELD(string, comm); 1259 ) 1260 1261 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(fork, 1262 __NON_SAMPLE_ADD_FIELD(u32, pid); 1263 __NON_SAMPLE_ADD_FIELD(u32, ppid); 1264 __NON_SAMPLE_ADD_FIELD(u32, tid); 1265 __NON_SAMPLE_ADD_FIELD(u32, ptid); 1266 __NON_SAMPLE_ADD_FIELD(u64, time); 1267 ) 1268 1269 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(exit, 1270 __NON_SAMPLE_ADD_FIELD(u32, pid); 1271 __NON_SAMPLE_ADD_FIELD(u32, ppid); 1272 __NON_SAMPLE_ADD_FIELD(u32, tid); 1273 __NON_SAMPLE_ADD_FIELD(u32, ptid); 1274 __NON_SAMPLE_ADD_FIELD(u64, time); 1275 ) 1276 1277 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(mmap, 1278 __NON_SAMPLE_ADD_FIELD(u32, pid); 1279 __NON_SAMPLE_ADD_FIELD(u32, tid); 1280 __NON_SAMPLE_ADD_FIELD(u64_hex, start); 1281 __NON_SAMPLE_ADD_FIELD(string, filename); 1282 ) 1283 1284 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(mmap2, 1285 __NON_SAMPLE_ADD_FIELD(u32, pid); 1286 __NON_SAMPLE_ADD_FIELD(u32, tid); 1287 __NON_SAMPLE_ADD_FIELD(u64_hex, start); 1288 __NON_SAMPLE_ADD_FIELD(string, filename); 1289 ) 1290 #undef __NON_SAMPLE_ADD_FIELD 1291 #undef __FUNC_ADD_NON_SAMPLE_EVENT_CLASS 1292 1293 static int setup_non_sample_events(struct ctf_writer *cw, 1294 struct perf_session *session __maybe_unused) 1295 { 1296 int ret; 1297 1298 ret = add_comm_event(cw); 1299 if (ret) 1300 return ret; 1301 ret = add_exit_event(cw); 1302 if (ret) 1303 return ret; 1304 ret = add_fork_event(cw); 1305 if (ret) 1306 return ret; 1307 ret = add_mmap_event(cw); 1308 if (ret) 1309 return ret; 1310 ret = add_mmap2_event(cw); 1311 if (ret) 1312 return ret; 1313 return 0; 1314 } 1315 1316 static void cleanup_events(struct perf_session *session) 1317 { 1318 struct evlist *evlist = session->evlist; 1319 struct evsel *evsel; 1320 1321 evlist__for_each_entry(evlist, evsel) { 1322 struct evsel_priv *priv; 1323 1324 priv = evsel->priv; 1325 bt_ctf_event_class_put(priv->event_class); 1326 zfree(&evsel->priv); 1327 } 1328 1329 evlist__delete(evlist); 1330 session->evlist = NULL; 1331 } 1332 1333 static int setup_streams(struct ctf_writer *cw, struct perf_session *session) 1334 { 1335 struct ctf_stream **stream; 1336 struct perf_header *ph = &session->header; 1337 int ncpus; 1338 1339 /* 1340 * Try to get the number of cpus used in the data file, 1341 * if not present fallback to the MAX_CPUS. 1342 */ 1343 ncpus = ph->env.nr_cpus_avail ?: MAX_CPUS; 1344 1345 stream = zalloc(sizeof(*stream) * ncpus); 1346 if (!stream) { 1347 pr_err("Failed to allocate streams.\n"); 1348 return -ENOMEM; 1349 } 1350 1351 cw->stream = stream; 1352 cw->stream_cnt = ncpus; 1353 return 0; 1354 } 1355 1356 static void free_streams(struct ctf_writer *cw) 1357 { 1358 int cpu; 1359 1360 for (cpu = 0; cpu < cw->stream_cnt; cpu++) 1361 ctf_stream__delete(cw->stream[cpu]); 1362 1363 zfree(&cw->stream); 1364 } 1365 1366 static int ctf_writer__setup_env(struct ctf_writer *cw, 1367 struct perf_session *session) 1368 { 1369 struct perf_header *header = &session->header; 1370 struct bt_ctf_writer *writer = cw->writer; 1371 1372 #define ADD(__n, __v) \ 1373 do { \ 1374 if (bt_ctf_writer_add_environment_field(writer, __n, __v)) \ 1375 return -1; \ 1376 } while (0) 1377 1378 ADD("host", header->env.hostname); 1379 ADD("sysname", "Linux"); 1380 ADD("release", header->env.os_release); 1381 ADD("version", header->env.version); 1382 ADD("machine", header->env.arch); 1383 ADD("domain", "kernel"); 1384 ADD("tracer_name", "perf"); 1385 1386 #undef ADD 1387 return 0; 1388 } 1389 1390 static int ctf_writer__setup_clock(struct ctf_writer *cw, 1391 struct perf_session *session, 1392 bool tod) 1393 { 1394 struct bt_ctf_clock *clock = cw->clock; 1395 const char *desc = "perf clock"; 1396 int64_t offset = 0; 1397 1398 if (tod) { 1399 struct perf_env *env = &session->header.env; 1400 1401 if (!env->clock.enabled) { 1402 pr_err("Can't provide --tod time, missing clock data. " 1403 "Please record with -k/--clockid option.\n"); 1404 return -1; 1405 } 1406 1407 desc = clockid_name(env->clock.clockid); 1408 offset = env->clock.tod_ns - env->clock.clockid_ns; 1409 } 1410 1411 #define SET(__n, __v) \ 1412 do { \ 1413 if (bt_ctf_clock_set_##__n(clock, __v)) \ 1414 return -1; \ 1415 } while (0) 1416 1417 SET(frequency, 1000000000); 1418 SET(offset, offset); 1419 SET(description, desc); 1420 SET(precision, 10); 1421 SET(is_absolute, 0); 1422 1423 #undef SET 1424 return 0; 1425 } 1426 1427 static struct bt_ctf_field_type *create_int_type(int size, bool sign, bool hex) 1428 { 1429 struct bt_ctf_field_type *type; 1430 1431 type = bt_ctf_field_type_integer_create(size); 1432 if (!type) 1433 return NULL; 1434 1435 if (sign && 1436 bt_ctf_field_type_integer_set_signed(type, 1)) 1437 goto err; 1438 1439 if (hex && 1440 bt_ctf_field_type_integer_set_base(type, BT_CTF_INTEGER_BASE_HEXADECIMAL)) 1441 goto err; 1442 1443 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ 1444 bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_BIG_ENDIAN); 1445 #else 1446 bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_LITTLE_ENDIAN); 1447 #endif 1448 1449 pr2("Created type: INTEGER %d-bit %ssigned %s\n", 1450 size, sign ? "un" : "", hex ? "hex" : ""); 1451 return type; 1452 1453 err: 1454 bt_ctf_field_type_put(type); 1455 return NULL; 1456 } 1457 1458 static void ctf_writer__cleanup_data(struct ctf_writer *cw) 1459 { 1460 unsigned int i; 1461 1462 for (i = 0; i < ARRAY_SIZE(cw->data.array); i++) 1463 bt_ctf_field_type_put(cw->data.array[i]); 1464 } 1465 1466 static int ctf_writer__init_data(struct ctf_writer *cw) 1467 { 1468 #define CREATE_INT_TYPE(type, size, sign, hex) \ 1469 do { \ 1470 (type) = create_int_type(size, sign, hex); \ 1471 if (!(type)) \ 1472 goto err; \ 1473 } while (0) 1474 1475 CREATE_INT_TYPE(cw->data.s64, 64, true, false); 1476 CREATE_INT_TYPE(cw->data.u64, 64, false, false); 1477 CREATE_INT_TYPE(cw->data.s32, 32, true, false); 1478 CREATE_INT_TYPE(cw->data.u32, 32, false, false); 1479 CREATE_INT_TYPE(cw->data.u32_hex, 32, false, true); 1480 CREATE_INT_TYPE(cw->data.u64_hex, 64, false, true); 1481 1482 cw->data.string = bt_ctf_field_type_string_create(); 1483 if (cw->data.string) 1484 return 0; 1485 1486 err: 1487 ctf_writer__cleanup_data(cw); 1488 pr_err("Failed to create data types.\n"); 1489 return -1; 1490 } 1491 1492 static void ctf_writer__cleanup(struct ctf_writer *cw) 1493 { 1494 ctf_writer__cleanup_data(cw); 1495 1496 bt_ctf_clock_put(cw->clock); 1497 free_streams(cw); 1498 bt_ctf_stream_class_put(cw->stream_class); 1499 bt_ctf_writer_put(cw->writer); 1500 1501 /* and NULL all the pointers */ 1502 memset(cw, 0, sizeof(*cw)); 1503 } 1504 1505 static int ctf_writer__init(struct ctf_writer *cw, const char *path, 1506 struct perf_session *session, bool tod) 1507 { 1508 struct bt_ctf_writer *writer; 1509 struct bt_ctf_stream_class *stream_class; 1510 struct bt_ctf_clock *clock; 1511 struct bt_ctf_field_type *pkt_ctx_type; 1512 int ret; 1513 1514 /* CTF writer */ 1515 writer = bt_ctf_writer_create(path); 1516 if (!writer) 1517 goto err; 1518 1519 cw->writer = writer; 1520 1521 /* CTF clock */ 1522 clock = bt_ctf_clock_create("perf_clock"); 1523 if (!clock) { 1524 pr("Failed to create CTF clock.\n"); 1525 goto err_cleanup; 1526 } 1527 1528 cw->clock = clock; 1529 1530 if (ctf_writer__setup_clock(cw, session, tod)) { 1531 pr("Failed to setup CTF clock.\n"); 1532 goto err_cleanup; 1533 } 1534 1535 /* CTF stream class */ 1536 stream_class = bt_ctf_stream_class_create("perf_stream"); 1537 if (!stream_class) { 1538 pr("Failed to create CTF stream class.\n"); 1539 goto err_cleanup; 1540 } 1541 1542 cw->stream_class = stream_class; 1543 1544 /* CTF clock stream setup */ 1545 if (bt_ctf_stream_class_set_clock(stream_class, clock)) { 1546 pr("Failed to assign CTF clock to stream class.\n"); 1547 goto err_cleanup; 1548 } 1549 1550 if (ctf_writer__init_data(cw)) 1551 goto err_cleanup; 1552 1553 /* Add cpu_id for packet context */ 1554 pkt_ctx_type = bt_ctf_stream_class_get_packet_context_type(stream_class); 1555 if (!pkt_ctx_type) 1556 goto err_cleanup; 1557 1558 ret = bt_ctf_field_type_structure_add_field(pkt_ctx_type, cw->data.u32, "cpu_id"); 1559 bt_ctf_field_type_put(pkt_ctx_type); 1560 if (ret) 1561 goto err_cleanup; 1562 1563 /* CTF clock writer setup */ 1564 if (bt_ctf_writer_add_clock(writer, clock)) { 1565 pr("Failed to assign CTF clock to writer.\n"); 1566 goto err_cleanup; 1567 } 1568 1569 return 0; 1570 1571 err_cleanup: 1572 ctf_writer__cleanup(cw); 1573 err: 1574 pr_err("Failed to setup CTF writer.\n"); 1575 return -1; 1576 } 1577 1578 static int ctf_writer__flush_streams(struct ctf_writer *cw) 1579 { 1580 int cpu, ret = 0; 1581 1582 for (cpu = 0; cpu < cw->stream_cnt && !ret; cpu++) 1583 ret = ctf_stream__flush(cw->stream[cpu]); 1584 1585 return ret; 1586 } 1587 1588 static int convert__config(const char *var, const char *value, void *cb) 1589 { 1590 struct convert *c = cb; 1591 1592 if (!strcmp(var, "convert.queue-size")) 1593 return perf_config_u64(&c->queue_size, var, value); 1594 1595 return 0; 1596 } 1597 1598 int bt_convert__perf2ctf(const char *input, const char *path, 1599 struct perf_data_convert_opts *opts) 1600 { 1601 struct perf_session *session; 1602 struct perf_data data = { 1603 .path = input, 1604 .mode = PERF_DATA_MODE_READ, 1605 .force = opts->force, 1606 }; 1607 struct convert c = { 1608 .tool = { 1609 .sample = process_sample_event, 1610 .mmap = perf_event__process_mmap, 1611 .mmap2 = perf_event__process_mmap2, 1612 .comm = perf_event__process_comm, 1613 .exit = perf_event__process_exit, 1614 .fork = perf_event__process_fork, 1615 .lost = perf_event__process_lost, 1616 .tracing_data = perf_event__process_tracing_data, 1617 .build_id = perf_event__process_build_id, 1618 .namespaces = perf_event__process_namespaces, 1619 .ordered_events = true, 1620 .ordering_requires_timestamps = true, 1621 }, 1622 }; 1623 struct ctf_writer *cw = &c.writer; 1624 int err; 1625 1626 if (opts->all) { 1627 c.tool.comm = process_comm_event; 1628 c.tool.exit = process_exit_event; 1629 c.tool.fork = process_fork_event; 1630 c.tool.mmap = process_mmap_event; 1631 c.tool.mmap2 = process_mmap2_event; 1632 } 1633 1634 err = perf_config(convert__config, &c); 1635 if (err) 1636 return err; 1637 1638 err = -1; 1639 /* perf.data session */ 1640 session = perf_session__new(&data, &c.tool); 1641 if (IS_ERR(session)) 1642 return PTR_ERR(session); 1643 1644 /* CTF writer */ 1645 if (ctf_writer__init(cw, path, session, opts->tod)) 1646 goto free_session; 1647 1648 if (c.queue_size) { 1649 ordered_events__set_alloc_size(&session->ordered_events, 1650 c.queue_size); 1651 } 1652 1653 /* CTF writer env/clock setup */ 1654 if (ctf_writer__setup_env(cw, session)) 1655 goto free_writer; 1656 1657 /* CTF events setup */ 1658 if (setup_events(cw, session)) 1659 goto free_writer; 1660 1661 if (opts->all && setup_non_sample_events(cw, session)) 1662 goto free_writer; 1663 1664 if (setup_streams(cw, session)) 1665 goto free_writer; 1666 1667 err = perf_session__process_events(session); 1668 if (!err) 1669 err = ctf_writer__flush_streams(cw); 1670 else 1671 pr_err("Error during conversion.\n"); 1672 1673 fprintf(stderr, 1674 "[ perf data convert: Converted '%s' into CTF data '%s' ]\n", 1675 data.path, path); 1676 1677 fprintf(stderr, 1678 "[ perf data convert: Converted and wrote %.3f MB (%" PRIu64 " samples", 1679 (double) c.events_size / 1024.0 / 1024.0, 1680 c.events_count); 1681 1682 if (!c.non_sample_count) 1683 fprintf(stderr, ") ]\n"); 1684 else 1685 fprintf(stderr, ", %" PRIu64 " non-samples) ]\n", c.non_sample_count); 1686 1687 cleanup_events(session); 1688 perf_session__delete(session); 1689 ctf_writer__cleanup(cw); 1690 1691 return err; 1692 1693 free_writer: 1694 ctf_writer__cleanup(cw); 1695 free_session: 1696 perf_session__delete(session); 1697 pr_err("Error during conversion setup.\n"); 1698 return err; 1699 } 1700