/*
 * CTF writing support via babeltrace.
 *
 * Copyright (C) 2014, Jiri Olsa <jolsa@redhat.com>
 * Copyright (C) 2014, Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <linux/compiler.h>
#include <babeltrace/ctf-writer/writer.h>
#include <babeltrace/ctf-writer/clock.h>
#include <babeltrace/ctf-writer/stream.h>
#include <babeltrace/ctf-writer/event.h>
#include <babeltrace/ctf-writer/event-types.h>
#include <babeltrace/ctf-writer/event-fields.h>
#include <babeltrace/ctf-ir/utils.h>
#include <babeltrace/ctf/events.h>
#include <traceevent/event-parse.h>
#include "asm/bug.h"
#include "data-convert-bt.h"
#include "session.h"
#include "util.h"
#include "debug.h"
#include "tool.h"
#include "evlist.h"
#include "evsel.h"
#include "machine.h"

#define pr_N(n, fmt, ...) \
	eprintf(n, debug_data_convert, fmt, ##__VA_ARGS__)

#define pr(fmt, ...)  pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)
#define pr2(fmt, ...) pr_N(2, pr_fmt(fmt), ##__VA_ARGS__)

#define pr_time2(t, fmt, ...) pr_time_N(2, debug_data_convert, t, pr_fmt(fmt), ##__VA_ARGS__)

struct evsel_priv {
	struct bt_ctf_event_class *event_class;
};

#define MAX_CPUS	4096

struct ctf_stream {
	struct bt_ctf_stream	*stream;
	int			cpu;
	u32			count;
};

struct ctf_writer {
	/* writer primitives */
	struct bt_ctf_writer		*writer;
	struct ctf_stream		**stream;
	int				stream_cnt;
	struct bt_ctf_stream_class	*stream_class;
	struct bt_ctf_clock		*clock;

	/* data types */
	union {
		struct {
			struct bt_ctf_field_type	*s64;
			struct bt_ctf_field_type	*u64;
			struct bt_ctf_field_type	*s32;
			struct bt_ctf_field_type	*u32;
			struct bt_ctf_field_type	*string;
			struct bt_ctf_field_type	*u32_hex;
			struct bt_ctf_field_type	*u64_hex;
		};
		/* must alias every member of the struct above */
		struct bt_ctf_field_type *array[7];
	} data;
};

struct convert {
	struct perf_tool	tool;
	struct ctf_writer	writer;

	u64			events_size;
	u64			events_count;

	/* Ordered events configured queue size. */
	u64			queue_size;
};

static int value_set(struct bt_ctf_field_type *type,
		     struct bt_ctf_event *event,
		     const char *name, u64 val)
{
	struct bt_ctf_field *field;
	bool sign = bt_ctf_field_type_integer_get_signed(type);
	int ret;

	field = bt_ctf_field_create(type);
	if (!field) {
		pr_err("failed to create a field %s\n", name);
		return -1;
	}

	if (sign) {
		ret = bt_ctf_field_signed_integer_set_value(field, val);
		if (ret) {
			pr_err("failed to set field value %s\n", name);
			goto err;
		}
	} else {
		ret = bt_ctf_field_unsigned_integer_set_value(field, val);
		if (ret) {
			pr_err("failed to set field value %s\n", name);
			goto err;
		}
	}

	ret = bt_ctf_event_set_payload(event, name, field);
	if (ret) {
		pr_err("failed to set payload %s\n", name);
		goto err;
	}

	pr2(" SET [%s = %" PRIu64 "]\n", name, val);

err:
	bt_ctf_field_put(field);
	return ret;
}

#define __FUNC_VALUE_SET(_name, _val_type)				\
static __maybe_unused int value_set_##_name(struct ctf_writer *cw,	\
			     struct bt_ctf_event *event,		\
			     const char *name,				\
			     _val_type val)				\
{									\
	struct bt_ctf_field_type *type = cw->data._name;		\
	return value_set(type, event, name, (u64) val);			\
}

#define FUNC_VALUE_SET(_name) __FUNC_VALUE_SET(_name, _name)

FUNC_VALUE_SET(s32)
FUNC_VALUE_SET(u32)
FUNC_VALUE_SET(s64)
FUNC_VALUE_SET(u64)
__FUNC_VALUE_SET(u64_hex, u64)

static struct bt_ctf_field_type*
get_tracepoint_field_type(struct ctf_writer *cw, struct format_field *field)
{
	unsigned long flags = field->flags;

	if (flags & FIELD_IS_STRING)
		return cw->data.string;

	if (!(flags & FIELD_IS_SIGNED)) {
		/* unsigned longs are mostly pointers */
		if (flags & FIELD_IS_LONG || flags & FIELD_IS_POINTER)
			return cw->data.u64_hex;
	}

	if (flags & FIELD_IS_SIGNED) {
		if (field->size == 8)
			return cw->data.s64;
		else
			return cw->data.s32;
	}

	if (field->size == 8)
		return cw->data.u64;
	else
		return cw->data.u32;
}

static unsigned long long adjust_signedness(unsigned long long value_int, int size)
{
	unsigned long long value_mask;

	/*
	 * value_mask = (1 << (size * 8 - 1)) - 1.
	 * Directly set value_mask for code readers.
	 */
	switch (size) {
	case 1:
		value_mask = 0x7fULL;
		break;
	case 2:
		value_mask = 0x7fffULL;
		break;
	case 4:
		value_mask = 0x7fffffffULL;
		break;
	case 8:
		/*
		 * For a 64-bit value, return it as is. There is no
		 * need to fill the high bits.
		 */
		/* Fall through */
	default:
		/* BUG! */
		return value_int;
	}

	/* If it is a positive value, don't adjust. */
	if ((value_int & (~0ULL - value_mask)) == 0)
		return value_int;

	/* Fill the upper bits of value_int with 1s to make it a negative long long. */
	return (value_int & value_mask) | ~value_mask;
}

/* Escape unprintable characters as \xNN; purely printable strings are passed through unchanged. */
static int string_set_value(struct bt_ctf_field *field, const char *string)
{
	char *buffer = NULL;
	size_t len = strlen(string), i, p;
	int err;

	for (i = p = 0; i < len; i++, p++) {
		if (isprint(string[i])) {
			if (!buffer)
				continue;
			buffer[p] = string[i];
		} else {
			char numstr[5];

			snprintf(numstr, sizeof(numstr), "\\x%02x",
				 (unsigned int)(string[i]) & 0xff);

			if (!buffer) {
				buffer = zalloc(i + (len - i) * 4 + 2);
				if (!buffer) {
					pr_err("failed to set unprintable string '%s'\n", string);
					return bt_ctf_field_string_set_value(field, "UNPRINTABLE-STRING");
				}
				if (i > 0)
					strncpy(buffer, string, i);
			}
			strncat(buffer + p, numstr, 4);
			p += 3;
		}
	}

	if (!buffer)
		return bt_ctf_field_string_set_value(field, string);
	err = bt_ctf_field_string_set_value(field, buffer);
	free(buffer);
	return err;
}

static int add_tracepoint_field_value(struct ctf_writer *cw,
				      struct bt_ctf_event_class *event_class,
				      struct bt_ctf_event *event,
				      struct perf_sample *sample,
				      struct format_field *fmtf)
{
	struct bt_ctf_field_type *type;
	struct bt_ctf_field *array_field;
	struct bt_ctf_field *field;
	const char *name = fmtf->name;
	void *data = sample->raw_data;
	unsigned long flags = fmtf->flags;
	unsigned int n_items;
	unsigned int i;
	unsigned int offset;
	unsigned int len;
	int ret;

	name = fmtf->alias;
	offset = fmtf->offset;
	len = fmtf->size;
	if (flags & FIELD_IS_STRING)
		flags &= ~FIELD_IS_ARRAY;

	if (flags & FIELD_IS_DYNAMIC) {
		unsigned long long tmp_val;

		tmp_val = pevent_read_number(fmtf->event->pevent,
					     data + offset, len);
		offset = tmp_val;
		len = offset >> 16;
		offset &= 0xffff;
	}

	if (flags & FIELD_IS_ARRAY) {

		type = bt_ctf_event_class_get_field_by_name(
				event_class, name);
		array_field = bt_ctf_field_create(type);
		bt_ctf_field_type_put(type);
		if (!array_field) {
			pr_err("Failed to create array type %s\n", name);
			return -1;
		}

		len = fmtf->size / fmtf->arraylen;
		n_items = fmtf->arraylen;
	} else {
		n_items = 1;
		array_field = NULL;
	}

	type = get_tracepoint_field_type(cw, fmtf);

	for (i = 0; i < n_items; i++) {
		if (flags & FIELD_IS_ARRAY)
			field = bt_ctf_field_array_get_field(array_field, i);
		else
			field = bt_ctf_field_create(type);

		if (!field) {
			pr_err("failed to create a field %s\n", name);
			return -1;
		}

		if (flags & FIELD_IS_STRING)
			ret = string_set_value(field, data + offset + i * len);
		else {
			unsigned long long value_int;

			value_int = pevent_read_number(
					fmtf->event->pevent,
					data + offset + i * len, len);

			if (!(flags & FIELD_IS_SIGNED))
				ret = bt_ctf_field_unsigned_integer_set_value(
						field, value_int);
			else
				ret = bt_ctf_field_signed_integer_set_value(
						field, adjust_signedness(value_int, len));
		}

		if (ret) {
			pr_err("failed to set field value %s\n", name);
			goto err_put_field;
		}
		if (!(flags & FIELD_IS_ARRAY)) {
			ret = bt_ctf_event_set_payload(event, name, field);
			if (ret) {
				pr_err("failed to set payload %s\n", name);
				goto err_put_field;
			}
		}
		bt_ctf_field_put(field);
	}
	if (flags & FIELD_IS_ARRAY) {
		ret = bt_ctf_event_set_payload(event, name, array_field);
		if (ret) {
			pr_err("Failed to add payload array %s\n", name);
			return -1;
		}
		bt_ctf_field_put(array_field);
	}
	return 0;

err_put_field:
	bt_ctf_field_put(field);
	return -1;
}

static int add_tracepoint_fields_values(struct ctf_writer *cw,
					struct bt_ctf_event_class *event_class,
					struct bt_ctf_event *event,
					struct format_field *fields,
					struct perf_sample *sample)
{
	struct format_field *field;
	int ret;

	for (field = fields; field; field = field->next) {
		ret = add_tracepoint_field_value(cw, event_class, event, sample,
						 field);
		if (ret)
			return -1;
	}
	return 0;
}

static int add_tracepoint_values(struct ctf_writer *cw,
				 struct bt_ctf_event_class *event_class,
				 struct bt_ctf_event *event,
				 struct perf_evsel *evsel,
				 struct perf_sample *sample)
{
	struct format_field *common_fields = evsel->tp_format->format.common_fields;
	struct format_field *fields = evsel->tp_format->format.fields;
	int ret;

	ret = add_tracepoint_fields_values(cw, event_class, event,
					   common_fields, sample);
	if (!ret)
		ret = add_tracepoint_fields_values(cw, event_class, event,
						   fields, sample);

	return ret;
}

static int
add_bpf_output_values(struct bt_ctf_event_class *event_class,
		      struct bt_ctf_event *event,
		      struct perf_sample *sample)
{
	struct bt_ctf_field_type *len_type, *seq_type;
	struct bt_ctf_field *len_field, *seq_field;
	unsigned int raw_size = sample->raw_size;
	unsigned int nr_elements = raw_size / sizeof(u32);
	unsigned int i;
	int ret;

	if (nr_elements * sizeof(u32) != raw_size)
		pr_warning("Incorrect raw_size (%u) in bpf output event, skip %lu bytes\n",
			   raw_size, raw_size - nr_elements * sizeof(u32));

	len_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_len");
	len_field = bt_ctf_field_create(len_type);
	if (!len_field) {
		pr_err("failed to create 'raw_len' for bpf output event\n");
		ret = -1;
		goto put_len_type;
	}

	ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements);
	if (ret) {
		pr_err("failed to set field value for raw_len\n");
		goto put_len_field;
	}
	ret = bt_ctf_event_set_payload(event, "raw_len", len_field);
	if (ret) {
		pr_err("failed to set payload to raw_len\n");
		goto put_len_field;
	}

	seq_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_data");
	seq_field = bt_ctf_field_create(seq_type);
	if (!seq_field) {
		pr_err("failed to create 'raw_data' for bpf output event\n");
		ret = -1;
		goto put_seq_type;
	}

	ret = bt_ctf_field_sequence_set_length(seq_field, len_field);
	if (ret) {
		pr_err("failed to set length of 'raw_data'\n");
		goto put_seq_field;
	}

	for (i = 0; i < nr_elements; i++) {
		struct bt_ctf_field *elem_field =
			bt_ctf_field_sequence_get_field(seq_field, i);

		ret = bt_ctf_field_unsigned_integer_set_value(elem_field,
				((u32 *)(sample->raw_data))[i]);

		bt_ctf_field_put(elem_field);
		if (ret) {
			pr_err("failed to set raw_data[%d]\n", i);
			goto put_seq_field;
		}
	}

	ret = bt_ctf_event_set_payload(event, "raw_data", seq_field);
	if (ret)
		pr_err("failed to set payload for raw_data\n");

put_seq_field:
	bt_ctf_field_put(seq_field);
put_seq_type:
	bt_ctf_field_type_put(seq_type);
put_len_field:
	bt_ctf_field_put(len_field);
put_len_type:
	bt_ctf_field_type_put(len_type);
	return ret;
}

static int add_generic_values(struct ctf_writer *cw,
			      struct bt_ctf_event *event,
			      struct perf_evsel *evsel,
			      struct perf_sample *sample)
{
	u64 type = evsel->attr.sample_type;
	int ret;

	/*
	 * missing:
	 *   PERF_SAMPLE_TIME         - not needed as we have it in
	 *                              ctf event header
	 *   PERF_SAMPLE_READ         - TODO
	 *   PERF_SAMPLE_CALLCHAIN    - TODO
	 *   PERF_SAMPLE_RAW          - tracepoint fields are handled separately
	 *   PERF_SAMPLE_BRANCH_STACK - TODO
	 *   PERF_SAMPLE_REGS_USER    - TODO
	 *   PERF_SAMPLE_STACK_USER   - TODO
	 */

	if (type & PERF_SAMPLE_IP) {
		ret = value_set_u64_hex(cw, event, "perf_ip", sample->ip);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_TID) {
		ret = value_set_s32(cw, event, "perf_tid", sample->tid);
		if (ret)
			return -1;

		ret = value_set_s32(cw, event, "perf_pid", sample->pid);
		if (ret)
			return -1;
	}

	if ((type & PERF_SAMPLE_ID) ||
	    (type & PERF_SAMPLE_IDENTIFIER)) {
		ret = value_set_u64(cw, event, "perf_id", sample->id);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		ret = value_set_u64(cw, event, "perf_stream_id", sample->stream_id);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		ret = value_set_u64(cw, event, "perf_period", sample->period);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_WEIGHT) {
		ret = value_set_u64(cw, event, "perf_weight", sample->weight);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_DATA_SRC) {
		ret = value_set_u64(cw, event, "perf_data_src",
				    sample->data_src);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_TRANSACTION) {
		ret = value_set_u64(cw, event, "perf_transaction",
				    sample->transaction);
		if (ret)
			return -1;
	}

	return 0;
}

static int ctf_stream__flush(struct ctf_stream *cs)
{
	int err = 0;

	if (cs) {
		err = bt_ctf_stream_flush(cs->stream);
		if (err)
			pr_err("CTF stream %d flush failed\n", cs->cpu);

		pr("Flush stream for cpu %d (%u samples)\n",
		   cs->cpu, cs->count);

		cs->count = 0;
	}

	return err;
}

static struct ctf_stream *ctf_stream__create(struct ctf_writer *cw, int cpu)
{
	struct ctf_stream *cs;
	struct bt_ctf_field *pkt_ctx = NULL;
	struct bt_ctf_field *cpu_field = NULL;
	struct bt_ctf_stream *stream = NULL;
	int ret;

	cs = zalloc(sizeof(*cs));
	if (!cs) {
		pr_err("Failed to allocate ctf stream\n");
		return NULL;
	}

	stream = bt_ctf_writer_create_stream(cw->writer, cw->stream_class);
	if (!stream) {
		pr_err("Failed to create CTF stream\n");
		goto out;
	}

	pkt_ctx = bt_ctf_stream_get_packet_context(stream);
	if (!pkt_ctx) {
		pr_err("Failed to obtain packet context\n");
		goto out;
	}

	cpu_field = bt_ctf_field_structure_get_field(pkt_ctx, "cpu_id");
	bt_ctf_field_put(pkt_ctx);
	if (!cpu_field) {
		pr_err("Failed to obtain cpu field\n");
		goto out;
	}

	ret = bt_ctf_field_unsigned_integer_set_value(cpu_field, (u32) cpu);
	if (ret) {
		pr_err("Failed to update CPU number\n");
		goto out;
	}

	bt_ctf_field_put(cpu_field);

	cs->cpu = cpu;
	cs->stream = stream;
	return cs;

out:
	if (cpu_field)
		bt_ctf_field_put(cpu_field);
	if (stream)
		bt_ctf_stream_put(stream);

	free(cs);
	return NULL;
}

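/*
 * Streams are created lazily, one per CPU, the first time a sample for that
 * CPU is seen (see ctf_stream() below). They are flushed whenever their event
 * count crosses STREAM_FLUSH_COUNT, flushed once more at the end of the
 * conversion and finally released in free_streams().
 */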
static void ctf_stream__delete(struct ctf_stream *cs)
{
	if (cs) {
		bt_ctf_stream_put(cs->stream);
		free(cs);
	}
}

static struct ctf_stream *ctf_stream(struct ctf_writer *cw, int cpu)
{
	struct ctf_stream *cs = cw->stream[cpu];

	if (!cs) {
		cs = ctf_stream__create(cw, cpu);
		cw->stream[cpu] = cs;
	}

	return cs;
}

static int get_sample_cpu(struct ctf_writer *cw, struct perf_sample *sample,
			  struct perf_evsel *evsel)
{
	int cpu = 0;

	if (evsel->attr.sample_type & PERF_SAMPLE_CPU)
		cpu = sample->cpu;

	if (cpu >= cw->stream_cnt) {
		pr_err("Event was recorded for CPU %d, limit is at %d.\n",
		       cpu, cw->stream_cnt);
		cpu = 0;
	}

	return cpu;
}

#define STREAM_FLUSH_COUNT 100000

/*
 * Currently we have no other way to determine when to flush a stream than
 * to keep track of the number of events and check it against a threshold.
 */
static bool is_flush_needed(struct ctf_stream *cs)
{
	return cs->count >= STREAM_FLUSH_COUNT;
}

static int process_sample_event(struct perf_tool *tool,
				union perf_event *_event,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine __maybe_unused)
{
	struct convert *c = container_of(tool, struct convert, tool);
	struct evsel_priv *priv = evsel->priv;
	struct ctf_writer *cw = &c->writer;
	struct ctf_stream *cs;
	struct bt_ctf_event_class *event_class;
	struct bt_ctf_event *event;
	int ret;

	if (WARN_ONCE(!priv, "Failed to setup all events.\n"))
		return 0;

	event_class = priv->event_class;

	/* update stats */
	c->events_count++;
	c->events_size += _event->header.size;

	pr_time2(sample->time, "sample %" PRIu64 "\n", c->events_count);

	event = bt_ctf_event_create(event_class);
	if (!event) {
		pr_err("Failed to create a CTF event\n");
		return -1;
	}

	bt_ctf_clock_set_time(cw->clock, sample->time);

	ret = add_generic_values(cw, event, evsel, sample);
	if (ret)
		return -1;

	if (evsel->attr.type == PERF_TYPE_TRACEPOINT) {
		ret = add_tracepoint_values(cw, event_class, event,
					    evsel, sample);
		if (ret)
			return -1;
	}

	if (perf_evsel__is_bpf_output(evsel)) {
		ret = add_bpf_output_values(event_class, event, sample);
		if (ret)
			return -1;
	}

	cs = ctf_stream(cw, get_sample_cpu(cw, sample, evsel));
	if (cs) {
		if (is_flush_needed(cs))
			ctf_stream__flush(cs);

		cs->count++;
		bt_ctf_stream_append_event(cs->stream, event);
	}

	bt_ctf_event_put(event);
	return cs ? 0 : -1;
}

/* If dup < 0, add a prefix. Else, add a _dupl_X suffix. */
static char *change_name(char *name, char *orig_name, int dup)
{
	char *new_name = NULL;
	size_t len;

	if (!name)
		name = orig_name;

	if (dup >= 10)
		goto out;
	/*
	 * Add a '_' prefix to a potential keyword. According to
	 * Mathieu Desnoyers (https://lkml.org/lkml/2015/1/23/652),
	 * a further CTF spec update may require us to use '$'.
	 */
	if (dup < 0)
		len = strlen(name) + sizeof("_");
	else
		len = strlen(orig_name) + sizeof("_dupl_X");

	new_name = malloc(len);
	if (!new_name)
		goto out;

	if (dup < 0)
		snprintf(new_name, len, "_%s", name);
	else
		snprintf(new_name, len, "%s_dupl_%d", orig_name, dup);

out:
	if (name != orig_name)
		free(name);
	return new_name;
}

static int event_class_add_field(struct bt_ctf_event_class *event_class,
				 struct bt_ctf_field_type *type,
				 struct format_field *field)
{
	struct bt_ctf_field_type *t = NULL;
	char *name;
	int dup = 1;
	int ret;

	/* alias was already assigned */
	if (field->alias != field->name)
		return bt_ctf_event_class_add_field(event_class, type,
				(char *)field->alias);

	name = field->name;

	/* If 'name' is a keyword, add a prefix. */
	if (bt_ctf_validate_identifier(name))
		name = change_name(name, field->name, -1);

	if (!name) {
		pr_err("Failed to fix invalid identifier.\n");
		return -1;
	}
	while ((t = bt_ctf_event_class_get_field_by_name(event_class, name))) {
		bt_ctf_field_type_put(t);
		name = change_name(name, field->name, dup++);
		if (!name) {
			pr_err("Failed to create dup name for '%s'\n", field->name);
			return -1;
		}
	}

	ret = bt_ctf_event_class_add_field(event_class, type, name);
	if (!ret)
		field->alias = name;

	return ret;
}

static int add_tracepoint_fields_types(struct ctf_writer *cw,
				       struct format_field *fields,
				       struct bt_ctf_event_class *event_class)
{
	struct format_field *field;
	int ret;

	for (field = fields; field; field = field->next) {
		struct bt_ctf_field_type *type;
		unsigned long flags = field->flags;

		pr2(" field '%s'\n", field->name);

		type = get_tracepoint_field_type(cw, field);
		if (!type)
			return -1;

		/*
		 * A string is an array of chars. For this we use the string
		 * type and don't care that it is an array. What we don't
		 * support is an array of strings.
		 */
		if (flags & FIELD_IS_STRING)
			flags &= ~FIELD_IS_ARRAY;

		if (flags & FIELD_IS_ARRAY)
			type = bt_ctf_field_type_array_create(type, field->arraylen);

		ret = event_class_add_field(event_class, type, field);

		if (flags & FIELD_IS_ARRAY)
			bt_ctf_field_type_put(type);

		if (ret) {
			pr_err("Failed to add field '%s': %d\n",
			       field->name, ret);
			return -1;
		}
	}

	return 0;
}

static int add_tracepoint_types(struct ctf_writer *cw,
				struct perf_evsel *evsel,
				struct bt_ctf_event_class *class)
{
	struct format_field *common_fields = evsel->tp_format->format.common_fields;
	struct format_field *fields = evsel->tp_format->format.fields;
	int ret;

	ret = add_tracepoint_fields_types(cw, common_fields, class);
	if (!ret)
		ret = add_tracepoint_fields_types(cw, fields, class);

	return ret;
}

static int add_bpf_output_types(struct ctf_writer *cw,
				struct bt_ctf_event_class *class)
{
	struct bt_ctf_field_type *len_type = cw->data.u32;
	struct bt_ctf_field_type *seq_base_type = cw->data.u32_hex;
	struct bt_ctf_field_type *seq_type;
	int ret;

	ret = bt_ctf_event_class_add_field(class, len_type, "raw_len");
	if (ret)
		return ret;

	seq_type = bt_ctf_field_type_sequence_create(seq_base_type, "raw_len");
	if (!seq_type)
		return -1;

	return bt_ctf_event_class_add_field(class, seq_type, "raw_data");
}

static int add_generic_types(struct ctf_writer *cw, struct perf_evsel *evsel,
			     struct bt_ctf_event_class *event_class)
{
	u64 type = evsel->attr.sample_type;

	/*
	 * missing:
	 *   PERF_SAMPLE_TIME         - not needed as we have it in
	 *                              ctf event header
	 *   PERF_SAMPLE_READ         - TODO
	 *   PERF_SAMPLE_CALLCHAIN    - TODO
	 *   PERF_SAMPLE_RAW          - tracepoint fields and BPF output
	 *                              are handled separately
	 *   PERF_SAMPLE_BRANCH_STACK - TODO
	 *   PERF_SAMPLE_REGS_USER    - TODO
	 *   PERF_SAMPLE_STACK_USER   - TODO
	 */

#define ADD_FIELD(cl, t, n)						\
	do {								\
		pr2(" field '%s'\n", n);				\
		if (bt_ctf_event_class_add_field(cl, t, n)) {		\
			pr_err("Failed to add field '%s'\n", n);	\
			return -1;					\
		}							\
	} while (0)

	if (type & PERF_SAMPLE_IP)
		ADD_FIELD(event_class, cw->data.u64_hex, "perf_ip");

	if (type & PERF_SAMPLE_TID) {
		ADD_FIELD(event_class, cw->data.s32, "perf_tid");
		ADD_FIELD(event_class, cw->data.s32, "perf_pid");
	}

	if ((type & PERF_SAMPLE_ID) ||
	    (type & PERF_SAMPLE_IDENTIFIER))
		ADD_FIELD(event_class, cw->data.u64, "perf_id");

	if (type & PERF_SAMPLE_STREAM_ID)
		ADD_FIELD(event_class, cw->data.u64, "perf_stream_id");

	if (type & PERF_SAMPLE_PERIOD)
		ADD_FIELD(event_class, cw->data.u64, "perf_period");

	if (type & PERF_SAMPLE_WEIGHT)
		ADD_FIELD(event_class, cw->data.u64, "perf_weight");

	if (type & PERF_SAMPLE_DATA_SRC)
		ADD_FIELD(event_class, cw->data.u64, "perf_data_src");

	if (type & PERF_SAMPLE_TRANSACTION)
		ADD_FIELD(event_class, cw->data.u64, "perf_transaction");

#undef ADD_FIELD
	return 0;
}

static int add_event(struct ctf_writer *cw, struct perf_evsel *evsel)
{
	struct bt_ctf_event_class *event_class;
	struct evsel_priv *priv;
	const char *name = perf_evsel__name(evsel);
	int ret;

	pr("Adding event '%s' (type %d)\n", name, evsel->attr.type);

	event_class = bt_ctf_event_class_create(name);
	if (!event_class)
		return -1;

	/*
	 * Every event class carries the generic perf sample fields;
	 * tracepoint fields and the BPF output raw_len/raw_data pair
	 * are added on top for the matching event types.
	 */
	ret = add_generic_types(cw, evsel, event_class);
	if (ret)
		goto err;

	if (evsel->attr.type == PERF_TYPE_TRACEPOINT) {
		ret = add_tracepoint_types(cw, evsel, event_class);
		if (ret)
			goto err;
	}

	if (perf_evsel__is_bpf_output(evsel)) {
		ret = add_bpf_output_types(cw, event_class);
		if (ret)
			goto err;
	}

	ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);
	if (ret) {
		pr("Failed to add event class into stream.\n");
		goto err;
	}

	priv = malloc(sizeof(*priv));
	if (!priv)
		goto err;

	priv->event_class = event_class;
	evsel->priv = priv;
	return 0;

err:
	bt_ctf_event_class_put(event_class);
	pr_err("Failed to add event '%s'.\n", name);
	return -1;
}

static int setup_events(struct ctf_writer *cw, struct perf_session *session)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_evsel *evsel;
	int ret;

	evlist__for_each(evlist, evsel) {
		ret = add_event(cw, evsel);
		if (ret)
			return ret;
	}
	return 0;
}

static void cleanup_events(struct perf_session *session)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		struct evsel_priv *priv;

		priv = evsel->priv;
		bt_ctf_event_class_put(priv->event_class);
		zfree(&evsel->priv);
	}

	perf_evlist__delete(evlist);
	session->evlist = NULL;
}

static int setup_streams(struct ctf_writer *cw, struct perf_session *session)
{
	struct ctf_stream **stream;
	struct perf_header *ph = &session->header;
	int ncpus;

	/*
	 * Try to get the number of CPUs used in the data file;
	 * if it is not present, fall back to MAX_CPUS.
	 */
	ncpus = ph->env.nr_cpus_avail ?: MAX_CPUS;

	stream = zalloc(sizeof(*stream) * ncpus);
	if (!stream) {
		pr_err("Failed to allocate streams.\n");
		return -ENOMEM;
	}

	cw->stream = stream;
	cw->stream_cnt = ncpus;
	return 0;
}

static void free_streams(struct ctf_writer *cw)
{
	int cpu;

	for (cpu = 0; cpu < cw->stream_cnt; cpu++)
		ctf_stream__delete(cw->stream[cpu]);

	free(cw->stream);
}

static int ctf_writer__setup_env(struct ctf_writer *cw,
				 struct perf_session *session)
{
	struct perf_header *header = &session->header;
	struct bt_ctf_writer *writer = cw->writer;

#define ADD(__n, __v)							\
do {									\
	if (bt_ctf_writer_add_environment_field(writer, __n, __v))	\
		return -1;						\
} while (0)

	ADD("host", header->env.hostname);
	ADD("sysname", "Linux");
	ADD("release", header->env.os_release);
	ADD("version", header->env.version);
	ADD("machine", header->env.arch);
	ADD("domain", "kernel");
	ADD("tracer_name", "perf");

#undef ADD
	return 0;
}

static int ctf_writer__setup_clock(struct ctf_writer *cw)
{
	struct bt_ctf_clock *clock = cw->clock;

	bt_ctf_clock_set_description(clock, "perf clock");

#define SET(__n, __v)				\
do {						\
	if (bt_ctf_clock_set_##__n(clock, __v))	\
		return -1;			\
} while (0)

	SET(frequency, 1000000000);
	SET(offset_s, 0);
	SET(offset, 0);
	SET(precision, 10);
	SET(is_absolute, 0);

#undef SET
	return 0;
}

static struct bt_ctf_field_type *create_int_type(int size, bool sign, bool hex)
{
	struct bt_ctf_field_type *type;

	type = bt_ctf_field_type_integer_create(size);
	if (!type)
		return NULL;

	if (sign &&
	    bt_ctf_field_type_integer_set_signed(type, 1))
		goto err;

	if (hex &&
	    bt_ctf_field_type_integer_set_base(type, BT_CTF_INTEGER_BASE_HEXADECIMAL))
		goto err;

#if __BYTE_ORDER == __BIG_ENDIAN
	bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_BIG_ENDIAN);
#else
	bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_LITTLE_ENDIAN);
#endif

	pr2("Created type: INTEGER %d-bit %ssigned %s\n",
	    size, sign ? "" : "un", hex ? "hex" : "");
	return type;

err:
	bt_ctf_field_type_put(type);
	return NULL;
}

static void ctf_writer__cleanup_data(struct ctf_writer *cw)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(cw->data.array); i++)
		bt_ctf_field_type_put(cw->data.array[i]);
}

static int ctf_writer__init_data(struct ctf_writer *cw)
{
#define CREATE_INT_TYPE(type, size, sign, hex)		\
do {							\
	(type) = create_int_type(size, sign, hex);	\
	if (!(type))					\
		goto err;				\
} while (0)

	CREATE_INT_TYPE(cw->data.s64, 64, true, false);
	CREATE_INT_TYPE(cw->data.u64, 64, false, false);
	CREATE_INT_TYPE(cw->data.s32, 32, true, false);
	CREATE_INT_TYPE(cw->data.u32, 32, false, false);
	CREATE_INT_TYPE(cw->data.u32_hex, 32, false, true);
	CREATE_INT_TYPE(cw->data.u64_hex, 64, false, true);

	cw->data.string = bt_ctf_field_type_string_create();
	if (cw->data.string)
		return 0;

err:
	ctf_writer__cleanup_data(cw);
	pr_err("Failed to create data types.\n");
	return -1;
}

static void ctf_writer__cleanup(struct ctf_writer *cw)
{
	ctf_writer__cleanup_data(cw);

	bt_ctf_clock_put(cw->clock);
	free_streams(cw);
	bt_ctf_stream_class_put(cw->stream_class);
	bt_ctf_writer_put(cw->writer);

	/* and NULL all the pointers */
	memset(cw, 0, sizeof(*cw));
}

static int ctf_writer__init(struct ctf_writer *cw, const char *path)
{
	struct bt_ctf_writer *writer;
	struct bt_ctf_stream_class *stream_class;
	struct bt_ctf_clock *clock;
	struct bt_ctf_field_type *pkt_ctx_type;
	int ret;

	/* CTF writer */
	writer = bt_ctf_writer_create(path);
	if (!writer)
		goto err;

	cw->writer = writer;

	/* CTF clock */
	clock = bt_ctf_clock_create("perf_clock");
	if (!clock) {
		pr("Failed to create CTF clock.\n");
		goto err_cleanup;
	}

	cw->clock = clock;

	if (ctf_writer__setup_clock(cw)) {
		pr("Failed to setup CTF clock.\n");
		goto err_cleanup;
	}

	/* CTF stream class */
	stream_class = bt_ctf_stream_class_create("perf_stream");
	if (!stream_class) {
		pr("Failed to create CTF stream class.\n");
		goto err_cleanup;
	}

	cw->stream_class = stream_class;

	/* CTF clock stream setup */
	if (bt_ctf_stream_class_set_clock(stream_class, clock)) {
		pr("Failed to assign CTF clock to stream class.\n");
		goto err_cleanup;
	}

	if (ctf_writer__init_data(cw))
		goto err_cleanup;

	/* Add cpu_id for packet context */
	pkt_ctx_type = bt_ctf_stream_class_get_packet_context_type(stream_class);
	if (!pkt_ctx_type)
		goto err_cleanup;

	ret = bt_ctf_field_type_structure_add_field(pkt_ctx_type, cw->data.u32, "cpu_id");
	bt_ctf_field_type_put(pkt_ctx_type);
	if (ret)
		goto err_cleanup;

	/* CTF clock writer setup */
	if (bt_ctf_writer_add_clock(writer, clock)) {
		pr("Failed to assign CTF clock to writer.\n");
		goto err_cleanup;
	}

	return 0;

err_cleanup:
	ctf_writer__cleanup(cw);
err:
	pr_err("Failed to setup CTF writer.\n");
	return -1;
}

static int ctf_writer__flush_streams(struct ctf_writer *cw)
{
	int cpu, ret = 0;

	for (cpu = 0; cpu < cw->stream_cnt && !ret; cpu++)
		ret = ctf_stream__flush(cw->stream[cpu]);

	return ret;
}

static int convert__config(const char *var, const char *value, void *cb)
{
	struct convert *c = cb;

	if (!strcmp(var, "convert.queue-size")) {
		c->queue_size = perf_config_u64(var, value);
		return 0;
	}

	return 0;
}

int bt_convert__perf2ctf(const char *input, const char *path, bool force)
{
	struct perf_session *session;
	struct perf_data_file file = {
		.path = input,
		.mode = PERF_DATA_MODE_READ,
		.force = force,
	};
	struct convert c = {
		.tool = {
			.sample          = process_sample_event,
			.mmap            = perf_event__process_mmap,
			.mmap2           = perf_event__process_mmap2,
			.comm            = perf_event__process_comm,
			.exit            = perf_event__process_exit,
			.fork            = perf_event__process_fork,
			.lost            = perf_event__process_lost,
			.tracing_data    = perf_event__process_tracing_data,
			.build_id        = perf_event__process_build_id,
			.ordered_events  = true,
			.ordering_requires_timestamps = true,
		},
	};
	struct ctf_writer *cw = &c.writer;
	int err = -1;

	perf_config(convert__config, &c);

	/* CTF writer */
	if (ctf_writer__init(cw, path))
		return -1;

	/* perf.data session */
	session = perf_session__new(&file, 0, &c.tool);
	if (!session)
		goto free_writer;

	if (c.queue_size) {
		ordered_events__set_alloc_size(&session->ordered_events,
					       c.queue_size);
	}

	/* CTF writer env/clock setup */
	if (ctf_writer__setup_env(cw, session))
		goto free_session;

	/* CTF events setup */
	if (setup_events(cw, session))
		goto free_session;

	if (setup_streams(cw, session))
		goto free_session;

	err = perf_session__process_events(session);
	if (!err)
		err = ctf_writer__flush_streams(cw);
	else
		pr_err("Error during conversion.\n");

	fprintf(stderr,
		"[ perf data convert: Converted '%s' into CTF data '%s' ]\n",
		file.path, path);

	fprintf(stderr,
		"[ perf data convert: Converted and wrote %.3f MB (%" PRIu64 " samples) ]\n",
		(double) c.events_size / 1024.0 / 1024.0,
		c.events_count);

	cleanup_events(session);
	perf_session__delete(session);
	ctf_writer__cleanup(cw);

	return err;

free_session:
	perf_session__delete(session);
free_writer:
	ctf_writer__cleanup(cw);
	pr_err("Error during conversion setup.\n");
	return err;
}