1 // SPDX-License-Identifier: GPL-2.0 2 #include "builtin.h" 3 #include "perf.h" 4 5 #include "util/build-id.h" 6 #include "util/evsel.h" 7 #include "util/evlist.h" 8 #include "util/mmap.h" 9 #include "util/term.h" 10 #include "util/symbol.h" 11 #include "util/thread.h" 12 #include "util/header.h" 13 #include "util/session.h" 14 #include "util/intlist.h" 15 #include <subcmd/pager.h> 16 #include <subcmd/parse-options.h> 17 #include "util/trace-event.h" 18 #include "util/debug.h" 19 #include "util/tool.h" 20 #include "util/stat.h" 21 #include "util/synthetic-events.h" 22 #include "util/top.h" 23 #include "util/data.h" 24 #include "util/ordered-events.h" 25 #include "util/kvm-stat.h" 26 #include "util/util.h" 27 #include "ui/browsers/hists.h" 28 #include "ui/progress.h" 29 #include "ui/ui.h" 30 #include "util/string2.h" 31 32 #include <sys/prctl.h> 33 #ifdef HAVE_TIMERFD_SUPPORT 34 #include <sys/timerfd.h> 35 #endif 36 #include <sys/time.h> 37 #include <sys/types.h> 38 #include <sys/stat.h> 39 #include <fcntl.h> 40 41 #include <linux/err.h> 42 #include <linux/kernel.h> 43 #include <linux/string.h> 44 #include <linux/time64.h> 45 #include <linux/zalloc.h> 46 #include <errno.h> 47 #include <inttypes.h> 48 #include <poll.h> 49 #include <termios.h> 50 #include <semaphore.h> 51 #include <signal.h> 52 #include <math.h> 53 #include <perf/mmap.h> 54 55 #if defined(HAVE_KVM_STAT_SUPPORT) && defined(HAVE_LIBTRACEEVENT) 56 #define GET_EVENT_KEY(func, field) \ 57 static u64 get_event_ ##func(struct kvm_event *event, int vcpu) \ 58 { \ 59 if (vcpu == -1) \ 60 return event->total.field; \ 61 \ 62 if (vcpu >= event->max_vcpu) \ 63 return 0; \ 64 \ 65 return event->vcpu[vcpu].field; \ 66 } 67 68 #define COMPARE_EVENT_KEY(func, field) \ 69 GET_EVENT_KEY(func, field) \ 70 static int64_t cmp_event_ ## func(struct kvm_event *one, \ 71 struct kvm_event *two, int vcpu) \ 72 { \ 73 return get_event_ ##func(one, vcpu) - \ 74 get_event_ ##func(two, vcpu); \ 75 } 76 77 COMPARE_EVENT_KEY(time, time); 78 COMPARE_EVENT_KEY(max, stats.max); 79 COMPARE_EVENT_KEY(min, stats.min); 80 COMPARE_EVENT_KEY(count, stats.n); 81 COMPARE_EVENT_KEY(mean, stats.mean); 82 83 struct kvm_hists { 84 struct hists hists; 85 struct perf_hpp_list list; 86 }; 87 88 struct kvm_dimension { 89 const char *name; 90 const char *header; 91 int width; 92 int64_t (*cmp)(struct perf_hpp_fmt *fmt, struct hist_entry *left, 93 struct hist_entry *right); 94 int (*entry)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 95 struct hist_entry *he); 96 }; 97 98 struct kvm_fmt { 99 struct perf_hpp_fmt fmt; 100 struct kvm_dimension *dim; 101 }; 102 103 static struct kvm_hists kvm_hists; 104 105 static int64_t ev_name_cmp(struct perf_hpp_fmt *fmt __maybe_unused, 106 struct hist_entry *left, 107 struct hist_entry *right) 108 { 109 /* Return opposite number for sorting in alphabetical order */ 110 return -strcmp(left->kvm_info->name, right->kvm_info->name); 111 } 112 113 static int fmt_width(struct perf_hpp_fmt *fmt, 114 struct perf_hpp *hpp __maybe_unused, 115 struct hists *hists __maybe_unused); 116 117 static int ev_name_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 118 struct hist_entry *he) 119 { 120 int width = fmt_width(fmt, hpp, he->hists); 121 122 return scnprintf(hpp->buf, hpp->size, "%*s", width, he->kvm_info->name); 123 } 124 125 static struct kvm_dimension dim_event = { 126 .header = "Event name", 127 .name = "ev_name", 128 .cmp = ev_name_cmp, 129 .entry = ev_name_entry, 130 .width = 40, 131 }; 132 133 #define EV_METRIC_CMP(metric) \ 134 static int64_t ev_cmp_##metric(struct perf_hpp_fmt *fmt __maybe_unused, \ 135 struct hist_entry *left, \ 136 struct hist_entry *right) \ 137 { \ 138 struct kvm_event *event_left; \ 139 struct kvm_event *event_right; \ 140 struct perf_kvm_stat *perf_kvm; \ 141 \ 142 event_left = container_of(left, struct kvm_event, he); \ 143 event_right = container_of(right, struct kvm_event, he); \ 144 \ 145 perf_kvm = event_left->perf_kvm; \ 146 return cmp_event_##metric(event_left, event_right, \ 147 perf_kvm->trace_vcpu); \ 148 } 149 150 EV_METRIC_CMP(time) 151 EV_METRIC_CMP(count) 152 EV_METRIC_CMP(max) 153 EV_METRIC_CMP(min) 154 EV_METRIC_CMP(mean) 155 156 #define EV_METRIC_ENTRY(metric) \ 157 static int ev_entry_##metric(struct perf_hpp_fmt *fmt, \ 158 struct perf_hpp *hpp, \ 159 struct hist_entry *he) \ 160 { \ 161 struct kvm_event *event; \ 162 int width = fmt_width(fmt, hpp, he->hists); \ 163 struct perf_kvm_stat *perf_kvm; \ 164 \ 165 event = container_of(he, struct kvm_event, he); \ 166 perf_kvm = event->perf_kvm; \ 167 return scnprintf(hpp->buf, hpp->size, "%*lu", width, \ 168 get_event_##metric(event, perf_kvm->trace_vcpu)); \ 169 } 170 171 EV_METRIC_ENTRY(time) 172 EV_METRIC_ENTRY(count) 173 EV_METRIC_ENTRY(max) 174 EV_METRIC_ENTRY(min) 175 176 static struct kvm_dimension dim_time = { 177 .header = "Time (ns)", 178 .name = "time", 179 .cmp = ev_cmp_time, 180 .entry = ev_entry_time, 181 .width = 12, 182 }; 183 184 static struct kvm_dimension dim_count = { 185 .header = "Samples", 186 .name = "sample", 187 .cmp = ev_cmp_count, 188 .entry = ev_entry_count, 189 .width = 12, 190 }; 191 192 static struct kvm_dimension dim_max_time = { 193 .header = "Max Time (ns)", 194 .name = "max_t", 195 .cmp = ev_cmp_max, 196 .entry = ev_entry_max, 197 .width = 14, 198 }; 199 200 static struct kvm_dimension dim_min_time = { 201 .header = "Min Time (ns)", 202 .name = "min_t", 203 .cmp = ev_cmp_min, 204 .entry = ev_entry_min, 205 .width = 14, 206 }; 207 208 static int ev_entry_mean(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 209 struct hist_entry *he) 210 { 211 struct kvm_event *event; 212 int width = fmt_width(fmt, hpp, he->hists); 213 struct perf_kvm_stat *perf_kvm; 214 215 event = container_of(he, struct kvm_event, he); 216 perf_kvm = event->perf_kvm; 217 return scnprintf(hpp->buf, hpp->size, "%*lu", width, 218 get_event_mean(event, perf_kvm->trace_vcpu)); 219 } 220 221 static struct kvm_dimension dim_mean_time = { 222 .header = "Mean Time (ns)", 223 .name = "mean_t", 224 .cmp = ev_cmp_mean, 225 .entry = ev_entry_mean, 226 .width = 14, 227 }; 228 229 #define PERC_STR(__s, __v) \ 230 ({ \ 231 scnprintf(__s, sizeof(__s), "%.2F%%", __v); \ 232 __s; \ 233 }) 234 235 static double percent(u64 st, u64 tot) 236 { 237 return tot ? 100. * (double) st / (double) tot : 0; 238 } 239 240 #define EV_METRIC_PERCENT(metric) \ 241 static int ev_percent_##metric(struct hist_entry *he) \ 242 { \ 243 struct kvm_event *event; \ 244 struct perf_kvm_stat *perf_kvm; \ 245 \ 246 event = container_of(he, struct kvm_event, he); \ 247 perf_kvm = event->perf_kvm; \ 248 \ 249 return percent(get_event_##metric(event, perf_kvm->trace_vcpu), \ 250 perf_kvm->total_##metric); \ 251 } 252 253 EV_METRIC_PERCENT(time) 254 EV_METRIC_PERCENT(count) 255 256 static int ev_entry_time_precent(struct perf_hpp_fmt *fmt, 257 struct perf_hpp *hpp, 258 struct hist_entry *he) 259 { 260 int width = fmt_width(fmt, hpp, he->hists); 261 double per; 262 char buf[10]; 263 264 per = ev_percent_time(he); 265 return scnprintf(hpp->buf, hpp->size, "%*s", width, PERC_STR(buf, per)); 266 } 267 268 static int64_t 269 ev_cmp_time_precent(struct perf_hpp_fmt *fmt __maybe_unused, 270 struct hist_entry *left, struct hist_entry *right) 271 { 272 double per_left; 273 double per_right; 274 275 per_left = ev_percent_time(left); 276 per_right = ev_percent_time(right); 277 278 return per_left - per_right; 279 } 280 281 static struct kvm_dimension dim_time_percent = { 282 .header = "Time%", 283 .name = "percent_time", 284 .cmp = ev_cmp_time_precent, 285 .entry = ev_entry_time_precent, 286 .width = 12, 287 }; 288 289 static int ev_entry_count_precent(struct perf_hpp_fmt *fmt, 290 struct perf_hpp *hpp, 291 struct hist_entry *he) 292 { 293 int width = fmt_width(fmt, hpp, he->hists); 294 double per; 295 char buf[10]; 296 297 per = ev_percent_count(he); 298 return scnprintf(hpp->buf, hpp->size, "%*s", width, PERC_STR(buf, per)); 299 } 300 301 static int64_t 302 ev_cmp_count_precent(struct perf_hpp_fmt *fmt __maybe_unused, 303 struct hist_entry *left, struct hist_entry *right) 304 { 305 double per_left; 306 double per_right; 307 308 per_left = ev_percent_count(left); 309 per_right = ev_percent_count(right); 310 311 return per_left - per_right; 312 } 313 314 static struct kvm_dimension dim_count_percent = { 315 .header = "Sample%", 316 .name = "percent_sample", 317 .cmp = ev_cmp_count_precent, 318 .entry = ev_entry_count_precent, 319 .width = 12, 320 }; 321 322 static struct kvm_dimension *dimensions[] = { 323 &dim_event, 324 &dim_time, 325 &dim_time_percent, 326 &dim_count, 327 &dim_count_percent, 328 &dim_max_time, 329 &dim_min_time, 330 &dim_mean_time, 331 NULL, 332 }; 333 334 static int fmt_width(struct perf_hpp_fmt *fmt, 335 struct perf_hpp *hpp __maybe_unused, 336 struct hists *hists __maybe_unused) 337 { 338 struct kvm_fmt *kvm_fmt; 339 340 kvm_fmt = container_of(fmt, struct kvm_fmt, fmt); 341 return kvm_fmt->dim->width; 342 } 343 344 static int fmt_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 345 struct hists *hists, int line __maybe_unused, 346 int *span __maybe_unused) 347 { 348 struct kvm_fmt *kvm_fmt; 349 struct kvm_dimension *dim; 350 int width = fmt_width(fmt, hpp, hists); 351 352 kvm_fmt = container_of(fmt, struct kvm_fmt, fmt); 353 dim = kvm_fmt->dim; 354 355 return scnprintf(hpp->buf, hpp->size, "%*s", width, dim->header); 356 } 357 358 static bool fmt_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b) 359 { 360 struct kvm_fmt *kvm_fmt_a = container_of(a, struct kvm_fmt, fmt); 361 struct kvm_fmt *kvm_fmt_b = container_of(b, struct kvm_fmt, fmt); 362 363 return kvm_fmt_a->dim == kvm_fmt_b->dim; 364 } 365 366 static void fmt_free(struct perf_hpp_fmt *fmt) 367 { 368 struct kvm_fmt *kvm_fmt; 369 370 kvm_fmt = container_of(fmt, struct kvm_fmt, fmt); 371 free(kvm_fmt); 372 } 373 374 static struct kvm_dimension *get_dimension(const char *name) 375 { 376 unsigned int i; 377 378 for (i = 0; dimensions[i] != NULL; i++) { 379 if (!strcmp(dimensions[i]->name, name)) 380 return dimensions[i]; 381 } 382 383 return NULL; 384 } 385 386 static struct kvm_fmt *get_format(const char *name) 387 { 388 struct kvm_dimension *dim = get_dimension(name); 389 struct kvm_fmt *kvm_fmt; 390 struct perf_hpp_fmt *fmt; 391 392 if (!dim) 393 return NULL; 394 395 kvm_fmt = zalloc(sizeof(*kvm_fmt)); 396 if (!kvm_fmt) 397 return NULL; 398 399 kvm_fmt->dim = dim; 400 401 fmt = &kvm_fmt->fmt; 402 INIT_LIST_HEAD(&fmt->list); 403 INIT_LIST_HEAD(&fmt->sort_list); 404 fmt->cmp = dim->cmp; 405 fmt->sort = dim->cmp; 406 fmt->color = NULL; 407 fmt->entry = dim->entry; 408 fmt->header = fmt_header; 409 fmt->width = fmt_width; 410 fmt->collapse = dim->cmp; 411 fmt->equal = fmt_equal; 412 fmt->free = fmt_free; 413 414 return kvm_fmt; 415 } 416 417 static int kvm_hists__init_output(struct perf_hpp_list *hpp_list, char *name) 418 { 419 struct kvm_fmt *kvm_fmt = get_format(name); 420 421 if (!kvm_fmt) { 422 pr_warning("Fail to find format for output field %s.\n", name); 423 return -EINVAL; 424 } 425 426 perf_hpp_list__column_register(hpp_list, &kvm_fmt->fmt); 427 return 0; 428 } 429 430 static int kvm_hists__init_sort(struct perf_hpp_list *hpp_list, char *name) 431 { 432 struct kvm_fmt *kvm_fmt = get_format(name); 433 434 if (!kvm_fmt) { 435 pr_warning("Fail to find format for sorting %s.\n", name); 436 return -EINVAL; 437 } 438 439 perf_hpp_list__register_sort_field(hpp_list, &kvm_fmt->fmt); 440 return 0; 441 } 442 443 static int kvm_hpp_list__init(char *list, 444 struct perf_hpp_list *hpp_list, 445 int (*fn)(struct perf_hpp_list *hpp_list, 446 char *name)) 447 { 448 char *tmp, *tok; 449 int ret; 450 451 if (!list || !fn) 452 return 0; 453 454 for (tok = strtok_r(list, ", ", &tmp); tok; 455 tok = strtok_r(NULL, ", ", &tmp)) { 456 ret = fn(hpp_list, tok); 457 if (!ret) 458 continue; 459 460 /* Handle errors */ 461 if (ret == -EINVAL) 462 pr_err("Invalid field key: '%s'", tok); 463 else if (ret == -ESRCH) 464 pr_err("Unknown field key: '%s'", tok); 465 else 466 pr_err("Fail to initialize for field key: '%s'", tok); 467 468 break; 469 } 470 471 return ret; 472 } 473 474 static int kvm_hpp_list__parse(struct perf_hpp_list *hpp_list, 475 const char *output_, const char *sort_) 476 { 477 char *output = output_ ? strdup(output_) : NULL; 478 char *sort = sort_ ? strdup(sort_) : NULL; 479 int ret; 480 481 ret = kvm_hpp_list__init(output, hpp_list, kvm_hists__init_output); 482 if (ret) 483 goto out; 484 485 ret = kvm_hpp_list__init(sort, hpp_list, kvm_hists__init_sort); 486 if (ret) 487 goto out; 488 489 /* Copy sort keys to output fields */ 490 perf_hpp__setup_output_field(hpp_list); 491 492 /* and then copy output fields to sort keys */ 493 perf_hpp__append_sort_keys(hpp_list); 494 out: 495 free(output); 496 free(sort); 497 return ret; 498 } 499 500 static int kvm_hists__init(void) 501 { 502 kvm_hists.list.nr_header_lines = 1; 503 __hists__init(&kvm_hists.hists, &kvm_hists.list); 504 perf_hpp_list__init(&kvm_hists.list); 505 return kvm_hpp_list__parse(&kvm_hists.list, NULL, "ev_name"); 506 } 507 508 static int kvm_hists__reinit(const char *output, const char *sort) 509 { 510 perf_hpp__reset_output_field(&kvm_hists.list); 511 return kvm_hpp_list__parse(&kvm_hists.list, output, sort); 512 } 513 static void print_result(struct perf_kvm_stat *kvm); 514 515 #ifdef HAVE_SLANG_SUPPORT 516 static void kvm_browser__update_nr_entries(struct hist_browser *hb) 517 { 518 struct rb_node *nd = rb_first_cached(&hb->hists->entries); 519 u64 nr_entries = 0; 520 521 for (; nd; nd = rb_next(nd)) { 522 struct hist_entry *he = rb_entry(nd, struct hist_entry, 523 rb_node); 524 525 if (!he->filtered) 526 nr_entries++; 527 } 528 529 hb->nr_non_filtered_entries = nr_entries; 530 } 531 532 static int kvm_browser__title(struct hist_browser *browser, 533 char *buf, size_t size) 534 { 535 scnprintf(buf, size, "KVM event statistics (%lu entries)", 536 browser->nr_non_filtered_entries); 537 return 0; 538 } 539 540 static struct hist_browser* 541 perf_kvm_browser__new(struct hists *hists) 542 { 543 struct hist_browser *browser = hist_browser__new(hists); 544 545 if (browser) 546 browser->title = kvm_browser__title; 547 548 return browser; 549 } 550 551 static int kvm__hists_browse(struct hists *hists) 552 { 553 struct hist_browser *browser; 554 int key = -1; 555 556 browser = perf_kvm_browser__new(hists); 557 if (browser == NULL) 558 return -1; 559 560 /* reset abort key so that it can get Ctrl-C as a key */ 561 SLang_reset_tty(); 562 SLang_init_tty(0, 0, 0); 563 564 kvm_browser__update_nr_entries(browser); 565 566 while (1) { 567 key = hist_browser__run(browser, "? - help", true, 0); 568 569 switch (key) { 570 case 'q': 571 goto out; 572 default: 573 break; 574 } 575 } 576 577 out: 578 hist_browser__delete(browser); 579 return 0; 580 } 581 582 static void kvm_display(struct perf_kvm_stat *kvm) 583 { 584 if (!use_browser) 585 print_result(kvm); 586 else 587 kvm__hists_browse(&kvm_hists.hists); 588 } 589 590 #else 591 592 static void kvm_display(struct perf_kvm_stat *kvm) 593 { 594 use_browser = 0; 595 print_result(kvm); 596 } 597 598 #endif /* HAVE_SLANG_SUPPORT */ 599 600 #endif // defined(HAVE_KVM_STAT_SUPPORT) && defined(HAVE_LIBTRACEEVENT) 601 602 static const char *get_filename_for_perf_kvm(void) 603 { 604 const char *filename; 605 606 if (perf_host && !perf_guest) 607 filename = strdup("perf.data.host"); 608 else if (!perf_host && perf_guest) 609 filename = strdup("perf.data.guest"); 610 else 611 filename = strdup("perf.data.kvm"); 612 613 return filename; 614 } 615 616 #if defined(HAVE_KVM_STAT_SUPPORT) && defined(HAVE_LIBTRACEEVENT) 617 618 void exit_event_get_key(struct evsel *evsel, 619 struct perf_sample *sample, 620 struct event_key *key) 621 { 622 key->info = 0; 623 key->key = evsel__intval(evsel, sample, kvm_exit_reason); 624 } 625 626 bool kvm_exit_event(struct evsel *evsel) 627 { 628 return evsel__name_is(evsel, kvm_exit_trace); 629 } 630 631 bool exit_event_begin(struct evsel *evsel, 632 struct perf_sample *sample, struct event_key *key) 633 { 634 if (kvm_exit_event(evsel)) { 635 exit_event_get_key(evsel, sample, key); 636 return true; 637 } 638 639 return false; 640 } 641 642 bool kvm_entry_event(struct evsel *evsel) 643 { 644 return evsel__name_is(evsel, kvm_entry_trace); 645 } 646 647 bool exit_event_end(struct evsel *evsel, 648 struct perf_sample *sample __maybe_unused, 649 struct event_key *key __maybe_unused) 650 { 651 return kvm_entry_event(evsel); 652 } 653 654 static const char *get_exit_reason(struct perf_kvm_stat *kvm, 655 struct exit_reasons_table *tbl, 656 u64 exit_code) 657 { 658 while (tbl->reason != NULL) { 659 if (tbl->exit_code == exit_code) 660 return tbl->reason; 661 tbl++; 662 } 663 664 pr_err("unknown kvm exit code:%lld on %s\n", 665 (unsigned long long)exit_code, kvm->exit_reasons_isa); 666 return "UNKNOWN"; 667 } 668 669 void exit_event_decode_key(struct perf_kvm_stat *kvm, 670 struct event_key *key, 671 char *decode) 672 { 673 const char *exit_reason = get_exit_reason(kvm, key->exit_reasons, 674 key->key); 675 676 scnprintf(decode, KVM_EVENT_NAME_LEN, "%s", exit_reason); 677 } 678 679 static bool register_kvm_events_ops(struct perf_kvm_stat *kvm) 680 { 681 struct kvm_reg_events_ops *events_ops = kvm_reg_events_ops; 682 683 for (events_ops = kvm_reg_events_ops; events_ops->name; events_ops++) { 684 if (!strcmp(events_ops->name, kvm->report_event)) { 685 kvm->events_ops = events_ops->ops; 686 return true; 687 } 688 } 689 690 return false; 691 } 692 693 struct vcpu_event_record { 694 int vcpu_id; 695 u64 start_time; 696 struct kvm_event *last_event; 697 }; 698 699 #ifdef HAVE_TIMERFD_SUPPORT 700 static void clear_events_cache_stats(void) 701 { 702 struct rb_root_cached *root; 703 struct rb_node *nd; 704 struct kvm_event *event; 705 int i; 706 707 if (hists__has(&kvm_hists.hists, need_collapse)) 708 root = &kvm_hists.hists.entries_collapsed; 709 else 710 root = kvm_hists.hists.entries_in; 711 712 for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) { 713 struct hist_entry *he; 714 715 he = rb_entry(nd, struct hist_entry, rb_node_in); 716 event = container_of(he, struct kvm_event, he); 717 718 /* reset stats for event */ 719 event->total.time = 0; 720 init_stats(&event->total.stats); 721 722 for (i = 0; i < event->max_vcpu; ++i) { 723 event->vcpu[i].time = 0; 724 init_stats(&event->vcpu[i].stats); 725 } 726 } 727 } 728 #endif 729 730 static bool kvm_event_expand(struct kvm_event *event, int vcpu_id) 731 { 732 int old_max_vcpu = event->max_vcpu; 733 void *prev; 734 735 if (vcpu_id < event->max_vcpu) 736 return true; 737 738 while (event->max_vcpu <= vcpu_id) 739 event->max_vcpu += DEFAULT_VCPU_NUM; 740 741 prev = event->vcpu; 742 event->vcpu = realloc(event->vcpu, 743 event->max_vcpu * sizeof(*event->vcpu)); 744 if (!event->vcpu) { 745 free(prev); 746 pr_err("Not enough memory\n"); 747 return false; 748 } 749 750 memset(event->vcpu + old_max_vcpu, 0, 751 (event->max_vcpu - old_max_vcpu) * sizeof(*event->vcpu)); 752 return true; 753 } 754 755 static void *kvm_he_zalloc(size_t size) 756 { 757 struct kvm_event *kvm_ev; 758 759 kvm_ev = zalloc(size + sizeof(*kvm_ev)); 760 if (!kvm_ev) 761 return NULL; 762 763 init_stats(&kvm_ev->total.stats); 764 hists__inc_nr_samples(&kvm_hists.hists, 0); 765 return &kvm_ev->he; 766 } 767 768 static void kvm_he_free(void *he) 769 { 770 struct kvm_event *kvm_ev; 771 772 kvm_ev = container_of(he, struct kvm_event, he); 773 free(kvm_ev); 774 } 775 776 static struct hist_entry_ops kvm_ev_entry_ops = { 777 .new = kvm_he_zalloc, 778 .free = kvm_he_free, 779 }; 780 781 static struct kvm_event *find_create_kvm_event(struct perf_kvm_stat *kvm, 782 struct event_key *key, 783 struct perf_sample *sample) 784 { 785 struct kvm_event *event; 786 struct hist_entry *he; 787 struct kvm_info *ki; 788 789 BUG_ON(key->key == INVALID_KEY); 790 791 ki = kvm_info__new(); 792 if (!ki) { 793 pr_err("Failed to allocate kvm info\n"); 794 return NULL; 795 } 796 797 kvm->events_ops->decode_key(kvm, key, ki->name); 798 he = hists__add_entry_ops(&kvm_hists.hists, &kvm_ev_entry_ops, 799 &kvm->al, NULL, NULL, NULL, ki, sample, true); 800 if (he == NULL) { 801 pr_err("Failed to allocate hist entry\n"); 802 free(ki); 803 return NULL; 804 } 805 806 event = container_of(he, struct kvm_event, he); 807 if (!event->perf_kvm) { 808 event->perf_kvm = kvm; 809 event->key = *key; 810 } 811 812 return event; 813 } 814 815 static bool handle_begin_event(struct perf_kvm_stat *kvm, 816 struct vcpu_event_record *vcpu_record, 817 struct event_key *key, 818 struct perf_sample *sample) 819 { 820 struct kvm_event *event = NULL; 821 822 if (key->key != INVALID_KEY) 823 event = find_create_kvm_event(kvm, key, sample); 824 825 vcpu_record->last_event = event; 826 vcpu_record->start_time = sample->time; 827 return true; 828 } 829 830 static void 831 kvm_update_event_stats(struct kvm_event_stats *kvm_stats, u64 time_diff) 832 { 833 kvm_stats->time += time_diff; 834 update_stats(&kvm_stats->stats, time_diff); 835 } 836 837 static double kvm_event_rel_stddev(int vcpu_id, struct kvm_event *event) 838 { 839 struct kvm_event_stats *kvm_stats = &event->total; 840 841 if (vcpu_id != -1) 842 kvm_stats = &event->vcpu[vcpu_id]; 843 844 return rel_stddev_stats(stddev_stats(&kvm_stats->stats), 845 avg_stats(&kvm_stats->stats)); 846 } 847 848 static bool update_kvm_event(struct perf_kvm_stat *kvm, 849 struct kvm_event *event, int vcpu_id, 850 u64 time_diff) 851 { 852 /* Update overall statistics */ 853 kvm->total_count++; 854 kvm->total_time += time_diff; 855 856 if (vcpu_id == -1) { 857 kvm_update_event_stats(&event->total, time_diff); 858 return true; 859 } 860 861 if (!kvm_event_expand(event, vcpu_id)) 862 return false; 863 864 kvm_update_event_stats(&event->vcpu[vcpu_id], time_diff); 865 return true; 866 } 867 868 static bool is_child_event(struct perf_kvm_stat *kvm, 869 struct evsel *evsel, 870 struct perf_sample *sample, 871 struct event_key *key) 872 { 873 struct child_event_ops *child_ops; 874 875 child_ops = kvm->events_ops->child_ops; 876 877 if (!child_ops) 878 return false; 879 880 for (; child_ops->name; child_ops++) { 881 if (evsel__name_is(evsel, child_ops->name)) { 882 child_ops->get_key(evsel, sample, key); 883 return true; 884 } 885 } 886 887 return false; 888 } 889 890 static bool handle_child_event(struct perf_kvm_stat *kvm, 891 struct vcpu_event_record *vcpu_record, 892 struct event_key *key, 893 struct perf_sample *sample) 894 { 895 struct kvm_event *event = NULL; 896 897 if (key->key != INVALID_KEY) 898 event = find_create_kvm_event(kvm, key, sample); 899 900 vcpu_record->last_event = event; 901 902 return true; 903 } 904 905 static bool skip_event(const char *event) 906 { 907 const char * const *skip_events; 908 909 for (skip_events = kvm_skip_events; *skip_events; skip_events++) 910 if (!strcmp(event, *skip_events)) 911 return true; 912 913 return false; 914 } 915 916 static bool handle_end_event(struct perf_kvm_stat *kvm, 917 struct vcpu_event_record *vcpu_record, 918 struct event_key *key, 919 struct perf_sample *sample) 920 { 921 struct kvm_event *event; 922 u64 time_begin, time_diff; 923 int vcpu; 924 925 if (kvm->trace_vcpu == -1) 926 vcpu = -1; 927 else 928 vcpu = vcpu_record->vcpu_id; 929 930 event = vcpu_record->last_event; 931 time_begin = vcpu_record->start_time; 932 933 /* The begin event is not caught. */ 934 if (!time_begin) 935 return true; 936 937 /* 938 * In some case, the 'begin event' only records the start timestamp, 939 * the actual event is recognized in the 'end event' (e.g. mmio-event). 940 */ 941 942 /* Both begin and end events did not get the key. */ 943 if (!event && key->key == INVALID_KEY) 944 return true; 945 946 if (!event) 947 event = find_create_kvm_event(kvm, key, sample); 948 949 if (!event) 950 return false; 951 952 vcpu_record->last_event = NULL; 953 vcpu_record->start_time = 0; 954 955 /* seems to happen once in a while during live mode */ 956 if (sample->time < time_begin) { 957 pr_debug("End time before begin time; skipping event.\n"); 958 return true; 959 } 960 961 time_diff = sample->time - time_begin; 962 963 if (kvm->duration && time_diff > kvm->duration) { 964 char decode[KVM_EVENT_NAME_LEN]; 965 966 kvm->events_ops->decode_key(kvm, &event->key, decode); 967 if (!skip_event(decode)) { 968 pr_info("%" PRIu64 " VM %d, vcpu %d: %s event took %" PRIu64 "usec\n", 969 sample->time, sample->pid, vcpu_record->vcpu_id, 970 decode, time_diff / NSEC_PER_USEC); 971 } 972 } 973 974 return update_kvm_event(kvm, event, vcpu, time_diff); 975 } 976 977 static 978 struct vcpu_event_record *per_vcpu_record(struct thread *thread, 979 struct evsel *evsel, 980 struct perf_sample *sample) 981 { 982 /* Only kvm_entry records vcpu id. */ 983 if (!thread__priv(thread) && kvm_entry_event(evsel)) { 984 struct vcpu_event_record *vcpu_record; 985 986 vcpu_record = zalloc(sizeof(*vcpu_record)); 987 if (!vcpu_record) { 988 pr_err("%s: Not enough memory\n", __func__); 989 return NULL; 990 } 991 992 vcpu_record->vcpu_id = evsel__intval(evsel, sample, vcpu_id_str); 993 thread__set_priv(thread, vcpu_record); 994 } 995 996 return thread__priv(thread); 997 } 998 999 static bool handle_kvm_event(struct perf_kvm_stat *kvm, 1000 struct thread *thread, 1001 struct evsel *evsel, 1002 struct perf_sample *sample) 1003 { 1004 struct vcpu_event_record *vcpu_record; 1005 struct event_key key = { .key = INVALID_KEY, 1006 .exit_reasons = kvm->exit_reasons }; 1007 1008 vcpu_record = per_vcpu_record(thread, evsel, sample); 1009 if (!vcpu_record) 1010 return true; 1011 1012 /* only process events for vcpus user cares about */ 1013 if ((kvm->trace_vcpu != -1) && 1014 (kvm->trace_vcpu != vcpu_record->vcpu_id)) 1015 return true; 1016 1017 if (kvm->events_ops->is_begin_event(evsel, sample, &key)) 1018 return handle_begin_event(kvm, vcpu_record, &key, sample); 1019 1020 if (is_child_event(kvm, evsel, sample, &key)) 1021 return handle_child_event(kvm, vcpu_record, &key, sample); 1022 1023 if (kvm->events_ops->is_end_event(evsel, sample, &key)) 1024 return handle_end_event(kvm, vcpu_record, &key, sample); 1025 1026 return true; 1027 } 1028 1029 static bool is_valid_key(struct perf_kvm_stat *kvm) 1030 { 1031 static const char *key_array[] = { 1032 "ev_name", "sample", "time", "max_t", "min_t", "mean_t", 1033 }; 1034 unsigned int i; 1035 1036 for (i = 0; i < ARRAY_SIZE(key_array); i++) 1037 if (!strcmp(key_array[i], kvm->sort_key)) 1038 return true; 1039 1040 pr_err("Unsupported sort key: %s\n", kvm->sort_key); 1041 return false; 1042 } 1043 1044 static bool event_is_valid(struct kvm_event *event, int vcpu) 1045 { 1046 return !!get_event_count(event, vcpu); 1047 } 1048 1049 static int filter_cb(struct hist_entry *he, void *arg __maybe_unused) 1050 { 1051 struct kvm_event *event; 1052 struct perf_kvm_stat *perf_kvm; 1053 1054 event = container_of(he, struct kvm_event, he); 1055 perf_kvm = event->perf_kvm; 1056 if (!event_is_valid(event, perf_kvm->trace_vcpu)) 1057 he->filtered = 1; 1058 else 1059 he->filtered = 0; 1060 return 0; 1061 } 1062 1063 static void sort_result(struct perf_kvm_stat *kvm) 1064 { 1065 struct ui_progress prog; 1066 const char *output_columns = "ev_name,sample,percent_sample," 1067 "time,percent_time,max_t,min_t,mean_t"; 1068 1069 kvm_hists__reinit(output_columns, kvm->sort_key); 1070 ui_progress__init(&prog, kvm_hists.hists.nr_entries, "Sorting..."); 1071 hists__collapse_resort(&kvm_hists.hists, NULL); 1072 hists__output_resort_cb(&kvm_hists.hists, NULL, filter_cb); 1073 ui_progress__finish(); 1074 } 1075 1076 static void print_vcpu_info(struct perf_kvm_stat *kvm) 1077 { 1078 int vcpu = kvm->trace_vcpu; 1079 1080 pr_info("Analyze events for "); 1081 1082 if (kvm->opts.target.system_wide) 1083 pr_info("all VMs, "); 1084 else if (kvm->opts.target.pid) 1085 pr_info("pid(s) %s, ", kvm->opts.target.pid); 1086 else 1087 pr_info("dazed and confused on what is monitored, "); 1088 1089 if (vcpu == -1) 1090 pr_info("all VCPUs:\n\n"); 1091 else 1092 pr_info("VCPU %d:\n\n", vcpu); 1093 } 1094 1095 static void show_timeofday(void) 1096 { 1097 char date[64]; 1098 struct timeval tv; 1099 struct tm ltime; 1100 1101 gettimeofday(&tv, NULL); 1102 if (localtime_r(&tv.tv_sec, <ime)) { 1103 strftime(date, sizeof(date), "%H:%M:%S", <ime); 1104 pr_info("%s.%06ld", date, tv.tv_usec); 1105 } else 1106 pr_info("00:00:00.000000"); 1107 1108 return; 1109 } 1110 1111 static void print_result(struct perf_kvm_stat *kvm) 1112 { 1113 char decode[KVM_EVENT_NAME_LEN]; 1114 struct kvm_event *event; 1115 int vcpu = kvm->trace_vcpu; 1116 struct rb_node *nd; 1117 1118 if (kvm->live) { 1119 puts(CONSOLE_CLEAR); 1120 show_timeofday(); 1121 } 1122 1123 pr_info("\n\n"); 1124 print_vcpu_info(kvm); 1125 pr_info("%*s ", KVM_EVENT_NAME_LEN, kvm->events_ops->name); 1126 pr_info("%10s ", "Samples"); 1127 pr_info("%9s ", "Samples%"); 1128 1129 pr_info("%9s ", "Time%"); 1130 pr_info("%11s ", "Min Time"); 1131 pr_info("%11s ", "Max Time"); 1132 pr_info("%16s ", "Avg time"); 1133 pr_info("\n\n"); 1134 1135 for (nd = rb_first_cached(&kvm_hists.hists.entries); nd; nd = rb_next(nd)) { 1136 struct hist_entry *he; 1137 u64 ecount, etime, max, min; 1138 1139 he = rb_entry(nd, struct hist_entry, rb_node); 1140 if (he->filtered) 1141 continue; 1142 1143 event = container_of(he, struct kvm_event, he); 1144 ecount = get_event_count(event, vcpu); 1145 etime = get_event_time(event, vcpu); 1146 max = get_event_max(event, vcpu); 1147 min = get_event_min(event, vcpu); 1148 1149 kvm->events_ops->decode_key(kvm, &event->key, decode); 1150 pr_info("%*s ", KVM_EVENT_NAME_LEN, decode); 1151 pr_info("%10llu ", (unsigned long long)ecount); 1152 pr_info("%8.2f%% ", (double)ecount / kvm->total_count * 100); 1153 pr_info("%8.2f%% ", (double)etime / kvm->total_time * 100); 1154 pr_info("%9.2fus ", (double)min / NSEC_PER_USEC); 1155 pr_info("%9.2fus ", (double)max / NSEC_PER_USEC); 1156 pr_info("%9.2fus ( +-%7.2f%% )", (double)etime / ecount / NSEC_PER_USEC, 1157 kvm_event_rel_stddev(vcpu, event)); 1158 pr_info("\n"); 1159 } 1160 1161 pr_info("\nTotal Samples:%" PRIu64 ", Total events handled time:%.2fus.\n\n", 1162 kvm->total_count, kvm->total_time / (double)NSEC_PER_USEC); 1163 1164 if (kvm->lost_events) 1165 pr_info("\nLost events: %" PRIu64 "\n\n", kvm->lost_events); 1166 } 1167 1168 #if defined(HAVE_TIMERFD_SUPPORT) && defined(HAVE_LIBTRACEEVENT) 1169 static int process_lost_event(const struct perf_tool *tool, 1170 union perf_event *event __maybe_unused, 1171 struct perf_sample *sample __maybe_unused, 1172 struct machine *machine __maybe_unused) 1173 { 1174 struct perf_kvm_stat *kvm = container_of(tool, struct perf_kvm_stat, tool); 1175 1176 kvm->lost_events++; 1177 return 0; 1178 } 1179 #endif 1180 1181 static bool skip_sample(struct perf_kvm_stat *kvm, 1182 struct perf_sample *sample) 1183 { 1184 if (kvm->pid_list && intlist__find(kvm->pid_list, sample->pid) == NULL) 1185 return true; 1186 1187 return false; 1188 } 1189 1190 static int process_sample_event(const struct perf_tool *tool, 1191 union perf_event *event, 1192 struct perf_sample *sample, 1193 struct evsel *evsel, 1194 struct machine *machine) 1195 { 1196 int err = 0; 1197 struct thread *thread; 1198 struct perf_kvm_stat *kvm = container_of(tool, struct perf_kvm_stat, 1199 tool); 1200 1201 if (skip_sample(kvm, sample)) 1202 return 0; 1203 1204 if (machine__resolve(machine, &kvm->al, sample) < 0) { 1205 pr_warning("Fail to resolve address location, skip sample.\n"); 1206 return 0; 1207 } 1208 1209 thread = machine__findnew_thread(machine, sample->pid, sample->tid); 1210 if (thread == NULL) { 1211 pr_debug("problem processing %d event, skipping it.\n", 1212 event->header.type); 1213 return -1; 1214 } 1215 1216 if (!handle_kvm_event(kvm, thread, evsel, sample)) 1217 err = -1; 1218 1219 thread__put(thread); 1220 return err; 1221 } 1222 1223 static int cpu_isa_config(struct perf_kvm_stat *kvm) 1224 { 1225 char buf[128], *cpuid; 1226 int err; 1227 1228 if (kvm->live) { 1229 struct perf_cpu cpu = {-1}; 1230 1231 err = get_cpuid(buf, sizeof(buf), cpu); 1232 if (err != 0) { 1233 pr_err("Failed to look up CPU type: %s\n", 1234 str_error_r(err, buf, sizeof(buf))); 1235 return -err; 1236 } 1237 cpuid = buf; 1238 } else 1239 cpuid = kvm->session->header.env.cpuid; 1240 1241 if (!cpuid) { 1242 pr_err("Failed to look up CPU type\n"); 1243 return -EINVAL; 1244 } 1245 1246 err = cpu_isa_init(kvm, cpuid); 1247 if (err == -ENOTSUP) 1248 pr_err("CPU %s is not supported.\n", cpuid); 1249 1250 return err; 1251 } 1252 1253 static bool verify_vcpu(int vcpu) 1254 { 1255 if (vcpu != -1 && vcpu < 0) { 1256 pr_err("Invalid vcpu:%d.\n", vcpu); 1257 return false; 1258 } 1259 1260 return true; 1261 } 1262 1263 #if defined(HAVE_TIMERFD_SUPPORT) && defined(HAVE_LIBTRACEEVENT) 1264 /* keeping the max events to a modest level to keep 1265 * the processing of samples per mmap smooth. 1266 */ 1267 #define PERF_KVM__MAX_EVENTS_PER_MMAP 25 1268 1269 static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx, 1270 u64 *mmap_time) 1271 { 1272 struct evlist *evlist = kvm->evlist; 1273 union perf_event *event; 1274 struct mmap *md; 1275 u64 timestamp; 1276 s64 n = 0; 1277 int err; 1278 1279 *mmap_time = ULLONG_MAX; 1280 md = &evlist->mmap[idx]; 1281 err = perf_mmap__read_init(&md->core); 1282 if (err < 0) 1283 return (err == -EAGAIN) ? 0 : -1; 1284 1285 while ((event = perf_mmap__read_event(&md->core)) != NULL) { 1286 err = evlist__parse_sample_timestamp(evlist, event, ×tamp); 1287 if (err) { 1288 perf_mmap__consume(&md->core); 1289 pr_err("Failed to parse sample\n"); 1290 return -1; 1291 } 1292 1293 err = perf_session__queue_event(kvm->session, event, timestamp, 0, NULL); 1294 /* 1295 * FIXME: Here we can't consume the event, as perf_session__queue_event will 1296 * point to it, and it'll get possibly overwritten by the kernel. 1297 */ 1298 perf_mmap__consume(&md->core); 1299 1300 if (err) { 1301 pr_err("Failed to enqueue sample: %d\n", err); 1302 return -1; 1303 } 1304 1305 /* save time stamp of our first sample for this mmap */ 1306 if (n == 0) 1307 *mmap_time = timestamp; 1308 1309 /* limit events per mmap handled all at once */ 1310 n++; 1311 if (n == PERF_KVM__MAX_EVENTS_PER_MMAP) 1312 break; 1313 } 1314 1315 perf_mmap__read_done(&md->core); 1316 return n; 1317 } 1318 1319 static int perf_kvm__mmap_read(struct perf_kvm_stat *kvm) 1320 { 1321 int i, err, throttled = 0; 1322 s64 n, ntotal = 0; 1323 u64 flush_time = ULLONG_MAX, mmap_time; 1324 1325 for (i = 0; i < kvm->evlist->core.nr_mmaps; i++) { 1326 n = perf_kvm__mmap_read_idx(kvm, i, &mmap_time); 1327 if (n < 0) 1328 return -1; 1329 1330 /* flush time is going to be the minimum of all the individual 1331 * mmap times. Essentially, we flush all the samples queued up 1332 * from the last pass under our minimal start time -- that leaves 1333 * a very small race for samples to come in with a lower timestamp. 1334 * The ioctl to return the perf_clock timestamp should close the 1335 * race entirely. 1336 */ 1337 if (mmap_time < flush_time) 1338 flush_time = mmap_time; 1339 1340 ntotal += n; 1341 if (n == PERF_KVM__MAX_EVENTS_PER_MMAP) 1342 throttled = 1; 1343 } 1344 1345 /* flush queue after each round in which we processed events */ 1346 if (ntotal) { 1347 struct ordered_events *oe = &kvm->session->ordered_events; 1348 1349 oe->next_flush = flush_time; 1350 err = ordered_events__flush(oe, OE_FLUSH__ROUND); 1351 if (err) { 1352 if (kvm->lost_events) 1353 pr_info("\nLost events: %" PRIu64 "\n\n", 1354 kvm->lost_events); 1355 return err; 1356 } 1357 } 1358 1359 return throttled; 1360 } 1361 1362 static volatile int done; 1363 1364 static void sig_handler(int sig __maybe_unused) 1365 { 1366 done = 1; 1367 } 1368 1369 static int perf_kvm__timerfd_create(struct perf_kvm_stat *kvm) 1370 { 1371 struct itimerspec new_value; 1372 int rc = -1; 1373 1374 kvm->timerfd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK); 1375 if (kvm->timerfd < 0) { 1376 pr_err("timerfd_create failed\n"); 1377 goto out; 1378 } 1379 1380 new_value.it_value.tv_sec = kvm->display_time; 1381 new_value.it_value.tv_nsec = 0; 1382 new_value.it_interval.tv_sec = kvm->display_time; 1383 new_value.it_interval.tv_nsec = 0; 1384 1385 if (timerfd_settime(kvm->timerfd, 0, &new_value, NULL) != 0) { 1386 pr_err("timerfd_settime failed: %d\n", errno); 1387 close(kvm->timerfd); 1388 goto out; 1389 } 1390 1391 rc = 0; 1392 out: 1393 return rc; 1394 } 1395 1396 static int perf_kvm__handle_timerfd(struct perf_kvm_stat *kvm) 1397 { 1398 uint64_t c; 1399 int rc; 1400 1401 rc = read(kvm->timerfd, &c, sizeof(uint64_t)); 1402 if (rc < 0) { 1403 if (errno == EAGAIN) 1404 return 0; 1405 1406 pr_err("Failed to read timer fd: %d\n", errno); 1407 return -1; 1408 } 1409 1410 if (rc != sizeof(uint64_t)) { 1411 pr_err("Error reading timer fd - invalid size returned\n"); 1412 return -1; 1413 } 1414 1415 if (c != 1) 1416 pr_debug("Missed timer beats: %" PRIu64 "\n", c-1); 1417 1418 /* update display */ 1419 sort_result(kvm); 1420 print_result(kvm); 1421 1422 /* Reset sort list to "ev_name" */ 1423 kvm_hists__reinit(NULL, "ev_name"); 1424 1425 /* reset counts */ 1426 clear_events_cache_stats(); 1427 kvm->total_count = 0; 1428 kvm->total_time = 0; 1429 kvm->lost_events = 0; 1430 1431 return 0; 1432 } 1433 1434 static int fd_set_nonblock(int fd) 1435 { 1436 long arg = 0; 1437 1438 arg = fcntl(fd, F_GETFL); 1439 if (arg < 0) { 1440 pr_err("Failed to get current flags for fd %d\n", fd); 1441 return -1; 1442 } 1443 1444 if (fcntl(fd, F_SETFL, arg | O_NONBLOCK) < 0) { 1445 pr_err("Failed to set non-block option on fd %d\n", fd); 1446 return -1; 1447 } 1448 1449 return 0; 1450 } 1451 1452 static int perf_kvm__handle_stdin(void) 1453 { 1454 int c; 1455 1456 c = getc(stdin); 1457 if (c == 'q') 1458 return 1; 1459 1460 return 0; 1461 } 1462 1463 static int kvm_events_live_report(struct perf_kvm_stat *kvm) 1464 { 1465 int nr_stdin, ret, err = -EINVAL; 1466 struct termios save; 1467 1468 /* live flag must be set first */ 1469 kvm->live = true; 1470 1471 ret = cpu_isa_config(kvm); 1472 if (ret < 0) 1473 return ret; 1474 1475 if (!verify_vcpu(kvm->trace_vcpu) || 1476 !is_valid_key(kvm) || 1477 !register_kvm_events_ops(kvm)) { 1478 goto out; 1479 } 1480 1481 set_term_quiet_input(&save); 1482 1483 kvm_hists__init(); 1484 1485 signal(SIGINT, sig_handler); 1486 signal(SIGTERM, sig_handler); 1487 1488 /* add timer fd */ 1489 if (perf_kvm__timerfd_create(kvm) < 0) { 1490 err = -1; 1491 goto out; 1492 } 1493 1494 if (evlist__add_pollfd(kvm->evlist, kvm->timerfd) < 0) 1495 goto out; 1496 1497 nr_stdin = evlist__add_pollfd(kvm->evlist, fileno(stdin)); 1498 if (nr_stdin < 0) 1499 goto out; 1500 1501 if (fd_set_nonblock(fileno(stdin)) != 0) 1502 goto out; 1503 1504 /* everything is good - enable the events and process */ 1505 evlist__enable(kvm->evlist); 1506 1507 while (!done) { 1508 struct fdarray *fda = &kvm->evlist->core.pollfd; 1509 int rc; 1510 1511 rc = perf_kvm__mmap_read(kvm); 1512 if (rc < 0) 1513 break; 1514 1515 err = perf_kvm__handle_timerfd(kvm); 1516 if (err) 1517 goto out; 1518 1519 if (fda->entries[nr_stdin].revents & POLLIN) 1520 done = perf_kvm__handle_stdin(); 1521 1522 if (!rc && !done) 1523 err = evlist__poll(kvm->evlist, 100); 1524 } 1525 1526 evlist__disable(kvm->evlist); 1527 1528 if (err == 0) { 1529 sort_result(kvm); 1530 print_result(kvm); 1531 } 1532 1533 out: 1534 hists__delete_entries(&kvm_hists.hists); 1535 1536 if (kvm->timerfd >= 0) 1537 close(kvm->timerfd); 1538 1539 tcsetattr(0, TCSAFLUSH, &save); 1540 return err; 1541 } 1542 1543 static int kvm_live_open_events(struct perf_kvm_stat *kvm) 1544 { 1545 int err, rc = -1; 1546 struct evsel *pos; 1547 struct evlist *evlist = kvm->evlist; 1548 char sbuf[STRERR_BUFSIZE]; 1549 1550 evlist__config(evlist, &kvm->opts, NULL); 1551 1552 /* 1553 * Note: exclude_{guest,host} do not apply here. 1554 * This command processes KVM tracepoints from host only 1555 */ 1556 evlist__for_each_entry(evlist, pos) { 1557 struct perf_event_attr *attr = &pos->core.attr; 1558 1559 /* make sure these *are* set */ 1560 evsel__set_sample_bit(pos, TID); 1561 evsel__set_sample_bit(pos, TIME); 1562 evsel__set_sample_bit(pos, CPU); 1563 evsel__set_sample_bit(pos, RAW); 1564 /* make sure these are *not*; want as small a sample as possible */ 1565 evsel__reset_sample_bit(pos, PERIOD); 1566 evsel__reset_sample_bit(pos, IP); 1567 evsel__reset_sample_bit(pos, CALLCHAIN); 1568 evsel__reset_sample_bit(pos, ADDR); 1569 evsel__reset_sample_bit(pos, READ); 1570 attr->mmap = 0; 1571 attr->comm = 0; 1572 attr->task = 0; 1573 1574 attr->sample_period = 1; 1575 1576 attr->watermark = 0; 1577 attr->wakeup_events = 1000; 1578 1579 /* will enable all once we are ready */ 1580 attr->disabled = 1; 1581 } 1582 1583 err = evlist__open(evlist); 1584 if (err < 0) { 1585 printf("Couldn't create the events: %s\n", 1586 str_error_r(errno, sbuf, sizeof(sbuf))); 1587 goto out; 1588 } 1589 1590 if (evlist__mmap(evlist, kvm->opts.mmap_pages) < 0) { 1591 ui__error("Failed to mmap the events: %s\n", 1592 str_error_r(errno, sbuf, sizeof(sbuf))); 1593 evlist__close(evlist); 1594 goto out; 1595 } 1596 1597 rc = 0; 1598 1599 out: 1600 return rc; 1601 } 1602 #endif 1603 1604 static int read_events(struct perf_kvm_stat *kvm) 1605 { 1606 int ret; 1607 1608 struct perf_data file = { 1609 .path = kvm->file_name, 1610 .mode = PERF_DATA_MODE_READ, 1611 .force = kvm->force, 1612 }; 1613 1614 perf_tool__init(&kvm->tool, /*ordered_events=*/true); 1615 kvm->tool.sample = process_sample_event; 1616 kvm->tool.comm = perf_event__process_comm; 1617 kvm->tool.namespaces = perf_event__process_namespaces; 1618 1619 kvm->session = perf_session__new(&file, &kvm->tool); 1620 if (IS_ERR(kvm->session)) { 1621 pr_err("Initializing perf session failed\n"); 1622 return PTR_ERR(kvm->session); 1623 } 1624 1625 symbol__init(&kvm->session->header.env); 1626 1627 if (!perf_session__has_traces(kvm->session, "kvm record")) { 1628 ret = -EINVAL; 1629 goto out_delete; 1630 } 1631 1632 /* 1633 * Do not use 'isa' recorded in kvm_exit tracepoint since it is not 1634 * traced in the old kernel. 1635 */ 1636 ret = cpu_isa_config(kvm); 1637 if (ret < 0) 1638 goto out_delete; 1639 1640 ret = perf_session__process_events(kvm->session); 1641 1642 out_delete: 1643 perf_session__delete(kvm->session); 1644 return ret; 1645 } 1646 1647 static int parse_target_str(struct perf_kvm_stat *kvm) 1648 { 1649 if (kvm->opts.target.pid) { 1650 kvm->pid_list = intlist__new(kvm->opts.target.pid); 1651 if (kvm->pid_list == NULL) { 1652 pr_err("Error parsing process id string\n"); 1653 return -EINVAL; 1654 } 1655 } 1656 1657 return 0; 1658 } 1659 1660 static int kvm_events_report_vcpu(struct perf_kvm_stat *kvm) 1661 { 1662 int ret = -EINVAL; 1663 int vcpu = kvm->trace_vcpu; 1664 1665 if (parse_target_str(kvm) != 0) 1666 goto exit; 1667 1668 if (!verify_vcpu(vcpu)) 1669 goto exit; 1670 1671 if (!is_valid_key(kvm)) 1672 goto exit; 1673 1674 if (!register_kvm_events_ops(kvm)) 1675 goto exit; 1676 1677 if (kvm->use_stdio) { 1678 use_browser = 0; 1679 setup_pager(); 1680 } else { 1681 use_browser = 1; 1682 } 1683 1684 setup_browser(false); 1685 1686 kvm_hists__init(); 1687 1688 ret = read_events(kvm); 1689 if (ret) 1690 goto exit; 1691 1692 sort_result(kvm); 1693 kvm_display(kvm); 1694 1695 exit: 1696 hists__delete_entries(&kvm_hists.hists); 1697 return ret; 1698 } 1699 1700 #define STRDUP_FAIL_EXIT(s) \ 1701 ({ char *_p; \ 1702 _p = strdup(s); \ 1703 if (!_p) \ 1704 return -ENOMEM; \ 1705 _p; \ 1706 }) 1707 1708 int __weak setup_kvm_events_tp(struct perf_kvm_stat *kvm __maybe_unused) 1709 { 1710 return 0; 1711 } 1712 1713 static int 1714 kvm_events_record(struct perf_kvm_stat *kvm, int argc, const char **argv) 1715 { 1716 unsigned int rec_argc, i, j, events_tp_size; 1717 const char **rec_argv; 1718 const char * const record_args[] = { 1719 "record", 1720 "-R", 1721 "-m", "1024", 1722 "-c", "1", 1723 }; 1724 const char * const kvm_stat_record_usage[] = { 1725 "perf kvm stat record [<options>]", 1726 NULL 1727 }; 1728 const char * const *events_tp; 1729 int ret; 1730 1731 events_tp_size = 0; 1732 ret = setup_kvm_events_tp(kvm); 1733 if (ret < 0) { 1734 pr_err("Unable to setup the kvm tracepoints\n"); 1735 return ret; 1736 } 1737 1738 for (events_tp = kvm_events_tp; *events_tp; events_tp++) 1739 events_tp_size++; 1740 1741 rec_argc = ARRAY_SIZE(record_args) + argc + 2 + 1742 2 * events_tp_size; 1743 rec_argv = calloc(rec_argc + 1, sizeof(char *)); 1744 1745 if (rec_argv == NULL) 1746 return -ENOMEM; 1747 1748 for (i = 0; i < ARRAY_SIZE(record_args); i++) 1749 rec_argv[i] = STRDUP_FAIL_EXIT(record_args[i]); 1750 1751 for (j = 0; j < events_tp_size; j++) { 1752 rec_argv[i++] = "-e"; 1753 rec_argv[i++] = STRDUP_FAIL_EXIT(kvm_events_tp[j]); 1754 } 1755 1756 rec_argv[i++] = STRDUP_FAIL_EXIT("-o"); 1757 rec_argv[i++] = STRDUP_FAIL_EXIT(kvm->file_name); 1758 1759 for (j = 1; j < (unsigned int)argc; j++, i++) 1760 rec_argv[i] = argv[j]; 1761 1762 set_option_flag(record_options, 'e', "event", PARSE_OPT_HIDDEN); 1763 set_option_flag(record_options, 0, "filter", PARSE_OPT_HIDDEN); 1764 set_option_flag(record_options, 'R', "raw-samples", PARSE_OPT_HIDDEN); 1765 1766 set_option_flag(record_options, 'F', "freq", PARSE_OPT_DISABLED); 1767 set_option_flag(record_options, 0, "group", PARSE_OPT_DISABLED); 1768 set_option_flag(record_options, 'g', NULL, PARSE_OPT_DISABLED); 1769 set_option_flag(record_options, 0, "call-graph", PARSE_OPT_DISABLED); 1770 set_option_flag(record_options, 'd', "data", PARSE_OPT_DISABLED); 1771 set_option_flag(record_options, 'T', "timestamp", PARSE_OPT_DISABLED); 1772 set_option_flag(record_options, 'P', "period", PARSE_OPT_DISABLED); 1773 set_option_flag(record_options, 'n', "no-samples", PARSE_OPT_DISABLED); 1774 set_option_flag(record_options, 'N', "no-buildid-cache", PARSE_OPT_DISABLED); 1775 set_option_flag(record_options, 'B', "no-buildid", PARSE_OPT_DISABLED); 1776 set_option_flag(record_options, 'G', "cgroup", PARSE_OPT_DISABLED); 1777 set_option_flag(record_options, 'b', "branch-any", PARSE_OPT_DISABLED); 1778 set_option_flag(record_options, 'j', "branch-filter", PARSE_OPT_DISABLED); 1779 set_option_flag(record_options, 'W', "weight", PARSE_OPT_DISABLED); 1780 set_option_flag(record_options, 0, "transaction", PARSE_OPT_DISABLED); 1781 1782 record_usage = kvm_stat_record_usage; 1783 return cmd_record(i, rec_argv); 1784 } 1785 1786 static int 1787 kvm_events_report(struct perf_kvm_stat *kvm, int argc, const char **argv) 1788 { 1789 const struct option kvm_events_report_options[] = { 1790 OPT_STRING(0, "event", &kvm->report_event, "report event", 1791 "event for reporting: vmexit, " 1792 "mmio (x86 only), ioport (x86 only)"), 1793 OPT_INTEGER(0, "vcpu", &kvm->trace_vcpu, 1794 "vcpu id to report"), 1795 OPT_STRING('k', "key", &kvm->sort_key, "sort-key", 1796 "key for sorting: sample(sort by samples number)" 1797 " time (sort by avg time)"), 1798 OPT_STRING('p', "pid", &kvm->opts.target.pid, "pid", 1799 "analyze events only for given process id(s)"), 1800 OPT_BOOLEAN('f', "force", &kvm->force, "don't complain, do it"), 1801 OPT_BOOLEAN(0, "stdio", &kvm->use_stdio, "use the stdio interface"), 1802 OPT_END() 1803 }; 1804 1805 const char * const kvm_events_report_usage[] = { 1806 "perf kvm stat report [<options>]", 1807 NULL 1808 }; 1809 1810 if (argc) { 1811 argc = parse_options(argc, argv, 1812 kvm_events_report_options, 1813 kvm_events_report_usage, 0); 1814 if (argc) 1815 usage_with_options(kvm_events_report_usage, 1816 kvm_events_report_options); 1817 } 1818 1819 #ifndef HAVE_SLANG_SUPPORT 1820 kvm->use_stdio = true; 1821 #endif 1822 1823 if (!kvm->opts.target.pid) 1824 kvm->opts.target.system_wide = true; 1825 1826 return kvm_events_report_vcpu(kvm); 1827 } 1828 1829 #if defined(HAVE_TIMERFD_SUPPORT) && defined(HAVE_LIBTRACEEVENT) 1830 static struct evlist *kvm_live_event_list(void) 1831 { 1832 struct evlist *evlist; 1833 char *tp, *name, *sys; 1834 int err = -1; 1835 const char * const *events_tp; 1836 1837 evlist = evlist__new(); 1838 if (evlist == NULL) 1839 return NULL; 1840 1841 for (events_tp = kvm_events_tp; *events_tp; events_tp++) { 1842 1843 tp = strdup(*events_tp); 1844 if (tp == NULL) 1845 goto out; 1846 1847 /* split tracepoint into subsystem and name */ 1848 sys = tp; 1849 name = strchr(tp, ':'); 1850 if (name == NULL) { 1851 pr_err("Error parsing %s tracepoint: subsystem delimiter not found\n", 1852 *events_tp); 1853 free(tp); 1854 goto out; 1855 } 1856 *name = '\0'; 1857 name++; 1858 1859 if (evlist__add_newtp(evlist, sys, name, NULL)) { 1860 pr_err("Failed to add %s tracepoint to the list\n", *events_tp); 1861 free(tp); 1862 goto out; 1863 } 1864 1865 free(tp); 1866 } 1867 1868 err = 0; 1869 1870 out: 1871 if (err) { 1872 evlist__delete(evlist); 1873 evlist = NULL; 1874 } 1875 1876 return evlist; 1877 } 1878 1879 static int kvm_events_live(struct perf_kvm_stat *kvm, 1880 int argc, const char **argv) 1881 { 1882 char errbuf[BUFSIZ]; 1883 int err; 1884 1885 const struct option live_options[] = { 1886 OPT_STRING('p', "pid", &kvm->opts.target.pid, "pid", 1887 "record events on existing process id"), 1888 OPT_CALLBACK('m', "mmap-pages", &kvm->opts.mmap_pages, "pages", 1889 "number of mmap data pages", evlist__parse_mmap_pages), 1890 OPT_INCR('v', "verbose", &verbose, 1891 "be more verbose (show counter open errors, etc)"), 1892 OPT_BOOLEAN('a', "all-cpus", &kvm->opts.target.system_wide, 1893 "system-wide collection from all CPUs"), 1894 OPT_UINTEGER('d', "display", &kvm->display_time, 1895 "time in seconds between display updates"), 1896 OPT_STRING(0, "event", &kvm->report_event, "report event", 1897 "event for reporting: " 1898 "vmexit, mmio (x86 only), ioport (x86 only)"), 1899 OPT_INTEGER(0, "vcpu", &kvm->trace_vcpu, 1900 "vcpu id to report"), 1901 OPT_STRING('k', "key", &kvm->sort_key, "sort-key", 1902 "key for sorting: sample(sort by samples number)" 1903 " time (sort by avg time)"), 1904 OPT_U64(0, "duration", &kvm->duration, 1905 "show events other than" 1906 " HLT (x86 only) or Wait state (s390 only)" 1907 " that take longer than duration usecs"), 1908 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout, 1909 "per thread proc mmap processing timeout in ms"), 1910 OPT_END() 1911 }; 1912 const char * const live_usage[] = { 1913 "perf kvm stat live [<options>]", 1914 NULL 1915 }; 1916 struct perf_data data = { 1917 .mode = PERF_DATA_MODE_WRITE, 1918 }; 1919 1920 1921 /* event handling */ 1922 perf_tool__init(&kvm->tool, /*ordered_events=*/true); 1923 kvm->tool.sample = process_sample_event; 1924 kvm->tool.comm = perf_event__process_comm; 1925 kvm->tool.exit = perf_event__process_exit; 1926 kvm->tool.fork = perf_event__process_fork; 1927 kvm->tool.lost = process_lost_event; 1928 kvm->tool.namespaces = perf_event__process_namespaces; 1929 1930 /* set defaults */ 1931 kvm->display_time = 1; 1932 kvm->opts.user_interval = 1; 1933 kvm->opts.mmap_pages = 512; 1934 kvm->opts.target.uses_mmap = false; 1935 kvm->opts.target.uid_str = NULL; 1936 kvm->opts.target.uid = UINT_MAX; 1937 1938 symbol__init(NULL); 1939 disable_buildid_cache(); 1940 1941 use_browser = 0; 1942 1943 if (argc) { 1944 argc = parse_options(argc, argv, live_options, 1945 live_usage, 0); 1946 if (argc) 1947 usage_with_options(live_usage, live_options); 1948 } 1949 1950 kvm->duration *= NSEC_PER_USEC; /* convert usec to nsec */ 1951 1952 /* 1953 * target related setups 1954 */ 1955 err = target__validate(&kvm->opts.target); 1956 if (err) { 1957 target__strerror(&kvm->opts.target, err, errbuf, BUFSIZ); 1958 ui__warning("%s", errbuf); 1959 } 1960 1961 if (target__none(&kvm->opts.target)) 1962 kvm->opts.target.system_wide = true; 1963 1964 1965 /* 1966 * generate the event list 1967 */ 1968 err = setup_kvm_events_tp(kvm); 1969 if (err < 0) { 1970 pr_err("Unable to setup the kvm tracepoints\n"); 1971 return err; 1972 } 1973 1974 kvm->evlist = kvm_live_event_list(); 1975 if (kvm->evlist == NULL) { 1976 err = -1; 1977 goto out; 1978 } 1979 1980 if (evlist__create_maps(kvm->evlist, &kvm->opts.target) < 0) 1981 usage_with_options(live_usage, live_options); 1982 1983 /* 1984 * perf session 1985 */ 1986 kvm->session = perf_session__new(&data, &kvm->tool); 1987 if (IS_ERR(kvm->session)) { 1988 err = PTR_ERR(kvm->session); 1989 goto out; 1990 } 1991 kvm->session->evlist = kvm->evlist; 1992 perf_session__set_id_hdr_size(kvm->session); 1993 ordered_events__set_copy_on_queue(&kvm->session->ordered_events, true); 1994 machine__synthesize_threads(&kvm->session->machines.host, &kvm->opts.target, 1995 kvm->evlist->core.threads, true, false, 1); 1996 err = kvm_live_open_events(kvm); 1997 if (err) 1998 goto out; 1999 2000 err = kvm_events_live_report(kvm); 2001 2002 out: 2003 perf_session__delete(kvm->session); 2004 kvm->session = NULL; 2005 evlist__delete(kvm->evlist); 2006 2007 return err; 2008 } 2009 #endif 2010 2011 static void print_kvm_stat_usage(void) 2012 { 2013 printf("Usage: perf kvm stat <command>\n\n"); 2014 2015 printf("# Available commands:\n"); 2016 printf("\trecord: record kvm events\n"); 2017 printf("\treport: report statistical data of kvm events\n"); 2018 printf("\tlive: live reporting of statistical data of kvm events\n"); 2019 2020 printf("\nOtherwise, it is the alias of 'perf stat':\n"); 2021 } 2022 2023 static int kvm_cmd_stat(const char *file_name, int argc, const char **argv) 2024 { 2025 struct perf_kvm_stat kvm = { 2026 .file_name = file_name, 2027 2028 .trace_vcpu = -1, 2029 .report_event = "vmexit", 2030 .sort_key = "sample", 2031 2032 }; 2033 2034 if (argc == 1) { 2035 print_kvm_stat_usage(); 2036 goto perf_stat; 2037 } 2038 2039 if (strlen(argv[1]) > 2 && strstarts("record", argv[1])) 2040 return kvm_events_record(&kvm, argc - 1, argv + 1); 2041 2042 if (strlen(argv[1]) > 2 && strstarts("report", argv[1])) 2043 return kvm_events_report(&kvm, argc - 1 , argv + 1); 2044 2045 #if defined(HAVE_TIMERFD_SUPPORT) && defined(HAVE_LIBTRACEEVENT) 2046 if (!strncmp(argv[1], "live", 4)) 2047 return kvm_events_live(&kvm, argc - 1 , argv + 1); 2048 #endif 2049 2050 perf_stat: 2051 return cmd_stat(argc, argv); 2052 } 2053 #endif /* HAVE_KVM_STAT_SUPPORT */ 2054 2055 int __weak kvm_add_default_arch_event(int *argc __maybe_unused, 2056 const char **argv __maybe_unused) 2057 { 2058 return 0; 2059 } 2060 2061 static int __cmd_record(const char *file_name, int argc, const char **argv) 2062 { 2063 int rec_argc, i = 0, j, ret; 2064 const char **rec_argv; 2065 2066 ret = kvm_add_default_arch_event(&argc, argv); 2067 if (ret) 2068 return -EINVAL; 2069 2070 rec_argc = argc + 2; 2071 rec_argv = calloc(rec_argc + 1, sizeof(char *)); 2072 rec_argv[i++] = strdup("record"); 2073 rec_argv[i++] = strdup("-o"); 2074 rec_argv[i++] = strdup(file_name); 2075 for (j = 1; j < argc; j++, i++) 2076 rec_argv[i] = argv[j]; 2077 2078 BUG_ON(i != rec_argc); 2079 2080 return cmd_record(i, rec_argv); 2081 } 2082 2083 static int __cmd_report(const char *file_name, int argc, const char **argv) 2084 { 2085 int rec_argc, i = 0, j; 2086 const char **rec_argv; 2087 2088 rec_argc = argc + 2; 2089 rec_argv = calloc(rec_argc + 1, sizeof(char *)); 2090 rec_argv[i++] = strdup("report"); 2091 rec_argv[i++] = strdup("-i"); 2092 rec_argv[i++] = strdup(file_name); 2093 for (j = 1; j < argc; j++, i++) 2094 rec_argv[i] = argv[j]; 2095 2096 BUG_ON(i != rec_argc); 2097 2098 return cmd_report(i, rec_argv); 2099 } 2100 2101 static int 2102 __cmd_buildid_list(const char *file_name, int argc, const char **argv) 2103 { 2104 int rec_argc, i = 0, j; 2105 const char **rec_argv; 2106 2107 rec_argc = argc + 2; 2108 rec_argv = calloc(rec_argc + 1, sizeof(char *)); 2109 rec_argv[i++] = strdup("buildid-list"); 2110 rec_argv[i++] = strdup("-i"); 2111 rec_argv[i++] = strdup(file_name); 2112 for (j = 1; j < argc; j++, i++) 2113 rec_argv[i] = argv[j]; 2114 2115 BUG_ON(i != rec_argc); 2116 2117 return cmd_buildid_list(i, rec_argv); 2118 } 2119 2120 int cmd_kvm(int argc, const char **argv) 2121 { 2122 const char *file_name = NULL; 2123 const struct option kvm_options[] = { 2124 OPT_STRING('i', "input", &file_name, "file", 2125 "Input file name"), 2126 OPT_STRING('o', "output", &file_name, "file", 2127 "Output file name"), 2128 OPT_BOOLEAN(0, "guest", &perf_guest, 2129 "Collect guest os data"), 2130 OPT_BOOLEAN(0, "host", &perf_host, 2131 "Collect host os data"), 2132 OPT_STRING(0, "guestmount", &symbol_conf.guestmount, "directory", 2133 "guest mount directory under which every guest os" 2134 " instance has a subdir"), 2135 OPT_STRING(0, "guestvmlinux", &symbol_conf.default_guest_vmlinux_name, 2136 "file", "file saving guest os vmlinux"), 2137 OPT_STRING(0, "guestkallsyms", &symbol_conf.default_guest_kallsyms, 2138 "file", "file saving guest os /proc/kallsyms"), 2139 OPT_STRING(0, "guestmodules", &symbol_conf.default_guest_modules, 2140 "file", "file saving guest os /proc/modules"), 2141 OPT_BOOLEAN(0, "guest-code", &symbol_conf.guest_code, 2142 "Guest code can be found in hypervisor process"), 2143 OPT_INCR('v', "verbose", &verbose, 2144 "be more verbose (show counter open errors, etc)"), 2145 OPT_END() 2146 }; 2147 2148 const char *const kvm_subcommands[] = { "top", "record", "report", "diff", 2149 "buildid-list", "stat", NULL }; 2150 const char *kvm_usage[] = { NULL, NULL }; 2151 2152 exclude_GH_default = true; 2153 perf_host = 0; 2154 perf_guest = 1; 2155 2156 argc = parse_options_subcommand(argc, argv, kvm_options, kvm_subcommands, kvm_usage, 2157 PARSE_OPT_STOP_AT_NON_OPTION); 2158 if (!argc) 2159 usage_with_options(kvm_usage, kvm_options); 2160 2161 if (!perf_host) 2162 perf_guest = 1; 2163 2164 if (!file_name) { 2165 file_name = get_filename_for_perf_kvm(); 2166 2167 if (!file_name) { 2168 pr_err("Failed to allocate memory for filename\n"); 2169 return -ENOMEM; 2170 } 2171 } 2172 2173 if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) 2174 return __cmd_record(file_name, argc, argv); 2175 else if (strlen(argv[0]) > 2 && strstarts("report", argv[0])) 2176 return __cmd_report(file_name, argc, argv); 2177 else if (strlen(argv[0]) > 2 && strstarts("diff", argv[0])) 2178 return cmd_diff(argc, argv); 2179 else if (!strcmp(argv[0], "top")) 2180 return cmd_top(argc, argv); 2181 else if (strlen(argv[0]) > 2 && strstarts("buildid-list", argv[0])) 2182 return __cmd_buildid_list(file_name, argc, argv); 2183 #if defined(HAVE_KVM_STAT_SUPPORT) && defined(HAVE_LIBTRACEEVENT) 2184 else if (strlen(argv[0]) > 2 && strstarts("stat", argv[0])) 2185 return kvm_cmd_stat(file_name, argc, argv); 2186 #endif 2187 else 2188 usage_with_options(kvm_usage, kvm_options); 2189 2190 /* free usage string allocated by parse_options_subcommand */ 2191 free((void *)kvm_usage[0]); 2192 2193 return 0; 2194 } 2195