1 // SPDX-License-Identifier: GPL-2.0
2 #include "builtin.h"
3 #include "perf.h"
4
5 #include <dwarf-regs.h>
6 #include "util/build-id.h"
7 #include "util/evsel.h"
8 #include "util/evlist.h"
9 #include "util/mmap.h"
10 #include "util/term.h"
11 #include "util/symbol.h"
12 #include "util/thread.h"
13 #include "util/header.h"
14 #include "util/session.h"
15 #include "util/intlist.h"
16 #include <subcmd/pager.h>
17 #include <subcmd/parse-options.h>
18 #include "util/trace-event.h"
19 #include "util/debug.h"
20 #include "util/tool.h"
21 #include "util/stat.h"
22 #include "util/synthetic-events.h"
23 #include "util/top.h"
24 #include "util/data.h"
25 #include "util/ordered-events.h"
26 #include "util/kvm-stat.h"
27 #include "util/util.h"
28 #include "ui/browsers/hists.h"
29 #include "ui/progress.h"
30 #include "ui/ui.h"
31 #include "util/string2.h"
32
33 #include <sys/prctl.h>
34 #ifdef HAVE_TIMERFD_SUPPORT
35 #include <sys/timerfd.h>
36 #endif
37 #include <sys/time.h>
38 #include <sys/types.h>
39 #include <sys/stat.h>
40 #include <fcntl.h>
41
42 #include <linux/err.h>
43 #include <linux/kernel.h>
44 #include <linux/string.h>
45 #include <linux/time64.h>
46 #include <linux/zalloc.h>
47 #include <errno.h>
48 #include <inttypes.h>
49 #include <poll.h>
50 #include <termios.h>
51 #include <semaphore.h>
52 #include <signal.h>
53 #include <math.h>
54 #include <perf/mmap.h>
55
56 #if defined(HAVE_LIBTRACEEVENT)
/*
 * Generate get_event_<func>(): return the given stats field either
 * aggregated over all vcpus (vcpu == -1) or for a single vcpu.  A vcpu
 * index beyond the lazily-grown per-vcpu array reads as 0.
 */
#define GET_EVENT_KEY(func, field)					\
static u64 get_event_ ##func(struct kvm_event *event, int vcpu)		\
{									\
	if (vcpu == -1)							\
		return event->total.field;				\
									\
	if (vcpu >= event->max_vcpu)					\
		return 0;						\
									\
	return event->vcpu[vcpu].field;					\
}
68
/*
 * Generate both the accessor and a cmp_event_<func>() comparator used for
 * sorting.  NOTE(review): the comparator subtracts two u64 values into an
 * int64_t, so it relies on the difference fitting in 63 bits — fine for
 * times/counts in practice, but worth keeping in mind.
 */
#define COMPARE_EVENT_KEY(func, field)					\
GET_EVENT_KEY(func, field)						\
static int64_t cmp_event_ ## func(struct kvm_event *one,		\
				  struct kvm_event *two, int vcpu)	\
{									\
	return get_event_ ##func(one, vcpu) -				\
	       get_event_ ##func(two, vcpu);				\
}

/* Accessors + comparators for every sortable metric. */
COMPARE_EVENT_KEY(time, time);
COMPARE_EVENT_KEY(max, stats.max);
COMPARE_EVENT_KEY(min, stats.min);
COMPARE_EVENT_KEY(count, stats.n);
COMPARE_EVENT_KEY(mean, stats.mean);
83
/* Histogram state for 'perf kvm stat': entries plus the column/sort list. */
struct kvm_hists {
	struct hists hists;
	struct perf_hpp_list list;
};

/*
 * One displayable/sortable column: header text, fixed display width, and
 * the comparator/formatter callbacks plugged into the hpp machinery.
 */
struct kvm_dimension {
	const char *name;
	const char *header;
	int width;
	int64_t (*cmp)(struct perf_hpp_fmt *fmt, struct hist_entry *left,
		       struct hist_entry *right);
	int (*entry)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
		     struct hist_entry *he);
};

/* Glue binding a generic perf_hpp_fmt to its kvm_dimension. */
struct kvm_fmt {
	struct perf_hpp_fmt fmt;
	struct kvm_dimension *dim;
};

/* Single global histogram instance for this tool. */
static struct kvm_hists kvm_hists;
105
/*
 * Order hist entries by event name.  strcmp() is negated because the
 * histogram sorts descending by default; negating yields ascending
 * (alphabetical) display order.
 */
static int64_t ev_name_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
			   struct hist_entry *left,
			   struct hist_entry *right)
{
	int order = strcmp(left->kvm_info->name, right->kvm_info->name);

	return (int64_t)-order;
}
113
114 static int fmt_width(struct perf_hpp_fmt *fmt,
115 struct perf_hpp *hpp __maybe_unused,
116 struct hists *hists __maybe_unused);
117
ev_name_entry(struct perf_hpp_fmt * fmt,struct perf_hpp * hpp,struct hist_entry * he)118 static int ev_name_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
119 struct hist_entry *he)
120 {
121 int width = fmt_width(fmt, hpp, he->hists);
122
123 return scnprintf(hpp->buf, hpp->size, "%*s", width, he->kvm_info->name);
124 }
125
/* "Event name" column — alphabetical sort, 40 chars wide. */
static struct kvm_dimension dim_event = {
	.header		= "Event name",
	.name		= "ev_name",
	.cmp		= ev_name_cmp,
	.entry		= ev_name_entry,
	.width		= 40,
};
133
/*
 * Generate an hpp comparator for one metric: recover the kvm_event from
 * the embedded hist_entry and delegate to cmp_event_<metric>() with the
 * vcpu filter taken from the owning perf_kvm_stat.
 */
#define EV_METRIC_CMP(metric)						\
static int64_t ev_cmp_##metric(struct perf_hpp_fmt *fmt __maybe_unused,	\
			       struct hist_entry *left,			\
			       struct hist_entry *right)		\
{									\
	struct kvm_event *event_left;					\
	struct kvm_event *event_right;					\
	struct perf_kvm_stat *perf_kvm;					\
									\
	event_left  = container_of(left, struct kvm_event, he);		\
	event_right = container_of(right, struct kvm_event, he);	\
									\
	perf_kvm = event_left->perf_kvm;				\
	return cmp_event_##metric(event_left, event_right,		\
				  perf_kvm->trace_vcpu);		\
}

EV_METRIC_CMP(time)
EV_METRIC_CMP(count)
EV_METRIC_CMP(max)
EV_METRIC_CMP(min)
EV_METRIC_CMP(mean)
156
157 #define EV_METRIC_ENTRY(metric) \
158 static int ev_entry_##metric(struct perf_hpp_fmt *fmt, \
159 struct perf_hpp *hpp, \
160 struct hist_entry *he) \
161 { \
162 struct kvm_event *event; \
163 int width = fmt_width(fmt, hpp, he->hists); \
164 struct perf_kvm_stat *perf_kvm; \
165 \
166 event = container_of(he, struct kvm_event, he); \
167 perf_kvm = event->perf_kvm; \
168 return scnprintf(hpp->buf, hpp->size, "%*lu", width, \
169 get_event_##metric(event, perf_kvm->trace_vcpu)); \
170 }
171
172 EV_METRIC_ENTRY(time)
173 EV_METRIC_ENTRY(count)
174 EV_METRIC_ENTRY(max)
175 EV_METRIC_ENTRY(min)
176
/* Total handling time per event, in nanoseconds. */
static struct kvm_dimension dim_time = {
	.header		= "Time (ns)",
	.name		= "time",
	.cmp		= ev_cmp_time,
	.entry		= ev_entry_time,
	.width		= 12,
};

/* Number of samples per event. */
static struct kvm_dimension dim_count = {
	.header		= "Samples",
	.name		= "sample",
	.cmp		= ev_cmp_count,
	.entry		= ev_entry_count,
	.width		= 12,
};

/* Longest single handling time observed, in nanoseconds. */
static struct kvm_dimension dim_max_time = {
	.header		= "Max Time (ns)",
	.name		= "max_t",
	.cmp		= ev_cmp_max,
	.entry		= ev_entry_max,
	.width		= 14,
};

/* Shortest single handling time observed, in nanoseconds. */
static struct kvm_dimension dim_min_time = {
	.header		= "Min Time (ns)",
	.name		= "min_t",
	.cmp		= ev_cmp_min,
	.entry		= ev_entry_min,
	.width		= 14,
};
208
ev_entry_mean(struct perf_hpp_fmt * fmt,struct perf_hpp * hpp,struct hist_entry * he)209 static int ev_entry_mean(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
210 struct hist_entry *he)
211 {
212 struct kvm_event *event;
213 int width = fmt_width(fmt, hpp, he->hists);
214 struct perf_kvm_stat *perf_kvm;
215
216 event = container_of(he, struct kvm_event, he);
217 perf_kvm = event->perf_kvm;
218 return scnprintf(hpp->buf, hpp->size, "%*lu", width,
219 get_event_mean(event, perf_kvm->trace_vcpu));
220 }
221
/* Mean handling time per event, in nanoseconds. */
static struct kvm_dimension dim_mean_time = {
	.header		= "Mean Time (ns)",
	.name		= "mean_t",
	.cmp		= ev_cmp_mean,
	.entry		= ev_entry_mean,
	.width		= 14,
};
229
/*
 * Format a percentage into the caller's char-array buffer and evaluate
 * to that buffer ("%.2F" also handles inf/nan in uppercase).  __s must
 * be an actual array — sizeof(__s) is its capacity.
 */
#define PERC_STR(__s, __v)				\
({							\
	scnprintf(__s, sizeof(__s), "%.2F%%", __v);	\
	__s;						\
})
235
/* Percentage of @st over @tot; a zero total yields 0 instead of dividing. */
static double percent(u64 st, u64 tot)
{
	if (!tot)
		return 0;

	return 100.0 * (double)st / (double)tot;
}
240
241 #define EV_METRIC_PERCENT(metric) \
242 static int ev_percent_##metric(struct hist_entry *he) \
243 { \
244 struct kvm_event *event; \
245 struct perf_kvm_stat *perf_kvm; \
246 \
247 event = container_of(he, struct kvm_event, he); \
248 perf_kvm = event->perf_kvm; \
249 \
250 return percent(get_event_##metric(event, perf_kvm->trace_vcpu), \
251 perf_kvm->total_##metric); \
252 }
253
254 EV_METRIC_PERCENT(time)
EV_METRIC_PERCENT(count)255 EV_METRIC_PERCENT(count)
256
257 static int ev_entry_time_precent(struct perf_hpp_fmt *fmt,
258 struct perf_hpp *hpp,
259 struct hist_entry *he)
260 {
261 int width = fmt_width(fmt, hpp, he->hists);
262 double per;
263 char buf[10];
264
265 per = ev_percent_time(he);
266 return scnprintf(hpp->buf, hpp->size, "%*s", width, PERC_STR(buf, per));
267 }
268
269 static int64_t
ev_cmp_time_precent(struct perf_hpp_fmt * fmt __maybe_unused,struct hist_entry * left,struct hist_entry * right)270 ev_cmp_time_precent(struct perf_hpp_fmt *fmt __maybe_unused,
271 struct hist_entry *left, struct hist_entry *right)
272 {
273 double per_left;
274 double per_right;
275
276 per_left = ev_percent_time(left);
277 per_right = ev_percent_time(right);
278
279 return per_left - per_right;
280 }
281
/* Share of total handling time, as a percentage. */
static struct kvm_dimension dim_time_percent = {
	.header		= "Time%",
	.name		= "percent_time",
	.cmp		= ev_cmp_time_precent,
	.entry		= ev_entry_time_precent,
	.width		= 12,
};
289
ev_entry_count_precent(struct perf_hpp_fmt * fmt,struct perf_hpp * hpp,struct hist_entry * he)290 static int ev_entry_count_precent(struct perf_hpp_fmt *fmt,
291 struct perf_hpp *hpp,
292 struct hist_entry *he)
293 {
294 int width = fmt_width(fmt, hpp, he->hists);
295 double per;
296 char buf[10];
297
298 per = ev_percent_count(he);
299 return scnprintf(hpp->buf, hpp->size, "%*s", width, PERC_STR(buf, per));
300 }
301
302 static int64_t
ev_cmp_count_precent(struct perf_hpp_fmt * fmt __maybe_unused,struct hist_entry * left,struct hist_entry * right)303 ev_cmp_count_precent(struct perf_hpp_fmt *fmt __maybe_unused,
304 struct hist_entry *left, struct hist_entry *right)
305 {
306 double per_left;
307 double per_right;
308
309 per_left = ev_percent_count(left);
310 per_right = ev_percent_count(right);
311
312 return per_left - per_right;
313 }
314
/* Share of total sample count, as a percentage. */
static struct kvm_dimension dim_count_percent = {
	.header		= "Sample%",
	.name		= "percent_sample",
	.cmp		= ev_cmp_count_precent,
	.entry		= ev_entry_count_precent,
	.width		= 12,
};
322
/* NULL-terminated registry of all columns, looked up by ->name. */
static struct kvm_dimension *dimensions[] = {
	&dim_event,
	&dim_time,
	&dim_time_percent,
	&dim_count,
	&dim_count_percent,
	&dim_max_time,
	&dim_min_time,
	&dim_mean_time,
	NULL,
};
334
/* Column width comes straight from the dimension behind this format. */
static int fmt_width(struct perf_hpp_fmt *fmt,
		     struct perf_hpp *hpp __maybe_unused,
		     struct hists *hists __maybe_unused)
{
	return container_of(fmt, struct kvm_fmt, fmt)->dim->width;
}
344
/* Print the column header, right-aligned to the column width. */
static int fmt_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
		      struct hists *hists, int line __maybe_unused,
		      int *span __maybe_unused)
{
	struct kvm_fmt *kvm_fmt = container_of(fmt, struct kvm_fmt, fmt);

	return scnprintf(hpp->buf, hpp->size, "%*s",
			 fmt_width(fmt, hpp, hists), kvm_fmt->dim->header);
}
358
fmt_equal(struct perf_hpp_fmt * a,struct perf_hpp_fmt * b)359 static bool fmt_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
360 {
361 struct kvm_fmt *kvm_fmt_a = container_of(a, struct kvm_fmt, fmt);
362 struct kvm_fmt *kvm_fmt_b = container_of(b, struct kvm_fmt, fmt);
363
364 return kvm_fmt_a->dim == kvm_fmt_b->dim;
365 }
366
fmt_free(struct perf_hpp_fmt * fmt)367 static void fmt_free(struct perf_hpp_fmt *fmt)
368 {
369 struct kvm_fmt *kvm_fmt;
370
371 kvm_fmt = container_of(fmt, struct kvm_fmt, fmt);
372 free(kvm_fmt);
373 }
374
get_dimension(const char * name)375 static struct kvm_dimension *get_dimension(const char *name)
376 {
377 unsigned int i;
378
379 for (i = 0; dimensions[i] != NULL; i++) {
380 if (!strcmp(dimensions[i]->name, name))
381 return dimensions[i];
382 }
383
384 return NULL;
385 }
386
get_format(const char * name)387 static struct kvm_fmt *get_format(const char *name)
388 {
389 struct kvm_dimension *dim = get_dimension(name);
390 struct kvm_fmt *kvm_fmt;
391 struct perf_hpp_fmt *fmt;
392
393 if (!dim)
394 return NULL;
395
396 kvm_fmt = zalloc(sizeof(*kvm_fmt));
397 if (!kvm_fmt)
398 return NULL;
399
400 kvm_fmt->dim = dim;
401
402 fmt = &kvm_fmt->fmt;
403 INIT_LIST_HEAD(&fmt->list);
404 INIT_LIST_HEAD(&fmt->sort_list);
405 fmt->cmp = dim->cmp;
406 fmt->sort = dim->cmp;
407 fmt->color = NULL;
408 fmt->entry = dim->entry;
409 fmt->header = fmt_header;
410 fmt->width = fmt_width;
411 fmt->collapse = dim->cmp;
412 fmt->equal = fmt_equal;
413 fmt->free = fmt_free;
414
415 return kvm_fmt;
416 }
417
kvm_hists__init_output(struct perf_hpp_list * hpp_list,char * name)418 static int kvm_hists__init_output(struct perf_hpp_list *hpp_list, char *name)
419 {
420 struct kvm_fmt *kvm_fmt = get_format(name);
421
422 if (!kvm_fmt) {
423 pr_warning("Fail to find format for output field %s.\n", name);
424 return -EINVAL;
425 }
426
427 perf_hpp_list__column_register(hpp_list, &kvm_fmt->fmt);
428 return 0;
429 }
430
kvm_hists__init_sort(struct perf_hpp_list * hpp_list,char * name)431 static int kvm_hists__init_sort(struct perf_hpp_list *hpp_list, char *name)
432 {
433 struct kvm_fmt *kvm_fmt = get_format(name);
434
435 if (!kvm_fmt) {
436 pr_warning("Fail to find format for sorting %s.\n", name);
437 return -EINVAL;
438 }
439
440 perf_hpp_list__register_sort_field(hpp_list, &kvm_fmt->fmt);
441 return 0;
442 }
443
/*
 * Split a comma/space separated key list and feed each token to @fn
 * (column or sort-key registration).  Stops at the first failure and
 * returns that error; 0 on success or when there is nothing to do.
 *
 * Fix: initialize ret to 0.  When @list is non-NULL but contains no
 * tokens (empty string, or only separators), the loop body never ran
 * and the old code returned an uninitialized ret — undefined behavior.
 */
static int kvm_hpp_list__init(char *list,
			      struct perf_hpp_list *hpp_list,
			      int (*fn)(struct perf_hpp_list *hpp_list,
					char *name))
{
	char *tmp, *tok;
	int ret = 0;

	if (!list || !fn)
		return 0;

	for (tok = strtok_r(list, ", ", &tmp); tok;
	     tok = strtok_r(NULL, ", ", &tmp)) {
		ret = fn(hpp_list, tok);
		if (!ret)
			continue;

		/* Handle errors */
		if (ret == -EINVAL)
			pr_err("Invalid field key: '%s'", tok);
		else if (ret == -ESRCH)
			pr_err("Unknown field key: '%s'", tok);
		else
			pr_err("Fail to initialize for field key: '%s'", tok);

		break;
	}

	return ret;
}
474
kvm_hpp_list__parse(struct perf_hpp_list * hpp_list,const char * output_,const char * sort_)475 static int kvm_hpp_list__parse(struct perf_hpp_list *hpp_list,
476 const char *output_, const char *sort_)
477 {
478 char *output = output_ ? strdup(output_) : NULL;
479 char *sort = sort_ ? strdup(sort_) : NULL;
480 int ret;
481
482 ret = kvm_hpp_list__init(output, hpp_list, kvm_hists__init_output);
483 if (ret)
484 goto out;
485
486 ret = kvm_hpp_list__init(sort, hpp_list, kvm_hists__init_sort);
487 if (ret)
488 goto out;
489
490 /* Copy sort keys to output fields */
491 perf_hpp__setup_output_field(hpp_list);
492
493 /* and then copy output fields to sort keys */
494 perf_hpp__append_sort_keys(hpp_list);
495 out:
496 free(output);
497 free(sort);
498 return ret;
499 }
500
/*
 * One-time setup of the global histogram: one header line, hists tied
 * to our hpp list, then default sorting by event name.  NOTE(review):
 * the call order (nr_header_lines, __hists__init, perf_hpp_list__init)
 * mirrors upstream and is assumed intentional — do not reorder.
 */
static int kvm_hists__init(void)
{
	kvm_hists.list.nr_header_lines = 1;
	__hists__init(&kvm_hists.hists, &kvm_hists.list);
	perf_hpp_list__init(&kvm_hists.list);
	return kvm_hpp_list__parse(&kvm_hists.list, NULL, "ev_name");
}
508
/* Drop the current columns/sort keys and re-parse new ones. */
static int kvm_hists__reinit(const char *output, const char *sort)
{
	perf_hpp__reset_output_field(&kvm_hists.list);
	return kvm_hpp_list__parse(&kvm_hists.list, output, sort);
}
514 static void print_result(struct perf_kvm_stat *kvm);
515
516 #ifdef HAVE_SLANG_SUPPORT
kvm_browser__update_nr_entries(struct hist_browser * hb)517 static void kvm_browser__update_nr_entries(struct hist_browser *hb)
518 {
519 struct rb_node *nd = rb_first_cached(&hb->hists->entries);
520 u64 nr_entries = 0;
521
522 for (; nd; nd = rb_next(nd)) {
523 struct hist_entry *he = rb_entry(nd, struct hist_entry,
524 rb_node);
525
526 if (!he->filtered)
527 nr_entries++;
528 }
529
530 hb->nr_non_filtered_entries = nr_entries;
531 }
532
kvm_browser__title(struct hist_browser * browser,char * buf,size_t size)533 static int kvm_browser__title(struct hist_browser *browser,
534 char *buf, size_t size)
535 {
536 scnprintf(buf, size, "KVM event statistics (%lu entries)",
537 browser->nr_non_filtered_entries);
538 return 0;
539 }
540
541 static struct hist_browser*
perf_kvm_browser__new(struct hists * hists)542 perf_kvm_browser__new(struct hists *hists)
543 {
544 struct hist_browser *browser = hist_browser__new(hists);
545
546 if (browser)
547 browser->title = kvm_browser__title;
548
549 return browser;
550 }
551
/*
 * Run the interactive TUI browser over the kvm histogram.  Blocks in
 * the key loop until the user presses 'q'.  Returns 0, or -1 if the
 * browser could not be created.
 */
static int kvm__hists_browse(struct hists *hists)
{
	struct hist_browser *browser;
	int key = -1;

	browser = perf_kvm_browser__new(hists);
	if (browser == NULL)
		return -1;

	/* reset abort key so that it can get Ctrl-C as a key */
	SLang_reset_tty();
	SLang_init_tty(0, 0, 0);

	kvm_browser__update_nr_entries(browser);

	while (1) {
		key = hist_browser__run(browser, "? - help", true, 0);

		/* Only 'q' exits; every other key redraws the browser. */
		switch (key) {
		case 'q':
			goto out;
		default:
			break;
		}
	}

out:
	hist_browser__delete(browser);
	return 0;
}
582
kvm_display(struct perf_kvm_stat * kvm)583 static void kvm_display(struct perf_kvm_stat *kvm)
584 {
585 if (!use_browser)
586 print_result(kvm);
587 else
588 kvm__hists_browse(&kvm_hists.hists);
589 }
590
591 #else
592
/* No slang support compiled in: force stdio output. */
static void kvm_display(struct perf_kvm_stat *kvm)
{
	use_browser = 0;
	print_result(kvm);
}
598
599 #endif /* HAVE_SLANG_SUPPORT */
600
601 #endif // defined(HAVE_LIBTRACEEVENT)
602
get_filename_for_perf_kvm(void)603 static const char *get_filename_for_perf_kvm(void)
604 {
605 const char *filename;
606
607 if (perf_host && !perf_guest)
608 filename = strdup("perf.data.host");
609 else if (!perf_host && perf_guest)
610 filename = strdup("perf.data.guest");
611 else
612 filename = strdup("perf.data.kvm");
613
614 return filename;
615 }
616
617 #if defined(HAVE_LIBTRACEEVENT)
618
register_kvm_events_ops(struct perf_kvm_stat * kvm,uint16_t e_machine)619 static bool register_kvm_events_ops(struct perf_kvm_stat *kvm, uint16_t e_machine)
620 {
621 const struct kvm_reg_events_ops *events_ops;
622
623 for (events_ops = kvm_reg_events_ops(e_machine); events_ops->name; events_ops++) {
624 if (!strcmp(events_ops->name, kvm->report_event)) {
625 kvm->events_ops = events_ops->ops;
626 return true;
627 }
628 }
629
630 return false;
631 }
632
/*
 * Per-thread record (stashed in thread private data) tracking the
 * in-flight event between its begin and end tracepoints.
 */
struct vcpu_event_record {
	int vcpu_id;			/* vcpu this thread runs, from kvm_entry */
	u64 start_time;			/* timestamp of the begin event; 0 = none */
	struct kvm_event *last_event;	/* event opened by the begin tracepoint */
};
638
639 #ifdef HAVE_TIMERFD_SUPPORT
/*
 * Zero the accumulated statistics of every cached event (live mode
 * refresh): walk the collapsed tree when the hists need collapsing,
 * otherwise the incoming tree, and reset both the aggregate and the
 * per-vcpu time/stats of each kvm_event.
 */
static void clear_events_cache_stats(void)
{
	struct rb_root_cached *root;
	struct rb_node *nd;
	struct kvm_event *event;
	int i;

	if (hists__has(&kvm_hists.hists, need_collapse))
		root = &kvm_hists.hists.entries_collapsed;
	else
		root = kvm_hists.hists.entries_in;

	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
		struct hist_entry *he;

		he = rb_entry(nd, struct hist_entry, rb_node_in);
		event = container_of(he, struct kvm_event, he);

		/* reset stats for event */
		event->total.time = 0;
		init_stats(&event->total.stats);

		for (i = 0; i < event->max_vcpu; ++i) {
			event->vcpu[i].time = 0;
			init_stats(&event->vcpu[i].stats);
		}
	}
}
668 #endif
669
kvm_event_expand(struct kvm_event * event,int vcpu_id)670 static bool kvm_event_expand(struct kvm_event *event, int vcpu_id)
671 {
672 int old_max_vcpu = event->max_vcpu;
673 void *prev;
674
675 if (vcpu_id < event->max_vcpu)
676 return true;
677
678 while (event->max_vcpu <= vcpu_id)
679 event->max_vcpu += DEFAULT_VCPU_NUM;
680
681 prev = event->vcpu;
682 event->vcpu = realloc(event->vcpu,
683 event->max_vcpu * sizeof(*event->vcpu));
684 if (!event->vcpu) {
685 free(prev);
686 pr_err("Not enough memory\n");
687 return false;
688 }
689
690 memset(event->vcpu + old_max_vcpu, 0,
691 (event->max_vcpu - old_max_vcpu) * sizeof(*event->vcpu));
692 return true;
693 }
694
/*
 * hist_entry_ops allocator: carve a kvm_event with the hist_entry
 * embedded at its tail (@size is the hist_entry footprint requested by
 * the hists code) and hand back the embedded entry.  NOTE(review): the
 * hists__inc_nr_samples(.., 0) call on every allocation mirrors the
 * original code — presumably to keep nr_samples in step; confirm
 * against the hists implementation before changing.
 */
static void *kvm_he_zalloc(size_t size)
{
	struct kvm_event *kvm_ev;

	kvm_ev = zalloc(size + sizeof(*kvm_ev));
	if (!kvm_ev)
		return NULL;

	init_stats(&kvm_ev->total.stats);
	hists__inc_nr_samples(&kvm_hists.hists, 0);
	return &kvm_ev->he;
}
707
kvm_he_free(void * he)708 static void kvm_he_free(void *he)
709 {
710 struct kvm_event *kvm_ev;
711
712 kvm_ev = container_of(he, struct kvm_event, he);
713 free(kvm_ev);
714 }
715
/* Custom entry ops so each hist_entry lives inside a kvm_event. */
static struct hist_entry_ops kvm_ev_entry_ops = {
	.new	= kvm_he_zalloc,
	.free	= kvm_he_free,
};
720
/*
 * Look up (or create) the hist entry for @key and return its embedded
 * kvm_event.  The decoded key name goes into a fresh kvm_info that the
 * hists code takes over; perf_kvm/key are filled in only on first
 * creation.  Returns NULL on allocation failure.
 */
static struct kvm_event *find_create_kvm_event(struct perf_kvm_stat *kvm,
					       struct event_key *key,
					       struct perf_sample *sample)
{
	struct kvm_event *event;
	struct hist_entry *he;
	struct kvm_info *ki;

	BUG_ON(key->key == INVALID_KEY);

	ki = kvm_info__new();
	if (!ki) {
		pr_err("Failed to allocate kvm info\n");
		return NULL;
	}

	kvm->events_ops->decode_key(kvm, key, ki->name);
	he = hists__add_entry_ops(&kvm_hists.hists, &kvm_ev_entry_ops,
				  &kvm->al, NULL, NULL, NULL, ki, sample, true);
	if (he == NULL) {
		pr_err("Failed to allocate hist entry\n");
		free(ki);
		return NULL;
	}

	event = container_of(he, struct kvm_event, he);
	/* A pre-existing entry keeps its original owner and key. */
	if (!event->perf_kvm) {
		event->perf_kvm = kvm;
		event->key = *key;
	}

	return event;
}
754
handle_begin_event(struct perf_kvm_stat * kvm,struct vcpu_event_record * vcpu_record,struct event_key * key,struct perf_sample * sample)755 static bool handle_begin_event(struct perf_kvm_stat *kvm,
756 struct vcpu_event_record *vcpu_record,
757 struct event_key *key,
758 struct perf_sample *sample)
759 {
760 struct kvm_event *event = NULL;
761
762 if (key->key != INVALID_KEY)
763 event = find_create_kvm_event(kvm, key, sample);
764
765 vcpu_record->last_event = event;
766 vcpu_record->start_time = sample->time;
767 return true;
768 }
769
770 static void
kvm_update_event_stats(struct kvm_event_stats * kvm_stats,u64 time_diff)771 kvm_update_event_stats(struct kvm_event_stats *kvm_stats, u64 time_diff)
772 {
773 kvm_stats->time += time_diff;
774 update_stats(&kvm_stats->stats, time_diff);
775 }
776
kvm_event_rel_stddev(int vcpu_id,struct kvm_event * event)777 static double kvm_event_rel_stddev(int vcpu_id, struct kvm_event *event)
778 {
779 struct kvm_event_stats *kvm_stats = &event->total;
780
781 if (vcpu_id != -1)
782 kvm_stats = &event->vcpu[vcpu_id];
783
784 return rel_stddev_stats(stddev_stats(&kvm_stats->stats),
785 avg_stats(&kvm_stats->stats));
786 }
787
/*
 * Account one completed interval: bump the global totals, then the
 * aggregate bucket (vcpu_id == -1) or the per-vcpu bucket, growing the
 * vcpu array on demand.  Returns false only if that growth fails.
 */
static bool update_kvm_event(struct perf_kvm_stat *kvm,
			     struct kvm_event *event, int vcpu_id,
			     u64 time_diff)
{
	/* Update overall statistics */
	kvm->total_count++;
	kvm->total_time += time_diff;

	if (vcpu_id != -1) {
		if (!kvm_event_expand(event, vcpu_id))
			return false;

		kvm_update_event_stats(&event->vcpu[vcpu_id], time_diff);
		return true;
	}

	kvm_update_event_stats(&event->total, time_diff);
	return true;
}
807
is_child_event(struct perf_kvm_stat * kvm,struct evsel * evsel,struct perf_sample * sample,struct event_key * key)808 static bool is_child_event(struct perf_kvm_stat *kvm,
809 struct evsel *evsel,
810 struct perf_sample *sample,
811 struct event_key *key)
812 {
813 const struct child_event_ops *child_ops;
814
815 child_ops = kvm->events_ops->child_ops;
816
817 if (!child_ops)
818 return false;
819
820 for (; child_ops->name; child_ops++) {
821 if (evsel__name_is(evsel, child_ops->name)) {
822 child_ops->get_key(evsel, sample, key);
823 return true;
824 }
825 }
826
827 return false;
828 }
829
handle_child_event(struct perf_kvm_stat * kvm,struct vcpu_event_record * vcpu_record,struct event_key * key,struct perf_sample * sample)830 static bool handle_child_event(struct perf_kvm_stat *kvm,
831 struct vcpu_event_record *vcpu_record,
832 struct event_key *key,
833 struct perf_sample *sample)
834 {
835 struct kvm_event *event = NULL;
836
837 if (key->key != INVALID_KEY)
838 event = find_create_kvm_event(kvm, key, sample);
839
840 vcpu_record->last_event = event;
841
842 return true;
843 }
844
/* True if @event is on the arch's list of events never reported as slow. */
static bool skip_event(uint16_t e_machine, const char *event)
{
	const char * const *skip;

	for (skip = kvm_skip_events(e_machine); *skip; skip++) {
		if (strcmp(event, *skip) == 0)
			return true;
	}

	return false;
}
855
/*
 * Close the open interval for this thread: pair the end tracepoint with
 * the recorded begin timestamp, resolve the event (some events are only
 * identified at end time, e.g. mmio), warn about abnormally long
 * intervals, and fold the duration into the statistics.  Returns false
 * only on allocation failure inside the event machinery.
 */
static bool handle_end_event(struct perf_kvm_stat *kvm,
			     struct vcpu_event_record *vcpu_record,
			     struct event_key *key,
			     struct perf_sample *sample)
{
	struct kvm_event *event;
	u64 time_begin, time_diff;
	int vcpu;

	/* When not filtering by vcpu, account into the aggregate bucket. */
	if (kvm->trace_vcpu == -1)
		vcpu = -1;
	else
		vcpu = vcpu_record->vcpu_id;

	event = vcpu_record->last_event;
	time_begin = vcpu_record->start_time;

	/* The begin event is not caught. */
	if (!time_begin)
		return true;

	/*
	 * In some case, the 'begin event' only records the start timestamp,
	 * the actual event is recognized in the 'end event' (e.g. mmio-event).
	 */

	/* Both begin and end events did not get the key. */
	if (!event && key->key == INVALID_KEY)
		return true;

	if (!event)
		event = find_create_kvm_event(kvm, key, sample);

	if (!event)
		return false;

	/* Interval consumed: clear the record before any early return. */
	vcpu_record->last_event = NULL;
	vcpu_record->start_time = 0;

	/* seems to happen once in a while during live mode */
	if (sample->time < time_begin) {
		pr_debug("End time before begin time; skipping event.\n");
		return true;
	}

	time_diff = sample->time - time_begin;

	/* Report intervals longer than the user's --duration threshold. */
	if (kvm->duration && time_diff > kvm->duration) {
		char decode[KVM_EVENT_NAME_LEN];
		uint16_t e_machine = perf_session__e_machine(kvm->session, /*e_flags=*/NULL);

		kvm->events_ops->decode_key(kvm, &event->key, decode);
		if (!skip_event(e_machine, decode)) {
			pr_info("%" PRIu64 " VM %d, vcpu %d: %s event took %" PRIu64 "usec\n",
				sample->time, sample->pid, vcpu_record->vcpu_id,
				decode, time_diff / NSEC_PER_USEC);
		}
	}

	return update_kvm_event(kvm, event, vcpu, time_diff);
}
917
/*
 * Return this thread's vcpu_event_record, lazily creating it from the
 * first kvm_entry sample (the only tracepoint carrying the vcpu id).
 * May return NULL: on allocation failure, or while no kvm_entry has
 * been seen yet for the thread.
 */
static
struct vcpu_event_record *per_vcpu_record(struct thread *thread,
					  struct evsel *evsel,
					  struct perf_sample *sample)
{
	/* Only kvm_entry records vcpu id. */
	if (!thread__priv(thread) && kvm_entry_event(evsel)) {
		struct vcpu_event_record *vcpu_record;
		struct machine *machine = maps__machine(thread__maps(thread));
		uint16_t e_machine = thread__e_machine(thread, machine, /*e_flags=*/NULL);

		vcpu_record = zalloc(sizeof(*vcpu_record));
		if (!vcpu_record) {
			pr_err("%s: Not enough memory\n", __func__);
			return NULL;
		}

		/* The vcpu-id field name is arch-specific. */
		vcpu_record->vcpu_id = evsel__intval(evsel, sample, vcpu_id_str(e_machine));
		thread__set_priv(thread, vcpu_record);
	}

	return thread__priv(thread);
}
941
/*
 * Dispatch one tracepoint sample to the begin/child/end handler of the
 * registered event ops, after applying the per-vcpu filter.  Returns
 * false only when a handler hits an allocation failure; samples that
 * are skipped or unrecognized return true.
 */
static bool handle_kvm_event(struct perf_kvm_stat *kvm,
			     struct thread *thread,
			     struct evsel *evsel,
			     struct perf_sample *sample)
{
	struct vcpu_event_record *vcpu_record;
	struct event_key key = { .key = INVALID_KEY,
				 .exit_reasons = kvm->exit_reasons };

	vcpu_record = per_vcpu_record(thread, evsel, sample);
	if (!vcpu_record)
		return true;

	/* only process events for vcpus user cares about */
	if ((kvm->trace_vcpu != -1) &&
	    (kvm->trace_vcpu != vcpu_record->vcpu_id))
		return true;

	if (kvm->events_ops->is_begin_event(evsel, sample, &key))
		return handle_begin_event(kvm, vcpu_record, &key, sample);

	if (is_child_event(kvm, evsel, sample, &key))
		return handle_child_event(kvm, vcpu_record, &key, sample);

	if (kvm->events_ops->is_end_event(evsel, sample, &key))
		return handle_end_event(kvm, vcpu_record, &key, sample);

	return true;
}
971
is_valid_key(struct perf_kvm_stat * kvm)972 static bool is_valid_key(struct perf_kvm_stat *kvm)
973 {
974 static const char *key_array[] = {
975 "ev_name", "sample", "time", "max_t", "min_t", "mean_t",
976 };
977 unsigned int i;
978
979 for (i = 0; i < ARRAY_SIZE(key_array); i++)
980 if (!strcmp(key_array[i], kvm->sort_key))
981 return true;
982
983 pr_err("Unsupported sort key: %s\n", kvm->sort_key);
984 return false;
985 }
986
/* An event is shown only if it has samples for the selected vcpu scope. */
static bool event_is_valid(struct kvm_event *event, int vcpu)
{
	return get_event_count(event, vcpu) != 0;
}
991
/* Resort callback: hide entries with no samples in the selected scope. */
static int filter_cb(struct hist_entry *he, void *arg __maybe_unused)
{
	struct kvm_event *event = container_of(he, struct kvm_event, he);
	struct perf_kvm_stat *perf_kvm = event->perf_kvm;

	he->filtered = event_is_valid(event, perf_kvm->trace_vcpu) ? 0 : 1;
	return 0;
}
1005
/*
 * Rebuild the histogram for display: install the full column set plus
 * the user's sort key, then collapse and output-resort with the
 * zero-sample filter applied.
 */
static void sort_result(struct perf_kvm_stat *kvm)
{
	struct ui_progress prog;
	const char *output_columns = "ev_name,sample,percent_sample,"
				     "time,percent_time,max_t,min_t,mean_t";

	kvm_hists__reinit(output_columns, kvm->sort_key);
	ui_progress__init(&prog, kvm_hists.hists.nr_entries, "Sorting...");
	hists__collapse_resort(&kvm_hists.hists, NULL);
	hists__output_resort_cb(&kvm_hists.hists, NULL, filter_cb);
	ui_progress__finish();
}
1018
print_vcpu_info(struct perf_kvm_stat * kvm)1019 static void print_vcpu_info(struct perf_kvm_stat *kvm)
1020 {
1021 int vcpu = kvm->trace_vcpu;
1022
1023 pr_info("Analyze events for ");
1024
1025 if (kvm->opts.target.system_wide)
1026 pr_info("all VMs, ");
1027 else if (kvm->opts.target.pid)
1028 pr_info("pid(s) %s, ", kvm->opts.target.pid);
1029 else
1030 pr_info("dazed and confused on what is monitored, ");
1031
1032 if (vcpu == -1)
1033 pr_info("all VCPUs:\n\n");
1034 else
1035 pr_info("VCPU %d:\n\n", vcpu);
1036 }
1037
/*
 * Print the current wall-clock time (HH:MM:SS.usec) for the live-mode
 * screen header; falls back to zeros if localtime_r() fails.
 */
static void show_timeofday(void)
{
	struct timeval tv;
	struct tm ltime;
	char date[64];

	gettimeofday(&tv, NULL);
	if (!localtime_r(&tv.tv_sec, &ltime)) {
		pr_info("00:00:00.000000");
		return;
	}

	strftime(date, sizeof(date), "%H:%M:%S", &ltime);
	pr_info("%s.%06ld", date, tv.tv_usec);
}
1053
/*
 * Print the sorted statistics table to stdio.  In live mode the screen
 * is cleared and time-stamped first.  Filtered (zero-sample) entries
 * are skipped, which also guarantees ecount > 0 for the mean/stddev
 * division below.
 */
static void print_result(struct perf_kvm_stat *kvm)
{
	char decode[KVM_EVENT_NAME_LEN];
	struct kvm_event *event;
	int vcpu = kvm->trace_vcpu;
	struct rb_node *nd;

	if (kvm->live) {
		puts(CONSOLE_CLEAR);
		show_timeofday();
	}

	/* Column headers. */
	pr_info("\n\n");
	print_vcpu_info(kvm);
	pr_info("%*s ", KVM_EVENT_NAME_LEN, kvm->events_ops->name);
	pr_info("%10s ", "Samples");
	pr_info("%9s ", "Samples%");

	pr_info("%9s ", "Time%");
	pr_info("%11s ", "Min Time");
	pr_info("%11s ", "Max Time");
	pr_info("%16s ", "Avg time");
	pr_info("\n\n");

	for (nd = rb_first_cached(&kvm_hists.hists.entries); nd; nd = rb_next(nd)) {
		struct hist_entry *he;
		u64 ecount, etime, max, min;

		he = rb_entry(nd, struct hist_entry, rb_node);
		if (he->filtered)
			continue;

		event = container_of(he, struct kvm_event, he);
		ecount = get_event_count(event, vcpu);
		etime = get_event_time(event, vcpu);
		max = get_event_max(event, vcpu);
		min = get_event_min(event, vcpu);

		kvm->events_ops->decode_key(kvm, &event->key, decode);
		pr_info("%*s ", KVM_EVENT_NAME_LEN, decode);
		pr_info("%10llu ", (unsigned long long)ecount);
		pr_info("%8.2f%% ", (double)ecount / kvm->total_count * 100);
		pr_info("%8.2f%% ", (double)etime / kvm->total_time * 100);
		pr_info("%9.2fus ", (double)min / NSEC_PER_USEC);
		pr_info("%9.2fus ", (double)max / NSEC_PER_USEC);
		pr_info("%9.2fus ( +-%7.2f%% )", (double)etime / ecount / NSEC_PER_USEC,
			kvm_event_rel_stddev(vcpu, event));
		pr_info("\n");
	}

	pr_info("\nTotal Samples:%" PRIu64 ", Total events handled time:%.2fus.\n\n",
		kvm->total_count, kvm->total_time / (double)NSEC_PER_USEC);

	if (kvm->lost_events)
		pr_info("\nLost events: %" PRIu64 "\n\n", kvm->lost_events);
}
1110
1111 #if defined(HAVE_TIMERFD_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
/*
 * Count PERF_RECORD_LOST records so the live display can later report
 * how many events the kernel dropped.
 */
static int process_lost_event(const struct perf_tool *tool,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	struct perf_kvm_stat *stat = container_of(tool, struct perf_kvm_stat, tool);

	stat->lost_events += 1;
	return 0;
}
1122 #endif
1123
skip_sample(struct perf_kvm_stat * kvm,struct perf_sample * sample)1124 static bool skip_sample(struct perf_kvm_stat *kvm,
1125 struct perf_sample *sample)
1126 {
1127 if (kvm->pid_list && intlist__find(kvm->pid_list, sample->pid) == NULL)
1128 return true;
1129
1130 return false;
1131 }
1132
/*
 * Per-sample callback: resolve the sample's address and thread, then
 * feed it to handle_kvm_event() for accounting.  Returns 0 on success
 * or when the sample is deliberately skipped, -1 on error.
 */
static int process_sample_event(const struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct evsel *evsel,
				struct machine *machine)
{
	int err = 0;
	struct thread *thread;
	struct perf_kvm_stat *kvm = container_of(tool, struct perf_kvm_stat,
						 tool);

	/* honour any --pid filter */
	if (skip_sample(kvm, sample))
		return 0;

	if (machine__resolve(machine, &kvm->al, sample) < 0) {
		pr_warning("Fail to resolve address location, skip sample.\n");
		return 0;
	}

	thread = machine__findnew_thread(machine, sample->pid, sample->tid);
	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			 event->header.type);
		return -1;
	}

	if (!handle_kvm_event(kvm, thread, evsel, sample))
		err = -1;

	/* drop the reference taken by machine__findnew_thread() */
	thread__put(thread);
	return err;
}
1165
/*
 * Select the per-ISA event-handling callbacks from the CPU type:
 * queried from the running system in live mode, or read from the
 * perf.data header in report mode.  Returns 0 on success, a negative
 * errno-style value otherwise.
 */
static int cpu_isa_config(struct perf_kvm_stat *kvm)
{
	char buf[128], *cpuid;
	int err;
	uint16_t e_machine;

	if (kvm->live) {
		struct perf_cpu cpu = {-1};	/* any CPU */

		err = get_cpuid(buf, sizeof(buf), cpu);
		if (err != 0) {
			pr_err("Failed to look up CPU type: %s\n",
			       str_error_r(err, buf, sizeof(buf)));
			return -err;
		}
		cpuid = buf;
	} else
		cpuid = perf_session__env(kvm->session)->cpuid;

	if (!cpuid) {
		pr_err("Failed to look up CPU type\n");
		return -EINVAL;
	}

	e_machine = perf_session__e_machine(kvm->session, /*e_flags=*/NULL);
	err = cpu_isa_init(kvm, e_machine, cpuid);
	if (err == -ENOTSUP)
		pr_err("CPU %s is not supported.\n", cpuid);

	return err;
}
1197
verify_vcpu(int vcpu)1198 static bool verify_vcpu(int vcpu)
1199 {
1200 if (vcpu != -1 && vcpu < 0) {
1201 pr_err("Invalid vcpu:%d.\n", vcpu);
1202 return false;
1203 }
1204
1205 return true;
1206 }
1207
1208 #if defined(HAVE_TIMERFD_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
1209 /* keeping the max events to a modest level to keep
1210 * the processing of samples per mmap smooth.
1211 */
1212 #define PERF_KVM__MAX_EVENTS_PER_MMAP 25
1213
perf_kvm__mmap_read_idx(struct perf_kvm_stat * kvm,int idx,u64 * mmap_time)1214 static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx,
1215 u64 *mmap_time)
1216 {
1217 struct evlist *evlist = kvm->evlist;
1218 union perf_event *event;
1219 struct mmap *md;
1220 u64 timestamp;
1221 s64 n = 0;
1222 int err;
1223
1224 *mmap_time = ULLONG_MAX;
1225 md = &evlist->mmap[idx];
1226 err = perf_mmap__read_init(&md->core);
1227 if (err < 0)
1228 return (err == -EAGAIN) ? 0 : -1;
1229
1230 while ((event = perf_mmap__read_event(&md->core)) != NULL) {
1231 err = evlist__parse_sample_timestamp(evlist, event, ×tamp);
1232 if (err) {
1233 perf_mmap__consume(&md->core);
1234 pr_err("Failed to parse sample\n");
1235 return -1;
1236 }
1237
1238 err = perf_session__queue_event(kvm->session, event, timestamp, 0, NULL);
1239 /*
1240 * FIXME: Here we can't consume the event, as perf_session__queue_event will
1241 * point to it, and it'll get possibly overwritten by the kernel.
1242 */
1243 perf_mmap__consume(&md->core);
1244
1245 if (err) {
1246 pr_err("Failed to enqueue sample: %d\n", err);
1247 return -1;
1248 }
1249
1250 /* save time stamp of our first sample for this mmap */
1251 if (n == 0)
1252 *mmap_time = timestamp;
1253
1254 /* limit events per mmap handled all at once */
1255 n++;
1256 if (n == PERF_KVM__MAX_EVENTS_PER_MMAP)
1257 break;
1258 }
1259
1260 perf_mmap__read_done(&md->core);
1261 return n;
1262 }
1263
/*
 * One pass over all mmap rings: drain each ring, then flush the
 * ordered-events queue up to the smallest "first event" timestamp seen
 * so events are delivered in global time order.
 *
 * Returns 1 when at least one ring hit the per-ring event cap (the
 * caller then skips polling and drains again immediately), 0
 * otherwise, or a negative value on error.
 */
static int perf_kvm__mmap_read(struct perf_kvm_stat *kvm)
{
	int i, err, throttled = 0;
	s64 n, ntotal = 0;
	u64 flush_time = ULLONG_MAX, mmap_time;

	for (i = 0; i < kvm->evlist->core.nr_mmaps; i++) {
		n = perf_kvm__mmap_read_idx(kvm, i, &mmap_time);
		if (n < 0)
			return -1;

		/* flush time is going to be the minimum of all the individual
		 * mmap times. Essentially, we flush all the samples queued up
		 * from the last pass under our minimal start time -- that leaves
		 * a very small race for samples to come in with a lower timestamp.
		 * The ioctl to return the perf_clock timestamp should close the
		 * race entirely.
		 */
		if (mmap_time < flush_time)
			flush_time = mmap_time;

		ntotal += n;
		if (n == PERF_KVM__MAX_EVENTS_PER_MMAP)
			throttled = 1;
	}

	/* flush queue after each round in which we processed events */
	if (ntotal) {
		struct ordered_events *oe = &kvm->session->ordered_events;

		oe->next_flush = flush_time;
		err = ordered_events__flush(oe, OE_FLUSH__ROUND);
		if (err) {
			if (kvm->lost_events)
				pr_info("\nLost events: %" PRIu64 "\n\n",
					kvm->lost_events);
			return err;
		}
	}

	return throttled;
}
1306
/* set by SIGINT/SIGTERM and by 'q' on stdin; polled by the live loop */
static volatile int done;

/* signal handler: ask kvm_events_live_report()'s loop to terminate */
static void sig_handler(int sig __maybe_unused)
{
	done = 1;
}
1313
perf_kvm__timerfd_create(struct perf_kvm_stat * kvm)1314 static int perf_kvm__timerfd_create(struct perf_kvm_stat *kvm)
1315 {
1316 struct itimerspec new_value;
1317 int rc = -1;
1318
1319 kvm->timerfd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK);
1320 if (kvm->timerfd < 0) {
1321 pr_err("timerfd_create failed\n");
1322 goto out;
1323 }
1324
1325 new_value.it_value.tv_sec = kvm->display_time;
1326 new_value.it_value.tv_nsec = 0;
1327 new_value.it_interval.tv_sec = kvm->display_time;
1328 new_value.it_interval.tv_nsec = 0;
1329
1330 if (timerfd_settime(kvm->timerfd, 0, &new_value, NULL) != 0) {
1331 pr_err("timerfd_settime failed: %d\n", errno);
1332 close(kvm->timerfd);
1333 goto out;
1334 }
1335
1336 rc = 0;
1337 out:
1338 return rc;
1339 }
1340
/*
 * Timer tick: redraw the statistics and reset all counters for the
 * next display interval.  Returns 0 on success (including "timer not
 * fired yet"), -1 on a timerfd read error.
 */
static int perf_kvm__handle_timerfd(struct perf_kvm_stat *kvm)
{
	uint64_t c;	/* number of timer expirations since last read */
	int rc;

	rc = read(kvm->timerfd, &c, sizeof(uint64_t));
	if (rc < 0) {
		/* non-blocking fd: EAGAIN just means no expiration yet */
		if (errno == EAGAIN)
			return 0;

		pr_err("Failed to read timer fd: %d\n", errno);
		return -1;
	}

	if (rc != sizeof(uint64_t)) {
		pr_err("Error reading timer fd - invalid size returned\n");
		return -1;
	}

	if (c != 1)
		pr_debug("Missed timer beats: %" PRIu64 "\n", c-1);

	/* update display */
	sort_result(kvm);
	print_result(kvm);

	/* Reset sort list to "ev_name" */
	kvm_hists__reinit(NULL, "ev_name");

	/* reset counts so each interval is displayed independently */
	clear_events_cache_stats();
	kvm->total_count = 0;
	kvm->total_time = 0;
	kvm->lost_events = 0;

	return 0;
}
1378
/* Add O_NONBLOCK to an fd's file status flags.  Returns 0 or -1. */
static int fd_set_nonblock(int fd)
{
	long flags = fcntl(fd, F_GETFL);

	if (flags < 0) {
		pr_err("Failed to get current flags for fd %d\n", fd);
		return -1;
	}

	if (fcntl(fd, F_SETFL, flags | O_NONBLOCK) < 0) {
		pr_err("Failed to set non-block option on fd %d\n", fd);
		return -1;
	}

	return 0;
}
1396
/* Return 1 when the user pressed 'q' (quit request), 0 otherwise. */
static int perf_kvm__handle_stdin(void)
{
	return getc(stdin) == 'q' ? 1 : 0;
}
1407
kvm_events_live_report(struct perf_kvm_stat * kvm)1408 static int kvm_events_live_report(struct perf_kvm_stat *kvm)
1409 {
1410 int nr_stdin, ret, err = -EINVAL;
1411 struct termios save;
1412
1413 /* live flag must be set first */
1414 kvm->live = true;
1415
1416 ret = cpu_isa_config(kvm);
1417 if (ret < 0)
1418 return ret;
1419
1420 if (!verify_vcpu(kvm->trace_vcpu) ||
1421 !is_valid_key(kvm) ||
1422 !register_kvm_events_ops(kvm, EM_HOST)) {
1423 goto out;
1424 }
1425
1426 set_term_quiet_input(&save);
1427
1428 kvm_hists__init();
1429
1430 signal(SIGINT, sig_handler);
1431 signal(SIGTERM, sig_handler);
1432
1433 /* add timer fd */
1434 if (perf_kvm__timerfd_create(kvm) < 0) {
1435 err = -1;
1436 goto out;
1437 }
1438
1439 if (evlist__add_pollfd(kvm->evlist, kvm->timerfd) < 0)
1440 goto out;
1441
1442 nr_stdin = evlist__add_pollfd(kvm->evlist, fileno(stdin));
1443 if (nr_stdin < 0)
1444 goto out;
1445
1446 if (fd_set_nonblock(fileno(stdin)) != 0)
1447 goto out;
1448
1449 /* everything is good - enable the events and process */
1450 evlist__enable(kvm->evlist);
1451
1452 while (!done) {
1453 struct fdarray *fda = &kvm->evlist->core.pollfd;
1454 int rc;
1455
1456 rc = perf_kvm__mmap_read(kvm);
1457 if (rc < 0)
1458 break;
1459
1460 err = perf_kvm__handle_timerfd(kvm);
1461 if (err)
1462 goto out;
1463
1464 if (fda->entries[nr_stdin].revents & POLLIN)
1465 done = perf_kvm__handle_stdin();
1466
1467 if (!rc && !done)
1468 err = evlist__poll(kvm->evlist, 100);
1469 }
1470
1471 evlist__disable(kvm->evlist);
1472
1473 if (err == 0) {
1474 sort_result(kvm);
1475 print_result(kvm);
1476 }
1477
1478 out:
1479 hists__delete_entries(&kvm_hists.hists);
1480
1481 if (kvm->timerfd >= 0)
1482 close(kvm->timerfd);
1483
1484 tcsetattr(0, TCSAFLUSH, &save);
1485 return err;
1486 }
1487
/*
 * Configure, open and mmap the live-mode tracepoint events.  The
 * sample payload is trimmed to the minimum needed (TID/TIME/CPU/RAW)
 * to keep per-sample overhead low.  Returns 0 on success, -1 on
 * failure.
 */
static int kvm_live_open_events(struct perf_kvm_stat *kvm)
{
	int err, rc = -1;
	struct evsel *pos;
	struct evlist *evlist = kvm->evlist;
	char sbuf[STRERR_BUFSIZE];

	evlist__config(evlist, &kvm->opts, NULL);

	/*
	 * Note: exclude_{guest,host} do not apply here.
	 * This command processes KVM tracepoints from host only
	 */
	evlist__for_each_entry(evlist, pos) {
		struct perf_event_attr *attr = &pos->core.attr;

		/* make sure these *are* set */
		evsel__set_sample_bit(pos, TID);
		evsel__set_sample_bit(pos, TIME);
		evsel__set_sample_bit(pos, CPU);
		evsel__set_sample_bit(pos, RAW);
		/* make sure these are *not*; want as small a sample as possible */
		evsel__reset_sample_bit(pos, PERIOD);
		evsel__reset_sample_bit(pos, IP);
		evsel__reset_sample_bit(pos, CALLCHAIN);
		evsel__reset_sample_bit(pos, ADDR);
		evsel__reset_sample_bit(pos, READ);
		attr->mmap = 0;
		attr->comm = 0;
		attr->task = 0;

		attr->sample_period = 1;	/* one sample per tracepoint hit */

		attr->watermark = 0;
		attr->wakeup_events = 1000;

		/* will enable all once we are ready */
		attr->disabled = 1;
	}

	err = evlist__open(evlist);
	if (err < 0) {
		printf("Couldn't create the events: %s\n",
		       str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out;
	}

	if (evlist__mmap(evlist, kvm->opts.mmap_pages) < 0) {
		ui__error("Failed to mmap the events: %s\n",
			  str_error_r(errno, sbuf, sizeof(sbuf)));
		evlist__close(evlist);
		goto out;
	}

	rc = 0;

out:
	return rc;
}
1547 #endif
1548
/*
 * Report mode: open the perf.data file, register the architecture's
 * event ops and process all recorded events through
 * process_sample_event().  Returns 0 on success, negative on error.
 */
static int read_events(struct perf_kvm_stat *kvm)
{
	int ret;
	uint16_t e_machine;
	struct perf_data file = {
		.path = kvm->file_name,
		.mode = PERF_DATA_MODE_READ,
		.force = kvm->force,
	};

	perf_tool__init(&kvm->tool, /*ordered_events=*/true);
	kvm->tool.sample = process_sample_event;
	kvm->tool.comm = perf_event__process_comm;
	kvm->tool.namespaces = perf_event__process_namespaces;

	kvm->session = perf_session__new(&file, &kvm->tool);
	if (IS_ERR(kvm->session)) {
		pr_err("Initializing perf session failed\n");
		return PTR_ERR(kvm->session);
	}

	symbol__init(perf_session__env(kvm->session));

	/* the data file must contain tracepoint samples */
	if (!perf_session__has_traces(kvm->session, "kvm record")) {
		ret = -EINVAL;
		goto out_delete;
	}

	e_machine = perf_session__e_machine(kvm->session, /*e_flags=*/NULL);
	if (!register_kvm_events_ops(kvm, e_machine)) {
		ret = -EINVAL;
		goto out_delete;
	}

	/*
	 * Do not use 'isa' recorded in kvm_exit tracepoint since it is not
	 * traced in the old kernel.
	 */
	ret = cpu_isa_config(kvm);
	if (ret < 0)
		goto out_delete;

	ret = perf_session__process_events(kvm->session);

out_delete:
	perf_session__delete(kvm->session);
	return ret;
}
1597
parse_target_str(struct perf_kvm_stat * kvm)1598 static int parse_target_str(struct perf_kvm_stat *kvm)
1599 {
1600 if (kvm->opts.target.pid) {
1601 kvm->pid_list = intlist__new(kvm->opts.target.pid);
1602 if (kvm->pid_list == NULL) {
1603 pr_err("Error parsing process id string\n");
1604 return -EINVAL;
1605 }
1606 }
1607
1608 return 0;
1609 }
1610
/*
 * 'perf kvm stat report': validate the options, pick the UI (TUI
 * unless --stdio or no slang support), read the events back and
 * display the sorted result.
 */
static int kvm_events_report_vcpu(struct perf_kvm_stat *kvm)
{
	int ret = -EINVAL;
	int vcpu = kvm->trace_vcpu;

	if (parse_target_str(kvm) != 0)
		goto exit;

	if (!verify_vcpu(vcpu))
		goto exit;

	if (!is_valid_key(kvm))
		goto exit;

	if (kvm->use_stdio) {
		use_browser = 0;
		setup_pager();
	} else {
		use_browser = 1;
	}

	setup_browser(false);

	kvm_hists__init();

	ret = read_events(kvm);
	if (ret)
		goto exit;

	sort_result(kvm);
	kvm_display(kvm);

exit:
	hists__delete_entries(&kvm_hists.hists);
	return ret;
}
1647
/*
 * 'perf kvm stat record': build an argv for 'perf record' with the
 * architecture's kvm tracepoints plus the user's extra arguments, hide
 * or disable record options that make no sense here, and run it.
 */
static int
kvm_events_record(struct perf_kvm_stat *kvm, int argc, const char **argv)
{
	unsigned int rec_argc, i, j, events_tp_size;
	const char **rec_argv;
	const char * const record_args[] = {
		"record",
		"-R",
		"-m", "1024",
		"-c", "1",
	};
	const char * const kvm_stat_record_usage[] = {
		"perf kvm stat record [<options>]",
		NULL
	};
	const char * const *events_tp;
	int ret;
	uint16_t e_machine = EM_HOST;

	events_tp_size = 0;
	ret = setup_kvm_events_tp(kvm, e_machine);
	if (ret < 0) {
		pr_err("Unable to setup the kvm tracepoints\n");
		return ret;
	}

	for (events_tp = kvm_events_tp(e_machine); *events_tp; events_tp++)
		events_tp_size++;

	/* fixed args + user args + "-o <file>" + "-e <tp>" per tracepoint */
	rec_argc = ARRAY_SIZE(record_args) + argc + 2 +
		   2 * events_tp_size;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)
		return -ENOMEM;

	/* STRDUP_FAIL_EXIT() presumably jumps to EXIT on OOM — the label
	 * below is otherwise unreferenced in this function */
	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = STRDUP_FAIL_EXIT(record_args[i]);

	for (j = 0; j < events_tp_size; j++) {
		rec_argv[i++] = STRDUP_FAIL_EXIT("-e");
		rec_argv[i++] = STRDUP_FAIL_EXIT(kvm_events_tp(e_machine)[j]);
	}

	rec_argv[i++] = STRDUP_FAIL_EXIT("-o");
	rec_argv[i++] = STRDUP_FAIL_EXIT(kvm->file_name);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = STRDUP_FAIL_EXIT(argv[j]);

	/* these record options are set internally above: hide them */
	set_option_flag(record_options, 'e', "event", PARSE_OPT_HIDDEN);
	set_option_flag(record_options, 0, "filter", PARSE_OPT_HIDDEN);
	set_option_flag(record_options, 'R', "raw-samples", PARSE_OPT_HIDDEN);

	/* these make no sense for 'kvm stat record': reject them */
	set_option_flag(record_options, 'F', "freq", PARSE_OPT_DISABLED);
	set_option_flag(record_options, 0, "group", PARSE_OPT_DISABLED);
	set_option_flag(record_options, 'g', NULL, PARSE_OPT_DISABLED);
	set_option_flag(record_options, 0, "call-graph", PARSE_OPT_DISABLED);
	set_option_flag(record_options, 'd', "data", PARSE_OPT_DISABLED);
	set_option_flag(record_options, 'T', "timestamp", PARSE_OPT_DISABLED);
	set_option_flag(record_options, 'P', "period", PARSE_OPT_DISABLED);
	set_option_flag(record_options, 'n', "no-samples", PARSE_OPT_DISABLED);
	set_option_flag(record_options, 'N', "no-buildid-cache", PARSE_OPT_DISABLED);
	set_option_flag(record_options, 'B', "no-buildid", PARSE_OPT_DISABLED);
	set_option_flag(record_options, 'G', "cgroup", PARSE_OPT_DISABLED);
	set_option_flag(record_options, 'b', "branch-any", PARSE_OPT_DISABLED);
	set_option_flag(record_options, 'j', "branch-filter", PARSE_OPT_DISABLED);
	set_option_flag(record_options, 'W', "weight", PARSE_OPT_DISABLED);
	set_option_flag(record_options, 0, "transaction", PARSE_OPT_DISABLED);

	record_usage = kvm_stat_record_usage;
	ret = cmd_record(i, rec_argv);

EXIT:
	/* free(NULL) is a no-op for any slots never filled */
	for (i = 0; i < rec_argc; i++)
		free((void *)rec_argv[i]);
	free(rec_argv);
	return ret;
}
1727
/*
 * 'perf kvm stat report': parse the report-specific options and hand
 * off to kvm_events_report_vcpu().
 */
static int
kvm_events_report(struct perf_kvm_stat *kvm, int argc, const char **argv)
{
	const struct option kvm_events_report_options[] = {
		OPT_STRING(0, "event", &kvm->report_event, "report event",
			   "event for reporting: vmexit, "
			   "mmio (x86 only), ioport (x86 only)"),
		OPT_INTEGER(0, "vcpu", &kvm->trace_vcpu,
			    "vcpu id to report"),
		OPT_STRING('k', "key", &kvm->sort_key, "sort-key",
			   "key for sorting: sample(sort by samples number)"
			   " time (sort by avg time)"),
		OPT_STRING('p', "pid", &kvm->opts.target.pid, "pid",
			   "analyze events only for given process id(s)"),
		OPT_BOOLEAN('f', "force", &kvm->force, "don't complain, do it"),
		OPT_BOOLEAN(0, "stdio", &kvm->use_stdio, "use the stdio interface"),
		OPT_END()
	};

	const char * const kvm_events_report_usage[] = {
		"perf kvm stat report [<options>]",
		NULL
	};

	if (argc) {
		argc = parse_options(argc, argv,
				     kvm_events_report_options,
				     kvm_events_report_usage, 0);
		if (argc)	/* leftover non-option arguments are rejected */
			usage_with_options(kvm_events_report_usage,
					   kvm_events_report_options);
	}

#ifndef HAVE_SLANG_SUPPORT
	kvm->use_stdio = true;	/* no TUI without slang */
#endif

	/* without a pid filter, report across the whole system */
	if (!kvm->opts.target.pid)
		kvm->opts.target.system_wide = true;

	return kvm_events_report_vcpu(kvm);
}
1770
1771 #if defined(HAVE_TIMERFD_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
kvm_live_event_list(void)1772 static struct evlist *kvm_live_event_list(void)
1773 {
1774 struct evlist *evlist;
1775 char *tp, *name, *sys;
1776 int err = -1;
1777 const char * const *events_tp;
1778
1779 evlist = evlist__new();
1780 if (evlist == NULL)
1781 return NULL;
1782
1783 for (events_tp = kvm_events_tp(EM_HOST); *events_tp; events_tp++) {
1784
1785 tp = strdup(*events_tp);
1786 if (tp == NULL)
1787 goto out;
1788
1789 /* split tracepoint into subsystem and name */
1790 sys = tp;
1791 name = strchr(tp, ':');
1792 if (name == NULL) {
1793 pr_err("Error parsing %s tracepoint: subsystem delimiter not found\n",
1794 *events_tp);
1795 free(tp);
1796 goto out;
1797 }
1798 *name = '\0';
1799 name++;
1800
1801 if (evlist__add_newtp(evlist, sys, name, NULL)) {
1802 pr_err("Failed to add %s tracepoint to the list\n", *events_tp);
1803 free(tp);
1804 goto out;
1805 }
1806
1807 free(tp);
1808 }
1809
1810 err = 0;
1811
1812 out:
1813 if (err) {
1814 evlist__delete(evlist);
1815 evlist = NULL;
1816 }
1817
1818 return evlist;
1819 }
1820
/*
 * 'perf kvm stat live': record and report in one go, refreshing the
 * screen every --display seconds.  Only built with timerfd and
 * libtraceevent support.
 */
static int kvm_events_live(struct perf_kvm_stat *kvm,
			   int argc, const char **argv)
{
	char errbuf[BUFSIZ];
	int err;

	const struct option live_options[] = {
		OPT_STRING('p', "pid", &kvm->opts.target.pid, "pid",
			"record events on existing process id"),
		OPT_CALLBACK('m', "mmap-pages", &kvm->opts.mmap_pages, "pages",
			"number of mmap data pages", evlist__parse_mmap_pages),
		OPT_INCR('v', "verbose", &verbose,
			"be more verbose (show counter open errors, etc)"),
		OPT_BOOLEAN('a', "all-cpus", &kvm->opts.target.system_wide,
			"system-wide collection from all CPUs"),
		OPT_UINTEGER('d', "display", &kvm->display_time,
			"time in seconds between display updates"),
		OPT_STRING(0, "event", &kvm->report_event, "report event",
			"event for reporting: "
			"vmexit, mmio (x86 only), ioport (x86 only)"),
		OPT_INTEGER(0, "vcpu", &kvm->trace_vcpu,
			"vcpu id to report"),
		OPT_STRING('k', "key", &kvm->sort_key, "sort-key",
			"key for sorting: sample(sort by samples number)"
			" time (sort by avg time)"),
		OPT_U64(0, "duration", &kvm->duration,
			"show events other than"
			" HLT (x86 only) or Wait state (s390 only)"
			" that take longer than duration usecs"),
		OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
			"per thread proc mmap processing timeout in ms"),
		OPT_END()
	};
	const char * const live_usage[] = {
		"perf kvm stat live [<options>]",
		NULL
	};
	struct perf_data data = {
		.mode = PERF_DATA_MODE_WRITE,
	};


	/* event handling */
	perf_tool__init(&kvm->tool, /*ordered_events=*/true);
	kvm->tool.sample = process_sample_event;
	kvm->tool.comm = perf_event__process_comm;
	kvm->tool.exit = perf_event__process_exit;
	kvm->tool.fork = perf_event__process_fork;
	kvm->tool.lost = process_lost_event;
	kvm->tool.namespaces = perf_event__process_namespaces;

	/* set defaults */
	kvm->display_time = 1;
	kvm->opts.user_interval = 1;
	kvm->opts.mmap_pages = 512;
	kvm->opts.target.uses_mmap = false;

	symbol__init(NULL);
	disable_buildid_cache();

	use_browser = 0;	/* live output goes through stdio */

	if (argc) {
		argc = parse_options(argc, argv, live_options,
				     live_usage, 0);
		if (argc)
			usage_with_options(live_usage, live_options);
	}

	kvm->duration *= NSEC_PER_USEC;   /* convert usec to nsec */

	/*
	 * target related setups
	 */
	err = target__validate(&kvm->opts.target);
	if (err) {
		target__strerror(&kvm->opts.target, err, errbuf, BUFSIZ);
		ui__warning("%s", errbuf);
	}

	if (target__none(&kvm->opts.target))
		kvm->opts.target.system_wide = true;


	/*
	 * generate the event list
	 */
	err = setup_kvm_events_tp(kvm, EM_HOST);
	if (err < 0) {
		pr_err("Unable to setup the kvm tracepoints\n");
		return err;
	}

	kvm->evlist = kvm_live_event_list();
	if (kvm->evlist == NULL) {
		err = -1;
		goto out;
	}

	if (evlist__create_maps(kvm->evlist, &kvm->opts.target) < 0)
		usage_with_options(live_usage, live_options);

	/*
	 * perf session
	 */
	kvm->session = perf_session__new(&data, &kvm->tool);
	if (IS_ERR(kvm->session)) {
		err = PTR_ERR(kvm->session);
		goto out;
	}
	kvm->session->evlist = kvm->evlist;
	perf_session__set_id_hdr_size(kvm->session);
	/* copy queued events: the mmap ring entries may be overwritten
	 * by the kernel (see the FIXME in perf_kvm__mmap_read_idx()) */
	ordered_events__set_copy_on_queue(&kvm->session->ordered_events, true);
	machine__synthesize_threads(&kvm->session->machines.host, &kvm->opts.target,
				    kvm->evlist->core.threads, true, false, 1);
	err = kvm_live_open_events(kvm);
	if (err)
		goto out;

	err = kvm_events_live_report(kvm);

out:
	perf_session__delete(kvm->session);
	kvm->session = NULL;
	evlist__delete(kvm->evlist);

	return err;
}
1949 #endif
1950
/* Help text shown when 'perf kvm stat' is run with no subcommand. */
static void print_kvm_stat_usage(void)
{
	printf("Usage: perf kvm stat <command>\n\n"
	       "# Available commands:\n"
	       "\trecord: record kvm events\n"
	       "\treport: report statistical data of kvm events\n"
	       "\tlive: live reporting of statistical data of kvm events\n"
	       "\nOtherwise, it is the alias of 'perf stat':\n");
}
1962
/*
 * Dispatch 'perf kvm stat <record|report|live>'.  record/report accept
 * abbreviations of at least 3 characters — note strstarts() is called
 * with the full command name first and the user's string as prefix.
 * Anything else falls through to plain 'perf stat'.
 */
static int kvm_cmd_stat(const char *file_name, int argc, const char **argv)
{
	struct perf_kvm_stat kvm = {
		.file_name = file_name,

		.trace_vcpu = -1,	/* all vcpus by default */
		.report_event = "vmexit",
		.sort_key = "sample",

	};

	if (argc == 1) {
		print_kvm_stat_usage();
		goto perf_stat;
	}

	if (strlen(argv[1]) > 2 && strstarts("record", argv[1]))
		return kvm_events_record(&kvm, argc - 1, argv + 1);

	if (strlen(argv[1]) > 2 && strstarts("report", argv[1]))
		return kvm_events_report(&kvm, argc - 1 , argv + 1);

#if defined(HAVE_TIMERFD_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
	if (!strncmp(argv[1], "live", 4))
		return kvm_events_live(&kvm, argc - 1 , argv + 1);
#endif

perf_stat:
	return cmd_stat(argc, argv);
}
1993 #endif /* HAVE_LIBTRACEEVENT */
1994
/*
 * 'perf kvm record': forward to 'perf record -o <file_name> ...',
 * letting the architecture append default events.
 */
static int __cmd_record(const char *file_name, int argc, const char **argv)
{
	int rec_argc, i = 0, j, ret;
	const char **rec_argv;

	/*
	 * Besides the 2 more options "-o" and "filename",
	 * kvm_add_default_arch_event() may add 2 extra options,
	 * so allocate 4 more items.
	 */
	rec_argc = argc + 2 + 2;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));
	if (!rec_argv)
		return -ENOMEM;

	/* STRDUP_FAIL_EXIT() presumably jumps to EXIT on OOM */
	rec_argv[i++] = STRDUP_FAIL_EXIT("record");
	rec_argv[i++] = STRDUP_FAIL_EXIT("-o");
	rec_argv[i++] = STRDUP_FAIL_EXIT(file_name);
	for (j = 1; j < argc; j++, i++)
		rec_argv[i] = STRDUP_FAIL_EXIT(argv[j]);

	BUG_ON(i + 2 != rec_argc);

	ret = kvm_add_default_arch_event(EM_HOST, &i, rec_argv);
	if (ret)
		goto EXIT;

	ret = cmd_record(i, rec_argv);

EXIT:
	/* free(NULL) is a no-op for any slots never filled */
	for (i = 0; i < rec_argc; i++)
		free((void *)rec_argv[i]);
	free(rec_argv);
	return ret;
}
2030
/*
 * 'perf kvm report': forward to 'perf report -i <file_name> ...'.
 */
static int __cmd_report(const char *file_name, int argc, const char **argv)
{
	int rec_argc, i = 0, j, ret;
	const char **rec_argv;

	rec_argc = argc + 2;	/* room for "-i" and the file name */
	rec_argv = calloc(rec_argc + 1, sizeof(char *));
	if (!rec_argv)
		return -ENOMEM;

	/* STRDUP_FAIL_EXIT() presumably jumps to EXIT on OOM */
	rec_argv[i++] = STRDUP_FAIL_EXIT("report");
	rec_argv[i++] = STRDUP_FAIL_EXIT("-i");
	rec_argv[i++] = STRDUP_FAIL_EXIT(file_name);
	for (j = 1; j < argc; j++, i++)
		rec_argv[i] = STRDUP_FAIL_EXIT(argv[j]);

	BUG_ON(i != rec_argc);

	ret = cmd_report(i, rec_argv);

EXIT:
	for (i = 0; i < rec_argc; i++)
		free((void *)rec_argv[i]);
	free(rec_argv);
	return ret;
}
2057
/*
 * 'perf kvm buildid-list': forward to
 * 'perf buildid-list -i <file_name> ...'.
 */
static int
__cmd_buildid_list(const char *file_name, int argc, const char **argv)
{
	int rec_argc, i = 0, j, ret;
	const char **rec_argv;

	rec_argc = argc + 2;	/* room for "-i" and the file name */
	rec_argv = calloc(rec_argc + 1, sizeof(char *));
	if (!rec_argv)
		return -ENOMEM;

	/* STRDUP_FAIL_EXIT() presumably jumps to EXIT on OOM */
	rec_argv[i++] = STRDUP_FAIL_EXIT("buildid-list");
	rec_argv[i++] = STRDUP_FAIL_EXIT("-i");
	rec_argv[i++] = STRDUP_FAIL_EXIT(file_name);
	for (j = 1; j < argc; j++, i++)
		rec_argv[i] = STRDUP_FAIL_EXIT(argv[j]);

	BUG_ON(i != rec_argc);

	ret = cmd_buildid_list(i, rec_argv);

EXIT:
	for (i = 0; i < rec_argc; i++)
		free((void *)rec_argv[i]);
	free(rec_argv);
	return ret;
}
2085
/*
 * 'perf kvm top': forward argv to 'perf top', appending the
 * architecture's default events when available.
 */
static int __cmd_top(int argc, const char **argv)
{
	int rec_argc, i = 0, ret;
	const char **rec_argv;

	/*
	 * kvm_add_default_arch_event() may add 2 extra options, so
	 * allocate 2 more pointers in advance.
	 */
	rec_argc = argc + 2;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));
	if (!rec_argv)
		return -ENOMEM;

	/* STRDUP_FAIL_EXIT() presumably jumps to EXIT on OOM */
	for (i = 0; i < argc; i++)
		rec_argv[i] = STRDUP_FAIL_EXIT(argv[i]);

	BUG_ON(i != argc);

	ret = kvm_add_default_arch_event(EM_HOST, &i, rec_argv);
	if (ret)
		goto EXIT;

	ret = cmd_top(i, rec_argv);

EXIT:
	for (i = 0; i < rec_argc; i++)
		free((void *)rec_argv[i]);
	free(rec_argv);
	return ret;
}
2117
/*
 * Entry point for 'perf kvm': parse the common guest/host options,
 * pick a default perf.data file name and dispatch to the wrapped
 * subcommand (top/record/report/diff/buildid-list/stat).
 */
int cmd_kvm(int argc, const char **argv)
{
	const char *file_name = NULL;
	const struct option kvm_options[] = {
		OPT_STRING('i', "input", &file_name, "file",
			   "Input file name"),
		OPT_STRING('o', "output", &file_name, "file",
			   "Output file name"),
		OPT_BOOLEAN(0, "guest", &perf_guest,
			    "Collect guest os data"),
		OPT_BOOLEAN(0, "host", &perf_host,
			    "Collect host os data"),
		OPT_STRING(0, "guestmount", &symbol_conf.guestmount, "directory",
			   "guest mount directory under which every guest os"
			   " instance has a subdir"),
		OPT_STRING(0, "guestvmlinux", &symbol_conf.default_guest_vmlinux_name,
			   "file", "file saving guest os vmlinux"),
		OPT_STRING(0, "guestkallsyms", &symbol_conf.default_guest_kallsyms,
			   "file", "file saving guest os /proc/kallsyms"),
		OPT_STRING(0, "guestmodules", &symbol_conf.default_guest_modules,
			   "file", "file saving guest os /proc/modules"),
		OPT_BOOLEAN(0, "guest-code", &symbol_conf.guest_code,
			    "Guest code can be found in hypervisor process"),
		OPT_INCR('v', "verbose", &verbose,
			 "be more verbose (show counter open errors, etc)"),
		OPT_END()
	};

	const char *const kvm_subcommands[] = { "top", "record", "report", "diff",
						"buildid-list", "stat", NULL };
	const char *kvm_usage[] = { NULL, NULL };

	/* default to guest-side data collection */
	exclude_GH_default = true;
	perf_host  = 0;
	perf_guest = 1;

	argc = parse_options_subcommand(argc, argv, kvm_options, kvm_subcommands, kvm_usage,
					PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(kvm_usage, kvm_options);

	if (!perf_host)
		perf_guest = 1;

	if (!file_name) {
		file_name = get_filename_for_perf_kvm();

		if (!file_name) {
			pr_err("Failed to allocate memory for filename\n");
			return -ENOMEM;
		}
	}

	/* abbreviations of more than 2 chars are accepted, e.g. "rec" */
	if (strlen(argv[0]) > 2 && strstarts("record", argv[0]))
		return __cmd_record(file_name, argc, argv);
	else if (strlen(argv[0]) > 2 && strstarts("report", argv[0]))
		return __cmd_report(file_name, argc, argv);
	else if (strlen(argv[0]) > 2 && strstarts("diff", argv[0]))
		return cmd_diff(argc, argv);
	else if (!strcmp(argv[0], "top"))
		return __cmd_top(argc, argv);
	else if (strlen(argv[0]) > 2 && strstarts("buildid-list", argv[0]))
		return __cmd_buildid_list(file_name, argc, argv);
#if defined(HAVE_LIBTRACEEVENT)
	else if (strlen(argv[0]) > 2 && strstarts("stat", argv[0]))
		return kvm_cmd_stat(file_name, argc, argv);
#endif
	else
		usage_with_options(kvm_usage, kvm_options);

	/*
	 * NOTE(review): every branch above returns (or bails out via
	 * usage_with_options), so this cleanup is never reached and the
	 * usage string leaks on the dispatch paths.
	 */
	/* free usage string allocated by parse_options_subcommand */
	free((void *)kvm_usage[0]);

	return 0;
}
2193