Lines Matching refs:top

3  * builtin-top.c
5 * Builtin top command: Display a continuously updated profile of
40 #include "util/top.h"
95 static void perf_top__update_print_entries(struct perf_top *top)
97 top->print_entries = top->winsize.ws_row - HEADER_LINE_NR;
105 static void perf_top__resize(struct perf_top *top)
107 get_term_dimensions(&top->winsize);
108 perf_top__update_print_entries(top);
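
The two helpers above recompute how many histogram rows fit below the header
whenever the terminal is resized. A minimal standalone sketch of that path,
assuming a toy_top struct and a HEADER_LINE_NR of 2 as illustrative stand-ins
for the perf definitions (perf's get_term_dimensions() boils down to a
TIOCGWINSZ ioctl):

#include <sys/ioctl.h>
#include <unistd.h>

#define HEADER_LINE_NR	2	/* assumed header height, for illustration */

struct toy_top {
	struct winsize winsize;
	int print_entries;
};

static void toy_top__update_print_entries(struct toy_top *top)
{
	/* rows left for hist entries once the header lines are subtracted */
	top->print_entries = top->winsize.ws_row - HEADER_LINE_NR;
}

static void toy_top__resize(struct toy_top *top)
{
	/* re-read the terminal size, then recompute the printable entries */
	if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &top->winsize) == 0)
		toy_top__update_print_entries(top);
}
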
111 static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
142 if (!symbol__hists(sym, top->evlist->core.nr_entries)) {
152 top->sym_filter_entry = he;
197 static void perf_top__record_precise_ip(struct perf_top *top,
208 (top->sym_filter_entry == NULL ||
209 top->sym_filter_entry->ms.sym != sym)))
240 static void perf_top__show_details(struct perf_top *top)
242 struct hist_entry *he = top->sym_filter_entry;
263 printf("Showing %s for %s\n", evsel__name(top->sym_evsel), symbol->name);
266 more = symbol__annotate_printf(&he->ms, top->sym_evsel);
268 if (top->evlist->enabled) {
269 if (top->zero)
270 symbol__annotate_zero_histogram(symbol, top->sym_evsel);
272 symbol__annotate_decay_histogram(symbol, top->sym_evsel);
319 static void perf_top__print_sym_table(struct perf_top *top)
323 const int win_width = top->winsize.ws_col - 1;
324 struct evsel *evsel = top->sym_evsel;
329 perf_top__header_snprintf(top, bf, sizeof(bf));
334 if (!top->record_opts.overwrite &&
335 (top->evlist->stats.nr_lost_warned !=
336 top->evlist->stats.nr_events[PERF_RECORD_LOST])) {
337 top->evlist->stats.nr_lost_warned =
338 top->evlist->stats.nr_events[PERF_RECORD_LOST];
341 top->evlist->stats.nr_lost_warned);
345 if (top->sym_filter_entry) {
346 perf_top__show_details(top);
350 perf_top__resort_hists(top);
352 hists__output_recalc_col_len(hists, top->print_entries - printed);
354 hists__fprintf(hists, false, top->print_entries - printed, win_width,
355 top->min_percent, stdout, !symbol_conf.use_callchain);
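
The nr_lost_warned bookkeeping above keeps the lost-events warning from
repeating on every refresh: it fires only when the PERF_RECORD_LOST count has
advanced since the last report. A minimal sketch of that throttle, with
toy_stats standing in for the evlist->stats fields:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct toy_stats {
	uint64_t nr_lost;		/* total lost-event records seen so far */
	uint64_t nr_lost_warned;	/* count reported by the last warning */
};

static void maybe_warn_lost(struct toy_stats *stats)
{
	if (stats->nr_lost_warned == stats->nr_lost)
		return;			/* nothing new since the last warning */

	stats->nr_lost_warned = stats->nr_lost;
	fprintf(stderr, "warning: %" PRIu64 " lost events so far\n",
		stats->nr_lost_warned);
}
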
393 static void perf_top__prompt_symbol(struct perf_top *top, const char *msg)
396 struct hist_entry *syme = top->sym_filter_entry, *n, *found = NULL;
397 struct hists *hists = evsel__hists(top->sym_evsel);
404 top->sym_filter_entry = NULL;
429 perf_top__parse_source(top, found);
435 static void perf_top__print_mapped_keys(struct perf_top *top)
439 if (top->sym_filter_entry) {
440 struct symbol *sym = top->sym_filter_entry->ms.sym;
445 fprintf(stdout, "\t[d] display refresh delay. \t(%d)\n", top->delay_secs);
446 fprintf(stdout, "\t[e] display entries (lines). \t(%d)\n", top->print_entries);
448 if (top->evlist->core.nr_entries > 1)
449 fprintf(stdout, "\t[E] active event counter. \t(%s)\n", evsel__name(top->sym_evsel));
451 fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", top->count_filter);
459 top->hide_kernel_symbols ? "yes" : "no");
462 top->hide_user_symbols ? "yes" : "no");
463 fprintf(stdout, "\t[z] toggle sample zeroing. \t(%d)\n", top->zero ? 1 : 0);
467 static int perf_top__key_mapped(struct perf_top *top, int c)
483 return top->evlist->core.nr_entries > 1 ? 1 : 0;
491 static bool perf_top__handle_keypress(struct perf_top *top, int c)
495 if (!perf_top__key_mapped(top, c)) {
499 perf_top__print_mapped_keys(top);
509 if (!perf_top__key_mapped(top, c))
515 prompt_integer(&top->delay_secs, "Enter display delay");
516 if (top->delay_secs < 1)
517 top->delay_secs = 1;
520 prompt_integer(&top->print_entries, "Enter display entries (lines)");
521 if (top->print_entries == 0) {
522 perf_top__resize(top);
529 if (top->evlist->core.nr_entries > 1) {
535 evlist__for_each_entry(top->evlist, top->sym_evsel)
536 fprintf(stderr, "\n\t%d %s", top->sym_evsel->core.idx, evsel__name(top->sym_evsel));
540 if (counter >= top->evlist->core.nr_entries) {
541 top->sym_evsel = evlist__first(top->evlist);
542 fprintf(stderr, "Sorry, no such event, using %s.\n", evsel__name(top->sym_evsel));
546 evlist__for_each_entry(top->evlist, top->sym_evsel)
547 if (top->sym_evsel->core.idx == counter)
550 top->sym_evsel = evlist__first(top->evlist);
553 prompt_integer(&top->count_filter, "Enter display event count filter");
560 top->hide_kernel_symbols = !top->hide_kernel_symbols;
565 if (top->dump_symtab)
566 perf_session__fprintf_dsos(top->session, stderr);
570 perf_top__prompt_symbol(top, "Enter details symbol");
573 if (!top->sym_filter_entry)
576 struct hist_entry *syme = top->sym_filter_entry;
578 top->sym_filter_entry = NULL;
583 top->hide_user_symbols = !top->hide_user_symbols;
586 top->zero = !top->zero;
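
The keypress handler excerpted above follows a simple pattern: prompt for a
new integer setting and clamp it to a sane minimum, or toggle a display flag.
A compact sketch of that dispatch, assuming toy_top and prompt_int() as
illustrative stand-ins; [d] and [z] match the mapped-keys listing above, the
other key letters are assumptions for illustration:

#include <stdbool.h>
#include <stdio.h>

struct toy_top {
	int delay_secs;
	bool hide_kernel_symbols, hide_user_symbols, zero;
};

static void prompt_int(int *val, const char *msg)
{
	int v;

	printf("%s: ", msg);
	fflush(stdout);
	if (scanf("%d", &v) == 1)
		*val = v;		/* keep the old value on bad input */
}

/* Returns false when the user asked to stop the display loop. */
static bool toy_top__handle_keypress(struct toy_top *top, int c)
{
	switch (c) {
	case 'd':
		prompt_int(&top->delay_secs, "Enter display delay");
		if (top->delay_secs < 1)
			top->delay_secs = 1;	/* never refresh faster than 1s */
		break;
	case 'K':
		top->hide_kernel_symbols = !top->hide_kernel_symbols;
		break;
	case 'U':
		top->hide_user_symbols = !top->hide_user_symbols;
		break;
	case 'z':
		top->zero = !top->zero;
		break;
	case 'q':
	case 'Q':
		return false;
	}
	return true;
}
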
617 struct perf_top *top = arg;
618 const char *help = "For a higher level overview, try: perf top --sort comm,dso";
621 .arg = top,
622 .refresh = top->delay_secs,
633 prctl(PR_SET_NAME, "perf-top-UI", 0, 0, 0);
636 perf_top__sort_new_samples(top);
643 evlist__for_each_entry(top->evlist, pos) {
645 hists->uid_filter_str = top->record_opts.target.uid_str;
648 ret = evlist__tui_browse_hists(top->evlist, help, &hbt, top->min_percent,
649 &top->session->header.env, !top->record_opts.overwrite);
651 top->zero = true;
677 struct perf_top *top = arg;
687 prctl(PR_SET_NAME, "perf-top-UI", 0, 0, 0);
692 delay_msecs = top->delay_secs * MSEC_PER_SEC;
700 perf_top__print_sym_table(top);
716 if (perf_top__handle_keypress(top, c))
731 struct perf_top *top = arg;
735 perf_top__record_precise_ip(top, iter->he, iter->sample, evsel, al->addr);
738 !(top->record_opts.branch_stack & PERF_SAMPLE_BRANCH_ANY),
749 struct perf_top *top = container_of(tool, struct perf_top, tool);
768 top->session->evlist->stats.nr_unprocessable_samples++);
773 top->exact_samples++;
779 if (top->stitch_lbr)
785 if (!evlist__exclude_kernel(top->session->evlist)) {
811 if (!machine->kptr_restrict_warned && !top->vmlinux_warned &&
826 top->vmlinux_warned = true;
845 if (hist_entry_iter__add(&iter, &al, top->max_stack, top) < 0)
856 perf_top__process_lost(struct perf_top *top, union perf_event *event,
859 top->lost += event->lost.lost;
860 top->lost_total += event->lost.lost;
865 perf_top__process_lost_samples(struct perf_top *top,
869 top->lost += event->lost_samples.lost;
870 top->lost_total += event->lost_samples.lost;
876 static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
878 struct record_opts *opts = &top->record_opts;
879 struct evlist *evlist = top->evlist;
894 ret = ordered_events__queue(top->qe.in, event, last_timestamp, 0, NULL);
900 if (top->qe.rotate) {
901 mutex_lock(&top->qe.mutex);
902 top->qe.rotate = false;
903 cond_signal(&top->qe.cond);
904 mutex_unlock(&top->qe.mutex);
911 static void perf_top__mmap_read(struct perf_top *top)
913 bool overwrite = top->record_opts.overwrite;
914 struct evlist *evlist = top->evlist;
920 for (i = 0; i < top->evlist->core.nr_mmaps; i++)
921 perf_top__mmap_read_idx(top, i);
931 * perf top should support consistent term for all events.
946 static int perf_top__overwrite_check(struct perf_top *top)
948 struct record_opts *opts = &top->record_opts;
949 struct evlist *evlist = top->evlist;
991 static int perf_top_overwrite_fallback(struct perf_top *top,
994 struct record_opts *opts = &top->record_opts;
995 struct evlist *evlist = top->evlist;
1012 static int perf_top__start_counters(struct perf_top *top)
1016 struct evlist *evlist = top->evlist;
1017 struct record_opts *opts = &top->record_opts;
1019 if (perf_top__overwrite_check(top)) {
1020 ui__error("perf top only support consistent per-event "
1034 * Because perf top is the only tool which has
1043 perf_top_overwrite_fallback(top, counter))
1089 static struct ordered_events *rotate_queues(struct perf_top *top)
1091 struct ordered_events *in = top->qe.in;
1093 if (top->qe.in == &top->qe.data[1])
1094 top->qe.in = &top->qe.data[0];
1096 top->qe.in = &top->qe.data[1];
1103 struct perf_top *top = arg;
1106 struct ordered_events *out, *in = top->qe.in;
1113 out = rotate_queues(top);
1115 mutex_lock(&top->qe.mutex);
1116 top->qe.rotate = true;
1117 cond_wait(&top->qe.cond, &top->qe.mutex);
1118 mutex_unlock(&top->qe.mutex);
1128 * Allow only 'top->delay_secs' seconds behind samples.
1130 static int should_drop(struct ordered_event *qevent, struct perf_top *top)
1138 delay_timestamp = qevent->timestamp + top->delay_secs * NSEC_PER_SEC;
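
should_drop() above implements the "at most delay_secs behind" policy noted in
its comment. A standalone sketch of the same check, where last_timestamp
models the newest sample time recorded by the mmap-read path (which queues
events with last_timestamp, as listed earlier):

#include <stdbool.h>
#include <stdint.h>

#define NSEC_PER_SEC	1000000000ULL

static uint64_t last_timestamp;		/* newest sample time seen so far */

static bool should_drop_sketch(uint64_t event_timestamp, int delay_secs)
{
	/* drop events that are already more than delay_secs behind */
	uint64_t deadline = event_timestamp + (uint64_t)delay_secs * NSEC_PER_SEC;

	return deadline < last_timestamp;
}
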
1145 struct perf_top *top = qe->data;
1146 struct evlist *evlist = top->evlist;
1147 struct perf_session *session = top->session;
1154 if (should_drop(qevent, top)) {
1155 top->drop++;
1156 top->drop_total++;
1170 if (evswitch__discard(&top->evswitch, evsel))
1172 ++top->samples;
1177 ++top->us_samples;
1178 if (top->hide_user_symbols)
1183 ++top->kernel_samples;
1184 if (top->hide_kernel_symbols)
1189 ++top->guest_kernel_samples;
1194 ++top->guest_us_samples;
1208 perf_event__process_sample(&top->tool, event, evsel,
1211 perf_top__process_lost(top, event, evsel);
1213 perf_top__process_lost_samples(top, event, evsel);
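
The counters and hide_* checks above classify each delivered sample by where
it was taken before it is (or is not) passed on to hist-entry accounting. A
sketch of that classification keyed off the cpumode bits in the event header;
toy_top is illustrative, the PERF_RECORD_MISC_* constants are the uapi ones,
and the guest cases are simplified here to plain counting:

#include <linux/perf_event.h>
#include <stdbool.h>
#include <stdint.h>

struct toy_top {
	uint64_t samples, us_samples, kernel_samples;
	uint64_t guest_us_samples, guest_kernel_samples;
	bool hide_user_symbols, hide_kernel_symbols;
};

/* Returns true when the sample should be accounted into the histograms. */
static bool toy_top__classify(struct toy_top *top, uint16_t misc)
{
	++top->samples;

	switch (misc & PERF_RECORD_MISC_CPUMODE_MASK) {
	case PERF_RECORD_MISC_USER:
		++top->us_samples;
		if (top->hide_user_symbols)
			return false;
		break;
	case PERF_RECORD_MISC_KERNEL:
		++top->kernel_samples;
		if (top->hide_kernel_symbols)
			return false;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		++top->guest_kernel_samples;
		return false;	/* simplified: only counted in this sketch */
	case PERF_RECORD_MISC_GUEST_USER:
		++top->guest_us_samples;
		return false;	/* simplified: only counted in this sketch */
	default:
		break;
	}
	return true;
}
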
1225 static void init_process_thread(struct perf_top *top)
1227 ordered_events__init(&top->qe.data[0], deliver_event, top);
1228 ordered_events__init(&top->qe.data[1], deliver_event, top);
1229 ordered_events__set_copy_on_queue(&top->qe.data[0], true);
1230 ordered_events__set_copy_on_queue(&top->qe.data[1], true);
1231 top->qe.in = &top->qe.data[0];
1232 mutex_init(&top->qe.mutex);
1233 cond_init(&top->qe.cond);
1236 static void exit_process_thread(struct perf_top *top)
1238 ordered_events__free(&top->qe.data[0]);
1239 ordered_events__free(&top->qe.data[1]);
1240 mutex_destroy(&top->qe.mutex);
1241 cond_destroy(&top->qe.cond);
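
rotate_queues(), process_thread() and init_process_thread() above form a
double-buffered hand-off: the mmap-reading side queues events into qe.in while
the processing thread flushes the other queue, and qe.rotate plus the
mutex/cond pair serialize the swap. A simplified standalone model of that
hand-off, using toy types and plain pthreads in place of perf's mutex/cond
wrappers:

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct toy_queue {
	int events[1024];
	size_t nr;
};

static struct {
	struct toy_queue data[2];
	struct toy_queue *in;	/* buffer the mmap-reading side currently fills */
	bool rotate;		/* processing thread asked for a hand-off */
	pthread_mutex_t mutex;
	pthread_cond_t cond;
} qe;

static void toy_qe_init(void)
{
	qe.in = &qe.data[0];
	pthread_mutex_init(&qe.mutex, NULL);
	pthread_cond_init(&qe.cond, NULL);
}

/* Reading side: called once per pass over the ring buffers. */
static void reader_ack_rotate(void)
{
	if (!qe.rotate)
		return;

	pthread_mutex_lock(&qe.mutex);
	qe.rotate = false;		/* acknowledge: we now fill the new qe.in */
	pthread_cond_signal(&qe.cond);
	pthread_mutex_unlock(&qe.mutex);
}

/* Processing side: take the filled buffer, hand the reader the other one. */
static struct toy_queue *processor_rotate(void)
{
	struct toy_queue *out = qe.in;

	qe.in = (out == &qe.data[0]) ? &qe.data[1] : &qe.data[0];

	pthread_mutex_lock(&qe.mutex);
	qe.rotate = true;
	while (qe.rotate)		/* wait for the reader's acknowledgement */
		pthread_cond_wait(&qe.cond, &qe.mutex);
	pthread_mutex_unlock(&qe.mutex);

	return out;			/* safe to flush: the reader moved on */
}
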
1244 static int __cmd_top(struct perf_top *top)
1246 struct record_opts *opts = &top->record_opts;
1251 ret = perf_env__lookup_objdump(&top->session->header.env,
1261 if (perf_session__register_idle_thread(top->session) < 0)
1264 if (top->nr_threads_synthesize > 1)
1267 init_process_thread(top);
1270 top->tool.namespace_events = true;
1273 top->tool.cgroup_events = true;
1280 ret = perf_event__synthesize_bpf_events(top->session, perf_event__process,
1281 &top->session->machines.host,
1282 &top->record_opts);
1286 ret = perf_event__synthesize_cgroups(&top->tool, perf_event__process,
1287 &top->session->machines.host);
1291 machine__synthesize_threads(&top->session->machines.host, &opts->target,
1292 top->evlist->core.threads, true, false,
1293 top->nr_threads_synthesize);
1308 evlist__uniquify_name(top->evlist);
1309 ret = perf_top__start_counters(top);
1313 top->session->evlist = top->evlist;
1314 perf_session__set_id_hdr_size(top->session);
1321 * XXX 'top' still doesn't start workloads like record, trace, but should,
1325 evlist__enable(top->evlist);
1328 if (pthread_create(&thread_process, NULL, process_thread, top)) {
1334 display_thread), top)) {
1339 if (top->realtime_prio) {
1342 param.sched_priority = top->realtime_prio;
1350 evlist__poll(top->evlist, 100);
1352 perf_top__mmap_read(top);
1355 u64 hits = top->samples;
1357 perf_top__mmap_read(top);
1359 if (opts->overwrite || (hits == top->samples))
1360 ret = evlist__poll(top->evlist, 100);
1363 perf_top__resize(top);
1372 cond_signal(&top->qe.cond);
1375 exit_process_thread(top);
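
The main sampling loop excerpted above (hits vs. top->samples, evlist__poll
with a 100 ms timeout) only blocks when a full read pass produced no new
samples, so a busy system is drained as fast as possible while an idle one
does not spin. A sketch of that pacing, with read_rings() and wait_for_data()
as hypothetical stand-ins for perf_top__mmap_read() and evlist__poll():

#include <stdbool.h>
#include <stdint.h>

static volatile bool done;
static uint64_t samples;	/* bumped for every delivered sample */

static void read_rings(void)
{
	/* stand-in: drain every mmap'ed ring buffer, bumping "samples" */
}

static void wait_for_data(int timeout_ms)
{
	/* stand-in: block until an event fd is readable or the timeout hits */
	(void)timeout_ms;
}

static void sampling_loop(bool overwrite_mode)
{
	while (!done) {
		uint64_t hits = samples;

		read_rings();

		/* nothing new (or overwrite mode): sleep in poll instead of spinning */
		if (overwrite_mode || hits == samples)
			wait_for_data(100);
	}
}
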
1408 if (!strcmp(var, "top.call-graph")) {
1412 if (!strcmp(var, "top.children")) {
1424 struct perf_top *top = opt->value;
1426 top->min_percent = strtof(arg, NULL);
1436 struct perf_top top = {
1462 .evlistp = &top.evlist,
1465 struct record_opts *opts = &top.record_opts;
1472 OPT_CALLBACK(0, "filter", &top.evlist, "filter",
1489 OPT_BOOLEAN('K', "hide_kernel_symbols", &top.hide_kernel_symbols,
1493 OPT_INTEGER('r', "realtime", &top.realtime_prio,
1495 OPT_INTEGER('d', "delay", &top.delay_secs,
1497 OPT_BOOLEAN('D', "dump-symtab", &top.dump_symtab,
1499 OPT_INTEGER('f', "count-filter", &top.count_filter,
1503 OPT_STRING(0, "sym-annotate", &top.sym_filter, "symbol name",
1505 OPT_BOOLEAN('z', "zero", &top.zero, "zero history across updates"),
1506 OPT_CALLBACK('F', "freq", &top.record_opts, "freq or 'max'",
1509 OPT_INTEGER('E', "entries", &top.print_entries,
1511 OPT_BOOLEAN('U', "hide_user_symbols", &top.hide_user_symbols,
1514 OPT_BOOLEAN(0, "tui", &top.use_tui, "Use the TUI interface"),
1516 OPT_BOOLEAN(0, "stdio", &top.use_stdio, "Use the stdio interface"),
1534 OPT_INTEGER(0, "max-stack", &top.max_stack,
1554 OPT_BOOLEAN(0, "no-bpf-event", &top.record_opts.no_bpf_event, "do not record bpf events"),
1566 OPT_CALLBACK(0, "percent-limit", &top, "percent",
1587 OPT_BOOLEAN(0, "overwrite", &top.record_opts.overwrite,
1590 OPT_UINTEGER(0, "num-thread-synthesize", &top.nr_threads_synthesize,
1592 OPT_CALLBACK('G', "cgroup", &top.evlist, "name",
1602 OPT_BOOLEAN(0, "stitch-lbr", &top.stitch_lbr,
1605 OPT_CALLBACK(0, "pfm-events", &top.evlist, "event",
1609 OPTS_EVSWITCH(&top.evswitch),
1613 "perf top [<options>]",
1626 top.evlist = evlist__new();
1627 if (top.evlist == NULL)
1630 status = perf_config(perf_top_config, &top);
1647 top.evlist->env = &perf_env;
1676 if (!top.evlist->core.nr_entries) {
1678 int err = parse_event(top.evlist, can_profile_kernel ? "cycles:P" : "cycles:Pu");
1684 status = evswitch__init(&top.evswitch, top.evlist, stderr);
1701 if (top.stitch_lbr && !(callchain_param.record_mode == CALLCHAIN_LBR)) {
1732 if (top.use_stdio)
1735 else if (top.use_tui)
1741 if (setup_sorting(top.evlist) < 0) {
1770 if (evlist__create_maps(top.evlist, target) < 0) {
1777 if (top.delay_secs < 1)
1778 top.delay_secs = 1;
1785 top.sym_evsel = evlist__first(top.evlist);
1808 get_term_dimensions(&top.winsize);
1809 if (top.print_entries == 0) {
1810 perf_top__update_print_entries(&top);
1814 top.session = perf_session__new(NULL, NULL);
1815 if (IS_ERR(top.session)) {
1816 status = PTR_ERR(top.session);
1817 top.session = NULL;
1822 if (!top.record_opts.no_bpf_event) {
1823 top.sb_evlist = evlist__new();
1825 if (top.sb_evlist == NULL) {
1831 if (evlist__add_bpf_sb_event(top.sb_evlist, &perf_env)) {
1839 if (evlist__start_sb_thread(top.sb_evlist, target)) {
1844 status = __cmd_top(&top);
1847 evlist__stop_sb_thread(top.sb_evlist);
1850 evlist__delete(top.evlist);
1851 perf_session__delete(top.session);