Lines matching +full:sample +full:- +full:time in builtin-kwork.c (Linux perf tools)

1 // SPDX-License-Identifier: GPL-2.0
3 * builtin-kwork.c
25 #include <subcmd/parse-options.h>
26 #include <traceevent/event-parse.h>
66 if (l->cpu > r->cpu) in id_cmp()
68 if (l->cpu < r->cpu) in id_cmp()
69 return -1; in id_cmp()
71 if (l->id > r->id) in id_cmp()
73 if (l->id < r->id) in id_cmp()
74 return -1; in id_cmp()
81 if (l->nr_atoms > r->nr_atoms) in count_cmp()
83 if (l->nr_atoms < r->nr_atoms) in count_cmp()
84 return -1; in count_cmp()
91 if (l->total_runtime > r->total_runtime) in runtime_cmp()
93 if (l->total_runtime < r->total_runtime) in runtime_cmp()
94 return -1; in runtime_cmp()
101 if (l->max_runtime > r->max_runtime) in max_runtime_cmp()
103 if (l->max_runtime < r->max_runtime) in max_runtime_cmp()
104 return -1; in max_runtime_cmp()
113 if (!r->nr_atoms) in avg_latency_cmp()
115 if (!l->nr_atoms) in avg_latency_cmp()
116 return -1; in avg_latency_cmp()
118 avgl = l->total_latency / l->nr_atoms; in avg_latency_cmp()
119 avgr = r->total_latency / r->nr_atoms; in avg_latency_cmp()
124 return -1; in avg_latency_cmp()
131 if (l->max_latency > r->max_latency) in max_latency_cmp()
133 if (l->max_latency < r->max_latency) in max_latency_cmp()
134 return -1; in max_latency_cmp()
141 if (l->cpu_usage > r->cpu_usage) in cpu_usage_cmp()
143 if (l->cpu_usage < r->cpu_usage) in cpu_usage_cmp()
144 return -1; in cpu_usage_cmp()
151 if (l->id < r->id) in id_or_cpu_r_cmp()
153 if (l->id > r->id) in id_or_cpu_r_cmp()
154 return -1; in id_or_cpu_r_cmp()
156 if (l->id != 0) in id_or_cpu_r_cmp()
159 if (l->cpu < r->cpu) in id_or_cpu_r_cmp()
161 if (l->cpu > r->cpu) in id_or_cpu_r_cmp()
162 return -1; in id_or_cpu_r_cmp()
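All of the *_cmp routines above share the same three-way comparator shape: return 1 or -1 on the first differing field, else fall through to 0 so that several sort keys can be chained. A minimal standalone sketch, with illustrative field names rather than the real struct kwork_work:

	#include <stdint.h>

	struct work_sketch {
		int cpu;
		uint64_t id;
	};

	/* Three-way compare on (cpu, id); callers chain comparators
	 * and stop at the first nonzero result. */
	static int id_cmp_sketch(const struct work_sketch *l,
				 const struct work_sketch *r)
	{
		if (l->cpu > r->cpu)
			return 1;
		if (l->cpu < r->cpu)
			return -1;
		if (l->id > r->id)
			return 1;
		if (l->id < r->id)
			return -1;
		return 0;
	}

Note that avg_latency_cmp() guards against a zero nr_atoms before dividing, so empty entries sort deterministically instead of faulting.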
209 if (kwork->report == KWORK_REPORT_LATENCY) in sort_dimension__add()
213 if (!strcmp(available_sorts[i]->name, tok)) { in sort_dimension__add()
214 list_add_tail(&available_sorts[i]->list, list); in sort_dimension__add()
219 return -1; in sort_dimension__add()
226 char *tmp, *tok, *str = strdup(kwork->sort_order); in setup_sorting()
230 if (sort_dimension__add(kwork, tok, &kwork->sort_list) < 0) in setup_sorting()
232 "Unknown --sort key: `%s'", tok); in setup_sorting()
235 pr_debug("Sort order: %s\n", kwork->sort_order); in setup_sorting()
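setup_sorting() tokenizes the comma-separated --sort string and resolves each token through sort_dimension__add(). A self-contained sketch of that loop, with a made-up key table standing in for available_sorts[]:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	static const char * const available_sorts_sketch[] = {
		"runtime", "max", "count", "avg",	/* illustrative keys */
	};

	static int add_sort_key(const char *tok)
	{
		size_t i;

		for (i = 0; i < sizeof(available_sorts_sketch) /
			    sizeof(available_sorts_sketch[0]); i++)
			if (!strcmp(available_sorts_sketch[i], tok))
				return 0;
		return -1;
	}

	int main(void)
	{
		char *tmp, *tok, *str = strdup("runtime, max, count");

		for (tok = strtok_r(str, ", ", &tmp); tok;
		     tok = strtok_r(NULL, ", ", &tmp))
			if (add_sort_key(tok) < 0)
				fprintf(stderr, "Unknown --sort key: `%s'\n", tok);
		free(str);
		return 0;
	}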
240 struct perf_sample *sample) in atom_new() argument
246 list_for_each_entry(page, &kwork->atom_page_list, list) { in atom_new()
247 if (!bitmap_full(page->bitmap, NR_ATOM_PER_PAGE)) { in atom_new()
248 i = find_first_zero_bit(page->bitmap, NR_ATOM_PER_PAGE); in atom_new()
250 atom = &page->atoms[i]; in atom_new()
265 atom = &page->atoms[0]; in atom_new()
266 list_add_tail(&page->list, &kwork->atom_page_list); in atom_new()
269 __set_bit(i, page->bitmap); in atom_new()
270 atom->time = sample->time; in atom_new()
271 atom->prev = NULL; in atom_new()
272 atom->page_addr = page; in atom_new()
273 atom->bit_inpage = i; in atom_new()
279 if (atom->prev != NULL) in atom_free()
280 atom_free(atom->prev); in atom_free()
282 __clear_bit(atom->bit_inpage, in atom_free()
283 ((struct kwork_atom_page *)atom->page_addr)->bitmap); in atom_free()
288 list_del(&atom->list); in atom_del()
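atom_new() carves atoms out of pages: each kwork_atom_page holds NR_ATOM_PER_PAGE slots tracked by a bitmap, allocation scans existing pages for the first clear bit before falling back to a fresh page, and atom_free() clears the bit it recorded at allocation time. A simplified single-word sketch (the real code uses the kernel bitmap helpers and a page list):

	#include <stddef.h>
	#include <stdint.h>

	#define NR_ATOM_PER_PAGE 64	/* one bitmap word, for the sketch */

	struct atom_sketch {
		uint64_t time;
		int bit_inpage;
		void *page_addr;
	};

	struct atom_page_sketch {
		uint64_t bitmap;
		struct atom_sketch atoms[NR_ATOM_PER_PAGE];
	};

	static struct atom_sketch *page_alloc_atom(struct atom_page_sketch *page,
						   uint64_t time)
	{
		int i;

		for (i = 0; i < NR_ATOM_PER_PAGE; i++) {
			if (!(page->bitmap & (1ULL << i))) {
				page->bitmap |= 1ULL << i;	/* __set_bit() */
				page->atoms[i].time = time;
				page->atoms[i].bit_inpage = i;
				page->atoms[i].page_addr = page;
				return &page->atoms[i];
			}
		}
		return NULL;	/* page full: caller allocates another page */
	}

	static void page_free_atom(struct atom_sketch *atom)
	{
		struct atom_page_sketch *page = atom->page_addr;

		page->bitmap &= ~(1ULL << atom->bit_inpage);	/* __clear_bit() */
	}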
301 ret = sort->cmp(l, r); in work_cmp()
315 struct rb_node *node = root->rb_root.rb_node; in work_search()
321 node = node->rb_left; in work_search()
323 node = node->rb_right; in work_search()
325 if (work->name == NULL) in work_search()
326 work->name = key->name; in work_search()
339 struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL; in work_insert()
347 new = &((*new)->rb_left); in work_insert()
349 new = &((*new)->rb_right); in work_insert()
354 rb_link_node(&key->node, parent, new); in work_insert()
355 rb_insert_color_cached(&key->node, root, leftmost); in work_insert()
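work_search() and work_insert() walk a cached rbtree (rb_root_cached) using the chained comparator list; the insert tracks whether it ever descended right so the cached leftmost (minimum) pointer can be refreshed in the same pass. A plain-BST sketch of that shape, with illustrative types and no rebalancing:

	#include <stddef.h>

	struct node_sketch {
		long key;
		struct node_sketch *left, *right;
	};

	struct root_cached_sketch {
		struct node_sketch *root;
		struct node_sketch *leftmost;	/* cached minimum */
	};

	static void insert_cached(struct root_cached_sketch *tree,
				  struct node_sketch *n)
	{
		struct node_sketch **link = &tree->root;
		int leftmost = 1;

		while (*link) {
			if (n->key < (*link)->key) {
				link = &(*link)->left;
			} else {
				link = &(*link)->right;
				leftmost = 0;	/* took a right turn */
			}
		}
		n->left = n->right = NULL;
		*link = n;
		if (leftmost)
			tree->leftmost = n;	/* rb_insert_color_cached() keeps this */
	}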
369 INIT_LIST_HEAD(&work->atom_list[i]); in work_new()
371 work->id = key->id; in work_new()
372 work->cpu = key->cpu; in work_new()
373 work->name = key->name; in work_new()
374 work->class = key->class; in work_new()
395 struct perf_sample *sample) in profile_update_timespan() argument
397 if (!kwork->summary) in profile_update_timespan()
400 if ((kwork->timestart == 0) || (kwork->timestart > sample->time)) in profile_update_timespan()
401 kwork->timestart = sample->time; in profile_update_timespan()
403 if (kwork->timeend < sample->time) in profile_update_timespan()
404 kwork->timeend = sample->time; in profile_update_timespan()
410 if (kwork->profile_name && work->name && in profile_name_match()
411 (strcmp(work->name, kwork->profile_name) != 0)) { in profile_name_match()
420 struct perf_sample *sample) in profile_event_match() argument
422 int cpu = work->cpu; in profile_event_match()
423 u64 time = sample->time; in profile_event_match() local
424 struct perf_time_interval *ptime = &kwork->ptime; in profile_event_match()
426 if ((kwork->cpu_list != NULL) && !test_bit(cpu, kwork->cpu_bitmap)) in profile_event_match()
429 if (((ptime->start != 0) && (ptime->start > time)) || in profile_event_match()
430 ((ptime->end != 0) && (ptime->end < time))) in profile_event_match()
437 if ((kwork->report != KWORK_REPORT_TOP) && in profile_event_match()
442 profile_update_timespan(kwork, sample); in profile_event_match()
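profile_event_match() is the central sample filter: the CPU must be in the -C bitmap, the timestamp must fall inside the --time window, and (except for the top report) the work name must match --name. A standalone sketch of the first two checks, assuming at most 64 CPUs so one word can stand in for cpu_bitmap:

	#include <stdbool.h>
	#include <stdint.h>

	struct ptime_sketch {
		uint64_t start, end;	/* 0 means unbounded */
	};

	static bool event_match_sketch(uint64_t cpu_bitmap, int cpu,
				       const struct ptime_sketch *ptime,
				       uint64_t time)
	{
		if (cpu_bitmap && !(cpu_bitmap & (1ULL << cpu)))
			return false;
		if ((ptime->start && ptime->start > time) ||
		    (ptime->end && ptime->end < time))
			return false;
		return true;
	}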
451 struct perf_sample *sample, in work_push_atom() argument
459 BUG_ON(class->work_init == NULL); in work_push_atom()
460 class->work_init(kwork, class, &key, src_type, evsel, sample, machine); in work_push_atom()
462 atom = atom_new(kwork, sample); in work_push_atom()
464 return -1; in work_push_atom()
466 work = work_findnew(&class->work_root, &key, &kwork->cmp_id); in work_push_atom()
469 return -1; in work_push_atom()
472 if (!profile_event_match(kwork, work, sample)) { in work_push_atom()
478 dst_atom = list_last_entry_or_null(&work->atom_list[dst_type], in work_push_atom()
481 atom->prev = dst_atom; in work_push_atom()
482 list_del(&dst_atom->list); in work_push_atom()
490 last_atom = list_last_entry_or_null(&work->atom_list[src_type], in work_push_atom()
495 kwork->nr_skipped_events[src_type]++; in work_push_atom()
496 kwork->nr_skipped_events[KWORK_TRACE_MAX]++; in work_push_atom()
500 list_add_tail(&atom->list, &work->atom_list[src_type]); in work_push_atom()
510 struct perf_sample *sample, in work_pop_atom() argument
517 BUG_ON(class->work_init == NULL); in work_pop_atom()
518 class->work_init(kwork, class, &key, src_type, evsel, sample, machine); in work_pop_atom()
520 work = work_findnew(&class->work_root, &key, &kwork->cmp_id); in work_pop_atom()
527 if (!profile_event_match(kwork, work, sample)) in work_pop_atom()
530 atom = list_last_entry_or_null(&work->atom_list[dst_type], in work_pop_atom()
535 src_atom = atom_new(kwork, sample); in work_pop_atom()
537 list_add_tail(&src_atom->list, &work->atom_list[src_type]); in work_pop_atom()
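work_push_atom() and work_pop_atom() implement the event-pairing state machine: each work item keeps one atom list per trace stage (raise, entry, exit), a new event queues a timestamped atom on its own list, and the matching later event detaches the pending one so a delta can be computed. A heavily simplified two-stage sketch using a singly linked list:

	#include <stdint.h>
	#include <stdlib.h>

	struct atom_sk {
		uint64_t time;
		struct atom_sk *next;
	};

	struct work_sk {
		struct atom_sk *entry_list;	/* pending, unpaired entries */
		uint64_t nr_skipped;
	};

	static int push_entry_atom(struct work_sk *w, uint64_t time)
	{
		struct atom_sk *a = calloc(1, sizeof(*a));

		if (!a)
			return -1;
		a->time = time;
		a->next = w->entry_list;
		w->entry_list = a;
		return 0;
	}

	/* Pair an exit with the pending entry; an unmatched event is
	 * counted as skipped, as the source does via nr_skipped_events. */
	static uint64_t pop_entry_atom(struct work_sk *w, uint64_t exit_time)
	{
		struct atom_sk *a = w->entry_list;
		uint64_t delta;

		if (!a) {
			w->nr_skipped++;
			return 0;
		}
		w->entry_list = a->next;
		delta = exit_time - a->time;
		free(a);
		return delta;
	}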
555 if ((cpu != -1 && work->id == id && work->cpu == cpu) || in find_work_by_id()
556 (cpu == -1 && work->id == id)) in find_work_by_id()
570 list_for_each_entry(class, &kwork->class_list, list) { in get_kwork_class()
571 if (class->type == type) in get_kwork_class()
580 struct perf_sample *sample) in report_update_exit_event() argument
583 u64 exit_time = sample->time; in report_update_exit_event()
584 u64 entry_time = atom->time; in report_update_exit_event()
587 delta = exit_time - entry_time; in report_update_exit_event()
588 if ((delta > work->max_runtime) || in report_update_exit_event()
589 (work->max_runtime == 0)) { in report_update_exit_event()
590 work->max_runtime = delta; in report_update_exit_event()
591 work->max_runtime_start = entry_time; in report_update_exit_event()
592 work->max_runtime_end = exit_time; in report_update_exit_event()
594 work->total_runtime += delta; in report_update_exit_event()
595 work->nr_atoms++; in report_update_exit_event()
602 struct perf_sample *sample, in report_entry_event() argument
606 KWORK_TRACE_MAX, evsel, sample, in report_entry_event()
613 struct perf_sample *sample, in report_exit_event() argument
620 KWORK_TRACE_ENTRY, evsel, sample, in report_exit_event()
623 return -1; in report_exit_event()
626 report_update_exit_event(work, atom, sample); in report_exit_event()
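report_update_exit_event() turns each entry/exit pair into runtime statistics: a running total, a count, and the start/end window of the largest single runtime; latency_update_entry_event() just below repeats the same shape for the raise-to-entry delay. The accounting, distilled:

	#include <stdint.h>

	struct stat_sketch {
		uint64_t total, nr;
		uint64_t max, max_start, max_end;
	};

	static void account_delta(struct stat_sketch *s,
				  uint64_t start, uint64_t end)
	{
		uint64_t delta = end - start;

		if (delta > s->max || s->max == 0) {
			s->max = delta;
			s->max_start = start;	/* window of the worst case */
			s->max_end = end;
		}
		s->total += delta;
		s->nr++;
	}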
635 struct perf_sample *sample) in latency_update_entry_event() argument
638 u64 entry_time = sample->time; in latency_update_entry_event()
639 u64 raise_time = atom->time; in latency_update_entry_event()
642 delta = entry_time - raise_time; in latency_update_entry_event()
643 if ((delta > work->max_latency) || in latency_update_entry_event()
644 (work->max_latency == 0)) { in latency_update_entry_event()
645 work->max_latency = delta; in latency_update_entry_event()
646 work->max_latency_start = raise_time; in latency_update_entry_event()
647 work->max_latency_end = entry_time; in latency_update_entry_event()
649 work->total_latency += delta; in latency_update_entry_event()
650 work->nr_atoms++; in latency_update_entry_event()
657 struct perf_sample *sample, in latency_raise_event() argument
661 KWORK_TRACE_MAX, evsel, sample, in latency_raise_event()
668 struct perf_sample *sample, in latency_entry_event() argument
675 KWORK_TRACE_RAISE, evsel, sample, in latency_entry_event()
678 return -1; in latency_entry_event()
681 latency_update_entry_event(work, atom, sample); in latency_entry_event()
689 struct perf_sample *sample, in timehist_save_callchain() argument
698 if (!kwork->show_callchain || sample->callchain == NULL) in timehist_save_callchain()
701 /* want main thread for process - has maps */ in timehist_save_callchain()
702 thread = machine__findnew_thread(machine, sample->pid, sample->pid); in timehist_save_callchain()
704 pr_debug("Failed to get thread for pid %d\n", sample->pid); in timehist_save_callchain()
710 if (thread__resolve_callchain(thread, cursor, evsel, sample, in timehist_save_callchain()
711 NULL, NULL, kwork->max_stack + 2) != 0) { in timehist_save_callchain()
723 sym = node->ms.sym; in timehist_save_callchain()
725 if (!strcmp(sym->name, "__softirqentry_text_start") || in timehist_save_callchain()
726 !strcmp(sym->name, "__do_softirq")) in timehist_save_callchain()
727 sym->ignore = 1; in timehist_save_callchain()
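timehist_save_callchain() resolves the sample's callchain against the process's main thread and then hides the softirq entry stubs so printed stacks start at the interesting frame. A sketch of just that symbol filter, over a hypothetical resolved-symbol array:

	#include <string.h>

	struct sym_sketch {
		const char *name;
		int ignore;
	};

	static void hide_softirq_stubs(struct sym_sketch *syms, int nr)
	{
		int i;

		for (i = 0; i < nr; i++) {
			if (!strcmp(syms[i].name, "__softirqentry_text_start") ||
			    !strcmp(syms[i].name, "__do_softirq"))
				syms[i].ignore = 1;	/* skipped when printing */
		}
	}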
740 struct perf_sample *sample, in timehist_print_event() argument
749 timestamp__scnprintf_usec(atom->time, in timehist_print_event()
756 timestamp__scnprintf_usec(sample->time, in timehist_print_event()
763 printf(" [%0*d] ", PRINT_CPU_WIDTH, work->cpu); in timehist_print_event()
768 if (work->class && work->class->work_name) { in timehist_print_event()
769 work->class->work_name(work, kwork_name, in timehist_print_event()
771 printf(" %-*s ", PRINT_KWORK_NAME_WIDTH, kwork_name); in timehist_print_event()
773 printf(" %-*s ", PRINT_KWORK_NAME_WIDTH, ""); in timehist_print_event()
780 (double)(sample->time - atom->time) / NSEC_PER_MSEC); in timehist_print_event()
785 if (atom->prev != NULL) in timehist_print_event()
787 (double)(atom->time - atom->prev->time) / NSEC_PER_MSEC); in timehist_print_event()
794 if (kwork->show_callchain) { in timehist_print_event()
802 sample__fprintf_sym(sample, al, 0, in timehist_print_event()
816 struct perf_sample *sample, in timehist_raise_event() argument
820 KWORK_TRACE_MAX, evsel, sample, in timehist_raise_event()
827 struct perf_sample *sample, in timehist_entry_event() argument
834 KWORK_TRACE_RAISE, evsel, sample, in timehist_entry_event()
840 timehist_save_callchain(kwork, sample, evsel, machine); in timehist_entry_event()
848 struct perf_sample *sample, in timehist_exit_event() argument
857 if (machine__resolve(machine, &al, sample) < 0) { in timehist_exit_event()
859 ret = -1; in timehist_exit_event()
864 KWORK_TRACE_ENTRY, evsel, sample, in timehist_exit_event()
867 ret = -1; in timehist_exit_event()
872 work->nr_atoms++; in timehist_exit_event()
873 timehist_print_event(kwork, work, atom, sample, &al); in timehist_exit_event()
884 struct perf_sample *sample) in top_update_runtime() argument
887 u64 exit_time = sample->time; in top_update_runtime()
888 u64 entry_time = atom->time; in top_update_runtime()
891 delta = exit_time - entry_time; in top_update_runtime()
892 work->total_runtime += delta; in top_update_runtime()
899 struct perf_sample *sample, in top_entry_event() argument
903 KWORK_TRACE_MAX, evsel, sample, in top_entry_event()
910 struct perf_sample *sample, in top_exit_event() argument
918 KWORK_TRACE_ENTRY, evsel, sample, in top_exit_event()
921 return -1; in top_exit_event()
926 sched_work = find_work_by_id(&sched_class->work_root, in top_exit_event()
927 work->id, work->cpu); in top_exit_event()
929 top_update_runtime(work, atom, sample); in top_exit_event()
940 struct perf_sample *sample, in top_sched_switch_event() argument
947 KWORK_TRACE_ENTRY, evsel, sample, in top_sched_switch_event()
950 return -1; in top_sched_switch_event()
953 top_update_runtime(work, atom, sample); in top_sched_switch_event()
957 return top_entry_event(kwork, class, evsel, sample, machine); in top_sched_switch_event()
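For the top report, sched_switch does double duty: it closes the running atom of the task being switched out (top_update_runtime()) and opens one for the task being switched in (top_entry_event()). The per-CPU slice bookkeeping, reduced to a sketch:

	#include <stdint.h>

	struct cpu_slice_sketch {
		uint64_t start;		/* when the current task got the CPU */
		uint64_t *acct;		/* where its runtime accumulates */
	};

	static void sched_switch_sketch(struct cpu_slice_sketch *cpu,
					uint64_t now, uint64_t *next_acct)
	{
		if (cpu->acct)
			*cpu->acct += now - cpu->start;	/* charge outgoing task */
		cpu->start = now;			/* incoming task starts */
		cpu->acct = next_acct;
	}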
963 struct perf_sample *sample, in process_irq_handler_entry_event() argument
968 if (kwork->tp_handler->entry_event) in process_irq_handler_entry_event()
969 return kwork->tp_handler->entry_event(kwork, &kwork_irq, in process_irq_handler_entry_event()
970 evsel, sample, machine); in process_irq_handler_entry_event()
976 struct perf_sample *sample, in process_irq_handler_exit_event() argument
981 if (kwork->tp_handler->exit_event) in process_irq_handler_exit_event()
982 return kwork->tp_handler->exit_event(kwork, &kwork_irq, in process_irq_handler_exit_event()
983 evsel, sample, machine); in process_irq_handler_exit_event()
997 return -1; in irq_class_init()
1000 class->work_root = RB_ROOT_CACHED; in irq_class_init()
1009 struct perf_sample *sample, in irq_work_init() argument
1012 work->class = class; in irq_work_init()
1013 work->cpu = sample->cpu; in irq_work_init()
1015 if (kwork->report == KWORK_REPORT_TOP) { in irq_work_init()
1016 work->id = evsel__intval_common(evsel, sample, "common_pid"); in irq_work_init()
1017 work->name = NULL; in irq_work_init()
1019 work->id = evsel__intval(evsel, sample, "irq"); in irq_work_init()
1020 work->name = evsel__strval(evsel, sample, "name"); in irq_work_init()
1026 snprintf(buf, len, "%s:%" PRIu64 "", work->name, work->id); in irq_work_name()
1042 struct perf_sample *sample, in process_softirq_raise_event() argument
1047 if (kwork->tp_handler->raise_event) in process_softirq_raise_event()
1048 return kwork->tp_handler->raise_event(kwork, &kwork_softirq, in process_softirq_raise_event()
1049 evsel, sample, machine); in process_softirq_raise_event()
1056 struct perf_sample *sample, in process_softirq_entry_event() argument
1061 if (kwork->tp_handler->entry_event) in process_softirq_entry_event()
1062 return kwork->tp_handler->entry_event(kwork, &kwork_softirq, in process_softirq_entry_event()
1063 evsel, sample, machine); in process_softirq_entry_event()
1070 struct perf_sample *sample, in process_softirq_exit_event() argument
1075 if (kwork->tp_handler->exit_event) in process_softirq_exit_event()
1076 return kwork->tp_handler->exit_event(kwork, &kwork_softirq, in process_softirq_exit_event()
1077 evsel, sample, machine); in process_softirq_exit_event()
1094 return -1; in softirq_class_init()
1097 class->work_root = RB_ROOT_CACHED; in softirq_class_init()
1106 struct tep_print_arg *args = evsel->tp_format->print_fmt.args; in evsel__softirq_name()
1108 if ((args == NULL) || (args->next == NULL)) in evsel__softirq_name()
1111 /* skip softirq field: "REC->vec" */ in evsel__softirq_name()
1112 for (sym = args->next->symbol.symbols; sym != NULL; sym = sym->next) { in evsel__softirq_name()
1113 if ((eval_flag(sym->value) == (unsigned long long)num) && in evsel__softirq_name()
1114 (strlen(sym->str) != 0)) { in evsel__softirq_name()
1123 name = strdup(sym->str); in evsel__softirq_name()
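evsel__softirq_name() recovers the vec-to-name mapping from the tracepoint's own print format (the symbolic array behind "REC->vec") rather than hardcoding it. A simplified stand-in using the fixed table from include/trace/events/irq.h:

	#include <stddef.h>

	static const char * const softirq_names_sketch[] = {
		"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK",
		"IRQ_POLL", "TASKLET", "SCHED", "HRTIMER", "RCU",
	};

	static const char *softirq_name_sketch(unsigned int vec)
	{
		if (vec >= sizeof(softirq_names_sketch) /
			   sizeof(softirq_names_sketch[0]))
			return NULL;
		return softirq_names_sketch[vec];
	}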
1136 struct perf_sample *sample, in softirq_work_init() argument
1141 work->class = class; in softirq_work_init()
1142 work->cpu = sample->cpu; in softirq_work_init()
1144 if (kwork->report == KWORK_REPORT_TOP) { in softirq_work_init()
1145 work->id = evsel__intval_common(evsel, sample, "common_pid"); in softirq_work_init()
1146 work->name = NULL; in softirq_work_init()
1148 num = evsel__intval(evsel, sample, "vec"); in softirq_work_init()
1149 work->id = num; in softirq_work_init()
1150 work->name = evsel__softirq_name(evsel, num); in softirq_work_init()
1156 snprintf(buf, len, "(s)%s:%" PRIu64 "", work->name, work->id); in softirq_work_name()
1172 struct perf_sample *sample, in process_workqueue_activate_work_event() argument
1177 if (kwork->tp_handler->raise_event) in process_workqueue_activate_work_event()
1178 return kwork->tp_handler->raise_event(kwork, &kwork_workqueue, in process_workqueue_activate_work_event()
1179 evsel, sample, machine); in process_workqueue_activate_work_event()
1186 struct perf_sample *sample, in process_workqueue_execute_start_event() argument
1191 if (kwork->tp_handler->entry_event) in process_workqueue_execute_start_event()
1192 return kwork->tp_handler->entry_event(kwork, &kwork_workqueue, in process_workqueue_execute_start_event()
1193 evsel, sample, machine); in process_workqueue_execute_start_event()
1200 struct perf_sample *sample, in process_workqueue_execute_end_event() argument
1205 if (kwork->tp_handler->exit_event) in process_workqueue_execute_end_event()
1206 return kwork->tp_handler->exit_event(kwork, &kwork_workqueue, in process_workqueue_execute_end_event()
1207 evsel, sample, machine); in process_workqueue_execute_end_event()
1224 return -1; in workqueue_class_init()
1227 class->work_root = RB_ROOT_CACHED; in workqueue_class_init()
1236 struct perf_sample *sample, in workqueue_work_init() argument
1241 sample, "function"); in workqueue_work_init()
1243 work->class = class; in workqueue_work_init()
1244 work->cpu = sample->cpu; in workqueue_work_init()
1245 work->id = evsel__intval(evsel, sample, "work"); in workqueue_work_init()
1246 work->name = function_addr == 0 ? NULL : in workqueue_work_init()
1252 if (work->name != NULL) in workqueue_work_name()
1253 snprintf(buf, len, "(w)%s", work->name); in workqueue_work_name()
1255 snprintf(buf, len, "(w)0x%" PRIx64, work->id); in workqueue_work_name()
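Each class formats its work name with a distinguishing prefix: plain "name:id" for hardirq, "(s)name:id" for softirq, and "(w)name" for workqueue, falling back to the work address when the function could not be resolved. The three formatters side by side, as a sketch:

	#include <inttypes.h>
	#include <stdio.h>

	struct named_work_sketch {
		const char *name;
		uint64_t id;
	};

	static void irq_name_sk(const struct named_work_sketch *w,
				char *buf, int len)
	{
		snprintf(buf, len, "%s:%" PRIu64, w->name, w->id);
	}

	static void softirq_name_sk(const struct named_work_sketch *w,
				    char *buf, int len)
	{
		snprintf(buf, len, "(s)%s:%" PRIu64, w->name, w->id);
	}

	static void workqueue_name_sk(const struct named_work_sketch *w,
				      char *buf, int len)
	{
		if (w->name)
			snprintf(buf, len, "(w)%s", w->name);
		else
			snprintf(buf, len, "(w)0x%" PRIx64, w->id);
	}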
1271 struct perf_sample *sample, in process_sched_switch_event() argument
1276 if (kwork->tp_handler->sched_switch_event) in process_sched_switch_event()
1277 return kwork->tp_handler->sched_switch_event(kwork, &kwork_sched, in process_sched_switch_event()
1278 evsel, sample, machine); in process_sched_switch_event()
1292 return -1; in sched_class_init()
1295 class->work_root = RB_ROOT_CACHED; in sched_class_init()
1304 struct perf_sample *sample, in sched_work_init() argument
1307 work->class = class; in sched_work_init()
1308 work->cpu = sample->cpu; in sched_work_init()
1311 work->id = evsel__intval(evsel, sample, "prev_pid"); in sched_work_init()
1312 work->name = strdup(evsel__strval(evsel, sample, "prev_comm")); in sched_work_init()
1314 work->id = evsel__intval(evsel, sample, "next_pid"); in sched_work_init()
1315 work->name = strdup(evsel__strval(evsel, sample, "next_comm")); in sched_work_init()
1321 snprintf(buf, len, "%s", work->name); in sched_work_name()
1358 if (work->class && work->class->work_name) { in report_print_work()
1359 work->class->work_name(work, kwork_name, in report_print_work()
1361 ret += printf(" %-*s |", PRINT_KWORK_NAME_WIDTH, kwork_name); in report_print_work()
1363 ret += printf(" %-*s |", PRINT_KWORK_NAME_WIDTH, ""); in report_print_work()
1369 ret += printf(" %0*d |", PRINT_CPU_WIDTH, work->cpu); in report_print_work()
1374 if (kwork->report == KWORK_REPORT_RUNTIME) { in report_print_work()
1377 (double)work->total_runtime / NSEC_PER_MSEC); in report_print_work()
1378 } else if (kwork->report == KWORK_REPORT_LATENCY) { // avg delay in report_print_work()
1381 (double)work->total_latency / in report_print_work()
1382 work->nr_atoms / NSEC_PER_MSEC); in report_print_work()
1388 ret += printf(" %*" PRIu64 " |", PRINT_COUNT_WIDTH, work->nr_atoms); in report_print_work()
1393 if (kwork->report == KWORK_REPORT_RUNTIME) { in report_print_work()
1394 timestamp__scnprintf_usec(work->max_runtime_start, in report_print_work()
1397 timestamp__scnprintf_usec(work->max_runtime_end, in report_print_work()
1402 (double)work->max_runtime / NSEC_PER_MSEC, in report_print_work()
1409 else if (kwork->report == KWORK_REPORT_LATENCY) { in report_print_work()
1410 timestamp__scnprintf_usec(work->max_latency_start, in report_print_work()
1413 timestamp__scnprintf_usec(work->max_latency_end, in report_print_work()
1418 (double)work->max_latency / NSEC_PER_MSEC, in report_print_work()
1432 ret = printf(" %-*s | %-*s |", in report_print_header()
1436 if (kwork->report == KWORK_REPORT_RUNTIME) { in report_print_header()
1437 ret += printf(" %-*s |", in report_print_header()
1439 } else if (kwork->report == KWORK_REPORT_LATENCY) { in report_print_header()
1440 ret += printf(" %-*s |", in report_print_header()
1444 ret += printf(" %-*s |", PRINT_COUNT_WIDTH, "Count"); in report_print_header()
1446 if (kwork->report == KWORK_REPORT_RUNTIME) { in report_print_header()
1447 ret += printf(" %-*s | %-*s | %-*s |", in report_print_header()
1451 } else if (kwork->report == KWORK_REPORT_LATENCY) { in report_print_header()
1452 ret += printf(" %-*s | %-*s | %-*s |", in report_print_header()
1468 printf(" %-*s %-*s %-*s %-*s %-*s %-*s\n", in timehist_print_header()
1479 printf(" %-*s %-*s %-*s %-*s %-*s %-*s\n", in timehist_print_header()
1501 u64 time = kwork->timeend - kwork->timestart; in print_summary() local
1503 printf(" Total count : %9" PRIu64 "\n", kwork->all_count); in print_summary()
1505 (double)kwork->all_runtime / NSEC_PER_MSEC, in print_summary()
1506 time == 0 ? 0 : (double)kwork->all_runtime / time); in print_summary()
1507 printf(" Total time span (msec) : %9.3f\n", in print_summary()
1508 (double)time / NSEC_PER_MSEC); in print_summary()
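The summary's average load is all accumulated runtime divided by the observed time span (both in nanoseconds), with a guard for an empty span. As a one-liner:

	#include <stdint.h>

	static double summary_avg_load(uint64_t all_runtime,
				       uint64_t timestart, uint64_t timeend)
	{
		uint64_t span = timeend - timestart;

		return span ? (double)all_runtime / span : 0.0;
	}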
1531 if ((kwork->nr_skipped_events[KWORK_TRACE_MAX] != 0) && in print_skipped_events()
1532 (kwork->nr_events != 0)) { in print_skipped_events()
1534 (double)kwork->nr_skipped_events[KWORK_TRACE_MAX] / in print_skipped_events()
1535 (double)kwork->nr_events * 100.0, in print_skipped_events()
1536 kwork->nr_skipped_events[KWORK_TRACE_MAX]); in print_skipped_events()
1540 kwork->nr_skipped_events[i], in print_skipped_events()
1542 (i == KWORK_TRACE_MAX - 1) ? ")\n" : ", "); in print_skipped_events()
1548 nr_list_entry(&kwork->atom_page_list)); in print_skipped_events()
1553 if ((kwork->nr_lost_events != 0) && (kwork->nr_events != 0)) { in print_bad_events()
1555 (double)kwork->nr_lost_events / in print_bad_events()
1556 (double)kwork->nr_events * 100.0, in print_bad_events()
1557 kwork->nr_lost_events, kwork->nr_events, in print_bad_events()
1558 kwork->nr_lost_chunks); in print_bad_events()
1568 struct kwork_top_stat *stat = &kwork->top_stat; in top_print_per_cpu_load()
1571 total = stat->cpus_runtime[i].total; in top_print_per_cpu_load()
1572 load = stat->cpus_runtime[i].load; in top_print_per_cpu_load()
1573 if (test_bit(i, stat->all_cpus_bitmap) && total) { in top_print_per_cpu_load()
1578 printf("%%Cpu%-*d[%.*s%.*s %*.*f%%]\n", in top_print_per_cpu_load()
1581 PRINT_CPU_USAGE_HIST_WIDTH - load_width, in top_print_per_cpu_load()
1592 struct kwork_top_stat *stat = &kwork->top_stat; in top_print_cpu_usage()
1593 u64 idle_time = stat->cpus_runtime[MAX_NR_CPUS].idle; in top_print_cpu_usage()
1594 u64 hardirq_time = stat->cpus_runtime[MAX_NR_CPUS].irq; in top_print_cpu_usage()
1595 u64 softirq_time = stat->cpus_runtime[MAX_NR_CPUS].softirq; in top_print_cpu_usage()
1596 int cpus_nr = bitmap_weight(stat->all_cpus_bitmap, MAX_NR_CPUS); in top_print_cpu_usage()
1597 u64 cpus_total_time = stat->cpus_runtime[MAX_NR_CPUS].total; in top_print_cpu_usage()
1622 ret = printf(" %*s %s%*s%s %*s %*s %-*s", in top_print_header()
1625 kwork->use_bpf ? " " : "", in top_print_header()
1626 kwork->use_bpf ? PRINT_PID_WIDTH : 0, in top_print_header()
1627 kwork->use_bpf ? "SPID" : "", in top_print_header()
1628 kwork->use_bpf ? " " : "", in top_print_header()
1646 ret += printf(" %*" PRIu64 " ", PRINT_PID_WIDTH, work->id); in top_print_work()
1651 if (kwork->use_bpf) in top_print_work()
1652 ret += printf(" %*d ", PRINT_PID_WIDTH, work->tgid); in top_print_work()
1659 (double)work->cpu_usage / 100); in top_print_work()
1666 (double)work->total_runtime / NSEC_PER_MSEC); in top_print_work()
1671 if (kwork->use_bpf) in top_print_work()
1673 work->is_kthread ? "[" : "", in top_print_work()
1674 work->name, in top_print_work()
1675 work->is_kthread ? "]" : ""); in top_print_work()
1677 ret += printf(" %-*s", PRINT_TASK_NAME_WIDTH, work->name); in top_print_work()
1689 pr_debug("Sorting %s ...\n", class->name); in work_sort()
1697 work_insert(&kwork->sorted_work_root, in work_sort()
1698 data, &kwork->sort_list); in work_sort()
1706 list_for_each_entry(class, &kwork->class_list, list) in perf_kwork__sort()
1707 work_sort(kwork, class, &class->work_root); in perf_kwork__sort()
1736 switch (kwork->report) { in perf_kwork__check_config()
1738 kwork->tp_handler = &report_ops; in perf_kwork__check_config()
1741 kwork->tp_handler = &latency_ops; in perf_kwork__check_config()
1744 kwork->tp_handler = &timehist_ops; in perf_kwork__check_config()
1747 kwork->tp_handler = &top_ops; in perf_kwork__check_config()
1750 pr_debug("Invalid report type %d\n", kwork->report); in perf_kwork__check_config()
1751 return -1; in perf_kwork__check_config()
1754 list_for_each_entry(class, &kwork->class_list, list) in perf_kwork__check_config()
1755 if ((class->class_init != NULL) && in perf_kwork__check_config()
1756 (class->class_init(class, session) != 0)) in perf_kwork__check_config()
1757 return -1; in perf_kwork__check_config()
1759 if (kwork->cpu_list != NULL) { in perf_kwork__check_config()
1761 kwork->cpu_list, in perf_kwork__check_config()
1762 kwork->cpu_bitmap); in perf_kwork__check_config()
1765 return -1; in perf_kwork__check_config()
1769 if (kwork->time_str != NULL) { in perf_kwork__check_config()
1770 ret = perf_time__parse_str(&kwork->ptime, kwork->time_str); in perf_kwork__check_config()
1772 pr_err("Invalid time span\n"); in perf_kwork__check_config()
1773 return -1; in perf_kwork__check_config()
1777 list_for_each_entry(evsel, &session->evlist->core.entries, core.node) { in perf_kwork__check_config()
1778 if (kwork->show_callchain && !evsel__has_callchain(evsel)) { in perf_kwork__check_config()
1780 kwork->show_callchain = 0; in perf_kwork__check_config()
1790 int ret = -1; in perf_kwork__read_events()
1796 .force = kwork->force, in perf_kwork__read_events()
1799 session = perf_session__new(&data, &kwork->tool); in perf_kwork__read_events()
1805 symbol__init(&session->header.env); in perf_kwork__read_events()
1810 if (session->tevent.pevent && in perf_kwork__read_events()
1811 tep_set_function_resolver(session->tevent.pevent, in perf_kwork__read_events()
1813 &session->machines.host) < 0) { in perf_kwork__read_events()
1818 if (kwork->report == KWORK_REPORT_TIMEHIST) in perf_kwork__read_events()
1827 kwork->nr_events = session->evlist->stats.nr_events[0]; in perf_kwork__read_events()
1828 kwork->nr_lost_events = session->evlist->stats.total_lost; in perf_kwork__read_events()
1829 kwork->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST]; in perf_kwork__read_events()
1843 count = nr_list_entry(&work->atom_list[i]); in process_skipped_events()
1844 kwork->nr_skipped_events[i] += count; in process_skipped_events()
1845 kwork->nr_skipped_events[KWORK_TRACE_MAX] += count; in process_skipped_events()
1859 work_insert(&class->work_root, work, &kwork->cmp_id); in perf_kwork_add_work()
1881 return -1; in perf_kwork__report_bpf()
1907 if (kwork->use_bpf) in perf_kwork__report()
1913 return -1; in perf_kwork__report()
1920 next = rb_first_cached(&kwork->sorted_work_root); in perf_kwork__report()
1925 if (work->nr_atoms != 0) { in perf_kwork__report()
1927 if (kwork->summary) { in perf_kwork__report()
1928 kwork->all_runtime += work->total_runtime; in perf_kwork__report()
1929 kwork->all_count += work->nr_atoms; in perf_kwork__report()
1936 if (kwork->summary) { in perf_kwork__report()
1950 struct perf_sample *sample,
1955 struct perf_sample *sample, in perf_kwork__process_tracepoint_sample() argument
1961 if (evsel->handler != NULL) { in perf_kwork__process_tracepoint_sample()
1962 tracepoint_handler f = evsel->handler; in perf_kwork__process_tracepoint_sample()
1964 err = f(tool, evsel, sample, machine); in perf_kwork__process_tracepoint_sample()
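The generic sample callback does no decoding of its own: each tracepoint evsel had a handler installed at setup time, so dispatch is a single indirect call. The indirection, sketched with opaque pointers in place of the perf types:

	#include <stddef.h>

	struct evsel_sketch {
		int (*handler)(void *tool, void *sample, void *machine);
	};

	static int process_sample_sketch(struct evsel_sketch *evsel, void *tool,
					 void *sample, void *machine)
	{
		if (evsel->handler != NULL)
			return evsel->handler(tool, sample, machine);
		return 0;
	}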
1975 kwork->tool.comm = perf_event__process_comm; in perf_kwork__timehist()
1976 kwork->tool.exit = perf_event__process_exit; in perf_kwork__timehist()
1977 kwork->tool.fork = perf_event__process_fork; in perf_kwork__timehist()
1978 kwork->tool.attr = perf_event__process_attr; in perf_kwork__timehist()
1979 kwork->tool.tracing_data = perf_event__process_tracing_data; in perf_kwork__timehist()
1980 kwork->tool.build_id = perf_event__process_build_id; in perf_kwork__timehist()
1981 kwork->tool.ordered_events = true; in perf_kwork__timehist()
1982 kwork->tool.ordering_requires_timestamps = true; in perf_kwork__timehist()
1983 symbol_conf.use_callchain = kwork->show_callchain; in perf_kwork__timehist()
1987 return -1; in perf_kwork__timehist()
2000 struct kwork_top_stat *stat = &kwork->top_stat; in top_calc_total_runtime()
2006 next = rb_first_cached(&class->work_root); in top_calc_total_runtime()
2009 BUG_ON(work->cpu >= MAX_NR_CPUS); in top_calc_total_runtime()
2010 stat->cpus_runtime[work->cpu].total += work->total_runtime; in top_calc_total_runtime()
2011 stat->cpus_runtime[MAX_NR_CPUS].total += work->total_runtime; in top_calc_total_runtime()
2019 struct kwork_top_stat *stat = &kwork->top_stat; in top_calc_idle_time()
2021 if (work->id == 0) { in top_calc_idle_time()
2022 stat->cpus_runtime[work->cpu].idle += work->total_runtime; in top_calc_idle_time()
2023 stat->cpus_runtime[MAX_NR_CPUS].idle += work->total_runtime; in top_calc_idle_time()
2031 struct kwork_top_stat *stat = &kwork->top_stat; in top_calc_irq_runtime()
2034 stat->cpus_runtime[work->cpu].irq += work->total_runtime; in top_calc_irq_runtime()
2035 stat->cpus_runtime[MAX_NR_CPUS].irq += work->total_runtime; in top_calc_irq_runtime()
2037 stat->cpus_runtime[work->cpu].softirq += work->total_runtime; in top_calc_irq_runtime()
2038 stat->cpus_runtime[MAX_NR_CPUS].softirq += work->total_runtime; in top_calc_irq_runtime()
2055 data = find_work_by_id(&class->work_root, in top_subtract_irq_runtime()
2056 work->id, work->cpu); in top_subtract_irq_runtime()
2060 if (work->total_runtime > data->total_runtime) { in top_subtract_irq_runtime()
2061 work->total_runtime -= data->total_runtime; in top_subtract_irq_runtime()
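Interrupt time is accounted twice: once to the task that was running and once to the irq/softirq pseudo-work on the same CPU. top_subtract_irq_runtime() removes that overlap from the task, and the comparison guards against skewed timestamps making the unsigned subtraction wrap. Distilled:

	#include <stdint.h>

	static void subtract_irq_runtime_sketch(uint64_t *task_runtime,
						uint64_t irq_runtime)
	{
		if (*task_runtime > irq_runtime)	/* avoid unsigned wrap */
			*task_runtime -= irq_runtime;
	}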
2072 struct kwork_top_stat *stat = &kwork->top_stat; in top_calc_cpu_usage()
2078 next = rb_first_cached(&class->work_root); in top_calc_cpu_usage()
2082 if (work->total_runtime == 0) in top_calc_cpu_usage()
2085 __set_bit(work->cpu, stat->all_cpus_bitmap); in top_calc_cpu_usage()
2089 work->cpu_usage = work->total_runtime * 10000 / in top_calc_cpu_usage()
2090 stat->cpus_runtime[work->cpu].total; in top_calc_cpu_usage()
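cpu_usage is kept as a fixed-point value in hundredths of a percent: runtime scaled by 10000 against the CPU's total time, which top_print_work() later divides by 100 when formatting. A sketch of both ends:

	#include <stdint.h>
	#include <stdio.h>

	static void print_cpu_usage_sketch(uint64_t runtime, uint64_t total)
	{
		/* 1234 here means 12.34% */
		uint64_t usage = total ? runtime * 10000 / total : 0;

		printf("%6.2f%%\n", (double)usage / 100);
	}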
2101 struct kwork_top_stat *stat = &kwork->top_stat; in top_calc_load_runtime()
2103 if (work->id != 0) { in top_calc_load_runtime()
2104 stat->cpus_runtime[work->cpu].load += work->total_runtime; in top_calc_load_runtime()
2105 stat->cpus_runtime[MAX_NR_CPUS].load += work->total_runtime; in top_calc_load_runtime()
2122 node = rb_first_cached(&class->work_root); in top_merge_tasks()
2126 rb_erase_cached(node, &class->work_root); in top_merge_tasks()
2132 cpu = data->cpu; in top_merge_tasks()
2133 merged_work = find_work_by_id(&merged_root, data->id, in top_merge_tasks()
2134 data->id == 0 ? cpu : -1); in top_merge_tasks()
2136 work_insert(&merged_root, data, &kwork->cmp_id); in top_merge_tasks()
2138 merged_work->total_runtime += data->total_runtime; in top_merge_tasks()
2139 merged_work->cpu_usage += data->cpu_usage; in top_merge_tasks()
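top_merge_tasks() folds the per-CPU records for the same task id into one entry, summing runtime and usage; only idle (id 0) also keys on the CPU, so every CPU keeps its own idle row. The key and merge rules, sketched:

	#include <stdint.h>

	struct task_sketch {
		uint64_t id;
		int cpu;
		uint64_t total_runtime;
		uint64_t cpu_usage;
	};

	static int same_task_key(const struct task_sketch *a,
				 const struct task_sketch *b)
	{
		if (a->id != b->id)
			return 0;
		return a->id != 0 || a->cpu == b->cpu;	/* idle stays per CPU */
	}

	static void merge_task(struct task_sketch *dst,
			       const struct task_sketch *src)
	{
		dst->total_runtime += src->total_runtime;
		dst->cpu_usage += src->cpu_usage;
	}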
2157 next = rb_first_cached(&kwork->sorted_work_root); in perf_kwork__top_report()
2162 if (work->total_runtime == 0) in perf_kwork__top_report()
2183 return -1; in perf_kwork__top_bpf()
2211 return -1; in perf_kwork__top()
2213 kwork->top_stat.cpus_runtime = cpus_runtime; in perf_kwork__top()
2214 bitmap_zero(kwork->top_stat.all_cpus_bitmap, MAX_NR_CPUS); in perf_kwork__top()
2216 if (kwork->use_bpf) in perf_kwork__top()
2233 zfree(&kwork->top_stat.cpus_runtime); in perf_kwork__top()
2248 if (kwork->event_list_str == NULL) in setup_event_list()
2249 kwork->event_list_str = "irq, softirq, workqueue"; in setup_event_list()
2251 str = strdup(kwork->event_list_str); in setup_event_list()
2256 if (strcmp(tok, class->name) == 0) { in setup_event_list()
2257 list_add_tail(&class->list, &kwork->class_list); in setup_event_list()
2263 "Unknown --event key: `%s'", tok); in setup_event_list()
2269 list_for_each_entry(class, &kwork->class_list, list) in setup_event_list()
2270 pr_debug(" %s", class->name); in setup_event_list()
2283 "-a", in perf_kwork__record()
2284 "-R", in perf_kwork__record()
2285 "-m", "1024", in perf_kwork__record()
2286 "-c", "1", in perf_kwork__record()
2289 rec_argc = ARRAY_SIZE(record_args) + argc - 1; in perf_kwork__record()
2291 list_for_each_entry(class, &kwork->class_list, list) in perf_kwork__record()
2292 rec_argc += 2 * class->nr_tracepoints; in perf_kwork__record()
2296 return -ENOMEM; in perf_kwork__record()
2301 list_for_each_entry(class, &kwork->class_list, list) { in perf_kwork__record()
2302 for (j = 0; j < class->nr_tracepoints; j++) { in perf_kwork__record()
2303 rec_argv[i++] = strdup("-e"); in perf_kwork__record()
2304 rec_argv[i++] = strdup(class->tp_handlers[j].name); in perf_kwork__record()
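perf_kwork__record() sizes the record argv up front: the fixed flags, plus an "-e <tracepoint>" pair for every event of every selected class, plus whatever the user appended. A standalone sketch of that assembly:

	#include <stdlib.h>

	static const char * const base_args_sketch[] = {
		"record", "-a", "-R", "-m", "1024", "-c", "1",
	};

	#define NR_BASE_ARGS \
		(sizeof(base_args_sketch) / sizeof(base_args_sketch[0]))

	static const char **build_record_argv(const char * const *tps, int nr_tp)
	{
		const char **argv;
		int i = 0, j;

		/* +1 keeps the vector NULL-terminated (calloc zeroes it) */
		argv = calloc(NR_BASE_ARGS + 2 * nr_tp + 1, sizeof(*argv));
		if (!argv)
			return NULL;
		for (j = 0; j < (int)NR_BASE_ARGS; j++)
			argv[i++] = base_args_sketch[j];
		for (j = 0; j < nr_tp; j++) {
			argv[i++] = "-e";
			argv[i++] = tps[j];
		}
		return argv;
	}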
2354 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, in cmd_kwork()
2368 OPT_STRING(0, "time", &kwork.time_str, "str", in cmd_kwork()
2369 "Time span for analysis (start,stop)"), in cmd_kwork()
2372 OPT_BOOLEAN('S', "with-summary", &kwork.summary, in cmd_kwork()
2375 OPT_BOOLEAN('b', "use-bpf", &kwork.use_bpf, in cmd_kwork()
2387 OPT_STRING(0, "time", &kwork.time_str, "str", in cmd_kwork()
2388 "Time span for analysis (start,stop)"), in cmd_kwork()
2392 OPT_BOOLEAN('b', "use-bpf", &kwork.use_bpf, in cmd_kwork()
2402 OPT_BOOLEAN('g', "call-graph", &kwork.show_callchain, in cmd_kwork()
2404 OPT_UINTEGER(0, "max-stack", &kwork.max_stack, in cmd_kwork()
2408 OPT_STRING(0, "time", &kwork.time_str, "str", in cmd_kwork()
2409 "Time span for analysis (start,stop)"), in cmd_kwork()
2425 OPT_STRING(0, "time", &kwork.time_str, "str", in cmd_kwork()
2426 "Time span for analysis (start,stop)"), in cmd_kwork()
2430 OPT_BOOLEAN('b', "use-bpf", &kwork.use_bpf, in cmd_kwork()
2462 kwork.tool.sample = perf_kwork__process_tracepoint_sample; in cmd_kwork()