Lines matching refs: work
314 struct kwork_work *work;
318 work = container_of(node, struct kwork_work, node);
319 cmp = work_cmp(sort_list, key, work);
325 if (work->name == NULL)
326 work->name = key->name;
327 return work;
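The work_search() fragment above (file lines 314-327) walks a node tree, compares the key against each node, and caches the key's name on the matched work. Below is a minimal standalone sketch of that lookup pattern; struct work, the plain left/right pointers, and the two-field comparator are simplified stand-ins for perf's struct kwork_work, rb_node, and the sort_list comparison chain, not the perf code itself.

#include <stddef.h>

/* Simplified stand-in for struct kwork_work: only the fields the lookup needs. */
struct work {
	struct work *left, *right;     /* stand-in for the rb_node links */
	int cpu;
	unsigned long long id;
	const char *name;
};

/* Stand-in for the sort_list comparison chain: compare cpu, then id. */
static int work_cmp(const struct work *key, const struct work *w)
{
	if (key->cpu != w->cpu)
		return key->cpu < w->cpu ? -1 : 1;
	if (key->id != w->id)
		return key->id < w->id ? -1 : 1;
	return 0;
}

/* Walk the tree; on a hit, lazily fill in the cached name, as the original
 * does with "if (work->name == NULL) work->name = key->name". */
static struct work *work_search(struct work *node, struct work *key)
{
	while (node != NULL) {
		int cmp = work_cmp(key, node);

		if (cmp < 0)
			node = node->left;
		else if (cmp > 0)
			node = node->right;
		else {
			if (node->name == NULL)
				node->name = key->name;
			return node;
		}
	}
	return NULL;
}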
361 struct kwork_work *work = zalloc(sizeof(*work));
363 if (work == NULL) {
364 pr_err("Failed to zalloc kwork work\n");
369 INIT_LIST_HEAD(&work->atom_list[i]);
371 work->id = key->id;
372 work->cpu = key->cpu;
373 work->name = key->name;
374 work->class = key->class;
375 return work;
382 struct kwork_work *work = work_search(root, key, sort_list);
384 if (work != NULL)
385 return work;
387 work = work_new(key);
388 if (work)
389 work_insert(root, work, sort_list);
391 return work;
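work_new() and work_findnew() (file lines 361-391) provide the allocate-and-insert half of the same pattern: zero-allocate a work, initialise every atom list head, copy the identifying fields from the key, and insert a fresh work only when the search misses. A minimal sketch follows; KWORK_TRACE_MAX, struct list_head, INIT_LIST_HEAD, and the stubbed work_search()/work_insert() are simplified stand-ins for the perf and kernel definitions, not the real ones.

#include <stdio.h>
#include <stdlib.h>

#define KWORK_TRACE_MAX 3   /* stand-in for perf's count of trace atom types */

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *head)
{
	head->next = head->prev = head;
}

struct work {
	struct list_head atom_list[KWORK_TRACE_MAX];
	unsigned long long id;
	int cpu;
	const char *name;
};

static struct work *work_new(const struct work *key)
{
	int i;
	struct work *w = calloc(1, sizeof(*w));   /* calloc stands in for zalloc() */

	if (w == NULL) {
		fprintf(stderr, "Failed to zalloc kwork work\n");
		return NULL;
	}

	for (i = 0; i < KWORK_TRACE_MAX; i++)
		INIT_LIST_HEAD(&w->atom_list[i]);

	w->id   = key->id;
	w->cpu  = key->cpu;
	w->name = key->name;
	return w;
}

/* Stubs standing in for the tree helpers sketched earlier. */
static struct work *work_search(struct work *root, const struct work *key)
{
	(void)root; (void)key;
	return NULL;              /* pretend the search always misses */
}

static void work_insert(struct work **root, struct work *w)
{
	if (*root == NULL)
		*root = w;        /* placeholder: the real code inserts into an rbtree */
}

/* Find-or-create, mirroring work_findnew(). */
static struct work *work_findnew(struct work **root, struct work *key)
{
	struct work *w = work_search(*root, key);

	if (w != NULL)
		return w;

	w = work_new(key);
	if (w)
		work_insert(root, w);
	return w;
}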
408 struct kwork_work *work)
410 if (kwork->profile_name && work->name &&
411 (strcmp(work->name, kwork->profile_name) != 0)) {
419 struct kwork_work *work,
422 int cpu = work->cpu;
438 !profile_name_match(kwork, work)) {
457 struct kwork_work *work, key;
466 work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
467 if (work == NULL) {
472 if (!profile_event_match(kwork, work, sample)) {
478 dst_atom = list_last_entry_or_null(&work->atom_list[dst_type],
487 *ret_work = work;
490 last_atom = list_last_entry_or_null(&work->atom_list[src_type],
500 list_add_tail(&atom->list, &work->atom_list[src_type]);
515 struct kwork_work *work, key;
520 work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
522 *ret_work = work;
524 if (work == NULL)
527 if (!profile_event_match(kwork, work, sample))
530 atom = list_last_entry_or_null(&work->atom_list[dst_type],
537 list_add_tail(&src_atom->list, &work->atom_list[src_type]);
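The atom handling around file lines 408-537 first filters by profile name, then appears to pair entry and exit events by appending an atom to the work's source-type list on entry (list_add_tail) and consuming the most recent atom of the destination type on exit (list_last_entry_or_null). The sketch below models only that tail-append / last-entry pairing with a tiny doubly linked list; struct atom, list_add_tail and last_atom_or_null are simplified stand-ins for the kernel list API, not the perf code.

#include <stddef.h>
#include <stdlib.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static int list_empty(const struct list_head *h) { return h->next == h; }

/* Append at the tail, as list_add_tail() does. */
static void list_add_tail(struct list_head *item, struct list_head *head)
{
	item->prev = head->prev;
	item->next = head;
	head->prev->next = item;
	head->prev = item;
}

struct atom {
	struct list_head list;
	unsigned long long time;
};

/* Last atom on the list or NULL, as list_last_entry_or_null() yields. */
static struct atom *last_atom_or_null(struct list_head *head)
{
	if (list_empty(head))
		return NULL;
	return (struct atom *)((char *)head->prev - offsetof(struct atom, list));
}

/* Entry event: queue a new atom on the source-type list. */
static void push_atom(struct list_head *src_list, unsigned long long time)
{
	struct atom *a = calloc(1, sizeof(*a));

	if (a) {
		a->time = time;
		list_add_tail(&a->list, src_list);
	}
}

/* Exit event: consume the most recently queued atom, if any. */
static struct atom *pop_last_atom(struct list_head *head)
{
	struct atom *a = last_atom_or_null(head);

	if (a) {
		a->list.prev->next = a->list.next;
		a->list.next->prev = a->list.prev;
	}
	return a;
}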
550 struct kwork_work *work;
554 work = rb_entry(next, struct kwork_work, node);
555 if ((cpu != -1 && work->id == id && work->cpu == cpu) ||
556 (cpu == -1 && work->id == id))
557 return work;
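The fragment at file lines 550-557 scans works in tree order and returns the one whose id matches, additionally requiring a cpu match unless cpu is -1. A minimal sketch of that predicate over a plain array, with struct work reduced to the two matched fields.

#include <stddef.h>

struct work {
	unsigned long long id;
	int cpu;
};

/* Return the first work matching id, and cpu too unless cpu == -1. */
static struct work *find_work_by_id(struct work *works, size_t n,
				    unsigned long long id, int cpu)
{
	size_t i;

	for (i = 0; i < n; i++) {
		struct work *w = &works[i];

		if ((cpu != -1 && w->id == id && w->cpu == cpu) ||
		    (cpu == -1 && w->id == id))
			return w;
	}
	return NULL;
}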
578 static void report_update_exit_event(struct kwork_work *work,
588 if ((delta > work->max_runtime) ||
589 (work->max_runtime == 0)) {
590 work->max_runtime = delta;
591 work->max_runtime_start = entry_time;
592 work->max_runtime_end = exit_time;
594 work->total_runtime += delta;
595 work->nr_atoms++;
617 struct kwork_work *work = NULL;
621 machine, &work);
622 if (work == NULL)
626 report_update_exit_event(work, atom, sample);
633 static void latency_update_entry_event(struct kwork_work *work,
643 if ((delta > work->max_latency) ||
644 (work->max_latency == 0)) {
645 work->max_latency = delta;
646 work->max_latency_start = raise_time;
647 work->max_latency_end = entry_time;
649 work->total_latency += delta;
650 work->nr_atoms++;
672 struct kwork_work *work = NULL;
676 machine, &work);
677 if (work == NULL)
681 latency_update_entry_event(work, atom, sample);
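report_update_exit_event() and latency_update_entry_event() (file lines 578-595 and 633-650) share one accounting pattern: compute the delta between two timestamps, record it as the new maximum (together with its start and end times) if it beats the previous maximum or if no maximum has been recorded yet, then add it to the running total and bump the atom count. A compact standalone sketch of that pattern; the field names follow the runtime variant, and the latency variant applies the same logic between the raise time and the entry time.

#include <stdint.h>

struct work_stats {
	uint64_t nr_atoms;
	uint64_t max_runtime;
	uint64_t max_runtime_start;
	uint64_t max_runtime_end;
	uint64_t total_runtime;
};

/* Fold one entry/exit pair into the per-work statistics. */
static void update_exit_event(struct work_stats *w,
			      uint64_t entry_time, uint64_t exit_time)
{
	uint64_t delta = exit_time - entry_time;

	if (delta > w->max_runtime || w->max_runtime == 0) {
		w->max_runtime = delta;
		w->max_runtime_start = entry_time;
		w->max_runtime_end = exit_time;
	}

	w->total_runtime += delta;
	w->nr_atoms++;
}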
738 struct kwork_work *work,
763 printf(" [%0*d] ", PRINT_CPU_WIDTH, work->cpu);
768 if (work->class && work->class->work_name) {
769 work->class->work_name(work, kwork_name,
831 struct kwork_work *work = NULL;
835 machine, &work, true);
839 if (work != NULL)
852 struct kwork_work *work = NULL;
865 machine, &work);
866 if (work == NULL) {
872 work->nr_atoms++;
873 timehist_print_event(kwork, work, atom, sample, &al);
882 static void top_update_runtime(struct kwork_work *work,
892 work->total_runtime += delta;
913 struct kwork_work *work, *sched_work;
919 machine, &work);
920 if (!work)
927 work->id, work->cpu);
929 top_update_runtime(work, atom, sample);
944 struct kwork_work *work;
948 machine, &work);
949 if (!work)
953 top_update_runtime(work, atom, sample);
1006 struct kwork_work *work,
1012 work->class = class;
1013 work->cpu = sample->cpu;
1016 work->id = evsel__intval_common(evsel, sample, "common_pid");
1017 work->name = NULL;
1019 work->id = evsel__intval(evsel, sample, "irq");
1020 work->name = evsel__strval(evsel, sample, "name");
1024 static void irq_work_name(struct kwork_work *work, char *buf, int len)
1026 snprintf(buf, len, "%s:%" PRIu64 "", work->name, work->id);
1133 struct kwork_work *work,
1141 work->class = class;
1142 work->cpu = sample->cpu;
1145 work->id = evsel__intval_common(evsel, sample, "common_pid");
1146 work->name = NULL;
1149 work->id = num;
1150 work->name = evsel__softirq_name(evsel, num);
1154 static void softirq_work_name(struct kwork_work *work, char *buf, int len)
1156 snprintf(buf, len, "(s)%s:%" PRIu64 "", work->name, work->id);
1233 struct kwork_work *work,
1243 work->class = class;
1244 work->cpu = sample->cpu;
1245 work->id = evsel__intval(evsel, sample, "work");
1246 work->name = function_addr == 0 ? NULL :
1250 static void workqueue_work_name(struct kwork_work *work, char *buf, int len)
1252 if (work->name != NULL)
1253 snprintf(buf, len, "(w)%s", work->name);
1255 snprintf(buf, len, "(w)0x%" PRIx64, work->id);
1301 struct kwork_work *work,
1307 work->class = class;
1308 work->cpu = sample->cpu;
1311 work->id = evsel__intval(evsel, sample, "prev_pid");
1312 work->name = strdup(evsel__strval(evsel, sample, "prev_comm"));
1314 work->id = evsel__intval(evsel, sample, "next_pid");
1315 work->name = strdup(evsel__strval(evsel, sample, "next_comm"));
1319 static void sched_work_name(struct kwork_work *work, char *buf, int len)
1321 snprintf(buf, len, "%s", work->name);
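Each work class supplies a work_name callback (file lines 1024-1321) that renders a display name with a class-specific prefix: "name:id" for hard irqs, "(s)name:id" for softirqs, "(w)name" for workqueue items or the raw address "(w)0x..." when no symbol was resolved, and just the command name for sched work. A small sketch reproducing those formats with snprintf; the enum and the single dispatch function are illustrative, not the per-class callback structure perf uses.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

enum work_class { CLASS_IRQ, CLASS_SOFTIRQ, CLASS_WORKQUEUE, CLASS_SCHED };

static void work_name(enum work_class class, const char *name, uint64_t id,
		      char *buf, int len)
{
	switch (class) {
	case CLASS_IRQ:
		snprintf(buf, len, "%s:%" PRIu64, name, id);
		break;
	case CLASS_SOFTIRQ:
		snprintf(buf, len, "(s)%s:%" PRIu64, name, id);
		break;
	case CLASS_WORKQUEUE:
		if (name != NULL)
			snprintf(buf, len, "(w)%s", name);
		else
			snprintf(buf, len, "(w)0x%" PRIx64, id);  /* unresolved symbol */
		break;
	case CLASS_SCHED:
		snprintf(buf, len, "%s", name);
		break;
	}
}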
1346 static int report_print_work(struct perf_kwork *kwork, struct kwork_work *work)
1358 if (work->class && work->class->work_name) {
1359 work->class->work_name(work, kwork_name,
1369 ret += printf(" %0*d |", PRINT_CPU_WIDTH, work->cpu);
1377 (double)work->total_runtime / NSEC_PER_MSEC);
1381 (double)work->total_latency /
1382 work->nr_atoms / NSEC_PER_MSEC);
1388 ret += printf(" %*" PRIu64 " |", PRINT_COUNT_WIDTH, work->nr_atoms);
1394 timestamp__scnprintf_usec(work->max_runtime_start,
1397 timestamp__scnprintf_usec(work->max_runtime_end,
1402 (double)work->max_runtime / NSEC_PER_MSEC,
1410 timestamp__scnprintf_usec(work->max_latency_start,
1413 timestamp__scnprintf_usec(work->max_latency_end,
1418 (double)work->max_latency / NSEC_PER_MSEC,
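report_print_work() (file lines 1346-1418) converts the accumulated nanosecond counters for display: total runtime and the maximum runtime/latency are divided by NSEC_PER_MSEC to print milliseconds, and the average delay is the total latency divided by the atom count before the same conversion. A minimal sketch of those conversions, with NSEC_PER_MSEC defined locally and the output layout simplified.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_MSEC 1000000ULL

static void print_runtime_summary(uint64_t total_runtime_ns,
				  uint64_t total_latency_ns,
				  uint64_t nr_atoms)
{
	/* Total runtime in milliseconds. */
	printf("runtime: %10.3f ms\n",
	       (double)total_runtime_ns / NSEC_PER_MSEC);

	/* Average latency per atom, also in milliseconds. */
	if (nr_atoms)
		printf("avg delay: %8.3f ms\n",
		       (double)total_latency_ns / nr_atoms / NSEC_PER_MSEC);

	printf("count: %" PRIu64 "\n", nr_atoms);
}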
1637 static int top_print_work(struct perf_kwork *kwork __maybe_unused, struct kwork_work *work)
1646 ret += printf(" %*" PRIu64 " ", PRINT_PID_WIDTH, work->id);
1652 ret += printf(" %*d ", PRINT_PID_WIDTH, work->tgid);
1659 (double)work->cpu_usage / 100);
1666 (double)work->total_runtime / NSEC_PER_MSEC);
1673 work->is_kthread ? "[" : "",
1674 work->name,
1675 work->is_kthread ? "]" : "");
1677 ret += printf(" %-*s", PRINT_TASK_NAME_WIDTH, work->name);
1837 struct kwork_work *work)
1843 count = nr_list_entry(&work->atom_list[i]);
1853 struct kwork_work *work = NULL;
1855 work = work_new(key);
1856 if (work == NULL)
1859 work_insert(&class->work_root, work, &kwork->cmp_id);
1860 return work;
1905 struct kwork_work *work;
1922 work = rb_entry(next, struct kwork_work, node);
1923 process_skipped_events(kwork, work);
1925 if (work->nr_atoms != 0) {
1926 report_print_work(kwork, work);
1928 kwork->all_runtime += work->total_runtime;
1929 kwork->all_count += work->nr_atoms;
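The report loop (file lines 1905-1929) walks the sorted tree, handles skipped events, prints only works that accumulated at least one atom, and folds each work's totals into kwork->all_runtime and kwork->all_count for the summary line. A tiny sketch of that aggregation over an array of per-work stats, with the tree walk replaced by a plain loop.

#include <stddef.h>
#include <stdint.h>

struct work_stats {
	uint64_t nr_atoms;
	uint64_t total_runtime;
};

struct totals {
	uint64_t all_runtime;
	uint64_t all_count;
};

/* Fold every non-empty work into the report-wide totals. */
static void sum_report_totals(const struct work_stats *works, size_t n,
			      struct totals *t)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (works[i].nr_atoms == 0)
			continue;
		t->all_runtime += works[i].total_runtime;
		t->all_count   += works[i].nr_atoms;
	}
}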
1998 struct kwork_work *work;
2008 work = rb_entry(next, struct kwork_work, node);
2009 BUG_ON(work->cpu >= MAX_NR_CPUS);
2010 stat->cpus_runtime[work->cpu].total += work->total_runtime;
2011 stat->cpus_runtime[MAX_NR_CPUS].total += work->total_runtime;
2017 struct kwork_work *work)
2021 if (work->id == 0) {
2022 stat->cpus_runtime[work->cpu].idle += work->total_runtime;
2023 stat->cpus_runtime[MAX_NR_CPUS].idle += work->total_runtime;
2029 struct kwork_work *work)
2034 stat->cpus_runtime[work->cpu].irq += work->total_runtime;
2035 stat->cpus_runtime[MAX_NR_CPUS].irq += work->total_runtime;
2037 stat->cpus_runtime[work->cpu].softirq += work->total_runtime;
2038 stat->cpus_runtime[MAX_NR_CPUS].softirq += work->total_runtime;
2043 struct kwork_work *work)
2056 work->id, work->cpu);
2060 if (work->total_runtime > data->total_runtime) {
2061 work->total_runtime -= data->total_runtime;
2070 struct kwork_work *work;
2080 work = rb_entry(next, struct kwork_work, node);
2082 if (work->total_runtime == 0)
2085 __set_bit(work->cpu, stat->all_cpus_bitmap);
2087 top_subtract_irq_runtime(kwork, work);
2089 work->cpu_usage = work->total_runtime * 10000 /
2090 stat->cpus_runtime[work->cpu].total;
2092 top_calc_idle_time(kwork, work);
2099 struct kwork_work *work)
2103 if (work->id != 0) {
2104 stat->cpus_runtime[work->cpu].load += work->total_runtime;
2105 stat->cpus_runtime[MAX_NR_CPUS].load += work->total_runtime;
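The top path (file lines 1998-2105, display at 1659) buckets each work's runtime into per-CPU totals plus an aggregate slot at index MAX_NR_CPUS, treats id 0 as idle time, tracks irq/softirq time separately so it can be subtracted from task runtime, and derives per-work CPU usage as total_runtime * 10000 / cpu_total, i.e. basis points, which top_print_work later divides by 100 to print a percentage with two decimals. A minimal sketch of the basis-point calculation and its display; the zero-total guard is an assumption added here for safety.

#include <stdint.h>
#include <stdio.h>

/* CPU usage stored in basis points (1/100 of a percent), mirroring
 * "total_runtime * 10000 / cpus_runtime[cpu].total" in the top code. */
static uint64_t calc_cpu_usage(uint64_t work_runtime_ns, uint64_t cpu_total_ns)
{
	if (cpu_total_ns == 0)        /* guard added in this sketch only */
		return 0;
	return work_runtime_ns * 10000 / cpu_total_ns;
}

int main(void)
{
	uint64_t usage = calc_cpu_usage(250000000ULL, 1000000000ULL); /* 25% busy */

	/* Matches top_print_work() printing cpu_usage / 100 as a percentage. */
	printf("%%CPU %.2f\n", (double)usage / 100);
	return 0;
}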
2150 struct kwork_work *work;
2159 work = rb_entry(next, struct kwork_work, node);
2160 process_skipped_events(kwork, work);
2162 if (work->total_runtime == 0)
2165 top_print_work(kwork, work);