// SPDX-License-Identifier: GPL-2.0
/*
 * bpf_kwork.c
 *
 * Copyright (c) 2022 Huawei Inc, Yang Jihong <yangjihong1@huawei.com>
 */

#include <time.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

#include <linux/time64.h>

#include "util/debug.h"
#include "util/evsel.h"
#include "util/kwork.h"

#include <bpf/bpf.h>
#include <perf/cpumap.h>

#include "util/bpf_skel/kwork_trace.skel.h"

/*
 * This should be in sync with "util/kwork_trace.bpf.c"
 */
#define MAX_KWORKNAME 128

/*
 * Key identifying one kernel-work instance in the BPF maps.
 * Must stay layout-compatible with the BPF-side definition.
 */
struct work_key {
	u32 type;	/* kwork class (enum kwork_class_type value) */
	u32 cpu;	/* CPU the work executed on */
	u64 id;		/* per-class work identifier */
};

/*
 * Per-work statistics accumulated by the BPF programs; value type of
 * the perf_kwork_report map.  The "time" fields are runtime or latency
 * depending on the configured report type.
 */
struct report_data {
	u64 nr;			/* number of recorded atoms/events */
	u64 total_time;		/* sum over all atoms */
	u64 max_time;		/* longest single atom */
	u64 max_time_start;	/* timestamps bracketing the longest atom */
	u64 max_time_end;
};

/*
 * Glue between a generic kwork_class and its BPF implementation:
 * which programs to autoload and how to resolve a work's name.
 */
struct kwork_class_bpf {
	struct kwork_class *class;	/* filled in at prepare time */

	/* enable the skeleton programs needed for kwork->report */
	void (*load_prepare)(struct perf_kwork *kwork);
	/* resolve *key to a heap-allocated name (may stay NULL) */
	int (*get_work_name)(struct work_key *key, char **ret_name);
};
static struct kwork_trace_bpf *skel;

/* Wall-clock (CLOCK_MONOTONIC) window of the trace session. */
static struct timespec ts_start;
static struct timespec ts_end;

/* Start collecting: record the start time and flip the BPF-side switch. */
void perf_kwork__trace_start(void)
{
	clock_gettime(CLOCK_MONOTONIC, &ts_start);
	skel->bss->enabled = 1;
}

/* Stop collecting; accumulated data stays in the maps for reporting. */
void perf_kwork__trace_finish(void)
{
	clock_gettime(CLOCK_MONOTONIC, &ts_end);
	skel->bss->enabled = 0;
}

/*
 * Look up the name recorded for *key in the perf_kwork_names map.
 *
 * On return *ret_name is either NULL (no name recorded, or map
 * unavailable — both are non-fatal) or a strdup()'d string owned by
 * the caller.  Returns 0 on success, -1 only if strdup() fails.
 */
static int get_work_name_from_map(struct work_key *key, char **ret_name)
{
	char name[MAX_KWORKNAME] = { 0 };
	int fd = bpf_map__fd(skel->maps.perf_kwork_names);

	*ret_name = NULL;

	if (fd < 0) {
		pr_debug("Invalid names map fd\n");
		return 0;
	}

	if ((bpf_map_lookup_elem(fd, key, name) == 0) && (strlen(name) != 0)) {
		*ret_name = strdup(name);
		if (*ret_name == NULL) {
			pr_err("Failed to copy work name\n");
			return -1;
		}
	}

	return 0;
}

/* Enable the irq programs needed for the requested report type. */
static void irq_load_prepare(struct perf_kwork *kwork)
{
	if (kwork->report == KWORK_REPORT_RUNTIME) {
		bpf_program__set_autoload(skel->progs.report_irq_handler_entry, true);
		bpf_program__set_autoload(skel->progs.report_irq_handler_exit, true);
	}
}

static struct kwork_class_bpf kwork_irq_bpf = {
	.load_prepare = irq_load_prepare,
	.get_work_name = get_work_name_from_map,
};

/* Enable the softirq programs needed for the requested report type. */
static void softirq_load_prepare(struct perf_kwork *kwork)
{
	if (kwork->report == KWORK_REPORT_RUNTIME) {
		bpf_program__set_autoload(skel->progs.report_softirq_entry, true);
		bpf_program__set_autoload(skel->progs.report_softirq_exit, true);
	} else if (kwork->report == KWORK_REPORT_LATENCY) {
		bpf_program__set_autoload(skel->progs.latency_softirq_raise, true);
		bpf_program__set_autoload(skel->progs.latency_softirq_entry, true);
	}
}

static struct kwork_class_bpf kwork_softirq_bpf = {
	.load_prepare = softirq_load_prepare,
	.get_work_name = get_work_name_from_map,
};

/* Enable the workqueue programs needed for the requested report type. */
static void workqueue_load_prepare(struct perf_kwork *kwork)
{
	if (kwork->report == KWORK_REPORT_RUNTIME) {
		bpf_program__set_autoload(skel->progs.report_workqueue_execute_start, true);
		bpf_program__set_autoload(skel->progs.report_workqueue_execute_end, true);
	} else if (kwork->report == KWORK_REPORT_LATENCY) {
		bpf_program__set_autoload(skel->progs.latency_workqueue_activate_work, true);
		bpf_program__set_autoload(skel->progs.latency_workqueue_execute_start, true);
	}
}

static struct kwork_class_bpf kwork_workqueue_bpf = {
	.load_prepare = workqueue_load_prepare,
	.get_work_name = get_work_name_from_map,
};

/* Classes with a BPF backend; a NULL slot means "not supported via BPF". */
static struct kwork_class_bpf *
kwork_class_bpf_supported_list[KWORK_CLASS_MAX] = {
	[KWORK_CLASS_IRQ]       = &kwork_irq_bpf,
	[KWORK_CLASS_SOFTIRQ]   = &kwork_softirq_bpf,
	[KWORK_CLASS_WORKQUEUE] = &kwork_workqueue_bpf,
};

/* True if 'type' indexes kwork_class_bpf_supported_list safely. */
static bool valid_kwork_class_type(enum kwork_class_type type)
{
	return type >= 0 && type < KWORK_CLASS_MAX ? true : false;
}

/*
 * Push the user's --cpu and --name filters into the corresponding BPF
 * maps and set the BSS flags telling the BPF side to honour them.
 * Returns 0 on success, -1 on invalid input or unavailable map.
 */
static int setup_filters(struct perf_kwork *kwork)
{
	if (kwork->cpu_list != NULL) {
		int idx, nr_cpus;
		struct perf_cpu_map *map;
		struct perf_cpu cpu;
		int fd = bpf_map__fd(skel->maps.perf_kwork_cpu_filter);

		if (fd < 0) {
			pr_debug("Invalid cpu filter fd\n");
			return -1;
		}

		map = perf_cpu_map__new(kwork->cpu_list);
		if (map == NULL) {
			pr_debug("Invalid cpu_list\n");
			return -1;
		}

		nr_cpus = libbpf_num_possible_cpus();
		perf_cpu_map__for_each_cpu(cpu, idx, map) {
			u8 val = 1;

			if (cpu.cpu >= nr_cpus) {
				perf_cpu_map__put(map);
				pr_err("Requested cpu %d too large\n", cpu.cpu);
				return -1;
			}
			/* NOTE(review): update failure is silently ignored here. */
			bpf_map_update_elem(fd, &cpu.cpu, &val, BPF_ANY);
		}
		perf_cpu_map__put(map);

		skel->bss->has_cpu_filter = 1;
	}

	if (kwork->profile_name != NULL) {
		int key, fd;

		if (strlen(kwork->profile_name) >= MAX_KWORKNAME) {
			pr_err("Requested name filter %s too large, limit to %d\n",
			       kwork->profile_name, MAX_KWORKNAME - 1);
			return -1;
		}

		fd = bpf_map__fd(skel->maps.perf_kwork_name_filter);
		if (fd < 0) {
			pr_debug("Invalid name filter fd\n");
			return -1;
		}

		/* Single-entry map: the filter string always lives at key 0. */
		key = 0;
		bpf_map_update_elem(fd, &key, kwork->profile_name, BPF_ANY);

		skel->bss->has_name_filter = 1;
	}

	return 0;
}

/*
 * Open the kwork_trace skeleton, autoload only the programs required
 * by the requested classes/report type, then load, apply filters and
 * attach.  Returns 0 on success, -1 on any failure.
 */
int perf_kwork__trace_prepare_bpf(struct perf_kwork *kwork)
{
	struct bpf_program *prog;
	struct kwork_class *class;
	struct kwork_class_bpf *class_bpf;
	enum kwork_class_type type;

	skel = kwork_trace_bpf__open();
	if (!skel) {
		pr_debug("Failed to open kwork trace skeleton\n");
		return -1;
	}

	/*
	 * set all progs to non-autoload,
	 * then set corresponding progs according to config
	 */
	bpf_object__for_each_program(prog, skel->obj)
		bpf_program__set_autoload(prog, false);

	list_for_each_entry(class, &kwork->class_list, list) {
		type = class->type;
		if (!valid_kwork_class_type(type) ||
		    (kwork_class_bpf_supported_list[type] == NULL)) {
			pr_err("Unsupported bpf trace class %s\n", class->name);
			goto out;
		}

		class_bpf = kwork_class_bpf_supported_list[type];
		class_bpf->class = class;

		if (class_bpf->load_prepare != NULL)
			class_bpf->load_prepare(kwork);
	}

	if (kwork_trace_bpf__load(skel)) {
		pr_debug("Failed to load kwork trace skeleton\n");
		goto out;
	}

	if (setup_filters(kwork))
		goto out;

	if (kwork_trace_bpf__attach(skel)) {
		pr_debug("Failed to attach kwork trace skeleton\n");
		goto out;
	}

	return 0;

out:
	/* NOTE(review): 'skel' is left dangling after destroy — a later
	 * perf_kwork__report_cleanup_bpf() would double-destroy; verify
	 * callers never reach cleanup after a failed prepare. */
	kwork_trace_bpf__destroy(skel);
	return -1;
}
/*
 * Convert one BPF report-map entry (key + aggregated data) into a
 * kwork_work in the session, filling runtime or latency statistics
 * according to kwork->report.  Also stamps the session's time window
 * from the trace start/finish timestamps.
 *
 * Returns 0 on success, -1 on invalid type/report or allocation failure.
 */
static int add_work(struct perf_kwork *kwork,
		    struct work_key *key,
		    struct report_data *data)
{
	struct kwork_work *work;
	struct kwork_class_bpf *bpf_trace;
	struct kwork_work tmp = {
		.id = key->id,
		.name = NULL,
		.cpu = key->cpu,
	};
	enum kwork_class_type type = key->type;

	if (!valid_kwork_class_type(type)) {
		pr_debug("Invalid class type %d to add work\n", type);
		return -1;
	}

	bpf_trace = kwork_class_bpf_supported_list[type];
	tmp.class = bpf_trace->class;

	/* tmp.name is strdup()'d by the callback; presumably
	 * perf_kwork_add_work() takes ownership — verify in kwork.c. */
	if ((bpf_trace->get_work_name != NULL) &&
	    (bpf_trace->get_work_name(key, &tmp.name)))
		return -1;

	work = perf_kwork_add_work(kwork, tmp.class, &tmp);
	if (work == NULL)
		return -1;

	if (kwork->report == KWORK_REPORT_RUNTIME) {
		work->nr_atoms = data->nr;
		work->total_runtime = data->total_time;
		work->max_runtime = data->max_time;
		work->max_runtime_start = data->max_time_start;
		work->max_runtime_end = data->max_time_end;
	} else if (kwork->report == KWORK_REPORT_LATENCY) {
		work->nr_atoms = data->nr;
		work->total_latency = data->total_time;
		work->max_latency = data->max_time;
		work->max_latency_start = data->max_time_start;
		work->max_latency_end = data->max_time_end;
	} else {
		pr_debug("Invalid bpf report type %d\n", kwork->report);
		return -1;
	}

	kwork->timestart = (u64)ts_start.tv_sec * NSEC_PER_SEC + ts_start.tv_nsec;
	kwork->timeend = (u64)ts_end.tv_sec * NSEC_PER_SEC + ts_end.tv_nsec;

	return 0;
}

/*
 * Walk every entry of the perf_kwork_report map and feed non-empty
 * ones into the session via add_work().  Returns 0 on success, -1 on
 * lookup or add failure.
 */
int perf_kwork__report_read_bpf(struct perf_kwork *kwork)
{
	struct report_data data;
	struct work_key key = {
		.type = 0,
		.cpu  = 0,
		.id   = 0,
	};
	struct work_key prev = {
		.type = 0,
		.cpu  = 0,
		.id   = 0,
	};
	int fd = bpf_map__fd(skel->maps.perf_kwork_report);

	if (fd < 0) {
		pr_debug("Invalid report fd\n");
		return -1;
	}

	/* Standard BPF map iteration: get_next_key from 'prev', look up,
	 * then advance 'prev' to the key just visited. */
	while (!bpf_map_get_next_key(fd, &prev, &key)) {
		if ((bpf_map_lookup_elem(fd, &key, &data)) != 0) {
			pr_debug("Failed to lookup report elem\n");
			return -1;
		}

		/* Skip entries that never recorded an event. */
		if ((data.nr != 0) && (add_work(kwork, &key, &data) != 0))
			return -1;

		prev = key;
	}
	return 0;
}
348daf07d22SYang Jihong void perf_kwork__report_cleanup_bpf(void) 349daf07d22SYang Jihong { 350daf07d22SYang Jihong kwork_trace_bpf__destroy(skel); 351daf07d22SYang Jihong } 352