// SPDX-License-Identifier: GPL-2.0
#include "util/bpf_counter.h"
#include "util/debug.h"
#include "util/evsel.h"
#include "util/evlist.h"
#include "util/off_cpu.h"
#include "util/perf-hooks.h"
#include "util/record.h"
#include "util/session.h"
#include "util/target.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/cgroup.h"
#include "util/strlist.h"
#include <bpf/bpf.h>

#include "bpf_skel/off_cpu.skel.h"

#define MAX_STACKS 32
#define MAX_PROC 4096
/* we don't need an actual timestamp, we just want the samples sorted last */
#define OFF_CPU_TIMESTAMP (~0ull << 32)

static struct off_cpu_bpf *skel;

struct off_cpu_key {
	u32 pid;
	u32 tgid;
	u32 stack_id;
	u32 state;
	u64 cgroup_id;
};

union off_cpu_data {
	struct perf_event_header hdr;
	u64 array[1024 / sizeof(u64)];
};

/* add a software BPF-output event to carry the synthesized off-CPU samples */
static int off_cpu_config(struct evlist *evlist)
{
	struct evsel *evsel;
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_SOFTWARE,
		.config	= PERF_COUNT_SW_BPF_OUTPUT,
		.size	= sizeof(attr), /* to capture ABI version */
	};
	char *evname = strdup(OFFCPU_EVENT);

	if (evname == NULL)
		return -ENOMEM;

	evsel = evsel__new(&attr);
	if (!evsel) {
		free(evname);
		return -ENOMEM;
	}

	evsel->core.attr.freq = 1;
	evsel->core.attr.sample_period = 1;
	/* off-cpu analysis depends on stack traces */
	evsel->core.attr.sample_type = PERF_SAMPLE_CALLCHAIN;

	evlist__add(evlist, evsel);

	free(evsel->name);
	evsel->name = evname;

	return 0;
}

/* "record_start" hook: enable BPF sampling once the workload is started */
static void off_cpu_start(void *arg)
{
	struct evlist *evlist = arg;

	/* update the task filter for the given workload */
	if (skel->rodata->has_task && skel->rodata->uses_tgid &&
	    perf_thread_map__pid(evlist->core.threads, 0) != -1) {
		int fd;
		u32 pid;
		u8 val = 1;

		fd = bpf_map__fd(skel->maps.task_filter);
		pid = perf_thread_map__pid(evlist->core.threads, 0);
		bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
	}

	skel->bss->enabled = 1;
}

/* "record_end" hook: disable sampling and tear down the skeleton */
static void off_cpu_finish(void *arg __maybe_unused)
{
	skel->bss->enabled = 0;
	off_cpu_bpf__destroy(skel);
}

/* the v5.18 kernel added the prev_state arg, so check the tracepoint signature */
static void check_sched_switch_args(void)
{
	struct btf *btf = btf__load_vmlinux_btf();
	const struct btf_type *t1, *t2, *t3;
	u32 type_id;

	type_id = btf__find_by_name_kind(btf, "btf_trace_sched_switch",
					 BTF_KIND_TYPEDEF);
	if ((s32)type_id < 0)
		goto cleanup;

	t1 = btf__type_by_id(btf, type_id);
	if (t1 == NULL)
		goto cleanup;

	t2 = btf__type_by_id(btf, t1->type);
	if (t2 == NULL || !btf_is_ptr(t2))
		goto cleanup;

	t3 = btf__type_by_id(btf, t2->type);
	/* the btf_trace func proto has one more argument for the context */
	if (t3 && btf_is_func_proto(t3) && btf_vlen(t3) == 5) {
		/* new format: pass prev_state as the 4th arg */
		skel->rodata->has_prev_state = true;
	}
cleanup:
	btf__free(btf);
}

/*
 * Open, configure, load and attach the off-CPU BPF skeleton,
 * and populate its CPU/task/cgroup filter maps.
 */
int off_cpu_prepare(struct evlist *evlist, struct target *target,
		    struct record_opts *opts)
{
	int err, fd, i;
	int ncpus = 1, ntasks = 1, ncgrps = 1;
	struct strlist *pid_slist = NULL;
	struct str_node *pos;

	if (off_cpu_config(evlist) < 0) {
		pr_err("Failed to config off-cpu BPF event\n");
		return -1;
	}

	skel = off_cpu_bpf__open();
	if (!skel) {
		pr_err("Failed to open off-cpu BPF skeleton\n");
		return -1;
	}

	/* no need to set a cpu filter for system-wide mode */
	if (target->cpu_list) {
		ncpus = perf_cpu_map__nr(evlist->core.user_requested_cpus);
		bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
		skel->rodata->has_cpu = 1;
	}

	if (target->pid) {
		pid_slist = strlist__new(target->pid, NULL);
		if (!pid_slist) {
			pr_err("Failed to create a strlist for pid\n");
			return -1;
		}

		ntasks = 0;
		strlist__for_each_entry(pos, pid_slist) {
			char *end_ptr;
			int pid = strtol(pos->s, &end_ptr, 10);

			if (pid == INT_MIN || pid == INT_MAX ||
			    (*end_ptr != '\0' && *end_ptr != ','))
				continue;

			ntasks++;
		}

		if (ntasks < MAX_PROC)
			ntasks = MAX_PROC;

		bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
		skel->rodata->has_task = 1;
		skel->rodata->uses_tgid = 1;
	} else if (target__has_task(target)) {
		ntasks = perf_thread_map__nr(evlist->core.threads);
		bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
		skel->rodata->has_task = 1;
	} else if (target__none(target)) {
		bpf_map__set_max_entries(skel->maps.task_filter, MAX_PROC);
		skel->rodata->has_task = 1;
		skel->rodata->uses_tgid = 1;
	}

	if (evlist__first(evlist)->cgrp) {
		ncgrps = evlist->core.nr_entries - 1; /* excluding a dummy */
		bpf_map__set_max_entries(skel->maps.cgroup_filter, ncgrps);

		if (!cgroup_is_v2("perf_event"))
			skel->rodata->uses_cgroup_v1 = true;
		skel->rodata->has_cgroup = 1;
	}

	if (opts->record_cgroup) {
		skel->rodata->needs_cgroup = true;

		if (!cgroup_is_v2("perf_event"))
			skel->rodata->uses_cgroup_v1 = true;
	}

	set_max_rlimit();
	check_sched_switch_args();

	err = off_cpu_bpf__load(skel);
	if (err) {
		pr_err("Failed to load off-cpu skeleton\n");
		goto out;
	}

	if (target->cpu_list) {
		u32 cpu;
		u8 val = 1;

		fd = bpf_map__fd(skel->maps.cpu_filter);

		for (i = 0; i < ncpus; i++) {
			cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, i).cpu;
			bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
		}
	}

	if (target->pid) {
		u8 val = 1;

		fd = bpf_map__fd(skel->maps.task_filter);

		strlist__for_each_entry(pos, pid_slist) {
			char *end_ptr;
			u32 tgid;
			int pid = strtol(pos->s, &end_ptr, 10);

			if (pid == INT_MIN || pid == INT_MAX ||
			    (*end_ptr != '\0' && *end_ptr != ','))
				continue;

			tgid = pid;
			bpf_map_update_elem(fd, &tgid, &val, BPF_ANY);
		}
	} else if (target__has_task(target)) {
		u32 pid;
		u8 val = 1;

		fd = bpf_map__fd(skel->maps.task_filter);

		for (i = 0; i < ntasks; i++) {
			pid = perf_thread_map__pid(evlist->core.threads, i);
			bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
		}
	}

	if (evlist__first(evlist)->cgrp) {
		struct evsel *evsel;
		u8 val = 1;

		fd = bpf_map__fd(skel->maps.cgroup_filter);

		evlist__for_each_entry(evlist, evsel) {
			struct cgroup *cgrp = evsel->cgrp;

			if (cgrp == NULL)
				continue;

			if (!cgrp->id && read_cgroup_id(cgrp) < 0) {
				pr_err("Failed to read cgroup id of %s\n",
				       cgrp->name);
				goto out;
			}

			bpf_map_update_elem(fd, &cgrp->id, &val, BPF_ANY);
		}
	}

	err = off_cpu_bpf__attach(skel);
	if (err) {
		pr_err("Failed to attach off-cpu BPF skeleton\n");
		goto out;
	}

	if (perf_hooks__set_hook("record_start", off_cpu_start, evlist) ||
	    perf_hooks__set_hook("record_end", off_cpu_finish, evlist)) {
		pr_err("Failed to set off-cpu hooks\n");
		goto out;
	}

	return 0;

out:
	off_cpu_bpf__destroy(skel);
	return -1;
}

/*
 * Drain the BPF maps and append one synthetic PERF_RECORD_SAMPLE per
 * off_cpu_key to the data file.  Returns the number of bytes written.
 */
int off_cpu_write(struct perf_session *session)
{
	int bytes = 0, size;
	int fd, stack;
	u64 sample_type, val, sid = 0;
	struct evsel *evsel;
	struct perf_data_file *file = &session->data->file;
	struct off_cpu_key prev, key;
	union off_cpu_data data = {
		.hdr = {
			.type = PERF_RECORD_SAMPLE,
			.misc = PERF_RECORD_MISC_USER,
		},
	};
	u64 tstamp = OFF_CPU_TIMESTAMP;

	skel->bss->enabled = 0;

	evsel = evlist__find_evsel_by_str(session->evlist, OFFCPU_EVENT);
	if (evsel == NULL) {
		pr_err("%s evsel not found\n", OFFCPU_EVENT);
		return 0;
	}

	sample_type = evsel->core.attr.sample_type;

	if (sample_type & ~OFFCPU_SAMPLE_TYPES) {
		pr_err("unsupported sample type: %llx\n",
		       (unsigned long long)sample_type);
		return -1;
	}

	if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER)) {
		if (evsel->core.id)
			sid = evsel->core.id[0];
	}

	fd = bpf_map__fd(skel->maps.off_cpu);
	stack = bpf_map__fd(skel->maps.stacks);
	memset(&prev, 0, sizeof(prev));

	while (!bpf_map_get_next_key(fd, &prev, &key)) {
		int n = 1; /* start from perf_event_header */
		int ip_pos = -1;

		bpf_map_lookup_elem(fd, &key, &val);

		if (sample_type & PERF_SAMPLE_IDENTIFIER)
			data.array[n++] = sid;
		if (sample_type & PERF_SAMPLE_IP) {
			ip_pos = n;
			data.array[n++] = 0; /* will be updated */
		}
		if (sample_type & PERF_SAMPLE_TID)
			data.array[n++] = (u64)key.pid << 32 | key.tgid;
		if (sample_type & PERF_SAMPLE_TIME)
			data.array[n++] = tstamp;
		if (sample_type & PERF_SAMPLE_ID)
			data.array[n++] = sid;
		if (sample_type & PERF_SAMPLE_CPU)
			data.array[n++] = 0;
		if (sample_type & PERF_SAMPLE_PERIOD)
			data.array[n++] = val;
		if (sample_type & PERF_SAMPLE_CALLCHAIN) {
			int len = 0;

			/* data.array[n] is callchain->nr (updated later) */
			data.array[n + 1] = PERF_CONTEXT_USER;
			data.array[n + 2] = 0;

			bpf_map_lookup_elem(stack, &key.stack_id, &data.array[n + 2]);
			while (data.array[n + 2 + len])
				len++;

			/* update the length of the callchain */
			data.array[n] = len + 1;

			/* update the sample ip with the first callchain entry */
			if (ip_pos >= 0)
				data.array[ip_pos] = data.array[n + 2];

			/* calculate the sample callchain data array length */
			n += len + 2;
		}
		if (sample_type & PERF_SAMPLE_CGROUP)
			data.array[n++] = key.cgroup_id;

		size = n * sizeof(u64);
		data.hdr.size = size;
		bytes += size;

		if (perf_data_file__write(file, &data, size) < 0) {
			pr_err("failed to write perf data, error: %m\n");
			return bytes;
		}

		prev = key;
		/* increase the dummy timestamp to sort later samples */
		tstamp++;
	}
	return bytes;
}
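
/*
 * For reference, a sketch of how off_cpu_write() lays out one synthesized
 * sample, assuming (purely for illustration) a sample_type of
 * PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_PERIOD |
 * PERF_SAMPLE_CALLCHAIN; each slot is emitted only when the corresponding
 * bit is set, in the order the bits are tested above:
 *
 *   array[0]         struct perf_event_header (PERF_RECORD_SAMPLE,
 *                    PERF_RECORD_MISC_USER, size)
 *   array[1]         (u64)key.pid << 32 | key.tgid     PERF_SAMPLE_TID
 *   array[2]         tstamp                            PERF_SAMPLE_TIME
 *                    (OFF_CPU_TIMESTAMP incremented per record, a dummy
 *                    value that sorts these samples after all real ones)
 *   array[3]         val from the off_cpu map          PERF_SAMPLE_PERIOD
 *                    (the accumulated off-cpu time)
 *   array[4]         callchain nr = len + 1            PERF_SAMPLE_CALLCHAIN
 *   array[5]         PERF_CONTEXT_USER
 *   array[6..5+len]  stack entries from the stacks map
 */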