xref: /linux/tools/perf/util/bpf_lock_contention.c (revision 2241f81c91f211b512bd2c3a26a4a74258d0e008)
// SPDX-License-Identifier: GPL-2.0
#include "util/cgroup.h"
#include "util/debug.h"
#include "util/evlist.h"
#include "util/machine.h"
#include "util/map.h"
#include "util/symbol.h"
#include "util/target.h"
#include "util/thread.h"
#include "util/thread_map.h"
#include "util/lock-contention.h"
#include <linux/zalloc.h>
#include <linux/string.h>
#include <bpf/bpf.h>

#include "bpf_skel/lock_contention.skel.h"
#include "bpf_skel/lock_data.h"

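/*
 * Single skeleton instance shared by all functions in this file: created in
 * lock_contention_prepare() and released in lock_contention_finish().
 */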
static struct lock_contention_bpf *skel;

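/*
 * Open the skeleton, size its maps for the requested aggregation mode and
 * filters, load it into the kernel, fill the filter maps and finally attach
 * the BPF programs.
 */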
int lock_contention_prepare(struct lock_contention *con)
{
	int i, fd;
	int ncpus = 1, ntasks = 1, ntypes = 1, naddrs = 1, ncgrps = 1;
	struct evlist *evlist = con->evlist;
	struct target *target = con->target;

	skel = lock_contention_bpf__open();
	if (!skel) {
		pr_err("Failed to open lock-contention BPF skeleton\n");
		return -1;
	}

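	/*
	 * Map geometry can only be changed between open and load, so fix the
	 * value size and max_entries before loading the skeleton.
	 */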
	bpf_map__set_value_size(skel->maps.stacks, con->max_stack * sizeof(u64));
	bpf_map__set_max_entries(skel->maps.lock_stat, con->map_nr_entries);
	bpf_map__set_max_entries(skel->maps.tstamp, con->map_nr_entries);

	if (con->aggr_mode == LOCK_AGGR_TASK)
		bpf_map__set_max_entries(skel->maps.task_data, con->map_nr_entries);
	else
		bpf_map__set_max_entries(skel->maps.task_data, 1);

	if (con->save_callstack)
		bpf_map__set_max_entries(skel->maps.stacks, con->map_nr_entries);
	else
		bpf_map__set_max_entries(skel->maps.stacks, 1);

	if (target__has_cpu(target))
		ncpus = perf_cpu_map__nr(evlist->core.user_requested_cpus);
	if (target__has_task(target))
		ntasks = perf_thread_map__nr(evlist->core.threads);
	if (con->filters->nr_types)
		ntypes = con->filters->nr_types;
	if (con->filters->nr_cgrps)
		ncgrps = con->filters->nr_cgrps;

	/* resolve lock name filters to addr */
	if (con->filters->nr_syms) {
		struct symbol *sym;
		struct map *kmap;
		unsigned long *addrs;

		for (i = 0; i < con->filters->nr_syms; i++) {
			sym = machine__find_kernel_symbol_by_name(con->machine,
								  con->filters->syms[i],
								  &kmap);
			if (sym == NULL) {
				pr_warning("ignore unknown symbol: %s\n",
					   con->filters->syms[i]);
				continue;
			}

			addrs = realloc(con->filters->addrs,
					(con->filters->nr_addrs + 1) * sizeof(*addrs));
			if (addrs == NULL) {
				pr_warning("memory allocation failure\n");
				continue;
			}

			addrs[con->filters->nr_addrs++] = map__unmap_ip(kmap, sym->start);
			con->filters->addrs = addrs;
		}
		naddrs = con->filters->nr_addrs;
	}

	bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
	bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
	bpf_map__set_max_entries(skel->maps.type_filter, ntypes);
	bpf_map__set_max_entries(skel->maps.addr_filter, naddrs);
	bpf_map__set_max_entries(skel->maps.cgroup_filter, ncgrps);

	if (lock_contention_bpf__load(skel) < 0) {
		pr_err("Failed to load lock-contention BPF skeleton\n");
		return -1;
	}

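	/*
	 * The filter maps can only be populated once the skeleton is loaded
	 * and the map file descriptors are available.
	 */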
	if (target__has_cpu(target)) {
		u32 cpu;
		u8 val = 1;

		skel->bss->has_cpu = 1;
		fd = bpf_map__fd(skel->maps.cpu_filter);

		for (i = 0; i < ncpus; i++) {
			cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, i).cpu;
			bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
		}
	}

	if (target__has_task(target)) {
		u32 pid;
		u8 val = 1;

		skel->bss->has_task = 1;
		fd = bpf_map__fd(skel->maps.task_filter);

		for (i = 0; i < ntasks; i++) {
			pid = perf_thread_map__pid(evlist->core.threads, i);
			bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
		}
	}

	if (target__none(target) && evlist->workload.pid > 0) {
		u32 pid = evlist->workload.pid;
		u8 val = 1;

		skel->bss->has_task = 1;
		fd = bpf_map__fd(skel->maps.task_filter);
		bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
	}

	if (con->filters->nr_types) {
		u8 val = 1;

		skel->bss->has_type = 1;
		fd = bpf_map__fd(skel->maps.type_filter);

		for (i = 0; i < con->filters->nr_types; i++)
			bpf_map_update_elem(fd, &con->filters->types[i], &val, BPF_ANY);
	}

	if (con->filters->nr_addrs) {
		u8 val = 1;

		skel->bss->has_addr = 1;
		fd = bpf_map__fd(skel->maps.addr_filter);

		for (i = 0; i < con->filters->nr_addrs; i++)
			bpf_map_update_elem(fd, &con->filters->addrs[i], &val, BPF_ANY);
	}

	if (con->filters->nr_cgrps) {
		u8 val = 1;

		skel->bss->has_cgroup = 1;
		fd = bpf_map__fd(skel->maps.cgroup_filter);

		for (i = 0; i < con->filters->nr_cgrps; i++)
			bpf_map_update_elem(fd, &con->filters->cgrps[i], &val, BPF_ANY);
	}

	/* these don't work well if in the rodata section */
	skel->bss->stack_skip = con->stack_skip;
	skel->bss->aggr_mode = con->aggr_mode;
	skel->bss->needs_callstack = con->save_callstack;
	skel->bss->lock_owner = con->owner;

	if (con->aggr_mode == LOCK_AGGR_CGROUP) {
		if (cgroup_is_v2("perf_event"))
			skel->bss->use_cgroup_v2 = 1;

		read_all_cgroups(&con->cgroups);
	}

	bpf_program__set_autoload(skel->progs.collect_lock_syms, false);

	lock_contention_bpf__attach(skel);
	return 0;
}

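/*
 * Start/stop only toggle the 'enabled' flag in the skeleton's BSS; the
 * attached BPF programs check this flag before recording contention data.
 */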
int lock_contention_start(void)
{
	skel->bss->enabled = 1;
	return 0;
}

int lock_contention_stop(void)
{
	skel->bss->enabled = 0;
	return 0;
}

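/*
 * Resolve a human-readable name for a map entry depending on the aggregation
 * mode: task comm, lock symbol/address, cgroup name, or the first caller
 * outside of the locking internals.
 */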
static const char *lock_contention_get_name(struct lock_contention *con,
					    struct contention_key *key,
					    u64 *stack_trace, u32 flags)
{
	int idx = 0;
	u64 addr;
	const char *name = "";
	static char name_buf[KSYM_NAME_LEN];
	struct symbol *sym;
	struct map *kmap;
	struct machine *machine = con->machine;

	if (con->aggr_mode == LOCK_AGGR_TASK) {
		struct contention_task_data task;
		int pid = key->pid;
		int task_fd = bpf_map__fd(skel->maps.task_data);

		/* do not update idle comm which contains CPU number */
		if (pid) {
			struct thread *t = __machine__findnew_thread(machine, /*pid=*/-1, pid);

			if (t == NULL)
				return name;
			if (!bpf_map_lookup_elem(task_fd, &pid, &task) &&
			    thread__set_comm(t, task.comm, /*timestamp=*/0))
				name = task.comm;
		}
		return name;
	}

	if (con->aggr_mode == LOCK_AGGR_ADDR) {
		int lock_fd = bpf_map__fd(skel->maps.lock_syms);

		/* per-process locks set upper bits of the flags */
		if (flags & LCD_F_MMAP_LOCK)
			return "mmap_lock";
		if (flags & LCD_F_SIGHAND_LOCK)
			return "siglock";

		/* global locks with symbols */
		sym = machine__find_kernel_symbol(machine, key->lock_addr_or_cgroup, &kmap);
		if (sym)
			return sym->name;

		/* try semi-global locks collected separately */
		if (!bpf_map_lookup_elem(lock_fd, &key->lock_addr_or_cgroup, &flags)) {
			if (flags == LOCK_CLASS_RQLOCK)
				return "rq_lock";
		}

		return "";
	}

	if (con->aggr_mode == LOCK_AGGR_CGROUP) {
		u64 cgrp_id = key->lock_addr_or_cgroup;
		struct cgroup *cgrp = __cgroup__find(&con->cgroups, cgrp_id);

		if (cgrp)
			return cgrp->name;

		snprintf(name_buf, sizeof(name_buf), "cgroup:%lu", cgrp_id);
		return name_buf;
	}

	/* LOCK_AGGR_CALLER: skip lock internal functions */
	while (machine__is_lock_function(machine, stack_trace[idx]) &&
	       idx < con->max_stack - 1)
		idx++;

	addr = stack_trace[idx];
	sym = machine__find_kernel_symbol(machine, addr, &kmap);

	if (sym) {
		unsigned long offset;

		offset = map__map_ip(kmap, addr) - sym->start;

		if (offset == 0)
			return sym->name;

		snprintf(name_buf, sizeof(name_buf), "%s+%#lx", sym->name, offset);
	} else {
		snprintf(name_buf, sizeof(name_buf), "%#lx", (unsigned long)addr);
	}

	return name_buf;
}

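/*
 * Drain the BPF lock_stat map into the tool's lock_stat entries, merging
 * data into entries that were already seen and resolving names on first use.
 */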
int lock_contention_read(struct lock_contention *con)
{
	int fd, stack, err = 0;
	struct contention_key *prev_key, key = {};
	struct contention_data data = {};
	struct lock_stat *st = NULL;
	struct machine *machine = con->machine;
	u64 *stack_trace;
	size_t stack_size = con->max_stack * sizeof(*stack_trace);

	fd = bpf_map__fd(skel->maps.lock_stat);
	stack = bpf_map__fd(skel->maps.stacks);

	con->fails.task = skel->bss->task_fail;
	con->fails.stack = skel->bss->stack_fail;
	con->fails.time = skel->bss->time_fail;
	con->fails.data = skel->bss->data_fail;

	stack_trace = zalloc(stack_size);
	if (stack_trace == NULL)
		return -1;

	if (con->aggr_mode == LOCK_AGGR_TASK) {
		struct thread *idle = __machine__findnew_thread(machine,
								/*pid=*/0,
								/*tid=*/0);
		thread__set_comm(idle, "swapper", /*timestamp=*/0);
	}

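	/*
	 * Run the collect_lock_syms program once on the current CPU to record
	 * addresses of semi-global locks (e.g. the per-cpu rq_lock) in the
	 * lock_syms map so they can be named later.
	 */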
	if (con->aggr_mode == LOCK_AGGR_ADDR) {
		DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
			.flags = BPF_F_TEST_RUN_ON_CPU,
		);
		int prog_fd = bpf_program__fd(skel->progs.collect_lock_syms);

		bpf_prog_test_run_opts(prog_fd, &opts);
	}

	/* make sure it loads the kernel map */
	map__load(maps__first(machine->kmaps)->map);

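	/* iterate all entries; a NULL prev_key starts from the first key */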
	prev_key = NULL;
	while (!bpf_map_get_next_key(fd, prev_key, &key)) {
		s64 ls_key;
		const char *name;

		/* to handle errors in the loop body */
		err = -1;

		bpf_map_lookup_elem(fd, &key, &data);
		if (con->save_callstack) {
			bpf_map_lookup_elem(stack, &key.stack_id, stack_trace);

			if (!match_callstack_filter(machine, stack_trace)) {
				con->nr_filtered += data.count;
				goto next;
			}
		}

		switch (con->aggr_mode) {
		case LOCK_AGGR_CALLER:
			ls_key = key.stack_id;
			break;
		case LOCK_AGGR_TASK:
			ls_key = key.pid;
			break;
		case LOCK_AGGR_ADDR:
		case LOCK_AGGR_CGROUP:
			ls_key = key.lock_addr_or_cgroup;
			break;
		default:
			goto next;
		}

		st = lock_stat_find(ls_key);
		if (st != NULL) {
			st->wait_time_total += data.total_time;
			if (st->wait_time_max < data.max_time)
				st->wait_time_max = data.max_time;
			if (st->wait_time_min > data.min_time)
				st->wait_time_min = data.min_time;

			st->nr_contended += data.count;
			if (st->nr_contended)
				st->avg_wait_time = st->wait_time_total / st->nr_contended;
			goto next;
		}

		name = lock_contention_get_name(con, &key, stack_trace, data.flags);
		st = lock_stat_findnew(ls_key, name, data.flags);
		if (st == NULL)
			break;

		st->nr_contended = data.count;
		st->wait_time_total = data.total_time;
		st->wait_time_max = data.max_time;
		st->wait_time_min = data.min_time;

		if (data.count)
			st->avg_wait_time = data.total_time / data.count;

		if (con->aggr_mode == LOCK_AGGR_CALLER && verbose > 0) {
			st->callstack = memdup(stack_trace, stack_size);
			if (st->callstack == NULL)
				break;
		}

next:
		prev_key = &key;

		/* we're fine now, reset the error */
		err = 0;
	}

	free(stack_trace);

	return err;
}

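/*
 * Tear down: disable and destroy the skeleton, then drop the references
 * taken on cgroups by read_all_cgroups().
 */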
int lock_contention_finish(struct lock_contention *con)
{
	if (skel) {
		skel->bss->enabled = 0;
		lock_contention_bpf__destroy(skel);
	}

	while (!RB_EMPTY_ROOT(&con->cgroups)) {
		struct rb_node *node = rb_first(&con->cgroups);
		struct cgroup *cgrp = rb_entry(node, struct cgroup, node);

		rb_erase(node, &con->cgroups);
		cgroup__put(cgrp);
	}

	return 0;
}
417