// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (c) 2022 Google
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
#include <asm-generic/errno-base.h>

#include "lock_data.h"

/* for collect_lock_syms().  4096 was rejected by the verifier */
#define MAX_CPUS  1024

/* lock contention flags from include/trace/events/lock.h */
#define LCB_F_SPIN	(1U << 0)
#define LCB_F_READ	(1U << 1)
#define LCB_F_WRITE	(1U << 2)
#define LCB_F_RT	(1U << 3)
#define LCB_F_PERCPU	(1U << 4)
#define LCB_F_MUTEX	(1U << 5)

/* callstack storage */
struct {
	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u64));
	__uint(max_entries, MAX_ENTRIES);
} stacks SEC(".maps");

/* maintain timestamp at the beginning of contention */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, int);
	__type(value, struct tstamp_data);
	__uint(max_entries, MAX_ENTRIES);
} tstamp SEC(".maps");

/* maintain per-CPU timestamp at the beginning of contention */
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(struct tstamp_data));
	__uint(max_entries, 1);
} tstamp_cpu SEC(".maps");

/* actual lock contention statistics */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(key_size, sizeof(struct contention_key));
	__uint(value_size, sizeof(struct contention_data));
	__uint(max_entries, MAX_ENTRIES);
} lock_stat SEC(".maps");

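/* task command names keyed by pid, filled by update_task_data() */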
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(struct contention_task_data));
	__uint(max_entries, MAX_ENTRIES);
} task_data SEC(".maps");

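/* lock class (e.g. LOCK_CLASS_RQLOCK) keyed by lock address, filled by collect_lock_syms() */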
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(key_size, sizeof(__u64));
	__uint(value_size, sizeof(__u32));
	__uint(max_entries, MAX_ENTRIES);
} lock_syms SEC(".maps");

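/*
 * Filter maps: the presence of a key means the corresponding CPU, task,
 * lock type, lock address or cgroup is accepted (see can_record()).
 */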
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u8));
	__uint(max_entries, 1);
} cpu_filter SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u8));
	__uint(max_entries, 1);
} task_filter SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u8));
	__uint(max_entries, 1);
} type_filter SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(key_size, sizeof(__u64));
	__uint(value_size, sizeof(__u8));
	__uint(max_entries, 1);
} addr_filter SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(key_size, sizeof(__u64));
	__uint(value_size, sizeof(__u8));
	__uint(max_entries, 1);
} cgroup_filter SEC(".maps");

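/*
 * CO-RE "flavor" definitions: the ___old/___new suffix is stripped when
 * matching against kernel BTF, so field accesses are relocated to whichever
 * struct layout the running kernel actually has.
 */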
struct rw_semaphore___old {
	struct task_struct *owner;
} __attribute__((preserve_access_index));

struct rw_semaphore___new {
	atomic_long_t owner;
} __attribute__((preserve_access_index));

struct mm_struct___old {
	struct rw_semaphore mmap_sem;
} __attribute__((preserve_access_index));

struct mm_struct___new {
	struct rw_semaphore mmap_lock;
} __attribute__((preserve_access_index));

/* control flags */
int enabled;
int has_cpu;
int has_task;
int has_type;
int has_addr;
int has_cgroup;
int needs_callstack;
int stack_skip;
int lock_owner;

int use_cgroup_v2;
int perf_subsys_id = -1;

/* determine the key of lock stat */
int aggr_mode;

__u64 end_ts;

/* error stat */
int task_fail;
int stack_fail;
int time_fail;
int data_fail;

int task_map_full;
int data_map_full;

static inline __u64 get_current_cgroup_id(void)
{
	struct task_struct *task;
	struct cgroup *cgrp;

	if (use_cgroup_v2)
		return bpf_get_current_cgroup_id();

	task = bpf_get_current_task_btf();

	if (perf_subsys_id == -1) {
#if __has_builtin(__builtin_preserve_enum_value)
		perf_subsys_id = bpf_core_enum_value(enum cgroup_subsys_id,
						     perf_event_cgrp_id);
#else
		perf_subsys_id = perf_event_cgrp_id;
#endif
	}

	cgrp = BPF_CORE_READ(task, cgroups, subsys[perf_subsys_id], cgroup);
	return BPF_CORE_READ(cgrp, kn, id);
}

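/*
 * The tracepoint arguments arrive as an array of u64:
 * ctx[0] is the lock address and ctx[1] the LCB_F_* flags
 * (matching the contention_begin tracepoint prototype).
 */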
static inline int can_record(u64 *ctx)
{
	if (has_cpu) {
		__u32 cpu = bpf_get_smp_processor_id();
		__u8 *ok;

		ok = bpf_map_lookup_elem(&cpu_filter, &cpu);
		if (!ok)
			return 0;
	}

	if (has_task) {
		__u8 *ok;
		__u32 pid = bpf_get_current_pid_tgid();

		ok = bpf_map_lookup_elem(&task_filter, &pid);
		if (!ok)
			return 0;
	}

	if (has_type) {
		__u8 *ok;
		__u32 flags = (__u32)ctx[1];

		ok = bpf_map_lookup_elem(&type_filter, &flags);
		if (!ok)
			return 0;
	}

	if (has_addr) {
		__u8 *ok;
		__u64 addr = ctx[0];

		ok = bpf_map_lookup_elem(&addr_filter, &addr);
		if (!ok)
			return 0;
	}

	if (has_cgroup) {
		__u8 *ok;
		__u64 cgrp = get_current_cgroup_id();

		ok = bpf_map_lookup_elem(&cgroup_filter, &cgrp);
		if (!ok)
			return 0;
	}

	return 1;
}

static inline int update_task_data(struct task_struct *task)
{
	struct contention_task_data *p;
	int pid, err;

	err = bpf_core_read(&pid, sizeof(pid), &task->pid);
	if (err)
		return -1;

	p = bpf_map_lookup_elem(&task_data, &pid);
	if (p == NULL && !task_map_full) {
		struct contention_task_data data = {};

		BPF_CORE_READ_STR_INTO(&data.comm, task, comm);
		if (bpf_map_update_elem(&task_data, &pid, &data, BPF_NOEXIST) == -E2BIG)
			task_map_full = 1;
	}

	return 0;
}

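/* fallback for compilers that do not provide __has_builtin() */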
#ifndef __has_builtin
# define __has_builtin(x) 0
#endif

static inline struct task_struct *get_lock_owner(__u64 lock, __u32 flags)
{
	struct task_struct *task;
	__u64 owner = 0;

	if (flags & LCB_F_MUTEX) {
		struct mutex *mutex = (void *)lock;
		owner = BPF_CORE_READ(mutex, owner.counter);
	} else if (flags == LCB_F_READ || flags == LCB_F_WRITE) {
	/*
	 * Support for the BPF_TYPE_MATCHES argument to the
	 * __builtin_preserve_type_info builtin was added at some point during
	 * development of clang 15 and it's what is needed for
	 * bpf_core_type_matches.
	 */
#if __has_builtin(__builtin_preserve_type_info) && __clang_major__ >= 15
		if (bpf_core_type_matches(struct rw_semaphore___old)) {
			struct rw_semaphore___old *rwsem = (void *)lock;
			owner = (unsigned long)BPF_CORE_READ(rwsem, owner);
		} else if (bpf_core_type_matches(struct rw_semaphore___new)) {
			struct rw_semaphore___new *rwsem = (void *)lock;
			owner = BPF_CORE_READ(rwsem, owner.counter);
		}
#else
		/* assume new struct */
		struct rw_semaphore *rwsem = (void *)lock;
		owner = BPF_CORE_READ(rwsem, owner.counter);
#endif
	}

	if (!owner)
		return NULL;

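	/*
	 * The owner word of a mutex/rwsem keeps flag bits in its low bits;
	 * mask them off to recover the task_struct pointer.
	 */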
	task = (void *)(owner & ~7UL);
	return task;
}

static inline __u32 check_lock_type(__u64 lock, __u32 flags)
{
	struct task_struct *curr;
	struct mm_struct___old *mm_old;
	struct mm_struct___new *mm_new;

	switch (flags) {
	case LCB_F_READ:  /* rwsem */
	case LCB_F_WRITE:
		curr = bpf_get_current_task_btf();
		if (curr->mm == NULL)
			break;
		mm_new = (void *)curr->mm;
		if (bpf_core_field_exists(mm_new->mmap_lock)) {
			if (&mm_new->mmap_lock == (void *)lock)
				return LCD_F_MMAP_LOCK;
			break;
		}
		mm_old = (void *)curr->mm;
		if (bpf_core_field_exists(mm_old->mmap_sem)) {
			if (&mm_old->mmap_sem == (void *)lock)
				return LCD_F_MMAP_LOCK;
		}
		break;
	case LCB_F_SPIN:  /* spinlock */
		curr = bpf_get_current_task_btf();
		if (&curr->sighand->siglock == (void *)lock)
			return LCD_F_SIGHAND_LOCK;
		break;
	default:
		break;
	}
	return 0;
}

static inline struct tstamp_data *get_tstamp_elem(__u32 flags)
{
	__u32 pid;
	struct tstamp_data *pelem;

	/* Use per-cpu array map for spinlock and rwlock */
	if (flags == (LCB_F_SPIN | LCB_F_READ) || flags == LCB_F_SPIN ||
	    flags == (LCB_F_SPIN | LCB_F_WRITE)) {
		__u32 idx = 0;

		pelem = bpf_map_lookup_elem(&tstamp_cpu, &idx);
		/* Do not update the element for nested locks */
		if (pelem && pelem->lock)
			pelem = NULL;
		return pelem;
	}

	pid = bpf_get_current_pid_tgid();
	pelem = bpf_map_lookup_elem(&tstamp, &pid);
	/* Do not update the element for nested locks */
	if (pelem && pelem->lock)
		return NULL;

	if (pelem == NULL) {
		struct tstamp_data zero = {};

		if (bpf_map_update_elem(&tstamp, &pid, &zero, BPF_NOEXIST) < 0) {
			__sync_fetch_and_add(&task_fail, 1);
			return NULL;
		}

		pelem = bpf_map_lookup_elem(&tstamp, &pid);
		if (pelem == NULL) {
			__sync_fetch_and_add(&task_fail, 1);
			return NULL;
		}
	}
	return pelem;
}

SEC("tp_btf/contention_begin")
int contention_begin(u64 *ctx)
{
	struct tstamp_data *pelem;

	if (!enabled || !can_record(ctx))
		return 0;

	pelem = get_tstamp_elem(ctx[1]);
	if (pelem == NULL)
		return 0;

	pelem->timestamp = bpf_ktime_get_ns();
	pelem->lock = (__u64)ctx[0];
	pelem->flags = (__u32)ctx[1];

	if (needs_callstack) {
		pelem->stack_id = bpf_get_stackid(ctx, &stacks,
						  BPF_F_FAST_STACK_CMP | stack_skip);
		if (pelem->stack_id < 0)
			__sync_fetch_and_add(&stack_fail, 1);
	} else if (aggr_mode == LOCK_AGGR_TASK) {
		struct task_struct *task;

		if (lock_owner) {
			task = get_lock_owner(pelem->lock, pelem->flags);

			/* The flags field is not used anymore.  Pass the owner pid instead. */
			if (task)
				pelem->flags = BPF_CORE_READ(task, pid);
			else
				pelem->flags = -1U;

		} else {
			task = bpf_get_current_task_btf();
		}

		if (task) {
			if (update_task_data(task) < 0 && lock_owner)
				pelem->flags = -1U;
		}
	}

	return 0;
}

SEC("tp_btf/contention_end")
int contention_end(u64 *ctx)
{
	__u32 pid = 0, idx = 0;
	struct tstamp_data *pelem;
	struct contention_key key = {};
	struct contention_data *data;
	__u64 duration;
	bool need_delete = false;

	if (!enabled)
		return 0;

	/*
	 * For spinlock and rwlock, the timestamp was saved in the per-cpu
	 * map.  However, contention_end does not receive the flags, so it
	 * cannot know whether to read the per-cpu or the hash map.
	 *
	 * Try the per-cpu map first and check if there's active contention.
	 * If there is, do not read the hash map because the task cannot have
	 * moved on to sleeping locks before releasing the spinning locks.
	 */
	pelem = bpf_map_lookup_elem(&tstamp_cpu, &idx);
	if (pelem && pelem->lock) {
		if (pelem->lock != ctx[0])
			return 0;
	} else {
		pid = bpf_get_current_pid_tgid();
		pelem = bpf_map_lookup_elem(&tstamp, &pid);
		if (!pelem || pelem->lock != ctx[0])
			return 0;
		need_delete = true;
	}

	duration = bpf_ktime_get_ns() - pelem->timestamp;
	if ((__s64)duration < 0) {
		pelem->lock = 0;
		if (need_delete)
			bpf_map_delete_elem(&tstamp, &pid);
		__sync_fetch_and_add(&time_fail, 1);
		return 0;
	}

	switch (aggr_mode) {
	case LOCK_AGGR_CALLER:
		key.stack_id = pelem->stack_id;
		break;
	case LOCK_AGGR_TASK:
		if (lock_owner)
			key.pid = pelem->flags;
		else {
			if (!need_delete)
				pid = bpf_get_current_pid_tgid();
			key.pid = pid;
		}
		if (needs_callstack)
			key.stack_id = pelem->stack_id;
		break;
	case LOCK_AGGR_ADDR:
		key.lock_addr_or_cgroup = pelem->lock;
		if (needs_callstack)
			key.stack_id = pelem->stack_id;
		break;
	case LOCK_AGGR_CGROUP:
		key.lock_addr_or_cgroup = get_current_cgroup_id();
		break;
	default:
		/* should not happen */
		return 0;
	}

	data = bpf_map_lookup_elem(&lock_stat, &key);
	if (!data) {
		if (data_map_full) {
			pelem->lock = 0;
			if (need_delete)
				bpf_map_delete_elem(&tstamp, &pid);
			__sync_fetch_and_add(&data_fail, 1);
			return 0;
		}

		struct contention_data first = {
			.total_time = duration,
			.max_time = duration,
			.min_time = duration,
			.count = 1,
			.flags = pelem->flags,
		};
		int err;

		if (aggr_mode == LOCK_AGGR_ADDR)
			first.flags |= check_lock_type(pelem->lock, pelem->flags);

		err = bpf_map_update_elem(&lock_stat, &key, &first, BPF_NOEXIST);
		if (err < 0) {
			if (err == -E2BIG)
				data_map_full = 1;
			__sync_fetch_and_add(&data_fail, 1);
		}
		pelem->lock = 0;
		if (need_delete)
			bpf_map_delete_elem(&tstamp, &pid);
		return 0;
	}

	__sync_fetch_and_add(&data->total_time, duration);
	__sync_fetch_and_add(&data->count, 1);

	/* FIXME: need atomic operations */
	if (data->max_time < duration)
		data->max_time = duration;
	if (data->min_time > duration)
		data->min_time = duration;

	pelem->lock = 0;
	if (need_delete)
		bpf_map_delete_elem(&tstamp, &pid);
	return 0;
}

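/* per-CPU runqueue variable, resolved against kernel BTF via __ksym */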
extern struct rq runqueues __ksym;

struct rq___old {
	raw_spinlock_t lock;
} __attribute__((preserve_access_index));

struct rq___new {
	raw_spinlock_t __lock;
} __attribute__((preserve_access_index));

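/*
 * Record the address of every CPU's runqueue lock so that user space can
 * label contention on them as rq locks (LOCK_CLASS_RQLOCK).
 */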
SEC("raw_tp/bpf_test_finish")
int BPF_PROG(collect_lock_syms)
{
	__u64 lock_addr, lock_off;
	__u32 lock_flag;

	if (bpf_core_field_exists(struct rq___new, __lock))
		lock_off = offsetof(struct rq___new, __lock);
	else
		lock_off = offsetof(struct rq___old, lock);

	for (int i = 0; i < MAX_CPUS; i++) {
		struct rq *rq = bpf_per_cpu_ptr(&runqueues, i);

		if (rq == NULL)
			break;

		lock_addr = (__u64)(void *)rq + lock_off;
		lock_flag = LOCK_CLASS_RQLOCK;
		bpf_map_update_elem(&lock_syms, &lock_addr, &lock_flag, BPF_ANY);
	}
	return 0;
}

SEC("raw_tp/bpf_test_finish")
int BPF_PROG(end_timestamp)
{
	end_ts = bpf_ktime_get_ns();
	return 0;
}

char LICENSE[] SEC("license") = "Dual BSD/GPL";