xref: /linux/tools/perf/util/bpf_skel/lock_contention.bpf.c (revision 55d0969c451159cff86949b38c39171cab962069)
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (c) 2022 Google
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
#include <asm-generic/errno-base.h>

#include "lock_data.h"

/* for collect_lock_syms().  4096 was rejected by the verifier */
#define MAX_CPUS  1024

/* lock contention flags from include/trace/events/lock.h */
#define LCB_F_SPIN	(1U << 0)
#define LCB_F_READ	(1U << 1)
#define LCB_F_WRITE	(1U << 2)
#define LCB_F_RT	(1U << 3)
#define LCB_F_PERCPU	(1U << 4)
#define LCB_F_MUTEX	(1U << 5)

/* callstack storage */
struct {
	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u64));
	__uint(max_entries, MAX_ENTRIES);
} stacks SEC(".maps");

/* maintain timestamp at the beginning of contention */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, int);
	__type(value, struct tstamp_data);
	__uint(max_entries, MAX_ENTRIES);
} tstamp SEC(".maps");

/* maintain per-CPU timestamp at the beginning of contention */
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(struct tstamp_data));
	__uint(max_entries, 1);
} tstamp_cpu SEC(".maps");

/* actual lock contention statistics */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(key_size, sizeof(struct contention_key));
	__uint(value_size, sizeof(struct contention_data));
	__uint(max_entries, MAX_ENTRIES);
} lock_stat SEC(".maps");

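/* per-task command names, keyed by pid (filled by update_task_data()) */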
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(struct contention_task_data));
	__uint(max_entries, MAX_ENTRIES);
} task_data SEC(".maps");

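/* lock class flags for known lock addresses (currently the per-CPU rq locks), keyed by lock address */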
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(key_size, sizeof(__u64));
	__uint(value_size, sizeof(__u32));
	__uint(max_entries, MAX_ENTRIES);
} lock_syms SEC(".maps");

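/*
 * Filter maps: the presence of a key allows recording for that CPU, task,
 * lock type, lock address or cgroup (see can_record()).
 */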
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u8));
	__uint(max_entries, 1);
} cpu_filter SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u8));
	__uint(max_entries, 1);
} task_filter SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u8));
	__uint(max_entries, 1);
} type_filter SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(key_size, sizeof(__u64));
	__uint(value_size, sizeof(__u8));
	__uint(max_entries, 1);
} addr_filter SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(key_size, sizeof(__u64));
	__uint(value_size, sizeof(__u8));
	__uint(max_entries, 1);
} cgroup_filter SEC(".maps");

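/*
 * CO-RE variants of kernel structs whose layout changed over time:
 * the rw_semaphore owner field changed from a task_struct pointer to an
 * atomic_long_t, and mm_struct's mmap_sem was renamed to mmap_lock.
 */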
struct rw_semaphore___old {
	struct task_struct *owner;
} __attribute__((preserve_access_index));

struct rw_semaphore___new {
	atomic_long_t owner;
} __attribute__((preserve_access_index));

struct mm_struct___old {
	struct rw_semaphore mmap_sem;
} __attribute__((preserve_access_index));

struct mm_struct___new {
	struct rw_semaphore mmap_lock;
} __attribute__((preserve_access_index));

/* control flags */
const volatile int has_cpu;
const volatile int has_task;
const volatile int has_type;
const volatile int has_addr;
const volatile int has_cgroup;
const volatile int needs_callstack;
const volatile int stack_skip;
const volatile int lock_owner;
const volatile int use_cgroup_v2;

/* determine the key of lock stat */
const volatile int aggr_mode;

int enabled;

int perf_subsys_id = -1;

__u64 end_ts;

/* error stat */
int task_fail;
int stack_fail;
int time_fail;
int data_fail;

int task_map_full;
int data_map_full;

static inline __u64 get_current_cgroup_id(void)
{
	struct task_struct *task;
	struct cgroup *cgrp;

	if (use_cgroup_v2)
		return bpf_get_current_cgroup_id();

	task = bpf_get_current_task_btf();

	if (perf_subsys_id == -1) {
#if __has_builtin(__builtin_preserve_enum_value)
		perf_subsys_id = bpf_core_enum_value(enum cgroup_subsys_id,
						     perf_event_cgrp_id);
#else
		perf_subsys_id = perf_event_cgrp_id;
#endif
	}

	cgrp = BPF_CORE_READ(task, cgroups, subsys[perf_subsys_id], cgroup);
	return BPF_CORE_READ(cgrp, kn, id);
}

static inline int can_record(u64 *ctx)
{
	if (has_cpu) {
		__u32 cpu = bpf_get_smp_processor_id();
		__u8 *ok;

		ok = bpf_map_lookup_elem(&cpu_filter, &cpu);
		if (!ok)
			return 0;
	}

	if (has_task) {
		__u8 *ok;
		__u32 pid = bpf_get_current_pid_tgid();

		ok = bpf_map_lookup_elem(&task_filter, &pid);
		if (!ok)
			return 0;
	}

	if (has_type) {
		__u8 *ok;
		__u32 flags = (__u32)ctx[1];

		ok = bpf_map_lookup_elem(&type_filter, &flags);
		if (!ok)
			return 0;
	}

	if (has_addr) {
		__u8 *ok;
		__u64 addr = ctx[0];

		ok = bpf_map_lookup_elem(&addr_filter, &addr);
		if (!ok)
			return 0;
	}

	if (has_cgroup) {
		__u8 *ok;
		__u64 cgrp = get_current_cgroup_id();

		ok = bpf_map_lookup_elem(&cgroup_filter, &cgrp);
		if (!ok)
			return 0;
	}

	return 1;
}

static inline int update_task_data(struct task_struct *task)
{
	struct contention_task_data *p;
	int pid, err;

	err = bpf_core_read(&pid, sizeof(pid), &task->pid);
	if (err)
		return -1;

	p = bpf_map_lookup_elem(&task_data, &pid);
	if (p == NULL && !task_map_full) {
		struct contention_task_data data = {};

		BPF_CORE_READ_STR_INTO(&data.comm, task, comm);
		if (bpf_map_update_elem(&task_data, &pid, &data, BPF_NOEXIST) == -E2BIG)
			task_map_full = 1;
	}

	return 0;
}

#ifndef __has_builtin
# define __has_builtin(x) 0
#endif

static inline struct task_struct *get_lock_owner(__u64 lock, __u32 flags)
{
	struct task_struct *task;
	__u64 owner = 0;

	if (flags & LCB_F_MUTEX) {
		struct mutex *mutex = (void *)lock;
		owner = BPF_CORE_READ(mutex, owner.counter);
	} else if (flags == LCB_F_READ || flags == LCB_F_WRITE) {
	/*
	 * Support for the BPF_TYPE_MATCHES argument to the
	 * __builtin_preserve_type_info builtin was added at some point during
	 * development of clang 15 and it's what is needed for
	 * bpf_core_type_matches.
	 */
#if __has_builtin(__builtin_preserve_type_info) && __clang_major__ >= 15
		if (bpf_core_type_matches(struct rw_semaphore___old)) {
			struct rw_semaphore___old *rwsem = (void *)lock;
			owner = (unsigned long)BPF_CORE_READ(rwsem, owner);
		} else if (bpf_core_type_matches(struct rw_semaphore___new)) {
			struct rw_semaphore___new *rwsem = (void *)lock;
			owner = BPF_CORE_READ(rwsem, owner.counter);
		}
#else
		/* assume new struct */
		struct rw_semaphore *rwsem = (void *)lock;
		owner = BPF_CORE_READ(rwsem, owner.counter);
#endif
	}

	if (!owner)
		return NULL;

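	/* mutex and rwsem keep flag bits in the low bits of the owner field; mask them off to get the task pointer */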
	task = (void *)(owner & ~7UL);
	return task;
}

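/* identify well-known locks (mmap_lock, siglock) so they can be labeled when aggregating by lock address */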
static inline __u32 check_lock_type(__u64 lock, __u32 flags)
{
	struct task_struct *curr;
	struct mm_struct___old *mm_old;
	struct mm_struct___new *mm_new;
	struct sighand_struct *sighand;

	switch (flags) {
	case LCB_F_READ:  /* rwsem */
	case LCB_F_WRITE:
		curr = bpf_get_current_task_btf();
		if (curr->mm == NULL)
			break;
		mm_new = (void *)curr->mm;
		if (bpf_core_field_exists(mm_new->mmap_lock)) {
			if (&mm_new->mmap_lock == (void *)lock)
				return LCD_F_MMAP_LOCK;
			break;
		}
		mm_old = (void *)curr->mm;
		if (bpf_core_field_exists(mm_old->mmap_sem)) {
			if (&mm_old->mmap_sem == (void *)lock)
				return LCD_F_MMAP_LOCK;
		}
		break;
	case LCB_F_SPIN:  /* spinlock */
		curr = bpf_get_current_task_btf();
		sighand = curr->sighand;

		if (sighand && &sighand->siglock == (void *)lock)
			return LCD_F_SIGHAND_LOCK;
		break;
	default:
		break;
	}
	return 0;
}

static inline struct tstamp_data *get_tstamp_elem(__u32 flags)
{
	__u32 pid;
	struct tstamp_data *pelem;

	/* Use per-cpu array map for spinlock and rwlock */
	if ((flags & (LCB_F_SPIN | LCB_F_MUTEX)) == LCB_F_SPIN) {
		__u32 idx = 0;

		pelem = bpf_map_lookup_elem(&tstamp_cpu, &idx);
		/* Do not update the element for nested locks */
		if (pelem && pelem->lock)
			pelem = NULL;
		return pelem;
	}

	pid = bpf_get_current_pid_tgid();
	pelem = bpf_map_lookup_elem(&tstamp, &pid);
	/* Do not update the element for nested locks */
	if (pelem && pelem->lock)
		return NULL;

	if (pelem == NULL) {
		struct tstamp_data zero = {};

		if (bpf_map_update_elem(&tstamp, &pid, &zero, BPF_NOEXIST) < 0) {
			__sync_fetch_and_add(&task_fail, 1);
			return NULL;
		}

		pelem = bpf_map_lookup_elem(&tstamp, &pid);
		if (pelem == NULL) {
			__sync_fetch_and_add(&task_fail, 1);
			return NULL;
		}
	}
	return pelem;
}

SEC("tp_btf/contention_begin")
int contention_begin(u64 *ctx)
{
	struct tstamp_data *pelem;

	if (!enabled || !can_record(ctx))
		return 0;

	pelem = get_tstamp_elem(ctx[1]);
	if (pelem == NULL)
		return 0;

	pelem->timestamp = bpf_ktime_get_ns();
	pelem->lock = (__u64)ctx[0];
	pelem->flags = (__u32)ctx[1];

	if (needs_callstack) {
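		/* the lowest bits of the flags argument to bpf_get_stackid() select how many frames to skip */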
		pelem->stack_id = bpf_get_stackid(ctx, &stacks,
						  BPF_F_FAST_STACK_CMP | stack_skip);
		if (pelem->stack_id < 0)
			__sync_fetch_and_add(&stack_fail, 1);
	} else if (aggr_mode == LOCK_AGGR_TASK) {
		struct task_struct *task;

		if (lock_owner) {
			task = get_lock_owner(pelem->lock, pelem->flags);

			/* The flags field is not used anymore.  Pass the owner pid instead. */
			if (task)
				pelem->flags = BPF_CORE_READ(task, pid);
			else
				pelem->flags = -1U;

		} else {
			task = bpf_get_current_task_btf();
		}

		if (task) {
			if (update_task_data(task) < 0 && lock_owner)
				pelem->flags = -1U;
		}
	}

	return 0;
}

SEC("tp_btf/contention_end")
int contention_end(u64 *ctx)
{
	__u32 pid = 0, idx = 0;
	struct tstamp_data *pelem;
	struct contention_key key = {};
	struct contention_data *data;
	__u64 duration;
	bool need_delete = false;

	if (!enabled)
		return 0;

	/*
	 * For spinlock and rwlock, the timestamp is kept in the per-cpu
	 * map.  However, contention_end does not receive the flags, so it
	 * cannot tell whether to read the per-cpu or the hash map.
	 *
	 * Try the per-cpu map first and check if there's active contention.
	 * If there is, do not read the hash map, because the task cannot go
	 * to a sleeping lock before releasing the spinning locks.
	 */
	pelem = bpf_map_lookup_elem(&tstamp_cpu, &idx);
	if (pelem && pelem->lock) {
		if (pelem->lock != ctx[0])
			return 0;
	} else {
		pid = bpf_get_current_pid_tgid();
		pelem = bpf_map_lookup_elem(&tstamp, &pid);
		if (!pelem || pelem->lock != ctx[0])
			return 0;
		need_delete = true;
	}

	duration = bpf_ktime_get_ns() - pelem->timestamp;
	if ((__s64)duration < 0) {
		__sync_fetch_and_add(&time_fail, 1);
		goto out;
	}

	switch (aggr_mode) {
	case LOCK_AGGR_CALLER:
		key.stack_id = pelem->stack_id;
		break;
	case LOCK_AGGR_TASK:
		if (lock_owner)
			key.pid = pelem->flags;
		else {
			if (!need_delete)
				pid = bpf_get_current_pid_tgid();
			key.pid = pid;
		}
		if (needs_callstack)
			key.stack_id = pelem->stack_id;
		break;
	case LOCK_AGGR_ADDR:
		key.lock_addr_or_cgroup = pelem->lock;
		if (needs_callstack)
			key.stack_id = pelem->stack_id;
		break;
	case LOCK_AGGR_CGROUP:
		key.lock_addr_or_cgroup = get_current_cgroup_id();
		break;
	default:
		/* should not happen */
		return 0;
	}

	data = bpf_map_lookup_elem(&lock_stat, &key);
	if (!data) {
		if (data_map_full) {
			__sync_fetch_and_add(&data_fail, 1);
			goto out;
		}

		struct contention_data first = {
			.total_time = duration,
			.max_time = duration,
			.min_time = duration,
			.count = 1,
			.flags = pelem->flags,
		};
		int err;

		if (aggr_mode == LOCK_AGGR_ADDR)
			first.flags |= check_lock_type(pelem->lock, pelem->flags);

		err = bpf_map_update_elem(&lock_stat, &key, &first, BPF_NOEXIST);
		if (err < 0) {
			if (err == -EEXIST) {
				/* it lost the race, try to get it again */
				data = bpf_map_lookup_elem(&lock_stat, &key);
				if (data != NULL)
					goto found;
			}
			if (err == -E2BIG)
				data_map_full = 1;
			__sync_fetch_and_add(&data_fail, 1);
		}
		goto out;
	}

found:
	__sync_fetch_and_add(&data->total_time, duration);
	__sync_fetch_and_add(&data->count, 1);

	/* FIXME: need atomic operations */
	if (data->max_time < duration)
		data->max_time = duration;
	if (data->min_time > duration)
		data->min_time = duration;

out:
	pelem->lock = 0;
	if (need_delete)
		bpf_map_delete_elem(&tstamp, &pid);
	return 0;
}

extern struct rq runqueues __ksym;

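/* the run-queue lock field was renamed from 'lock' to '__lock'; handle both layouts via CO-RE */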
struct rq___old {
	raw_spinlock_t lock;
} __attribute__((preserve_access_index));

struct rq___new {
	raw_spinlock_t __lock;
} __attribute__((preserve_access_index));

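/* triggered once from user space to record the address of each CPU's run-queue lock in lock_syms */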
SEC("raw_tp/bpf_test_finish")
int BPF_PROG(collect_lock_syms)
{
	__u64 lock_addr, lock_off;
	__u32 lock_flag;

	if (bpf_core_field_exists(struct rq___new, __lock))
		lock_off = offsetof(struct rq___new, __lock);
	else
		lock_off = offsetof(struct rq___old, lock);

	for (int i = 0; i < MAX_CPUS; i++) {
		struct rq *rq = bpf_per_cpu_ptr(&runqueues, i);

		if (rq == NULL)
			break;

		lock_addr = (__u64)(void *)rq + lock_off;
		lock_flag = LOCK_CLASS_RQLOCK;
		bpf_map_update_elem(&lock_syms, &lock_addr, &lock_flag, BPF_ANY);
	}
	return 0;
}

SEC("raw_tp/bpf_test_finish")
int BPF_PROG(end_timestamp)
{
	end_ts = bpf_ktime_get_ns();
	return 0;
}

char LICENSE[] SEC("license") = "Dual BSD/GPL";