1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 // Copyright (c) 2021 Google
3 #include "vmlinux.h"
4 #include <bpf/bpf_helpers.h>
5 #include <bpf/bpf_tracing.h>
6
7 // This should be in sync with "util/ftrace.h"
8 #define NUM_BUCKET 22
9
// Per-thread function-entry timestamp (ns), written by func_begin and
// consumed/deleted by func_end.  Keyed by the full pid_tgid value so
// concurrent threads do not collide.
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(key_size, sizeof(__u64));
	__uint(value_size, sizeof(__u64));
	__uint(max_entries, 10000);
} functime SEC(".maps");
16
// Set of CPUs to trace, populated from user space.  Presence of the CPU
// number as a key means "allowed"; the value byte is unused.  Checked only
// when has_cpu is set.  max_entries is resized by user space before load —
// TODO confirm against the loader.
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u8));
	__uint(max_entries, 1);
} cpu_filter SEC(".maps");
23
// Set of thread ids to trace, populated from user space.  Presence of the
// tid as a key means "allowed"; the value byte is unused.  Checked only
// when has_task is set.
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u8));
	__uint(max_entries, 1);
} task_filter SEC(".maps");
30
// Per-CPU latency histogram: bucket i counts calls whose duration was below
// cmp_base << i (log2 scale, in usec by default or nsec when use_nsec is
// set).  User space sums the per-CPU copies when reporting.
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u64));
	__uint(max_entries, NUM_BUCKET);
} latency SEC(".maps");
37
38
// Runtime on/off switch toggled from user space; both probes bail out
// early while it is 0.
int enabled = 0;

// Constants set by user space before the program is loaded (rodata):
// whether CPU/task filtering is active, and whether the histogram is
// bucketed in nanoseconds instead of microseconds.
const volatile int has_cpu = 0;
const volatile int has_task = 0;
const volatile int use_nsec = 0;
44
45 SEC("kprobe/func")
BPF_PROG(func_begin)46 int BPF_PROG(func_begin)
47 {
48 __u64 key, now;
49
50 if (!enabled)
51 return 0;
52
53 key = bpf_get_current_pid_tgid();
54
55 if (has_cpu) {
56 __u32 cpu = bpf_get_smp_processor_id();
57 __u8 *ok;
58
59 ok = bpf_map_lookup_elem(&cpu_filter, &cpu);
60 if (!ok)
61 return 0;
62 }
63
64 if (has_task) {
65 __u32 pid = key & 0xffffffff;
66 __u8 *ok;
67
68 ok = bpf_map_lookup_elem(&task_filter, &pid);
69 if (!ok)
70 return 0;
71 }
72
73 now = bpf_ktime_get_ns();
74
75 // overwrite timestamp for nested functions
76 bpf_map_update_elem(&functime, &key, &now, BPF_ANY);
77 return 0;
78 }
79
80 SEC("kretprobe/func")
BPF_PROG(func_end)81 int BPF_PROG(func_end)
82 {
83 __u64 tid;
84 __u64 *start;
85 __u64 cmp_base = use_nsec ? 1 : 1000;
86
87 if (!enabled)
88 return 0;
89
90 tid = bpf_get_current_pid_tgid();
91
92 start = bpf_map_lookup_elem(&functime, &tid);
93 if (start) {
94 __s64 delta = bpf_ktime_get_ns() - *start;
95 __u32 key;
96 __u64 *hist;
97
98 bpf_map_delete_elem(&functime, &tid);
99
100 if (delta < 0)
101 return 0;
102
103 // calculate index using delta
104 for (key = 0; key < (NUM_BUCKET - 1); key++) {
105 if (delta < (cmp_base << key))
106 break;
107 }
108
109 hist = bpf_map_lookup_elem(&latency, &key);
110 if (!hist)
111 return 0;
112
113 *hist += 1;
114 }
115
116 return 0;
117 }
118