// xref: /linux/tools/perf/util/bpf_skel/kwork_top.bpf.c (revision 7685b334d1e4927cc73b62c65293ba65748d9c52)
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (c) 2022, Huawei

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>

/*
 * This should be in sync with "util/kwork.h"
 */
enum kwork_class_type {
	KWORK_CLASS_IRQ,
	KWORK_CLASS_SOFTIRQ,
	KWORK_CLASS_WORKQUEUE,
	KWORK_CLASS_SCHED,
	KWORK_CLASS_MAX,
};

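/* PF_KTHREAD mirrors the kernel's task flag; MAX_COMMAND_LEN matches TASK_COMM_LEN. */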
#define MAX_ENTRIES     102400
#ifndef MAX_NR_CPUS
#define MAX_NR_CPUS     4096
#endif
#define PF_KTHREAD      0x00200000
#define MAX_COMMAND_LEN 16

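/* Last sched-in (or IRQ/softirq entry) timestamp for a task or work item. */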
struct time_data {
	__u64 timestamp;
};

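/* Accumulated runtime, in nanoseconds, of one work item. */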
struct work_data {
	__u64 runtime;
};

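/* Cached task identity: thread group id, kthread flag and command name. */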
struct task_data {
	__u32 tgid;
	__u32 is_kthread;
	char comm[MAX_COMMAND_LEN];
};

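/* A work item is identified by its class, the pid and the task_struct pointer. */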
struct work_key {
	__u32 type;
	__u32 pid;
	__u64 task_p;
};

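/* A task is tracked once per CPU it has been observed running on. */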
struct task_key {
	__u32 pid;
	__u32 cpu;
};

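/* Task-local storage holding the timestamp of the last sched-in. */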
struct {
	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, struct time_data);
} kwork_top_task_time SEC(".maps");

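/* Per-CPU map of hardirq/softirq entry timestamps, keyed by work. */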
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
	__uint(key_size, sizeof(struct work_key));
	__uint(value_size, sizeof(struct time_data));
	__uint(max_entries, MAX_ENTRIES);
} kwork_top_irq_time SEC(".maps");

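/* Identity cache for the tasks observed on each CPU. */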
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(key_size, sizeof(struct task_key));
	__uint(value_size, sizeof(struct task_data));
	__uint(max_entries, MAX_ENTRIES);
} kwork_top_tasks SEC(".maps");

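/* Per-CPU accumulated runtime of each work item. */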
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
	__uint(key_size, sizeof(struct work_key));
	__uint(value_size, sizeof(struct work_data));
	__uint(max_entries, MAX_ENTRIES);
} kwork_top_works SEC(".maps");

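/* Set of CPUs to profile; consulted only when has_cpu_filter is set. */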
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(u8));
	__uint(max_entries, MAX_NR_CPUS);
} kwork_top_cpu_filter SEC(".maps");

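/* Control variables set by the userspace loader before and after attach. */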
int enabled = 0;

const volatile int has_cpu_filter = 0;

__u64 from_timestamp = 0;
__u64 to_timestamp = 0;

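/* Returns 1 when a CPU filter is in effect and @cpu is not in it. */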
static __always_inline int cpu_is_filtered(__u32 cpu)
{
	__u8 *cpu_val;

	if (has_cpu_filter) {
		cpu_val = bpf_map_lookup_elem(&kwork_top_cpu_filter, &cpu);
		if (!cpu_val)
			return 1;
	}

	return 0;
}

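/* Caches identity info for @task the first time it is seen on @cpu. */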
static __always_inline void update_task_info(struct task_struct *task, __u32 cpu)
{
	struct task_key key = {
		.pid = task->pid,
		.cpu = cpu,
	};

	if (!bpf_map_lookup_elem(&kwork_top_tasks, &key)) {
		struct task_data data = {
			.tgid = task->tgid,
			.is_kthread = task->flags & PF_KTHREAD ? 1 : 0,
		};
		BPF_CORE_READ_STR_INTO(&data.comm, task, comm);

		bpf_map_update_elem(&kwork_top_tasks, &key, &data, BPF_ANY);
	}
}

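/* Adds @delta to the accumulated runtime of the work identified by @key. */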
static __always_inline void update_work(struct work_key *key, __u64 delta)
{
	struct work_data *data;

	data = bpf_map_lookup_elem(&kwork_top_works, key);
	if (data) {
		data->runtime += delta;
	} else {
		struct work_data new_data = {
			.runtime = delta,
		};

		bpf_map_update_elem(&kwork_top_works, key, &new_data, BPF_ANY);
	}
}

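/*
 * Accounts the time @task spent on the CPU since its last sched-in,
 * falling back to the profiling start time for tasks first seen here.
 */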
static void on_sched_out(struct task_struct *task, __u64 ts, __u32 cpu)
{
	__u64 delta;
	struct time_data *pelem;

	pelem = bpf_task_storage_get(&kwork_top_task_time, task, NULL, 0);
	if (pelem)
		delta = ts - pelem->timestamp;
	else
		delta = ts - from_timestamp;

	struct work_key key = {
		.type = KWORK_CLASS_SCHED,
		.pid = task->pid,
		.task_p = (__u64)task,
	};

	update_work(&key, delta);
	update_task_info(task, cpu);
}

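/* Stamps the incoming task's sched-in time in its local storage. */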
static void on_sched_in(struct task_struct *task, __u64 ts)
{
	struct time_data *pelem;

	pelem = bpf_task_storage_get(&kwork_top_task_time, task, NULL,
				     BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (pelem)
		pelem->timestamp = ts;
}

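/* sched_switch: close out the runtime of @prev and stamp @next. */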
172 SEC("tp_btf/sched_switch")
173 int on_switch(u64 *ctx)
174 {
175 	struct task_struct *prev, *next;
176 
177 	prev = (struct task_struct *)ctx[1];
178 	next = (struct task_struct *)ctx[2];
179 
180 	if (!enabled)
181 		return 0;
182 
183 	__u32 cpu = bpf_get_smp_processor_id();
184 
185 	if (cpu_is_filtered(cpu))
186 		return 0;
187 
188 	__u64 ts = bpf_ktime_get_ns();
189 
190 	on_sched_out(prev, ts, cpu);
191 	on_sched_in(next, ts);
192 
193 	return 0;
194 }
195 
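/* irq_handler_entry: record when the current task started handling a hardirq. */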
196 SEC("tp_btf/irq_handler_entry")
197 int on_irq_handler_entry(u64 *cxt)
198 {
199 	struct task_struct *task;
200 
201 	if (!enabled)
202 		return 0;
203 
204 	__u32 cpu = bpf_get_smp_processor_id();
205 
206 	if (cpu_is_filtered(cpu))
207 		return 0;
208 
209 	__u64 ts = bpf_ktime_get_ns();
210 
211 	task = (struct task_struct *)bpf_get_current_task();
212 	if (!task)
213 		return 0;
214 
215 	struct work_key key = {
216 		.type = KWORK_CLASS_IRQ,
217 		.pid = BPF_CORE_READ(task, pid),
218 		.task_p = (__u64)task,
219 	};
220 
221 	struct time_data data = {
222 		.timestamp = ts,
223 	};
224 
225 	bpf_map_update_elem(&kwork_top_irq_time, &key, &data, BPF_ANY);
226 
227 	return 0;
228 }
229 
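/*
 * irq_handler_exit: account hardirq runtime since the matching entry,
 * falling back to the profiling start time if no entry was recorded.
 */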
230 SEC("tp_btf/irq_handler_exit")
231 int on_irq_handler_exit(u64 *cxt)
232 {
233 	__u64 delta;
234 	struct task_struct *task;
235 	struct time_data *pelem;
236 
237 	if (!enabled)
238 		return 0;
239 
240 	__u32 cpu = bpf_get_smp_processor_id();
241 
242 	if (cpu_is_filtered(cpu))
243 		return 0;
244 
245 	__u64 ts = bpf_ktime_get_ns();
246 
247 	task = (struct task_struct *)bpf_get_current_task();
248 	if (!task)
249 		return 0;
250 
251 	struct work_key key = {
252 		.type = KWORK_CLASS_IRQ,
253 		.pid = BPF_CORE_READ(task, pid),
254 		.task_p = (__u64)task,
255 	};
256 
257 	pelem = bpf_map_lookup_elem(&kwork_top_irq_time, &key);
258 	if (pelem && pelem->timestamp != 0)
259 		delta = ts - pelem->timestamp;
260 	else
261 		delta = ts - from_timestamp;
262 
263 	update_work(&key, delta);
264 
265 	return 0;
266 }
267 
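/* softirq_entry: record when the current task started handling a softirq. */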
268 SEC("tp_btf/softirq_entry")
269 int on_softirq_entry(u64 *cxt)
270 {
271 	struct task_struct *task;
272 
273 	if (!enabled)
274 		return 0;
275 
276 	__u32 cpu = bpf_get_smp_processor_id();
277 
278 	if (cpu_is_filtered(cpu))
279 		return 0;
280 
281 	__u64 ts = bpf_ktime_get_ns();
282 
283 	task = (struct task_struct *)bpf_get_current_task();
284 	if (!task)
285 		return 0;
286 
287 	struct work_key key = {
288 		.type = KWORK_CLASS_SOFTIRQ,
289 		.pid = BPF_CORE_READ(task, pid),
290 		.task_p = (__u64)task,
291 	};
292 
293 	struct time_data data = {
294 		.timestamp = ts,
295 	};
296 
297 	bpf_map_update_elem(&kwork_top_irq_time, &key, &data, BPF_ANY);
298 
299 	return 0;
300 }
301 
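/*
 * softirq_exit: account softirq runtime since the matching entry,
 * falling back to the profiling start time if no entry was recorded.
 */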
302 SEC("tp_btf/softirq_exit")
303 int on_softirq_exit(u64 *cxt)
304 {
305 	__u64 delta;
306 	struct task_struct *task;
307 	struct time_data *pelem;
308 
309 	if (!enabled)
310 		return 0;
311 
312 	__u32 cpu = bpf_get_smp_processor_id();
313 
314 	if (cpu_is_filtered(cpu))
315 		return 0;
316 
317 	__u64 ts = bpf_ktime_get_ns();
318 
319 	task = (struct task_struct *)bpf_get_current_task();
320 	if (!task)
321 		return 0;
322 
323 	struct work_key key = {
324 		.type = KWORK_CLASS_SOFTIRQ,
325 		.pid = BPF_CORE_READ(task, pid),
326 		.task_p = (__u64)task,
327 	};
328 
329 	pelem = bpf_map_lookup_elem(&kwork_top_irq_time, &key);
330 	if (pelem)
331 		delta = ts - pelem->timestamp;
332 	else
333 		delta = ts - from_timestamp;
334 
335 	update_work(&key, delta);
336 
337 	return 0;
338 }
339 
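/*
 * Usage sketch, assuming the usual libbpf skeleton flow (kwork_top.skel.h
 * generated from this object); the actual perf-side driver lives in
 * util/bpf_kwork_top.c:
 *
 *	struct kwork_top_bpf *skel = kwork_top_bpf__open();
 *
 *	skel->rodata->has_cpu_filter = 1;      // only when filtering CPUs
 *	kwork_top_bpf__load(skel);
 *	// populate kwork_top_cpu_filter here when filtering
 *	kwork_top_bpf__attach(skel);
 *	skel->bss->from_timestamp = now_ns;    // hypothetical: current bpf_ktime_get_ns()
 *	skel->bss->enabled = 1;
 *
 * Sampling stops by clearing `enabled` and recording `to_timestamp`;
 * userspace then walks kwork_top_works/kwork_top_tasks to build the report.
 */
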
char LICENSE[] SEC("license") = "Dual BSD/GPL";