// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (c) 2022, Huawei

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

#define KWORK_COUNT 100
#define MAX_KWORKNAME 128

/*
 * This should be in sync with "util/kwork.h"
 */
enum kwork_class_type {
	KWORK_CLASS_IRQ,
	KWORK_CLASS_SOFTIRQ,
	KWORK_CLASS_WORKQUEUE,
	KWORK_CLASS_MAX,
};

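/*
 * Identifies one kwork instance: the kwork class (enum kwork_class_type),
 * the CPU it runs on, and a per-class id (IRQ number, softirq vector, or
 * work_struct pointer, see the tracepoint handlers below).
 */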
struct work_key {
	__u32 type;
	__u32 cpu;
	__u64 id;
};

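/*
 * Per-kwork aggregate: event count, accumulated time, and the longest
 * single interval together with its start/end timestamps.
 */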
struct report_data {
	__u64 nr;
	__u64 total_time;
	__u64 max_time;
	__u64 max_time_start;
	__u64 max_time_end;
};

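/*
 * Kwork name per work_key: the IRQ action name, softirq name, or
 * workqueue function name.
 */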
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(key_size, sizeof(struct work_key));
	__uint(value_size, MAX_KWORKNAME);
	__uint(max_entries, KWORK_COUNT);
} perf_kwork_names SEC(".maps");

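/*
 * In-flight start timestamps (ns) per work_key: written on the entry
 * event, consumed and deleted on the matching exit event.
 */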
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(key_size, sizeof(struct work_key));
	__uint(value_size, sizeof(__u64));
	__uint(max_entries, KWORK_COUNT);
} perf_kwork_time SEC(".maps");

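/* Aggregated results: one struct report_data per work_key. */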
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(key_size, sizeof(struct work_key));
	__uint(value_size, sizeof(struct report_data));
	__uint(max_entries, KWORK_COUNT);
} perf_kwork_report SEC(".maps");

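/*
 * CPU filter: a CPU is traced iff its number is present as a key.
 * Populated from userspace; max_entries is presumably resized before
 * load when more than one CPU needs to be filtered.
 */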
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u8));
	__uint(max_entries, 1);
} perf_kwork_cpu_filter SEC(".maps");

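/*
 * Single-slot name filter string, compared against kwork names in
 * trace_event_match() below.
 */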
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, MAX_KWORKNAME);
	__uint(max_entries, 1);
} perf_kwork_name_filter SEC(".maps");

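/*
 * Control knobs written by the userspace loader: nothing is accounted
 * until "enabled" is set, and the const volatile filter flags are fixed
 * before load, letting the verifier prune the unused filter branches.
 */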
int enabled = 0;

const volatile int has_cpu_filter = 0;
const volatile int has_name_filter = 0;

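/*
 * Bounded strncmp usable from BPF: compares at most @sz bytes and stops
 * at the first difference or at a NUL in either string.
 */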
static __always_inline int local_strncmp(const char *s1,
					 unsigned int sz, const char *s2)
{
	int ret = 0;
	unsigned int i;

	for (i = 0; i < sz; i++) {
		ret = (unsigned char)s1[i] - (unsigned char)s2[i];
		if (ret || !s1[i] || !s2[i])
			break;
	}

	return ret;
}

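/*
 * Common event gate: drops everything while tracing is disabled, then
 * applies the optional CPU and name filters. Returns 1 if the event
 * should be recorded.
 */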
static __always_inline int trace_event_match(struct work_key *key, char *name)
{
	__u8 *cpu_val;
	char *name_val;
	__u32 zero = 0;
	__u32 cpu = bpf_get_smp_processor_id();

	if (!enabled)
		return 0;

	if (has_cpu_filter) {
		cpu_val = bpf_map_lookup_elem(&perf_kwork_cpu_filter, &cpu);
		if (!cpu_val)
			return 0;
	}

	if (has_name_filter && (name != NULL)) {
		name_val = bpf_map_lookup_elem(&perf_kwork_name_filter, &zero);
		if (name_val &&
		    (local_strncmp(name_val, MAX_KWORKNAME, name) != 0)) {
			return 0;
		}
	}

	return 1;
}

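/*
 * Fold one [time_start, time_end] interval into the report entry for
 * @key, creating the entry on first use and tracking the longest
 * interval seen. Intervals with a negative length are discarded.
 */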
static __always_inline void do_update_time(void *map, struct work_key *key,
					   __u64 time_start, __u64 time_end)
{
	struct report_data zero, *data;
	__s64 delta = time_end - time_start;

	if (delta < 0)
		return;

	data = bpf_map_lookup_elem(map, key);
	if (!data) {
		__builtin_memset(&zero, 0, sizeof(zero));
		bpf_map_update_elem(map, key, &zero, BPF_NOEXIST);
		data = bpf_map_lookup_elem(map, key);
		if (!data)
			return;
	}

	if ((delta > data->max_time) ||
	    (data->max_time == 0)) {
		data->max_time       = delta;
		data->max_time_start = time_start;
		data->max_time_end   = time_end;
	}

	data->total_time += delta;
	data->nr++;
}

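/* Record the current timestamp as the start time for @key. */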
static __always_inline void do_update_timestart(void *map, struct work_key *key)
{
	__u64 ts = bpf_ktime_get_ns();

	bpf_map_update_elem(map, key, &ts, BPF_ANY);
}

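/*
 * Pair an exit event with its recorded start time: consume the start
 * timestamp and account the elapsed interval into the report map.
 */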
static __always_inline void do_update_timeend(void *report_map, void *time_map,
					      struct work_key *key)
{
	__u64 *time = bpf_map_lookup_elem(time_map, key);

	if (time) {
		bpf_map_delete_elem(time_map, key);
		do_update_time(report_map, key, *time, bpf_ktime_get_ns());
	}
}

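/* Remember a kwork's name the first time its key is seen. */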
static __always_inline void do_update_name(void *map,
					   struct work_key *key, char *name)
{
	if (!bpf_map_lookup_elem(map, key))
		bpf_map_update_elem(map, key, name, BPF_ANY);
}

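/*
 * Thin wrappers that apply trace_event_match() before touching the
 * maps; these are the bodies shared by all tracepoint programs below.
 */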
static __always_inline int update_timestart(void *map, struct work_key *key)
{
	if (!trace_event_match(key, NULL))
		return 0;

	do_update_timestart(map, key);
	return 0;
}

static __always_inline int update_timestart_and_name(void *time_map,
						     void *names_map,
						     struct work_key *key,
						     char *name)
{
	if (!trace_event_match(key, name))
		return 0;

	do_update_timestart(time_map, key);
	do_update_name(names_map, key, name);

	return 0;
}

static __always_inline int update_timeend(void *report_map,
					  void *time_map, struct work_key *key)
{
	if (!trace_event_match(key, NULL))
		return 0;

	do_update_timeend(report_map, time_map, key);

	return 0;
}

static __always_inline int update_timeend_and_name(void *report_map,
						   void *time_map,
						   void *names_map,
						   struct work_key *key,
						   char *name)
{
	if (!trace_event_match(key, name))
		return 0;

	do_update_timeend(report_map, time_map, key);
	do_update_name(names_map, key, name);

	return 0;
}

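/*
 * Hardirq runtime reporting: time from irq_handler_entry to
 * irq_handler_exit, keyed by IRQ number. The handler name is read from
 * the entry event's __data_loc field.
 */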
220 SEC("tracepoint/irq/irq_handler_entry")
report_irq_handler_entry(struct trace_event_raw_irq_handler_entry * ctx)221 int report_irq_handler_entry(struct trace_event_raw_irq_handler_entry *ctx)
222 {
223 	char name[MAX_KWORKNAME];
224 	struct work_key key = {
225 		.type = KWORK_CLASS_IRQ,
226 		.cpu  = bpf_get_smp_processor_id(),
227 		.id   = (__u64)ctx->irq,
228 	};
229 	void *name_addr = (void *)ctx + (ctx->__data_loc_name & 0xffff);
230 
231 	bpf_probe_read_kernel_str(name, sizeof(name), name_addr);
232 
233 	return update_timestart_and_name(&perf_kwork_time,
234 					 &perf_kwork_names, &key, name);
235 }
236 
237 SEC("tracepoint/irq/irq_handler_exit")
report_irq_handler_exit(struct trace_event_raw_irq_handler_exit * ctx)238 int report_irq_handler_exit(struct trace_event_raw_irq_handler_exit *ctx)
239 {
240 	struct work_key key = {
241 		.type = KWORK_CLASS_IRQ,
242 		.cpu  = bpf_get_smp_processor_id(),
243 		.id   = (__u64)ctx->irq,
244 	};
245 
246 	return update_timeend(&perf_kwork_report, &perf_kwork_time, &key);
247 }
248 
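/*
 * Human-readable softirq names indexed by vector number, mirroring the
 * kernel's softirq_to_name[] table.
 */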
static char softirq_name_list[NR_SOFTIRQS][MAX_KWORKNAME] = {
	{ "HI"       },
	{ "TIMER"    },
	{ "NET_TX"   },
	{ "NET_RX"   },
	{ "BLOCK"    },
	{ "IRQ_POLL" },
	{ "TASKLET"  },
	{ "SCHED"    },
	{ "HRTIMER"  },
	{ "RCU"      },
};

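/*
 * Softirq runtime reporting: time from softirq_entry to softirq_exit,
 * keyed by softirq vector.
 */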
SEC("tracepoint/irq/softirq_entry")
int report_softirq_entry(struct trace_event_raw_softirq *ctx)
{
	unsigned int vec = ctx->vec;
	struct work_key key = {
		.type = KWORK_CLASS_SOFTIRQ,
		.cpu  = bpf_get_smp_processor_id(),
		.id   = (__u64)vec,
	};

	if (vec < NR_SOFTIRQS) {
		return update_timestart_and_name(&perf_kwork_time,
						 &perf_kwork_names, &key,
						 softirq_name_list[vec]);
	}

	return 0;
}

SEC("tracepoint/irq/softirq_exit")
int report_softirq_exit(struct trace_event_raw_softirq *ctx)
{
	struct work_key key = {
		.type = KWORK_CLASS_SOFTIRQ,
		.cpu  = bpf_get_smp_processor_id(),
		.id   = (__u64)ctx->vec,
	};

	return update_timeend(&perf_kwork_report, &perf_kwork_time, &key);
}

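/*
 * Softirq latency: time from softirq_raise (the request) until
 * softirq_entry (the handler actually starting to run).
 */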
SEC("tracepoint/irq/softirq_raise")
int latency_softirq_raise(struct trace_event_raw_softirq *ctx)
{
	unsigned int vec = ctx->vec;
	struct work_key key = {
		.type = KWORK_CLASS_SOFTIRQ,
		.cpu  = bpf_get_smp_processor_id(),
		.id   = (__u64)vec,
	};

	if (vec < NR_SOFTIRQS) {
		return update_timestart_and_name(&perf_kwork_time,
						 &perf_kwork_names, &key,
						 softirq_name_list[vec]);
	}

	return 0;
}

SEC("tracepoint/irq/softirq_entry")
int latency_softirq_entry(struct trace_event_raw_softirq *ctx)
{
	struct work_key key = {
		.type = KWORK_CLASS_SOFTIRQ,
		.cpu  = bpf_get_smp_processor_id(),
		.id   = (__u64)ctx->vec,
	};

	return update_timeend(&perf_kwork_report, &perf_kwork_time, &key);
}

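/*
 * Workqueue runtime reporting: workqueue_execute_start to
 * workqueue_execute_end, keyed by the work_struct pointer. The work
 * function name is formatted with bpf_snprintf("%ps") on the exit path.
 */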
SEC("tracepoint/workqueue/workqueue_execute_start")
int report_workqueue_execute_start(struct trace_event_raw_workqueue_execute_start *ctx)
{
	struct work_key key = {
		.type = KWORK_CLASS_WORKQUEUE,
		.cpu  = bpf_get_smp_processor_id(),
		.id   = (__u64)ctx->work,
	};

	return update_timestart(&perf_kwork_time, &key);
}

SEC("tracepoint/workqueue/workqueue_execute_end")
int report_workqueue_execute_end(struct trace_event_raw_workqueue_execute_end *ctx)
{
	char name[MAX_KWORKNAME];
	struct work_key key = {
		.type = KWORK_CLASS_WORKQUEUE,
		.cpu  = bpf_get_smp_processor_id(),
		.id   = (__u64)ctx->work,
	};
	unsigned long long func_addr = (unsigned long long)ctx->function;

	__builtin_memset(name, 0, sizeof(name));
	bpf_snprintf(name, sizeof(name), "%ps", &func_addr, sizeof(func_addr));

	return update_timeend_and_name(&perf_kwork_report, &perf_kwork_time,
				       &perf_kwork_names, &key, name);
}

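/*
 * Workqueue latency: time from workqueue_activate_work (queued and
 * ready to run) until workqueue_execute_start (a worker picks it up).
 */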
SEC("tracepoint/workqueue/workqueue_activate_work")
int latency_workqueue_activate_work(struct trace_event_raw_workqueue_activate_work *ctx)
{
	struct work_key key = {
		.type = KWORK_CLASS_WORKQUEUE,
		.cpu  = bpf_get_smp_processor_id(),
		.id   = (__u64)ctx->work,
	};

	return update_timestart(&perf_kwork_time, &key);
}

SEC("tracepoint/workqueue/workqueue_execute_start")
int latency_workqueue_execute_start(struct trace_event_raw_workqueue_execute_start *ctx)
{
	char name[MAX_KWORKNAME];
	struct work_key key = {
		.type = KWORK_CLASS_WORKQUEUE,
		.cpu  = bpf_get_smp_processor_id(),
		.id   = (__u64)ctx->work,
	};
	unsigned long long func_addr = (unsigned long long)ctx->function;

	__builtin_memset(name, 0, sizeof(name));
	bpf_snprintf(name, sizeof(name), "%ps", &func_addr, sizeof(func_addr));

	return update_timeend_and_name(&perf_kwork_report, &perf_kwork_time,
				       &perf_kwork_names, &key, name);
}

char LICENSE[] SEC("license") = "Dual BSD/GPL";
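/*
 * A minimal userspace sketch (an assumption for illustration, not part
 * of this file): with the libbpf skeleton generated from this object, a
 * loader would do roughly the following. The locals "cpu" and "one" are
 * hypothetical names.
 *
 *	struct kwork_trace_bpf *skel = kwork_trace_bpf__open();
 *
 *	skel->rodata->has_cpu_filter = 1;	// rodata: set before load
 *	kwork_trace_bpf__load(skel);
 *
 *	__u32 cpu = 0;				// trace CPU 0 only
 *	__u8 one = 1;
 *	bpf_map_update_elem(bpf_map__fd(skel->maps.perf_kwork_cpu_filter),
 *			    &cpu, &one, BPF_ANY);
 *
 *	kwork_trace_bpf__attach(skel);
 *	skel->bss->enabled = 1;			// start accounting
 */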