xref: /linux/tools/tracing/rtla/src/timerlat.bpf.c (revision 8cd0f08ac72e25e2a048c72d76730676ab0106f3)
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_tracing.h>
#include <stdbool.h>
#include "timerlat_bpf.h"

#define nosubprog __always_inline
#define MAX_ENTRIES_DEFAULT 4096

char LICENSE[] SEC("license") = "GPL";

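/*
 * Local mirror of the fields used from the timerlat_sample tracepoint;
 * preserve_access_index lets CO-RE relocate the member offsets at load time.
 */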
struct trace_event_raw_timerlat_sample {
	unsigned long long timer_latency;
	int context;
} __attribute__((preserve_access_index));

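/* Per-CPU latency histograms (one entry per bucket) for each context */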
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(max_entries, MAX_ENTRIES_DEFAULT);
	__type(key, unsigned int);
	__type(value, unsigned long long);
} hist_irq SEC(".maps"), hist_thread SEC(".maps"), hist_user SEC(".maps");

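/* Per-CPU summary statistics, indexed by the SUMMARY_* fields, for each context */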
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(max_entries, SUMMARY_FIELD_N);
	__type(key, unsigned int);
	__type(value, unsigned long long);
} summary_irq SEC(".maps"), summary_thread SEC(".maps"), summary_user SEC(".maps");

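/* Single-entry flag set once a latency threshold has been crossed */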
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, unsigned int);
	__type(value, unsigned long long);
} stop_tracing SEC(".maps");

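/* Ring buffer used to tell userspace to stop tracing */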
struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 1);
} signal_stop_tracing SEC(".maps");

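/* Optional BPF action program, tail-called from set_stop_tracing() if one is attached */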
struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(key_size, sizeof(unsigned int));
	__uint(max_entries, 1);
	__array(values, unsigned int (void *));
} bpf_action SEC(".maps") = {
	.values = {
		[0] = 0
	},
};

/* Params to be set by rtla */
const volatile int bucket_size = 1;
const volatile int output_divisor = 1000;
const volatile int entries = 256;
const volatile int irq_threshold;
const volatile int thread_threshold;
const volatile bool aa_only;

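/*
 * Small helpers for single-value map accesses; nosubprog keeps them
 * inlined instead of becoming separate BPF subprograms.
 */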
nosubprog unsigned long long map_get(void *map,
				     unsigned int key)
{
	unsigned long long *value_ptr;

	value_ptr = bpf_map_lookup_elem(map, &key);

	return !value_ptr ? 0 : *value_ptr;
}

nosubprog void map_set(void *map,
		       unsigned int key,
		       unsigned long long value)
{
	bpf_map_update_elem(map, &key, &value, BPF_ANY);
}

nosubprog void map_increment(void *map,
			     unsigned int key)
{
	map_set(map, key, map_get(map, key) + 1);
}

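/* Account one sample in its histogram bucket; out-of-range buckets are dropped */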
nosubprog void update_main_hist(void *map,
				int bucket)
{
	if (entries == 0)
		/* No histogram */
		return;

	if (bucket >= entries)
		/* Overflow */
		return;

	map_increment(map, bucket);
}

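/* Track current, overflow, max, min, count and sum for one sample */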
nosubprog void update_summary(void *map,
			      unsigned long long latency,
			      int bucket)
{
	if (aa_only)
		/* Auto-analysis only, nothing to be done here */
		return;

	map_set(map, SUMMARY_CURRENT, latency);

	if (bucket >= entries)
		/* Overflow */
		map_increment(map, SUMMARY_OVERFLOW);

	if (latency > map_get(map, SUMMARY_MAX))
		map_set(map, SUMMARY_MAX, latency);

	if (latency < map_get(map, SUMMARY_MIN) || map_get(map, SUMMARY_COUNT) == 0)
		map_set(map, SUMMARY_MIN, latency);

	map_increment(map, SUMMARY_COUNT);
	map_set(map, SUMMARY_SUM, map_get(map, SUMMARY_SUM) + latency);
}

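/* Stop sample processing and notify userspace that a threshold was hit */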
nosubprog void set_stop_tracing(struct trace_event_raw_timerlat_sample *tp_args)
{
	int value = 0;

	/* Suppress further sample processing */
	map_set(&stop_tracing, 0, 1);

	/* Signal to userspace */
	bpf_ringbuf_output(&signal_stop_tracing, &value, sizeof(value), 0);

	/*
	 * Call into BPF action program, if attached.
	 * Otherwise, just silently fail.
	 */
	bpf_tail_call(tp_args, &bpf_action, 0);
}

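/*
 * Main entry point, attached to the osnoise:timerlat_sample tracepoint.
 * Updates the histogram and summary of the matching context (0 = IRQ,
 * 1 = thread, anything else = user space) and stops tracing when the
 * corresponding latency threshold is exceeded.
 */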
SEC("tp/osnoise/timerlat_sample")
int handle_timerlat_sample(struct trace_event_raw_timerlat_sample *tp_args)
{
	unsigned long long latency, latency_us;
	int bucket;

	if (map_get(&stop_tracing, 0))
		return 0;

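	/*
	 * latency is scaled by the configured output_divisor, while
	 * latency_us is always in microseconds for the threshold checks.
	 */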
	latency = tp_args->timer_latency / output_divisor;
	latency_us = tp_args->timer_latency / 1000;
	bucket = latency / bucket_size;

	if (tp_args->context == 0) {
		update_main_hist(&hist_irq, bucket);
		update_summary(&summary_irq, latency, bucket);

		if (irq_threshold != 0 && latency_us >= irq_threshold)
			set_stop_tracing(tp_args);
	} else if (tp_args->context == 1) {
		update_main_hist(&hist_thread, bucket);
		update_summary(&summary_thread, latency, bucket);

		if (thread_threshold != 0 && latency_us >= thread_threshold)
			set_stop_tracing(tp_args);
	} else {
		update_main_hist(&hist_user, bucket);
		update_summary(&summary_user, latency, bucket);

		if (thread_threshold != 0 && latency_us >= thread_threshold)
			set_stop_tracing(tp_args);
	}

	return 0;
}