xref: /linux/kernel/trace/trace_benchmark.c (revision 2573c25e2c482b53b6e1142ff3cd28f6de13e659)
// SPDX-License-Identifier: GPL-2.0
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/trace_clock.h>

#define CREATE_TRACE_POINTS
#include "trace_benchmark.h"

static struct task_struct *bm_event_thread;

static char bm_str[BENCHMARK_EVENT_STRLEN] = "START";

static u64 bm_total;
static u64 bm_totalsq;
static u64 bm_last;
static u64 bm_max;
static u64 bm_min;
static u64 bm_first;
static u64 bm_cnt;
static u64 bm_stddev;
static unsigned int bm_avg;
static unsigned int bm_std;

static bool ok_to_run;

/*
 * This gets called in a loop, recording the time it took to write
 * the tracepoint. What it writes is the time statistics of the last
 * tracepoint write. As there is nothing to report the first time
 * around, it simply writes "START". The first write is cold cache
 * and the rest are hot, so we save that first time in bm_first and
 * report it as "first", which is shown in the second write to the
 * tracepoint. The "first" field is included in the statistics string
 * from then on but never changes.
 */
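/*
 * Added note (not in the original): the strings reported through the
 * tracepoint, reconstructed from the scnprintf() calls below, are:
 *
 *   1st write:  "START"
 *   2nd write:  "first=<delta> [COLD CACHED]"
 *   3rd write+: "last=.. first=.. max=.. min=.. avg=.. std=.. std^2=.."
 *
 * Once bm_cnt exceeds UINT_MAX, the avg/std/std^2 values are frozen and
 * a "**" marker is printed in front of them.
 */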
static void trace_do_benchmark(void)
{
	u64 start;
	u64 stop;
	u64 delta;
	u64 stddev;
	u64 seed;
	u64 last_seed;
	unsigned int avg;
	unsigned int std = 0;

	/* Only run if the tracepoint is actually active */
	if (!trace_benchmark_event_enabled() || !tracing_is_on())
		return;

	local_irq_disable();
	start = trace_clock_local();
	trace_benchmark_event(bm_str, bm_last);
	stop = trace_clock_local();
	local_irq_enable();

	bm_cnt++;

	delta = stop - start;

	/*
	 * The first write is cold cached; keep it separate from the
	 * other calculations.
	 */
	if (bm_cnt == 1) {
		bm_first = delta;
		scnprintf(bm_str, BENCHMARK_EVENT_STRLEN,
			  "first=%llu [COLD CACHED]", bm_first);
		return;
	}

	bm_last = delta;

	if (delta > bm_max)
		bm_max = delta;
	if (!bm_min || delta < bm_min)
		bm_min = delta;

	/*
	 * When bm_cnt is greater than UINT_MAX, the (u32) casts used for
	 * do_div() below would break the statistics accounting. Freeze
	 * the statistics when that happens. We should have enough data
	 * for the avg and stddev anyway.
	 */
	if (bm_cnt > UINT_MAX) {
		scnprintf(bm_str, BENCHMARK_EVENT_STRLEN,
		    "last=%llu first=%llu max=%llu min=%llu ** avg=%u std=%d std^2=%lld",
			  bm_last, bm_first, bm_max, bm_min, bm_avg, bm_std, bm_stddev);
		return;
	}

	bm_total += delta;
	bm_totalsq += delta * delta;

	if (bm_cnt > 1) {
		/*
		 * Compute the sample variance from the running sum and
		 * sum of squares:
		 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
		 */
		stddev = (u64)bm_cnt * bm_totalsq - bm_total * bm_total;
		do_div(stddev, (u32)bm_cnt);
		do_div(stddev, (u32)bm_cnt - 1);
	} else
		stddev = 0;
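
	/*
	 * Added worked example (not in the original): the branch above uses
	 * the sum-of-squares identity
	 *
	 *   s^2 = (n * \Sum x_i^2 - (\Sum x_i)^2) / (n * (n - 1))
	 *
	 * which equals the usual sample variance 1/(n-1) * \Sum (x_i - mean)^2.
	 * For example, with samples 2, 4, 6: n = 3, \Sum x = 12, \Sum x^2 = 56,
	 * so s^2 = (3 * 56 - 144) / (3 * 2) = 24 / 6 = 4, i.e. s = 2.
	 */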

	delta = bm_total;
	delta = div64_u64(delta, bm_cnt);
	avg = delta;

	if (stddev > 0) {
		int i = 0;
		/*
		 * stddev holds the square of the standard deviation, but
		 * we want the actual number. Use the average as our seed
		 * to find the square root by Newton's method.
		 *
		 * The next approximation is:
		 *  x = (x + N/x) / 2
		 *
		 * Where N is the squared number to find the square
		 * root of.
		 */
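		/*
		 * Added worked example (not in the original): with N = 100
		 * and a seed of 6, the iteration below goes (integer division)
		 *   (6 + 100/6) / 2   = 11
		 *   (11 + 100/11) / 2 = 10
		 *   (10 + 100/10) / 2 = 10   -> converged, sqrt(100) = 10
		 * The loop also stops after 10 rounds, or breaks out early if
		 * the previous seed was zero to avoid a division by zero.
		 */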
		seed = avg;
		do {
			last_seed = seed;
			seed = stddev;
			if (!last_seed)
				break;
			seed = div64_u64(seed, last_seed);
			seed += last_seed;
			do_div(seed, 2);
		} while (i++ < 10 && last_seed != seed);

		std = seed;
	}

	scnprintf(bm_str, BENCHMARK_EVENT_STRLEN,
		  "last=%llu first=%llu max=%llu min=%llu avg=%u std=%d std^2=%lld",
		  bm_last, bm_first, bm_max, bm_min, avg, std, stddev);

	bm_std = std;
	bm_avg = avg;
	bm_stddev = stddev;
}
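
/*
 * Illustrative sketch (not part of the original file): the Newton-Raphson
 * integer square root used above, pulled out as a self-contained helper so
 * the convergence loop can be read in isolation. The helper name and the
 * __maybe_unused annotation are hypothetical additions.
 */
static u64 __maybe_unused bm_isqrt_sketch(u64 n, u64 seed)
{
	u64 last;
	int i = 0;

	/* No answer for zero input or a zero seed */
	if (!n || !seed)
		return 0;

	do {
		last = seed;
		/* next = (last + n / last) / 2 */
		seed = n;
		seed = div64_u64(seed, last);
		seed += last;
		do_div(seed, 2);
	} while (i++ < 10 && last != seed);

	return seed;
}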

static int benchmark_event_kthread(void *arg)
{
	/* sleep a bit to make sure the tracepoint gets activated */
	msleep(100);

	while (!kthread_should_stop()) {

		trace_do_benchmark();

		/*
		 * We don't go to sleep, but let others run as well.
		 * This is basically a "yield()" to let any task that
		 * wants to run, schedule in, but if the CPU is idle,
		 * we'll keep burning cycles.
		 *
		 * Note the tasks_rcu_qs() version of cond_resched() will
		 * notify synchronize_rcu_tasks() that this thread has
		 * passed a quiescent state for rcu_tasks. Otherwise
		 * this thread will never voluntarily schedule, which would
		 * block synchronize_rcu_tasks() indefinitely.
		 */
		cond_resched_tasks_rcu_qs();
	}

	return 0;
}

/*
 * Enabling the benchmark tracepoint calls this function, which
 * creates the thread that writes the tracepoint.
 */
int trace_benchmark_reg(void)
{
	if (!ok_to_run) {
		pr_warn("trace benchmark cannot be started via kernel command line\n");
		return -EBUSY;
	}

	bm_event_thread = kthread_run(benchmark_event_kthread,
				      NULL, "event_benchmark");
	if (IS_ERR(bm_event_thread)) {
		pr_warn("trace benchmark failed to create kernel thread\n");
		return PTR_ERR(bm_event_thread);
	}

	return 0;
}
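
/*
 * Usage note (not in the original): assuming the event is registered under
 * the "benchmark" trace system (see trace_benchmark.h), the benchmark is
 * typically started and read back from tracefs roughly like:
 *
 *   echo 1 > /sys/kernel/tracing/events/benchmark/benchmark_event/enable
 *   cat /sys/kernel/tracing/trace_pipe
 *   echo 0 > /sys/kernel/tracing/events/benchmark/benchmark_event/enable
 *
 * Disabling the event ends up in trace_benchmark_unreg() below, which stops
 * the thread and resets the statistics.
 */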

/*
 * Disabling the benchmark tracepoint calls this function, which
 * stops the thread that writes the tracepoint and resets all the
 * numbers.
 */
void trace_benchmark_unreg(void)
{
	if (!bm_event_thread)
		return;

	kthread_stop(bm_event_thread);
	bm_event_thread = NULL;

	strcpy(bm_str, "START");
	bm_total = 0;
	bm_totalsq = 0;
	bm_last = 0;
	bm_max = 0;
	bm_min = 0;
	bm_cnt = 0;
	/* These don't need to be reset but reset them anyway */
	bm_first = 0;
	bm_std = 0;
	bm_avg = 0;
	bm_stddev = 0;
}

static __init int ok_to_run_trace_benchmark(void)
{
	ok_to_run = true;

	return 0;
}

early_initcall(ok_to_run_trace_benchmark);