xref: /linux/kernel/trace/trace_benchmark.c (revision f3539c12d8196ce0a1993364d30b3a18908470d1)
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/trace_clock.h>

#define CREATE_TRACE_POINTS
#include "trace_benchmark.h"

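/*
 * Overview: the benchmark_event tracepoint declared in trace_benchmark.h
 * uses trace_benchmark_reg() and trace_benchmark_unreg() below, so enabling
 * the trace event starts the "event_benchmark" kthread that repeatedly
 * writes the tracepoint, and disabling it stops the thread and resets the
 * collected statistics.
 */
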
static struct task_struct *bm_event_thread;

static char bm_str[BENCHMARK_EVENT_STRLEN] = "START";

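/*
 * Accumulated statistics, updated by trace_do_benchmark():
 *   bm_total   - running sum of the measured deltas
 *   bm_totalsq - running sum of the squared deltas (for the variance)
 *   bm_last    - delta of the most recent tracepoint write
 *   bm_max     - largest delta seen
 *   bm_min     - smallest delta seen (0 until the first hot-cache sample)
 *   bm_first   - delta of the very first, cold-cache write
 *   bm_cnt     - number of tracepoint writes measured
 *   bm_stddev  - last reported variance (std^2)
 *   bm_avg     - last reported average
 *   bm_std     - last reported standard deviation
 */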
static u64 bm_total;
static u64 bm_totalsq;
static u64 bm_last;
static u64 bm_max;
static u64 bm_min;
static u64 bm_first;
static u64 bm_cnt;
static u64 bm_stddev;
static unsigned int bm_avg;
static unsigned int bm_std;

/*
 * This gets called in a loop, recording the time it took to write
 * the tracepoint. What it writes is the time statistics of the last
 * tracepoint write. As there is nothing to write the first time,
 * it simply writes "START". As the first write is cold cache and
 * the rest is hot, we save off that time in bm_first and it is
 * reported as "first", which is shown in the second write to the
 * tracepoint. The "first" field is written within the statistics from
 * then on but never changes.
 */
static void trace_do_benchmark(void)
{
	u64 start;
	u64 stop;
	u64 delta;
	u64 stddev;
	u64 seed;
	u64 last_seed;
	unsigned int avg;
	unsigned int std = 0;

	/* Only run if the tracepoint is actually active */
	if (!trace_benchmark_event_enabled() || !tracing_is_on())
		return;

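	/*
	 * Keep interrupts disabled around the timed region so the
	 * measurement covers only the tracepoint write itself.
	 */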
	local_irq_disable();
	start = trace_clock_local();
	trace_benchmark_event(bm_str);
	stop = trace_clock_local();
	local_irq_enable();

	bm_cnt++;

	delta = stop - start;

	/*
	 * The first read is cold cached, keep it separate from the
	 * other calculations.
	 */
	if (bm_cnt == 1) {
		bm_first = delta;
		scnprintf(bm_str, BENCHMARK_EVENT_STRLEN,
			  "first=%llu [COLD CACHED]", bm_first);
		return;
	}

	bm_last = delta;

	if (delta > bm_max)
		bm_max = delta;
	if (!bm_min || delta < bm_min)
		bm_min = delta;

	/*
	 * When bm_cnt is greater than UINT_MAX, it breaks the statistics
	 * accounting. Freeze the statistics when that happens.
	 * We should have enough data for the avg and stddev anyway.
	 */
	if (bm_cnt > UINT_MAX) {
		scnprintf(bm_str, BENCHMARK_EVENT_STRLEN,
		    "last=%llu first=%llu max=%llu min=%llu ** avg=%u std=%d std^2=%lld",
			  bm_last, bm_first, bm_max, bm_min, bm_avg, bm_std, bm_stddev);
		return;
	}

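	/* Accumulate the running sums used for the average and the variance */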
	bm_total += delta;
	bm_totalsq += delta * delta;

	if (bm_cnt > 1) {
		/*
		 * Compute the sample variance with the standard
		 * sum-of-squares formula:
		 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
		 */
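		/*
		 * For example, two samples of 3 and 5 give n = 2,
		 * \Sum x_i = 8 and \Sum (x_i)^2 = 34, so
		 * s^2 = (2 * 34 - 8 * 8) / (2 * 1) = 2.
		 */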
		stddev = (u64)bm_cnt * bm_totalsq - bm_total * bm_total;
		do_div(stddev, (u32)bm_cnt);
		do_div(stddev, (u32)bm_cnt - 1);
	} else
		stddev = 0;

	delta = bm_total;
	do_div(delta, bm_cnt);
	avg = delta;

	if (stddev > 0) {
		int i = 0;
		/*
		 * stddev is the square of the standard deviation but
		 * we want the actual number. Use the average
		 * as our seed to find the std.
		 *
		 * The next try (Newton's method for square roots) is:
		 *  x = (x + N/x) / 2
		 *
		 * Where N is the squared number to find the square
		 * root of.
		 */
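		/*
		 * For example, if stddev were 612 and the seed (avg) 24,
		 * one step gives (24 + 612 / 24) / 2 = 24 in integer math,
		 * which is the integer square root of 612, and the loop
		 * stops because the value no longer changes.
		 */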
		seed = avg;
		do {
			last_seed = seed;
			seed = stddev;
			if (!last_seed)
				break;
			do_div(seed, last_seed);
			seed += last_seed;
			do_div(seed, 2);
		} while (i++ < 10 && last_seed != seed);

		std = seed;
	}

	scnprintf(bm_str, BENCHMARK_EVENT_STRLEN,
		  "last=%llu first=%llu max=%llu min=%llu avg=%u std=%d std^2=%lld",
		  bm_last, bm_first, bm_max, bm_min, avg, std, stddev);

	bm_std = std;
	bm_avg = avg;
	bm_stddev = stddev;
}

static int benchmark_event_kthread(void *arg)
{
	/* sleep a bit to make sure the tracepoint gets activated */
	msleep(100);

	while (!kthread_should_stop()) {

		trace_do_benchmark();

		/*
		 * We don't go to sleep, but let others
		 * run as well.
		 */
		cond_resched();
	}

	return 0;
}

/*
 * When the benchmark tracepoint is enabled, this function is called
 * to create the thread that calls the tracepoint.
 */
void trace_benchmark_reg(void)
{
	bm_event_thread = kthread_run(benchmark_event_kthread,
				      NULL, "event_benchmark");
	/* kthread_run() returns an ERR_PTR on failure, not NULL */
	if (WARN_ON(IS_ERR(bm_event_thread)))
		bm_event_thread = NULL;
}

/*
 * When the benchmark tracepoint is disabled, this function is called
 * to stop the thread that calls the tracepoint and to reset all the
 * numbers.
 */
void trace_benchmark_unreg(void)
{
	if (!bm_event_thread)
		return;

	kthread_stop(bm_event_thread);

	strcpy(bm_str, "START");
	bm_total = 0;
	bm_totalsq = 0;
	bm_last = 0;
	bm_max = 0;
	bm_min = 0;
	bm_cnt = 0;
	/* These don't need to be reset but reset them anyway */
	bm_first = 0;
	bm_std = 0;
	bm_avg = 0;
	bm_stddev = 0;
}