/*
 * unlikely profiler
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/irqflags.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/hash.h>
#include <linux/fs.h>
#include <asm/local.h>

#include "trace.h"
#include "trace_stat.h"
#include "trace_output.h"

#ifdef CONFIG_BRANCH_TRACER

static struct tracer branch_trace;
static int branch_tracing_enabled __read_mostly;
static DEFINE_MUTEX(branch_tracing_mutex);

static struct trace_array *branch_tracer;

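/*
 * Record a single likely()/unlikely() hit in the trace ring buffer.
 * The tracing code itself is full of annotated branches, so the
 * TRACE_BRANCH_BIT recursion flag keeps this probe from re-entering
 * itself.
 */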
static void
probe_likely_condition(struct ftrace_likely_data *f, int val, int expect)
{
	struct trace_event_call *call = &event_branch;
	struct trace_array *tr = branch_tracer;
	struct trace_array_cpu *data;
	struct ring_buffer_event *event;
	struct trace_branch *entry;
	struct ring_buffer *buffer;
	unsigned long flags;
	int pc;
	const char *p;

	if (current->trace_recursion & TRACE_BRANCH_BIT)
		return;

	/*
	 * I would love to save just the ftrace_likely_data pointer, but
	 * this code can also be used by modules. Ugly things can happen
	 * if the module is unloaded, and then we go and read the
	 * pointer.  This is slower, but much safer.
	 */

	if (unlikely(!tr))
		return;

	raw_local_irq_save(flags);
	current->trace_recursion |= TRACE_BRANCH_BIT;
	data = this_cpu_ptr(tr->trace_buffer.data);
	if (atomic_read(&data->disabled))
		goto out;

	pc = preempt_count();
	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BRANCH,
					  sizeof(*entry), flags, pc);
	if (!event)
		goto out;

	entry = ring_buffer_event_data(event);

	/* Strip off the path, only save the file */
	p = f->data.file + strlen(f->data.file);
	while (p >= f->data.file && *p != '/')
		p--;
	p++;

	strncpy(entry->func, f->data.func, TRACE_FUNC_SIZE);
	strncpy(entry->file, p, TRACE_FILE_SIZE);
	entry->func[TRACE_FUNC_SIZE] = 0;
	entry->file[TRACE_FILE_SIZE] = 0;
	entry->constant = f->constant;
	entry->line = f->data.line;
	entry->correct = val == expect;

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);

 out:
	current->trace_recursion &= ~TRACE_BRANCH_BIT;
	raw_local_irq_restore(flags);
}

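/*
 * Fast-path wrapper: do nothing unless the branch tracer was turned
 * on via enable_branch_tracing().
 */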
static inline
void trace_likely_condition(struct ftrace_likely_data *f, int val, int expect)
{
	if (!branch_tracing_enabled)
		return;

	probe_likely_condition(f, val, expect);
}

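/*
 * Point the profiler at @tr and bump the enabled count. Serialized
 * against disable_branch_tracing() by branch_tracing_mutex.
 */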
int enable_branch_tracing(struct trace_array *tr)
{
	mutex_lock(&branch_tracing_mutex);
	branch_tracer = tr;
	/*
	 * branch_tracer must be visible before the enabled count is
	 * bumped: readers only dereference it after testing the
	 * count, so no matching rmb() is needed.
	 */
	smp_wmb();
	branch_tracing_enabled++;
	mutex_unlock(&branch_tracing_mutex);

	return 0;
}

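/* Drop one enable reference; tracing stops when the count hits zero. */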
void disable_branch_tracing(void)
{
	mutex_lock(&branch_tracing_mutex);

	if (!branch_tracing_enabled)
		goto out_unlock;

	branch_tracing_enabled--;

 out_unlock:
	mutex_unlock(&branch_tracing_mutex);
}

static int branch_trace_init(struct trace_array *tr)
{
	return enable_branch_tracing(tr);
}

static void branch_trace_reset(struct trace_array *tr)
{
	disable_branch_tracing();
}

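/*
 * Render one TRACE_BRANCH entry, for example (values illustrative):
 *   [ MISS ] some_func:some_file.c:123
 * "ok" means the annotation matched what the branch actually did.
 */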
static enum print_line_t trace_branch_print(struct trace_iterator *iter,
					    int flags, struct trace_event *event)
{
	struct trace_branch *field;

	trace_assign_type(field, iter->ent);

	trace_seq_printf(&iter->seq, "[%s] %s:%s:%d\n",
			 field->correct ? "  ok  " : " MISS ",
			 field->func,
			 field->file,
			 field->line);

	return trace_handle_return(&iter->seq);
}

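/* Column headings printed above the entries in the trace file. */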
static void branch_print_header(struct seq_file *s)
{
	seq_puts(s, "#           TASK-PID    CPU#    TIMESTAMP  CORRECT"
		    "  FUNC:FILE:LINE\n"
		    "#              | |       |          |         |   "
		    "    |\n");
}

static struct trace_event_functions trace_branch_funcs = {
	.trace		= trace_branch_print,
};

static struct trace_event trace_branch_event = {
	.type		= TRACE_BRANCH,
	.funcs		= &trace_branch_funcs,
};

static struct tracer branch_trace __read_mostly =
{
	.name		= "branch",
	.init		= branch_trace_init,
	.reset		= branch_trace_reset,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_branch,
#endif /* CONFIG_FTRACE_SELFTEST */
	.print_header	= branch_print_header,
};

__init static int init_branch_tracer(void)
{
	int ret;

	ret = register_trace_event(&trace_branch_event);
	if (!ret) {
		printk(KERN_WARNING "Warning: could not register branch events\n");
		return 1;
	}
	return register_tracer(&branch_trace);
}
core_initcall(init_branch_tracer);

#else
static inline
void trace_likely_condition(struct ftrace_likely_data *f, int val, int expect)
{
}
#endif /* CONFIG_BRANCH_TRACER */

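/*
 * Called by the instrumented likely()/unlikely() macros themselves
 * (see include/linux/compiler.h under CONFIG_TRACE_BRANCH_PROFILING).
 * @val is what the condition evaluated to, @expect what the
 * annotation predicted, and @is_constant is set when the compiler
 * proved the condition constant.
 */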
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
			  int expect, int is_constant)
{
	/* A constant is always correct */
	if (is_constant) {
		f->constant++;
		val = expect;
	}
	/*
	 * I would love to have a trace point here instead, but the
	 * trace point code is so inundated with unlikely and likely
	 * conditions that the recursive nightmare that exists is too
	 * much to try to get working. At least for now.
	 */
	trace_likely_condition(f, val, expect);

	/* FIXME: Make this atomic! */
	if (val == expect)
		f->data.correct++;
	else
		f->data.incorrect++;
}
EXPORT_SYMBOL(ftrace_likely_update);

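/*
 * The profiling macros emit one struct ftrace_likely_data per
 * annotated branch into a dedicated section; the linker script
 * provides these start/stop symbols around that array.
 */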
extern unsigned long __start_annotated_branch_profile[];
extern unsigned long __stop_annotated_branch_profile[];

static int annotated_branch_stat_headers(struct seq_file *m)
{
	seq_puts(m, " correct incorrect  % "
		    "       Function                "
		    "  File              Line\n"
		    " ------- ---------  - "
		    "       --------                "
		    "  ----              ----\n");
	return 0;
}

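/*
 * Percentage of hits where the annotation was wrong. For example,
 * 25 correct and 75 incorrect gives 75; -1 means the branch was
 * never hit at all.
 */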
static inline long get_incorrect_percent(struct ftrace_branch_data *p)
{
	long percent;

	if (p->correct) {
		percent = p->incorrect * 100;
		percent /= p->correct + p->incorrect;
	} else
		percent = p->incorrect ? 100 : -1;

	return percent;
}

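/* Return the basename of @p->file, with any directory path stripped. */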
static const char *branch_stat_process_file(struct ftrace_branch_data *p)
{
	const char *f;

	/* Only print the file, not the path */
	f = p->file + strlen(p->file);
	while (f >= p->file && *f != '/')
		f--;
	return ++f;
}

static void branch_stat_show(struct seq_file *m,
			     struct ftrace_branch_data *p, const char *f)
{
	long percent;

	/*
	 * For the all-branches profiler, the "miss" count is overlaid
	 * on the correct field and the "hit" count on the incorrect
	 * field, so this percentage doubles as the hit rate there.
	 */
	percent = get_incorrect_percent(p);

	if (percent < 0)
		seq_puts(m, "  X ");
	else
		seq_printf(m, "%3ld ", percent);

	seq_printf(m, "%-30.30s %-20.20s %d\n", p->func, f, p->line);
}

static int branch_stat_show_normal(struct seq_file *m,
				   struct ftrace_branch_data *p, const char *f)
{
	seq_printf(m, "%8lu %8lu ", p->correct, p->incorrect);
	branch_stat_show(m, p, f);
	return 0;
}

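/*
 * One row of trace_stat/branch_annotated. Branches the compiler
 * folded to a constant are shown as "correct/constant" in the
 * first column.
 */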
static int annotate_branch_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_likely_data *p = v;
	const char *f;
	int l;

	f = branch_stat_process_file(&p->data);

	if (!p->constant)
		return branch_stat_show_normal(m, &p->data, f);

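	/*
	 * snprintf(NULL, 0, ...) returns the length "/<constant>" will
	 * print as; use it to shrink the padding of the incorrect
	 * column so the columns stay aligned.
	 */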
	l = snprintf(NULL, 0, "/%lu", p->constant);
	l = l > 8 ? 0 : 8 - l;

	seq_printf(m, "%8lu/%lu %*lu ",
		   p->data.correct, p->constant, l, p->data.incorrect);
	branch_stat_show(m, &p->data, f);
	return 0;
}

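/*
 * stat_start/stat_next walk the section array one record at a time;
 * stat_next returns NULL once the stop symbol is reached.
 */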
static void *annotated_branch_stat_start(struct tracer_stat *trace)
{
	return __start_annotated_branch_profile;
}

static void *
annotated_branch_stat_next(void *v, int idx)
{
	struct ftrace_likely_data *p = v;

	++p;

	if ((void *)p >= (void *)__stop_annotated_branch_profile)
		return NULL;

	return p;
}

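/*
 * Sort order for the annotated stats: the worst-predicted branches
 * are shown first, ties broken by the raw incorrect count, with the
 * best-predicted branches ending up last.
 */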
static int annotated_branch_stat_cmp(void *p1, void *p2)
{
	struct ftrace_branch_data *a = p1;
	struct ftrace_branch_data *b = p2;

	long percent_a, percent_b;

	percent_a = get_incorrect_percent(a);
	percent_b = get_incorrect_percent(b);

	if (percent_a < percent_b)
		return -1;
	if (percent_a > percent_b)
		return 1;

	if (a->incorrect < b->incorrect)
		return -1;
	if (a->incorrect > b->incorrect)
		return 1;

	/*
	 * Since the above shows worse (incorrect) cases
	 * first, we continue that by showing best (correct)
	 * cases last.
	 */
	if (a->correct > b->correct)
		return -1;
	if (a->correct < b->correct)
		return 1;

	return 0;
}

static struct tracer_stat annotated_branch_stats = {
	.name = "branch_annotated",
	.stat_start = annotated_branch_stat_start,
	.stat_next = annotated_branch_stat_next,
	.stat_cmp = annotated_branch_stat_cmp,
	.stat_headers = annotated_branch_stat_headers,
	.stat_show = annotate_branch_stat_show
};

__init static int init_annotated_branch_stats(void)
{
	int ret;

	ret = register_stat_tracer(&annotated_branch_stats);
	if (ret) {
		printk(KERN_WARNING "Warning: could not register annotated branches stats\n");
		return 1;
	}
	return 0;
}
fs_initcall(init_annotated_branch_stats);

#ifdef CONFIG_PROFILE_ALL_BRANCHES

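/*
 * With CONFIG_PROFILE_ALL_BRANCHES every if() is profiled, not just
 * the annotated ones; these symbols bound that second record array.
 */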
extern unsigned long __start_branch_profile[];
extern unsigned long __stop_branch_profile[];

static int all_branch_stat_headers(struct seq_file *m)
{
	seq_puts(m, "   miss      hit    % "
		    "       Function                "
		    "  File              Line\n"
		    " ------- ---------  - "
		    "       --------                "
		    "  ----              ----\n");
	return 0;
}

static void *all_branch_stat_start(struct tracer_stat *trace)
{
	return __start_branch_profile;
}

static void *
all_branch_stat_next(void *v, int idx)
{
	struct ftrace_branch_data *p = v;

	++p;

	if ((void *)p >= (void *)__stop_branch_profile)
		return NULL;

	return p;
}

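/* One row of trace_stat/branch_all: raw miss/hit counts per branch. */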
static int all_branch_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_branch_data *p = v;
	const char *f;

	f = branch_stat_process_file(p);
	return branch_stat_show_normal(m, p, f);
}

static struct tracer_stat all_branch_stats = {
	.name = "branch_all",
	.stat_start = all_branch_stat_start,
	.stat_next = all_branch_stat_next,
	.stat_headers = all_branch_stat_headers,
	.stat_show = all_branch_stat_show
};

__init static int all_annotated_branch_stats(void)
{
	int ret;

	ret = register_stat_tracer(&all_branch_stats);
	if (ret) {
		printk(KERN_WARNING "Warning: could not register all branches stats\n");
		return 1;
	}
	return 0;
}
fs_initcall(all_annotated_branch_stats);
#endif /* CONFIG_PROFILE_ALL_BRANCHES */