xref: /linux/kernel/trace/trace_irqsoff.c (revision a0b54e256d513ed99e456bea6e4e188ff92e7c46)
/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

static struct trace_array		*irqsoff_trace __read_mostly;
static int				tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_SPINLOCK(max_trace_lock);

enum {
	TRACER_IRQS_OFF		= (1 << 1),
	TRACER_PREEMPT_OFF	= (1 << 2),
};

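/*
 * Which critical sections we time: set by each tracer's init function
 * below (irqsoff, preemptoff, or both bits for preemptirqsoff).
 */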
static int trace_type __read_mostly;

static int save_lat_flag;

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
	return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) &&
		irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

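/*
 * The three tracers combine these predicates differently: irqsoff tests
 * irq_trace() alone, preemptoff tests preempt_trace() alone, and
 * preemptirqsoff sets both bits.  The "!preempt_trace() && irq_trace()"
 * checks in the hardirq hooks below make them defer to the preempt
 * off/on events when preemption is already being traced, so the largest
 * enclosing section is what gets measured.
 */
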
/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp	unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return;

	local_save_flags(flags);
	/* slight chance to get a false positive on tracing_cpu */
	if (!irqs_disabled_flags(flags))
		return;

	data = tr->data[cpu];
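	/*
	 * The per-cpu disabled counter keeps the buffer consistent: if
	 * this CPU is re-entered (say via an NMI), the nested call sees
	 * a count above one and records nothing.
	 */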
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, ip, parent_ip, flags, preempt_count());

	atomic_dec(&data->disabled);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = irqsoff_tracer_call,
};
#endif /* CONFIG_FUNCTION_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tracing_max_latency)
			return 0;
	}
	return 1;
}
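
/*
 * Two reporting modes, selected from userspace (a sketch, assuming
 * debugfs is mounted at /sys/kernel/debug; both files are in usecs):
 *
 *	# record every section longer than 100 usecs
 *	echo 100 > /sys/kernel/debug/tracing/tracing_thresh
 *
 *	# or record only new maxima (the default, tracing_thresh == 0)
 *	echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
 */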

static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	unsigned long latency, t0, t1;
	cycle_t T0, T1, delta;
	unsigned long flags;
	int pc;

	/*
	 * usecs conversion is slow so we try to delay the conversion
	 * as long as possible:
	 */
	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1-T0;

	local_save_flags(flags);

	pc = preempt_count();

	if (!report_latency(delta))
		goto out;

	spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(delta))
		goto out_unlock;

	trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);

	latency = nsecs_to_usecs(delta);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	tracing_max_latency = delta;
	t0 = nsecs_to_usecs(T0);
	t1 = nsecs_to_usecs(T1);

	data->critical_end = parent_ip;

	update_max_tr_single(tr, current, cpu);

	max_sequence++;

out_unlock:
	spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}

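/*
 * Mark the start of a critical section: snapshot the timestamp and the
 * current max_sequence so that stop_critical_timing() can later decide
 * whether this section produced a new maximum latency.
 */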
static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (likely(!tracer_enabled))
		return;

	cpu = raw_smp_processor_id();

	if (per_cpu(tracing_cpu, cpu))
		return;

	data = tr->data[cpu];

	if (unlikely(!data) || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	data->critical_start = parent_ip ? : ip;

	local_save_flags(flags);

	trace_function(tr, ip, parent_ip, flags, preempt_count());

	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
}

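/*
 * Mark the end of a critical section: record the closing event and let
 * check_critical_timing() compare the elapsed time against the current
 * threshold or maximum.
 */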
static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	if (!tracer_enabled)
		return;

	data = tr->data[cpu];

	if (unlikely(!data) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	local_save_flags(flags);
	trace_function(tr, ip, parent_ip, flags, preempt_count());
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}

/* start and stop critical timings - used to suspend timing (e.g. in idle) */
void start_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);
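
/*
 * A sketch of the intended caller, modeled on the x86 idle loop of this
 * era (exact call sites vary by architecture): timing is suspended
 * around the halt so that time spent idle with interrupts disabled is
 * not reported as a latency.
 *
 *	while (!need_resched()) {
 *		stop_critical_timings();
 *		pm_idle();		// may halt with irqs disabled
 *		start_critical_timings();
 *	}
 */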

#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * Stubs:
 */

void early_boot_irqs_off(void)
{
}

void early_boot_irqs_on(void)
{
}

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}

/*
 * We are only interested in hardirq on/off events:
 */
void trace_hardirqs_on(void)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

void trace_hardirqs_on_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

void trace_hardirqs_off_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
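/*
 * These hooks fire when the preempt count first goes non-zero and when
 * it drops back to zero (in this era, from add_preempt_count() and
 * sub_preempt_count() in kernel/sched.c).
 */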
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	if (preempt_trace())
		stop_critical_timing(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	if (preempt_trace())
		start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */

static void start_irqsoff_tracer(struct trace_array *tr)
{
	register_ftrace_function(&trace_ops);
	if (tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;
}

static void stop_irqsoff_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	unregister_ftrace_function(&trace_ops);
}

static void __irqsoff_tracer_init(struct trace_array *tr)
{
	save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
	trace_flags |= TRACE_ITER_LATENCY_FMT;

	tracing_max_latency = 0;
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();
	tracing_reset_online_cpus(tr);
	start_irqsoff_tracer(tr);
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
	stop_irqsoff_tracer(tr);

	if (!save_lat_flag)
		trace_flags &= ~TRACE_ITER_LATENCY_FMT;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
	tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

#ifdef CONFIG_IRQSOFF_TRACER
static int irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}
static struct tracer irqsoff_tracer __read_mostly =
{
	.name		= "irqsoff",
	.init		= irqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_irqsoff,
#endif
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
static int preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}

static struct tracer preemptoff_tracer __read_mostly =
{
	.name		= "preemptoff",
	.init		= preemptoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_preemptoff,
#endif
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif

#if defined(CONFIG_IRQSOFF_TRACER) && \
	defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name		= "preemptirqsoff",
	.init		= preemptirqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_preemptirqsoff,
#endif
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif

__init static int init_irqsoff_tracer(void)
{
	register_irqsoff(irqsoff_tracer);
	register_preemptoff(preemptoff_tracer);
	register_preemptirqsoff(preemptirqsoff_tracer);

	return 0;
}
device_initcall(init_irqsoff_tracer);
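
/*
 * Typical use from userspace, assuming debugfs is mounted at
 * /sys/kernel/debug:
 *
 *	echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
 *	echo irqsoff > /sys/kernel/debug/tracing/current_tracer
 *	(run a workload)
 *	cat /sys/kernel/debug/tracing/tracing_max_latency
 *	cat /sys/kernel/debug/tracing/trace
 */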