// SPDX-License-Identifier: GPL-2.0
/*
 * preemptoff and irqoff tracepoints
 *
 * Copyright (C) Joel Fernandes (Google) <joel@joelfernandes.org>
 */

#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/hardirq.h>
#include "trace.h"

#define CREATE_TRACE_POINTS
#include <trace/events/preemptirq.h>

/*
 * Use regular trace points on architectures that implement noinstr
 * tooling: these calls will only happen with RCU enabled, which can
 * use a regular tracepoint.
 *
 * On older architectures, RCU may not be watching in idle. In that
 * case, wake up RCU to watch while calling the tracepoint. These
 * calls aren't NMI-safe, so exclude NMI contexts:
 */
#ifdef CONFIG_ARCH_WANTS_NO_INSTR
#define trace(point, args)	trace_##point(args)
#else
#define trace(point, args)					\
	do {							\
		if (trace_##point##_enabled()) {		\
			bool exit_rcu = false;			\
			if (in_nmi())				\
				break;				\
			if (!IS_ENABLED(CONFIG_TINY_RCU) &&	\
			    is_idle_task(current)) {		\
				ct_irq_enter();			\
				exit_rcu = true;		\
			}					\
			trace_##point(args);			\
			if (exit_rcu)				\
				ct_irq_exit();			\
		}						\
	} while (0)
#endif
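
/*
 * For illustration only (not compiled): on a kernel without
 * CONFIG_ARCH_WANTS_NO_INSTR, trace(irq_disable, TP_ARGS(ip, pip))
 * expands roughly to:
 *
 *	do {
 *		if (trace_irq_disable_enabled()) {
 *			bool exit_rcu = false;
 *			if (in_nmi())
 *				break;		// tracepoint is not NMI-safe
 *			if (!IS_ENABLED(CONFIG_TINY_RCU) &&
 *			    is_idle_task(current)) {
 *				ct_irq_enter();	// make RCU watch in idle
 *				exit_rcu = true;
 *			}
 *			trace_irq_disable(ip, pip);
 *			if (exit_rcu)
 *				ct_irq_exit();	// restore RCU's idle state
 *		}
 *	} while (0);
 *
 * i.e. the tracepoint only fires once RCU is known to be watching.
 */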

#ifdef CONFIG_TRACE_IRQFLAGS
/* Per-cpu flag to prevent redundant calls when IRQs are already off */
static DEFINE_PER_CPU(int, tracing_irq_cpu);

/*
 * Like trace_hardirqs_on() but without the lockdep invocation. This is
 * used in the low-level entry code where the ordering vs. RCU is
 * important and lockdep uses a staged approach which splits the lockdep
 * hardirq tracking into an RCU-on and an RCU-off section.
 */
void trace_hardirqs_on_prepare(void)
{
	if (this_cpu_read(tracing_irq_cpu)) {
		trace(irq_enable, TP_ARGS(CALLER_ADDR0, CALLER_ADDR1));
		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
		this_cpu_write(tracing_irq_cpu, 0);
	}
}
EXPORT_SYMBOL(trace_hardirqs_on_prepare);
NOKPROBE_SYMBOL(trace_hardirqs_on_prepare);
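
/*
 * A sketch of the intended call sequence, modeled on the user-mode exit
 * path in kernel/entry/common.c (names here are illustrative, not a
 * definitive implementation):
 *
 *	instrumentation_begin();
 *	trace_hardirqs_on_prepare();		// tracing while RCU watches
 *	lockdep_hardirqs_on_prepare();
 *	instrumentation_end();
 *
 *	user_enter_irqoff();			// RCU stops watching here
 *	lockdep_hardirqs_on(CALLER_ADDR0);	// lockdep's RCU-off half
 *
 * The tracing half must run before RCU stops watching; lockdep's final
 * update is written to be safe afterwards.
 */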

void trace_hardirqs_on(void)
{
	if (this_cpu_read(tracing_irq_cpu)) {
		trace(irq_enable, TP_ARGS(CALLER_ADDR0, CALLER_ADDR1));
		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
		this_cpu_write(tracing_irq_cpu, 0);
	}

	lockdep_hardirqs_on_prepare();
	lockdep_hardirqs_on(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_on);
NOKPROBE_SYMBOL(trace_hardirqs_on);

/*
 * Like trace_hardirqs_off() but without the lockdep invocation. This is
 * used in the low-level entry code where the ordering vs. RCU is
 * important and lockdep uses a staged approach which splits the lockdep
 * hardirq tracking into an RCU-on and an RCU-off section.
 */
void trace_hardirqs_off_finish(void)
{
	if (!this_cpu_read(tracing_irq_cpu)) {
		this_cpu_write(tracing_irq_cpu, 1);
		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
		trace(irq_disable, TP_ARGS(CALLER_ADDR0, CALLER_ADDR1));
	}
}
EXPORT_SYMBOL(trace_hardirqs_off_finish);
NOKPROBE_SYMBOL(trace_hardirqs_off_finish);
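
/*
 * The entry-side counterpart, again a sketch modeled on the user-mode
 * entry path in kernel/entry/common.c (illustrative only):
 *
 *	lockdep_hardirqs_off(CALLER_ADDR0);	// lockdep's RCU-off half
 *	user_exit_irqoff();			// RCU starts watching here
 *
 *	instrumentation_begin();
 *	trace_hardirqs_off_finish();		// tracing while RCU watches
 *	instrumentation_end();
 */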

void trace_hardirqs_off(void)
{
	lockdep_hardirqs_off(CALLER_ADDR0);

	if (!this_cpu_read(tracing_irq_cpu)) {
		this_cpu_write(tracing_irq_cpu, 1);
		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
		trace(irq_disable, TP_ARGS(CALLER_ADDR0, CALLER_ADDR1));
	}
}
EXPORT_SYMBOL(trace_hardirqs_off);
NOKPROBE_SYMBOL(trace_hardirqs_off);
#endif /* CONFIG_TRACE_IRQFLAGS */

#ifdef CONFIG_TRACE_PREEMPT_TOGGLE

void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	trace(preempt_enable, TP_ARGS(a0, a1));
	tracer_preempt_on(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	trace(preempt_disable, TP_ARGS(a0, a1));
	tracer_preempt_off(a0, a1);
}
#endif /* CONFIG_TRACE_PREEMPT_TOGGLE */
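
/*
 * These hooks are invoked from the preempt counter code when the count
 * transitions between zero and non-zero. A sketch of the caller side,
 * modeled on preempt_latency_start()/preempt_latency_stop() in
 * kernel/sched/core.c (illustrative, not a definitive implementation):
 *
 *	preempt_count_add(val);
 *	if (preempt_count() == val)	// count went 0 -> !0: preempt off
 *		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
 *	...
 *	if (preempt_count() == val)	// count about to go !0 -> 0: back on
 *		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
 *	preempt_count_sub(val);
 */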