/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CONTEXT_TRACKING_STATE_H
#define _LINUX_CONTEXT_TRACKING_STATE_H

#include <linux/percpu.h>
#include <linux/static_key.h>
#include <linux/context_tracking_irq.h>

/* Offset to allow distinguishing irq vs. task-based idle entry/exit. */
#define CT_NESTING_IRQ_NONIDLE	((LONG_MAX / 2) + 1)
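
/*
 * Illustrative note (not part of the original header): on 64-bit,
 * (LONG_MAX / 2) + 1 = 2^62.  A hedged reading, based on how the older
 * DYNTICK_IRQ_NONIDLE offset was used: the irq/NMI nesting counter is
 * parked at this huge value while the CPU runs non-idle task context, so
 * irq/NMI entry code can tell "interrupted ordinary kernel code" (huge
 * counter) apart from "interrupted the idle loop" (counter near zero)
 * without a separate flag.  The authoritative users live in
 * kernel/context_tracking.c.
 */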

enum ctx_state {
	CT_STATE_DISABLED	= -1,	/* returned by ct_state() if unknown */
	CT_STATE_KERNEL		= 0,
	CT_STATE_IDLE		= 1,
	CT_STATE_USER		= 2,
	CT_STATE_GUEST		= 3,
	CT_STATE_MAX		= 4,
};

struct context_tracking {
#ifdef CONFIG_CONTEXT_TRACKING_USER
	/*
	 * When active is false, probes are unset in order
	 * to minimize overhead: TIF flags are cleared
	 * and calls to user_enter/exit are ignored. This
	 * may be further optimized using static keys.
	 */
	bool active;
	int recursion;
#endif
#ifdef CONFIG_CONTEXT_TRACKING
	atomic_t state;
#endif
#ifdef CONFIG_CONTEXT_TRACKING_IDLE
	long nesting;		/* Track process nesting level. */
	long nmi_nesting;	/* Track irq/NMI nesting level. */
#endif
};

/*
 * We cram two different things within the same atomic variable:
 *
 *                     CT_RCU_WATCHING_START  CT_STATE_START
 *                                |                |
 *                                v                v
 *     MSB [ RCU watching counter ][ context_state ] LSB
 *         ^                       ^
 *         |                       |
 * CT_RCU_WATCHING_END        CT_STATE_END
 *
 * Bits are used from the LSB upwards, so unused bits (if any) will always be in
 * upper bits of the variable.
 */
#ifdef CONFIG_CONTEXT_TRACKING
#define CT_SIZE (sizeof(((struct context_tracking *)0)->state) * BITS_PER_BYTE)

#define CT_STATE_WIDTH bits_per(CT_STATE_MAX - 1)
#define CT_STATE_START 0
#define CT_STATE_END   (CT_STATE_START + CT_STATE_WIDTH - 1)

#define CT_RCU_WATCHING_MAX_WIDTH (CT_SIZE - CT_STATE_WIDTH)
#define CT_RCU_WATCHING_WIDTH     (IS_ENABLED(CONFIG_RCU_DYNTICKS_TORTURE) ? 2 : CT_RCU_WATCHING_MAX_WIDTH)
#define CT_RCU_WATCHING_START     (CT_STATE_END + 1)
#define CT_RCU_WATCHING_END       (CT_RCU_WATCHING_START + CT_RCU_WATCHING_WIDTH - 1)
#define CT_RCU_WATCHING           BIT(CT_RCU_WATCHING_START)

#define CT_STATE_MASK        GENMASK(CT_STATE_END,        CT_STATE_START)
#define CT_RCU_WATCHING_MASK GENMASK(CT_RCU_WATCHING_END, CT_RCU_WATCHING_START)

#define CT_UNUSED_WIDTH (CT_RCU_WATCHING_MAX_WIDTH - CT_RCU_WATCHING_WIDTH)

static_assert(CT_STATE_WIDTH        +
	      CT_RCU_WATCHING_WIDTH +
	      CT_UNUSED_WIDTH       ==
	      CT_SIZE);
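
/*
 * Worked example (illustrative, not part of the original header): with the
 * values defined above, a 32-bit atomic_t, and assuming
 * CONFIG_RCU_DYNTICKS_TORTURE=n:
 *
 *	CT_SIZE                   = 32
 *	CT_STATE_WIDTH            = bits_per(CT_STATE_MAX - 1) = bits_per(3) = 2
 *	CT_STATE_START/END        = 0 / 1,  CT_STATE_MASK        = 0x00000003
 *	CT_RCU_WATCHING_WIDTH     = 32 - 2 = 30
 *	CT_RCU_WATCHING_START/END = 2 / 31, CT_RCU_WATCHING_MASK = 0xfffffffc
 *	CT_RCU_WATCHING           = BIT(2) = 0x4
 *	CT_UNUSED_WIDTH           = 0
 *
 * so the static_assert above checks 2 + 30 + 0 == 32.
 */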

DECLARE_PER_CPU(struct context_tracking, context_tracking);
#endif	/* CONFIG_CONTEXT_TRACKING */

#ifdef CONFIG_CONTEXT_TRACKING_USER
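/*
 * Added note (not in the original source): __ct_state() reads a per-CPU
 * variable raw, so callers are expected to keep the task on one CPU;
 * ct_state() further down wraps it in preempt_disable()/preempt_enable()
 * for exactly that reason.
 */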
static __always_inline int __ct_state(void)
{
	return raw_atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_STATE_MASK;
}
#endif

#ifdef CONFIG_CONTEXT_TRACKING_IDLE
static __always_inline int ct_rcu_watching(void)
{
	return atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_RCU_WATCHING_MASK;
}

static __always_inline int ct_rcu_watching_cpu(int cpu)
{
	struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);

	return atomic_read(&ct->state) & CT_RCU_WATCHING_MASK;
}

static __always_inline int ct_rcu_watching_cpu_acquire(int cpu)
{
	struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);

	return atomic_read_acquire(&ct->state) & CT_RCU_WATCHING_MASK;
}

static __always_inline long ct_nesting(void)
{
	return __this_cpu_read(context_tracking.nesting);
}

static __always_inline long ct_nesting_cpu(int cpu)
{
	struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);

	return ct->nesting;
}

static __always_inline long ct_nmi_nesting(void)
{
	return __this_cpu_read(context_tracking.nmi_nesting);
}

static __always_inline long ct_nmi_nesting_cpu(int cpu)
{
	struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);

	return ct->nmi_nesting;
}
#endif /* #ifdef CONFIG_CONTEXT_TRACKING_IDLE */
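
/*
 * Illustrative sketch (not part of the original header): the RCU watching
 * counter is advanced in steps of CT_RCU_WATCHING, so its lowest bit flips
 * on every transition into or out of an extended quiescent state.  To the
 * best of my reading, "is RCU watching CPU x?" then reduces to testing that
 * bit in a snapshot, roughly:
 *
 *	static bool example_rcu_watching(int cpu)
 *	{
 *		return ct_rcu_watching_cpu(cpu) & CT_RCU_WATCHING;
 *	}
 *
 * The authoritative checks live in kernel/context_tracking.c and
 * kernel/rcu/tree.c; example_rcu_watching() is a hypothetical name.
 */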

#ifdef CONFIG_CONTEXT_TRACKING_USER
extern struct static_key_false context_tracking_key;

static __always_inline bool context_tracking_enabled(void)
{
	return static_branch_unlikely(&context_tracking_key);
}

static __always_inline bool context_tracking_enabled_cpu(int cpu)
{
	return context_tracking_enabled() && per_cpu(context_tracking.active, cpu);
}

static __always_inline bool context_tracking_enabled_this_cpu(void)
{
	return context_tracking_enabled() && __this_cpu_read(context_tracking.active);
}

/**
 * ct_state() - return the current context tracking state if known
 *
 * Returns the current CPU's context tracking state if context tracking
 * is enabled, or CT_STATE_DISABLED if it is not.  This is intended
 * primarily for debugging.
 */
static __always_inline int ct_state(void)
{
	int ret;

	if (!context_tracking_enabled())
		return CT_STATE_DISABLED;

	preempt_disable();
	ret = __ct_state();
	preempt_enable();

	return ret;
}
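
/*
 * Example usage (illustrative, not from this file): a debugging assertion
 * along the lines of the CT_WARN_ON() helper in <linux/context_tracking.h>,
 * e.g. to check that a path runs in kernel context:
 *
 *	int state = ct_state();
 *
 *	WARN_ON_ONCE(state != CT_STATE_DISABLED && state != CT_STATE_KERNEL);
 */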

#else
static __always_inline bool context_tracking_enabled(void) { return false; }
static __always_inline bool context_tracking_enabled_cpu(int cpu) { return false; }
static __always_inline bool context_tracking_enabled_this_cpu(void) { return false; }
#endif /* CONFIG_CONTEXT_TRACKING_USER */

#endif /* _LINUX_CONTEXT_TRACKING_STATE_H */