xref: /linux/kernel/context_tracking.c (revision 4c62e9764ab403d42f9b8871b1241fe7812f19d4)
#include <linux/context_tracking.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>

struct context_tracking {
	/*
	 * When active is false, hooks are not set to
	 * minimize overhead: TIF flags are cleared
	 * and calls to user_enter/exit are ignored. This
	 * may be further optimized using static keys.
	 */
	bool active;
	enum {
		IN_KERNEL = 0,
		IN_USER,
	} state;
};

static DEFINE_PER_CPU(struct context_tracking, context_tracking) = {
#ifdef CONFIG_CONTEXT_TRACKING_FORCE
	.active = true,
#endif
};

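/*
 * Illustrative sketch, not part of this file: without
 * CONFIG_CONTEXT_TRACKING_FORCE the .active flag stays false and the
 * probes below are effectively no-ops.  A hypothetical helper that
 * turns tracking on for a given CPU would only need to flip the
 * per-cpu flag; the function name below is an assumption, not an
 * existing API at this revision.
 */
static void example_context_tracking_cpu_enable(int cpu)
{
	/* make user_enter()/user_exit() take effect on this CPU */
	per_cpu(context_tracking.active, cpu) = true;
}
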
void user_enter(void)
{
	unsigned long flags;

	/*
	 * Some contexts may involve an exception occurring in an irq,
	 * leading to the following nesting:
	 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
	 * This would mess up the dyntick_nesting count though. And rcu_irq_*()
	 * helpers are enough to protect RCU uses inside the exception. So
	 * just return immediately if we detect we are in an IRQ.
	 */
	if (in_interrupt())
		return;

	WARN_ON_ONCE(!current->mm);

	local_irq_save(flags);
	if (__this_cpu_read(context_tracking.active) &&
	    __this_cpu_read(context_tracking.state) != IN_USER) {
		__this_cpu_write(context_tracking.state, IN_USER);
		rcu_user_enter();
	}
	local_irq_restore(flags);
}

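/*
 * Illustrative sketch, not part of this file: user_enter() is meant to
 * be called from an architecture's return-to-userspace path, once it is
 * guaranteed that no further RCU read-side critical sections will run
 * before the actual return.  The function name below is hypothetical.
 */
static inline void example_return_to_user_notify(void)
{
	/* remaining kernel work (signals, preemption checks) is done */
	user_enter();	/* switch this CPU's RCU view to "in userspace" */
}
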
void user_exit(void)
{
	unsigned long flags;

	/*
	 * Some contexts may involve an exception occurring in an irq,
	 * leading to the following nesting:
	 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
	 * This would mess up the dyntick_nesting count though. And rcu_irq_*()
	 * helpers are enough to protect RCU uses inside the exception. So
	 * just return immediately if we detect we are in an IRQ.
	 */
	if (in_interrupt())
		return;

	local_irq_save(flags);
	if (__this_cpu_read(context_tracking.state) == IN_USER) {
		__this_cpu_write(context_tracking.state, IN_KERNEL);
		rcu_user_exit();
	}
	local_irq_restore(flags);
}

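/*
 * Illustrative sketch, not part of this file: the mirror hook.  An
 * architecture's syscall or exception entry path is expected to call
 * user_exit() before running any code that may use RCU, so that RCU
 * leaves its extended quiescent state first.  The function name below
 * is hypothetical.
 */
static inline void example_enter_from_user_notify(void)
{
	user_exit();	/* RCU may be used again from this point on */
	/* ... handle the syscall or exception ... */
}
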
void context_tracking_task_switch(struct task_struct *prev,
			     struct task_struct *next)
{
	if (__this_cpu_read(context_tracking.active)) {
		clear_tsk_thread_flag(prev, TIF_NOHZ);
		set_tsk_thread_flag(next, TIF_NOHZ);
	}
}
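
/*
 * Illustrative sketch, not part of this file: context_tracking_task_switch()
 * above moves TIF_NOHZ from the previous task to the next one, so the flag
 * always sits on the task currently running on a tracked CPU.  Arch entry
 * code can then use the flag to decide whether to take the slower context
 * tracking path at all; something along these lines, with a hypothetical
 * function name:
 */
static inline void example_syscall_entry_work(void)
{
	if (test_thread_flag(TIF_NOHZ))
		user_exit();	/* only pay the cost on tracked CPUs */
}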