xref: /linux/kernel/hung_task.c (revision bcb63314e2c23f1ed622418b65f9409512659c73)
/*
 * Detect Hung Task
 *
 * kernel/hung_task.c - kernel thread for detecting tasks stuck in D state
 *
 */

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/lockdep.h>
#include <linux/export.h>
#include <linux/sysctl.h>
#include <linux/utsname.h>
#include <trace/events/sched.h>

/*
 * Maximum number of tasks checked per scan:
 */
int __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT;

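/*
 * Note: this limit is normally tunable at run time as
 * /proc/sys/kernel/hung_task_check_count (the sysctl table registration
 * lives in kernel/sysctl.c and is not shown in this file).
 */
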
/*
 * Limit the number of tasks checked in a batch.
 *
 * This value controls the preemptibility of khungtaskd since preemption
 * is disabled during the critical section. It also controls the size of
 * the RCU grace period, so it needs an upper bound.
 */
#define HUNG_TASK_BATCHING 1024

/*
 * Zero means infinite timeout - no checking done:
 */
unsigned long __read_mostly sysctl_hung_task_timeout_secs = CONFIG_DEFAULT_HUNG_TASK_TIMEOUT;

int __read_mostly sysctl_hung_task_warnings = 10;

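/*
 * Usage sketch: the timeout can be changed at run time via the sysctl file
 * named in the warning message printed below, for example:
 *
 *   echo 300 > /proc/sys/kernel/hung_task_timeout_secs
 *
 * A negative sysctl_hung_task_warnings value is never decremented in
 * check_hung_task(), so it effectively means "warn without limit".
 */
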
static int __read_mostly did_panic;

static struct task_struct *watchdog_task;

/*
 * Should we panic (and reboot, if panic_timeout= is set) when a
 * hung task is detected:
 */
unsigned int __read_mostly sysctl_hung_task_panic =
				CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE;

static int __init hung_task_panic_setup(char *str)
{
	int rc = kstrtouint(str, 0, &sysctl_hung_task_panic);

	if (rc)
		return rc;
	return 1;
}
__setup("hung_task_panic=", hung_task_panic_setup);
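
/*
 * The value can also be set at boot with the "hung_task_panic=" kernel
 * command line parameter registered above, e.g.
 *
 *   hung_task_panic=1
 *
 * which makes the detector call panic() on the first hung task it finds.
 */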

static int
hung_task_panic(struct notifier_block *this, unsigned long event, void *ptr)
{
	did_panic = 1;

	return NOTIFY_DONE;
}

static struct notifier_block panic_block = {
	.notifier_call = hung_task_panic,
};

static void check_hung_task(struct task_struct *t, unsigned long timeout)
{
	unsigned long switch_count = t->nvcsw + t->nivcsw;

	/*
	 * Ensure the task is not frozen.
	 * Also, skip vfork and any other user process that freezer should skip.
	 */
	if (unlikely(t->flags & (PF_FROZEN | PF_FREEZER_SKIP)))
		return;

	/*
	 * A freshly created task that has been scheduled once and set its
	 * state to TASK_UNINTERRUPTIBLE without ever having been switched
	 * out must not be checked.
	 */
	if (unlikely(!switch_count))
		return;

	if (switch_count != t->last_switch_count) {
		t->last_switch_count = switch_count;
		return;
	}

	trace_sched_process_hang(t);

	if (!sysctl_hung_task_warnings && !sysctl_hung_task_panic)
		return;

	/*
	 * Ok, the task did not get scheduled for more than the configured
	 * timeout, complain:
	 */
	if (sysctl_hung_task_warnings) {
		if (sysctl_hung_task_warnings > 0)
			sysctl_hung_task_warnings--;
		pr_err("INFO: task %s:%d blocked for more than %ld seconds.\n",
			t->comm, t->pid, timeout);
		pr_err("      %s %s %.*s\n",
			print_tainted(), init_utsname()->release,
			(int)strcspn(init_utsname()->version, " "),
			init_utsname()->version);
		pr_err("\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
			" disables this message.\n");
		sched_show_task(t);
		debug_show_all_locks();
	}

	touch_nmi_watchdog();

	if (sysctl_hung_task_panic) {
		trigger_all_cpu_backtrace();
		panic("hung_task: blocked tasks");
	}
}
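
/*
 * In short: check_hung_task() only reports a task when its context switch
 * count (nvcsw + nivcsw) is unchanged between two consecutive scans, i.e.
 * the task has sat in TASK_UNINTERRUPTIBLE without being scheduled for at
 * least one full timeout interval (and up to nearly two, depending on when
 * in the interval it blocked).
 */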

/*
 * To avoid extending the RCU grace period for an unbounded amount of time,
 * periodically exit the critical section and enter a new one.
 *
 * For preemptible RCU it is sufficient to call rcu_read_unlock in order
 * to exit the grace period. For classic RCU, a reschedule is required.
 */
static bool rcu_lock_break(struct task_struct *g, struct task_struct *t)
{
	bool can_cont;

	get_task_struct(g);
	get_task_struct(t);
	rcu_read_unlock();
	cond_resched();
	rcu_read_lock();
	can_cont = pid_alive(g) && pid_alive(t);
	put_task_struct(t);
	put_task_struct(g);

	return can_cont;
}
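
/*
 * The get_task_struct() references keep g and t from being freed while the
 * RCU read-side lock is dropped; pid_alive() is rechecked afterwards because
 * the task list may have changed across the break, and resuming the walk
 * from an exited task would not be safe.
 */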

/*
 * Check whether a TASK_UNINTERRUPTIBLE task does not get woken up for
 * a really long time (120 seconds by default). If that happens, print out
 * a warning.
 */
static void check_hung_uninterruptible_tasks(unsigned long timeout)
{
	int max_count = sysctl_hung_task_check_count;
	int batch_count = HUNG_TASK_BATCHING;
	struct task_struct *g, *t;

	/*
	 * If the system crashed already then all bets are off,
	 * do not report extra hung tasks:
	 */
	if (test_taint(TAINT_DIE) || did_panic)
		return;

	rcu_read_lock();
	for_each_process_thread(g, t) {
		if (!max_count--)
			goto unlock;
		if (!--batch_count) {
			batch_count = HUNG_TASK_BATCHING;
			if (!rcu_lock_break(g, t))
				goto unlock;
		}
		/* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */
		if (t->state == TASK_UNINTERRUPTIBLE)
			check_hung_task(t, timeout);
	}
 unlock:
	rcu_read_unlock();
}

static long hung_timeout_jiffies(unsigned long last_checked,
				 unsigned long timeout)
{
	/* timeout of 0 will disable the watchdog */
	return timeout ? last_checked - jiffies + timeout * HZ :
		MAX_SCHEDULE_TIMEOUT;
}
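
/*
 * Worked example (illustrative numbers, assuming HZ=1000): with timeout=120
 * and a last_checked stamp taken 30 seconds ago, the result is
 * -30000 + 120000 = 90000 jiffies, i.e. the watchdog sleeps another 90
 * seconds before the next scan is due.
 */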

/*
 * Process an update of the timeout sysctl
 */
int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
				  void __user *buffer,
				  size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);

	if (ret || !write)
		goto out;

	wake_up_process(watchdog_task);

 out:
	return ret;
}
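
/*
 * Writing the sysctl wakes khungtaskd out of
 * schedule_timeout_interruptible(), so a newly shortened timeout takes
 * effect right away instead of after the previously programmed sleep
 * expires.
 */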

static atomic_t reset_hung_task = ATOMIC_INIT(0);

void reset_hung_task_detector(void)
{
	atomic_set(&reset_hung_task, 1);
}
EXPORT_SYMBOL_GPL(reset_hung_task_detector);
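
/*
 * reset_hung_task_detector() arms a one-shot flag that makes the watchdog
 * skip its next expiring scan (see the atomic_xchg() in watchdog() below).
 * The presumed users are callers that legitimately keep tasks in D state
 * for a long time, such as firmware calls; the actual call sites are not
 * part of this file.
 */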

/*
 * kthread which checks for tasks stuck in D state
 */
static int watchdog(void *dummy)
{
	unsigned long hung_last_checked = jiffies;

	set_user_nice(current, 0);

	for ( ; ; ) {
		unsigned long timeout = sysctl_hung_task_timeout_secs;
		long t = hung_timeout_jiffies(hung_last_checked, timeout);

		if (t <= 0) {
			if (!atomic_xchg(&reset_hung_task, 0))
				check_hung_uninterruptible_tasks(timeout);
			hung_last_checked = jiffies;
			continue;
		}
		schedule_timeout_interruptible(t);
	}

	return 0;
}
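
/*
 * The loop re-reads sysctl_hung_task_timeout_secs on every iteration, so a
 * timeout of 0 simply parks the thread in MAX_SCHEDULE_TIMEOUT sleeps until
 * the proc handler above wakes it after a non-zero value is written.
 */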

static int __init hung_task_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
	watchdog_task = kthread_run(watchdog, NULL, "khungtaskd");

	return 0;
}
subsys_initcall(hung_task_init);