xref: /linux/kernel/power/process.c (revision 93d90ad708b8da6efc0e487b66111aa9db7f70c7)
/*
 * kernel/power/process.c - Functions for starting/stopping processes on
 *                          suspend transitions.
 *
 * Originally from swsusp.
 */


#undef DEBUG

#include <linux/interrupt.h>
#include <linux/oom.h>
#include <linux/suspend.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/freezer.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/kmod.h>
#include <trace/events/power.h>

/*
 * Timeout for stopping processes
 */
unsigned int __read_mostly freeze_timeout_msecs = 20 * MSEC_PER_SEC;
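
/*
 * The timeout above is meant to be adjustable at run time: this is the value
 * that /sys/power/pm_freeze_timeout is expected to read and write, in
 * milliseconds.  For example (60000 is just an illustrative value):
 *
 *	# echo 60000 > /sys/power/pm_freeze_timeout
 */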

static int try_to_freeze_tasks(bool user_only)
{
	struct task_struct *g, *p;
	unsigned long end_time;
	unsigned int todo;
	bool wq_busy = false;
	struct timeval start, end;
	u64 elapsed_msecs64;
	unsigned int elapsed_msecs;
	bool wakeup = false;
	int sleep_usecs = USEC_PER_MSEC;

	do_gettimeofday(&start);

	end_time = jiffies + msecs_to_jiffies(freeze_timeout_msecs);

	if (!user_only)
		freeze_workqueues_begin();

	while (true) {
		todo = 0;
		read_lock(&tasklist_lock);
		for_each_process_thread(g, p) {
			if (p == current || !freeze_task(p))
				continue;

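			/*
			 * Tasks that sit between freezer_do_not_count() and
			 * freezer_count() (a vfork() parent waiting for its
			 * child, for instance) are deliberately not counted
			 * as busy: they will enter the refrigerator on their
			 * own as soon as they resume.
			 */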
			if (!freezer_should_skip(p))
				todo++;
		}
		read_unlock(&tasklist_lock);

		if (!user_only) {
			wq_busy = freeze_workqueues_busy();
			todo += wq_busy;
		}

		if (!todo || time_after(jiffies, end_time))
			break;

		if (pm_wakeup_pending()) {
			wakeup = true;
			break;
		}

		/*
		 * We need to retry, but first give the freezing tasks some
		 * time to enter the refrigerator.  Start with an initial
		 * 1 ms sleep followed by exponential backoff until 8 ms.
		 * Since usleep_range() sleeps between half the value and the
		 * full value, the actual waits are 0.5-1 ms, 1-2 ms, 2-4 ms,
		 * and then 4-8 ms for every retry after that.
		 */
		usleep_range(sleep_usecs / 2, sleep_usecs);
		if (sleep_usecs < 8 * USEC_PER_MSEC)
			sleep_usecs *= 2;
	}

	do_gettimeofday(&end);
	elapsed_msecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
	do_div(elapsed_msecs64, NSEC_PER_MSEC);
	elapsed_msecs = elapsed_msecs64;

	if (todo) {
		printk("\n");
		printk(KERN_ERR "Freezing of tasks %s after %d.%03d seconds "
		       "(%d tasks refusing to freeze, wq_busy=%d):\n",
		       wakeup ? "aborted" : "failed",
		       elapsed_msecs / 1000, elapsed_msecs % 1000,
		       todo - wq_busy, wq_busy);

		if (!wakeup) {
			read_lock(&tasklist_lock);
			for_each_process_thread(g, p) {
				if (p != current && !freezer_should_skip(p)
				    && freezing(p) && !frozen(p))
					sched_show_task(p);
			}
			read_unlock(&tasklist_lock);
		}
	} else {
		printk("(elapsed %d.%03d seconds) ", elapsed_msecs / 1000,
			elapsed_msecs % 1000);
	}

	return todo ? -EBUSY : 0;
}
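
/*
 * For context, a rough sketch (not taken from this file) of the main loop a
 * freezable kernel thread typically runs; try_to_freeze() is the point where
 * such a thread parks itself in the refrigerator after freeze_task() has
 * marked it as freezing:
 *
 *	static int example_thread_fn(void *data)   // hypothetical thread
 *	{
 *		set_freezable();
 *		while (!kthread_should_stop()) {
 *			do_work(data);              // stand-in for real work
 *			try_to_freeze();
 *		}
 *		return 0;
 *	}
 */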

/* Must be called with tasklist_lock held; see check_frozen_processes(). */
static bool __check_frozen_processes(void)
{
	struct task_struct *g, *p;

	for_each_process_thread(g, p)
		if (p != current && !freezer_should_skip(p) && !frozen(p))
			return false;

	return true;
}

/*
 * Returns true if all freezable tasks (except for current) are already frozen.
 */
static bool check_frozen_processes(void)
{
	bool ret;

	read_lock(&tasklist_lock);
	ret = __check_frozen_processes();
	read_unlock(&tasklist_lock);
	return ret;
}

/**
 * freeze_processes - Signal user space processes to enter the refrigerator.
 *
 * The current thread will not be frozen.  The same process that calls
 * freeze_processes() must later call thaw_processes().
 *
 * On success, returns 0.  On failure, returns -errno and the system is
 * fully thawed.
 */
int freeze_processes(void)
{
	int error;
	int oom_kills_saved;

	error = __usermodehelper_disable(UMH_FREEZING);
	if (error)
		return error;

	/* Make sure this task doesn't get frozen */
	current->flags |= PF_SUSPEND_TASK;

	if (!pm_freezing)
		atomic_inc(&system_freezing_cnt);

	pm_wakeup_clear();
	printk("Freezing user space processes ... ");
	pm_freezing = true;
	oom_kills_saved = oom_kills_count();
	error = try_to_freeze_tasks(true);
	if (!error) {
		__usermodehelper_set_disable_depth(UMH_DISABLED);
		oom_killer_disable();

		/*
		 * There might have been an OOM kill while we were freezing
		 * tasks and the killed task might still be on its way out,
		 * so we have to double-check for the race.
		 */
		if (oom_kills_count() != oom_kills_saved &&
		    !check_frozen_processes()) {
			__usermodehelper_set_disable_depth(UMH_ENABLED);
			printk("OOM in progress.");
			error = -EBUSY;
		} else {
			printk("done.");
		}
	}
	printk("\n");
	BUG_ON(in_atomic());

	if (error)
		thaw_processes();
	return error;
}
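
/*
 * A minimal sketch of how a suspend-side caller is expected to use the
 * function above, relying on the "fully thawed on failure" guarantee
 * documented in its kernel-doc (prepare_platform() and enter_state() are
 * hypothetical placeholders, not functions from this file):
 *
 *	error = freeze_processes();
 *	if (error)
 *		return error;		// nothing to unwind, already thawed
 *
 *	error = prepare_platform();
 *	if (!error)
 *		error = enter_state();
 *
 *	thaw_processes();		// always pair the successful freeze
 *	return error;
 */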

/**
 * freeze_kernel_threads - Make freezable kernel threads go to the refrigerator.
 *
 * On success, returns 0.  On failure, returns -errno and only the kernel
 * threads are thawed, so that the caller gets a chance to do additional
 * cleanups (if any) before thawing the user space tasks.  It is therefore
 * the caller's responsibility to thaw the user space tasks when the time
 * is right.
 */
int freeze_kernel_threads(void)
{
	int error;

	printk("Freezing remaining freezable tasks ... ");
	pm_nosig_freezing = true;
	error = try_to_freeze_tasks(false);
	if (!error)
		printk("done.");

	printk("\n");
	BUG_ON(in_atomic());

	if (error)
		thaw_kernel_threads();
	return error;
}
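
/*
 * A sketch of the hibernation-style pairing the kernel-doc above has in
 * mind: user space is frozen first, kernel threads second, and on failure
 * only the kernel threads are thawed so the caller can clean up (the
 * cleanup step below is a hypothetical placeholder) before user space is
 * released:
 *
 *	error = freeze_processes();
 *	if (error)
 *		return error;
 *
 *	error = freeze_kernel_threads();
 *	if (error) {
 *		// kernel threads already re-thawed by freeze_kernel_threads()
 *		undo_partial_setup();		// hypothetical cleanup
 *		thaw_processes();
 *		return error;
 *	}
 *
 *	... create the snapshot, write the image, etc. ...
 *
 *	thaw_processes();			// also thaws kernel threads
 */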

void thaw_processes(void)
{
	struct task_struct *g, *p;
	struct task_struct *curr = current;

	trace_suspend_resume(TPS("thaw_processes"), 0, true);
	if (pm_freezing)
		atomic_dec(&system_freezing_cnt);
	pm_freezing = false;
	pm_nosig_freezing = false;

	oom_killer_enable();

	printk("Restarting tasks ... ");

	__usermodehelper_set_disable_depth(UMH_FREEZING);
	thaw_workqueues();

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		/* No other threads should have PF_SUSPEND_TASK set */
		WARN_ON((p != curr) && (p->flags & PF_SUSPEND_TASK));
		__thaw_task(p);
	}
	read_unlock(&tasklist_lock);

	WARN_ON(!(curr->flags & PF_SUSPEND_TASK));
	curr->flags &= ~PF_SUSPEND_TASK;

	usermodehelper_enable();

	schedule();
	printk("done.\n");
	trace_suspend_resume(TPS("thaw_processes"), 0, false);
}

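/*
 * Unlike thaw_processes() above, the function below only wakes PF_KTHREAD
 * and PF_WQ_WORKER tasks: user space stays frozen until thaw_processes()
 * runs, which is what the partial unwind described above
 * freeze_kernel_threads() relies on.
 */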
void thaw_kernel_threads(void)
{
	struct task_struct *g, *p;

	pm_nosig_freezing = false;
	printk("Restarting kernel threads ... ");

	thaw_workqueues();

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		if (p->flags & (PF_KTHREAD | PF_WQ_WORKER))
			__thaw_task(p);
	}
	read_unlock(&tasklist_lock);

	schedule();
	printk("done.\n");
}
269