xref: /linux/kernel/cpu.c (revision b0148a98ec5151fec82064d95f11eb9efbc628ea)
/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>

/* This protects CPUs going up and down... */
static DEFINE_MUTEX(cpu_add_remove_lock);
static DEFINE_MUTEX(cpu_bitmask_lock);

static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

/* Crappy recursive lock-takers in cpufreq! Complain loudly about idiots */
static struct task_struct *recursive;
static int recursive_depth;

void lock_cpu_hotplug(void)
{
	struct task_struct *tsk = current;

	if (tsk == recursive) {
		static int warnings = 10;
		if (warnings) {
			printk(KERN_ERR "Lukewarm IQ detected in hotplug locking\n");
			WARN_ON(1);
			warnings--;
		}
		recursive_depth++;
		return;
	}
	mutex_lock(&cpu_bitmask_lock);
	recursive = tsk;
}
EXPORT_SYMBOL_GPL(lock_cpu_hotplug);

void unlock_cpu_hotplug(void)
{
	WARN_ON(recursive != current);
	if (recursive_depth) {
		recursive_depth--;
		return;
	}
	recursive = NULL;
	mutex_unlock(&cpu_bitmask_lock);
}
EXPORT_SYMBOL_GPL(unlock_cpu_hotplug);
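
/*
 * Usage sketch (illustrative only; init_per_cpu_data() is a hypothetical
 * helper, not something defined in this file): code that walks
 * cpu_online_map and must not race with CPUs coming or going brackets the
 * traversal with this lock pair:
 *
 *	lock_cpu_hotplug();
 *	for_each_online_cpu(cpu)
 *		init_per_cpu_data(cpu);
 *	unlock_cpu_hotplug();
 */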

#endif	/* CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int __cpuinit register_cpu_notifier(struct notifier_block *nb)
{
	int ret;
	mutex_lock(&cpu_add_remove_lock);
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	mutex_unlock(&cpu_add_remove_lock);
	return ret;
}
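
/*
 * Example subscriber (a sketch; my_cpu_callback and my_cpu_notifier are
 * made-up names, not defined here).  The callback receives one of the
 * CPU_* actions and the CPU number packed into hcpu, exactly as
 * _cpu_up()/_cpu_down() pass them below:
 *
 *	static int my_cpu_callback(struct notifier_block *nb,
 *				   unsigned long action, void *hcpu)
 *	{
 *		int cpu = (long)hcpu;
 *
 *		switch (action) {
 *		case CPU_UP_PREPARE:
 *			... allocate per-cpu state for cpu,
 *			    return NOTIFY_BAD to veto the bring-up ...
 *			break;
 *		case CPU_DEAD:
 *			... free that state again ...
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_cpu_notifier = {
 *		.notifier_call = my_cpu_callback,
 *	};
 *
 * followed by register_cpu_notifier(&my_cpu_notifier) at init time.
 */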

#ifdef CONFIG_HOTPLUG_CPU

EXPORT_SYMBOL(register_cpu_notifier);

void unregister_cpu_notifier(struct notifier_block *nb)
{
	mutex_lock(&cpu_add_remove_lock);
	raw_notifier_chain_unregister(&cpu_chain, nb);
	mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(unregister_cpu_notifier);

static inline void check_for_tasks(int cpu)
{
	struct task_struct *p;

	write_lock_irq(&tasklist_lock);
	for_each_process(p) {
		if (task_cpu(p) == cpu &&
		    (!cputime_eq(p->utime, cputime_zero) ||
		     !cputime_eq(p->stime, cputime_zero)))
			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
				"(state = %ld, flags = %lx)\n",
				p->comm, p->pid, cpu, p->state, p->flags);
	}
	write_unlock_irq(&tasklist_lock);
}

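/*
 * take_cpu_down() is handed to __stop_machine_run() by _cpu_down() below,
 * so it runs on the dying CPU itself while every other CPU is pinned in a
 * stop_machine thread; that is what makes the __cpu_disable() /
 * sched_idle_next() sequence safe against concurrent activity.
 */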
/* Take this CPU down. */
static int take_cpu_down(void *unused)
{
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/* Force idle task to run as soon as we yield: it should
	   immediately notice cpu is offline and die quickly. */
	sched_idle_next();
	return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int _cpu_down(unsigned int cpu)
{
	int err;
	struct task_struct *p;
	cpumask_t old_allowed, tmp;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	err = raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
						(void *)(long)cpu);
	if (err == NOTIFY_BAD) {
		printk(KERN_ERR "%s: attempt to take down CPU %u failed\n",
				__FUNCTION__, cpu);
		return -EINVAL;
	}

	/* Ensure that we are not runnable on dying cpu */
	old_allowed = current->cpus_allowed;
	tmp = CPU_MASK_ALL;
	cpu_clear(cpu, tmp);
	set_cpus_allowed(current, tmp);

	mutex_lock(&cpu_bitmask_lock);
	p = __stop_machine_run(take_cpu_down, NULL, cpu);
	mutex_unlock(&cpu_bitmask_lock);

	if (IS_ERR(p) || cpu_online(cpu)) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED,
				(void *)(long)cpu) == NOTIFY_BAD)
			BUG();

		if (IS_ERR(p)) {
			err = PTR_ERR(p);
			goto out_allowed;
		}
		goto out_thread;
	}

	/* Wait for it to sleep (leaving idle task). */
	while (!idle_cpu(cpu))
		yield();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* Move it here so it can run. */
	kthread_bind(p, get_cpu());
	put_cpu();

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	if (raw_notifier_call_chain(&cpu_chain, CPU_DEAD,
			(void *)(long)cpu) == NOTIFY_BAD)
		BUG();

	check_for_tasks(cpu);

out_thread:
	err = kthread_stop(p);
out_allowed:
	set_cpus_allowed(current, old_allowed);
	return err;
}

int cpu_down(unsigned int cpu)
{
	int err = 0;

	mutex_lock(&cpu_add_remove_lock);
	if (cpu_hotplug_disabled)
		err = -EBUSY;
	else
		err = _cpu_down(cpu);

	mutex_unlock(&cpu_add_remove_lock);
	return err;
}
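
/*
 * cpu_down() is the entry point behind the sysfs "online" attribute
 * (writing 0 to /sys/devices/system/cpu/cpuN/online lands here via
 * drivers/base/cpu.c); the _cpu_down() worker is split out so that
 * disable_nonboot_cpus() can call it with cpu_add_remove_lock already held.
 */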
#endif /*CONFIG_HOTPLUG_CPU*/

/* Requires cpu_add_remove_lock to be held */
static int __cpuinit _cpu_up(unsigned int cpu)
{
	int ret;
	void *hcpu = (void *)(long)cpu;

	if (cpu_online(cpu) || !cpu_present(cpu))
		return -EINVAL;

	ret = raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu);
	if (ret == NOTIFY_BAD) {
		printk(KERN_ERR "%s: attempt to bring up CPU %u failed\n",
				__FUNCTION__, cpu);
		ret = -EINVAL;
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	mutex_lock(&cpu_bitmask_lock);
	ret = __cpu_up(cpu);
	mutex_unlock(&cpu_bitmask_lock);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* CPU is up and running: tell everyone. */
	raw_notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu);

out_notify:
	if (ret != 0)
		raw_notifier_call_chain(&cpu_chain,
				CPU_UP_CANCELED, hcpu);

	return ret;
}
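
/*
 * Bring-up mirrors teardown: CPU_UP_PREPARE lets subscribers set up per-cpu
 * state (or veto with NOTIFY_BAD), __cpu_up() does the arch-specific work,
 * and CPU_ONLINE announces success; any failure is unwound with
 * CPU_UP_CANCELED.
 */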

int __cpuinit cpu_up(unsigned int cpu)
{
	int err = 0;

	mutex_lock(&cpu_add_remove_lock);
	if (cpu_hotplug_disabled)
		err = -EBUSY;
	else
		err = _cpu_up(cpu);

	mutex_unlock(&cpu_add_remove_lock);
	return err;
}
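
/*
 * cpu_up() is called both at boot, when smp_init() brings up the secondary
 * CPUs, and at runtime via the sysfs "online" attribute; _cpu_up() is split
 * out so that disable_nonboot_cpus() can bring the boot CPU online while
 * already holding cpu_add_remove_lock.
 */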

#ifdef CONFIG_SUSPEND_SMP
static cpumask_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	mutex_lock(&cpu_add_remove_lock);
	first_cpu = first_cpu(cpu_present_map);
	if (!cpu_online(first_cpu)) {
		error = _cpu_up(first_cpu);
		if (error) {
			printk(KERN_ERR "Could not bring CPU%d up.\n",
				first_cpu);
			goto out;
		}
	}

	/* We take down all of the non-boot CPUs in one shot to avoid races
	 * with userspace trying to use CPU hotplug at the same time.
	 */
	cpus_clear(frozen_cpus);
	printk("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		error = _cpu_down(cpu);
		if (!error) {
			cpu_set(cpu, frozen_cpus);
			printk("CPU%d is down\n", cpu);
		} else {
			printk(KERN_ERR "Error taking CPU%d down: %d\n",
				cpu, error);
			break;
		}
	}
	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		printk(KERN_ERR "Non-boot CPUs are not disabled\n");
	}
out:
	mutex_unlock(&cpu_add_remove_lock);
	return error;
}

void enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	mutex_lock(&cpu_add_remove_lock);
	cpu_hotplug_disabled = 0;
	mutex_unlock(&cpu_add_remove_lock);

	printk("Enabling non-boot CPUs ...\n");
	for_each_cpu_mask(cpu, frozen_cpus) {
		error = cpu_up(cpu);
		if (!error) {
			printk("CPU%d is up\n", cpu);
			continue;
		}
		printk(KERN_WARNING "Error taking CPU%d up: %d\n",
			cpu, error);
	}
	cpus_clear(frozen_cpus);
}
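
/*
 * The pair above is meant for the suspend/resume path: the PM core takes
 * every CPU except the boot CPU down before the machine sleeps, remembers
 * them in frozen_cpus, and brings that same set back up on resume.
 */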
#endif