/* xref: /linux/kernel/smp.c (revision 4d7696f1b05f4aeb586c74868fe3da2731daca4b) */
/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#include "smpboot.h"

#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
enum {
	CSD_FLAG_LOCK		= 0x01,
};

struct call_function_data {
	struct call_single_data	__percpu *csd;
	cpumask_var_t		cpumask;
	cpumask_var_t		cpumask_ipi;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);

struct call_single_queue {
	struct list_head	list;
	raw_spinlock_t		lock;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_queue, call_single_queue);

static int
hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				cpu_to_node(cpu)))
			return notifier_from_errno(-ENOMEM);
		if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
				cpu_to_node(cpu))) {
			free_cpumask_var(cfd->cpumask);
			return notifier_from_errno(-ENOMEM);
		}
		cfd->csd = alloc_percpu(struct call_single_data);
		if (!cfd->csd) {
			free_cpumask_var(cfd->cpumask_ipi);
			free_cpumask_var(cfd->cpumask);
			return notifier_from_errno(-ENOMEM);
		}
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:

	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		free_cpumask_var(cfd->cpumask);
		free_cpumask_var(cfd->cpumask_ipi);
		free_percpu(cfd->csd);
		break;
#endif
	}

	return NOTIFY_OK;
}

static struct notifier_block hotplug_cfd_notifier = {
	.notifier_call		= hotplug_cfd,
};

void __init call_function_init(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int i;

	for_each_possible_cpu(i) {
		struct call_single_queue *q = &per_cpu(call_single_queue, i);

		raw_spin_lock_init(&q->lock);
		INIT_LIST_HEAD(&q->list);
	}

	hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
	register_cpu_notifier(&hotplug_cfd_notifier);
}

/*
 * csd_lock/csd_unlock are used to serialize access to per-cpu csd resources.
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static void csd_lock_wait(struct call_single_data *csd)
{
	while (csd->flags & CSD_FLAG_LOCK)
		cpu_relax();
}

static void csd_lock(struct call_single_data *csd)
{
	csd_lock_wait(csd);
	csd->flags |= CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data structure:
	 */
	smp_mb();
}

static void csd_unlock(struct call_single_data *csd)
{
	WARN_ON(!(csd->flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_mb();

	csd->flags &= ~CSD_FLAG_LOCK;
}
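
/*
 * A sketch of the typical csd lifecycle, as used by the helpers below
 * (illustrative, not a separate API): the sender locks the csd, fills in
 * ->func/->info, queues it on the target's call_single_queue and sends an
 * IPI; the target CPU runs ->func and unlocks the csd, which is what a
 * waiting sender spins on in csd_lock_wait():
 *
 *	csd_lock(csd);
 *	csd->func = func;
 *	csd->info = info;
 *	generic_exec_single(cpu, csd, wait);
 */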

/*
 * Insert a previously allocated call_single_data element
 * for execution on the given CPU. The csd must already have
 * ->func, ->info, and ->flags set.
 */
static
void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
{
	struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
	unsigned long flags;
	int ipi;

	raw_spin_lock_irqsave(&dst->lock, flags);
	ipi = list_empty(&dst->list);
	list_add_tail(&csd->list, &dst->list);
	raw_spin_unlock_irqrestore(&dst->lock, flags);

	/*
	 * The list addition should be visible before the IPI is sent:
	 * the handler locks the list to pull the entry off it, so the
	 * normal cache coherency rules implied by the spinlocks give us
	 * the required ordering.
	 *
	 * If IPIs can go out of order with respect to the cache coherency
	 * protocol on an architecture, sufficient synchronisation should
	 * be added to the arch code to make it appear to obey cache
	 * coherency WRT locking and barrier primitives. Generic code
	 * isn't really equipped to do the right thing...
	 */
	if (ipi)
		arch_send_call_function_single_ipi(cpu);

	if (wait)
		csd_lock_wait(csd);
}

/*
 * Invoked by arch to handle an IPI for call function single. Must be
 * called from the arch with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	struct call_single_queue *q = &__get_cpu_var(call_single_queue);
	LIST_HEAD(list);

	/*
	 * Shouldn't receive this interrupt on a cpu that is not yet online.
	 */
	WARN_ON_ONCE(!cpu_online(smp_processor_id()));

	raw_spin_lock(&q->lock);
	list_replace_init(&q->list, &list);
	raw_spin_unlock(&q->lock);

	while (!list_empty(&list)) {
		struct call_single_data *csd;

		csd = list_entry(list.next, struct call_single_data, list);
		list_del(&csd->list);

		csd->func(csd->info);

		csd_unlock(csd);
	}
}

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until the function has completed on the specified CPU.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
			     int wait)
{
	struct call_single_data d = {
		.flags = 0,
	};
	unsigned long flags;
	int this_cpu;
	int err = 0;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow CPUs that are not yet online though, as no one else can
	 * send an smp call function interrupt to this cpu and as such
	 * deadlocks can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	if (cpu == this_cpu) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	} else {
		if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
			struct call_single_data *csd = &d;

			if (!wait)
				csd = &__get_cpu_var(csd_data);

			csd_lock(csd);

			csd->func = func;
			csd->info = info;
			generic_exec_single(cpu, csd, wait);
		} else {
			err = -ENXIO;	/* CPU not online */
		}
	}

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);
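
/*
 * Usage sketch for smp_call_function_single() (illustrative only; the
 * callback and variable names below are made up for the example): read a
 * value on another CPU and wait for the result.
 *
 *	static void read_remote_value(void *info)
 *	{
 *		*(unsigned long *)info = some_per_cpu_value();
 *	}
 *
 *	unsigned long val;
 *	int err = smp_call_function_single(cpu, read_remote_value, &val, 1);
 *
 * The callback runs with interrupts disabled on the target CPU, so it must
 * be fast and must not sleep.
 */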

/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
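
/*
 * Example use of smp_call_function_any() (illustrative; poll_device() and
 * dev are made-up names): run a quick poll on whichever online CPU in the
 * device's affinity mask is cheapest to reach, per the selection
 * preference documented above:
 *
 *	err = smp_call_function_any(dev->cpu_mask, poll_device, dev, 1);
 */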

/**
 * __smp_call_function_single(): Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 * @wait: If true, wait until function has completed on specified CPU.
 *
 * Like smp_call_function_single(), but allow caller to pass in a
 * pre-allocated data structure. Useful for embedding @csd inside
 * other structures, for instance.
 */
void __smp_call_function_single(int cpu, struct call_single_data *csd,
				int wait)
{
	unsigned int this_cpu;
	unsigned long flags;

	this_cpu = get_cpu();
	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow CPUs that are not yet online though, as no one else can
	 * send an smp call function interrupt to this cpu and as such
	 * deadlocks can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && wait && irqs_disabled()
		     && !oops_in_progress);

	if (cpu == this_cpu) {
		local_irq_save(flags);
		csd->func(csd->info);
		local_irq_restore(flags);
	} else {
		csd_lock(csd);
		generic_exec_single(cpu, csd, wait);
	}
	put_cpu();
}
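
/*
 * Sketch of the "embedded csd" pattern mentioned above (all names here are
 * illustrative, not an existing user): a request structure carries its own
 * call_single_data so completion can be punted to another CPU without any
 * allocation in the fast path.
 *
 *	struct my_request {
 *		struct call_single_data csd;
 *		...
 *	};
 *
 *	req->csd.func = my_complete_request;
 *	req->csd.info = req;
 *	req->csd.flags = 0;
 *	__smp_call_function_single(target_cpu, &req->csd, 0);
 */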

/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait)
{
	struct call_function_data *cfd;
	int cpu, next_cpu, this_cpu = smp_processor_id();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow CPUs that are not yet online though, as no one else can
	 * send an smp call function interrupt to this cpu and as such
	 * deadlocks can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress && !early_boot_irqs_disabled);

	/* Try to fastpath.  So, what's a CPU they want? Ignoring this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

	/* No online cpus?  We're done. */
	if (cpu >= nr_cpu_ids)
		return;

	/* Do we have another CPU which isn't us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == this_cpu)
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	/* Fastpath: do that cpu by itself. */
	if (next_cpu >= nr_cpu_ids) {
		smp_call_function_single(cpu, func, info, wait);
		return;
	}

	cfd = &__get_cpu_var(cfd_data);

	cpumask_and(cfd->cpumask, mask, cpu_online_mask);
	cpumask_clear_cpu(this_cpu, cfd->cpumask);

	/* Some callers race with other cpus changing the passed mask */
	if (unlikely(!cpumask_weight(cfd->cpumask)))
		return;

	/*
	 * After we put an entry into the list, cfd->cpumask may be cleared
	 * again when another CPU sends another IPI for an SMP function
	 * call, so cfd->cpumask could be observed as zero. Use a private
	 * copy, cfd->cpumask_ipi, for actually sending the IPIs.
	 */
	cpumask_copy(cfd->cpumask_ipi, cfd->cpumask);

	for_each_cpu(cpu, cfd->cpumask) {
		struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);
		struct call_single_queue *dst =
					&per_cpu(call_single_queue, cpu);
		unsigned long flags;

		csd_lock(csd);
		csd->func = func;
		csd->info = info;

		raw_spin_lock_irqsave(&dst->lock, flags);
		list_add_tail(&csd->list, &dst->list);
		raw_spin_unlock_irqrestore(&dst->lock, flags);
	}

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi_mask(cfd->cpumask_ipi);

	if (wait) {
		for_each_cpu(cpu, cfd->cpumask) {
			struct call_single_data *csd;

			csd = per_cpu_ptr(cfd->csd, cpu);
			csd_lock_wait(csd);
		}
	}
}
EXPORT_SYMBOL(smp_call_function_many);
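
/*
 * Callers of smp_call_function_many() must have preemption disabled, e.g.
 * (illustrative; some_mask, my_func and my_info are made-up names):
 *
 *	preempt_disable();
 *	smp_call_function_many(some_mask, my_func, my_info, true);
 *	preempt_enable();
 *
 * smp_call_function() below is exactly this pattern applied to
 * cpu_online_mask.
 */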

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(smp_call_func_t func, void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();

	return 0;
}
EXPORT_SYMBOL(smp_call_function);
#endif /* CONFIG_USE_GENERIC_SMP_HELPERS */

/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);


/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */
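
/*
 * For example, booting with "maxcpus=4" brings up at most four CPUs, and
 * "nosmp" is equivalent to "maxcpus=0". "nr_cpus=<NUM>" is different: it
 * caps nr_cpu_ids itself (the hard limit handled by nrcpus() below).
 */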

void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
	setup_max_cpus = 0;
	arch_disable_smp_support();

	return 0;
}

early_param("nosmp", nosmp);

/* this is the hard limit on the number of CPU ids */
static int __init nrcpus(char *str)
{
	int nr_cpus;

	get_option(&str, &nr_cpus);
	if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
		nr_cpu_ids = nr_cpus;

	return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
	get_option(&str, &setup_max_cpus);
	if (setup_max_cpus == 0)
		arch_disable_smp_support();

	return 0;
}

early_param("maxcpus", maxcpus);

/* Setup number of possible processor ids */
int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}

/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
	unsigned int cpu;

	idle_threads_init();

	/* FIXME: This should be done in userspace --RR */
	for_each_present_cpu(cpu) {
		if (num_online_cpus() >= setup_max_cpus)
			break;
		if (!cpu_online(cpu))
			cpu_up(cpu);
	}

	/* Any cleanup work */
	printk(KERN_INFO "Brought up %ld CPUs\n", (long)num_online_cpus());
	smp_cpus_done(setup_max_cpus);
}

/*
 * Call a function on all processors.  May be used during early boot while
 * early_boot_irqs_disabled is set.  Use local_irq_save/restore() instead
 * of local_irq_disable/enable().
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
	unsigned long flags;
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);

/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
			void *info, bool wait)
{
	int cpu = get_cpu();

	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(cpu, mask)) {
		local_irq_disable();
		func(info);
		local_irq_enable();
	}
	put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_mask);
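
/*
 * Example (illustrative; my_func is a made-up name): run a function on
 * every online CPU of the local NUMA node, including the calling CPU if
 * it is part of that node:
 *
 *	on_each_cpu_mask(cpumask_of_node(numa_node_id()), my_func, NULL, true);
 */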

/*
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:	A callback function that is passed a cpu id and
 *		the info parameter. The function is called
 *		with preemption disabled. The function should
 *		return a boolean value indicating whether to IPI
 *		the specified CPU.
 * @func:	The function to run on all applicable CPUs.
 *		This must be fast and non-blocking.
 * @info:	An arbitrary pointer to pass to both functions.
 * @wait:	If true, wait (atomically) until function has
 *		completed on other CPUs.
 * @gfp_flags:	GFP flags to use when allocating the cpumask
 *		used internally by the function.
 *
 * The function might sleep if the GFP flags indicate that a
 * non-atomic allocation is allowed.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
			smp_call_func_t func, void *info, bool wait,
			gfp_t gfp_flags)
{
	cpumask_var_t cpus;
	int cpu, ret;

	might_sleep_if(gfp_flags & __GFP_WAIT);

	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info))
				cpumask_set_cpu(cpu, cpus);
		on_each_cpu_mask(cpus, func, info, wait);
		preempt_enable();
		free_cpumask_var(cpus);
	} else {
		/*
		 * No free cpumask, bother. No matter, we'll
		 * just have to IPI them one by one.
		 */
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info)) {
				ret = smp_call_function_single(cpu, func,
								info, wait);
				WARN_ON_ONCE(ret);
			}
		preempt_enable();
	}
}
EXPORT_SYMBOL(on_each_cpu_cond);
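
/*
 * Example use of on_each_cpu_cond() (illustrative; the callbacks and the
 * per-cpu variable are made-up names): only interrupt CPUs that actually
 * have work pending.
 *
 *	static bool pending_work(int cpu, void *info)
 *	{
 *		return per_cpu(nr_pending, cpu) != 0;
 *	}
 *
 *	on_each_cpu_cond(pending_work, flush_pending, NULL, true, GFP_KERNEL);
 */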

static void do_nothing(void *unused)
{
}

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via the pm_idle
 * function pointer. So it's guaranteed that nothing uses the previous
 * pointer anymore.
 */
void kick_all_cpus_sync(void)
{
	/* Make sure the change is visible before we kick the cpus */
	smp_mb();
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);