xref: /linux/kernel/smp.c (revision 909d2bb07dc0e08ea81841f7c901f0f16f965f0e)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Generic helpers for SMP IPI calls
4  *
5  * (C) Jens Axboe <jens.axboe@oracle.com> 2008
6  */
7 
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9 
10 #include <linux/irq_work.h>
11 #include <linux/rcupdate.h>
12 #include <linux/rculist.h>
13 #include <linux/kernel.h>
14 #include <linux/export.h>
15 #include <linux/percpu.h>
16 #include <linux/init.h>
17 #include <linux/interrupt.h>
18 #include <linux/gfp.h>
19 #include <linux/smp.h>
20 #include <linux/cpu.h>
21 #include <linux/sched.h>
22 #include <linux/sched/idle.h>
23 #include <linux/hypervisor.h>
24 #include <linux/sched/clock.h>
25 #include <linux/nmi.h>
26 #include <linux/sched/debug.h>
27 #include <linux/jump_label.h>
28 #include <linux/string_choices.h>
29 
30 #include <trace/events/ipi.h>
31 #define CREATE_TRACE_POINTS
32 #include <trace/events/csd.h>
33 #undef CREATE_TRACE_POINTS
34 
35 #include "smpboot.h"
36 #include "sched/smp.h"
37 
38 #define CSD_TYPE(_csd)	((_csd)->node.u_flags & CSD_FLAG_TYPE_MASK)
39 
40 struct call_function_data {
41 	call_single_data_t	__percpu *csd;
42 	cpumask_var_t		cpumask;
43 	cpumask_var_t		cpumask_ipi;
44 };
45 
46 static DEFINE_PER_CPU_ALIGNED(struct call_function_data, cfd_data);
47 
48 static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
49 
50 static DEFINE_PER_CPU(atomic_t, trigger_backtrace) = ATOMIC_INIT(1);
51 
52 static void __flush_smp_call_function_queue(bool warn_cpu_offline);
53 
54 int smpcfd_prepare_cpu(unsigned int cpu)
55 {
56 	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
57 
58 	if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
59 				     cpu_to_node(cpu)))
60 		return -ENOMEM;
61 	if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
62 				     cpu_to_node(cpu))) {
63 		free_cpumask_var(cfd->cpumask);
64 		return -ENOMEM;
65 	}
66 	cfd->csd = alloc_percpu(call_single_data_t);
67 	if (!cfd->csd) {
68 		free_cpumask_var(cfd->cpumask);
69 		free_cpumask_var(cfd->cpumask_ipi);
70 		return -ENOMEM;
71 	}
72 
73 	return 0;
74 }
75 
76 int smpcfd_dead_cpu(unsigned int cpu)
77 {
78 	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
79 
80 	free_cpumask_var(cfd->cpumask);
81 	free_cpumask_var(cfd->cpumask_ipi);
82 	free_percpu(cfd->csd);
83 	return 0;
84 }
85 
86 int smpcfd_dying_cpu(unsigned int cpu)
87 {
88 	/*
89 	 * The IPIs for the smp-call-function callbacks queued by other CPUs
90 	 * might arrive late, either due to hardware latencies or because this
91 	 * CPU disabled interrupts (inside stop-machine) before the IPIs were
92 	 * sent. So flush out any pending callbacks explicitly (without waiting
93 	 * for the IPIs to arrive), to ensure that the outgoing CPU doesn't go
94 	 * offline with work still pending.
95 	 *
96 	 * This runs with interrupts disabled inside the stopper task invoked by
97 	 * stop_machine(), ensuring mutually exclusive CPU offlining and IPI flush.
98 	 */
99 	__flush_smp_call_function_queue(false);
100 	irq_work_run();
101 	return 0;
102 }
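
/*
 * A sketch of how these three callbacks are wired into the CPU hotplug
 * state machine. In the kernel proper this happens statically via the
 * cpuhp_hp_states table in kernel/cpu.c; the dynamic-registration
 * equivalent, assuming the CPUHP_SMPCFD_PREPARE and CPUHP_AP_SMPCFD_DYING
 * states, would look roughly like:
 *
 *	cpuhp_setup_state_nocalls(CPUHP_SMPCFD_PREPARE, "smpcfd:prepare",
 *				  smpcfd_prepare_cpu, smpcfd_dead_cpu);
 *	cpuhp_setup_state_nocalls(CPUHP_AP_SMPCFD_DYING, "smpcfd:dying",
 *				  NULL, smpcfd_dying_cpu);
 */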
103 
104 void __init call_function_init(void)
105 {
106 	int i;
107 
108 	for_each_possible_cpu(i)
109 		init_llist_head(&per_cpu(call_single_queue, i));
110 
111 	smpcfd_prepare_cpu(smp_processor_id());
112 }
113 
114 static __always_inline void
115 send_call_function_single_ipi(int cpu)
116 {
117 	if (call_function_single_prep_ipi(cpu)) {
118 		trace_ipi_send_cpu(cpu, _RET_IP_,
119 				   generic_smp_call_function_single_interrupt);
120 		arch_send_call_function_single_ipi(cpu);
121 	}
122 }
123 
124 static __always_inline void
125 send_call_function_ipi_mask(struct cpumask *mask)
126 {
127 	trace_ipi_send_cpumask(mask, _RET_IP_,
128 			       generic_smp_call_function_single_interrupt);
129 	arch_send_call_function_ipi_mask(mask);
130 }
131 
132 static __always_inline void
133 csd_do_func(smp_call_func_t func, void *info, call_single_data_t *csd)
134 {
135 	trace_csd_function_entry(func, csd);
136 	func(info);
137 	trace_csd_function_exit(func, csd);
138 }
139 
140 #ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
141 
142 static DEFINE_STATIC_KEY_MAYBE(CONFIG_CSD_LOCK_WAIT_DEBUG_DEFAULT, csdlock_debug_enabled);
143 
144 /*
145  * Parse the csdlock_debug= kernel boot parameter.
146  *
147  * If you need to restore the old "ext" value that once provided
148  * additional debugging information, reapply the following commits:
149  *
150  * de7b09ef658d ("locking/csd_lock: Prepare more CSD lock debugging")
151  * a5aabace5fb8 ("locking/csd_lock: Add more data to CSD lock debugging")
152  */
153 static int __init csdlock_debug(char *str)
154 {
155 	int ret;
156 	unsigned int val = 0;
157 
158 	ret = get_option(&str, &val);
159 	if (ret) {
160 		if (val)
161 			static_branch_enable(&csdlock_debug_enabled);
162 		else
163 			static_branch_disable(&csdlock_debug_enabled);
164 	}
165 
166 	return 1;
167 }
168 __setup("csdlock_debug=", csdlock_debug);
169 
170 static DEFINE_PER_CPU(call_single_data_t *, cur_csd);
171 static DEFINE_PER_CPU(smp_call_func_t, cur_csd_func);
172 static DEFINE_PER_CPU(void *, cur_csd_info);
173 
174 static ulong csd_lock_timeout = 5000;  /* CSD lock timeout in milliseconds. */
175 module_param(csd_lock_timeout, ulong, 0644);
176 static int panic_on_ipistall;  /* CSD panic timeout in milliseconds, 300000 for five minutes. */
177 module_param(panic_on_ipistall, int, 0644);
178 
179 static atomic_t csd_bug_count = ATOMIC_INIT(0);
180 
181 /* Record current CSD work for current CPU, NULL to erase. */
182 static void __csd_lock_record(call_single_data_t *csd)
183 {
184 	if (!csd) {
185 		smp_mb(); /* NULL cur_csd after unlock. */
186 		__this_cpu_write(cur_csd, NULL);
187 		return;
188 	}
189 	__this_cpu_write(cur_csd_func, csd->func);
190 	__this_cpu_write(cur_csd_info, csd->info);
191 	smp_wmb(); /* func and info before csd. */
192 	__this_cpu_write(cur_csd, csd);
193 	smp_mb(); /* Update cur_csd before function call. */
194 		  /* Or before unlock, as the case may be. */
195 }
196 
197 static __always_inline void csd_lock_record(call_single_data_t *csd)
198 {
199 	if (static_branch_unlikely(&csdlock_debug_enabled))
200 		__csd_lock_record(csd);
201 }
202 
203 static int csd_lock_wait_getcpu(call_single_data_t *csd)
204 {
205 	unsigned int csd_type;
206 
207 	csd_type = CSD_TYPE(csd);
208 	if (csd_type == CSD_TYPE_ASYNC || csd_type == CSD_TYPE_SYNC)
209 		return csd->node.dst; /* Other CSD_TYPE_ values might not have ->dst. */
210 	return -1;
211 }
212 
213 static atomic_t n_csd_lock_stuck;
214 
215 /**
216  * csd_lock_is_stuck - Has a CSD-lock acquisition been stuck too long?
217  *
218  * Returns %true if a CSD-lock acquisition is stuck and has been stuck
219  * long enough for a "non-responsive CSD lock" message to be printed.
220  */
221 bool csd_lock_is_stuck(void)
222 {
223 	return !!atomic_read(&n_csd_lock_stuck);
224 }
225 
226 /*
227  * Complain if too much time is spent waiting.  Note that only
228  * the CSD_TYPE_SYNC/ASYNC types provide the destination CPU,
229  * so waiting on other types gets much less information.
230  */
231 static bool csd_lock_wait_toolong(call_single_data_t *csd, u64 ts0, u64 *ts1, int *bug_id, unsigned long *nmessages)
232 {
233 	int cpu = -1;
234 	int cpux;
235 	bool firsttime;
236 	u64 ts2, ts_delta;
237 	call_single_data_t *cpu_cur_csd;
238 	unsigned int flags = READ_ONCE(csd->node.u_flags);
239 	unsigned long long csd_lock_timeout_ns = csd_lock_timeout * NSEC_PER_MSEC;
240 
241 	if (!(flags & CSD_FLAG_LOCK)) {
242 		if (!unlikely(*bug_id))
243 			return true;
244 		cpu = csd_lock_wait_getcpu(csd);
245 		pr_alert("csd: CSD lock (#%d) got unstuck on CPU#%02d, CPU#%02d released the lock.\n",
246 			 *bug_id, raw_smp_processor_id(), cpu);
247 		atomic_dec(&n_csd_lock_stuck);
248 		return true;
249 	}
250 
251 	ts2 = ktime_get_mono_fast_ns();
252 	/* How long since we last checked for a stuck CSD lock. */
253 	ts_delta = ts2 - *ts1;
254 	if (likely(ts_delta <= csd_lock_timeout_ns * (*nmessages + 1) *
255 			       (!*nmessages ? 1 : (ilog2(num_online_cpus()) / 2 + 1)) ||
256 		   csd_lock_timeout_ns == 0))
257 		return false;
258 
259 	if (ts0 > ts2) {
260 		/* Our own sched_clock went backward; don't blame another CPU. */
261 		ts_delta = ts0 - ts2;
262 		pr_alert("sched_clock on CPU %d went backward by %llu ns\n", raw_smp_processor_id(), ts_delta);
263 		*ts1 = ts2;
264 		return false;
265 	}
266 
267 	firsttime = !*bug_id;
268 	if (firsttime)
269 		*bug_id = atomic_inc_return(&csd_bug_count);
270 	cpu = csd_lock_wait_getcpu(csd);
271 	if (WARN_ONCE(cpu < 0 || cpu >= nr_cpu_ids, "%s: cpu = %d\n", __func__, cpu))
272 		cpux = 0;
273 	else
274 		cpux = cpu;
275 	cpu_cur_csd = smp_load_acquire(&per_cpu(cur_csd, cpux)); /* Before func and info. */
276 	/* How long since this CSD lock was stuck. */
277 	ts_delta = ts2 - ts0;
278 	pr_alert("csd: %s non-responsive CSD lock (#%d) on CPU#%d, waiting %lld ns for CPU#%02d %pS(%ps).\n",
279 		 firsttime ? "Detected" : "Continued", *bug_id, raw_smp_processor_id(), (s64)ts_delta,
280 		 cpu, csd->func, csd->info);
281 	(*nmessages)++;
282 	if (firsttime)
283 		atomic_inc(&n_csd_lock_stuck);
284 	/*
285 	 * If the CSD lock is still stuck after 5 minutes, it is unlikely
286 	 * to become unstuck. Use a signed comparison to avoid triggering
287 	 * on underflows when the TSC is out of sync between sockets.
288 	 */
289 	BUG_ON(panic_on_ipistall > 0 && (s64)ts_delta > ((s64)panic_on_ipistall * NSEC_PER_MSEC));
290 	if (cpu_cur_csd && csd != cpu_cur_csd) {
291 		pr_alert("\tcsd: CSD lock (#%d) handling prior %pS(%ps) request.\n",
292 			 *bug_id, READ_ONCE(per_cpu(cur_csd_func, cpux)),
293 			 READ_ONCE(per_cpu(cur_csd_info, cpux)));
294 	} else {
295 		pr_alert("\tcsd: CSD lock (#%d) %s.\n",
296 			 *bug_id, !cpu_cur_csd ? "unresponsive" : "handling this request");
297 	}
298 	if (cpu >= 0) {
299 		if (atomic_cmpxchg_acquire(&per_cpu(trigger_backtrace, cpu), 1, 0))
300 			dump_cpu_task(cpu);
301 		if (!cpu_cur_csd) {
302 			pr_alert("csd: Re-sending CSD lock (#%d) IPI from CPU#%02d to CPU#%02d\n", *bug_id, raw_smp_processor_id(), cpu);
303 			arch_send_call_function_single_ipi(cpu);
304 		}
305 	}
306 	if (firsttime)
307 		dump_stack();
308 	*ts1 = ts2;
309 
310 	return false;
311 }
312 
313 /*
314  * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
315  *
316  * For non-synchronous ipi calls the csd can still be in use by the
317  * previous function call. For multi-cpu calls it's even more interesting
318  * as we'll have to ensure no other cpu is observing our csd.
319  */
320 static void __csd_lock_wait(call_single_data_t *csd)
321 {
322 	unsigned long nmessages = 0;
323 	int bug_id = 0;
324 	u64 ts0, ts1;
325 
326 	ts1 = ts0 = ktime_get_mono_fast_ns();
327 	for (;;) {
328 		if (csd_lock_wait_toolong(csd, ts0, &ts1, &bug_id, &nmessages))
329 			break;
330 		cpu_relax();
331 	}
332 	smp_acquire__after_ctrl_dep();
333 }
334 
335 static __always_inline void csd_lock_wait(call_single_data_t *csd)
336 {
337 	if (static_branch_unlikely(&csdlock_debug_enabled)) {
338 		__csd_lock_wait(csd);
339 		return;
340 	}
341 
342 	smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
343 }
344 #else
345 static void csd_lock_record(call_single_data_t *csd)
346 {
347 }
348 
349 static __always_inline void csd_lock_wait(call_single_data_t *csd)
350 {
351 	smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
352 }
353 #endif
354 
355 static __always_inline void csd_lock(call_single_data_t *csd)
356 {
357 	csd_lock_wait(csd);
358 	csd->node.u_flags |= CSD_FLAG_LOCK;
359 
360 	/*
361 	 * prevent CPU from reordering the above assignment
362 	 * to ->flags with any subsequent assignments to other
363 	 * fields of the specified call_single_data_t structure:
364 	 */
365 	smp_wmb();
366 }
367 
368 static __always_inline void csd_unlock(call_single_data_t *csd)
369 {
370 	WARN_ON(!(csd->node.u_flags & CSD_FLAG_LOCK));
371 
372 	/*
373 	 * ensure we're all done before releasing data:
374 	 */
375 	smp_store_release(&csd->node.u_flags, 0);
376 }
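
/*
 * To summarize the CSD_FLAG_LOCK handshake that csd_lock()/csd_unlock()
 * implement, a condensed sketch of what a sender does (this is the shape
 * of generic_exec_single() and smp_call_function_many_cond() below):
 *
 *	csd_lock(csd);		// may spin until a prior user is done
 *	csd->func = func;
 *	csd->info = info;
 *	__smp_call_single_queue(cpu, &csd->node.llist);
 *	csd_lock_wait(csd);	// SYNC callers only; released by csd_unlock()
 *				// on the target CPU
 */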
377 
378 static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);
379 
380 void __smp_call_single_queue(int cpu, struct llist_node *node)
381 {
382 	/*
383 	 * We have to check the type of the CSD before queueing it, because
384 	 * once queued it can have its flags cleared by
385 	 *   flush_smp_call_function_queue()
386 	 * even if we haven't sent the smp_call IPI yet (e.g. the stopper
387 	 * executes migration_cpu_stop() on the remote CPU).
388 	 */
389 	if (trace_csd_queue_cpu_enabled()) {
390 		call_single_data_t *csd;
391 		smp_call_func_t func;
392 
393 		csd = container_of(node, call_single_data_t, node.llist);
394 		func = CSD_TYPE(csd) == CSD_TYPE_TTWU ?
395 			sched_ttwu_pending : csd->func;
396 
397 		trace_csd_queue_cpu(cpu, _RET_IP_, func, csd);
398 	}
399 
400 	/*
401 	 * The list addition should be visible to the target CPU when it pops
402 	 * the head of the list to pull the entry off it in the IPI handler
403 	 * because of normal cache coherency rules implied by the underlying
404 	 * llist ops.
405 	 *
406 	 * If IPIs can go out of order with respect to the cache coherency
407 	 * protocol on an architecture, sufficient synchronisation should be
408 	 * added to arch code to make it appear to obey cache coherency WRT
409 	 * locking and barrier primitives. Generic code isn't really
410 	 * equipped to do the right thing...
411 	 */
412 	if (llist_add(node, &per_cpu(call_single_queue, cpu)))
413 		send_call_function_single_ipi(cpu);
414 }
415 
416 /*
417  * Insert a previously allocated call_single_data_t element
418  * for execution on the given CPU. data must already have
419  * ->func, ->info, and ->flags set.
420  */
421 static int generic_exec_single(int cpu, call_single_data_t *csd)
422 {
423 	/*
424 	 * Preemption already disabled here so stopper cannot run on this CPU,
425 	 * ensuring mutually exclusive CPU offlining and last IPI flush.
426 	 */
427 	if (cpu == smp_processor_id()) {
428 		smp_call_func_t func = csd->func;
429 		void *info = csd->info;
430 		unsigned long flags;
431 
432 		/*
433 		 * We can unlock early even for the synchronous on-stack case,
434 		 * since we're doing this from the same CPU.
435 		 */
436 		csd_lock_record(csd);
437 		csd_unlock(csd);
438 		local_irq_save(flags);
439 		csd_do_func(func, info, NULL);
440 		csd_lock_record(NULL);
441 		local_irq_restore(flags);
442 		return 0;
443 	}
444 
445 	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
446 		csd_unlock(csd);
447 		return -ENXIO;
448 	}
449 
450 	__smp_call_single_queue(cpu, &csd->node.llist);
451 
452 	return 0;
453 }
454 
455 /**
456  * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
457  *
458  * Invoked by arch to handle an IPI for call function single.
459  * Must be called with interrupts disabled.
460  */
461 void generic_smp_call_function_single_interrupt(void)
462 {
463 	__flush_smp_call_function_queue(true);
464 }
465 
466 /**
467  * __flush_smp_call_function_queue - Flush pending smp-call-function callbacks
468  *
469  * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
470  *		      offline CPU. Skip this check if set to 'false'.
471  *
472  * Flush any pending smp-call-function callbacks queued on this CPU. This is
473  * invoked by the generic IPI handler, as well as by a CPU about to go offline,
474  * to ensure that all pending IPI callbacks are run before it goes completely
475  * offline.
476  *
477  * Loop through the call_single_queue and run all the queued callbacks.
478  * Must be called with interrupts disabled.
479  */
480 static void __flush_smp_call_function_queue(bool warn_cpu_offline)
481 {
482 	call_single_data_t *csd, *csd_next;
483 	struct llist_node *entry, *prev;
484 	struct llist_head *head;
485 	static bool warned;
486 	atomic_t *tbt;
487 
488 	lockdep_assert_irqs_disabled();
489 
490 	/* Allow waiters to send backtrace NMI from here onwards */
491 	tbt = this_cpu_ptr(&trigger_backtrace);
492 	atomic_set_release(tbt, 1);
493 
494 	head = this_cpu_ptr(&call_single_queue);
495 	entry = llist_del_all(head);
496 	entry = llist_reverse_order(entry);
497 
498 	/* There shouldn't be any pending callbacks on an offline CPU. */
499 	if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
500 		     !warned && entry != NULL)) {
501 		warned = true;
502 		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());
503 
504 		/*
505 		 * We don't have to use the _safe() variant here
506 		 * because we are not invoking the IPI handlers yet.
507 		 */
508 		llist_for_each_entry(csd, entry, node.llist) {
509 			switch (CSD_TYPE(csd)) {
510 			case CSD_TYPE_ASYNC:
511 			case CSD_TYPE_SYNC:
512 			case CSD_TYPE_IRQ_WORK:
513 				pr_warn("IPI callback %pS sent to offline CPU\n",
514 					csd->func);
515 				break;
516 
517 			case CSD_TYPE_TTWU:
518 				pr_warn("IPI task-wakeup sent to offline CPU\n");
519 				break;
520 
521 			default:
522 				pr_warn("IPI callback, unknown type %d, sent to offline CPU\n",
523 					CSD_TYPE(csd));
524 				break;
525 			}
526 		}
527 	}
528 
529 	/*
530 	 * First; run all SYNC callbacks, people are waiting for us.
531 	 */
532 	prev = NULL;
533 	llist_for_each_entry_safe(csd, csd_next, entry, node.llist) {
534 		/* Do we wait until *after* callback? */
535 		if (CSD_TYPE(csd) == CSD_TYPE_SYNC) {
536 			smp_call_func_t func = csd->func;
537 			void *info = csd->info;
538 
539 			if (prev) {
540 				prev->next = &csd_next->node.llist;
541 			} else {
542 				entry = &csd_next->node.llist;
543 			}
544 
545 			csd_lock_record(csd);
546 			csd_do_func(func, info, csd);
547 			csd_unlock(csd);
548 			csd_lock_record(NULL);
549 		} else {
550 			prev = &csd->node.llist;
551 		}
552 	}
553 
554 	if (!entry)
555 		return;
556 
557 	/*
558 	 * Second; run all !SYNC callbacks.
559 	 */
560 	prev = NULL;
561 	llist_for_each_entry_safe(csd, csd_next, entry, node.llist) {
562 		int type = CSD_TYPE(csd);
563 
564 		if (type != CSD_TYPE_TTWU) {
565 			if (prev) {
566 				prev->next = &csd_next->node.llist;
567 			} else {
568 				entry = &csd_next->node.llist;
569 			}
570 
571 			if (type == CSD_TYPE_ASYNC) {
572 				smp_call_func_t func = csd->func;
573 				void *info = csd->info;
574 
575 				csd_lock_record(csd);
576 				csd_unlock(csd);
577 				csd_do_func(func, info, csd);
578 				csd_lock_record(NULL);
579 			} else if (type == CSD_TYPE_IRQ_WORK) {
580 				irq_work_single(csd);
581 			}
582 
583 		} else {
584 			prev = &csd->node.llist;
585 		}
586 	}
587 
588 	/*
589 	 * Third; only CSD_TYPE_TTWU is left, issue those.
590 	 */
591 	if (entry) {
592 		csd = llist_entry(entry, typeof(*csd), node.llist);
593 		csd_do_func(sched_ttwu_pending, entry, csd);
594 	}
595 }
596 
597 
598 /**
599  * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
600  *				   from task context (idle, migration thread)
601  *
602  * When TIF_POLLING_NRFLAG is supported and a CPU is in idle and has it
603  * set, then remote CPUs can avoid sending IPIs and wake the idle CPU by
604  * setting TIF_NEED_RESCHED. The idle task on the woken up CPU has to
605  * handle queued SMP function calls before scheduling.
606  *
607  * The migration thread has to ensure that any pending wakeup has
608  * been handled before it migrates a task.
609  */
610 void flush_smp_call_function_queue(void)
611 {
612 	unsigned int was_pending;
613 	unsigned long flags;
614 
615 	if (llist_empty(this_cpu_ptr(&call_single_queue)))
616 		return;
617 
618 	local_irq_save(flags);
619 	/* Get the already pending soft interrupts for RT enabled kernels */
620 	was_pending = local_softirq_pending();
621 	__flush_smp_call_function_queue(true);
622 	if (local_softirq_pending())
623 		do_softirq_post_smp_call_flush(was_pending);
624 
625 	local_irq_restore(flags);
626 }
627 
628 /*
629  * smp_call_function_single - Run a function on a specific CPU
630  * @func: The function to run. This must be fast and non-blocking.
631  * @info: An arbitrary pointer to pass to the function.
632  * @wait: If true, wait until function has completed on other CPUs.
633  *
634  * Returns 0 on success, else a negative status code.
635  */
636 int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
637 			     int wait)
638 {
639 	call_single_data_t *csd;
640 	call_single_data_t csd_stack = {
641 		.node = { .u_flags = CSD_FLAG_LOCK | CSD_TYPE_SYNC, },
642 	};
643 	int this_cpu;
644 	int err;
645 
646 	/*
647 	 * Prevent preemption and reschedule on another CPU, as well as CPU
648 	 * removal. This prevents stopper from running on this CPU, thus
649 	 * providing mutual exclusion of the below cpu_online() check and
650 	 * IPI sending, ensuring IPIs are not missed by a CPU going offline.
651 	 */
652 	this_cpu = get_cpu();
653 
654 	/*
655 	 * Can deadlock when called with interrupts disabled.
656 	 * We allow CPUs that are not yet online though, as no one else can
657 	 * send smp call function interrupt to this cpu and as such deadlocks
658 	 * can't happen.
659 	 */
660 	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
661 		     && !oops_in_progress);
662 
663 	/*
664 	 * When @wait we can deadlock when we interrupt between llist_add() and
665 	 * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
666 	 * csd_lock(), because the interrupt context uses the same csd
667 	 * storage.
668 	 */
669 	WARN_ON_ONCE(!in_task());
670 
671 	csd = &csd_stack;
672 	if (!wait) {
673 		csd = this_cpu_ptr(&csd_data);
674 		csd_lock(csd);
675 	}
676 
677 	csd->func = func;
678 	csd->info = info;
679 #ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
680 	csd->node.src = smp_processor_id();
681 	csd->node.dst = cpu;
682 #endif
683 
684 	err = generic_exec_single(cpu, csd);
685 
686 	if (wait)
687 		csd_lock_wait(csd);
688 
689 	put_cpu();
690 
691 	return err;
692 }
693 EXPORT_SYMBOL(smp_call_function_single);
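
/*
 * Example usage (an illustrative sketch; the callback and variable names
 * are hypothetical): synchronously run a short function on another CPU
 * and collect its result through @info.
 *
 *	static void read_remote_counter(void *info)
 *	{
 *		*(u64 *)info = this_cpu_read(my_counter);  // runs on @cpu
 *	}
 *
 *	u64 val;
 *	int err = smp_call_function_single(cpu, read_remote_counter, &val, 1);
 *	// err == 0 on success, -ENXIO if the target CPU is offline.
 */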
694 
695 /**
696  * smp_call_function_single_async() - Run an asynchronous function on a
697  * 			         specific CPU.
698  * @cpu: The CPU to run on.
699  * @csd: Pre-allocated and setup data structure
700  *
701  * Like smp_call_function_single(), but the call is asynchronous and
702  * can thus be done from contexts with disabled interrupts.
703  *
704  * The caller passes its own pre-allocated data structure
705  * (i.e., embedded in an object) and is responsible for synchronizing it
706  * such that the IPIs performed on the @csd are strictly serialized.
707  *
708  * If the function is called with a csd that has not yet been
709  * processed by a previous call to smp_call_function_single_async(),
710  * the function will return immediately with -EBUSY, indicating that
711  * the csd object is still in use.
712  *
713  * NOTE: Be careful, there is unfortunately no current debugging facility to
714  * validate the correctness of this serialization.
715  *
716  * Return: %0 on success or negative errno value on error
717  */
718 int smp_call_function_single_async(int cpu, call_single_data_t *csd)
719 {
720 	int err = 0;
721 
722 	preempt_disable();
723 
724 	if (csd->node.u_flags & CSD_FLAG_LOCK) {
725 		err = -EBUSY;
726 		goto out;
727 	}
728 
729 	csd->node.u_flags = CSD_FLAG_LOCK;
730 	smp_wmb();
731 
732 	err = generic_exec_single(cpu, csd);
733 
734 out:
735 	preempt_enable();
736 
737 	return err;
738 }
739 EXPORT_SYMBOL_GPL(smp_call_function_single_async);
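
/*
 * Example usage (illustrative; names are hypothetical): a csd embedded in
 * a longer-lived object, fired from a context that must not wait.
 * INIT_CSD() comes from <linux/smp.h>.
 *
 *	static void my_ipi_handler(void *info) { ... }
 *	static call_single_data_t my_csd;
 *
 *	INIT_CSD(&my_csd, my_ipi_handler, NULL);	// once, at init time
 *	...
 *	if (smp_call_function_single_async(cpu, &my_csd) == -EBUSY)
 *		;	// a previous invocation of my_csd is still in flight
 */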
740 
741 /*
742  * smp_call_function_any - Run a function on any of the given cpus
743  * @mask: The mask of cpus it can run on.
744  * @func: The function to run. This must be fast and non-blocking.
745  * @info: An arbitrary pointer to pass to the function.
746  * @wait: If true, wait until function has completed.
747  *
748  * Returns 0 on success, else a negative status code (if no cpus were online).
749  *
750  * Selection preference:
751  *	1) current cpu if in @mask
752  *	2) nearest cpu in @mask, based on NUMA topology
753  */
754 int smp_call_function_any(const struct cpumask *mask,
755 			  smp_call_func_t func, void *info, int wait)
756 {
757 	unsigned int cpu;
758 	int ret;
759 
760 	/* Try for same CPU (cheapest) */
761 	cpu = get_cpu();
762 	if (!cpumask_test_cpu(cpu, mask))
763 		cpu = sched_numa_find_nth_cpu(mask, 0, cpu_to_node(cpu));
764 
765 	ret = smp_call_function_single(cpu, func, info, wait);
766 	put_cpu();
767 	return ret;
768 }
769 EXPORT_SYMBOL_GPL(smp_call_function_any);
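
/*
 * Example usage (illustrative; @nid, @func and @info are hypothetical):
 * run a function once, on the current CPU if it belongs to a node's
 * cpumask, otherwise on the topologically nearest CPU in that mask.
 *
 *	int err = smp_call_function_any(cpumask_of_node(nid), func, info, 1);
 */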
770 
771 /*
772  * Flags to be used as scf_flags argument of smp_call_function_many_cond().
773  *
774  * %SCF_WAIT:		Wait until function execution is completed
775  * %SCF_RUN_LOCAL:	Run also locally if local cpu is set in cpumask
776  */
777 #define SCF_WAIT	(1U << 0)
778 #define SCF_RUN_LOCAL	(1U << 1)
779 
780 static void smp_call_function_many_cond(const struct cpumask *mask,
781 					smp_call_func_t func, void *info,
782 					unsigned int scf_flags,
783 					smp_cond_func_t cond_func)
784 {
785 	int cpu, last_cpu, this_cpu = smp_processor_id();
786 	struct call_function_data *cfd;
787 	bool wait = scf_flags & SCF_WAIT;
788 	int nr_cpus = 0;
789 	bool run_remote = false;
790 
791 	lockdep_assert_preemption_disabled();
792 
793 	/*
794 	 * Can deadlock when called with interrupts disabled.
795 	 * We allow CPUs that are not yet online though, as no one else can
796 	 * send smp call function interrupt to this cpu and as such deadlocks
797 	 * can't happen.
798 	 */
799 	if (cpu_online(this_cpu) && !oops_in_progress &&
800 	    !early_boot_irqs_disabled)
801 		lockdep_assert_irqs_enabled();
802 
803 	/*
804 	 * When @wait we can deadlock when we interrupt between llist_add() and
805 	 * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
806 	 * csd_lock(), because the interrupt context uses the same csd
807 	 * storage.
808 	 */
809 	WARN_ON_ONCE(!in_task());
810 
811 	/* Check if we need remote execution, i.e., any CPU excluding this one. */
812 	if (cpumask_any_and_but(mask, cpu_online_mask, this_cpu) < nr_cpu_ids) {
813 		cfd = this_cpu_ptr(&cfd_data);
814 		cpumask_and(cfd->cpumask, mask, cpu_online_mask);
815 		__cpumask_clear_cpu(this_cpu, cfd->cpumask);
816 
817 		cpumask_clear(cfd->cpumask_ipi);
818 		for_each_cpu(cpu, cfd->cpumask) {
819 			call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);
820 
821 			if (cond_func && !cond_func(cpu, info)) {
822 				__cpumask_clear_cpu(cpu, cfd->cpumask);
823 				continue;
824 			}
825 
826 			/* Work is enqueued on a remote CPU. */
827 			run_remote = true;
828 
829 			csd_lock(csd);
830 			if (wait)
831 				csd->node.u_flags |= CSD_TYPE_SYNC;
832 			csd->func = func;
833 			csd->info = info;
834 #ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
835 			csd->node.src = smp_processor_id();
836 			csd->node.dst = cpu;
837 #endif
838 			trace_csd_queue_cpu(cpu, _RET_IP_, func, csd);
839 
840 			/*
841 			 * Kick the remote CPU if this is the first work
842 			 * item enqueued.
843 			 */
844 			if (llist_add(&csd->node.llist, &per_cpu(call_single_queue, cpu))) {
845 				__cpumask_set_cpu(cpu, cfd->cpumask_ipi);
846 				nr_cpus++;
847 				last_cpu = cpu;
848 			}
849 		}
850 
851 		/*
852 		 * Choose the most efficient way to send an IPI. Note that the
853 		 * number of CPUs might be zero due to concurrent changes to the
854 		 * provided mask.
855 		 */
856 		if (nr_cpus == 1)
857 			send_call_function_single_ipi(last_cpu);
858 		else if (likely(nr_cpus > 1))
859 			send_call_function_ipi_mask(cfd->cpumask_ipi);
860 	}
861 
862 	/* Check if we need local execution. */
863 	if ((scf_flags & SCF_RUN_LOCAL) && cpumask_test_cpu(this_cpu, mask) &&
864 	    (!cond_func || cond_func(this_cpu, info))) {
865 		unsigned long flags;
866 
867 		local_irq_save(flags);
868 		csd_do_func(func, info, NULL);
869 		local_irq_restore(flags);
870 	}
871 
872 	if (run_remote && wait) {
873 		for_each_cpu(cpu, cfd->cpumask) {
874 			call_single_data_t *csd;
875 
876 			csd = per_cpu_ptr(cfd->csd, cpu);
877 			csd_lock_wait(csd);
878 		}
879 	}
880 }
881 
882 /**
883  * smp_call_function_many(): Run a function on a set of other CPUs.
884  * @mask: The set of cpus to run on (only runs on online subset).
885  * @func: The function to run. This must be fast and non-blocking.
886  * @info: An arbitrary pointer to pass to the function.
887  * @wait: If true, wait (atomically) until function has completed
888  *        on other CPUs. (This wrapper never passes %SCF_RUN_LOCAL to
889  *        smp_call_function_many_cond(), so @func is not run on the
890  *        local CPU even if the local CPU is set in @mask.)
891  *
892  * If @wait is true, then returns once @func has returned.
893  *
894  * You must not call this function with disabled interrupts or from a
895  * hardware interrupt handler or from a bottom half handler. Preemption
896  * must be disabled when calling this function.
897  */
898 void smp_call_function_many(const struct cpumask *mask,
899 			    smp_call_func_t func, void *info, bool wait)
900 {
901 	smp_call_function_many_cond(mask, func, info, wait * SCF_WAIT, NULL);
902 }
903 EXPORT_SYMBOL(smp_call_function_many);
904 
905 /**
906  * smp_call_function(): Run a function on all other CPUs.
907  * @func: The function to run. This must be fast and non-blocking.
908  * @info: An arbitrary pointer to pass to the function.
909  * @wait: If true, wait (atomically) until function has completed
910  *        on other CPUs.
911  *
912  * Returns 0.
913  *
914  * If @wait is true, then returns once @func has returned; otherwise
915  * it returns just before the target cpu calls @func.
916  *
917  * You must not call this function with disabled interrupts or from a
918  * hardware interrupt handler or from a bottom half handler.
919  */
920 void smp_call_function(smp_call_func_t func, void *info, int wait)
921 {
922 	preempt_disable();
923 	smp_call_function_many(cpu_online_mask, func, info, wait);
924 	preempt_enable();
925 }
926 EXPORT_SYMBOL(smp_call_function);
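
/*
 * Example usage (illustrative): kick_all_cpus_sync() below is the
 * simplest caller in this file; it broadcasts a no-op and waits, purely
 * for the side effect of forcing every other CPU through an interrupt:
 *
 *	smp_call_function(do_nothing, NULL, 1);
 */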
927 
928 /* Setup configured maximum number of CPUs to activate */
929 unsigned int setup_max_cpus = NR_CPUS;
930 EXPORT_SYMBOL(setup_max_cpus);
931 
932 
933 /*
934  * Setup routine for controlling SMP activation
935  *
936  * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
937  * activation entirely (the MPS table probe still happens, though).
938  *
939  * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
940  * greater than 0, limits the maximum number of CPUs activated in
941  * SMP mode to <NUM>.
942  */
943 
944 void __weak __init arch_disable_smp_support(void) { }
945 
946 static int __init nosmp(char *str)
947 {
948 	setup_max_cpus = 0;
949 	arch_disable_smp_support();
950 
951 	return 0;
952 }
953 
954 early_param("nosmp", nosmp);
955 
956 /* this is the hard limit */
957 static int __init nrcpus(char *str)
958 {
959 	int nr_cpus;
960 
961 	if (get_option(&str, &nr_cpus) && nr_cpus > 0 && nr_cpus < nr_cpu_ids)
962 		set_nr_cpu_ids(nr_cpus);
963 
964 	return 0;
965 }
966 
967 early_param("nr_cpus", nrcpus);
968 
969 static int __init maxcpus(char *str)
970 {
971 	get_option(&str, &setup_max_cpus);
972 	if (setup_max_cpus == 0)
973 		arch_disable_smp_support();
974 
975 	return 0;
976 }
977 
978 early_param("maxcpus", maxcpus);
979 
980 #if (NR_CPUS > 1) && !defined(CONFIG_FORCE_NR_CPUS)
981 /* Setup number of possible processor ids */
982 unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
983 EXPORT_SYMBOL(nr_cpu_ids);
984 #endif
985 
986 /* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
987 void __init setup_nr_cpu_ids(void)
988 {
989 	set_nr_cpu_ids(find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1);
990 }
991 
992 /* Called by boot processor to activate the rest. */
993 void __init smp_init(void)
994 {
995 	int num_nodes, num_cpus;
996 
997 	idle_threads_init();
998 	cpuhp_threads_init();
999 
1000 	pr_info("Bringing up secondary CPUs ...\n");
1001 
1002 	bringup_nonboot_cpus(setup_max_cpus);
1003 
1004 	num_nodes = num_online_nodes();
1005 	num_cpus  = num_online_cpus();
1006 	pr_info("Brought up %d node%s, %d CPU%s\n",
1007 		num_nodes, str_plural(num_nodes), num_cpus, str_plural(num_cpus));
1008 
1009 	/* Any cleanup work */
1010 	smp_cpus_done(setup_max_cpus);
1011 }
1012 
1013 /*
1014  * on_each_cpu_cond_mask(): Call a function on each processor for which
1015  * the supplied function cond_func returns true, optionally waiting
1016  * for all the required CPUs to finish. This may include the local
1017  * processor.
1018  * @cond_func:	A callback function that is passed a cpu id and
1019  *		the info parameter. The function is called
1020  *		with preemption disabled. The function should
1021  *		return a boolean value indicating whether to IPI
1022  *		the specified CPU.
1023  * @func:	The function to run on all applicable CPUs.
1024  *		This must be fast and non-blocking.
1025  * @info:	An arbitrary pointer to pass to both functions.
1026  * @wait:	If true, wait (atomically) until function has
1027  *		completed on other CPUs.
1028  *
1029  * Preemption is disabled to protect against CPUs going offline but not online.
1030  * CPUs going online during the call will not be seen or sent an IPI.
1031  *
1032  * You must not call this function with disabled interrupts or
1033  * from a hardware interrupt handler or from a bottom half handler.
1034  */
1035 void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
1036 			   void *info, bool wait, const struct cpumask *mask)
1037 {
1038 	unsigned int scf_flags = SCF_RUN_LOCAL;
1039 
1040 	if (wait)
1041 		scf_flags |= SCF_WAIT;
1042 
1043 	preempt_disable();
1044 	smp_call_function_many_cond(mask, func, info, scf_flags, cond_func);
1045 	preempt_enable();
1046 }
1047 EXPORT_SYMBOL(on_each_cpu_cond_mask);
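
/*
 * Example usage (illustrative; the predicate and callback names are
 * hypothetical): IPI only the CPUs for which the predicate reports work,
 * run locally too if applicable, and wait for completion.
 *
 *	static bool cpu_has_work(int cpu, void *info) { ... }
 *	static void do_work(void *info) { ... }
 *
 *	on_each_cpu_cond_mask(cpu_has_work, do_work, NULL, true,
 *			      cpu_online_mask);
 */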
1048 
1049 static void do_nothing(void *unused)
1050 {
1051 }
1052 
1053 /**
1054  * kick_all_cpus_sync - Force all cpus out of idle
1055  *
1056  * Used to synchronize the update of pm_idle function pointer. It's
1057  * called after the pointer is updated and returns after the dummy
1058  * callback function has been executed on all cpus. The execution of
1059  * the function can only happen on the remote cpus after they have
1060  * left the idle function which had been called via pm_idle function
1061  * pointer. So it's guaranteed that nothing uses the previous pointer
1062  * anymore.
1063  */
1064 void kick_all_cpus_sync(void)
1065 {
1066 	/* Make sure the change is visible before we kick the cpus */
1067 	smp_mb();
1068 	smp_call_function(do_nothing, NULL, 1);
1069 }
1070 EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
1071 
1072 /**
1073  * wake_up_all_idle_cpus - break all cpus out of idle
1074  * wake_up_all_idle_cpus tries to break all CPUs out of idle, including
1075  * CPUs that are polling in idle; CPUs that are not idle are left
1076  * alone.
1077  */
1078 void wake_up_all_idle_cpus(void)
1079 {
1080 	int cpu;
1081 
1082 	for_each_possible_cpu(cpu) {
1083 		preempt_disable();
1084 		if (cpu != smp_processor_id() && cpu_online(cpu))
1085 			wake_up_if_idle(cpu);
1086 		preempt_enable();
1087 	}
1088 }
1089 EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);
1090 
1091 /**
1092  * struct smp_call_on_cpu_struct - Call a function on a specific CPU
1093  * @work: &work_struct
1094  * @done: &completion to signal
1095  * @func: function to call
1096  * @data: function's data argument
1097  * @ret: return value from @func
1098  * @cpu: target CPU (%-1 for any CPU)
1099  *
1100  * Used to call a function on a specific cpu and wait for it to return.
1101  * Optionally make sure the call is done on a specified physical cpu via vcpu
1102  * pinning in order to support virtualized environments.
1103  */
1104 struct smp_call_on_cpu_struct {
1105 	struct work_struct	work;
1106 	struct completion	done;
1107 	int			(*func)(void *);
1108 	void			*data;
1109 	int			ret;
1110 	int			cpu;
1111 };
1112 
1113 static void smp_call_on_cpu_callback(struct work_struct *work)
1114 {
1115 	struct smp_call_on_cpu_struct *sscs;
1116 
1117 	sscs = container_of(work, struct smp_call_on_cpu_struct, work);
1118 	if (sscs->cpu >= 0)
1119 		hypervisor_pin_vcpu(sscs->cpu);
1120 	sscs->ret = sscs->func(sscs->data);
1121 	if (sscs->cpu >= 0)
1122 		hypervisor_pin_vcpu(-1);
1123 
1124 	complete(&sscs->done);
1125 }
1126 
1127 int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
1128 {
1129 	struct smp_call_on_cpu_struct sscs = {
1130 		.done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
1131 		.func = func,
1132 		.data = par,
1133 		.cpu  = phys ? cpu : -1,
1134 	};
1135 
1136 	INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);
1137 
1138 	if (cpu >= nr_cpu_ids || !cpu_online(cpu))
1139 		return -ENXIO;
1140 
1141 	queue_work_on(cpu, system_wq, &sscs.work);
1142 	wait_for_completion(&sscs.done);
1143 	destroy_work_on_stack(&sscs.work);
1144 
1145 	return sscs.ret;
1146 }
1147 EXPORT_SYMBOL_GPL(smp_call_on_cpu);
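
/*
 * Example usage (illustrative; the function name is hypothetical): unlike
 * the IPI-based helpers above, the callback here runs from a workqueue
 * worker on the target CPU and may therefore sleep.
 *
 *	static int read_platform_state(void *arg)
 *	{
 *		...		// may sleep; runs on the requested CPU
 *		return 0;
 *	}
 *
 *	int ret = smp_call_on_cpu(cpu, read_platform_state, NULL, false);
 */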
1148