/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SMP_H
#define __LINUX_SMP_H

/*
 *	Generic SMP support
 *		Alan Cox <alan@redhat.com>
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/smp_types.h>

typedef void (*smp_call_func_t)(void *info);
typedef bool (*smp_cond_func_t)(int cpu, void *info);

/*
 * structure shares (partial) layout with struct irq_work
 */
struct __call_single_data {
	struct __call_single_node node;
	smp_call_func_t func;
	void *info;
};

#define CSD_INIT(_func, _info) \
	(struct __call_single_data){ .func = (_func), .info = (_info), }

/* Use __aligned() to avoid using 2 cache lines for 1 csd */
typedef struct __call_single_data call_single_data_t
	__aligned(sizeof(struct __call_single_data));

#define INIT_CSD(_csd, _func, _info)		\
do {						\
	*(_csd) = CSD_INIT((_func), (_info));	\
} while (0)

/*
 * Enqueue a llist_node on the call_single_queue; be very careful, read
 * flush_smp_call_function_queue() in detail.
 */
extern void __smp_call_single_queue(int cpu, struct llist_node *node);

/* total number of cpus in this system (may exceed NR_CPUS) */
extern unsigned int total_cpus;

int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
			     int wait);

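/*
 * Example: a minimal, hypothetical sketch of a synchronous cross-CPU
 * call; bump_hits(), hits and cpu are made up and not part of this
 * header. @func runs on @cpuid in interrupt context, so it must be
 * fast and non-blocking; with wait == 1 the caller does not return
 * until @func has completed.
 *
 *	static atomic_t hits = ATOMIC_INIT(0);
 *
 *	static void bump_hits(void *info)
 *	{
 *		atomic_inc(info);
 *	}
 *
 *	err = smp_call_function_single(cpu, bump_hits, &hits, 1);
 */
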
void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
			   void *info, bool wait, const struct cpumask *mask);

int smp_call_function_single_async(int cpu, call_single_data_t *csd);

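/*
 * Example: a minimal, hypothetical sketch of the asynchronous variant;
 * struct my_work and its users are made up. The csd stays busy until
 * @func has run, so it must not be reused or freed before then;
 * smp_call_function_single_async() returns -EBUSY if the previous
 * call is still pending.
 *
 *	struct my_work {
 *		call_single_data_t csd;
 *		int payload;
 *	};
 *
 *	static void my_work_func(void *info)
 *	{
 *		struct my_work *w = info;
 *
 *		w->payload++;
 *	}
 *
 *	INIT_CSD(&w->csd, my_work_func, w);
 *	err = smp_call_function_single_async(cpu, &w->csd);
 */
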
/*
 * Functions for stopping other CPUs during a panic. All have default
 * weak definitions; architecture-dependent code may override them.
 */
void __noreturn panic_smp_self_stop(void);
void __noreturn nmi_panic_self_stop(struct pt_regs *regs);
void crash_smp_send_stop(void);
int panic_smp_redirect_cpu(int target_cpu, void *msg);

/*
 * Call a function on all processors
 */
static inline void on_each_cpu(smp_call_func_t func, void *info, int wait)
{
	on_each_cpu_cond_mask(NULL, func, info, wait, cpu_online_mask);
}
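
/*
 * Example: a minimal, hypothetical sketch; reset_local_state() and the
 * per-cpu variable my_state are made up. The callback runs on every
 * online CPU, including this one, and with wait == 1 on_each_cpu()
 * does not return until all of them have finished.
 *
 *	static DEFINE_PER_CPU(int, my_state);
 *
 *	static void reset_local_state(void *unused)
 *	{
 *		__this_cpu_write(my_state, 0);
 *	}
 *
 *	on_each_cpu(reset_local_state, NULL, 1);
 */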

/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.  The
 * exception is that it may be used during early boot while
 * early_boot_irqs_disabled is set.
 */
static inline void on_each_cpu_mask(const struct cpumask *mask,
				    smp_call_func_t func, void *info, bool wait)
{
	on_each_cpu_cond_mask(NULL, func, info, wait, mask);
}

/*
 * Call a function on each processor for which the supplied function
 * cond_func returns true. This may include the local processor.
 * May be used during early boot while early_boot_irqs_disabled is
 * set. Use local_irq_save/restore() instead of local_irq_disable/enable().
 */
static inline void on_each_cpu_cond(smp_cond_func_t cond_func,
				    smp_call_func_t func, void *info, bool wait)
{
	on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
}
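
/*
 * Example: a hypothetical sketch of the conditional variant; both
 * callbacks and the per-cpu flag are made up. The IPI is only sent to
 * CPUs for which @cond_func returned true.
 *
 *	static DEFINE_PER_CPU(bool, cache_dirty);
 *
 *	static bool cache_is_dirty(int cpu, void *info)
 *	{
 *		return per_cpu(cache_dirty, cpu);
 *	}
 *
 *	static void writeback_cache(void *info)
 *	{
 *		__this_cpu_write(cache_dirty, false);
 *	}
 *
 *	on_each_cpu_cond(cache_is_dirty, writeback_cache, NULL, true);
 */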

/*
 * Architecture specific boot CPU setup.  Defined as empty weak function in
 * init/main.c. Architectures can override it.
 */
void __init smp_prepare_boot_cpu(void);

#ifdef CONFIG_SMP

#include <linux/preempt.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <asm/smp.h>

/*
 * main cross-CPU interfaces, handles INIT, TLB flush, STOP, etc.
 * (defined in asm header):
 */

/*
 * stops all CPUs but the current one:
 */
extern void smp_send_stop(void);

/*
 * sends a 'reschedule' event to another CPU:
 */
extern void arch_smp_send_reschedule(int cpu);
/*
 * scheduler_ipi() is inline so can't be passed as callback reason, but the
 * callsite IP should be sufficient for root-causing IPIs sent from here.
 */
#define smp_send_reschedule(cpu) ({		  \
	trace_ipi_send_cpu(cpu, _RET_IP_, NULL);  \
	arch_smp_send_reschedule(cpu);		  \
})

/*
 * Prepare machine for booting other CPUs.
 */
extern void smp_prepare_cpus(unsigned int max_cpus);

/*
 * Bring a CPU up
 */
extern int __cpu_up(unsigned int cpunum, struct task_struct *tidle);

/*
 * Final polishing of CPUs
 */
extern void smp_cpus_done(unsigned int max_cpus);

/*
 * Call a function on all other processors
 */
void smp_call_function(smp_call_func_t func, void *info, int wait);
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait);

int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait);

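/*
 * Example: hypothetical one-liners for the cross-call variants above,
 * using a made-up do_sync() callback and cpumask @mask.
 * smp_call_function() targets every other online CPU;
 * smp_call_function_many() the CPUs in @mask except the caller;
 * smp_call_function_any() a single CPU from @mask, preferring the
 * current one.
 *
 *	smp_call_function(do_sync, NULL, 1);
 *	smp_call_function_many(mask, do_sync, NULL, true);
 *	err = smp_call_function_any(mask, do_sync, NULL, 1);
 */
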
void kick_all_cpus_sync(void);
void wake_up_all_idle_cpus(void);
bool cpus_peek_for_pending_ipi(const struct cpumask *mask);

/*
 * Generic and arch helpers
 */
void __init call_function_init(void);
void generic_smp_call_function_single_interrupt(void);
#define generic_smp_call_function_interrupt \
	generic_smp_call_function_single_interrupt

extern unsigned int setup_max_cpus;
extern void __init setup_nr_cpu_ids(void);
extern void __init smp_init(void);

extern int __boot_cpu_id;

static inline int get_boot_cpu_id(void)
{
	return __boot_cpu_id;
}

#else /* !SMP */

static inline void smp_send_stop(void) { }

/*
 *	These macros fold the SMP functionality into a single CPU system
 */
#define raw_smp_processor_id()			0
static inline void up_smp_call_function(smp_call_func_t func, void *info)
{
}
#define smp_call_function(func, info, wait) \
			(up_smp_call_function(func, info))

static inline void smp_send_reschedule(int cpu) { }
#define smp_call_function_many(mask, func, info, wait) \
			(up_smp_call_function(func, info))
static inline void call_function_init(void) { }

static inline int
smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
		      void *info, int wait)
{
	return smp_call_function_single(0, func, info, wait);
}

static inline void kick_all_cpus_sync(void) {  }
static inline void wake_up_all_idle_cpus(void) {  }
static inline bool cpus_peek_for_pending_ipi(const struct cpumask *mask)
{
	return false;
}

#define setup_max_cpus 0

#ifdef CONFIG_UP_LATE_INIT
extern void __init up_late_init(void);
static __always_inline void smp_init(void) { up_late_init(); }
#else
static inline void smp_init(void) { }
#endif

static inline int get_boot_cpu_id(void)
{
	return 0;
}

#endif /* !SMP */

/**
 * raw_smp_processor_id() - get the current (unstable) CPU id
 *
 * For when you know what you are doing and need an unstable
 * CPU id.
 */

/**
 * smp_processor_id() - get the current (stable) CPU id
 *
 * This is the normal accessor to the CPU id and should be used
 * whenever possible.
 *
 * The CPU id is stable when:
 *
 *  - IRQs are disabled;
 *  - preemption is disabled;
 *  - the task is CPU affine.
 *
 * When CONFIG_DEBUG_PREEMPT is enabled, we verify these assumptions
 * and WARN when smp_processor_id() is used while the CPU id is not
 * stable.
 */
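
/*
 * Example: a minimal sketch of stable vs. unstable use;
 * do_per_cpu_work() is a made-up placeholder. The first read is
 * stable because preemption is disabled around it; the raw read may
 * already be stale when it is used, which is only acceptable for
 * things like statistics, tracing or error messages.
 *
 *	preempt_disable();
 *	cpu = smp_processor_id();
 *	do_per_cpu_work(cpu);
 *	preempt_enable();
 *
 *	pr_debug("last seen on CPU%d\n", raw_smp_processor_id());
 */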

/*
 * Allow the architecture to differentiate between a stable and unstable read.
 * For example, x86 uses an IRQ-safe asm-volatile read for the unstable case
 * but a regular asm read for the stable case.
 */
#ifndef __smp_processor_id
#define __smp_processor_id() raw_smp_processor_id()
#endif

#ifdef CONFIG_DEBUG_PREEMPT
  extern unsigned int debug_smp_processor_id(void);
# define smp_processor_id() debug_smp_processor_id()
#else
# define smp_processor_id() __smp_processor_id()
#endif

#define get_cpu()		({ preempt_disable(); __smp_processor_id(); })
#define put_cpu()		preempt_enable()
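
/*
 * Example: get_cpu()/put_cpu() are the usual way to obtain a stable
 * CPU id for a short, non-sleeping section; the per-cpu counter is
 * made up. get_cpu() disables preemption, so nothing in between may
 * block.
 *
 *	static DEFINE_PER_CPU(unsigned long, my_counter);
 *
 *	int cpu = get_cpu();
 *
 *	per_cpu(my_counter, cpu)++;
 *	put_cpu();
 */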

/*
 * Callback to arch code if there's nosmp or maxcpus=0 on the
 * boot command line:
 */
extern void arch_disable_smp_support(void);

extern void arch_thaw_secondary_cpus_begin(void);
extern void arch_thaw_secondary_cpus_end(void);

void smp_setup_processor_id(void);

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par,
		    bool phys);
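
/*
 * Example: a hypothetical sketch; read_slow_sensor(), its helper and
 * data are made up. Unlike the IPI-based calls above, smp_call_on_cpu()
 * runs @func in process context on @cpu and waits for it, so @func may
 * sleep; @func's return value is passed back to the caller. @phys asks
 * for pinning to a physical CPU, for virtualized environments.
 *
 *	static int read_slow_sensor(void *par)
 *	{
 *		return fetch_sensor_value(par);
 *	}
 *
 *	err = smp_call_on_cpu(2, read_slow_sensor, &data, false);
 */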

/* SMP core functions */
int smpcfd_prepare_cpu(unsigned int cpu);
int smpcfd_dead_cpu(unsigned int cpu);
int smpcfd_dying_cpu(unsigned int cpu);

#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
bool csd_lock_is_stuck(void);
#else
static inline bool csd_lock_is_stuck(void) { return false; }
#endif

#endif /* __LINUX_SMP_H */