xref: /linux/kernel/up.c (revision 24e8a2ca1f74574ad2ed1ac7af0260dd90fd911e)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Uniprocessor-only support functions.  The counterpart to kernel/smp.c
4  */
5 
6 #include <linux/interrupt.h>
7 #include <linux/kernel.h>
8 #include <linux/export.h>
9 #include <linux/smp.h>
10 #include <linux/hypervisor.h>
11 
/*
 * On UP "calling a function on another CPU" degenerates into a direct
 * call on CPU 0, made with interrupts disabled to mimic the IPI-handler
 * context the SMP implementation would provide.
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
				int wait)
{
	unsigned long irqflags;

	/* CPU 0 is the only CPU that can exist on a UP kernel. */
	WARN_ON(cpu != 0);

	local_irq_save(irqflags);
	func(info);
	local_irq_restore(irqflags);

	return 0;
}
EXPORT_SYMBOL(smp_call_function_single);
26 
27 int smp_call_function_single_async(int cpu, call_single_data_t *csd)
28 {
29 	unsigned long flags;
30 
31 	local_irq_save(flags);
32 	csd->func(csd->info);
33 	local_irq_restore(flags);
34 	return 0;
35 }
36 EXPORT_SYMBOL(smp_call_function_single_async);
37 
38 int on_each_cpu(smp_call_func_t func, void *info, int wait)
39 {
40 	unsigned long flags;
41 
42 	local_irq_save(flags);
43 	func(info);
44 	local_irq_restore(flags);
45 	return 0;
46 }
47 EXPORT_SYMBOL(on_each_cpu);
48 
49 /*
50  * Note we still need to test the mask even for UP
51  * because we actually can get an empty mask from
52  * code that on SMP might call us without the local
53  * CPU in the mask.
54  */
55 void on_each_cpu_mask(const struct cpumask *mask,
56 		      smp_call_func_t func, void *info, bool wait)
57 {
58 	unsigned long flags;
59 
60 	if (cpumask_test_cpu(0, mask)) {
61 		local_irq_save(flags);
62 		func(info);
63 		local_irq_restore(flags);
64 	}
65 }
66 EXPORT_SYMBOL(on_each_cpu_mask);
67 
68 /*
69  * Preemption is disabled here to make sure the cond_func is called under the
70  * same condtions in UP and SMP.
71  */
72 void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
73 			   smp_call_func_t func, void *info, bool wait,
74 			   gfp_t gfp_flags, const struct cpumask *mask)
75 {
76 	unsigned long flags;
77 
78 	preempt_disable();
79 	if (cond_func(0, info)) {
80 		local_irq_save(flags);
81 		func(info);
82 		local_irq_restore(flags);
83 	}
84 	preempt_enable();
85 }
86 EXPORT_SYMBOL(on_each_cpu_cond_mask);
87 
88 void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
89 		      smp_call_func_t func, void *info, bool wait,
90 		      gfp_t gfp_flags)
91 {
92 	on_each_cpu_cond_mask(cond_func, func, info, wait, gfp_flags, NULL);
93 }
94 EXPORT_SYMBOL(on_each_cpu_cond);
95 
96 int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
97 {
98 	int ret;
99 
100 	if (cpu != 0)
101 		return -ENXIO;
102 
103 	if (phys)
104 		hypervisor_pin_vcpu(0);
105 	ret = func(par);
106 	if (phys)
107 		hypervisor_pin_vcpu(-1);
108 
109 	return ret;
110 }
111 EXPORT_SYMBOL_GPL(smp_call_on_cpu);
112