/*
 * Uniprocessor-only support functions.  The counterpart to kernel/smp.c
 */

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/smp.h>
#include <linux/hypervisor.h>

int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
				int wait)
{
	unsigned long flags;

	WARN_ON(cpu != 0);

	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);

	return 0;
}
EXPORT_SYMBOL(smp_call_function_single);
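
/*
 * Illustrative usage sketch (not part of the original file; ipi_handler
 * is a hypothetical callback). On UP the call simply runs the function
 * locally with interrupts disabled:
 *
 *	static void ipi_handler(void *info)
 *	{
 *		pr_info("ran on CPU %d\n", smp_processor_id());
 *	}
 *
 *	smp_call_function_single(0, ipi_handler, NULL, 1);
 */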

int smp_call_function_single_async(int cpu, call_single_data_t *csd)
{
	unsigned long flags;

	local_irq_save(flags);
	csd->func(csd->info);
	local_irq_restore(flags);
	return 0;
}
EXPORT_SYMBOL(smp_call_function_single_async);
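
/*
 * Illustrative sketch (not part of the original file): the caller owns
 * the call_single_data_t and fills in func/info before the call; on UP
 * the "async" call is executed synchronously. Names are hypothetical:
 *
 *	static call_single_data_t my_csd = {
 *		.func = ipi_handler,
 *		.info = NULL,
 *	};
 *
 *	smp_call_function_single_async(0, &my_csd);
 */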

int on_each_cpu(smp_call_func_t func, void *info, int wait)
{
	unsigned long flags;

	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);
	return 0;
}
EXPORT_SYMBOL(on_each_cpu);
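
/*
 * Illustrative sketch (not part of the original file): "each CPU" on a
 * uniprocessor build means exactly one invocation, on CPU 0, with
 * interrupts disabled:
 *
 *	on_each_cpu(ipi_handler, NULL, 1);
 */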

/*
 * Note we still need to test the mask even for UP
 * because we actually can get an empty mask from
 * code that on SMP might call us without the local
 * CPU in the mask.
 */
void on_each_cpu_mask(const struct cpumask *mask,
		      smp_call_func_t func, void *info, bool wait)
{
	unsigned long flags;

	if (cpumask_test_cpu(0, mask)) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(on_each_cpu_mask);
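
/*
 * Illustrative sketch (not part of the original file): cpumask_of(0)
 * yields a mask containing only CPU 0, so func runs; with an empty
 * mask the call is a no-op, as the comment above explains:
 *
 *	on_each_cpu_mask(cpumask_of(0), ipi_handler, NULL, true);
 */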

/*
 * Preemption is disabled here to make sure the cond_func is called under the
 * same conditions in UP and SMP.
 */
void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
			   smp_call_func_t func, void *info, bool wait,
			   gfp_t gfp_flags, const struct cpumask *mask)
{
	unsigned long flags;

	preempt_disable();
	if (cond_func(0, info)) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	preempt_enable();
}
EXPORT_SYMBOL(on_each_cpu_cond_mask);

void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
		      smp_call_func_t func, void *info, bool wait,
		      gfp_t gfp_flags)
{
	on_each_cpu_cond_mask(cond_func, func, info, wait, gfp_flags, NULL);
}
EXPORT_SYMBOL(on_each_cpu_cond);
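
/*
 * Illustrative sketch (not part of the original file; should_run is a
 * hypothetical predicate): func only runs where cond_func returns true,
 * which on UP reduces to a single check against CPU 0. The _mask
 * variant above additionally takes a cpumask to restrict candidates:
 *
 *	static bool should_run(int cpu, void *info)
 *	{
 *		return cpu == 0;
 *	}
 *
 *	on_each_cpu_cond(should_run, ipi_handler, NULL, true, GFP_KERNEL);
 */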

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
	int ret;

	if (cpu != 0)
		return -ENXIO;

	if (phys)
		hypervisor_pin_vcpu(0);
	ret = func(par);
	if (phys)
		hypervisor_pin_vcpu(-1);

	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_on_cpu);
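
/*
 * Illustrative sketch (not part of the original file; work_fn is a
 * hypothetical callback): unlike the void callbacks above, the function
 * here returns an int that is propagated to the caller, and phys=true
 * asks the hypervisor to pin the vCPU to the matching physical CPU for
 * the duration of the call:
 *
 *	static int work_fn(void *arg)
 *	{
 *		return 0;
 *	}
 *
 *	int ret = smp_call_on_cpu(0, work_fn, NULL, false);
 */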