// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/jump_label.h>
#include <linux/kvm_para.h>
#include <linux/static_call.h>
#include <asm/paravirt.h>

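/*
 * Static keys consulted by steal-time accounting; they stay false until
 * a paravirt steal clock implementation enables them.
 */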
struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;

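/* Default steal clock: no hypervisor accounting, report zero stolen time. */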
static u64 native_steal_clock(int cpu)
{
	return 0;
}

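/*
 * paravirt_steal_clock() resolves through this static call; a hypervisor
 * backend can retarget it with static_call_update() when steal-time
 * reporting becomes available.
 */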
DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);

#ifdef CONFIG_SMP
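/*
 * Send an IPI to a single CPU: post the action bit in the target's
 * per-CPU message word, then ask the hypervisor to inject SWI0 on the
 * target's physical CPU via the KVM IPI hypercall.
 */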
static void pv_send_ipi_single(int cpu, unsigned int action)
{
	int min, old;
	irq_cpustat_t *info = &per_cpu(irq_stat, cpu);

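	/*
	 * If any action bit was already pending, an IPI is in flight and
	 * its handler will pick up the new bit too; skip the hypercall.
	 */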
	old = atomic_fetch_or(BIT(action), &info->message);
	if (old)
		return;

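	/* One-bit bitmap at offset 0, anchored at the target's physical id. */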
	min = cpu_logical_map(cpu);
	kvm_hypercall3(KVM_HCALL_FUNC_IPI, 1, 0, min);
}

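/*
 * One IPI hypercall carries a 128-bit destination bitmap (two unsigned
 * longs), so a single call can target at most 2 * BITS_PER_LONG
 * physical CPUs relative to a base id.
 */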
#define KVM_IPI_CLUSTER_SIZE	(2 * BITS_PER_LONG)

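/*
 * Send IPIs to all CPUs in @mask. Physical CPU ids are accumulated in a
 * 128-bit bitmap anchored at @min; whenever the next id no longer fits
 * in the current window, the bitmap is flushed with one hypercall and a
 * new window is started.
 */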
static void pv_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
	int i, cpu, min = 0, max = 0, old;
	__uint128_t bitmap = 0;
	irq_cpustat_t *info;

	if (cpumask_empty(mask))
		return;

	action = BIT(action);
	for_each_cpu(i, mask) {
		info = &per_cpu(irq_stat, i);
		old = atomic_fetch_or(action, &info->message);
		if (old)
			continue;

		cpu = cpu_logical_map(i);
		if (!bitmap) {
			min = max = cpu;
		} else if (cpu < min && cpu > (max - KVM_IPI_CLUSTER_SIZE)) {
			/*
			 * cpu is below min but max still fits in the window:
			 * shift the bitmap up and re-anchor it at cpu.
			 */
			bitmap <<= min - cpu;
			min = cpu;
		} else if (cpu > min && cpu < (min + KVM_IPI_CLUSTER_SIZE)) {
			/* cpu is above min and within the window: extend max */
			max = cpu > max ? cpu : max;
		} else {
			/*
			 * cpu does not fit in the current window: flush the
			 * accumulated bitmap with one hypercall, then start a
			 * new window anchored at cpu.
			 */
			kvm_hypercall3(KVM_HCALL_FUNC_IPI, (unsigned long)bitmap,
				      (unsigned long)(bitmap >> BITS_PER_LONG), min);
			min = max = cpu;
			bitmap = 0;
		}
		__set_bit(cpu - min, (unsigned long *)&bitmap);
	}

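	/* Flush whatever remains in the final window. */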
	if (bitmap)
		kvm_hypercall3(KVM_HCALL_FUNC_IPI, (unsigned long)bitmap,
			      (unsigned long)(bitmap >> BITS_PER_LONG), min);
}

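/*
 * SWI0 interrupt handler: acknowledge the soft interrupt, atomically
 * consume the pending action bits, and dispatch each requested action.
 */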
static irqreturn_t pv_ipi_interrupt(int irq, void *dev)
{
	u32 action;
	irq_cpustat_t *info;

	/* Clear SWI interrupt */
	clear_csr_estat(1 << INT_SWI0);
	info = this_cpu_ptr(&irq_stat);
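	/* Grab and clear every action posted by senders in one atomic step. */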
	action = atomic_xchg(&info->message, 0);

	if (action & SMP_RESCHEDULE) {
		scheduler_ipi();
		info->ipi_irqs[IPI_RESCHEDULE]++;
	}

	if (action & SMP_CALL_FUNCTION) {
		generic_smp_call_function_interrupt();
		info->ipi_irqs[IPI_CALL_FUNCTION]++;
	}

	return IRQ_HANDLED;
}

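/*
 * Map the SWI0 exception to a Linux per-CPU irq and install the PV IPI
 * handler on it; failure is fatal since IPIs cannot work without it.
 */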
static void pv_init_ipi(void)
{
	int r, swi;

	swi = get_percpu_irq(INT_SWI0);
	if (swi < 0)
		panic("SWI0 IRQ mapping failed\n");
	irq_set_percpu_devid(swi);
	r = request_percpu_irq(swi, pv_ipi_interrupt, "SWI0-IPI", &irq_stat);
	if (r < 0)
		panic("SWI0 IRQ request failed\n");
}
#endif

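/*
 * Probe for KVM by reading the hypervisor signature from CPUCFG; the
 * result is cached in hypervisor_type after the first successful probe.
 */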
static bool kvm_para_available(void)
{
	int config;
	static int hypervisor_type;

	if (!hypervisor_type) {
		config = read_cpucfg(CPUCFG_KVM_SIG);
		if (!memcmp(&config, KVM_SIGNATURE, 4))
			hypervisor_type = HYPERVISOR_KVM;
	}

	return hypervisor_type == HYPERVISOR_KVM;
}

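/*
 * When running as a KVM guest that advertises KVM_FEATURE_IPI, replace
 * the native IPI mp_ops with the hypercall-based implementations above.
 */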
int __init pv_ipi_init(void)
{
	int feature;

	if (!cpu_has_hypervisor)
		return 0;
	if (!kvm_para_available())
		return 0;

	feature = read_cpucfg(CPUCFG_KVM_FEATURE);
	if (!(feature & KVM_FEATURE_IPI))
		return 0;

#ifdef CONFIG_SMP
	mp_ops.init_ipi		= pv_init_ipi;
	mp_ops.send_ipi_single	= pv_send_ipi_single;
	mp_ops.send_ipi_mask	= pv_send_ipi_mask;
#endif

	return 0;
}