xref: /linux/arch/loongarch/kernel/paravirt.c (revision 55d0969c451159cff86949b38c39171cab962069)
// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/irq_work.h>
#include <linux/jump_label.h>
#include <linux/kvm_para.h>
#include <linux/reboot.h>
#include <linux/static_call.h>
#include <asm/paravirt.h>

static int has_steal_clock;
struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
DEFINE_STATIC_KEY_FALSE(virt_spin_lock_key);

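/*
 * Default implementation of the pv_steal_clock static call below: without
 * hypervisor support there is no steal time to report, so return 0.
 */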
static u64 native_steal_clock(int cpu)
{
	return 0;
}

DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);

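/*
 * "no-steal-acc" on the kernel command line keeps the steal clock available
 * but leaves paravirt_steal_rq_enabled off, so steal time is not folded into
 * run-queue time accounting (see pv_time_init() below).
 */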
static bool steal_acc = true;

static int __init parse_no_stealacc(char *arg)
{
	steal_acc = false;
	return 0;
}
early_param("no-steal-acc", parse_no_stealacc);

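/*
 * Read the steal counter that the hypervisor publishes in this CPU's
 * kvm_steal_time area. The version field acts like a seqcount: it is odd
 * while an update is in progress, so retry until an even value is read and
 * it is unchanged after reading the counter.
 */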
static u64 paravt_steal_clock(int cpu)
{
	int version;
	u64 steal;
	struct kvm_steal_time *src;

	src = &per_cpu(steal_time, cpu);
	do {
		version = src->version;
		virt_rmb(); /* Make sure that the version is read before the steal */
		steal = src->steal;
		virt_rmb(); /* Make sure that the steal is read before the next version */
	} while ((version & 1) || (version != src->version));

	return steal;
}

#ifdef CONFIG_SMP
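/*
 * Send an IPI to a single CPU via hypercall. The action bit is set in the
 * target's irq_stat.message word first; if other bits were already pending
 * the target has not handled the previous notification yet, so no new
 * hypercall is needed.
 */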
static void pv_send_ipi_single(int cpu, unsigned int action)
{
	int min, old;
	irq_cpustat_t *info = &per_cpu(irq_stat, cpu);

	old = atomic_fetch_or(BIT(action), &info->message);
	if (old)
		return;

	min = cpu_logical_map(cpu);
	kvm_hypercall3(KVM_HCALL_FUNC_IPI, 1, 0, min);
}

#define KVM_IPI_CLUSTER_SIZE	(2 * BITS_PER_LONG)

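/*
 * Send IPIs to all CPUs in @mask via hypercall. Pending actions are recorded
 * per target as in pv_send_ipi_single(), then the targets are batched into a
 * 128-bit bitmap of physical CPU ids covering a window of KVM_IPI_CLUSTER_SIZE
 * CPUs starting at min. Whenever a CPU falls outside the current window, the
 * accumulated bitmap is flushed with a hypercall and a new window is started.
 */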
static void pv_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
	int i, cpu, min = 0, max = 0, old;
	__uint128_t bitmap = 0;
	irq_cpustat_t *info;

	if (cpumask_empty(mask))
		return;

	action = BIT(action);
	for_each_cpu(i, mask) {
		info = &per_cpu(irq_stat, i);
		old = atomic_fetch_or(action, &info->message);
		if (old)
			continue;

		cpu = cpu_logical_map(i);
		if (!bitmap) {
			min = max = cpu;
		} else if (cpu < min && cpu > (max - KVM_IPI_CLUSTER_SIZE)) {
			/* cpu < min, but it still fits in the current bitmap window */
			bitmap <<= min - cpu;
			min = cpu;
		} else if (cpu > min && cpu < (min + KVM_IPI_CLUSTER_SIZE)) {
			/* cpu > min, and it still fits in the current bitmap window */
			max = cpu > max ? cpu : max;
		} else {
			/*
			 * This cpu does not fit in the KVM_IPI_CLUSTER_SIZE
			 * window: flush the accumulated bitmap with a hypercall
			 * now and start a new window at this cpu.
			 */
			kvm_hypercall3(KVM_HCALL_FUNC_IPI, (unsigned long)bitmap,
				       (unsigned long)(bitmap >> BITS_PER_LONG), min);
			min = max = cpu;
			bitmap = 0;
		}
		__set_bit(cpu - min, (unsigned long *)&bitmap);
	}

	if (bitmap)
		kvm_hypercall3(KVM_HCALL_FUNC_IPI, (unsigned long)bitmap,
			       (unsigned long)(bitmap >> BITS_PER_LONG), min);
}

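/*
 * Interrupt handler for paravirt IPIs (delivered through SWI0): acknowledge
 * the software interrupt, atomically fetch and clear the pending action bits
 * set by the sender, then dispatch each requested action.
 */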
static irqreturn_t pv_ipi_interrupt(int irq, void *dev)
{
	u32 action;
	irq_cpustat_t *info;

	/* Clear SWI interrupt */
	clear_csr_estat(1 << INT_SWI0);
	info = this_cpu_ptr(&irq_stat);
	action = atomic_xchg(&info->message, 0);

	if (action & SMP_RESCHEDULE) {
		scheduler_ipi();
		info->ipi_irqs[IPI_RESCHEDULE]++;
	}

	if (action & SMP_CALL_FUNCTION) {
		generic_smp_call_function_interrupt();
		info->ipi_irqs[IPI_CALL_FUNCTION]++;
	}

	if (action & SMP_IRQ_WORK) {
		irq_work_run();
		info->ipi_irqs[IPI_IRQ_WORK]++;
	}

	if (action & SMP_CLEAR_VECTOR) {
		complete_irq_moving();
		info->ipi_irqs[IPI_CLEAR_VECTOR]++;
	}

	return IRQ_HANDLED;
}

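/* Map SWI0 as a per-CPU interrupt and install the paravirt IPI handler. */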
static void pv_init_ipi(void)
{
	int r, swi;

	swi = get_percpu_irq(INT_SWI0);
	if (swi < 0)
		panic("SWI0 IRQ mapping failed\n");
	irq_set_percpu_devid(swi);
	r = request_percpu_irq(swi, pv_ipi_interrupt, "SWI0-IPI", &irq_stat);
	if (r < 0)
		panic("SWI0 IRQ request failed\n");
}
#endif

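/*
 * Detect whether we run as a KVM guest: the CPU must report that it runs
 * under a hypervisor (cpu_has_hypervisor) and the CPUCFG_KVM_SIG register
 * must contain the KVM signature. The result is cached in hypervisor_type.
 */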
bool kvm_para_available(void)
{
	int config;
	static int hypervisor_type;

	if (!cpu_has_hypervisor)
		return false;

	if (!hypervisor_type) {
		config = read_cpucfg(CPUCFG_KVM_SIG);
		if (!memcmp(&config, KVM_SIGNATURE, 4))
			hypervisor_type = HYPERVISOR_KVM;
	}

	return hypervisor_type == HYPERVISOR_KVM;
}

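/* Read and cache the feature word the hypervisor advertises via CPUCFG_KVM_FEATURE. */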
unsigned int kvm_arch_para_features(void)
{
	static unsigned int feature;

	if (!kvm_para_available())
		return 0;

	if (!feature)
		feature = read_cpucfg(CPUCFG_KVM_FEATURE);

	return feature;
}

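/*
 * If the hypervisor advertises KVM_FEATURE_IPI, route IPIs through the
 * hypercall-based implementations above instead of the native mp_ops.
 */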
int __init pv_ipi_init(void)
{
	if (!kvm_para_has_feature(KVM_FEATURE_IPI))
		return 0;

#ifdef CONFIG_SMP
	mp_ops.init_ipi		= pv_init_ipi;
	mp_ops.send_ipi_single	= pv_send_ipi_single;
	mp_ops.send_ipi_mask	= pv_send_ipi_mask;
#endif

	return 0;
}

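/*
 * Register this CPU's steal_time area with the hypervisor: pass its physical
 * address, tagged with KVM_STEAL_PHYS_VALID, through the NOTIFY hypercall.
 * The area must not cross a page boundary.
 */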
static int pv_enable_steal_time(void)
{
	int cpu = smp_processor_id();
	unsigned long addr;
	struct kvm_steal_time *st;

	if (!has_steal_clock)
		return -EPERM;

	st = &per_cpu(steal_time, cpu);
	addr = per_cpu_ptr_to_phys(st);

	/* The whole kvm_steal_time structure must fit in one page */
	if (PFN_DOWN(addr) != PFN_DOWN(addr + sizeof(*st))) {
		pr_warn("Illegal PV steal time addr %lx\n", addr);
		return -EFAULT;
	}

	addr |= KVM_STEAL_PHYS_VALID;
	kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, BIT(KVM_FEATURE_STEAL_TIME), addr);

	return 0;
}

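/* Ask the hypervisor to stop updating this CPU's steal_time area. */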
static void pv_disable_steal_time(void)
{
	if (has_steal_clock)
		kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, BIT(KVM_FEATURE_STEAL_TIME), 0);
}

#ifdef CONFIG_SMP
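/*
 * CPU hotplug callbacks: re-register the steal_time area when a CPU comes
 * online and unregister it before the CPU goes down.
 */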
static int pv_time_cpu_online(unsigned int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	pv_enable_steal_time();
	local_irq_restore(flags);

	return 0;
}

static int pv_time_cpu_down_prepare(unsigned int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	pv_disable_steal_time();
	local_irq_restore(flags);

	return 0;
}
#endif

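/*
 * On reboot, disable steal-time updates on every CPU so the hypervisor no
 * longer writes into guest memory afterwards.
 */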
static void pv_cpu_reboot(void *unused)
{
	pv_disable_steal_time();
}

static int pv_reboot_notify(struct notifier_block *nb, unsigned long code, void *unused)
{
	on_each_cpu(pv_cpu_reboot, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block pv_reboot_nb = {
	.notifier_call  = pv_reboot_notify,
};

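/*
 * Enable paravirt steal time if the hypervisor supports it: register the
 * per-CPU areas, hook CPU hotplug and reboot, and switch the pv_steal_clock
 * static call over to paravt_steal_clock(). The static keys let the
 * scheduler and cputime code know that steal time is available.
 */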
int __init pv_time_init(void)
{
	int r;

	if (!kvm_para_has_feature(KVM_FEATURE_STEAL_TIME))
		return 0;

	has_steal_clock = 1;
	r = pv_enable_steal_time();
	if (r < 0) {
		has_steal_clock = 0;
		return 0;
	}
	register_reboot_notifier(&pv_reboot_nb);

#ifdef CONFIG_SMP
	r = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
				      "loongarch/pv_time:online",
				      pv_time_cpu_online, pv_time_cpu_down_prepare);
	if (r < 0) {
		has_steal_clock = 0;
		pr_err("Failed to install cpu hotplug callbacks\n");
		return r;
	}
#endif

	static_call_update(pv_steal_clock, paravt_steal_clock);

	static_key_slow_inc(&paravirt_steal_enabled);
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	if (steal_acc)
		static_key_slow_inc(&paravirt_steal_rq_enabled);
#endif

	pr_info("Using paravirt steal-time\n");

	return 0;
}

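/*
 * When running as a guest, enable virt_spin_lock_key so that virt_spin_lock()
 * can take a simple test-and-set path instead of the queued-spinlock slowpath,
 * which behaves badly when vCPUs are preempted.
 */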
int __init pv_spinlock_init(void)
{
	if (!cpu_has_hypervisor)
		return 0;

	static_branch_enable(&virt_spin_lock_key);

	return 0;
}