xref: /linux/arch/loongarch/kernel/paravirt.c (revision 1fd1dc41724319406b0aff221a352a400b0ddfc5)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/types.h>
3 #include <linux/interrupt.h>
4 #include <linux/irq_work.h>
5 #include <linux/jump_label.h>
6 #include <linux/kvm_para.h>
7 #include <linux/reboot.h>
8 #include <linux/static_call.h>
9 #include <linux/sched/cputime.h>
10 #include <asm/paravirt.h>
11 
/* Non-zero once KVM_FEATURE_STEAL_TIME has been detected and enabled */
static int has_steal_clock;
/* Per-CPU record shared with the hypervisor; kept cache-line aligned */
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
/* Enabled in pv_time_init() when the host reports KVM_FEATURE_PREEMPT */
static DEFINE_STATIC_KEY_FALSE(virt_preempt_key);
/* Global (non-static): keyed by pv_spinlock_init() for paravirt spinlocks */
DEFINE_STATIC_KEY_FALSE(virt_spin_lock_key);

/* Account steal time in the runqueue by default; "no-steal-acc" disables it */
static bool steal_acc = true;
18 
/* Early parameter "no-steal-acc": opt out of steal-time accounting */
static int __init parse_no_stealacc(char *arg)
{
	steal_acc = false;
	return 0;
}
early_param("no-steal-acc", parse_no_stealacc);
25 
/*
 * Read the accumulated steal time (ns) for @cpu from the shared
 * kvm_steal_time record.  The version field acts as a seqcount written by
 * the hypervisor: it is odd while an update is in progress, so retry until
 * the same even version is observed before and after reading ->steal.
 */
static u64 paravt_steal_clock(int cpu)
{
	int version;
	u64 steal;
	struct kvm_steal_time *src;

	src = &per_cpu(steal_time, cpu);
	do {

		version = src->version;
		virt_rmb(); /* Make sure that the version is read before the steal */
		steal = src->steal;
		virt_rmb(); /* Make sure that the steal is read before the next version */

	} while ((version & 1) || (version != src->version));

	return steal;
}
44 
#ifdef CONFIG_SMP
/* Snapshot of the native smp_ops, saved by pv_ipi_init() before overriding */
static struct smp_ops native_ops;
47 
/*
 * Send one IPI via hypercall.  The action bit is posted in the target
 * CPU's irq_stat message word first; the hypercall is issued only when
 * the word was previously empty, because a non-empty word means an IPI
 * is already in flight and its handler will consume the new bit as well.
 */
static void pv_send_ipi_single(int cpu, unsigned int action)
{
	int min, old;
	irq_cpustat_t *info = &per_cpu(irq_stat, cpu);

	/* ACTION_BOOT_CPU must go through the native IPI mechanism */
	if (unlikely(action == ACTION_BOOT_CPU)) {
		native_ops.send_ipi_single(cpu, action);
		return;
	}

	old = atomic_fetch_or(BIT(action), &info->message);
	if (old)
		return;

	/* The hypercall addresses CPUs by physical (logical-map) id */
	min = cpu_logical_map(cpu);
	kvm_hypercall3(KVM_HCALL_FUNC_IPI, 1, 0, min);
}
65 
/* One IPI hypercall covers a 128-bit window of physical CPU ids */
#define KVM_IPI_CLUSTER_SIZE	(2 * BITS_PER_LONG)

/*
 * Send IPIs to every CPU in @mask using as few hypercalls as possible.
 * CPUs whose physical ids fit in one KVM_IPI_CLUSTER_SIZE window are
 * batched into a single 128-bit bitmap anchored at the lowest id (min);
 * when a CPU falls outside the current window, the pending bitmap is
 * flushed with a hypercall and a new window is started at that CPU.
 * As in pv_send_ipi_single(), a CPU whose message word was already
 * non-empty is skipped — an IPI is already on its way there.
 */
static void pv_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
	int i, cpu, min = 0, max = 0, old;
	__uint128_t bitmap = 0;
	irq_cpustat_t *info;

	if (cpumask_empty(mask))
		return;

	/* ACTION_BOOT_CPU must go through the native IPI mechanism */
	if (unlikely(action == ACTION_BOOT_CPU)) {
		native_ops.send_ipi_mask(mask, action);
		return;
	}

	action = BIT(action);
	for_each_cpu(i, mask) {
		info = &per_cpu(irq_stat, i);
		old = atomic_fetch_or(action, &info->message);
		if (old)
			continue;

		cpu = cpu_logical_map(i);
		if (!bitmap) {
			min = max = cpu;
		} else if (cpu < min && cpu > (max - KVM_IPI_CLUSTER_SIZE)) {
			/* cpu < min, and bitmap still enough */
			bitmap <<= min - cpu;
			min = cpu;
		} else if (cpu > min && cpu < (min + KVM_IPI_CLUSTER_SIZE)) {
			/* cpu > min, and bitmap still enough */
			max = cpu > max ? cpu : max;
		} else {
			/*
			 * With cpu, bitmap will exceed KVM_IPI_CLUSTER_SIZE,
			 * send IPI here directly and skip the remaining CPUs.
			 */
			kvm_hypercall3(KVM_HCALL_FUNC_IPI, (unsigned long)bitmap,
				      (unsigned long)(bitmap >> BITS_PER_LONG), min);
			min = max = cpu;
			bitmap = 0;
		}
		__set_bit(cpu - min, (unsigned long *)&bitmap);
	}

	/* Flush whatever window is still pending */
	if (bitmap)
		kvm_hypercall3(KVM_HCALL_FUNC_IPI, (unsigned long)bitmap,
			      (unsigned long)(bitmap >> BITS_PER_LONG), min);
}
116 
/*
 * SWI0 handler for paravirt IPIs: atomically grab-and-clear this CPU's
 * pending message bits, then dispatch each requested action and bump the
 * matching per-CPU IPI statistics counter.
 */
static irqreturn_t pv_ipi_interrupt(int irq, void *dev)
{
	u32 action;
	irq_cpustat_t *info;

	/* Clear SWI interrupt */
	clear_csr_estat(1 << INT_SWI0);
	info = this_cpu_ptr(&irq_stat);
	/* xchg(0) so bits set concurrently by senders are never lost */
	action = atomic_xchg(&info->message, 0);

	if (action & SMP_RESCHEDULE) {
		scheduler_ipi();
		info->ipi_irqs[IPI_RESCHEDULE]++;
	}

	if (action & SMP_CALL_FUNCTION) {
		generic_smp_call_function_interrupt();
		info->ipi_irqs[IPI_CALL_FUNCTION]++;
	}

	if (action & SMP_IRQ_WORK) {
		irq_work_run();
		info->ipi_irqs[IPI_IRQ_WORK]++;
	}

	if (action & SMP_CLEAR_VECTOR) {
		complete_irq_moving();
		info->ipi_irqs[IPI_CLEAR_VECTOR]++;
	}

	return IRQ_HANDLED;
}
149 
/*
 * Per-CPU IPI setup for the paravirt path: keep the native init (still
 * needed for ACTION_BOOT_CPU), then route SWI0 to pv_ipi_interrupt().
 * IPIs are essential to SMP bring-up, so any failure here is fatal.
 */
static void pv_init_ipi(void)
{
	int r, swi;

	/* Init native ipi irq for ACTION_BOOT_CPU */
	native_ops.init_ipi();
	swi = get_percpu_irq(INT_SWI0);
	if (swi < 0)
		panic("SWI0 IRQ mapping failed\n");
	irq_set_percpu_devid(swi);
	r = request_percpu_irq(swi, pv_ipi_interrupt, "SWI0-IPI", &irq_stat);
	if (r < 0)
		panic("SWI0 IRQ request failed\n");
}
164 #endif
165 
166 bool kvm_para_available(void)
167 {
168 	int config;
169 	static int hypervisor_type;
170 
171 	if (!cpu_has_hypervisor)
172 		return false;
173 
174 	if (!hypervisor_type) {
175 		config = read_cpucfg(CPUCFG_KVM_SIG);
176 		if (!memcmp(&config, KVM_SIGNATURE, 4))
177 			hypervisor_type = HYPERVISOR_KVM;
178 	}
179 
180 	return hypervisor_type == HYPERVISOR_KVM;
181 }
182 
183 unsigned int kvm_arch_para_features(void)
184 {
185 	static unsigned int feature;
186 
187 	if (!kvm_para_available())
188 		return 0;
189 
190 	if (!feature)
191 		feature = read_cpucfg(CPUCFG_KVM_FEATURE);
192 
193 	return feature;
194 }
195 
/*
 * Install the paravirt IPI smp_ops when the hypervisor advertises
 * KVM_FEATURE_IPI.  The native ops are copied into native_ops *before*
 * being overridden so the pv hooks can fall back to them (boot-CPU IPIs,
 * native init).  Always returns 0.
 */
int __init pv_ipi_init(void)
{
	if (!kvm_para_has_feature(KVM_FEATURE_IPI))
		return 0;

#ifdef CONFIG_SMP
	native_ops		= mp_ops;
	mp_ops.init_ipi		= pv_init_ipi;
	mp_ops.send_ipi_single	= pv_send_ipi_single;
	mp_ops.send_ipi_mask	= pv_send_ipi_mask;
#endif

	return 0;
}
210 
/*
 * Register the current CPU's steal_time area with the hypervisor.
 * Must run on the CPU being enabled (uses smp_processor_id()).
 *
 * Returns 0 on success, -EPERM when steal time is unavailable, or
 * -EFAULT when the per-CPU record would straddle a page boundary.
 */
static int pv_enable_steal_time(void)
{
	int cpu = smp_processor_id();
	unsigned long addr;
	struct kvm_steal_time *st;

	if (!has_steal_clock)
		return -EPERM;

	st = &per_cpu(steal_time, cpu);
	addr = per_cpu_ptr_to_phys(st);

	/* The whole structure kvm_steal_time should be in one page */
	if (PFN_DOWN(addr) != PFN_DOWN(addr + sizeof(*st))) {
		pr_warn("Illegal PV steal time addr %lx\n", addr);
		return -EFAULT;
	}

	/* Low bit of the physical address carries the "enable" flag */
	addr |= KVM_STEAL_PHYS_VALID;
	kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, BIT(KVM_FEATURE_STEAL_TIME), addr);

	return 0;
}
234 
235 static void pv_disable_steal_time(void)
236 {
237 	if (has_steal_clock)
238 		kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, BIT(KVM_FEATURE_STEAL_TIME), 0);
239 }
240 
241 #ifdef CONFIG_SMP
/*
 * CPU hotplug online callback: (re-)register this CPU's steal_time area.
 * Runs on the CPU coming online; IRQs are disabled around the hypercall.
 */
static int pv_time_cpu_online(unsigned int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	pv_enable_steal_time();
	local_irq_restore(flags);

	return 0;
}
252 
/*
 * CPU hotplug teardown callback: de-register the steal_time area before
 * the CPU goes offline so the host stops writing to it.
 */
static int pv_time_cpu_down_prepare(unsigned int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	pv_disable_steal_time();
	local_irq_restore(flags);

	return 0;
}
263 
264 bool vcpu_is_preempted(int cpu)
265 {
266 	struct kvm_steal_time *src;
267 
268 	if (!static_branch_unlikely(&virt_preempt_key))
269 		return false;
270 
271 	src = &per_cpu(steal_time, cpu);
272 	return !!(src->preempted & KVM_VCPU_PREEMPTED);
273 }
274 EXPORT_SYMBOL(vcpu_is_preempted);
275 #endif
276 
/* on_each_cpu() helper: disable steal time on the local CPU for reboot */
static void pv_cpu_reboot(void *unused)
{
	pv_disable_steal_time();
}
281 
/*
 * Reboot notifier: de-register every CPU's steal_time area so the host
 * no longer writes guest memory across the reboot.
 */
static int pv_reboot_notify(struct notifier_block *nb, unsigned long code, void *unused)
{
	on_each_cpu(pv_cpu_reboot, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block pv_reboot_nb = {
	.notifier_call  = pv_reboot_notify,
};
291 
/*
 * Boot-time setup of paravirt steal time.  When KVM_FEATURE_STEAL_TIME is
 * available: enable it on the boot CPU, register the reboot notifier and
 * CPU hotplug callbacks, hook pv_steal_clock to paravt_steal_clock(), and
 * flip the scheduler's steal-accounting static keys.  Failure to enable
 * on the boot CPU quietly leaves steal time off (returns 0); a hotplug
 * registration failure is reported and returned.
 */
int __init pv_time_init(void)
{
	int r;

	if (!kvm_para_has_feature(KVM_FEATURE_STEAL_TIME))
		return 0;

	has_steal_clock = 1;
	r = pv_enable_steal_time();
	if (r < 0) {
		/* Roll back: keep running with steal time disabled */
		has_steal_clock = 0;
		return 0;
	}
	register_reboot_notifier(&pv_reboot_nb);

#ifdef CONFIG_SMP
	r = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
				      "loongarch/pv_time:online",
				      pv_time_cpu_online, pv_time_cpu_down_prepare);
	if (r < 0) {
		has_steal_clock = 0;
		pr_err("Failed to install cpu hotplug callbacks\n");
		return r;
	}

	/* Preempt reporting rides on the same shared steal_time record */
	if (kvm_para_has_feature(KVM_FEATURE_PREEMPT))
		static_branch_enable(&virt_preempt_key);
#endif

	static_call_update(pv_steal_clock, paravt_steal_clock);

	static_key_slow_inc(&paravirt_steal_enabled);
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	if (steal_acc)
		static_key_slow_inc(&paravirt_steal_rq_enabled);
#endif

	if (static_key_enabled(&virt_preempt_key))
		pr_info("Using paravirt steal-time with preempt enabled\n");
	else
		pr_info("Using paravirt steal-time with preempt disabled\n");

	return 0;
}
336 
337 int __init pv_spinlock_init(void)
338 {
339 	if (!cpu_has_hypervisor)
340 		return 0;
341 
342 	static_branch_enable(&virt_spin_lock_key);
343 
344 	return 0;
345 }
346