xref: /linux/arch/loongarch/kernel/paravirt.c (revision d53b8e36925256097a08d7cb749198d85cbf9b2b)
// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/irq_work.h>
#include <linux/jump_label.h>
#include <linux/kvm_para.h>
#include <linux/reboot.h>
#include <linux/static_call.h>
#include <asm/paravirt.h>

static int has_steal_clock;
struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);

static u64 native_steal_clock(int cpu)
{
	return 0;
}

DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);

static bool steal_acc = true;

static int __init parse_no_stealacc(char *arg)
{
	steal_acc = false;
	return 0;
}
early_param("no-steal-acc", parse_no_stealacc);

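/*
 * Read the steal time published by the hypervisor for @cpu. The version
 * field works like a seqcount: an odd value, or a value that changed while
 * the record was being read, indicates a concurrent update, so retry.
 */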
static u64 paravt_steal_clock(int cpu)
{
	int version;
	u64 steal;
	struct kvm_steal_time *src;

	src = &per_cpu(steal_time, cpu);
	do {

		version = src->version;
		virt_rmb(); /* Make sure that the version is read before the steal */
		steal = src->steal;
		virt_rmb(); /* Make sure that the steal is read before the next version */

	} while ((version & 1) || (version != src->version));

	return steal;
}

#ifdef CONFIG_SMP
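/*
 * Send one IPI action to a single CPU: set the action bit in the target's
 * per-CPU message word and issue the IPI hypercall only if no action was
 * already pending (a pending IPI handler will pick up the new bit).
 */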
static void pv_send_ipi_single(int cpu, unsigned int action)
{
	int min, old;
	irq_cpustat_t *info = &per_cpu(irq_stat, cpu);

	old = atomic_fetch_or(BIT(action), &info->message);
	if (old)
		return;

	min = cpu_logical_map(cpu);
	kvm_hypercall3(KVM_HCALL_FUNC_IPI, 1, 0, min);
}

#define KVM_IPI_CLUSTER_SIZE	(2 * BITS_PER_LONG)

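/*
 * Send an IPI action to every CPU in @mask. Physical CPU ids are coalesced
 * into a 128-bit bitmap based at @min; whenever the next id does not fit in
 * the current window, the accumulated bitmap is flushed with one hypercall
 * and a new window is started.
 */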
static void pv_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
	int i, cpu, min = 0, max = 0, old;
	__uint128_t bitmap = 0;
	irq_cpustat_t *info;

	if (cpumask_empty(mask))
		return;

	action = BIT(action);
	for_each_cpu(i, mask) {
		info = &per_cpu(irq_stat, i);
		old = atomic_fetch_or(action, &info->message);
		if (old)
			continue;

		cpu = cpu_logical_map(i);
		if (!bitmap) {
			min = max = cpu;
		} else if (cpu < min && cpu > (max - KVM_IPI_CLUSTER_SIZE)) {
			/* cpu < min and still within the window: rebase the bitmap at cpu */
			bitmap <<= min - cpu;
			min = cpu;
		} else if (cpu > min && cpu < (min + KVM_IPI_CLUSTER_SIZE)) {
			/* cpu > min and still within the window: just extend max */
			max = cpu > max ? cpu : max;
		} else {
			/*
			 * Adding this cpu would make the bitmap exceed
			 * KVM_IPI_CLUSTER_SIZE, so flush the accumulated
			 * bitmap now and start a new window based at cpu.
			 */
			kvm_hypercall3(KVM_HCALL_FUNC_IPI, (unsigned long)bitmap,
				      (unsigned long)(bitmap >> BITS_PER_LONG), min);
			min = max = cpu;
			bitmap = 0;
		}
		__set_bit(cpu - min, (unsigned long *)&bitmap);
	}

	if (bitmap)
		kvm_hypercall3(KVM_HCALL_FUNC_IPI, (unsigned long)bitmap,
			      (unsigned long)(bitmap >> BITS_PER_LONG), min);
}

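/*
 * Handler for the paravirt IPI delivered through SWI0: clear the interrupt,
 * atomically fetch and reset the pending action bits, then dispatch each
 * requested action.
 */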
static irqreturn_t pv_ipi_interrupt(int irq, void *dev)
{
	u32 action;
	irq_cpustat_t *info;

	/* Clear SWI interrupt */
	clear_csr_estat(1 << INT_SWI0);
	info = this_cpu_ptr(&irq_stat);
	action = atomic_xchg(&info->message, 0);

	if (action & SMP_RESCHEDULE) {
		scheduler_ipi();
		info->ipi_irqs[IPI_RESCHEDULE]++;
	}

	if (action & SMP_CALL_FUNCTION) {
		generic_smp_call_function_interrupt();
		info->ipi_irqs[IPI_CALL_FUNCTION]++;
	}

	if (action & SMP_IRQ_WORK) {
		irq_work_run();
		info->ipi_irqs[IPI_IRQ_WORK]++;
	}

	return IRQ_HANDLED;
}

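/* Map the per-CPU SWI0 interrupt and install the paravirt IPI handler. */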
static void pv_init_ipi(void)
{
	int r, swi;

	swi = get_percpu_irq(INT_SWI0);
	if (swi < 0)
		panic("SWI0 IRQ mapping failed\n");
	irq_set_percpu_devid(swi);
	r = request_percpu_irq(swi, pv_ipi_interrupt, "SWI0-IPI", &irq_stat);
	if (r < 0)
		panic("SWI0 IRQ request failed\n");
}
#endif

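/*
 * Detect whether we are running as a KVM guest by checking the hypervisor
 * signature in CPUCFG; the result is cached after the first call.
 */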
static bool kvm_para_available(void)
{
	int config;
	static int hypervisor_type;

	if (!hypervisor_type) {
		config = read_cpucfg(CPUCFG_KVM_SIG);
		if (!memcmp(&config, KVM_SIGNATURE, 4))
			hypervisor_type = HYPERVISOR_KVM;
	}

	return hypervisor_type == HYPERVISOR_KVM;
}

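/*
 * If the hypervisor advertises the paravirt IPI feature, replace the native
 * SMP IPI operations with the hypercall-based ones above.
 */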
int __init pv_ipi_init(void)
{
	int feature;

	if (!cpu_has_hypervisor)
		return 0;
	if (!kvm_para_available())
		return 0;

	feature = read_cpucfg(CPUCFG_KVM_FEATURE);
	if (!(feature & KVM_FEATURE_IPI))
		return 0;

#ifdef CONFIG_SMP
	mp_ops.init_ipi		= pv_init_ipi;
	mp_ops.send_ipi_single	= pv_send_ipi_single;
	mp_ops.send_ipi_mask	= pv_send_ipi_mask;
#endif

	return 0;
}

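/*
 * Register this CPU's steal_time area with the hypervisor: pass its physical
 * address, with the valid bit set, through the NOTIFY hypercall.
 */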
static int pv_enable_steal_time(void)
{
	int cpu = smp_processor_id();
	unsigned long addr;
	struct kvm_steal_time *st;

	if (!has_steal_clock)
		return -EPERM;

	st = &per_cpu(steal_time, cpu);
	addr = per_cpu_ptr_to_phys(st);

	/* The whole structure kvm_steal_time should be in one page */
	if (PFN_DOWN(addr) != PFN_DOWN(addr + sizeof(*st))) {
		pr_warn("Illegal PV steal time addr %lx\n", addr);
		return -EFAULT;
	}

	addr |= KVM_STEAL_PHYS_VALID;
	kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, KVM_FEATURE_STEAL_TIME, addr);

	return 0;
}

static void pv_disable_steal_time(void)
{
	if (has_steal_clock)
		kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, KVM_FEATURE_STEAL_TIME, 0);
}

#ifdef CONFIG_SMP
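/*
 * CPU hotplug callbacks: (re)register the steal_time area when a CPU comes
 * online and deregister it before the CPU goes down.
 */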
static int pv_time_cpu_online(unsigned int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	pv_enable_steal_time();
	local_irq_restore(flags);

	return 0;
}

static int pv_time_cpu_down_prepare(unsigned int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	pv_disable_steal_time();
	local_irq_restore(flags);

	return 0;
}
#endif

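/* On reboot, tell the hypervisor to stop updating steal time on every CPU. */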
static void pv_cpu_reboot(void *unused)
{
	pv_disable_steal_time();
}

static int pv_reboot_notify(struct notifier_block *nb, unsigned long code, void *unused)
{
	on_each_cpu(pv_cpu_reboot, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block pv_reboot_nb = {
	.notifier_call  = pv_reboot_notify,
};

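/*
 * Probe for KVM steal-time support. On success, register the per-CPU area,
 * hook up the reboot notifier and CPU hotplug callbacks, and switch the
 * pv_steal_clock static call over to the paravirt implementation.
 */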
int __init pv_time_init(void)
{
	int r, feature;

	if (!cpu_has_hypervisor)
		return 0;
	if (!kvm_para_available())
		return 0;

	feature = read_cpucfg(CPUCFG_KVM_FEATURE);
	if (!(feature & KVM_FEATURE_STEAL_TIME))
		return 0;

	has_steal_clock = 1;
	r = pv_enable_steal_time();
	if (r < 0) {
		has_steal_clock = 0;
		return 0;
	}
	register_reboot_notifier(&pv_reboot_nb);

#ifdef CONFIG_SMP
	r = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
				      "loongarch/pv_time:online",
				      pv_time_cpu_online, pv_time_cpu_down_prepare);
	if (r < 0) {
		has_steal_clock = 0;
		pr_err("Failed to install cpu hotplug callbacks\n");
		return r;
	}
#endif

	static_call_update(pv_steal_clock, paravt_steal_clock);

	static_key_slow_inc(&paravirt_steal_enabled);
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	if (steal_acc)
		static_key_slow_inc(&paravirt_steal_rq_enabled);
#endif

	pr_info("Using paravirt steal-time\n");

	return 0;
}