// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/kvm_host.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/kvm_csr.h>
#include <asm/kvm_eiointc.h>
#include <asm/kvm_pch_pic.h>
#include "trace.h"

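/*
 * vpid_mask is the mask of hardware GID bits available for guest
 * VPIDs, kvm_loongarch_ops holds the relocated world-switch entry
 * points, gcsr_flag records whether each guest CSR is hardware-backed
 * or software-emulated, and vmcs is the per-CPU guest context.
 */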
unsigned long vpid_mask;
struct kvm_world_switch *kvm_loongarch_ops;
static int gcsr_flag[CSR_MAX_NUMS];
static struct kvm_context __percpu *vmcs;

int get_gcsr_flag(int csr)
{
	if (csr < CSR_MAX_NUMS)
		return gcsr_flag[csr];

	return INVALID_GCSR;
}

static inline void set_gcsr_sw_flag(int csr)
{
	if (csr < CSR_MAX_NUMS)
		gcsr_flag[csr] |= SW_GCSR;
}

static inline void set_gcsr_hw_flag(int csr)
{
	if (csr < CSR_MAX_NUMS)
		gcsr_flag[csr] |= HW_GCSR;
}

/*
 * The default value of gcsr_flag[CSR] is 0, and this function sets
 * the flag to 1 (SW_GCSR) or 2 (HW_GCSR) depending on whether the
 * guest CSR is emulated in software or backed by hardware. The flag
 * is used by get/set_gcsr: if it is HW_GCSR, the CSR is accessed
 * with gcsrrd/gcsrwr; otherwise it is emulated with a software CSR.
 */
static void kvm_init_gcsr_flag(void)
{
	set_gcsr_hw_flag(LOONGARCH_CSR_CRMD);
	set_gcsr_hw_flag(LOONGARCH_CSR_PRMD);
	set_gcsr_hw_flag(LOONGARCH_CSR_EUEN);
	set_gcsr_hw_flag(LOONGARCH_CSR_MISC);
	set_gcsr_hw_flag(LOONGARCH_CSR_ECFG);
	set_gcsr_hw_flag(LOONGARCH_CSR_ESTAT);
	set_gcsr_hw_flag(LOONGARCH_CSR_ERA);
	set_gcsr_hw_flag(LOONGARCH_CSR_BADV);
	set_gcsr_hw_flag(LOONGARCH_CSR_BADI);
	set_gcsr_hw_flag(LOONGARCH_CSR_EENTRY);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBIDX);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBEHI);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBELO0);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBELO1);
	set_gcsr_hw_flag(LOONGARCH_CSR_ASID);
	set_gcsr_hw_flag(LOONGARCH_CSR_PGDL);
	set_gcsr_hw_flag(LOONGARCH_CSR_PGDH);
	set_gcsr_hw_flag(LOONGARCH_CSR_PGD);
	set_gcsr_hw_flag(LOONGARCH_CSR_PWCTL0);
	set_gcsr_hw_flag(LOONGARCH_CSR_PWCTL1);
	set_gcsr_hw_flag(LOONGARCH_CSR_STLBPGSIZE);
	set_gcsr_hw_flag(LOONGARCH_CSR_RVACFG);
	set_gcsr_hw_flag(LOONGARCH_CSR_CPUID);
	set_gcsr_hw_flag(LOONGARCH_CSR_PRCFG1);
	set_gcsr_hw_flag(LOONGARCH_CSR_PRCFG2);
	set_gcsr_hw_flag(LOONGARCH_CSR_PRCFG3);
	set_gcsr_hw_flag(LOONGARCH_CSR_KS0);
	set_gcsr_hw_flag(LOONGARCH_CSR_KS1);
	set_gcsr_hw_flag(LOONGARCH_CSR_KS2);
	set_gcsr_hw_flag(LOONGARCH_CSR_KS3);
	set_gcsr_hw_flag(LOONGARCH_CSR_KS4);
	set_gcsr_hw_flag(LOONGARCH_CSR_KS5);
	set_gcsr_hw_flag(LOONGARCH_CSR_KS6);
	set_gcsr_hw_flag(LOONGARCH_CSR_KS7);
	set_gcsr_hw_flag(LOONGARCH_CSR_TMID);
	set_gcsr_hw_flag(LOONGARCH_CSR_TCFG);
	set_gcsr_hw_flag(LOONGARCH_CSR_TVAL);
	set_gcsr_hw_flag(LOONGARCH_CSR_TINTCLR);
	set_gcsr_hw_flag(LOONGARCH_CSR_CNTC);
	set_gcsr_hw_flag(LOONGARCH_CSR_LLBCTL);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBRENTRY);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBRBADV);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBRERA);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBRSAVE);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBRELO0);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBRELO1);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBREHI);
	set_gcsr_hw_flag(LOONGARCH_CSR_TLBRPRMD);
	set_gcsr_hw_flag(LOONGARCH_CSR_DMWIN0);
	set_gcsr_hw_flag(LOONGARCH_CSR_DMWIN1);
	set_gcsr_hw_flag(LOONGARCH_CSR_DMWIN2);
	set_gcsr_hw_flag(LOONGARCH_CSR_DMWIN3);

	set_gcsr_sw_flag(LOONGARCH_CSR_IMPCTL1);
	set_gcsr_sw_flag(LOONGARCH_CSR_IMPCTL2);
	set_gcsr_sw_flag(LOONGARCH_CSR_MERRCTL);
	set_gcsr_sw_flag(LOONGARCH_CSR_MERRINFO1);
	set_gcsr_sw_flag(LOONGARCH_CSR_MERRINFO2);
	set_gcsr_sw_flag(LOONGARCH_CSR_MERRENTRY);
	set_gcsr_sw_flag(LOONGARCH_CSR_MERRERA);
	set_gcsr_sw_flag(LOONGARCH_CSR_MERRSAVE);
	set_gcsr_sw_flag(LOONGARCH_CSR_CTAG);
	set_gcsr_sw_flag(LOONGARCH_CSR_DEBUG);
	set_gcsr_sw_flag(LOONGARCH_CSR_DERA);
	set_gcsr_sw_flag(LOONGARCH_CSR_DESAVE);

	set_gcsr_sw_flag(LOONGARCH_CSR_FWPC);
	set_gcsr_sw_flag(LOONGARCH_CSR_FWPS);
	set_gcsr_sw_flag(LOONGARCH_CSR_MWPC);
	set_gcsr_sw_flag(LOONGARCH_CSR_MWPS);

	set_gcsr_sw_flag(LOONGARCH_CSR_DB0ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB0MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB0CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB0ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB1ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB1MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB1CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB1ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB2ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB2MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB2CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB2ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB3ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB3MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB3CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB3ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB4ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB4MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB4CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB4ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB5ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB5MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB5CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB5ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB6ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB6MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB6CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB6ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB7ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB7MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB7CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_DB7ASID);

	set_gcsr_sw_flag(LOONGARCH_CSR_IB0ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB0MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB0CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB0ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB1ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB1MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB1CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB1ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB2ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB2MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB2CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB2ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB3ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB3MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB3CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB3ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB4ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB4MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB4CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB4ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB5ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB5MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB5CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB5ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB6ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB6MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB6CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB6ASID);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB7ADDR);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB7MASK);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB7CTRL);
	set_gcsr_sw_flag(LOONGARCH_CSR_IB7ASID);

	set_gcsr_sw_flag(LOONGARCH_CSR_PERFCTRL0);
	set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR0);
	set_gcsr_sw_flag(LOONGARCH_CSR_PERFCTRL1);
	set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR1);
	set_gcsr_sw_flag(LOONGARCH_CSR_PERFCTRL2);
	set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR2);
	set_gcsr_sw_flag(LOONGARCH_CSR_PERFCTRL3);
	set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR3);
}

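/*
 * VPID allocation: the low vpid_mask bits of a vpid are the hardware
 * GID programmed into CSR.GSTAT, and the bits above them act as a
 * generation counter. When the low bits wrap around, the guest TLB is
 * flushed and a new generation starts, so translations tagged in an
 * earlier cycle can never be hit again.
 */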
static void kvm_update_vpid(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long vpid;
	struct kvm_context *context;

	context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
	vpid = context->vpid_cache + 1;
	if (!(vpid & vpid_mask)) {
		/* finish round of vpid loop */
		if (unlikely(!vpid))
			vpid = vpid_mask + 1;

		++vpid; /* vpid 0 reserved for root */

		/* start new vpid cycle */
		kvm_flush_tlb_all();
	}

	context->vpid_cache = vpid;
	vcpu->arch.vpid = vpid;
}

void kvm_check_vpid(struct kvm_vcpu *vcpu)
{
	int cpu;
	bool migrated;
	unsigned long ver, old, vpid;
	struct kvm_context *context;

	cpu = smp_processor_id();
	/*
	 * Are we entering guest context on a different CPU than last
	 * time? If so, the vCPU's guest TLB state on this CPU may be
	 * stale.
	 */
	context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
	migrated = (vcpu->cpu != cpu);

	/*
	 * Check if our vpid is of an older version.
	 *
	 * We also discard the stored vpid if we have executed on
	 * another CPU, as the guest mappings may have changed without
	 * the hypervisor's knowledge.
	 */
	ver = vcpu->arch.vpid & ~vpid_mask;
	old = context->vpid_cache & ~vpid_mask;
	if (migrated || (ver != old)) {
		kvm_update_vpid(vcpu, cpu);
		trace_kvm_vpid_change(vcpu, vcpu->arch.vpid);
		vcpu->cpu = cpu;
		kvm_clear_request(KVM_REQ_TLB_FLUSH_GPA, vcpu);

		/*
		 * LLBCTL is a guest CSR register separate from the host one.
		 * An ERET for a general exception clears the host LLBCTL
		 * register in host mode and the guest LLBCTL register in
		 * guest mode; an ERET in a TLB refill exception does not
		 * clear LLBCTL at all.
		 *
		 * When the secondary MMU mapping changes, the guest OS has
		 * no way of knowing that the underlying memory contents may
		 * have changed with it.
		 *
		 * So clear WCLLB in the guest LLBCTL register whenever the
		 * mapping changes. Otherwise, if the mapping changes while
		 * the guest is executing an LL/SC pair, the LL loads from
		 * the old address and sets the LLBCTL flag, and the SC then
		 * stores to the new address successfully because
		 * LLBCTL_WCLLB is still set, even if the memory at the new
		 * address was modified by other vCPUs.
		 */
		set_gcsr_llbctl(CSR_LLBCTL_WCLLB);
	}

	/* Restore GSTAT(0x50).vpid */
	vpid = (vcpu->arch.vpid & vpid_mask) << CSR_GSTAT_GID_SHIFT;
	change_csr_gstat(vpid_mask << CSR_GSTAT_GID_SHIFT, vpid);
}

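/* Every VM uses the single global per-CPU context array allocated at init */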
void kvm_init_vmcs(struct kvm *kvm)
{
	kvm->arch.vmcs = vmcs;
}

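/* No LoongArch-specific device ioctls are implemented */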
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_enable_virtualization_cpu(void)
{
	unsigned long env, gcfg = 0;

	env = read_csr_gcfg();

	/* First init gcfg, gstat, gintc and gtlbc; all guests use the same config */
	write_csr_gcfg(0);
	write_csr_gstat(0);
	write_csr_gintc(0);
	clear_csr_gtlbc(CSR_GTLBC_USETGID | CSR_GTLBC_TOTI);

	/*
	 * Enable virtualization features, granting the guest direct
	 * control of certain features:
	 * GCI=2:    Trap on init or unimplemented cache instructions.
	 * TORU=0:   Trap on Root Unimplemented.
	 * CACTRL=1: Root controls the cache.
	 * TOP=0:    Trap on Privilege.
	 * TOE=0:    Trap on Exception.
	 * TIT=0:    Trap on Timer.
	 */
	if (env & CSR_GCFG_GCIP_SECURE)
		gcfg |= CSR_GCFG_GCI_SECURE;
	if (env & CSR_GCFG_MATP_ROOT)
		gcfg |= CSR_GCFG_MATC_ROOT;

	write_csr_gcfg(gcfg);

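	/* Flush the whole TLB so no stale entries survive before TGID use is enabled below */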
	kvm_flush_tlb_all();

	/* Enable using TGID */
	set_csr_gtlbc(CSR_GTLBC_USETGID);
	kvm_debug("GCFG:%lx GSTAT:%lx GINTC:%lx GTLBC:%lx",
		  read_csr_gcfg(), read_csr_gstat(), read_csr_gintc(), read_csr_gtlbc());

	/*
	 * Hardware guest CSR registers are lost across CPU suspend and
	 * resume. Clear last_vcpu so that the guest CSR registers are
	 * forced to reload from the vCPU's software state.
	 */
	this_cpu_ptr(vmcs)->last_vcpu = NULL;

	return 0;
}

void kvm_arch_disable_virtualization_cpu(void)
{
	write_csr_gcfg(0);
	write_csr_gstat(0);
	write_csr_gintc(0);
	clear_csr_gtlbc(CSR_GTLBC_USETGID | CSR_GTLBC_TOTI);

	/* Flush any remaining guest TLB entries */
	kvm_flush_tlb_all();
}

static int kvm_loongarch_env_init(void)
{
	int cpu, order, ret;
	void *addr;
	struct kvm_context *context;

	vmcs = alloc_percpu(struct kvm_context);
	if (!vmcs) {
		pr_err("kvm: failed to allocate percpu kvm_context\n");
		return -ENOMEM;
	}

	kvm_loongarch_ops = kzalloc(sizeof(*kvm_loongarch_ops), GFP_KERNEL);
	if (!kvm_loongarch_ops) {
		free_percpu(vmcs);
		vmcs = NULL;
		return -ENOMEM;
	}

	/*
	 * The PGD register is shared between the root kernel and the
	 * kvm hypervisor, so the world switch entry must live in a DMW
	 * (direct-mapped window) area rather than a TLB-mapped area to
	 * avoid re-entering the page fault handler.
	 *
	 * In the future, if hardware page table walking is supported,
	 * the world switch code will no longer need to be copied into
	 * a DMW area.
	 */
	order = get_order(kvm_exception_size + kvm_enter_guest_size);
	addr = (void *)__get_free_pages(GFP_KERNEL, order);
	if (!addr) {
		free_percpu(vmcs);
		vmcs = NULL;
		kfree(kvm_loongarch_ops);
		kvm_loongarch_ops = NULL;
		return -ENOMEM;
	}

	memcpy(addr, kvm_exc_entry, kvm_exception_size);
	memcpy(addr + kvm_exception_size, kvm_enter_guest, kvm_enter_guest_size);
	flush_icache_range((unsigned long)addr, (unsigned long)addr + kvm_exception_size + kvm_enter_guest_size);
	kvm_loongarch_ops->exc_entry = addr;
	kvm_loongarch_ops->enter_guest = addr + kvm_exception_size;
	kvm_loongarch_ops->page_order = order;

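	/*
	 * CSR_GSTAT.GIDBIT reports how many GID bits the hardware
	 * implements; turn that bit width into a low-bit VPID mask
	 * (e.g. GIDBIT = 8 yields a mask of 0xff).
	 */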
	vpid_mask = read_csr_gstat();
	vpid_mask = (vpid_mask & CSR_GSTAT_GIDBIT) >> CSR_GSTAT_GIDBIT_SHIFT;
	if (vpid_mask)
		vpid_mask = GENMASK(vpid_mask - 1, 0);

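	/*
	 * Seed each CPU's vpid_cache one generation in, so that a
	 * vCPU's initial vpid of 0 always looks stale and a fresh
	 * VPID is allocated on its first guest entry.
	 */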
	for_each_possible_cpu(cpu) {
		context = per_cpu_ptr(vmcs, cpu);
		context->vpid_cache = vpid_mask + 1;
		context->last_vcpu = NULL;
	}

	kvm_init_gcsr_flag();

	/* Register LoongArch IPI interrupt controller interface. */
	ret = kvm_loongarch_register_ipi_device();
	if (ret)
		return ret;

	/* Register LoongArch EIOINTC interrupt controller interface. */
	ret = kvm_loongarch_register_eiointc_device();
	if (ret)
		return ret;

	/* Register LoongArch PCH-PIC interrupt controller interface. */
	ret = kvm_loongarch_register_pch_pic_device();

	return ret;
}

static void kvm_loongarch_env_exit(void)
{
	unsigned long addr;

	if (vmcs)
		free_percpu(vmcs);

	if (kvm_loongarch_ops) {
		if (kvm_loongarch_ops->exc_entry) {
			addr = (unsigned long)kvm_loongarch_ops->exc_entry;
			free_pages(addr, kvm_loongarch_ops->page_order);
		}
		kfree(kvm_loongarch_ops);
	}
}

static int kvm_loongarch_init(void)
{
	int r;

	if (!cpu_has_lvz) {
		kvm_info("Hardware virtualization not available\n");
		return -ENODEV;
	}
	r = kvm_loongarch_env_init();
	if (r)
		return r;

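	/* Register with the generic KVM core, passing the vCPU size and alignment */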
	return kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}

static void kvm_loongarch_exit(void)
{
	kvm_exit();
	kvm_loongarch_env_exit();
}

module_init(kvm_loongarch_init);
module_exit(kvm_loongarch_exit);

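/*
 * When built as a module, export a CPU feature table so that the
 * module is automatically loaded on processors advertising the LVZ
 * (LoongArch Virtualization) extension.
 */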
#ifdef MODULE
static const struct cpu_feature kvm_feature[] = {
	{ .feature = cpu_feature(LOONGARCH_LVZ) },
	{},
};
MODULE_DEVICE_TABLE(cpu, kvm_feature);
#endif