xref: /linux/arch/riscv/kvm/main.c (revision 6093a688a07da07808f0122f9aa2a3eed250d853)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Anup Patel <anup.patel@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/kvm_host.h>
#include <asm/cpufeature.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nacl.h>
#include <asm/sbi.h>

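/* No RISC-V specific ioctls are handled on the /dev/kvm device; reject them all. */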
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

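/*
 * Per-CPU setup when KVM enables virtualization: turn on SBI nested
 * acceleration if present, delegate the default guest exceptions and
 * interrupts, restrict guest counter access, clear pending virtual
 * interrupts, and enable the AIA.
 */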
int kvm_arch_enable_virtualization_cpu(void)
{
	int rc;

	rc = kvm_riscv_nacl_enable();
	if (rc)
		return rc;

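	/* Delegate the default set of guest exceptions and interrupts to VS-mode. */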
	csr_write(CSR_HEDELEG, KVM_HEDELEG_DEFAULT);
	csr_write(CSR_HIDELEG, KVM_HIDELEG_DEFAULT);

	/* VS should access only the time counter directly. Everything else should trap */
	csr_write(CSR_HCOUNTEREN, 0x02);

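	/* Start with no virtual interrupts pending for the guest. */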
	csr_write(CSR_HVIP, 0);

	kvm_riscv_aia_enable();

	return 0;
}

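/*
 * Per-CPU teardown, mirroring the enable path: disable the AIA, clear the
 * guest interrupt state before revoking delegation (see the comment below),
 * then disable nested acceleration.
 */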
void kvm_arch_disable_virtualization_cpu(void)
{
	kvm_riscv_aia_disable();

	/*
	 * After clearing the hideleg CSR, the host kernel will receive
	 * spurious interrupts if hvip CSR has pending interrupts and the
	 * corresponding enable bits in vsie CSR are asserted. To avoid it,
	 * hvip CSR and vsie CSR must be cleared before clearing hideleg CSR.
	 */
	csr_write(CSR_VSIE, 0);
	csr_write(CSR_HVIP, 0);
	csr_write(CSR_HEDELEG, 0);
	csr_write(CSR_HIDELEG, 0);

	kvm_riscv_nacl_disable();
}

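/* Undo the global initialization done by riscv_kvm_init(). */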
static void kvm_riscv_teardown(void)
{
	kvm_riscv_aia_exit();
	kvm_riscv_nacl_exit();
	kvm_unregister_perf_callbacks();
}

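/*
 * Module/built-in initialization: verify the H extension and the required
 * SBI support, probe optional nested acceleration and AIA, detect the
 * G-stage translation mode and VMID width, then register with the generic
 * KVM core.
 */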
static int __init riscv_kvm_init(void)
{
	int rc;
	char slist[64];
	const char *str;

	if (!riscv_isa_extension_available(NULL, h)) {
		kvm_info("hypervisor extension not available\n");
		return -ENODEV;
	}

	if (sbi_spec_is_0_1()) {
		kvm_info("require SBI v0.2 or higher\n");
		return -ENODEV;
	}

	if (!sbi_probe_extension(SBI_EXT_RFENCE)) {
		kvm_info("require SBI RFENCE extension\n");
		return -ENODEV;
	}

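	/* Nested acceleration is optional: -ENODEV only means the SBI NACL extension is absent. */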
	rc = kvm_riscv_nacl_init();
	if (rc && rc != -ENODEV)
		return rc;

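	/* Detect which hgatp mode the hardware supports for G-stage translation. */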
	kvm_riscv_gstage_mode_detect();
	switch (kvm_riscv_gstage_mode) {
	case HGATP_MODE_SV32X4:
		str = "Sv32x4";
		break;
	case HGATP_MODE_SV39X4:
		str = "Sv39x4";
		break;
	case HGATP_MODE_SV48X4:
		str = "Sv48x4";
		break;
	case HGATP_MODE_SV57X4:
		str = "Sv57x4";
		break;
	default:
		kvm_riscv_nacl_exit();
		return -ENODEV;
	}

	kvm_riscv_gstage_vmid_detect();

	rc = kvm_riscv_aia_init();
	if (rc && rc != -ENODEV) {
		kvm_riscv_nacl_exit();
		return rc;
	}

	kvm_info("hypervisor extension available\n");

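	/*
	 * Report which optional NACL features were detected; rc doubles as a
	 * running count so a ", " separator is only added between entries.
	 */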
	if (kvm_riscv_nacl_available()) {
		rc = 0;
		slist[0] = '\0';
		if (kvm_riscv_nacl_sync_csr_available()) {
			if (rc)
				strcat(slist, ", ");
			strcat(slist, "sync_csr");
			rc++;
		}
		if (kvm_riscv_nacl_sync_hfence_available()) {
			if (rc)
				strcat(slist, ", ");
			strcat(slist, "sync_hfence");
			rc++;
		}
		if (kvm_riscv_nacl_sync_sret_available()) {
			if (rc)
				strcat(slist, ", ");
			strcat(slist, "sync_sret");
			rc++;
		}
		if (kvm_riscv_nacl_autoswap_csr_available()) {
			if (rc)
				strcat(slist, ", ");
			strcat(slist, "autoswap_csr");
			rc++;
		}
		kvm_info("using SBI nested acceleration with %s\n",
			 (rc) ? slist : "no features");
	}

	kvm_info("using %s G-stage page table format\n", str);

	kvm_info("VMID %ld bits available\n", kvm_riscv_gstage_vmid_bits());

	if (kvm_riscv_aia_available())
		kvm_info("AIA available with %d guest external interrupts\n",
			 kvm_riscv_aia_nr_hgei);

	kvm_register_perf_callbacks(NULL);

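	/* Register with the generic KVM core; roll back the arch setup on failure. */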
	rc = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (rc) {
		kvm_riscv_teardown();
		return rc;
	}

	return 0;
}
module_init(riscv_kvm_init);

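/* Module unload: unregister from the KVM core before undoing the arch-level setup. */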
static void __exit riscv_kvm_exit(void)
{
	kvm_exit();

	kvm_riscv_teardown();
}
module_exit(riscv_kvm_exit);