// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2022 Ventana Micro Systems Inc.
 */

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/kvm_host.h>
#include <asm/cacheflush.h>
#include <asm/csr.h>
#include <asm/cpufeature.h>
#include <asm/insn-def.h>

#define has_svinval()	riscv_has_extension_unlikely(RISCV_ISA_EXT_SVINVAL)

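/*
 * Flush G-stage TLB entries for guest physical addresses in the range
 * [gpa, gpa + gpsz) belonging to the given VMID on the local CPU. If the
 * range spans more than PTRS_PER_PTE entries of size BIT(order), a full
 * VMID flush is cheaper and is used instead. When the Svinval extension
 * is available, the per-address invalidations are issued between
 * SFENCE.W.INVAL and SFENCE.INVAL.IR.
 */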
void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
					  gpa_t gpa, gpa_t gpsz,
					  unsigned long order)
{
	gpa_t pos;

	if (PTRS_PER_PTE < (gpsz >> order)) {
		kvm_riscv_local_hfence_gvma_vmid_all(vmid);
		return;
	}

	if (has_svinval()) {
		asm volatile (SFENCE_W_INVAL() ::: "memory");
		for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
			asm volatile (HINVAL_GVMA(%0, %1)
			: : "r" (pos >> 2), "r" (vmid) : "memory");
		asm volatile (SFENCE_INVAL_IR() ::: "memory");
	} else {
		for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
			asm volatile (HFENCE_GVMA(%0, %1)
			: : "r" (pos >> 2), "r" (vmid) : "memory");
	}
}

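/* Flush all G-stage TLB entries for the given VMID on the local CPU. */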
void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid)
{
	asm volatile(HFENCE_GVMA(zero, %0) : : "r" (vmid) : "memory");
}

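/*
 * Flush G-stage TLB entries for the range [gpa, gpa + gpsz) across all
 * VMIDs on the local CPU, falling back to a full flush for large ranges.
 */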
void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
				     unsigned long order)
{
	gpa_t pos;

	if (PTRS_PER_PTE < (gpsz >> order)) {
		kvm_riscv_local_hfence_gvma_all();
		return;
	}

	if (has_svinval()) {
		asm volatile (SFENCE_W_INVAL() ::: "memory");
		for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
			asm volatile(HINVAL_GVMA(%0, zero)
			: : "r" (pos >> 2) : "memory");
		asm volatile (SFENCE_INVAL_IR() ::: "memory");
	} else {
		for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
			asm volatile(HFENCE_GVMA(%0, zero)
			: : "r" (pos >> 2) : "memory");
	}
}

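/* Flush all G-stage TLB entries for all VMIDs on the local CPU. */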
void kvm_riscv_local_hfence_gvma_all(void)
{
	asm volatile(HFENCE_GVMA(zero, zero) : : : "memory");
}

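/*
 * Flush VS-stage TLB entries for guest virtual addresses in the range
 * [gva, gva + gvsz) tagged with the given guest ASID. HFENCE.VVMA acts on
 * the VMID currently programmed in hgatp, so hgatp is temporarily switched
 * to the target VMID and restored afterwards.
 */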
void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
					  unsigned long asid,
					  unsigned long gva,
					  unsigned long gvsz,
					  unsigned long order)
{
	unsigned long pos, hgatp;

	if (PTRS_PER_PTE < (gvsz >> order)) {
		kvm_riscv_local_hfence_vvma_asid_all(vmid, asid);
		return;
	}

	hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

	if (has_svinval()) {
		asm volatile (SFENCE_W_INVAL() ::: "memory");
		for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
			asm volatile(HINVAL_VVMA(%0, %1)
			: : "r" (pos), "r" (asid) : "memory");
		asm volatile (SFENCE_INVAL_IR() ::: "memory");
	} else {
		for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
			asm volatile(HFENCE_VVMA(%0, %1)
			: : "r" (pos), "r" (asid) : "memory");
	}

	csr_write(CSR_HGATP, hgatp);
}

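/*
 * Flush all VS-stage TLB entries for the given guest ASID under the given
 * VMID on the local CPU.
 */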
void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
					  unsigned long asid)
{
	unsigned long hgatp;

	hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

	asm volatile(HFENCE_VVMA(zero, %0) : : "r" (asid) : "memory");

	csr_write(CSR_HGATP, hgatp);
}

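/*
 * Flush VS-stage TLB entries for the range [gva, gva + gvsz) under the
 * given VMID, across all guest ASIDs, on the local CPU.
 */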
void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
				     unsigned long gva, unsigned long gvsz,
				     unsigned long order)
{
	unsigned long pos, hgatp;

	if (PTRS_PER_PTE < (gvsz >> order)) {
		kvm_riscv_local_hfence_vvma_all(vmid);
		return;
	}

	hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

	if (has_svinval()) {
		asm volatile (SFENCE_W_INVAL() ::: "memory");
		for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
			asm volatile(HINVAL_VVMA(%0, zero)
			: : "r" (pos) : "memory");
		asm volatile (SFENCE_INVAL_IR() ::: "memory");
	} else {
		for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
			asm volatile(HFENCE_VVMA(%0, zero)
			: : "r" (pos) : "memory");
	}

	csr_write(CSR_HGATP, hgatp);
}

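/* Flush all VS-stage TLB entries for the given VMID on the local CPU. */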
void kvm_riscv_local_hfence_vvma_all(unsigned long vmid)
{
	unsigned long hgatp;

	hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

	asm volatile(HFENCE_VVMA(zero, zero) : : : "memory");

	csr_write(CSR_HGATP, hgatp);
}

void kvm_riscv_local_tlb_sanitize(struct kvm_vcpu *vcpu)
{
	unsigned long vmid;

	if (!kvm_riscv_gstage_vmid_bits() ||
	    vcpu->arch.last_exit_cpu == vcpu->cpu)
		return;

	/*
	 * On RISC-V platforms with hardware VMID support, we share the
	 * same VMID for all VCPUs of a particular Guest/VM. This means
	 * we might have stale G-stage TLB entries on the current Host
	 * CPU due to some other VCPU of the same Guest which ran
	 * previously on the current Host CPU.
	 *
	 * To clean up stale TLB entries, we simply flush all G-stage
	 * TLB entries by VMID whenever the underlying Host CPU changes
	 * for a VCPU.
	 */

	vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
	kvm_riscv_local_hfence_gvma_vmid_all(vmid);
}

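/*
 * Process a remote FENCE.I request on the receiving VCPU: account the
 * SBI firmware PMU event and flush the local instruction cache.
 */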
void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu)
{
	kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_FENCE_I_RCVD);
	local_flush_icache_all();
}

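/*
 * Process a remote HFENCE.GVMA request: flush all G-stage TLB entries
 * for this Guest's VMID on the local CPU.
 */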
void kvm_riscv_hfence_gvma_vmid_all_process(struct kvm_vcpu *vcpu)
{
	struct kvm_vmid *vmid;

	vmid = &vcpu->kvm->arch.vmid;
	kvm_riscv_local_hfence_gvma_vmid_all(READ_ONCE(vmid->vmid));
}

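/*
 * Process a remote HFENCE.VVMA request: flush all VS-stage TLB entries
 * for this Guest's VMID on the local CPU.
 */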
void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu)
{
	struct kvm_vmid *vmid;

	vmid = &vcpu->kvm->arch.vmid;
	kvm_riscv_local_hfence_vvma_all(READ_ONCE(vmid->vmid));
}

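/*
 * Pop the oldest pending hfence request from the VCPU's fixed-size ring
 * buffer. A zero type marks an empty slot. Returns true if a request was
 * dequeued into *out_data.
 */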
static bool vcpu_hfence_dequeue(struct kvm_vcpu *vcpu,
				struct kvm_riscv_hfence *out_data)
{
	bool ret = false;
	struct kvm_vcpu_arch *varch = &vcpu->arch;

	spin_lock(&varch->hfence_lock);

	if (varch->hfence_queue[varch->hfence_head].type) {
		memcpy(out_data, &varch->hfence_queue[varch->hfence_head],
		       sizeof(*out_data));
		varch->hfence_queue[varch->hfence_head].type = 0;

		varch->hfence_head++;
		if (varch->hfence_head == KVM_RISCV_VCPU_MAX_HFENCE)
			varch->hfence_head = 0;

		ret = true;
	}

	spin_unlock(&varch->hfence_lock);

	return ret;
}

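/*
 * Push an hfence request onto the VCPU's ring buffer. Returns false if
 * the tail slot is still occupied (queue full), in which case the caller
 * falls back to a coarser flush request.
 */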
static bool vcpu_hfence_enqueue(struct kvm_vcpu *vcpu,
				const struct kvm_riscv_hfence *data)
{
	bool ret = false;
	struct kvm_vcpu_arch *varch = &vcpu->arch;

	spin_lock(&varch->hfence_lock);

	if (!varch->hfence_queue[varch->hfence_tail].type) {
		memcpy(&varch->hfence_queue[varch->hfence_tail],
		       data, sizeof(*data));

		varch->hfence_tail++;
		if (varch->hfence_tail == KVM_RISCV_VCPU_MAX_HFENCE)
			varch->hfence_tail = 0;

		ret = true;
	}

	spin_unlock(&varch->hfence_lock);

	return ret;
}

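/*
 * Drain the VCPU's hfence queue, performing the local TLB flush that each
 * queued request describes and accounting the matching SBI firmware PMU
 * event where one is defined.
 */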
void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu)
{
	struct kvm_riscv_hfence d = { 0 };
	struct kvm_vmid *v = &vcpu->kvm->arch.vmid;

	while (vcpu_hfence_dequeue(vcpu, &d)) {
		switch (d.type) {
		case KVM_RISCV_HFENCE_UNKNOWN:
			break;
		case KVM_RISCV_HFENCE_GVMA_VMID_GPA:
			kvm_riscv_local_hfence_gvma_vmid_gpa(
						READ_ONCE(v->vmid),
						d.addr, d.size, d.order);
			break;
		case KVM_RISCV_HFENCE_VVMA_ASID_GVA:
			kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD);
			kvm_riscv_local_hfence_vvma_asid_gva(
						READ_ONCE(v->vmid), d.asid,
						d.addr, d.size, d.order);
			break;
		case KVM_RISCV_HFENCE_VVMA_ASID_ALL:
			kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD);
			kvm_riscv_local_hfence_vvma_asid_all(
						READ_ONCE(v->vmid), d.asid);
			break;
		case KVM_RISCV_HFENCE_VVMA_GVA:
			kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_RCVD);
			kvm_riscv_local_hfence_vvma_gva(
						READ_ONCE(v->vmid),
						d.addr, d.size, d.order);
			break;
		default:
			break;
		}
	}
}

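/*
 * Build the set of target VCPUs from the (hbase, hmask) hart mask (a base
 * of -1UL selects all VCPUs) and raise the given KVM request on each of
 * them. Per-VCPU hfence data, if any, is enqueued first; if any VCPU's
 * queue is full, the coarser fallback_req is raised for the targeted
 * VCPUs instead.
 */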
static void make_xfence_request(struct kvm *kvm,
				unsigned long hbase, unsigned long hmask,
				unsigned int req, unsigned int fallback_req,
				const struct kvm_riscv_hfence *data)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;
	unsigned int actual_req = req;
	DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS);

	bitmap_zero(vcpu_mask, KVM_MAX_VCPUS);
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (hbase != -1UL) {
			if (vcpu->vcpu_id < hbase)
				continue;
			if (!(hmask & (1UL << (vcpu->vcpu_id - hbase))))
				continue;
		}

		bitmap_set(vcpu_mask, i, 1);

		if (!data || !data->type)
			continue;

		/*
		 * Enqueue hfence data to the VCPU hfence queue. If we don't
		 * have space in the VCPU hfence queue then fall back to
		 * a more conservative hfence request.
		 */
		if (!vcpu_hfence_enqueue(vcpu, data))
			actual_req = fallback_req;
	}

	kvm_make_vcpus_request_mask(kvm, actual_req, vcpu_mask);
}

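/*
 * Request a FENCE.I on the VCPUs selected by (hbase, hmask); there is no
 * per-VCPU payload, so the request and its fallback are identical.
 */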
void kvm_riscv_fence_i(struct kvm *kvm,
		       unsigned long hbase, unsigned long hmask)
{
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_FENCE_I,
			    KVM_REQ_FENCE_I, NULL);
}

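/*
 * Request a range-based G-stage flush of [gpa, gpa + gpsz) on the selected
 * VCPUs, degrading to KVM_REQ_HFENCE_GVMA_VMID_ALL if a target queue is
 * full.
 */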
void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    gpa_t gpa, gpa_t gpsz,
				    unsigned long order)
{
	struct kvm_riscv_hfence data;

	data.type = KVM_RISCV_HFENCE_GVMA_VMID_GPA;
	data.asid = 0;
	data.addr = gpa;
	data.size = gpsz;
	data.order = order;
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
			    KVM_REQ_HFENCE_GVMA_VMID_ALL, &data);
}

void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask)
{
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE_GVMA_VMID_ALL,
			    KVM_REQ_HFENCE_GVMA_VMID_ALL, NULL);
}

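/*
 * Request a VS-stage flush of [gva, gva + gvsz) for the given guest ASID
 * on the selected VCPUs, degrading to KVM_REQ_HFENCE_VVMA_ALL if a target
 * queue is full.
 */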
void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long gva, unsigned long gvsz,
				    unsigned long order, unsigned long asid)
{
	struct kvm_riscv_hfence data;

	data.type = KVM_RISCV_HFENCE_VVMA_ASID_GVA;
	data.asid = asid;
	data.addr = gva;
	data.size = gvsz;
	data.order = order;
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
			    KVM_REQ_HFENCE_VVMA_ALL, &data);
}

void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long asid)
{
	struct kvm_riscv_hfence data;

	data.type = KVM_RISCV_HFENCE_VVMA_ASID_ALL;
	data.asid = asid;
	data.addr = data.size = data.order = 0;
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
			    KVM_REQ_HFENCE_VVMA_ALL, &data);
}

void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask,
			       unsigned long gva, unsigned long gvsz,
			       unsigned long order)
{
	struct kvm_riscv_hfence data;

	data.type = KVM_RISCV_HFENCE_VVMA_GVA;
	data.asid = 0;
	data.addr = gva;
	data.size = gvsz;
	data.order = order;
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
			    KVM_REQ_HFENCE_VVMA_ALL, &data);
}

void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask)
{
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE_VVMA_ALL,
			    KVM_REQ_HFENCE_VVMA_ALL, NULL);
}