// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2022 Ventana Micro Systems Inc.
 */

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/kvm_host.h>
#include <asm/cacheflush.h>
#include <asm/csr.h>
#include <asm/cpufeature.h>
#include <asm/insn-def.h>
#include <asm/kvm_nacl.h>
#include <asm/kvm_tlb.h>
#include <asm/kvm_vmid.h>

#define has_svinval()	riscv_has_extension_unlikely(RISCV_ISA_EXT_SVINVAL)

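/*
 * Flush G-stage (guest-physical to host-physical) TLB entries for the
 * given VMID and guest physical address range on the local hart. Per the
 * hypervisor extension, rs1 of HFENCE.GVMA/HINVAL.GVMA holds the guest
 * physical address shifted right by 2, hence the "pos >> 2" below.
 */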
void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
                                          gpa_t gpa, gpa_t gpsz,
                                          unsigned long order)
{
        gpa_t pos;

        if (PTRS_PER_PTE < (gpsz >> order)) {
                kvm_riscv_local_hfence_gvma_vmid_all(vmid);
                return;
        }

        if (has_svinval()) {
                asm volatile (SFENCE_W_INVAL() ::: "memory");
                for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
                        asm volatile (HINVAL_GVMA(%0, %1)
                        : : "r" (pos >> 2), "r" (vmid) : "memory");
                asm volatile (SFENCE_INVAL_IR() ::: "memory");
        } else {
                for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
                        asm volatile (HFENCE_GVMA(%0, %1)
                        : : "r" (pos >> 2), "r" (vmid) : "memory");
        }
}

void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid)
{
        asm volatile(HFENCE_GVMA(zero, %0) : : "r" (vmid) : "memory");
}

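/*
 * Same as above but across all VMIDs: flush G-stage TLB entries covering
 * the guest physical address range on the local hart, with rs2 set to the
 * zero register so no VMID filtering is applied.
 */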
void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
                                     unsigned long order)
{
        gpa_t pos;

        if (PTRS_PER_PTE < (gpsz >> order)) {
                kvm_riscv_local_hfence_gvma_all();
                return;
        }

        if (has_svinval()) {
                asm volatile (SFENCE_W_INVAL() ::: "memory");
                for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
                        asm volatile(HINVAL_GVMA(%0, zero)
                        : : "r" (pos >> 2) : "memory");
                asm volatile (SFENCE_INVAL_IR() ::: "memory");
        } else {
                for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
                        asm volatile(HFENCE_GVMA(%0, zero)
                        : : "r" (pos >> 2) : "memory");
        }
}

void kvm_riscv_local_hfence_gvma_all(void)
{
        asm volatile(HFENCE_GVMA(zero, zero) : : : "memory");
}

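/*
 * Flush VS-stage (guest-virtual to guest-physical) TLB entries for the
 * given guest ASID and guest virtual address range on the local hart.
 * HFENCE.VVMA/HINVAL.VVMA act on the VMID currently programmed in hgatp,
 * so the target VMID is temporarily swapped in and restored afterwards.
 */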
void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
                                          unsigned long asid,
                                          unsigned long gva,
                                          unsigned long gvsz,
                                          unsigned long order)
{
        unsigned long pos, hgatp;

        if (PTRS_PER_PTE < (gvsz >> order)) {
                kvm_riscv_local_hfence_vvma_asid_all(vmid, asid);
                return;
        }

        hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

        if (has_svinval()) {
                asm volatile (SFENCE_W_INVAL() ::: "memory");
                for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
                        asm volatile(HINVAL_VVMA(%0, %1)
                        : : "r" (pos), "r" (asid) : "memory");
                asm volatile (SFENCE_INVAL_IR() ::: "memory");
        } else {
                for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
                        asm volatile(HFENCE_VVMA(%0, %1)
                        : : "r" (pos), "r" (asid) : "memory");
        }

        csr_write(CSR_HGATP, hgatp);
}

void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
                                          unsigned long asid)
{
        unsigned long hgatp;

        hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

        asm volatile(HFENCE_VVMA(zero, %0) : : "r" (asid) : "memory");

        csr_write(CSR_HGATP, hgatp);
}

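/*
 * Flush VS-stage TLB entries for the given guest virtual address range
 * across all guest ASIDs of the given VMID on the local hart.
 */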
void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
                                     unsigned long gva, unsigned long gvsz,
                                     unsigned long order)
{
        unsigned long pos, hgatp;

        if (PTRS_PER_PTE < (gvsz >> order)) {
                kvm_riscv_local_hfence_vvma_all(vmid);
                return;
        }

        hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

        if (has_svinval()) {
                asm volatile (SFENCE_W_INVAL() ::: "memory");
                for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
                        asm volatile(HINVAL_VVMA(%0, zero)
                        : : "r" (pos) : "memory");
                asm volatile (SFENCE_INVAL_IR() ::: "memory");
        } else {
                for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
                        asm volatile(HFENCE_VVMA(%0, zero)
                        : : "r" (pos) : "memory");
        }

        csr_write(CSR_HGATP, hgatp);
}

void kvm_riscv_local_hfence_vvma_all(unsigned long vmid)
{
        unsigned long hgatp;

        hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

        asm volatile(HFENCE_VVMA(zero, zero) : : : "memory");

        csr_write(CSR_HGATP, hgatp);
}

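/*
 * Flush TLB entries that may have been left on this hart by another VCPU
 * of the same guest. Returns early when no VMID bits are implemented or
 * when the VCPU last exited on this same host CPU.
 */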
void kvm_riscv_local_tlb_sanitize(struct kvm_vcpu *vcpu)
{
        unsigned long vmid;

        if (!kvm_riscv_gstage_vmid_bits() ||
            vcpu->arch.last_exit_cpu == vcpu->cpu)
                return;

        /*
         * On RISC-V platforms with hardware VMID support, we share the same
         * VMID for all VCPUs of a particular Guest/VM. This means we might
         * have stale G-stage TLB entries on the current Host CPU left behind
         * by some other VCPU of the same Guest that previously ran on this
         * Host CPU.
         *
         * To clean up stale TLB entries, we simply flush all G-stage TLB
         * entries by VMID whenever the underlying Host CPU changes for a VCPU.
         */

        vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
        kvm_riscv_local_hfence_gvma_vmid_all(vmid);

        /*
         * Flush VS-stage TLB entries for implementations where the VS-stage
         * TLB does not cache the guest physical address and VMID.
         */
        if (static_branch_unlikely(&kvm_riscv_vsstage_tlb_no_gpa))
                kvm_riscv_local_hfence_vvma_all(vmid);
}

void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu)
{
        kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_FENCE_I_RCVD);
        local_flush_icache_all();
}

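/*
 * Flush all G-stage TLB entries for the guest's current VMID, via the
 * SBI nested acceleration (NACL) shared memory interface when available,
 * otherwise directly on the local hart.
 */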
void kvm_riscv_tlb_flush_process(struct kvm_vcpu *vcpu)
{
        struct kvm_vmid *v = &vcpu->kvm->arch.vmid;
        unsigned long vmid = READ_ONCE(v->vmid);

        if (kvm_riscv_nacl_available())
                nacl_hfence_gvma_vmid_all(nacl_shmem(), vmid);
        else
                kvm_riscv_local_hfence_gvma_vmid_all(vmid);
}

void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu)
{
        struct kvm_vmid *v = &vcpu->kvm->arch.vmid;
        unsigned long vmid = READ_ONCE(v->vmid);

        if (kvm_riscv_nacl_available())
                nacl_hfence_vvma_all(nacl_shmem(), vmid);
        else
                kvm_riscv_local_hfence_vvma_all(vmid);
}

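/*
 * Pop one entry from the VCPU's fixed-size hfence ring buffer. A slot
 * with a zero type marks an empty slot, so clearing the type after
 * copying releases the slot back to producers.
 */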
static bool vcpu_hfence_dequeue(struct kvm_vcpu *vcpu,
                                struct kvm_riscv_hfence *out_data)
{
        bool ret = false;
        struct kvm_vcpu_arch *varch = &vcpu->arch;

        spin_lock(&varch->hfence_lock);

        if (varch->hfence_queue[varch->hfence_head].type) {
                memcpy(out_data, &varch->hfence_queue[varch->hfence_head],
                       sizeof(*out_data));
                varch->hfence_queue[varch->hfence_head].type = 0;

                varch->hfence_head++;
                if (varch->hfence_head == KVM_RISCV_VCPU_MAX_HFENCE)
                        varch->hfence_head = 0;

                ret = true;
        }

        spin_unlock(&varch->hfence_lock);

        return ret;
}

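/*
 * Push one entry onto the VCPU's hfence ring buffer. Returns false if
 * the tail slot is still occupied (queue full), in which case the caller
 * falls back to a coarser flush request.
 */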
static bool vcpu_hfence_enqueue(struct kvm_vcpu *vcpu,
                                const struct kvm_riscv_hfence *data)
{
        bool ret = false;
        struct kvm_vcpu_arch *varch = &vcpu->arch;

        spin_lock(&varch->hfence_lock);

        if (!varch->hfence_queue[varch->hfence_tail].type) {
                memcpy(&varch->hfence_queue[varch->hfence_tail],
                       data, sizeof(*data));

                varch->hfence_tail++;
                if (varch->hfence_tail == KVM_RISCV_VCPU_MAX_HFENCE)
                        varch->hfence_tail = 0;

                ret = true;
        }

        spin_unlock(&varch->hfence_lock);

        return ret;
}

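/*
 * Drain this VCPU's hfence queue and perform each pending fence, using
 * the NACL shared memory interface when available and local HFENCE or
 * HINVAL instructions otherwise.
 */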
void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu)
{
        struct kvm_riscv_hfence d = { 0 };

        while (vcpu_hfence_dequeue(vcpu, &d)) {
                switch (d.type) {
                case KVM_RISCV_HFENCE_UNKNOWN:
                        break;
                case KVM_RISCV_HFENCE_GVMA_VMID_GPA:
                        if (kvm_riscv_nacl_available())
                                nacl_hfence_gvma_vmid(nacl_shmem(), d.vmid,
                                                      d.addr, d.size, d.order);
                        else
                                kvm_riscv_local_hfence_gvma_vmid_gpa(d.vmid, d.addr,
                                                                     d.size, d.order);
                        break;
                case KVM_RISCV_HFENCE_GVMA_VMID_ALL:
                        if (kvm_riscv_nacl_available())
                                nacl_hfence_gvma_vmid_all(nacl_shmem(), d.vmid);
                        else
                                kvm_riscv_local_hfence_gvma_vmid_all(d.vmid);
                        break;
                case KVM_RISCV_HFENCE_VVMA_ASID_GVA:
                        kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD);
                        if (kvm_riscv_nacl_available())
                                nacl_hfence_vvma_asid(nacl_shmem(), d.vmid, d.asid,
                                                      d.addr, d.size, d.order);
                        else
                                kvm_riscv_local_hfence_vvma_asid_gva(d.vmid, d.asid, d.addr,
                                                                     d.size, d.order);
                        break;
                case KVM_RISCV_HFENCE_VVMA_ASID_ALL:
                        kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD);
                        if (kvm_riscv_nacl_available())
                                nacl_hfence_vvma_asid_all(nacl_shmem(), d.vmid, d.asid);
                        else
                                kvm_riscv_local_hfence_vvma_asid_all(d.vmid, d.asid);
                        break;
                case KVM_RISCV_HFENCE_VVMA_GVA:
                        kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_RCVD);
                        if (kvm_riscv_nacl_available())
                                nacl_hfence_vvma(nacl_shmem(), d.vmid,
                                                 d.addr, d.size, d.order);
                        else
                                kvm_riscv_local_hfence_vvma_gva(d.vmid, d.addr,
                                                                d.size, d.order);
                        break;
                case KVM_RISCV_HFENCE_VVMA_ALL:
                        kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_RCVD);
                        if (kvm_riscv_nacl_available())
                                nacl_hfence_vvma_all(nacl_shmem(), d.vmid);
                        else
                                kvm_riscv_local_hfence_vvma_all(d.vmid);
                        break;
                default:
                        break;
                }
        }
}

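/*
 * Build the set of target VCPUs described by (hbase, hmask), enqueue the
 * optional hfence data on each of them, and then kick the targets with a
 * VCPU request. hbase == -1UL means "all VCPUs"; otherwise a VCPU is
 * selected when bit (vcpu_id - hbase) is set in hmask. If any VCPU's
 * hfence queue is full, the request is downgraded to the more
 * conservative fallback_req for the whole set.
 */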
static void make_xfence_request(struct kvm *kvm,
                                unsigned long hbase, unsigned long hmask,
                                unsigned int req, unsigned int fallback_req,
                                const struct kvm_riscv_hfence *data)
{
        unsigned long i;
        struct kvm_vcpu *vcpu;
        unsigned int actual_req = req;
        DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS);

        bitmap_zero(vcpu_mask, KVM_MAX_VCPUS);
        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (hbase != -1UL) {
                        if (vcpu->vcpu_id < hbase)
                                continue;
                        if (!(hmask & (1UL << (vcpu->vcpu_id - hbase))))
                                continue;
                }

                bitmap_set(vcpu_mask, i, 1);

                if (!data || !data->type)
                        continue;

                /*
                 * Enqueue hfence data to VCPU hfence queue. If we don't
                 * have space in the VCPU hfence queue then fall back to
                 * a more conservative hfence request.
                 */
                if (!vcpu_hfence_enqueue(vcpu, data))
                        actual_req = fallback_req;
        }

        kvm_make_vcpus_request_mask(kvm, actual_req, vcpu_mask);
}

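/*
 * Request an instruction-cache flush (FENCE.I) on the selected VCPUs.
 * No hfence data is queued here, so the fallback request is simply the
 * same as the primary request.
 */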
void kvm_riscv_fence_i(struct kvm *kvm,
                       unsigned long hbase, unsigned long hmask)
{
        make_xfence_request(kvm, hbase, hmask, KVM_REQ_FENCE_I,
                            KVM_REQ_FENCE_I, NULL);
}

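/*
 * Queue a ranged G-stage (guest physical address) fence for the given
 * VMID on the selected VCPUs. Falls back to a full TLB flush request
 * when a VCPU's hfence queue is full.
 */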
void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
                                    unsigned long hbase, unsigned long hmask,
                                    gpa_t gpa, gpa_t gpsz,
                                    unsigned long order, unsigned long vmid)
{
        struct kvm_riscv_hfence data;

        data.type = KVM_RISCV_HFENCE_GVMA_VMID_GPA;
        data.asid = 0;
        data.vmid = vmid;
        data.addr = gpa;
        data.size = gpsz;
        data.order = order;
        make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
                            KVM_REQ_TLB_FLUSH, &data);
}

void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
                                    unsigned long hbase, unsigned long hmask,
                                    unsigned long vmid)
{
        struct kvm_riscv_hfence data = {0};

        data.type = KVM_RISCV_HFENCE_GVMA_VMID_ALL;
        data.vmid = vmid;
        make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
                            KVM_REQ_TLB_FLUSH, &data);
}

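/*
 * Queue a ranged VS-stage (guest virtual address) fence restricted to one
 * guest ASID on the selected VCPUs, falling back to a full VS-stage flush
 * when a VCPU's hfence queue is full.
 */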
void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
                                    unsigned long hbase, unsigned long hmask,
                                    unsigned long gva, unsigned long gvsz,
                                    unsigned long order, unsigned long asid,
                                    unsigned long vmid)
{
        struct kvm_riscv_hfence data;

        data.type = KVM_RISCV_HFENCE_VVMA_ASID_GVA;
        data.asid = asid;
        data.vmid = vmid;
        data.addr = gva;
        data.size = gvsz;
        data.order = order;
        make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
                            KVM_REQ_HFENCE_VVMA_ALL, &data);
}

void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
                                    unsigned long hbase, unsigned long hmask,
                                    unsigned long asid, unsigned long vmid)
{
        struct kvm_riscv_hfence data = {0};

        data.type = KVM_RISCV_HFENCE_VVMA_ASID_ALL;
        data.asid = asid;
        data.vmid = vmid;
        make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
                            KVM_REQ_HFENCE_VVMA_ALL, &data);
}

void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
                               unsigned long hbase, unsigned long hmask,
                               unsigned long gva, unsigned long gvsz,
                               unsigned long order, unsigned long vmid)
{
        struct kvm_riscv_hfence data;

        data.type = KVM_RISCV_HFENCE_VVMA_GVA;
        data.asid = 0;
        data.vmid = vmid;
        data.addr = gva;
        data.size = gvsz;
        data.order = order;
        make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
                            KVM_REQ_HFENCE_VVMA_ALL, &data);
}

void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
                               unsigned long hbase, unsigned long hmask,
                               unsigned long vmid)
{
        struct kvm_riscv_hfence data = {0};

        data.type = KVM_RISCV_HFENCE_VVMA_ALL;
        data.vmid = vmid;
        make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
                            KVM_REQ_HFENCE_VVMA_ALL, &data);
}

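/*
 * Hook called by generic KVM code to flush a range of guest frames:
 * convert the gfn range to a guest physical byte range and fence it for
 * the guest's current VMID on all VCPUs (hbase == -1UL selects every VCPU).
 */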
int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages)
{
        kvm_riscv_hfence_gvma_vmid_gpa(kvm, -1UL, 0,
                                       gfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT,
                                       PAGE_SHIFT, READ_ONCE(kvm->arch.vmid.vmid));
        return 0;
}