1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2022 Ventana Micro Systems Inc.
4 */
5
6 #include <linux/bitmap.h>
7 #include <linux/cpumask.h>
8 #include <linux/errno.h>
9 #include <linux/err.h>
10 #include <linux/module.h>
11 #include <linux/smp.h>
12 #include <linux/kvm_host.h>
13 #include <asm/cacheflush.h>
14 #include <asm/csr.h>
15 #include <asm/cpufeature.h>
16 #include <asm/insn-def.h>
17 #include <asm/kvm_nacl.h>
18 #include <asm/kvm_tlb.h>
19 #include <asm/kvm_vmid.h>
20
/* True when the host CPU implements the Svinval extension (fine-grained invalidation). */
#define has_svinval() riscv_has_extension_unlikely(RISCV_ISA_EXT_SVINVAL)
22
/*
 * Flush G-stage (guest-physical) TLB entries for a GPA range of a
 * specific VMID on the local CPU.
 *
 * @vmid:  VMID whose G-stage mappings are invalidated
 * @gpa:   base guest physical address of the range
 * @gpsz:  size of the range in bytes
 * @order: log2 of the flush granule in bytes (page-size order)
 *
 * HFENCE.GVMA/HINVAL.GVMA take the guest physical address right-shifted
 * by 2 in rs1, hence the "pos >> 2" below (per the RISC-V hypervisor
 * extension specification).
 */
void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
					  gpa_t gpa, gpa_t gpsz,
					  unsigned long order)
{
	gpa_t pos;

	/*
	 * Too many granules to fence one-by-one; a single full flush
	 * for this VMID is cheaper.
	 */
	if (PTRS_PER_PTE < (gpsz >> order)) {
		kvm_riscv_local_hfence_gvma_vmid_all(vmid);
		return;
	}

	if (has_svinval()) {
		/*
		 * Svinval: bracket the per-address HINVAL.GVMA batch with
		 * SFENCE.W.INVAL / SFENCE.INVAL.IR ordering fences.
		 */
		asm volatile (SFENCE_W_INVAL() ::: "memory");
		for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
			asm volatile (HINVAL_GVMA(%0, %1)
			: : "r" (pos >> 2), "r" (vmid) : "memory");
		asm volatile (SFENCE_INVAL_IR() ::: "memory");
	} else {
		for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
			asm volatile (HFENCE_GVMA(%0, %1)
			: : "r" (pos >> 2), "r" (vmid) : "memory");
	}
}
46
/* Flush all G-stage TLB entries for @vmid on the local CPU. */
void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid)
{
	asm volatile(HFENCE_GVMA(zero, %0) : : "r" (vmid) : "memory");
}
51
/*
 * Flush G-stage TLB entries for a GPA range across all VMIDs on the
 * local CPU (rs2 == zero selects all VMIDs for HFENCE.GVMA).
 *
 * @gpa:   base guest physical address of the range
 * @gpsz:  size of the range in bytes
 * @order: log2 of the flush granule in bytes
 */
void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
				     unsigned long order)
{
	gpa_t pos;

	/* Range spans too many granules; flush everything instead. */
	if (PTRS_PER_PTE < (gpsz >> order)) {
		kvm_riscv_local_hfence_gvma_all();
		return;
	}

	if (has_svinval()) {
		/* Svinval path: ordered batch of per-address invalidates. */
		asm volatile (SFENCE_W_INVAL() ::: "memory");
		for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
			asm volatile(HINVAL_GVMA(%0, zero)
			: : "r" (pos >> 2) : "memory");
		asm volatile (SFENCE_INVAL_IR() ::: "memory");
	} else {
		for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
			asm volatile(HFENCE_GVMA(%0, zero)
			: : "r" (pos >> 2) : "memory");
	}
}
74
/* Flush all G-stage TLB entries for all VMIDs on the local CPU. */
void kvm_riscv_local_hfence_gvma_all(void)
{
	asm volatile(HFENCE_GVMA(zero, zero) : : : "memory");
}
79
/*
 * Flush VS-stage (guest-virtual) TLB entries for a GVA range of a
 * specific VMID and guest ASID on the local CPU.
 *
 * HFENCE.VVMA/HINVAL.VVMA act on the VMID currently programmed in the
 * hgatp CSR, so hgatp is temporarily swapped to the target VMID and
 * restored on exit.
 */
void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
					  unsigned long asid,
					  unsigned long gva,
					  unsigned long gvsz,
					  unsigned long order)
{
	unsigned long pos, hgatp;

	/* Range too large; do a full flush for this VMID+ASID instead. */
	if (PTRS_PER_PTE < (gvsz >> order)) {
		kvm_riscv_local_hfence_vvma_asid_all(vmid, asid);
		return;
	}

	/* Point hgatp at the target VMID; keep the old value for restore. */
	hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

	if (has_svinval()) {
		asm volatile (SFENCE_W_INVAL() ::: "memory");
		for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
			asm volatile(HINVAL_VVMA(%0, %1)
			: : "r" (pos), "r" (asid) : "memory");
		asm volatile (SFENCE_INVAL_IR() ::: "memory");
	} else {
		for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
			asm volatile(HFENCE_VVMA(%0, %1)
			: : "r" (pos), "r" (asid) : "memory");
	}

	csr_write(CSR_HGATP, hgatp);
}
109
/*
 * Flush all VS-stage TLB entries for @asid under @vmid on the local
 * CPU. hgatp is swapped to the target VMID around the fence because
 * HFENCE.VVMA uses the VMID active in hgatp.
 */
void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
					  unsigned long asid)
{
	unsigned long hgatp;

	hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

	asm volatile(HFENCE_VVMA(zero, %0) : : "r" (asid) : "memory");

	csr_write(CSR_HGATP, hgatp);
}
121
/*
 * Flush VS-stage TLB entries for a GVA range of @vmid (all guest ASIDs)
 * on the local CPU. Like the ASID variant, hgatp is swapped to the
 * target VMID for the duration of the fences.
 */
void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
				     unsigned long gva, unsigned long gvsz,
				     unsigned long order)
{
	unsigned long pos, hgatp;

	/* Range too large; fall back to a full flush for this VMID. */
	if (PTRS_PER_PTE < (gvsz >> order)) {
		kvm_riscv_local_hfence_vvma_all(vmid);
		return;
	}

	hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

	if (has_svinval()) {
		asm volatile (SFENCE_W_INVAL() ::: "memory");
		for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
			asm volatile(HINVAL_VVMA(%0, zero)
			: : "r" (pos) : "memory");
		asm volatile (SFENCE_INVAL_IR() ::: "memory");
	} else {
		for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
			asm volatile(HFENCE_VVMA(%0, zero)
			: : "r" (pos) : "memory");
	}

	csr_write(CSR_HGATP, hgatp);
}
149
/*
 * Flush all VS-stage TLB entries (all GVAs, all guest ASIDs) for @vmid
 * on the local CPU, swapping hgatp to the target VMID around the fence.
 */
void kvm_riscv_local_hfence_vvma_all(unsigned long vmid)
{
	unsigned long hgatp;

	hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

	asm volatile(HFENCE_VVMA(zero, zero) : : : "memory");

	csr_write(CSR_HGATP, hgatp);
}
160
/*
 * Sanitize local TLBs when a VCPU migrates between host CPUs.
 * No-op when hardware VMIDs are unavailable or the VCPU is re-running
 * on the same CPU it last exited on.
 */
void kvm_riscv_local_tlb_sanitize(struct kvm_vcpu *vcpu)
{
	unsigned long vmid;

	if (!kvm_riscv_gstage_vmid_bits() ||
	    vcpu->arch.last_exit_cpu == vcpu->cpu)
		return;

	/*
	 * On RISC-V platforms with hardware VMID support, we share same
	 * VMID for all VCPUs of a particular Guest/VM. This means we might
	 * have stale G-stage TLB entries on the current Host CPU due to
	 * some other VCPU of the same Guest which ran previously on the
	 * current Host CPU.
	 *
	 * To cleanup stale TLB entries, we simply flush all G-stage TLB
	 * entries by VMID whenever underlying Host CPU changes for a VCPU.
	 */

	vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
	kvm_riscv_local_hfence_gvma_vmid_all(vmid);

	/*
	 * Flush VS-stage TLB entries for implementation where VS-stage
	 * TLB does not cache guest physical address and VMID.
	 */
	if (static_branch_unlikely(&kvm_riscv_vsstage_tlb_no_gpa))
		kvm_riscv_local_hfence_vvma_all(vmid);
}
190
/*
 * Handle a KVM_REQ_FENCE_I request on this VCPU: bump the SBI PMU
 * firmware counter and flush the local instruction cache.
 */
void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu)
{
	kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_FENCE_I_RCVD);
	local_flush_icache_all();
}
196
kvm_riscv_tlb_flush_process(struct kvm_vcpu * vcpu)197 void kvm_riscv_tlb_flush_process(struct kvm_vcpu *vcpu)
198 {
199 struct kvm_vmid *v = &vcpu->kvm->arch.vmid;
200 unsigned long vmid = READ_ONCE(v->vmid);
201
202 if (kvm_riscv_nacl_available())
203 nacl_hfence_gvma_vmid_all(nacl_shmem(), vmid);
204 else
205 kvm_riscv_local_hfence_gvma_vmid_all(vmid);
206 }
207
kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu * vcpu)208 void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu)
209 {
210 struct kvm_vmid *v = &vcpu->kvm->arch.vmid;
211 unsigned long vmid = READ_ONCE(v->vmid);
212
213 if (kvm_riscv_nacl_available())
214 nacl_hfence_vvma_all(nacl_shmem(), vmid);
215 else
216 kvm_riscv_local_hfence_vvma_all(vmid);
217 }
218
vcpu_hfence_dequeue(struct kvm_vcpu * vcpu,struct kvm_riscv_hfence * out_data)219 static bool vcpu_hfence_dequeue(struct kvm_vcpu *vcpu,
220 struct kvm_riscv_hfence *out_data)
221 {
222 bool ret = false;
223 struct kvm_vcpu_arch *varch = &vcpu->arch;
224
225 spin_lock(&varch->hfence_lock);
226
227 if (varch->hfence_queue[varch->hfence_head].type) {
228 memcpy(out_data, &varch->hfence_queue[varch->hfence_head],
229 sizeof(*out_data));
230 varch->hfence_queue[varch->hfence_head].type = 0;
231
232 varch->hfence_head++;
233 if (varch->hfence_head == KVM_RISCV_VCPU_MAX_HFENCE)
234 varch->hfence_head = 0;
235
236 ret = true;
237 }
238
239 spin_unlock(&varch->hfence_lock);
240
241 return ret;
242 }
243
vcpu_hfence_enqueue(struct kvm_vcpu * vcpu,const struct kvm_riscv_hfence * data)244 static bool vcpu_hfence_enqueue(struct kvm_vcpu *vcpu,
245 const struct kvm_riscv_hfence *data)
246 {
247 bool ret = false;
248 struct kvm_vcpu_arch *varch = &vcpu->arch;
249
250 spin_lock(&varch->hfence_lock);
251
252 if (!varch->hfence_queue[varch->hfence_tail].type) {
253 memcpy(&varch->hfence_queue[varch->hfence_tail],
254 data, sizeof(*data));
255
256 varch->hfence_tail++;
257 if (varch->hfence_tail == KVM_RISCV_VCPU_MAX_HFENCE)
258 varch->hfence_tail = 0;
259
260 ret = true;
261 }
262
263 spin_unlock(&varch->hfence_lock);
264
265 return ret;
266 }
267
/*
 * Drain and service this VCPU's hfence queue (KVM_REQ_HFENCE handler).
 * Each dequeued request is executed either through the NACL SBI
 * extension (when available) or with the corresponding local
 * H-extension fence; SBI PMU firmware counters are bumped for the
 * VVMA variants.
 */
void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu)
{
	struct kvm_riscv_hfence d = { 0 };

	while (vcpu_hfence_dequeue(vcpu, &d)) {
		switch (d.type) {
		case KVM_RISCV_HFENCE_UNKNOWN:
			break;
		case KVM_RISCV_HFENCE_GVMA_VMID_GPA:
			if (kvm_riscv_nacl_available())
				nacl_hfence_gvma_vmid(nacl_shmem(), d.vmid,
						      d.addr, d.size, d.order);
			else
				kvm_riscv_local_hfence_gvma_vmid_gpa(d.vmid, d.addr,
								     d.size, d.order);
			break;
		case KVM_RISCV_HFENCE_GVMA_VMID_ALL:
			if (kvm_riscv_nacl_available())
				nacl_hfence_gvma_vmid_all(nacl_shmem(), d.vmid);
			else
				kvm_riscv_local_hfence_gvma_vmid_all(d.vmid);
			break;
		case KVM_RISCV_HFENCE_VVMA_ASID_GVA:
			kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD);
			if (kvm_riscv_nacl_available())
				nacl_hfence_vvma_asid(nacl_shmem(), d.vmid, d.asid,
						      d.addr, d.size, d.order);
			else
				kvm_riscv_local_hfence_vvma_asid_gva(d.vmid, d.asid, d.addr,
								     d.size, d.order);
			break;
		case KVM_RISCV_HFENCE_VVMA_ASID_ALL:
			kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD);
			if (kvm_riscv_nacl_available())
				nacl_hfence_vvma_asid_all(nacl_shmem(), d.vmid, d.asid);
			else
				kvm_riscv_local_hfence_vvma_asid_all(d.vmid, d.asid);
			break;
		case KVM_RISCV_HFENCE_VVMA_GVA:
			kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_RCVD);
			if (kvm_riscv_nacl_available())
				nacl_hfence_vvma(nacl_shmem(), d.vmid,
						 d.addr, d.size, d.order);
			else
				kvm_riscv_local_hfence_vvma_gva(d.vmid, d.addr,
								d.size, d.order);
			break;
		case KVM_RISCV_HFENCE_VVMA_ALL:
			kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_RCVD);
			if (kvm_riscv_nacl_available())
				nacl_hfence_vvma_all(nacl_shmem(), d.vmid);
			else
				kvm_riscv_local_hfence_vvma_all(d.vmid);
			break;
		default:
			break;
		}
	}
}
327
make_xfence_request(struct kvm * kvm,unsigned long hbase,unsigned long hmask,unsigned int req,unsigned int fallback_req,const struct kvm_riscv_hfence * data)328 static void make_xfence_request(struct kvm *kvm,
329 unsigned long hbase, unsigned long hmask,
330 unsigned int req, unsigned int fallback_req,
331 const struct kvm_riscv_hfence *data)
332 {
333 unsigned long i;
334 struct kvm_vcpu *vcpu;
335 unsigned int actual_req = req;
336 DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS);
337
338 bitmap_zero(vcpu_mask, KVM_MAX_VCPUS);
339 kvm_for_each_vcpu(i, vcpu, kvm) {
340 if (hbase != -1UL) {
341 if (vcpu->vcpu_id < hbase ||
342 vcpu->vcpu_id >= hbase + BITS_PER_LONG)
343 continue;
344 if (!(hmask & (1UL << (vcpu->vcpu_id - hbase))))
345 continue;
346 }
347
348 bitmap_set(vcpu_mask, i, 1);
349
350 if (!data || !data->type)
351 continue;
352
353 /*
354 * Enqueue hfence data to VCPU hfence queue. If we don't
355 * have space in the VCPU hfence queue then fallback to
356 * a more conservative hfence request.
357 */
358 if (!vcpu_hfence_enqueue(vcpu, data))
359 actual_req = fallback_req;
360 }
361
362 kvm_make_vcpus_request_mask(kvm, actual_req, vcpu_mask);
363 }
364
/*
 * Request a FENCE.I (instruction cache flush) on the set of VCPUs
 * selected by @hbase/@hmask (hbase == -1UL means all VCPUs). No queue
 * data is needed, so the fallback request equals the primary one.
 */
void kvm_riscv_fence_i(struct kvm *kvm,
		       unsigned long hbase, unsigned long hmask)
{
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_FENCE_I,
			    KVM_REQ_FENCE_I, NULL);
}
371
/*
 * Queue a ranged G-stage (GPA) flush for @vmid on the VCPUs selected
 * by @hbase/@hmask; falls back to a full TLB flush request when a
 * VCPU's hfence queue is full.
 */
void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    gpa_t gpa, gpa_t gpsz,
				    unsigned long order, unsigned long vmid)
{
	struct kvm_riscv_hfence data = {
		.type = KVM_RISCV_HFENCE_GVMA_VMID_GPA,
		.asid = 0,
		.vmid = vmid,
		.addr = gpa,
		.size = gpsz,
		.order = order,
	};

	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
			    KVM_REQ_TLB_FLUSH, &data);
}
388
kvm_riscv_hfence_gvma_vmid_all(struct kvm * kvm,unsigned long hbase,unsigned long hmask,unsigned long vmid)389 void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
390 unsigned long hbase, unsigned long hmask,
391 unsigned long vmid)
392 {
393 struct kvm_riscv_hfence data = {0};
394
395 data.type = KVM_RISCV_HFENCE_GVMA_VMID_ALL;
396 data.vmid = vmid;
397 make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
398 KVM_REQ_TLB_FLUSH, &data);
399 }
400
kvm_riscv_hfence_vvma_asid_gva(struct kvm * kvm,unsigned long hbase,unsigned long hmask,unsigned long gva,unsigned long gvsz,unsigned long order,unsigned long asid,unsigned long vmid)401 void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
402 unsigned long hbase, unsigned long hmask,
403 unsigned long gva, unsigned long gvsz,
404 unsigned long order, unsigned long asid,
405 unsigned long vmid)
406 {
407 struct kvm_riscv_hfence data;
408
409 data.type = KVM_RISCV_HFENCE_VVMA_ASID_GVA;
410 data.asid = asid;
411 data.vmid = vmid;
412 data.addr = gva;
413 data.size = gvsz;
414 data.order = order;
415 make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
416 KVM_REQ_HFENCE_VVMA_ALL, &data);
417 }
418
kvm_riscv_hfence_vvma_asid_all(struct kvm * kvm,unsigned long hbase,unsigned long hmask,unsigned long asid,unsigned long vmid)419 void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
420 unsigned long hbase, unsigned long hmask,
421 unsigned long asid, unsigned long vmid)
422 {
423 struct kvm_riscv_hfence data = {0};
424
425 data.type = KVM_RISCV_HFENCE_VVMA_ASID_ALL;
426 data.asid = asid;
427 data.vmid = vmid;
428 make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
429 KVM_REQ_HFENCE_VVMA_ALL, &data);
430 }
431
kvm_riscv_hfence_vvma_gva(struct kvm * kvm,unsigned long hbase,unsigned long hmask,unsigned long gva,unsigned long gvsz,unsigned long order,unsigned long vmid)432 void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
433 unsigned long hbase, unsigned long hmask,
434 unsigned long gva, unsigned long gvsz,
435 unsigned long order, unsigned long vmid)
436 {
437 struct kvm_riscv_hfence data;
438
439 data.type = KVM_RISCV_HFENCE_VVMA_GVA;
440 data.asid = 0;
441 data.vmid = vmid;
442 data.addr = gva;
443 data.size = gvsz;
444 data.order = order;
445 make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
446 KVM_REQ_HFENCE_VVMA_ALL, &data);
447 }
448
kvm_riscv_hfence_vvma_all(struct kvm * kvm,unsigned long hbase,unsigned long hmask,unsigned long vmid)449 void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
450 unsigned long hbase, unsigned long hmask,
451 unsigned long vmid)
452 {
453 struct kvm_riscv_hfence data = {0};
454
455 data.type = KVM_RISCV_HFENCE_VVMA_ALL;
456 data.vmid = vmid;
457 make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
458 KVM_REQ_HFENCE_VVMA_ALL, &data);
459 }
460
/*
 * Generic-KVM hook: flush G-stage mappings for a GFN range on every
 * VCPU of @kvm (hbase == -1UL selects all VCPUs). Always succeeds.
 */
int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages)
{
	gpa_t base = gfn << PAGE_SHIFT;
	gpa_t size = nr_pages << PAGE_SHIFT;

	kvm_riscv_hfence_gvma_vmid_gpa(kvm, -1UL, 0, base, size, PAGE_SHIFT,
				       READ_ONCE(kvm->arch.vmid.vmid));
	return 0;
}
468