// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2022 Ventana Micro Systems Inc.
 */

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/kvm_host.h>
#include <asm/cacheflush.h>
#include <asm/csr.h>
#include <asm/cpufeature.h>
#include <asm/insn-def.h>
#include <asm/kvm_nacl.h>
#include <asm/kvm_tlb.h>
#include <asm/kvm_vmid.h>

#define has_svinval()	riscv_has_extension_unlikely(RISCV_ISA_EXT_SVINVAL)

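/*
 * Flush guest-physical (G-stage) TLB entries on the local hart for the
 * given VMID and GPA range [gpa, gpa + gpsz), stepping by BIT(order)
 * bytes. Falls back to a full VMID flush when the range covers more than
 * PTRS_PER_PTE entries. Uses the Svinval sequence (SFENCE.W.INVAL,
 * HINVAL.GVMA, SFENCE.INVAL.IR) when available, plain HFENCE.GVMA
 * otherwise. HFENCE.GVMA/HINVAL.GVMA expect the guest physical address
 * shifted right by 2 in rs1.
 */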
void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
                                          gpa_t gpa, gpa_t gpsz,
                                          unsigned long order)
{
        gpa_t pos;

        if (PTRS_PER_PTE < (gpsz >> order)) {
                kvm_riscv_local_hfence_gvma_vmid_all(vmid);
                return;
        }

        if (has_svinval()) {
                asm volatile (SFENCE_W_INVAL() ::: "memory");
                for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
                        asm volatile (HINVAL_GVMA(%0, %1)
                        : : "r" (pos >> 2), "r" (vmid) : "memory");
                asm volatile (SFENCE_INVAL_IR() ::: "memory");
        } else {
                for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
                        asm volatile (HFENCE_GVMA(%0, %1)
                        : : "r" (pos >> 2), "r" (vmid) : "memory");
        }
}

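/* Flush all G-stage TLB entries for the given VMID on the local hart. */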
void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid)
{
        asm volatile(HFENCE_GVMA(zero, %0) : : "r" (vmid) : "memory");
}

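/*
 * Same as kvm_riscv_local_hfence_gvma_vmid_gpa() but across all VMIDs
 * (rs2 = zero), falling back to a full G-stage flush for large ranges.
 */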
void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
                                     unsigned long order)
{
        gpa_t pos;

        if (PTRS_PER_PTE < (gpsz >> order)) {
                kvm_riscv_local_hfence_gvma_all();
                return;
        }

        if (has_svinval()) {
                asm volatile (SFENCE_W_INVAL() ::: "memory");
                for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
                        asm volatile(HINVAL_GVMA(%0, zero)
                        : : "r" (pos >> 2) : "memory");
                asm volatile (SFENCE_INVAL_IR() ::: "memory");
        } else {
                for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
                        asm volatile(HFENCE_GVMA(%0, zero)
                        : : "r" (pos >> 2) : "memory");
        }
}

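/* Flush all G-stage TLB entries for all VMIDs on the local hart. */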
void kvm_riscv_local_hfence_gvma_all(void)
{
        asm volatile(HFENCE_GVMA(zero, zero) : : : "memory");
}

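/*
 * Flush guest-virtual (VS-stage) TLB entries on the local hart for the
 * given ASID and GVA range [gva, gva + gvsz). HFENCE.VVMA/HINVAL.VVMA
 * operate on the VMID currently programmed in hgatp, so the target VMID
 * is temporarily swapped into CSR_HGATP and restored afterwards.
 */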
void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
                                          unsigned long asid,
                                          unsigned long gva,
                                          unsigned long gvsz,
                                          unsigned long order)
{
        unsigned long pos, hgatp;

        if (PTRS_PER_PTE < (gvsz >> order)) {
                kvm_riscv_local_hfence_vvma_asid_all(vmid, asid);
                return;
        }

        hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

        if (has_svinval()) {
                asm volatile (SFENCE_W_INVAL() ::: "memory");
                for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
                        asm volatile(HINVAL_VVMA(%0, %1)
                        : : "r" (pos), "r" (asid) : "memory");
                asm volatile (SFENCE_INVAL_IR() ::: "memory");
        } else {
                for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
                        asm volatile(HFENCE_VVMA(%0, %1)
                        : : "r" (pos), "r" (asid) : "memory");
        }

        csr_write(CSR_HGATP, hgatp);
}

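/* Flush all VS-stage TLB entries for the given VMID and ASID on the local hart. */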
void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
                                          unsigned long asid)
{
        unsigned long hgatp;

        hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

        asm volatile(HFENCE_VVMA(zero, %0) : : "r" (asid) : "memory");

        csr_write(CSR_HGATP, hgatp);
}

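/*
 * Flush VS-stage TLB entries for the given VMID and GVA range across all
 * ASIDs (rs2 = zero), falling back to a full VVMA flush for large ranges.
 */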
void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
                                     unsigned long gva, unsigned long gvsz,
                                     unsigned long order)
{
        unsigned long pos, hgatp;

        if (PTRS_PER_PTE < (gvsz >> order)) {
                kvm_riscv_local_hfence_vvma_all(vmid);
                return;
        }

        hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

        if (has_svinval()) {
                asm volatile (SFENCE_W_INVAL() ::: "memory");
                for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
                        asm volatile(HINVAL_VVMA(%0, zero)
                        : : "r" (pos) : "memory");
                asm volatile (SFENCE_INVAL_IR() ::: "memory");
        } else {
                for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
                        asm volatile(HFENCE_VVMA(%0, zero)
                        : : "r" (pos) : "memory");
        }

        csr_write(CSR_HGATP, hgatp);
}

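/* Flush all VS-stage TLB entries for the given VMID on the local hart. */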
void kvm_riscv_local_hfence_vvma_all(unsigned long vmid)
{
        unsigned long hgatp;

        hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

        asm volatile(HFENCE_VVMA(zero, zero) : : : "memory");

        csr_write(CSR_HGATP, hgatp);
}

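/*
 * Process a pending FENCE.I request on this VCPU: account the SBI PMU
 * firmware event and flush the local instruction cache.
 */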
void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu)
{
        kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_FENCE_I_RCVD);
        local_flush_icache_all();
}

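/*
 * Flush all G-stage mappings for this VM's current VMID, going through the
 * NACL (nested acceleration) shared memory interface when it is available.
 */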
void kvm_riscv_tlb_flush_process(struct kvm_vcpu *vcpu)
{
        struct kvm_vmid *v = &vcpu->kvm->arch.vmid;
        unsigned long vmid = READ_ONCE(v->vmid);

        if (kvm_riscv_nacl_available())
                nacl_hfence_gvma_vmid_all(nacl_shmem(), vmid);
        else
                kvm_riscv_local_hfence_gvma_vmid_all(vmid);
}

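/* Flush all VS-stage mappings for this VM's current VMID, via NACL when available. */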
void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu)
{
        struct kvm_vmid *v = &vcpu->kvm->arch.vmid;
        unsigned long vmid = READ_ONCE(v->vmid);

        if (kvm_riscv_nacl_available())
                nacl_hfence_vvma_all(nacl_shmem(), vmid);
        else
                kvm_riscv_local_hfence_vvma_all(vmid);
}

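/*
 * Pop one entry from the VCPU's circular hfence queue. A zero 'type' marks
 * an empty slot; returns false when the queue is empty.
 */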
static bool vcpu_hfence_dequeue(struct kvm_vcpu *vcpu,
                                struct kvm_riscv_hfence *out_data)
{
        bool ret = false;
        struct kvm_vcpu_arch *varch = &vcpu->arch;

        spin_lock(&varch->hfence_lock);

        if (varch->hfence_queue[varch->hfence_head].type) {
                memcpy(out_data, &varch->hfence_queue[varch->hfence_head],
                       sizeof(*out_data));
                varch->hfence_queue[varch->hfence_head].type = 0;

                varch->hfence_head++;
                if (varch->hfence_head == KVM_RISCV_VCPU_MAX_HFENCE)
                        varch->hfence_head = 0;

                ret = true;
        }

        spin_unlock(&varch->hfence_lock);

        return ret;
}

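/*
 * Push one entry onto the VCPU's circular hfence queue. Returns false when
 * the tail slot is still occupied, i.e. the queue is full.
 */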
static bool vcpu_hfence_enqueue(struct kvm_vcpu *vcpu,
                                const struct kvm_riscv_hfence *data)
{
        bool ret = false;
        struct kvm_vcpu_arch *varch = &vcpu->arch;

        spin_lock(&varch->hfence_lock);

        if (!varch->hfence_queue[varch->hfence_tail].type) {
                memcpy(&varch->hfence_queue[varch->hfence_tail],
                       data, sizeof(*data));

                varch->hfence_tail++;
                if (varch->hfence_tail == KVM_RISCV_VCPU_MAX_HFENCE)
                        varch->hfence_tail = 0;

                ret = true;
        }

        spin_unlock(&varch->hfence_lock);

        return ret;
}

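/*
 * Drain the VCPU's hfence queue and perform each pending fence, either
 * through the NACL shared memory interface or directly on the local hart.
 * VVMA variants also bump the corresponding SBI PMU firmware event counters.
 */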
void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu)
{
        struct kvm_riscv_hfence d = { 0 };

        while (vcpu_hfence_dequeue(vcpu, &d)) {
                switch (d.type) {
                case KVM_RISCV_HFENCE_UNKNOWN:
                        break;
                case KVM_RISCV_HFENCE_GVMA_VMID_GPA:
                        if (kvm_riscv_nacl_available())
                                nacl_hfence_gvma_vmid(nacl_shmem(), d.vmid,
                                                      d.addr, d.size, d.order);
                        else
                                kvm_riscv_local_hfence_gvma_vmid_gpa(d.vmid, d.addr,
                                                                     d.size, d.order);
                        break;
                case KVM_RISCV_HFENCE_GVMA_VMID_ALL:
                        if (kvm_riscv_nacl_available())
                                nacl_hfence_gvma_vmid_all(nacl_shmem(), d.vmid);
                        else
                                kvm_riscv_local_hfence_gvma_vmid_all(d.vmid);
                        break;
                case KVM_RISCV_HFENCE_VVMA_ASID_GVA:
                        kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD);
                        if (kvm_riscv_nacl_available())
                                nacl_hfence_vvma_asid(nacl_shmem(), d.vmid, d.asid,
                                                      d.addr, d.size, d.order);
                        else
                                kvm_riscv_local_hfence_vvma_asid_gva(d.vmid, d.asid, d.addr,
                                                                     d.size, d.order);
                        break;
                case KVM_RISCV_HFENCE_VVMA_ASID_ALL:
                        kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD);
                        if (kvm_riscv_nacl_available())
                                nacl_hfence_vvma_asid_all(nacl_shmem(), d.vmid, d.asid);
                        else
                                kvm_riscv_local_hfence_vvma_asid_all(d.vmid, d.asid);
                        break;
                case KVM_RISCV_HFENCE_VVMA_GVA:
                        kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_RCVD);
                        if (kvm_riscv_nacl_available())
                                nacl_hfence_vvma(nacl_shmem(), d.vmid,
                                                 d.addr, d.size, d.order);
                        else
                                kvm_riscv_local_hfence_vvma_gva(d.vmid, d.addr,
                                                                d.size, d.order);
                        break;
                case KVM_RISCV_HFENCE_VVMA_ALL:
                        kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_RCVD);
                        if (kvm_riscv_nacl_available())
                                nacl_hfence_vvma_all(nacl_shmem(), d.vmid);
                        else
                                kvm_riscv_local_hfence_vvma_all(d.vmid);
                        break;
                default:
                        break;
                }
        }
}

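/*
 * Build a mask of target VCPUs from the SBI-style (hbase, hmask) pair
 * (hbase == -1UL selects all VCPUs), enqueue the hfence data on each of
 * them, and kick them with a KVM request. If any VCPU's queue is full,
 * the request sent to the whole mask is downgraded to the broader
 * fallback_req.
 */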
static void make_xfence_request(struct kvm *kvm,
                                unsigned long hbase, unsigned long hmask,
                                unsigned int req, unsigned int fallback_req,
                                const struct kvm_riscv_hfence *data)
{
        unsigned long i;
        struct kvm_vcpu *vcpu;
        unsigned int actual_req = req;
        DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS);

        bitmap_zero(vcpu_mask, KVM_MAX_VCPUS);
        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (hbase != -1UL) {
                        if (vcpu->vcpu_id < hbase)
                                continue;
                        if (!(hmask & (1UL << (vcpu->vcpu_id - hbase))))
                                continue;
                }

                bitmap_set(vcpu_mask, i, 1);

                if (!data || !data->type)
                        continue;

                /*
                 * Enqueue hfence data to VCPU hfence queue. If we don't
                 * have space in the VCPU hfence queue then fallback to
                 * a more conservative hfence request.
                 */
                if (!vcpu_hfence_enqueue(vcpu, data))
                        actual_req = fallback_req;
        }

        kvm_make_vcpus_request_mask(kvm, actual_req, vcpu_mask);
}

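/*
 * The helpers below issue remote fences to a set of VCPUs. Each one fills
 * in a struct kvm_riscv_hfence (where applicable) and hands it to
 * make_xfence_request() together with a conservative fallback request.
 */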
void kvm_riscv_fence_i(struct kvm *kvm,
                       unsigned long hbase, unsigned long hmask)
{
        make_xfence_request(kvm, hbase, hmask, KVM_REQ_FENCE_I,
                            KVM_REQ_FENCE_I, NULL);
}

void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
                                    unsigned long hbase, unsigned long hmask,
                                    gpa_t gpa, gpa_t gpsz,
                                    unsigned long order, unsigned long vmid)
{
        struct kvm_riscv_hfence data;

        data.type = KVM_RISCV_HFENCE_GVMA_VMID_GPA;
        data.asid = 0;
        data.vmid = vmid;
        data.addr = gpa;
        data.size = gpsz;
        data.order = order;
        make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
                            KVM_REQ_TLB_FLUSH, &data);
}

void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
                                    unsigned long hbase, unsigned long hmask,
                                    unsigned long vmid)
{
        struct kvm_riscv_hfence data = {0};

        data.type = KVM_RISCV_HFENCE_GVMA_VMID_ALL;
        data.vmid = vmid;
        make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
                            KVM_REQ_TLB_FLUSH, &data);
}

void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
                                    unsigned long hbase, unsigned long hmask,
                                    unsigned long gva, unsigned long gvsz,
                                    unsigned long order, unsigned long asid,
                                    unsigned long vmid)
{
        struct kvm_riscv_hfence data;

        data.type = KVM_RISCV_HFENCE_VVMA_ASID_GVA;
        data.asid = asid;
        data.vmid = vmid;
        data.addr = gva;
        data.size = gvsz;
        data.order = order;
        make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
                            KVM_REQ_HFENCE_VVMA_ALL, &data);
}

void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
                                    unsigned long hbase, unsigned long hmask,
                                    unsigned long asid, unsigned long vmid)
{
        struct kvm_riscv_hfence data = {0};

        data.type = KVM_RISCV_HFENCE_VVMA_ASID_ALL;
        data.asid = asid;
        data.vmid = vmid;
        make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
                            KVM_REQ_HFENCE_VVMA_ALL, &data);
}

void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
                               unsigned long hbase, unsigned long hmask,
                               unsigned long gva, unsigned long gvsz,
                               unsigned long order, unsigned long vmid)
{
        struct kvm_riscv_hfence data;

        data.type = KVM_RISCV_HFENCE_VVMA_GVA;
        data.asid = 0;
        data.vmid = vmid;
        data.addr = gva;
        data.size = gvsz;
        data.order = order;
        make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
                            KVM_REQ_HFENCE_VVMA_ALL, &data);
}

void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
                               unsigned long hbase, unsigned long hmask,
                               unsigned long vmid)
{
        struct kvm_riscv_hfence data = {0};

        data.type = KVM_RISCV_HFENCE_VVMA_ALL;
        data.vmid = vmid;
        make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
                            KVM_REQ_HFENCE_VVMA_ALL, &data);
}

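/*
 * KVM generic MMU hook: flush G-stage mappings for a gfn range on all
 * VCPUs of the VM using its current VMID.
 */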
int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages)
{
        kvm_riscv_hfence_gvma_vmid_gpa(kvm, -1UL, 0,
                                       gfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT,
                                       PAGE_SHIFT, READ_ONCE(kvm->arch.vmid.vmid));
        return 0;
}