Lines matching: d, -, tlb, -, size
1 // SPDX-License-Identifier: GPL-2.0
16 #include <asm/insn-def.h>
166 vcpu->arch.last_exit_cpu == vcpu->cpu) in kvm_riscv_local_tlb_sanitize()
170 * On RISC-V platforms with hardware VMID support, we share the same in kvm_riscv_local_tlb_sanitize()
172 * have stale G-stage TLB entries on the current Host CPU due to in kvm_riscv_local_tlb_sanitize()
176 * To clean up stale TLB entries, we simply flush all G-stage TLB in kvm_riscv_local_tlb_sanitize()
180 vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid); in kvm_riscv_local_tlb_sanitize()
184 * Flush VS-stage TLB entries for implementations where the VS-stage in kvm_riscv_local_tlb_sanitize()
185 * TLB does not cache guest physical addresses and VMIDs. in kvm_riscv_local_tlb_sanitize()
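
Only the matched lines of kvm_riscv_local_tlb_sanitize() appear above. Below is a hedged reconstruction of how they likely fit together: the early-return guard, the comment text, and the VMID read come from the fragments, while the two local flush calls are assumptions inferred from the helpers visible later in this listing (the trailing VS-stage flush in particular is inferred from the comment at lines 184-185).

void kvm_riscv_local_tlb_sanitize(struct kvm_vcpu *vcpu)
{
        unsigned long vmid;

        /* Nothing to do if the vCPU stayed on the same host CPU. */
        if (vcpu->arch.last_exit_cpu == vcpu->cpu)
                return;

        /*
         * Another vCPU of the same guest may have run on this host CPU
         * earlier and left stale G-stage TLB entries behind under the
         * shared VMID, so flush all G-stage entries for that VMID.
         */
        vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
        kvm_riscv_local_hfence_gvma_vmid_all(vmid);

        /*
         * Flush VS-stage TLB entries for implementations where the
         * VS-stage TLB does not cache guest physical addresses and
         * VMIDs (assumption, inferred from the comment at 184-185).
         */
        kvm_riscv_local_hfence_vvma_all(vmid);
}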
199 struct kvm_vmid *v = &vcpu->kvm->arch.vmid; in kvm_riscv_tlb_flush_process()
200 unsigned long vmid = READ_ONCE(v->vmid); in kvm_riscv_tlb_flush_process()
210 struct kvm_vmid *v = &vcpu->kvm->arch.vmid; in kvm_riscv_hfence_vvma_all_process()
211 unsigned long vmid = READ_ONCE(v->vmid); in kvm_riscv_hfence_vvma_all_process()
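
Both process helpers snapshot the current VMID with READ_ONCE() before flushing. A minimal sketch of the VVMA variant, assuming the NACL-vs-local split that is visible in the dispatch fragments further down; kvm_riscv_nacl_available() as the selector is an assumption:

void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu)
{
        struct kvm_vmid *v = &vcpu->kvm->arch.vmid;
        unsigned long vmid = READ_ONCE(v->vmid);

        /* Prefer the NACL shared-memory hfence when available (assumption). */
        if (kvm_riscv_nacl_available())
                nacl_hfence_vvma_all(nacl_shmem(), vmid);
        else
                kvm_riscv_local_hfence_vvma_all(vmid);
}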
223 struct kvm_vcpu_arch *varch = &vcpu->arch; in vcpu_hfence_dequeue()
225 spin_lock(&varch->hfence_lock); in vcpu_hfence_dequeue()
227 if (varch->hfence_queue[varch->hfence_head].type) { in vcpu_hfence_dequeue()
228 memcpy(out_data, &varch->hfence_queue[varch->hfence_head], in vcpu_hfence_dequeue()
230 varch->hfence_queue[varch->hfence_head].type = 0; in vcpu_hfence_dequeue()
232 varch->hfence_head++; in vcpu_hfence_dequeue()
233 if (varch->hfence_head == KVM_RISCV_VCPU_MAX_HFENCE) in vcpu_hfence_dequeue()
234 varch->hfence_head = 0; in vcpu_hfence_dequeue()
239 spin_unlock(&varch->hfence_lock); in vcpu_hfence_dequeue()
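
The dequeue fragments describe a fixed-size ring indexed by hfence_head, where a nonzero .type marks an occupied slot. A sketch of the complete helper under that reading; the bool return reporting whether an entry was dequeued is an assumption:

static bool vcpu_hfence_dequeue(struct kvm_vcpu *vcpu,
                                struct kvm_riscv_hfence *out_data)
{
        bool ret = false;
        struct kvm_vcpu_arch *varch = &vcpu->arch;

        spin_lock(&varch->hfence_lock);

        /* A nonzero type marks the head slot as occupied. */
        if (varch->hfence_queue[varch->hfence_head].type) {
                memcpy(out_data, &varch->hfence_queue[varch->hfence_head],
                       sizeof(*out_data));
                varch->hfence_queue[varch->hfence_head].type = 0;

                /* Advance head with wrap-around. */
                varch->hfence_head++;
                if (varch->hfence_head == KVM_RISCV_VCPU_MAX_HFENCE)
                        varch->hfence_head = 0;

                ret = true;
        }

        spin_unlock(&varch->hfence_lock);

        return ret;
}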
248 struct kvm_vcpu_arch *varch = &vcpu->arch; in vcpu_hfence_enqueue()
250 spin_lock(&varch->hfence_lock); in vcpu_hfence_enqueue()
252 if (!varch->hfence_queue[varch->hfence_tail].type) { in vcpu_hfence_enqueue()
253 memcpy(&varch->hfence_queue[varch->hfence_tail], in vcpu_hfence_enqueue()
256 varch->hfence_tail++; in vcpu_hfence_enqueue()
257 if (varch->hfence_tail == KVM_RISCV_VCPU_MAX_HFENCE) in vcpu_hfence_enqueue()
258 varch->hfence_tail = 0; in vcpu_hfence_enqueue()
263 spin_unlock(&varch->hfence_lock); in vcpu_hfence_enqueue()
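
The enqueue side mirrors the dequeue: the producer may only write a slot whose .type is still zero. A sketch under the same assumptions as the dequeue sketch above:

static bool vcpu_hfence_enqueue(struct kvm_vcpu *vcpu,
                                const struct kvm_riscv_hfence *data)
{
        bool ret = false;
        struct kvm_vcpu_arch *varch = &vcpu->arch;

        spin_lock(&varch->hfence_lock);

        /* A zero type marks the tail slot as free. */
        if (!varch->hfence_queue[varch->hfence_tail].type) {
                memcpy(&varch->hfence_queue[varch->hfence_tail],
                       data, sizeof(*data));

                /* Advance tail with wrap-around. */
                varch->hfence_tail++;
                if (varch->hfence_tail == KVM_RISCV_VCPU_MAX_HFENCE)
                        varch->hfence_tail = 0;

                ret = true;
        }

        spin_unlock(&varch->hfence_lock);

        return ret;
}

Using the .type field as a per-slot occupancy flag keeps the full/empty distinction unambiguous without a separate element counter: head == tail alone could mean either state.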
270 struct kvm_riscv_hfence d = { 0 }; in kvm_riscv_hfence_process() local
272 while (vcpu_hfence_dequeue(vcpu, &d)) { in kvm_riscv_hfence_process()
273 switch (d.type) { in kvm_riscv_hfence_process()
278 nacl_hfence_gvma_vmid(nacl_shmem(), d.vmid, in kvm_riscv_hfence_process()
279 d.addr, d.size, d.order); in kvm_riscv_hfence_process()
281 kvm_riscv_local_hfence_gvma_vmid_gpa(d.vmid, d.addr, in kvm_riscv_hfence_process()
282 d.size, d.order); in kvm_riscv_hfence_process()
286 nacl_hfence_gvma_vmid_all(nacl_shmem(), d.vmid); in kvm_riscv_hfence_process()
288 kvm_riscv_local_hfence_gvma_vmid_all(d.vmid); in kvm_riscv_hfence_process()
293 nacl_hfence_vvma_asid(nacl_shmem(), d.vmid, d.asid, in kvm_riscv_hfence_process()
294 d.addr, d.size, d.order); in kvm_riscv_hfence_process()
296 kvm_riscv_local_hfence_vvma_asid_gva(d.vmid, d.asid, d.addr, in kvm_riscv_hfence_process()
297 d.size, d.order); in kvm_riscv_hfence_process()
302 nacl_hfence_vvma_asid_all(nacl_shmem(), d.vmid, d.asid); in kvm_riscv_hfence_process()
304 kvm_riscv_local_hfence_vvma_asid_all(d.vmid, d.asid); in kvm_riscv_hfence_process()
309 nacl_hfence_vvma(nacl_shmem(), d.vmid, in kvm_riscv_hfence_process()
310 d.addr, d.size, d.order); in kvm_riscv_hfence_process()
312 kvm_riscv_local_hfence_vvma_gva(d.vmid, d.addr, in kvm_riscv_hfence_process()
313 d.size, d.order); in kvm_riscv_hfence_process()
318 nacl_hfence_vvma_all(nacl_shmem(), d.vmid); in kvm_riscv_hfence_process()
320 kvm_riscv_local_hfence_vvma_all(d.vmid); in kvm_riscv_hfence_process()
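
Each case of the dispatch switch appears twice above, once as a nacl_hfence_*() call and once as a kvm_riscv_local_*() call; the selector between the two paths is not among the matched lines. A sketch of one case, assuming the split is keyed on kvm_riscv_nacl_available(), with the remaining cases following the same pattern:

        struct kvm_riscv_hfence d = { 0 };

        /* Drain queued hfence requests and execute each one. */
        while (vcpu_hfence_dequeue(vcpu, &d)) {
                switch (d.type) {
                case KVM_RISCV_HFENCE_GVMA_VMID_GPA:
                        if (kvm_riscv_nacl_available())
                                nacl_hfence_gvma_vmid(nacl_shmem(), d.vmid,
                                                      d.addr, d.size, d.order);
                        else
                                kvm_riscv_local_hfence_gvma_vmid_gpa(d.vmid, d.addr,
                                                                     d.size, d.order);
                        break;
                /* ... remaining cases follow the same NACL-vs-local split ... */
                default:
                        break;
                }
        }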
340 if (hbase != -1UL) { in make_xfence_request()
341 if (vcpu->vcpu_id < hbase) in make_xfence_request()
343 if (!(hmask & (1UL << (vcpu->vcpu_id - hbase)))) in make_xfence_request()
349 if (!data || !data->type) in make_xfence_request()
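
The filter in make_xfence_request() treats hbase == -1UL as "all vCPUs"; otherwise hmask is a bitmap of vCPU IDs relative to hbase. A hedged sketch of the surrounding loop; kvm_for_each_vcpu(), the req/fallback_req plumbing, and the fallback-on-full-queue behaviour are assumptions based on common KVM patterns, not confirmed by the matched lines:

static void make_xfence_request(struct kvm *kvm,
                                unsigned long hbase, unsigned long hmask,
                                unsigned int req, unsigned int fallback_req,
                                const struct kvm_riscv_hfence *data)
{
        unsigned long i;
        struct kvm_vcpu *vcpu;
        unsigned int actual_req = req;
        DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS);

        bitmap_zero(vcpu_mask, KVM_MAX_VCPUS);
        kvm_for_each_vcpu(i, vcpu, kvm) {
                /* Skip vCPUs outside the requested hart base/mask. */
                if (hbase != -1UL) {
                        if (vcpu->vcpu_id < hbase)
                                continue;
                        if (!(hmask & (1UL << (vcpu->vcpu_id - hbase))))
                                continue;
                }

                bitmap_set(vcpu_mask, i, 1);

                /* No payload: a plain flush request is enough. */
                if (!data || !data->type)
                        continue;

                /* Fall back to a conservative request if the queue is full. */
                if (!vcpu_hfence_enqueue(vcpu, data))
                        actual_req = fallback_req;
        }

        kvm_make_vcpus_request_mask(kvm, actual_req, vcpu_mask);
}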
382 data.size = gpsz; in kvm_riscv_hfence_gvma_vmid_gpa()
412 data.size = gvsz; in kvm_riscv_hfence_vvma_asid_gva()
442 data.size = gvsz; in kvm_riscv_hfence_vvma_gva()
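
The three data.size assignments at source lines 382, 412, and 442 each sit inside a helper that fills a kvm_riscv_hfence request and fans it out through make_xfence_request(). A sketch of the GVMA variant; the field names other than .size, the type constant, and the KVM_REQ_* request numbers are assumptions:

void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
                                    unsigned long hbase, unsigned long hmask,
                                    gpa_t gpa, gpa_t gpsz,
                                    unsigned long order, unsigned long vmid)
{
        struct kvm_riscv_hfence data;

        /* Describe a ranged G-stage flush for the given VMID. */
        data.type = KVM_RISCV_HFENCE_GVMA_VMID_GPA;
        data.asid = 0;
        data.vmid = vmid;
        data.addr = gpa;
        data.size = gpsz;
        data.order = order;
        make_xfence_request(kvm, hbase, hmask, KVM_REQ_TLB_FLUSH,
                            KVM_REQ_HFENCE, &data);
}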
462 kvm_riscv_hfence_gvma_vmid_gpa(kvm, -1UL, 0, in kvm_arch_flush_remote_tlbs_range()
464 PAGE_SHIFT, READ_ONCE(kvm->arch.vmid.vmid)); in kvm_arch_flush_remote_tlbs_range()
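
For context, kvm_arch_flush_remote_tlbs_range() maps a gfn range onto that helper with hbase == -1UL, i.e. all vCPUs. The listing elides the middle arguments of the call; the shifted gfn/nr_pages values below are assumptions filling that gap for illustration only:

int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages)
{
        /* gfn/nr_pages shifts assumed; only outer arguments appear above. */
        kvm_riscv_hfence_gvma_vmid_gpa(kvm, -1UL, 0,
                                       gfn << PAGE_SHIFT,
                                       nr_pages << PAGE_SHIFT,
                                       PAGE_SHIFT,
                                       READ_ONCE(kvm->arch.vmid.vmid));
        return 0;
}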