// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 - Google Inc
 */

#include <asm/pgtable-types.h>
/* In __hyp_sve_save_guest(): */
        sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
        __sve_save_state(vcpu_sve_pffr(vcpu), &vcpu->arch.ctxt.fp_regs.fpsr, true);
        write_sysreg_s(sve_vq_from_vl(kvm_host_sve_max_vl) - 1, SYS_ZCR_EL2);
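        /*
         * The sequence above first programs ZCR_EL2 for the guest's
         * maximum vector length, so the SVE state is saved with a
         * consistent register layout, then drops back to the host's
         * maximum shared VL.
         */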
/* In __hyp_sve_restore_host(): */
        /*
         * Note that this constrains the PE to the maximum shared VL
         * that was discovered, if we wish to use larger VLs this will
         * need to be revisited.
         */
        write_sysreg_s(sve_vq_from_vl(kvm_host_sve_max_vl) - 1, SYS_ZCR_EL2);
        __sve_restore_state(sve_state->sve_regs + sve_ffr_offset(kvm_host_sve_max_vl),
                            &sve_state->fpsr,
                            true);
        write_sysreg_el1(sve_state->zcr_el1, SYS_ZCR);
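        /*
         * Mirror image of the guest save path: set the host's maximum
         * shared VL first so the restore layout matches the save, then
         * reinstate the saved ZCR_EL1 value.
         */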
/* In fpsimd_sve_sync(): */
        __fpsimd_save_state(&vcpu->arch.ctxt.fp_regs);

        has_fpmr = kvm_has_fpmr(kern_hyp_va(vcpu->kvm));
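        /*
         * kern_hyp_va() converts the host kernel pointer into its
         * hypervisor alias: nVHE hyp code cannot dereference host
         * kernel virtual addresses directly.
         */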
static void flush_debug_state(struct pkvm_hyp_vcpu *hyp_vcpu)
{
        struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;

        hyp_vcpu->vcpu.arch.debug_owner = host_vcpu->arch.debug_owner;

        if (kvm_guest_owns_debug_regs(&hyp_vcpu->vcpu))
                hyp_vcpu->vcpu.arch.vcpu_debug_state = host_vcpu->arch.vcpu_debug_state;
        else if (kvm_host_owns_debug_regs(&hyp_vcpu->vcpu))
                hyp_vcpu->vcpu.arch.external_debug_state = host_vcpu->arch.external_debug_state;
}
static void sync_debug_state(struct pkvm_hyp_vcpu *hyp_vcpu)
{
        struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;

        if (kvm_guest_owns_debug_regs(&hyp_vcpu->vcpu))
                host_vcpu->arch.vcpu_debug_state = hyp_vcpu->vcpu.arch.vcpu_debug_state;
        else if (kvm_host_owns_debug_regs(&hyp_vcpu->vcpu))
                host_vcpu->arch.external_debug_state = hyp_vcpu->vcpu.arch.external_debug_state;
}
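/*
 * flush_debug_state() and sync_debug_state() are mirror images: flush
 * copies host -> hyp before the guest runs, sync copies hyp -> host
 * afterwards, and only the register set belonging to the current debug
 * owner is propagated.
 */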
static void flush_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
{
        struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;

        hyp_vcpu->vcpu.arch.ctxt = host_vcpu->arch.ctxt;

        hyp_vcpu->vcpu.arch.mdcr_el2 = host_vcpu->arch.mdcr_el2;
        hyp_vcpu->vcpu.arch.hcr_el2 &= ~(HCR_TWI | HCR_TWE);
        hyp_vcpu->vcpu.arch.hcr_el2 |= READ_ONCE(host_vcpu->arch.hcr_el2) &
                                       (HCR_TWI | HCR_TWE);

        hyp_vcpu->vcpu.arch.iflags = host_vcpu->arch.iflags;
        hyp_vcpu->vcpu.arch.vsesr_el2 = host_vcpu->arch.vsesr_el2;
        hyp_vcpu->vcpu.arch.vgic_cpu.vgic_v3 = host_vcpu->arch.vgic_cpu.vgic_v3;
}
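/*
 * Only the WFI/WFE trap bits are taken over from the host's HCR_EL2
 * image; READ_ONCE() avoids a torn or re-read value for a field the
 * host can write concurrently.
 */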
static void sync_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
{
        struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
        struct vgic_v3_cpu_if *hyp_cpu_if = &hyp_vcpu->vcpu.arch.vgic_cpu.vgic_v3;
        struct vgic_v3_cpu_if *host_cpu_if = &host_vcpu->arch.vgic_cpu.vgic_v3;
        unsigned int i;

        fpsimd_sve_sync(&hyp_vcpu->vcpu);

        host_vcpu->arch.ctxt = hyp_vcpu->vcpu.arch.ctxt;
        host_vcpu->arch.hcr_el2 = hyp_vcpu->vcpu.arch.hcr_el2;
        host_vcpu->arch.fault = hyp_vcpu->vcpu.arch.fault;
        host_vcpu->arch.iflags = hyp_vcpu->vcpu.arch.iflags;

        host_cpu_if->vgic_hcr = hyp_cpu_if->vgic_hcr;
        for (i = 0; i < hyp_cpu_if->used_lrs; ++i)
                host_cpu_if->vgic_lr[i] = hyp_cpu_if->vgic_lr[i];
}
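/*
 * Note that only the list registers actually in use (used_lrs) are
 * copied back to the host, not the whole vgic_lr[] array.
 */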
/* In handle___pkvm_vcpu_load(): */
        hyp_vcpu->vcpu.arch.hcr_el2 &= ~(HCR_TWE | HCR_TWI);
        hyp_vcpu->vcpu.arch.hcr_el2 |= hcr_el2 & (HCR_TWE | HCR_TWI);
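        /*
         * Same WFE/WFI trap-bit merge as flush_hyp_vcpu(), applied at
         * vcpu-load time from the hcr_el2 value supplied by the host.
         */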
/* In handle___kvm_vcpu_run(): two error paths default to -EINVAL */
        ret = -EINVAL;
        /* ... */
        ret = -EINVAL;
        /* ... */
        ret = __kvm_vcpu_run(&hyp_vcpu->vcpu);
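        /*
         * __kvm_vcpu_run() enters the guest on the hypervisor's copy of
         * the vCPU; its return value is what the host's vcpu-run
         * hypercall ultimately sees.
         */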
static int pkvm_refill_memcache(struct pkvm_hyp_vcpu *hyp_vcpu)
{
        struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;

        return refill_memcache(&hyp_vcpu->vcpu.arch.pkvm_memcache,
                               host_vcpu->arch.pkvm_memcache.nr_pages,
                               &host_vcpu->arch.pkvm_memcache);
}
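/*
 * The hyp memcache is topped up to the page count advertised by the
 * host's memcache, pulling from the host-provided list so the
 * hypervisor has pages on hand for guest stage-2 tables.
 */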
/* Each guest-memory hypercall handler starts from a failure default: */
        int ret = -EINVAL;      /* in handle___pkvm_host_share_guest() */
        int ret = -EINVAL;      /* in handle___pkvm_host_unshare_guest() */
        int ret = -EINVAL;      /* in handle___pkvm_host_relax_perms_guest() */
        int ret = -EINVAL;      /* in handle___pkvm_host_wrprotect_guest() */
        int ret = -EINVAL;      /* in handle___pkvm_host_test_clear_young_guest() */
        int ret = -EINVAL;      /* in handle___pkvm_host_mkyoung_guest() */
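/*
 * Presumably each of these handlers validates its arguments (and takes
 * whatever lock protects the guest stage-2) before overwriting ret, so
 * any early exit reports -EINVAL back to the host.
 */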
/* In handle___kvm_tlb_flush_vmid_ipa(): */
        DECLARE_REG(phys_addr_t, ipa, host_ctxt, 2);
        __kvm_tlb_flush_vmid_ipa(kern_hyp_va(mmu), ipa, level);

/* In handle___kvm_tlb_flush_vmid_ipa_nsh(): */
        DECLARE_REG(phys_addr_t, ipa, host_ctxt, 2);
        __kvm_tlb_flush_vmid_ipa_nsh(kern_hyp_va(mmu), ipa, level);
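/*
 * The mmu and level arguments above come from DECLARE_REG() lines that
 * the extraction elided. A minimal, self-contained sketch of the
 * pattern follows; cpu_context, handle_flush and the register layout
 * here are hypothetical stand-ins, not the kernel's definitions. The
 * idea is the same: hypercall arguments arrive in the saved
 * general-purpose registers of the host context and are pulled out by
 * index.
 */
#include <stdint.h>
#include <stdio.h>

struct cpu_context { uint64_t regs[31]; };      /* saved x0..x30 */

/* Declare a local of 'type' loaded from saved register 'reg'. */
#define DECLARE_REG(type, name, ctxt, reg) \
        type name = (type)(ctxt)->regs[(reg)]

static void handle_flush(struct cpu_context *host_ctxt)
{
        DECLARE_REG(uint64_t, ipa, host_ctxt, 2);       /* x2: guest IPA */
        DECLARE_REG(int, level, host_ctxt, 3);          /* x3: table level */

        printf("flush ipa=0x%llx level=%d\n", (unsigned long long)ipa, level);
}

int main(void)
{
        struct cpu_context ctxt = { .regs = { [2] = 0x80000000, [3] = 3 } };

        handle_flush(&ctxt);
        return 0;
}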
/* In handle___pkvm_tlb_flush_vmid(): */
        __kvm_tlb_flush_vmid(&hyp_vm->kvm.arch.mmu);
/* In handle___pkvm_init(): */
        /*
         * __pkvm_init() will return only if an error occurred, otherwise it
         * will tail-call in __pkvm_init_finalise() which will have to deal
         * with the host context directly.
         */
/* In handle_host_hcall(): */
        /*
         * ... (1) the static key used to determine initialisation must be
         * toggled prior to finalisation and (2) finalisation is performed
         * on a per-CPU basis. This is all fine, however, since
         * __pkvm_prot_finalize returns -EPERM after the first call for a
         * given CPU.
         */
        /* ... */
        id -= KVM_HOST_SMCCC_ID(0);
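/*
 * After the rebase above, the ID indexes a table of handler function
 * pointers. A self-contained model of that dispatch follows; the names
 * (host_hcall, HCALL_BASE, the handlers) are hypothetical stand-ins
 * chosen to mirror the structure, not the kernel's exact code.
 */
#include <stdint.h>
#include <stdio.h>

struct cpu_context { uint64_t regs[31]; };

typedef void (*hcall_t)(struct cpu_context *);

static void handle_vcpu_run(struct cpu_context *ctxt)  { (void)ctxt; puts("vcpu_run"); }
static void handle_tlb_flush(struct cpu_context *ctxt) { (void)ctxt; puts("tlb_flush"); }

/* Table indexed by the rebased hypercall ID. */
static const hcall_t host_hcall[] = {
        handle_vcpu_run,
        handle_tlb_flush,
};

#define HCALL_BASE 0xc6000000u  /* hypothetical SMCCC function-ID base */

static void dispatch(uint32_t id, struct cpu_context *ctxt)
{
        id -= HCALL_BASE;       /* rebase so the ID indexes the table */
        if (id >= sizeof(host_hcall) / sizeof(host_hcall[0])) {
                puts("invalid hypercall");
                return;
        }
        host_hcall[id](ctxt);
}

int main(void)
{
        struct cpu_context ctxt = { { 0 } };

        dispatch(HCALL_BASE + 1, &ctxt);        /* dispatches handle_tlb_flush */
        return 0;
}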