// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/compiler.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/kvm_host.h>
#include <linux/swab.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>

static bool __hyp_text __is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(read_sysreg_el2(spsr) & PSR_AA32_E_BIT);

	return !!(read_sysreg(SCTLR_EL1) & SCTLR_ELx_EE);
}

/*
 * __vgic_v2_perform_cpuif_access -- perform a GICV access on behalf of the
 *				     guest.
 *
 * @vcpu: the offending vcpu
 *
 * Returns:
 *  1: GICV access successfully performed
 *  0: Not a GICV access
 * -1: Illegal GICV access successfully performed
 */
int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	struct vgic_dist *vgic = &kvm->arch.vgic;
	phys_addr_t fault_ipa;
	void __iomem *addr;
	int rd;

	/* Build the full address */
	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
	fault_ipa |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);

	/* If not for GICV, move on */
	if (fault_ipa < vgic->vgic_cpu_base ||
	    fault_ipa >= (vgic->vgic_cpu_base + KVM_VGIC_V2_CPU_SIZE))
		return 0;

	/* Reject anything but a 32bit access */
	if (kvm_vcpu_dabt_get_as(vcpu) != sizeof(u32)) {
		__kvm_skip_instr(vcpu);
		return -1;
	}

	/* Not aligned? Don't bother */
	if (fault_ipa & 3) {
		__kvm_skip_instr(vcpu);
		return -1;
	}

	rd = kvm_vcpu_dabt_get_rd(vcpu);
	addr  = hyp_symbol_addr(kvm_vgic_global_state)->vcpu_hyp_va;
	addr += fault_ipa - vgic->vgic_cpu_base;

	if (kvm_vcpu_dabt_iswrite(vcpu)) {
		u32 data = vcpu_get_reg(vcpu, rd);
		if (__is_be(vcpu)) {
			/* guest pre-swabbed data, undo this for writel() */
			data = swab32(data);
		}
		writel_relaxed(data, addr);
	} else {
		u32 data = readl_relaxed(addr);
		if (__is_be(vcpu)) {
			/* guest expects swabbed data */
			data = swab32(data);
		}
		vcpu_set_reg(vcpu, rd, data);
	}

	__kvm_skip_instr(vcpu);

	return 1;
}