// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_HYP_FAULT_H__
#define __ARM64_KVM_HYP_FAULT_H__

#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>

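/*
 * Check whether the faulting address can be resolved with an AT
 * instruction: not when the abort was an external abort or parity/ECC
 * error on the translation table walk itself, and not when an external
 * abort is reported with FAR_EL2 not valid (FnV set), since there is
 * then no meaningful address to translate.
 */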
static inline bool __fault_safe_to_translate(u64 esr)
{
	u64 fsc = esr & ESR_ELx_FSC;

	if (esr_fsc_is_sea_ttw(esr) || esr_fsc_is_secc_ttw(esr))
		return false;

	return !(fsc == ESR_ELx_FSC_EXTABT && (esr & ESR_ELx_FnV));
}

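/*
 * Resolve the faulting VA to an IPA by replaying the stage-1 walk with an
 * AT instruction and converting the resulting PAR_EL1 value into HPFAR_EL2
 * format. Returns false if the walk fails, in which case the caller goes
 * back to the guest.
 */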
static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar)
{
	int ret;
	u64 par, tmp;

	/*
	 * Resolve the IPA the hard way using the guest VA.
	 *
	 * Stage-1 translation already validated the memory access
	 * rights. As such, we can use the EL1 translation regime, and
	 * don't have to distinguish between EL0 and EL1 access.
	 *
	 * We do need to save/restore PAR_EL1 though, as we haven't
	 * saved the guest context yet, and we may return early...
	 */
	par = read_sysreg_par();
	ret = system_supports_poe() ? __kvm_at(OP_AT_S1E1A, far) :
				      __kvm_at(OP_AT_S1E1R, far);
	if (!ret)
		tmp = read_sysreg_par();
	else
		tmp = SYS_PAR_EL1_F; /* back to the guest */
	write_sysreg(par, par_el1);

	if (unlikely(tmp & SYS_PAR_EL1_F))
		return false; /* Translation failed, back to guest */

	/* Convert PAR to HPFAR format */
	*hpfar = PAR_TO_HPFAR(tmp);
	return true;
}

/*
 * Checks for the conditions when HPFAR_EL2 is written, per ARM ARM R_FKLWR.
 */
static inline bool __hpfar_valid(u64 esr)
{
	/*
	 * CPUs affected by ARM erratum #834220 may incorrectly report a
	 * stage-2 translation fault when a stage-1 permission fault occurs.
	 *
	 * Re-walk the page tables to determine if a stage-1 fault actually
	 * occurred.
	 */
	if (cpus_have_final_cap(ARM64_WORKAROUND_834220) &&
	    esr_fsc_is_translation_fault(esr))
		return false;

	if (esr_fsc_is_translation_fault(esr) || esr_fsc_is_access_flag_fault(esr))
		return true;

	if ((esr & ESR_ELx_S1PTW) && esr_fsc_is_permission_fault(esr))
		return true;

	return esr_fsc_is_addr_sz_fault(esr);
}

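/*
 * Capture the faulting addresses for a guest abort. Returns false when the
 * address could not be resolved and control should go back to the guest;
 * returns true otherwise. On success, hpfar_el2 holds a usable IPA only if
 * HPFAR_EL2_NS is set; it stays 0 when no translation was possible.
 */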
static inline bool __get_fault_info(u64 esr, struct kvm_vcpu_fault_info *fault)
{
	u64 hpfar;

	fault->far_el2		= read_sysreg_el2(SYS_FAR);
	fault->hpfar_el2	= 0;

	if (__hpfar_valid(esr))
		hpfar = read_sysreg(hpfar_el2);
	else if (unlikely(!__fault_safe_to_translate(esr)))
		return true;
	else if (!__translate_far_to_hpfar(fault->far_el2, &hpfar))
		return false;

	/*
	 * Hijack HPFAR_EL2.NS (RES0 in Non-secure) to indicate a valid
	 * HPFAR value.
	 */
	fault->hpfar_el2 = hpfar | HPFAR_EL2_NS;
	return true;
}

#endif