// SPDX-License-Identifier: GPL-2.0-only
/*
 * Fault injection for both 32 and 64bit guests.
 *
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Based on arch/arm/kvm/emulate.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_nested.h>
#include <asm/esr.h>

static unsigned int exception_target_el(struct kvm_vcpu *vcpu)
{
	/* If not nesting, EL1 is the only possible exception target */
	if (likely(!vcpu_has_nv(vcpu)))
		return PSR_MODE_EL1h;

	/*
	 * With NV, we need to pick between EL1 and EL2. Note that we
	 * never deal with a nesting exception here, hence never
	 * changing context, and the exception itself can be delayed
	 * until the next entry.
	 */
	switch (*vcpu_cpsr(vcpu) & PSR_MODE_MASK) {
	case PSR_MODE_EL2h:
	case PSR_MODE_EL2t:
		return PSR_MODE_EL2h;
	case PSR_MODE_EL1h:
	case PSR_MODE_EL1t:
		return PSR_MODE_EL1h;
	case PSR_MODE_EL0t:
		return vcpu_el2_tge_is_set(vcpu) ? PSR_MODE_EL2h : PSR_MODE_EL1h;
	default:
		BUG();
	}
}

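/*
 * ESR and FAR exist separately at EL1 and EL2; pick the copy matching
 * the EL the exception will be delivered to.
 */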
static enum vcpu_sysreg exception_esr_elx(struct kvm_vcpu *vcpu)
{
	if (exception_target_el(vcpu) == PSR_MODE_EL2h)
		return ESR_EL2;

	return ESR_EL1;
}

static enum vcpu_sysreg exception_far_elx(struct kvm_vcpu *vcpu)
{
	if (exception_target_el(vcpu) == PSR_MODE_EL2h)
		return FAR_EL2;

	return FAR_EL1;
}

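/*
 * Mark a synchronous exception (resp. SError) as pending for the target
 * EL; the actual CPU state mangling is delayed until the next guest
 * entry.
 */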
static void pend_sync_exception(struct kvm_vcpu *vcpu)
{
	if (exception_target_el(vcpu) == PSR_MODE_EL1h)
		kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
	else
		kvm_pend_exception(vcpu, EXCEPT_AA64_EL2_SYNC);
}

static void pend_serror_exception(struct kvm_vcpu *vcpu)
{
	if (exception_target_el(vcpu) == PSR_MODE_EL1h)
		kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SERR);
	else
		kvm_pend_exception(vcpu, EXCEPT_AA64_EL2_SERR);
}

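/*
 * An SCTLR2_ELx bit only takes effect if FEAT_SCTLR2 is advertised to
 * the guest and, for a nested guest, if the L1 hypervisor has enabled
 * the register via HCRX_EL2.SCTLR2En. Evaluate the bit in the copy
 * matching the exception's target EL.
 */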
static bool __effective_sctlr2_bit(struct kvm_vcpu *vcpu, unsigned int idx)
{
	u64 sctlr2;

	if (!kvm_has_sctlr2(vcpu->kvm))
		return false;

	if (is_nested_ctxt(vcpu) &&
	    !(__vcpu_sys_reg(vcpu, HCRX_EL2) & HCRX_EL2_SCTLR2En))
		return false;

	if (exception_target_el(vcpu) == PSR_MODE_EL1h)
		sctlr2 = vcpu_read_sys_reg(vcpu, SCTLR2_EL1);
	else
		sctlr2 = vcpu_read_sys_reg(vcpu, SCTLR2_EL2);

	return sctlr2 & BIT(idx);
}

static bool effective_sctlr2_ease(struct kvm_vcpu *vcpu)
{
	return __effective_sctlr2_bit(vcpu, SCTLR2_EL1_EASE_SHIFT);
}

static bool effective_sctlr2_nmea(struct kvm_vcpu *vcpu)
{
	return __effective_sctlr2_bit(vcpu, SCTLR2_EL1_NMEA_SHIFT);
}

static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
{
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	bool is_aarch32 = vcpu_mode_is_32bit(vcpu);
	u64 esr = 0;

	/* This delight is brought to you by FEAT_DoubleFault2. */
	if (effective_sctlr2_ease(vcpu))
		pend_serror_exception(vcpu);
	else
		pend_sync_exception(vcpu);

	/*
	 * Build an {i,d}abort, depending on the level and the
	 * instruction set. Report an external synchronous abort.
	 */
	if (kvm_vcpu_trap_il_is32bit(vcpu))
		esr |= ESR_ELx_IL;

	/*
	 * Here, the guest runs in AArch64 mode when in EL1. If we get
	 * an AArch32 fault, it means we managed to trap an EL0 fault.
	 */
	if (is_aarch32 || (cpsr & PSR_MODE_MASK) == PSR_MODE_EL0t)
		esr |= (ESR_ELx_EC_IABT_LOW << ESR_ELx_EC_SHIFT);
	else
		esr |= (ESR_ELx_EC_IABT_CUR << ESR_ELx_EC_SHIFT);

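	/*
	 * Turn the instruction abort EC into the data abort one by OR-ing
	 * in the DABT encoding: DABT_LOW/CUR only differ from IABT_LOW/CUR
	 * by one extra bit, so the LOW/CUR distinction computed above is
	 * preserved.
	 */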
	if (!is_iabt)
		esr |= ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT;

	esr |= ESR_ELx_FSC_EXTABT;

	vcpu_write_sys_reg(vcpu, addr, exception_far_elx(vcpu));
	vcpu_write_sys_reg(vcpu, esr, exception_esr_elx(vcpu));
}

static void inject_undef64(struct kvm_vcpu *vcpu)
{
	u64 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);

	pend_sync_exception(vcpu);

	/*
	 * Build an unknown exception, depending on the instruction
	 * set.
	 */
	if (kvm_vcpu_trap_il_is32bit(vcpu))
		esr |= ESR_ELx_IL;

	vcpu_write_sys_reg(vcpu, esr, exception_esr_elx(vcpu));
}

#define DFSR_FSC_EXTABT_LPAE	0x10
#define DFSR_FSC_EXTABT_nLPAE	0x08
#define DFSR_LPAE		BIT(9)
#define TTBCR_EAE		BIT(31)

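/*
 * An AArch32 UNDEF only needs the exception to be pended: unlike the
 * AArch64 case above, there is no syndrome register to populate.
 */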
static void inject_undef32(struct kvm_vcpu *vcpu)
{
	kvm_pend_exception(vcpu, EXCEPT_AA32_UND);
}

/*
 * Modelled after TakeDataAbortException() and TakePrefetchAbortException()
 * pseudocode.
 */
static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, u32 addr)
{
	u64 far;
	u32 fsr;

	/* Give the guest an IMPLEMENTATION DEFINED exception */
	if (vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE) {
		fsr = DFSR_LPAE | DFSR_FSC_EXTABT_LPAE;
	} else {
		/* no need to shuffle FS[4] into DFSR[10] as it's 0 */
		fsr = DFSR_FSC_EXTABT_nLPAE;
	}

	far = vcpu_read_sys_reg(vcpu, FAR_EL1);

	if (is_pabt) {
		kvm_pend_exception(vcpu, EXCEPT_AA32_IABT);
		/* IFAR maps to FAR_EL1[63:32] */
		far &= GENMASK(31, 0);
		far |= (u64)addr << 32;
		vcpu_write_sys_reg(vcpu, fsr, IFSR32_EL2);
	} else { /* !is_pabt */
		kvm_pend_exception(vcpu, EXCEPT_AA32_DABT);
		/* DFAR maps to FAR_EL1[31:0], DFSR to ESR_EL1 */
		far &= GENMASK(63, 32);
		far |= addr;
		vcpu_write_sys_reg(vcpu, fsr, ESR_EL1);
	}

	vcpu_write_sys_reg(vcpu, far, FAR_EL1);
}

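/* Inject an external abort matching the vCPU's EL1 execution state. */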
static void __kvm_inject_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr)
{
	if (vcpu_el1_is_32bit(vcpu))
		inject_abt32(vcpu, iabt, addr);
	else
		inject_abt64(vcpu, iabt, addr);
}

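/*
 * SEAs are routed to EL2 when HCR_EL2.TGE or HCR_EL2.TEA is set, or,
 * with FEAT_DoubleFault2 (HCRX_EL2.TMEA), when taken from a privileged
 * mode with PSTATE.A set.
 */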
static bool kvm_sea_target_is_el2(struct kvm_vcpu *vcpu)
{
	if (__vcpu_sys_reg(vcpu, HCR_EL2) & (HCR_TGE | HCR_TEA))
		return true;

	if (!vcpu_mode_priv(vcpu))
		return false;

	return (*vcpu_cpsr(vcpu) & PSR_A_BIT) &&
	       (__vcpu_sys_reg(vcpu, HCRX_EL2) & HCRX_EL2_TMEA);
}

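/**
 * kvm_inject_sea - inject a synchronous external abort into the guest
 * @vcpu: the vCPU on which to inject the abort
 * @iabt: true for an instruction abort, false for a data abort
 * @addr: the faulting address to report
 *
 * Forwards the abort to a nested hypervisor when it targets EL2,
 * otherwise emulates the injection locally. Returns 1 so callers can
 * use it directly as an exit-handler return value.
 */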
int kvm_inject_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr)
{
	lockdep_assert_held(&vcpu->mutex);

	if (is_nested_ctxt(vcpu) && kvm_sea_target_is_el2(vcpu))
		return kvm_inject_nested_sea(vcpu, iabt, addr);

	__kvm_inject_sea(vcpu, iabt, addr);
	return 1;
}

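/**
 * kvm_inject_size_fault - report an address size fault to the guest
 * @vcpu: the faulting vCPU
 *
 * Rebuild the faulting address from the IPA and the page offset held
 * in HFAR, inject an external abort, then rewrite the FSC to describe
 * an Address Size Fault wherever the guest can express one.
 */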
void kvm_inject_size_fault(struct kvm_vcpu *vcpu)
{
	unsigned long addr, esr;

	addr  = kvm_vcpu_get_fault_ipa(vcpu);
	addr |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);

	__kvm_inject_sea(vcpu, kvm_vcpu_trap_is_iabt(vcpu), addr);

	/*
	 * If AArch64 or LPAE, set FSC to 0 to indicate an Address
	 * Size Fault at level 0, as if exceeding PARange.
	 *
	 * Non-LPAE guests will only get the external abort, as there
	 * is no way to describe the ASF.
	 */
	if (vcpu_el1_is_32bit(vcpu) &&
	    !(vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE))
		return;

	esr = vcpu_read_sys_reg(vcpu, exception_esr_elx(vcpu));
	esr &= ~GENMASK_ULL(5, 0);
	vcpu_write_sys_reg(vcpu, esr, exception_esr_elx(vcpu));
}

/**
 * kvm_inject_undefined - inject an undefined instruction into the guest
 * @vcpu: The vCPU in which to inject the exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
	if (vcpu_el1_is_32bit(vcpu))
		inject_undef32(vcpu);
	else
		inject_undef64(vcpu);
}

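/*
 * PSTATE.A masks SErrors, unless the effective SCTLR2_ELx.NMEA bit
 * makes them non-maskable.
 */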
static bool serror_is_masked(struct kvm_vcpu *vcpu)
{
	return (*vcpu_cpsr(vcpu) & PSR_A_BIT) && !effective_sctlr2_nmea(vcpu);
}

static bool kvm_serror_target_is_el2(struct kvm_vcpu *vcpu)
{
	if (is_hyp_ctxt(vcpu) || vcpu_el2_amo_is_set(vcpu))
		return true;

	if (!(__vcpu_sys_reg(vcpu, HCRX_EL2) & HCRX_EL2_TMEA))
		return false;

	/*
	 * In another example where FEAT_DoubleFault2 is entirely backwards,
	 * "masked" as it relates to the routing effects of HCRX_EL2.TMEA
	 * doesn't consider SCTLR2_EL1.NMEA. That is to say, even if EL1 asked
	 * for non-maskable SErrors, the EL2 bit takes priority if A is set.
	 */
	if (vcpu_mode_priv(vcpu))
		return *vcpu_cpsr(vcpu) & PSR_A_BIT;

	/*
	 * Otherwise, SErrors taken from EL0 follow the full masking rules
	 * (i.e. they are unmasked when NMEA is set), and only masked SErrors
	 * are routed to EL2.
	 */
	return serror_is_masked(vcpu);
}

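/*
 * At (virtual) EL2, an SError can only be delivered when TGE or AMO is
 * set; otherwise the caller has to leave it pending as a vSError.
 */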
static bool kvm_serror_undeliverable_at_el2(struct kvm_vcpu *vcpu)
{
	return !(vcpu_el2_tge_is_set(vcpu) || vcpu_el2_amo_is_set(vcpu));
}

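/**
 * kvm_inject_serror_esr - inject an SError into the guest
 * @vcpu: the vCPU on which to inject the SError
 * @esr: the ISS to report in the syndrome register
 *
 * Depending on the vCPU state, the SError is forwarded to a nested
 * hypervisor, left pending as a vSError, taken immediately by emulating
 * the exception entry, or deferred to hardware via HCR_EL2.VSE.
 *
 * Returns 1 so callers can use it directly as an exit-handler return
 * value.
 */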
int kvm_inject_serror_esr(struct kvm_vcpu *vcpu, u64 esr)
{
	lockdep_assert_held(&vcpu->mutex);

	if (is_nested_ctxt(vcpu) && kvm_serror_target_is_el2(vcpu))
		return kvm_inject_nested_serror(vcpu, esr);

	if (vcpu_is_el2(vcpu) && kvm_serror_undeliverable_at_el2(vcpu)) {
		vcpu_set_vsesr(vcpu, esr);
		vcpu_set_flag(vcpu, NESTED_SERROR_PENDING);
		return 1;
	}

	/*
	 * Emulate the exception entry if SErrors are unmasked. This is
	 * necessary because, if the vCPU is in a nested context w/ vSErrors
	 * enabled, we've already delegated the hardware vSError context
	 * (i.e. HCR_EL2.VSE, VSESR_EL2, VDISR_EL2) to the guest hypervisor.
	 *
	 * As we're emulating the SError injection we need to explicitly
	 * populate ESR_ELx.EC because hardware will not do it on our behalf.
	 */
	if (!serror_is_masked(vcpu)) {
		pend_serror_exception(vcpu);
		esr |= FIELD_PREP(ESR_ELx_EC_MASK, ESR_ELx_EC_SERROR);
		vcpu_write_sys_reg(vcpu, esr, exception_esr_elx(vcpu));
		return 1;
	}

	/*
	 * The SError is masked: defer it to hardware via HCR_EL2.VSE, so it
	 * is delivered whenever the guest unmasks SErrors.
	 */
	vcpu_set_vsesr(vcpu, esr & ESR_ELx_ISS_MASK);
	*vcpu_hcr(vcpu) |= HCR_VSE;
	return 1;
}