xref: /linux/arch/arm64/kvm/inject_fault.c (revision 6093a688a07da07808f0122f9aa2a3eed250d853)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Fault injection for both 32 and 64bit guests.
 *
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Based on arch/arm/kvm/emulate.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_nested.h>
#include <asm/esr.h>

static unsigned int exception_target_el(struct kvm_vcpu *vcpu)
{
	/* If not nesting, EL1 is the only possible exception target */
	if (likely(!vcpu_has_nv(vcpu)))
		return PSR_MODE_EL1h;

	/*
	 * With NV, we need to pick between EL1 and EL2. Note that we
	 * never deal with a nesting exception here, hence never
	 * changing context, and the exception itself can be delayed
	 * until the next entry.
	 */
	switch (*vcpu_cpsr(vcpu) & PSR_MODE_MASK) {
	case PSR_MODE_EL2h:
	case PSR_MODE_EL2t:
		return PSR_MODE_EL2h;
	case PSR_MODE_EL1h:
	case PSR_MODE_EL1t:
		return PSR_MODE_EL1h;
	case PSR_MODE_EL0t:
		return vcpu_el2_tge_is_set(vcpu) ? PSR_MODE_EL2h : PSR_MODE_EL1h;
	default:
		BUG();
	}
}
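
/*
 * For quick reference, the mode-to-target mapping above is:
 *
 *	vCPU mode	exception target
 *	---------	----------------
 *	EL2h/EL2t	EL2
 *	EL1h/EL1t	EL1
 *	EL0t		EL2 if HCR_EL2.TGE is set, else EL1
 */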

static enum vcpu_sysreg exception_esr_elx(struct kvm_vcpu *vcpu)
{
	if (exception_target_el(vcpu) == PSR_MODE_EL2h)
		return ESR_EL2;

	return ESR_EL1;
}

static enum vcpu_sysreg exception_far_elx(struct kvm_vcpu *vcpu)
{
	if (exception_target_el(vcpu) == PSR_MODE_EL2h)
		return FAR_EL2;

	return FAR_EL1;
}

static void pend_sync_exception(struct kvm_vcpu *vcpu)
{
	if (exception_target_el(vcpu) == PSR_MODE_EL1h)
		kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
	else
		kvm_pend_exception(vcpu, EXCEPT_AA64_EL2_SYNC);
}

static void pend_serror_exception(struct kvm_vcpu *vcpu)
{
	if (exception_target_el(vcpu) == PSR_MODE_EL1h)
		kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SERR);
	else
		kvm_pend_exception(vcpu, EXCEPT_AA64_EL2_SERR);
}

static bool __effective_sctlr2_bit(struct kvm_vcpu *vcpu, unsigned int idx)
{
	u64 sctlr2;

	if (!kvm_has_sctlr2(vcpu->kvm))
		return false;

	if (is_nested_ctxt(vcpu) &&
	    !(__vcpu_sys_reg(vcpu, HCRX_EL2) & HCRX_EL2_SCTLR2En))
		return false;

	if (exception_target_el(vcpu) == PSR_MODE_EL1h)
		sctlr2 = vcpu_read_sys_reg(vcpu, SCTLR2_EL1);
	else
		sctlr2 = vcpu_read_sys_reg(vcpu, SCTLR2_EL2);

	return sctlr2 & BIT(idx);
}

static bool effective_sctlr2_ease(struct kvm_vcpu *vcpu)
{
	return __effective_sctlr2_bit(vcpu, SCTLR2_EL1_EASE_SHIFT);
}

static bool effective_sctlr2_nmea(struct kvm_vcpu *vcpu)
{
	return __effective_sctlr2_bit(vcpu, SCTLR2_EL1_NMEA_SHIFT);
}
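
/*
 * A note on FEAT_DoubleFault2 for the unfamiliar: SCTLR2_ELx.EASE
 * redirects synchronous external aborts to the SError vector, and
 * SCTLR2_ELx.NMEA makes SErrors non-maskable (i.e. deliverable even
 * with PSTATE.A set). The helpers above compute the *effective* value
 * of these bits, which is zero when the guest lacks FEAT_SCTLR2 or
 * when a guest hypervisor hasn't set HCRX_EL2.SCTLR2En.
 */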

static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
{
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	bool is_aarch32 = vcpu_mode_is_32bit(vcpu);
	u64 esr = 0, fsc;
	int level;

	/*
	 * If injecting an abort from a failed S1PTW, rewalk the S1 PTs to
	 * find the failing level. If we can't find it, assume the error was
	 * transient and restart without changing the state.
	 */
	if (kvm_vcpu_abt_iss1tw(vcpu)) {
		u64 hpfar = kvm_vcpu_get_fault_ipa(vcpu);
		int ret;

		if (hpfar == INVALID_GPA)
			return;

		ret = __kvm_find_s1_desc_level(vcpu, addr, hpfar, &level);
		if (ret)
			return;

		WARN_ON_ONCE(level < -1 || level > 3);
		fsc = ESR_ELx_FSC_SEA_TTW(level);
	} else {
		fsc = ESR_ELx_FSC_EXTABT;
	}

	/* This delight is brought to you by FEAT_DoubleFault2. */
	if (effective_sctlr2_ease(vcpu))
		pend_serror_exception(vcpu);
	else
		pend_sync_exception(vcpu);

	/*
	 * Build an {i,d}abort, depending on the level and the
	 * instruction set. Report an external synchronous abort.
	 */
	if (kvm_vcpu_trap_il_is32bit(vcpu))
		esr |= ESR_ELx_IL;

	/*
	 * Here, the guest runs in AArch64 mode when in EL1. If we get
	 * an AArch32 fault, it means we managed to trap an EL0 fault.
	 */
	if (is_aarch32 || (cpsr & PSR_MODE_MASK) == PSR_MODE_EL0t)
		esr |= (ESR_ELx_EC_IABT_LOW << ESR_ELx_EC_SHIFT);
	else
		esr |= (ESR_ELx_EC_IABT_CUR << ESR_ELx_EC_SHIFT);

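	/*
	 * Converting to a data abort below only takes an OR because the
	 * EC values are arranged such that ESR_ELx_EC_DABT_{LOW,CUR} ==
	 * ESR_ELx_EC_IABT_{LOW,CUR} | 0x4 (0x20/0x21 vs 0x24/0x25), which
	 * preserves the LOW/CUR distinction picked above.
	 */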
	if (!is_iabt)
		esr |= ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT;

	esr |= fsc;

	vcpu_write_sys_reg(vcpu, addr, exception_far_elx(vcpu));
	vcpu_write_sys_reg(vcpu, esr, exception_esr_elx(vcpu));
}

static void inject_undef64(struct kvm_vcpu *vcpu)
{
	u64 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);

	pend_sync_exception(vcpu);

	/*
	 * Build an unknown exception, depending on the instruction
	 * set.
	 */
	if (kvm_vcpu_trap_il_is32bit(vcpu))
		esr |= ESR_ELx_IL;

	vcpu_write_sys_reg(vcpu, esr, exception_esr_elx(vcpu));
}

#define DFSR_FSC_EXTABT_LPAE	0x10
#define DFSR_FSC_EXTABT_nLPAE	0x08
#define DFSR_LPAE		BIT(9)
#define TTBCR_EAE		BIT(31)
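
/*
 * Refresher on the two AArch32 fault status formats: with the
 * long-descriptor (LPAE) format, DFSR.STATUS lives in bits [5:0] and
 * DFSR[9] flags the format; a synchronous external abort is 0b010000.
 * With the short-descriptor format, FS is split across bits [3:0] and
 * [10], and a non-translation external abort is 0b01000.
 */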

static void inject_undef32(struct kvm_vcpu *vcpu)
{
	kvm_pend_exception(vcpu, EXCEPT_AA32_UND);
}

/*
 * Modelled after the TakeDataAbortException() and
 * TakePrefetchAbortException() pseudocode.
 */
static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, u32 addr)
{
	u64 far;
	u32 fsr;

	/* Give the guest an IMPLEMENTATION DEFINED exception */
	if (vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE) {
		fsr = DFSR_LPAE | DFSR_FSC_EXTABT_LPAE;
	} else {
		/* no need to shuffle FS[4] into DFSR[10] as it's 0 */
		fsr = DFSR_FSC_EXTABT_nLPAE;
	}

	far = vcpu_read_sys_reg(vcpu, FAR_EL1);

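	/*
	 * The AArch32 DFAR and IFAR map onto FAR_EL1[31:0] and
	 * FAR_EL1[63:32] respectively, so only the half corresponding to
	 * the abort being injected is updated below.
	 */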
	if (is_pabt) {
		kvm_pend_exception(vcpu, EXCEPT_AA32_IABT);
		far &= GENMASK(31, 0);
		far |= (u64)addr << 32;
		vcpu_write_sys_reg(vcpu, fsr, IFSR32_EL2);
	} else { /* !iabt */
		kvm_pend_exception(vcpu, EXCEPT_AA32_DABT);
		far &= GENMASK(63, 32);
		far |= addr;
		vcpu_write_sys_reg(vcpu, fsr, ESR_EL1);
	}

	vcpu_write_sys_reg(vcpu, far, FAR_EL1);
}

static void __kvm_inject_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr)
{
	if (vcpu_el1_is_32bit(vcpu))
		inject_abt32(vcpu, iabt, addr);
	else
		inject_abt64(vcpu, iabt, addr);
}

static bool kvm_sea_target_is_el2(struct kvm_vcpu *vcpu)
{
	if (__vcpu_sys_reg(vcpu, HCR_EL2) & (HCR_TGE | HCR_TEA))
		return true;

	if (!vcpu_mode_priv(vcpu))
		return false;

	return (*vcpu_cpsr(vcpu) & PSR_A_BIT) &&
	       (__vcpu_sys_reg(vcpu, HCRX_EL2) & HCRX_EL2_TMEA);
}

int kvm_inject_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr)
{
	lockdep_assert_held(&vcpu->mutex);

	if (is_nested_ctxt(vcpu) && kvm_sea_target_is_el2(vcpu))
		return kvm_inject_nested_sea(vcpu, iabt, addr);

	__kvm_inject_sea(vcpu, iabt, addr);
	return 1;
}
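
/*
 * Hypothetical usage, e.g. from a fault handler that decides a stage-2
 * data access hit poisoned memory (page_is_poisoned() is illustrative,
 * not a real helper):
 *
 *	if (page_is_poisoned(fault_ipa))
 *		return kvm_inject_sea(vcpu, false, kvm_vcpu_get_hfar(vcpu));
 *
 * The return value of 1 follows the usual KVM convention for "resume
 * the guest".
 */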

void kvm_inject_size_fault(struct kvm_vcpu *vcpu)
{
	unsigned long addr, esr;

	addr  = kvm_vcpu_get_fault_ipa(vcpu);
	addr |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);

	__kvm_inject_sea(vcpu, kvm_vcpu_trap_is_iabt(vcpu), addr);

	/*
	 * If AArch64 or LPAE, set FSC to 0 to indicate an Address
	 * Size Fault at level 0, as if exceeding PARange.
	 *
	 * Non-LPAE guests will only get the external abort, as there
	 * is no way to describe the ASF.
	 */
	if (vcpu_el1_is_32bit(vcpu) &&
	    !(vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE))
		return;

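	/*
	 * The FSC occupies ESR_ELx[5:0] (likewise DFSR.STATUS for LPAE
	 * guests), and an Address Size Fault at level n is encoded as
	 * 0b0000nn, so clearing the field below yields the level 0
	 * encoding.
	 */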
	esr = vcpu_read_sys_reg(vcpu, exception_esr_elx(vcpu));
	esr &= ~GENMASK_ULL(5, 0);
	vcpu_write_sys_reg(vcpu, esr, exception_esr_elx(vcpu));
}

/**
 * kvm_inject_undefined - inject an undefined instruction into the guest
 * @vcpu: The vCPU in which to inject the exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
	if (vcpu_el1_is_32bit(vcpu))
		inject_undef32(vcpu);
	else
		inject_undef64(vcpu);
}

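/*
 * An SError is masked iff PSTATE.A is set and the effective value of
 * SCTLR2_ELx.NMEA (FEAT_DoubleFault2) doesn't override it.
 */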
static bool serror_is_masked(struct kvm_vcpu *vcpu)
{
	return (*vcpu_cpsr(vcpu) & PSR_A_BIT) && !effective_sctlr2_nmea(vcpu);
}

static bool kvm_serror_target_is_el2(struct kvm_vcpu *vcpu)
{
	if (is_hyp_ctxt(vcpu) || vcpu_el2_amo_is_set(vcpu))
		return true;

	if (!(__vcpu_sys_reg(vcpu, HCRX_EL2) & HCRX_EL2_TMEA))
		return false;

	/*
	 * In another example where FEAT_DoubleFault2 is entirely backwards,
	 * "masked" as it relates to the routing effects of HCRX_EL2.TMEA
	 * doesn't consider SCTLR2_EL1.NMEA. That is to say, even if EL1 asked
	 * for non-maskable SErrors, the EL2 bit takes priority if A is set.
	 */
	if (vcpu_mode_priv(vcpu))
		return *vcpu_cpsr(vcpu) & PSR_A_BIT;

	/*
	 * Otherwise, for SErrors taken from EL0, masking *does* take the
	 * effective NMEA bit into account: the SError is routed to EL2
	 * only if it is actually masked.
	 */
	return serror_is_masked(vcpu);
}

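/*
 * With the vCPU at (v)EL2 and neither HCR_EL2.TGE nor HCR_EL2.AMO set,
 * the SError would target EL1, and an exception cannot be delivered to
 * a lower EL; it has to remain pending until it becomes deliverable.
 */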
static bool kvm_serror_undeliverable_at_el2(struct kvm_vcpu *vcpu)
{
	return !(vcpu_el2_tge_is_set(vcpu) || vcpu_el2_amo_is_set(vcpu));
}

int kvm_inject_serror_esr(struct kvm_vcpu *vcpu, u64 esr)
{
	lockdep_assert_held(&vcpu->mutex);

	if (is_nested_ctxt(vcpu) && kvm_serror_target_is_el2(vcpu))
		return kvm_inject_nested_serror(vcpu, esr);

	if (vcpu_is_el2(vcpu) && kvm_serror_undeliverable_at_el2(vcpu)) {
		vcpu_set_vsesr(vcpu, esr);
		vcpu_set_flag(vcpu, NESTED_SERROR_PENDING);
		return 1;
	}

	/*
	 * Emulate the exception entry if SErrors are unmasked. This is
	 * necessary when the vCPU is in a nested context w/ vSErrors
	 * enabled, as we've already delegated the hardware vSError context
	 * (i.e. HCR_EL2.VSE, VSESR_EL2, VDISR_EL2) to the guest hypervisor.
	 *
	 * As we're emulating the SError injection we need to explicitly
	 * populate ESR_ELx.EC because hardware will not do it on our behalf.
	 */
	if (!serror_is_masked(vcpu)) {
		pend_serror_exception(vcpu);
		esr |= FIELD_PREP(ESR_ELx_EC_MASK, ESR_ELx_EC_SERROR);
		vcpu_write_sys_reg(vcpu, esr, exception_esr_elx(vcpu));
		return 1;
	}

	vcpu_set_vsesr(vcpu, esr & ESR_ELx_ISS_MASK);
	*vcpu_hcr(vcpu) |= HCR_VSE;
	return 1;
}
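
/*
 * Hypothetical usage: userspace-driven SError injection (e.g. via the
 * KVM_SET_VCPU_EVENTS ioctl) passes an ISS-only syndrome, ending up in
 * something like:
 *
 *	ret = kvm_inject_serror_esr(vcpu, events->exception.serror_esr);
 *
 * Callers are expected to provide only the ISS bits; the EC field is
 * filled in here (or by the CPU for the HCR_EL2.VSE case).
 */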
363