// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_HYP_SWITCH_H__
#define __ARM64_KVM_HYP_SWITCH_H__

#include <hyp/adjust_pc.h>
#include <hyp/fault.h>

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/extable.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nested.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>
#include <asm/traps.h>

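/*
 * An entry in the hypervisor's exception fixup table. Both fields are
 * PC-relative offsets: 'insn' locates an instruction that may fault at EL2,
 * and 'fixup' the code to resume at instead. See how they are resolved in
 * __kvm_unexpected_el2_exception() below.
 */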
struct kvm_exception_table_entry {
	int insn, fixup;
};

extern struct kvm_exception_table_entry __start___kvm_ex_table;
extern struct kvm_exception_table_entry __stop___kvm_ex_table;

/* Save the 32-bit only FPSIMD system register state */
static inline void __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
{
	if (!vcpu_el1_is_32bit(vcpu))
		return;

	__vcpu_assign_sys_reg(vcpu, FPEXC32_EL2, read_sysreg(fpexc32_el2));
}

static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
{
	/*
	 * We are about to set CPTR_EL2.TFP to trap all floating point
	 * register accesses to EL2, however, the ARM ARM clearly states that
	 * traps are only taken to EL2 if the operation would not otherwise
	 * trap to EL1. Therefore, always make sure that for 32-bit guests,
	 * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
	 * If FP/ASIMD is not implemented, FPEXC is UNDEFINED and any access to
	 * it will cause an exception.
	 */
	if (vcpu_el1_is_32bit(vcpu) && system_supports_fpsimd())
		write_sysreg(1 << 30, fpexc32_el2);
}

static inline void __activate_cptr_traps_nvhe(struct kvm_vcpu *vcpu)
{
	u64 val = CPTR_NVHE_EL2_RES1 | CPTR_EL2_TAM | CPTR_EL2_TTA;

	/*
	 * Always trap SME since it's not supported in KVM.
	 * TSM is RES1 if SME isn't implemented.
	 */
	val |= CPTR_EL2_TSM;

	if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
		val |= CPTR_EL2_TZ;

	if (!guest_owns_fp_regs())
		val |= CPTR_EL2_TFP;

	write_sysreg(val, cptr_el2);
}

static inline void __activate_cptr_traps_vhe(struct kvm_vcpu *vcpu)
{
	/*
	 * With VHE (HCR.E2H == 1), accesses to CPACR_EL1 are routed to
	 * CPTR_EL2. In general, CPACR_EL1 has the same layout as CPTR_EL2,
	 * except for some missing controls, such as TAM.
	 * In this case, CPTR_EL2.TAM has the same position with or without
	 * VHE (HCR.E2H == 1), which allows us to use the CPTR_EL2.TAM shift
	 * value here for trapping AMU accesses.
	 */
	u64 val = CPTR_EL2_TAM | CPACR_EL1_TTA;
	u64 cptr;

	if (guest_owns_fp_regs()) {
		val |= CPACR_EL1_FPEN;
		if (vcpu_has_sve(vcpu))
			val |= CPACR_EL1_ZEN;
	}

	if (!vcpu_has_nv(vcpu))
		goto write;

	/*
	 * The architecture is a bit crap (what a surprise): an EL2 guest
	 * writing to CPTR_EL2 via CPACR_EL1 can't set any of TCPAC or TTA,
	 * as they are RES0 in the guest's view. To work around it, trap the
	 * sucker using the very same bit it can't set...
	 */
	if (vcpu_el2_e2h_is_set(vcpu) && is_hyp_ctxt(vcpu))
		val |= CPTR_EL2_TCPAC;

	/*
	 * Layer the guest hypervisor's trap configuration on top of our own if
	 * we're in a nested context.
	 */
	if (is_hyp_ctxt(vcpu))
		goto write;

	cptr = vcpu_sanitised_cptr_el2(vcpu);

	/*
	 * Pay attention, there's some interesting detail here.
	 *
	 * The CPTR_EL2.xEN fields are 2 bits wide, although there are only two
	 * meaningful trap states when HCR_EL2.TGE = 0 (running a nested guest):
	 *
	 *  - CPTR_EL2.xEN = x0, traps are enabled
	 *  - CPTR_EL2.xEN = x1, traps are disabled
	 *
	 * In other words, bit[0] determines if guest accesses trap or not. In
	 * the interest of simplicity, clear the entire field if the guest
	 * hypervisor has traps enabled to dispel any illusion of something more
	 * complicated taking place.
	 */
	if (!(SYS_FIELD_GET(CPACR_EL1, FPEN, cptr) & BIT(0)))
		val &= ~CPACR_EL1_FPEN;
	if (!(SYS_FIELD_GET(CPACR_EL1, ZEN, cptr) & BIT(0)))
		val &= ~CPACR_EL1_ZEN;

	if (kvm_has_feat(vcpu->kvm, ID_AA64MMFR3_EL1, S2POE, IMP))
		val |= cptr & CPACR_EL1_E0POE;

	val |= cptr & CPTR_EL2_TCPAC;

write:
	write_sysreg(val, cpacr_el1);
}

static inline void __activate_cptr_traps(struct kvm_vcpu *vcpu)
{
	if (!guest_owns_fp_regs())
		__activate_traps_fpsimd32(vcpu);

	if (has_vhe() || has_hvhe())
		__activate_cptr_traps_vhe(vcpu);
	else
		__activate_cptr_traps_nvhe(vcpu);
}

static inline void __deactivate_cptr_traps_nvhe(struct kvm_vcpu *vcpu)
{
	u64 val = CPTR_NVHE_EL2_RES1;

	if (!cpus_have_final_cap(ARM64_SVE))
		val |= CPTR_EL2_TZ;
	if (!cpus_have_final_cap(ARM64_SME))
		val |= CPTR_EL2_TSM;

	write_sysreg(val, cptr_el2);
}

static inline void __deactivate_cptr_traps_vhe(struct kvm_vcpu *vcpu)
{
	u64 val = CPACR_EL1_FPEN;

	if (cpus_have_final_cap(ARM64_SVE))
		val |= CPACR_EL1_ZEN;
	if (cpus_have_final_cap(ARM64_SME))
		val |= CPACR_EL1_SMEN;

	write_sysreg(val, cpacr_el1);
}

static inline void __deactivate_cptr_traps(struct kvm_vcpu *vcpu)
{
	if (has_vhe() || has_hvhe())
		__deactivate_cptr_traps_vhe(vcpu);
	else
		__deactivate_cptr_traps_nvhe(vcpu);
}

static inline bool cpu_has_amu(void)
{
	u64 pfr0 = read_sysreg_s(SYS_ID_AA64PFR0_EL1);

	return cpuid_feature_extract_unsigned_field(pfr0,
		ID_AA64PFR0_EL1_AMU_SHIFT);
}

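/*
 * Save the host's value of an FGT register into the host context, then
 * install the guest's value as provided by vcpu_fgt().
 */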
#define __activate_fgt(hctxt, vcpu, reg)				\
	do {								\
		ctxt_sys_reg(hctxt, reg) = read_sysreg_s(SYS_ ## reg);	\
		write_sysreg_s(*vcpu_fgt(vcpu, reg), SYS_ ## reg);	\
	} while (0)

static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);

	if (!cpus_have_final_cap(ARM64_HAS_FGT))
		return;

	__activate_fgt(hctxt, vcpu, HFGRTR_EL2);
	__activate_fgt(hctxt, vcpu, HFGWTR_EL2);
	__activate_fgt(hctxt, vcpu, HFGITR_EL2);
	__activate_fgt(hctxt, vcpu, HDFGRTR_EL2);
	__activate_fgt(hctxt, vcpu, HDFGWTR_EL2);

	if (cpu_has_amu())
		__activate_fgt(hctxt, vcpu, HAFGRTR_EL2);

	if (!cpus_have_final_cap(ARM64_HAS_FGT2))
		return;

	__activate_fgt(hctxt, vcpu, HFGRTR2_EL2);
	__activate_fgt(hctxt, vcpu, HFGWTR2_EL2);
	__activate_fgt(hctxt, vcpu, HFGITR2_EL2);
	__activate_fgt(hctxt, vcpu, HDFGRTR2_EL2);
	__activate_fgt(hctxt, vcpu, HDFGWTR2_EL2);
}

static inline void __activate_traps_ich_hfgxtr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);

	if (!cpus_have_final_cap(ARM64_HAS_GICV5_CPUIF))
		return;

	__activate_fgt(hctxt, vcpu, ICH_HFGRTR_EL2);
	__activate_fgt(hctxt, vcpu, ICH_HFGWTR_EL2);
	__activate_fgt(hctxt, vcpu, ICH_HFGITR_EL2);
}

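/* Restore the host's saved value of an FGT register. */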
#define __deactivate_fgt(hctxt, vcpu, reg)				\
	do {								\
		write_sysreg_s(ctxt_sys_reg(hctxt, reg),		\
			       SYS_ ## reg);				\
	} while (0)

static inline void __deactivate_traps_hfgxtr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);

	if (!cpus_have_final_cap(ARM64_HAS_FGT))
		return;

	__deactivate_fgt(hctxt, vcpu, HFGRTR_EL2);
	__deactivate_fgt(hctxt, vcpu, HFGWTR_EL2);
	__deactivate_fgt(hctxt, vcpu, HFGITR_EL2);
	__deactivate_fgt(hctxt, vcpu, HDFGRTR_EL2);
	__deactivate_fgt(hctxt, vcpu, HDFGWTR_EL2);

	if (cpu_has_amu())
		__deactivate_fgt(hctxt, vcpu, HAFGRTR_EL2);

	if (!cpus_have_final_cap(ARM64_HAS_FGT2))
		return;

	__deactivate_fgt(hctxt, vcpu, HFGRTR2_EL2);
	__deactivate_fgt(hctxt, vcpu, HFGWTR2_EL2);
	__deactivate_fgt(hctxt, vcpu, HFGITR2_EL2);
	__deactivate_fgt(hctxt, vcpu, HDFGRTR2_EL2);
	__deactivate_fgt(hctxt, vcpu, HDFGWTR2_EL2);
}

static inline void __deactivate_traps_ich_hfgxtr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);

	if (!cpus_have_final_cap(ARM64_HAS_GICV5_CPUIF))
		return;

	__deactivate_fgt(hctxt, vcpu, ICH_HFGRTR_EL2);
	__deactivate_fgt(hctxt, vcpu, ICH_HFGWTR_EL2);
	__deactivate_fgt(hctxt, vcpu, ICH_HFGITR_EL2);
}

static inline void __activate_traps_mpam(struct kvm_vcpu *vcpu)
{
	u64 clr = MPAM2_EL2_EnMPAMSM;
	u64 set = MPAM2_EL2_TRAPMPAM0EL1 | MPAM2_EL2_TRAPMPAM1EL1;

	if (!system_supports_mpam())
		return;

	/* trap guest access to MPAMIDR_EL1 */
	if (system_supports_mpam_hcr()) {
		write_sysreg_s(MPAMHCR_EL2_TRAP_MPAMIDR_EL1, SYS_MPAMHCR_EL2);
	} else {
		/* From v1.1 TIDR can trap MPAMIDR, set it unconditionally */
		set |= MPAM2_EL2_TIDR;
	}

	sysreg_clear_set_s(SYS_MPAM2_EL2, clr, set);
}

static inline void __deactivate_traps_mpam(void)
{
	u64 clr = MPAM2_EL2_TRAPMPAM0EL1 | MPAM2_EL2_TRAPMPAM1EL1 | MPAM2_EL2_TIDR;
	u64 set = MPAM2_EL2_EnMPAMSM;

	if (!system_supports_mpam())
		return;

	sysreg_clear_set_s(SYS_MPAM2_EL2, clr, set);

	if (system_supports_mpam_hcr())
		write_sysreg_s(MPAMHCR_HOST_FLAGS, SYS_MPAMHCR_EL2);
}

static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);

	/* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */
	write_sysreg(1 << 15, hstr_el2);

	/*
	 * Make sure we trap PMU access from EL0 to EL2. Also sanitize
	 * PMSELR_EL0 to make sure it never contains the cycle
	 * counter, which could make a PMXEVCNTR_EL0 access UNDEF at
	 * EL1 instead of being trapped to EL2.
	 */
	if (system_supports_pmuv3()) {
		write_sysreg(0, pmselr_el0);

		ctxt_sys_reg(hctxt, PMUSERENR_EL0) = read_sysreg(pmuserenr_el0);
		write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
		vcpu_set_flag(vcpu, PMUSERENR_ON_CPU);
	}

	if (cpus_have_final_cap(ARM64_HAS_HCX)) {
		u64 hcrx = vcpu->arch.hcrx_el2;

		if (is_nested_ctxt(vcpu)) {
			u64 val = __vcpu_sys_reg(vcpu, HCRX_EL2);

			hcrx |= val & __HCRX_EL2_MASK;
			hcrx &= ~(~val & __HCRX_EL2_nMASK);
		}

		ctxt_sys_reg(hctxt, HCRX_EL2) = read_sysreg_s(SYS_HCRX_EL2);
		write_sysreg_s(hcrx, SYS_HCRX_EL2);
	}

	__activate_traps_hfgxtr(vcpu);
	__activate_traps_ich_hfgxtr(vcpu);
	__activate_traps_mpam(vcpu);
}

static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);

	write_sysreg(0, hstr_el2);
	if (system_supports_pmuv3()) {
		write_sysreg(ctxt_sys_reg(hctxt, PMUSERENR_EL0), pmuserenr_el0);
		vcpu_clear_flag(vcpu, PMUSERENR_ON_CPU);
	}

	if (cpus_have_final_cap(ARM64_HAS_HCX))
		write_sysreg_s(ctxt_sys_reg(hctxt, HCRX_EL2), SYS_HCRX_EL2);

	__deactivate_traps_hfgxtr(vcpu);
	__deactivate_traps_ich_hfgxtr(vcpu);
	__deactivate_traps_mpam();
}

static inline void ___activate_traps(struct kvm_vcpu *vcpu, u64 hcr)
{
	if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
		hcr |= HCR_TVM;

	write_sysreg_hcr(hcr);

	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE)) {
		u64 vsesr;

		/*
		 * When HCR_EL2.AMO is set, physical SErrors are taken to EL2
		 * and vSError injection is enabled for EL1. Conveniently, for
		 * NV this means that it is never the case where a 'physical'
		 * SError (injected by KVM or userspace) and vSError are
		 * deliverable to the same context.
		 *
		 * As such, we can trivially select between the host or guest's
		 * VSESR_EL2. Except for the case that FEAT_RAS hasn't been
		 * exposed to the guest, where ESR propagation in hardware
		 * occurs unconditionally.
		 *
		 * Paper over the architectural wart and use an IMPLEMENTATION
		 * DEFINED ESR value in case FEAT_RAS is hidden from the guest.
		 */
		if (!vserror_state_is_nested(vcpu))
			vsesr = vcpu->arch.vsesr_el2;
		else if (kvm_has_ras(kern_hyp_va(vcpu->kvm)))
			vsesr = __vcpu_sys_reg(vcpu, VSESR_EL2);
		else
			vsesr = ESR_ELx_ISV;

		write_sysreg_s(vsesr, SYS_VSESR_EL2);
	}
}

static inline void ___deactivate_traps(struct kvm_vcpu *vcpu)
{
	u64 *hcr;

	if (vserror_state_is_nested(vcpu))
		hcr = __ctxt_sys_reg(&vcpu->arch.ctxt, HCR_EL2);
	else
		hcr = &vcpu->arch.hcr_el2;

	/*
	 * If we pended a virtual abort, preserve it until it gets
	 * cleared. See D1.14.3 (Virtual Interrupts) for details, but
	 * the crucial bit is "On taking a vSError interrupt,
	 * HCR_EL2.VSE is cleared to 0."
	 *
	 * Additionally, when in a nested context we need to propagate the
	 * updated state to the guest hypervisor's HCR_EL2.
	 */
	if (*hcr & HCR_VSE) {
		*hcr &= ~HCR_VSE;
		*hcr |= read_sysreg(hcr_el2) & HCR_VSE;
	}
}

static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
{
	return __get_fault_info(vcpu->arch.fault.esr_el2, &vcpu->arch.fault);
}

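/*
 * A FEAT_MOPS copy/set sequence can take a "wrong option" exception mid-way,
 * typically when a vCPU is migrated between CPUs implementing different MOPS
 * options. Rewind the register state so that the guest restarts the sequence
 * from its prologue instruction.
 */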
static inline bool kvm_hyp_handle_mops(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
	arm64_mops_reset_regs(vcpu_gp_regs(vcpu), vcpu->arch.fault.esr_el2);
	write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);

	/*
	 * Finish potential single step before executing the prologue
	 * instruction.
	 */
	*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
	write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR);

	return true;
}

static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
{
	/*
	 * The vCPU's saved SVE state layout always matches the max VL of the
	 * vCPU. Start off with the max VL so we can load the SVE state.
	 */
	sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
	__sve_restore_state(vcpu_sve_pffr(vcpu),
			    &vcpu->arch.ctxt.fp_regs.fpsr,
			    true);

	/*
	 * The effective VL for a VM could differ from the max VL when running a
	 * nested guest, as the guest hypervisor could select a smaller VL. Slap
	 * that into hardware before wrapping up.
	 */
	if (is_nested_ctxt(vcpu))
		sve_cond_update_zcr_vq(__vcpu_sys_reg(vcpu, ZCR_EL2), SYS_ZCR_EL2);

	write_sysreg_el1(__vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)), SYS_ZCR);
}

static inline void __hyp_sve_save_host(void)
{
	struct cpu_sve_state *sve_state = *host_data_ptr(sve_state);

	sve_state->zcr_el1 = read_sysreg_el1(SYS_ZCR);
	write_sysreg_s(sve_vq_from_vl(kvm_host_sve_max_vl) - 1, SYS_ZCR_EL2);
	__sve_save_state(sve_state->sve_regs + sve_ffr_offset(kvm_host_sve_max_vl),
			 &sve_state->fpsr,
			 true);
}

static inline void fpsimd_lazy_switch_to_guest(struct kvm_vcpu *vcpu)
{
	u64 zcr_el1, zcr_el2;

	if (!guest_owns_fp_regs())
		return;

	if (vcpu_has_sve(vcpu)) {
		/* A guest hypervisor may restrict the effective max VL. */
		if (is_nested_ctxt(vcpu))
			zcr_el2 = __vcpu_sys_reg(vcpu, ZCR_EL2);
		else
			zcr_el2 = vcpu_sve_max_vq(vcpu) - 1;

		write_sysreg_el2(zcr_el2, SYS_ZCR);

		zcr_el1 = __vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu));
		write_sysreg_el1(zcr_el1, SYS_ZCR);
	}
}

static inline void fpsimd_lazy_switch_to_host(struct kvm_vcpu *vcpu)
{
	u64 zcr_el1, zcr_el2;

	if (!guest_owns_fp_regs())
		return;

	/*
	 * When the guest owns the FP regs, we know that guest+hyp traps for
	 * any FPSIMD/SVE/SME features exposed to the guest have been disabled
	 * by either __activate_cptr_traps() or kvm_hyp_handle_fpsimd()
	 * prior to __guest_entry(). As __guest_entry() guarantees a context
	 * synchronization event, we don't need an ISB here to avoid taking
	 * traps for anything that was exposed to the guest.
	 */
	if (vcpu_has_sve(vcpu)) {
		zcr_el1 = read_sysreg_el1(SYS_ZCR);
		__vcpu_assign_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu), zcr_el1);

		/*
		 * The guest's state is always saved using the guest's max VL.
		 * Ensure that the host has the guest's max VL active such that
		 * the host can save the guest's state lazily, but don't
		 * artificially restrict the host to the guest's max VL.
		 */
		if (has_vhe()) {
			zcr_el2 = vcpu_sve_max_vq(vcpu) - 1;
			write_sysreg_el2(zcr_el2, SYS_ZCR);
		} else {
			zcr_el2 = sve_vq_from_vl(kvm_host_sve_max_vl) - 1;
			write_sysreg_el2(zcr_el2, SYS_ZCR);

			zcr_el1 = vcpu_sve_max_vq(vcpu) - 1;
			write_sysreg_el1(zcr_el1, SYS_ZCR);
		}
	}
}

static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
{
	/*
	 * Non-protected kvm relies on the host restoring its sve state.
	 * Protected kvm restores the host's sve state so as not to reveal
	 * that fpsimd was used by a guest, nor to leak upper sve bits.
	 */
	if (system_supports_sve()) {
		__hyp_sve_save_host();
	} else {
		__fpsimd_save_state(host_data_ptr(host_ctxt.fp_regs));
	}

	if (kvm_has_fpmr(kern_hyp_va(vcpu->kvm)))
		*host_data_ptr(fpmr) = read_sysreg_s(SYS_FPMR);
}

/*
 * We trap the first access to the FP/SIMD registers to save the host context
 * and restore the guest context lazily.
 * If FP/SIMD is not implemented, handle the trap and inject an undefined
 * instruction exception to the guest. Similarly for trapped SVE accesses.
 */
static inline bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	bool sve_guest;
	u8 esr_ec;

	if (!system_supports_fpsimd())
		return false;

	sve_guest = vcpu_has_sve(vcpu);
	esr_ec = kvm_vcpu_trap_get_class(vcpu);

	/* Only handle traps the vCPU can support here: */
	switch (esr_ec) {
	case ESR_ELx_EC_FP_ASIMD:
		/* Forward traps to the guest hypervisor as required */
		if (guest_hyp_fpsimd_traps_enabled(vcpu))
			return false;
		break;
	case ESR_ELx_EC_SYS64:
		if (WARN_ON_ONCE(!is_hyp_ctxt(vcpu)))
			return false;
		fallthrough;
	case ESR_ELx_EC_SVE:
		if (!sve_guest)
			return false;
		if (guest_hyp_sve_traps_enabled(vcpu))
			return false;
		break;
	default:
		return false;
	}

	/* Valid trap. Switch the context: */

	/* First disable enough traps to allow us to update the registers */
	__deactivate_cptr_traps(vcpu);
	isb();

	/* Write out the host state if it's in the registers */
	if (is_protected_kvm_enabled() && host_owns_fp_regs())
		kvm_hyp_save_fpsimd_host(vcpu);

	/* Restore the guest state */
	if (sve_guest)
		__hyp_sve_restore_guest(vcpu);
	else
		__fpsimd_restore_state(&vcpu->arch.ctxt.fp_regs);

	if (kvm_has_fpmr(kern_hyp_va(vcpu->kvm)))
		write_sysreg_s(__vcpu_sys_reg(vcpu, FPMR), SYS_FPMR);

	/* Skip restoring fpexc32 for AArch64 guests */
	if (!(read_sysreg(hcr_el2) & HCR_RW))
		write_sysreg(__vcpu_sys_reg(vcpu, FPEXC32_EL2), fpexc32_el2);

	*host_data_ptr(fp_owner) = FP_STATE_GUEST_OWNED;

	/*
	 * Re-enable traps necessary for the current state of the guest, e.g.
	 * those enabled by a guest hypervisor. The ERET to the guest will
	 * provide the necessary context synchronization.
	 */
	__activate_cptr_traps(vcpu);

	return true;
}

static inline bool handle_tx2_tvm(struct kvm_vcpu *vcpu)
{
	u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
	int rt = kvm_vcpu_sys_get_rt(vcpu);
	u64 val = vcpu_get_reg(vcpu, rt);

	/*
	 * The normal sysreg handling code expects to see the traps,
	 * let's not do anything here.
	 */
	if (vcpu->arch.hcr_el2 & HCR_TVM)
		return false;

	switch (sysreg) {
	case SYS_SCTLR_EL1:
		write_sysreg_el1(val, SYS_SCTLR);
		break;
	case SYS_TTBR0_EL1:
		write_sysreg_el1(val, SYS_TTBR0);
		break;
	case SYS_TTBR1_EL1:
		write_sysreg_el1(val, SYS_TTBR1);
		break;
	case SYS_TCR_EL1:
		write_sysreg_el1(val, SYS_TCR);
		break;
	case SYS_ESR_EL1:
		write_sysreg_el1(val, SYS_ESR);
		break;
	case SYS_FAR_EL1:
		write_sysreg_el1(val, SYS_FAR);
		break;
	case SYS_AFSR0_EL1:
		write_sysreg_el1(val, SYS_AFSR0);
		break;
	case SYS_AFSR1_EL1:
		write_sysreg_el1(val, SYS_AFSR1);
		break;
	case SYS_MAIR_EL1:
		write_sysreg_el1(val, SYS_MAIR);
		break;
	case SYS_AMAIR_EL1:
		write_sysreg_el1(val, SYS_AMAIR);
		break;
	case SYS_CONTEXTIDR_EL1:
		write_sysreg_el1(val, SYS_CONTEXTIDR);
		break;
	default:
		return false;
	}

	__kvm_skip_instr(vcpu);
	return true;
}

/* Open-coded version of timer_get_offset() to allow for kern_hyp_va() */
static inline u64 hyp_timer_get_offset(struct arch_timer_context *ctxt)
{
	u64 offset = 0;

	if (ctxt->offset.vm_offset)
		offset += *kern_hyp_va(ctxt->offset.vm_offset);
	if (ctxt->offset.vcpu_offset)
		offset += *kern_hyp_va(ctxt->offset.vcpu_offset);

	return offset;
}

static inline u64 compute_counter_value(struct arch_timer_context *ctxt)
{
	return arch_timer_read_cntpct_el0() - hyp_timer_get_offset(ctxt);
}

static bool kvm_handle_cntxct(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *ctxt;
	u32 sysreg;
	u64 val;

	/*
	 * We only get here for 64bit guests, 32bit guests will hit
	 * the long and winding road all the way to the standard
	 * handling. Yes, it sucks to be irrelevant.
	 *
	 * Also, we only deal with non-hypervisor context here (either
	 * an EL1 guest, or a non-HYP context of an EL2 guest).
	 */
	if (is_hyp_ctxt(vcpu))
		return false;

	sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));

	switch (sysreg) {
	case SYS_CNTPCT_EL0:
	case SYS_CNTPCTSS_EL0:
		if (vcpu_has_nv(vcpu)) {
			/* Check for guest hypervisor trapping */
			val = __vcpu_sys_reg(vcpu, CNTHCTL_EL2);
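			/*
			 * Without VHE (HCR_EL2.E2H == 0), CNTHCTL_EL2.EL1PCTEN
			 * lives at bit 0; move it to its E2H == 1 position
			 * (bit 10) so that the single check below covers both
			 * register layouts.
			 */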
			if (!vcpu_el2_e2h_is_set(vcpu))
				val = (val & CNTHCTL_EL1PCTEN) << 10;

			if (!(val & (CNTHCTL_EL1PCTEN << 10)))
				return false;
		}

		ctxt = vcpu_ptimer(vcpu);
		break;
	case SYS_CNTVCT_EL0:
	case SYS_CNTVCTSS_EL0:
		if (vcpu_has_nv(vcpu)) {
			/* Check for guest hypervisor trapping */
			val = __vcpu_sys_reg(vcpu, CNTHCTL_EL2);

			if (val & CNTHCTL_EL1TVCT)
				return false;
		}

		ctxt = vcpu_vtimer(vcpu);
		break;
	default:
		return false;
	}

	val = compute_counter_value(ctxt);

	vcpu_set_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu), val);
	__kvm_skip_instr(vcpu);
	return true;
}

static bool handle_ampere1_tcr(struct kvm_vcpu *vcpu)
{
	u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
	int rt = kvm_vcpu_sys_get_rt(vcpu);
	u64 val = vcpu_get_reg(vcpu, rt);

	if (sysreg != SYS_TCR_EL1)
		return false;

	/*
	 * Affected parts do not advertise support for hardware Access Flag /
	 * Dirty state management in ID_AA64MMFR1_EL1.HAFDBS, but the underlying
	 * control bits are still functional. The architecture requires these be
	 * RES0 on systems that do not implement FEAT_HAFDBS.
	 *
	 * Uphold the requirements of the architecture by masking guest writes
	 * to TCR_EL1.{HA,HD} here.
	 */
	val &= ~(TCR_HD | TCR_HA);
	write_sysreg_el1(val, SYS_TCR);
	__kvm_skip_instr(vcpu);
	return true;
}

static inline bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
	    handle_tx2_tvm(vcpu))
		return true;

	if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38) &&
	    handle_ampere1_tcr(vcpu))
		return true;

	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
	    __vgic_v3_perform_cpuif_access(vcpu) == 1)
		return true;

	if (kvm_handle_cntxct(vcpu))
		return true;

	return false;
}

static inline bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
	    __vgic_v3_perform_cpuif_access(vcpu) == 1)
		return true;

	return false;
}

static inline bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu,
					       u64 *exit_code)
{
	if (!__populate_fault_info(vcpu))
		return true;

	return false;
}
#define kvm_hyp_handle_iabt_low		kvm_hyp_handle_memory_fault
#define kvm_hyp_handle_watchpt_low	kvm_hyp_handle_memory_fault

static inline bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (kvm_hyp_handle_memory_fault(vcpu, exit_code))
		return true;

	if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
		bool valid;

		valid = kvm_vcpu_trap_is_translation_fault(vcpu) &&
			kvm_vcpu_dabt_isvalid(vcpu) &&
			!kvm_vcpu_abt_issea(vcpu) &&
			!kvm_vcpu_abt_iss1tw(vcpu);

		if (valid) {
			int ret = __vgic_v2_perform_cpuif_access(vcpu);

			if (ret == 1)
				return true;

			/* Promote an illegal access to an SError. */
			if (ret == -1)
				*exit_code = ARM_EXCEPTION_EL1_SERROR;
		}
	}

	return false;
}

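/* Exit handlers are indexed by the ESR_ELx.EC of the trapped exception. */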
typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *);

/*
 * Allow the hypervisor to handle the exit with an exit handler if it has one.
 *
 * Returns true if the hypervisor handled the exit, and control should go back
 * to the guest, or false if it hasn't.
 */
static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code,
				       const exit_handler_fn *handlers)
{
	exit_handler_fn fn = handlers[kvm_vcpu_trap_get_class(vcpu)];

	if (fn)
		return fn(vcpu, exit_code);

	return false;
}

static inline void synchronize_vcpu_pstate(struct kvm_vcpu *vcpu)
{
	/*
	 * Check for the conditions of Cortex-A510's #2077057. When these occur
	 * SPSR_EL2 can't be trusted, but isn't needed either as it is
	 * unchanged from the value in vcpu_gp_regs(vcpu)->pstate.
	 * Are we single-stepping the guest, and took a PAC exception from the
	 * active-not-pending state?
	 */
	if (cpus_have_final_cap(ARM64_WORKAROUND_2077057) &&
	    vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
	    *vcpu_cpsr(vcpu) & DBG_SPSR_SS &&
	    ESR_ELx_EC(read_sysreg_el2(SYS_ESR)) == ESR_ELx_EC_PAC)
		write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR);

	vcpu->arch.ctxt.regs.pstate = read_sysreg_el2(SYS_SPSR);
}

/*
 * Return true when we were able to fixup the guest exit and should return to
 * the guest, false when we should restore the host state and return to the
 * main run loop.
 */
static inline bool __fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code,
				      const exit_handler_fn *handlers)
{
	if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
		vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);

	if (ARM_SERROR_PENDING(*exit_code) &&
	    ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ) {
		u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);

		/*
		 * HVC exceptions already have an adjusted PC, which we need
		 * to wind back in order to replay the HVC once the SError
		 * has been injected.
		 *
		 * SMC, on the other hand, is *trapped*, meaning its
		 * preferred return address is the SMC itself.
		 */
		if (esr_ec == ESR_ELx_EC_HVC32 || esr_ec == ESR_ELx_EC_HVC64)
			write_sysreg_el2(read_sysreg_el2(SYS_ELR) - 4, SYS_ELR);
	}

	/*
	 * We're using the raw exception code in order to only process
	 * the trap if no SError is pending. We will come back to the
	 * same PC once the SError has been injected, and replay the
	 * trapping instruction.
	 */
	if (*exit_code != ARM_EXCEPTION_TRAP)
		goto exit;

	/* Check if there's an exit handler and allow it to handle the exit. */
	if (kvm_hyp_handle_exit(vcpu, exit_code, handlers))
		goto guest;
exit:
	/* Return to the host kernel and handle the exit */
	return false;

guest:
	/* Re-enter the guest */
	asm(ALTERNATIVE("nop", "dmb sy", ARM64_WORKAROUND_1508412));
	return true;
}

static inline void __kvm_unexpected_el2_exception(void)
{
	extern char __guest_exit_restore_elr_and_panic[];
	unsigned long addr, fixup;
	struct kvm_exception_table_entry *entry, *end;
	unsigned long elr_el2 = read_sysreg(elr_el2);

	entry = &__start___kvm_ex_table;
	end = &__stop___kvm_ex_table;

	while (entry < end) {
		addr = (unsigned long)&entry->insn + entry->insn;
		fixup = (unsigned long)&entry->fixup + entry->fixup;

		if (addr != elr_el2) {
			entry++;
			continue;
		}

		write_sysreg(fixup, elr_el2);
		return;
	}

	/* Trigger a panic after restoring the hyp context. */
	this_cpu_ptr(&kvm_hyp_ctxt)->sys_regs[ELR_EL2] = elr_el2;
	write_sysreg(__guest_exit_restore_elr_and_panic, elr_el2);
}

#endif /* __ARM64_KVM_HYP_SWITCH_H__ */