// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <hyp/sysreg-sr.h>

#include <linux/compiler.h>
#include <linux/kvm_host.h>

#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_nested.h>
17
/*
 * Save the guest hypervisor's (vEL2) system register state into the
 * vcpu's _EL2 shadow registers.
 *
 * While a NV guest runs at vEL2, its EL2 registers live in the hardware
 * EL1 registers (when vHCR_EL2.E2H is set) or are trapped, so most of
 * this function reads the EL1 views back into the _EL2 slots.
 */
static void __sysreg_save_vel2_state(struct kvm_vcpu *vcpu)
{
	/* These registers are common with EL1 */
	__vcpu_assign_sys_reg(vcpu, PAR_EL1, read_sysreg(par_el1));
	__vcpu_assign_sys_reg(vcpu, TPIDR_EL1, read_sysreg(tpidr_el1));

	/* Fault/exception reporting and memory-attribute state */
	__vcpu_assign_sys_reg(vcpu, ESR_EL2, read_sysreg_el1(SYS_ESR));
	__vcpu_assign_sys_reg(vcpu, AFSR0_EL2, read_sysreg_el1(SYS_AFSR0));
	__vcpu_assign_sys_reg(vcpu, AFSR1_EL2, read_sysreg_el1(SYS_AFSR1));
	__vcpu_assign_sys_reg(vcpu, FAR_EL2, read_sysreg_el1(SYS_FAR));
	__vcpu_assign_sys_reg(vcpu, MAIR_EL2, read_sysreg_el1(SYS_MAIR));
	__vcpu_assign_sys_reg(vcpu, VBAR_EL2, read_sysreg_el1(SYS_VBAR));
	__vcpu_assign_sys_reg(vcpu, CONTEXTIDR_EL2, read_sysreg_el1(SYS_CONTEXTIDR));
	__vcpu_assign_sys_reg(vcpu, AMAIR_EL2, read_sysreg_el1(SYS_AMAIR));

	/*
	 * In VHE mode those registers are compatible between EL1 and EL2,
	 * and the guest uses the _EL1 versions on the CPU naturally.
	 * So we save them into their _EL2 versions here.
	 * For nVHE mode we trap accesses to those registers, so our
	 * _EL2 copy in sys_regs[] is always up-to-date and we don't need
	 * to save anything here.
	 */
	if (vcpu_el2_e2h_is_set(vcpu)) {
		u64 val;

		/*
		 * We don't save CPTR_EL2, as accesses to CPACR_EL1
		 * are always trapped, ensuring that the in-memory
		 * copy is always up-to-date. A small blessing...
		 */
		__vcpu_assign_sys_reg(vcpu, SCTLR_EL2, read_sysreg_el1(SYS_SCTLR));
		__vcpu_assign_sys_reg(vcpu, TTBR0_EL2, read_sysreg_el1(SYS_TTBR0));
		__vcpu_assign_sys_reg(vcpu, TTBR1_EL2, read_sysreg_el1(SYS_TTBR1));
		__vcpu_assign_sys_reg(vcpu, TCR_EL2, read_sysreg_el1(SYS_TCR));

		/* Optional features: only present if the context declares them */
		if (ctxt_has_tcrx(&vcpu->arch.ctxt)) {
			__vcpu_assign_sys_reg(vcpu, TCR2_EL2, read_sysreg_el1(SYS_TCR2));

			if (ctxt_has_s1pie(&vcpu->arch.ctxt)) {
				__vcpu_assign_sys_reg(vcpu, PIRE0_EL2, read_sysreg_el1(SYS_PIRE0));
				__vcpu_assign_sys_reg(vcpu, PIR_EL2, read_sysreg_el1(SYS_PIR));
			}

			if (ctxt_has_s1poe(&vcpu->arch.ctxt))
				__vcpu_assign_sys_reg(vcpu, POR_EL2, read_sysreg_el1(SYS_POR));
		}

		/*
		 * The EL1 view of CNTKCTL_EL1 has a bunch of RES0 bits where
		 * the interesting CNTHCTL_EL2 bits live. So preserve these
		 * bits when reading back the guest-visible value.
		 */
		val = read_sysreg_el1(SYS_CNTKCTL);
		val &= CNTKCTL_VALID_BITS;
		/* Merge: keep the non-CNTKCTL bits, refresh the CNTKCTL ones */
		__vcpu_rmw_sys_reg(vcpu, CNTHCTL_EL2, &=, ~CNTKCTL_VALID_BITS);
		__vcpu_rmw_sys_reg(vcpu, CNTHCTL_EL2, |=, val);
	}

	/* SP_EL2 is mapped onto SP_EL1 while the guest runs at vEL2 */
	__vcpu_assign_sys_reg(vcpu, SP_EL2, read_sysreg(sp_el1));
	__vcpu_assign_sys_reg(vcpu, ELR_EL2, read_sysreg_el1(SYS_ELR));
	__vcpu_assign_sys_reg(vcpu, SPSR_EL2, read_sysreg_el1(SYS_SPSR));

	if (ctxt_has_sctlr2(&vcpu->arch.ctxt))
		__vcpu_assign_sys_reg(vcpu, SCTLR2_EL2, read_sysreg_el1(SYS_SCTLR2));
}
84
/*
 * Restore the guest hypervisor's (vEL2) system register state to the CPU.
 *
 * Mirror of __sysreg_save_vel2_state(): the _EL2 shadow copies are
 * written to the hardware EL1 registers. When vHCR_EL2.E2H is clear,
 * several registers have different layouts at EL1 and EL2 and must be
 * translated on the way in.
 */
static void __sysreg_restore_vel2_state(struct kvm_vcpu *vcpu)
{
	u64 val;

	/* These registers are common with EL1 */
	write_sysreg(__vcpu_sys_reg(vcpu, PAR_EL1), par_el1);
	write_sysreg(__vcpu_sys_reg(vcpu, TPIDR_EL1), tpidr_el1);

	/* A vEL2 guest sees the context's MIDR, not a guest-set VPIDR */
	write_sysreg(ctxt_midr_el1(&vcpu->arch.ctxt), vpidr_el2);
	write_sysreg(__vcpu_sys_reg(vcpu, MPIDR_EL1), vmpidr_el2);
	write_sysreg_el1(__vcpu_sys_reg(vcpu, MAIR_EL2), SYS_MAIR);
	write_sysreg_el1(__vcpu_sys_reg(vcpu, VBAR_EL2), SYS_VBAR);
	write_sysreg_el1(__vcpu_sys_reg(vcpu, CONTEXTIDR_EL2), SYS_CONTEXTIDR);
	write_sysreg_el1(__vcpu_sys_reg(vcpu, AMAIR_EL2), SYS_AMAIR);

	if (vcpu_el2_e2h_is_set(vcpu)) {
		/*
		 * In VHE mode those registers are compatible between
		 * EL1 and EL2.
		 */
		write_sysreg_el1(__vcpu_sys_reg(vcpu, SCTLR_EL2), SYS_SCTLR);
		write_sysreg_el1(__vcpu_sys_reg(vcpu, CPTR_EL2), SYS_CPACR);
		write_sysreg_el1(__vcpu_sys_reg(vcpu, TTBR0_EL2), SYS_TTBR0);
		write_sysreg_el1(__vcpu_sys_reg(vcpu, TTBR1_EL2), SYS_TTBR1);
		write_sysreg_el1(__vcpu_sys_reg(vcpu, TCR_EL2), SYS_TCR);
		write_sysreg_el1(__vcpu_sys_reg(vcpu, CNTHCTL_EL2), SYS_CNTKCTL);
	} else {
		/*
		 * CNTHCTL_EL2 only affects EL1 when running nVHE, so
		 * no need to restore it.
		 * The remaining registers differ in layout between the
		 * EL2 (E2H=0) and EL1 formats and must be translated.
		 */
		val = translate_sctlr_el2_to_sctlr_el1(__vcpu_sys_reg(vcpu, SCTLR_EL2));
		write_sysreg_el1(val, SYS_SCTLR);
		val = translate_cptr_el2_to_cpacr_el1(__vcpu_sys_reg(vcpu, CPTR_EL2));
		write_sysreg_el1(val, SYS_CPACR);
		val = translate_ttbr0_el2_to_ttbr0_el1(__vcpu_sys_reg(vcpu, TTBR0_EL2));
		write_sysreg_el1(val, SYS_TTBR0);
		val = translate_tcr_el2_to_tcr_el1(__vcpu_sys_reg(vcpu, TCR_EL2));
		write_sysreg_el1(val, SYS_TCR);
	}

	/* Optional features: only present if the context declares them */
	if (ctxt_has_tcrx(&vcpu->arch.ctxt)) {
		write_sysreg_el1(__vcpu_sys_reg(vcpu, TCR2_EL2), SYS_TCR2);

		if (ctxt_has_s1pie(&vcpu->arch.ctxt)) {
			write_sysreg_el1(__vcpu_sys_reg(vcpu, PIR_EL2), SYS_PIR);
			write_sysreg_el1(__vcpu_sys_reg(vcpu, PIRE0_EL2), SYS_PIRE0);
		}

		if (ctxt_has_s1poe(&vcpu->arch.ctxt))
			write_sysreg_el1(__vcpu_sys_reg(vcpu, POR_EL2), SYS_POR);
	}

	write_sysreg_el1(__vcpu_sys_reg(vcpu, ESR_EL2), SYS_ESR);
	write_sysreg_el1(__vcpu_sys_reg(vcpu, AFSR0_EL2), SYS_AFSR0);
	write_sysreg_el1(__vcpu_sys_reg(vcpu, AFSR1_EL2), SYS_AFSR1);
	write_sysreg_el1(__vcpu_sys_reg(vcpu, FAR_EL2), SYS_FAR);
	/* SP_EL2 is mapped onto SP_EL1 while the guest runs at vEL2 */
	write_sysreg(__vcpu_sys_reg(vcpu, SP_EL2), sp_el1);
	write_sysreg_el1(__vcpu_sys_reg(vcpu, ELR_EL2), SYS_ELR);
	write_sysreg_el1(__vcpu_sys_reg(vcpu, SPSR_EL2), SYS_SPSR);

	if (ctxt_has_sctlr2(&vcpu->arch.ctxt))
		write_sysreg_el1(__vcpu_sys_reg(vcpu, SCTLR2_EL2), SYS_SCTLR2);
}
149
/*
 * VHE: Host and guest must save mdscr_el1 and sp_el0 (and the PC and
 * pstate, which are handled as part of the el2 return state) on every
 * switch (sp_el0 is being dealt with in the assembly code).
 * tpidr_el0 and tpidrro_el0 only need to be switched when going
 * to host userspace or a different VCPU. EL1 registers only need to be
 * switched when potentially going to run a different VCPU. The latter two
 * classes are handled as part of kvm_arch_vcpu_load and kvm_arch_vcpu_put.
 */
159
/* Save the host's per-switch state (see the class breakdown above). */
void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt)
{
	__sysreg_save_common_state(ctxt);
}
/* Excluded from kprobes: runs on every world switch. */
NOKPROBE_SYMBOL(sysreg_save_host_state_vhe);
165
/*
 * Save the guest's per-switch state, plus the EL2 return state
 * (PC and pstate) captured on exit from the guest.
 */
void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt)
{
	__sysreg_save_common_state(ctxt);
	__sysreg_save_el2_return_state(ctxt);
}
/* Excluded from kprobes: runs on every world switch. */
NOKPROBE_SYMBOL(sysreg_save_guest_state_vhe);
172
/* Restore the host's per-switch state saved by sysreg_save_host_state_vhe(). */
void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt)
{
	__sysreg_restore_common_state(ctxt);
}
/* Excluded from kprobes: runs on every world switch. */
NOKPROBE_SYMBOL(sysreg_restore_host_state_vhe);
178
/*
 * Restore the guest's per-switch state and the EL2 return state
 * (PC and pstate) ahead of re-entering the guest.
 */
void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt)
{
	__sysreg_restore_common_state(ctxt);
	__sysreg_restore_el2_return_state(ctxt);
}
/* Excluded from kprobes: runs on every world switch. */
NOKPROBE_SYMBOL(sysreg_restore_guest_state_vhe);
185
186 /**
187 * __vcpu_load_switch_sysregs - Load guest system registers to the physical CPU
188 *
189 * @vcpu: The VCPU pointer
190 *
191 * Load system registers that do not affect the host's execution, for
192 * example EL1 system registers on a VHE system where the host kernel
193 * runs at EL2. This function is called from KVM's vcpu_load() function
194 * and loading system register state early avoids having to load them on
195 * every entry to the VM.
196 */
__vcpu_load_switch_sysregs(struct kvm_vcpu * vcpu)197 void __vcpu_load_switch_sysregs(struct kvm_vcpu *vcpu)
198 {
199 struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
200 struct kvm_cpu_context *host_ctxt;
201 u64 midr, mpidr;
202
203 host_ctxt = host_data_ptr(host_ctxt);
204 __sysreg_save_user_state(host_ctxt);
205
206 /*
207 * When running a normal EL1 guest, we only load a new vcpu
208 * after a context switch, which imvolves a DSB, so all
209 * speculative EL1&0 walks will have already completed.
210 * If running NV, the vcpu may transition between vEL1 and
211 * vEL2 without a context switch, so make sure we complete
212 * those walks before loading a new context.
213 */
214 if (vcpu_has_nv(vcpu))
215 dsb(nsh);
216
217 /*
218 * Load guest EL1 and user state
219 *
220 * We must restore the 32-bit state before the sysregs, thanks
221 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
222 */
223 __sysreg32_restore_state(vcpu);
224 __sysreg_restore_user_state(guest_ctxt);
225
226 if (unlikely(is_hyp_ctxt(vcpu))) {
227 __sysreg_restore_vel2_state(vcpu);
228 } else {
229 if (vcpu_has_nv(vcpu)) {
230 /*
231 * As we're restoring a nested guest, set the value
232 * provided by the guest hypervisor.
233 */
234 midr = ctxt_sys_reg(guest_ctxt, VPIDR_EL2);
235 mpidr = ctxt_sys_reg(guest_ctxt, VMPIDR_EL2);
236 } else {
237 midr = ctxt_midr_el1(guest_ctxt);
238 mpidr = ctxt_sys_reg(guest_ctxt, MPIDR_EL1);
239 }
240
241 __sysreg_restore_el1_state(guest_ctxt, midr, mpidr);
242 }
243
244 vcpu_set_flag(vcpu, SYSREGS_ON_CPU);
245 }
246
247 /**
248 * __vcpu_put_switch_sysregs - Restore host system registers to the physical CPU
249 *
250 * @vcpu: The VCPU pointer
251 *
252 * Save guest system registers that do not affect the host's execution, for
253 * example EL1 system registers on a VHE system where the host kernel
254 * runs at EL2. This function is called from KVM's vcpu_put() function
255 * and deferring saving system register state until we're no longer running the
256 * VCPU avoids having to save them on every exit from the VM.
257 */
__vcpu_put_switch_sysregs(struct kvm_vcpu * vcpu)258 void __vcpu_put_switch_sysregs(struct kvm_vcpu *vcpu)
259 {
260 struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
261 struct kvm_cpu_context *host_ctxt;
262
263 host_ctxt = host_data_ptr(host_ctxt);
264
265 if (unlikely(is_hyp_ctxt(vcpu)))
266 __sysreg_save_vel2_state(vcpu);
267 else
268 __sysreg_save_el1_state(guest_ctxt);
269
270 __sysreg_save_user_state(guest_ctxt);
271 __sysreg32_save_state(vcpu);
272
273 /* Restore host user state */
274 __sysreg_restore_user_state(host_ctxt);
275
276 vcpu_clear_flag(vcpu, SYSREGS_ON_CPU);
277 }
278