xref: /linux/arch/arm64/kvm/reset.c (revision 6179d4a213006491ff0d50073256f21fad22149b)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/reset.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/hw_breakpoint.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

#include <kvm/arm_arch_timer.h>

#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/fpsimd.h>
#include <asm/ptrace.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nested.h>
#include <asm/virt.h>

/* Maximum phys_shift supported for any VM on this host */
static u32 __ro_after_init kvm_ipa_limit;

/*
 * ARMv8 Reset Values
 */
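/*
 * All three values mask the asynchronous exception bits (A/I/F, plus
 * the Debug mask for AArch64), matching the PSTATE an ARMv8 PE has
 * coming out of reset; they differ only in the mode the vCPU starts
 * in: EL1h, EL2h for a nested-virt guest, or AArch32 SVC for a 32bit
 * EL1 guest.
 */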
#define VCPU_RESET_PSTATE_EL1	(PSR_MODE_EL1h | PSR_A_BIT | PSR_I_BIT | \
				 PSR_F_BIT | PSR_D_BIT)

#define VCPU_RESET_PSTATE_EL2	(PSR_MODE_EL2h | PSR_A_BIT | PSR_I_BIT | \
				 PSR_F_BIT | PSR_D_BIT)

#define VCPU_RESET_PSTATE_SVC	(PSR_AA32_MODE_SVC | PSR_AA32_A_BIT | \
				 PSR_AA32_I_BIT | PSR_AA32_F_BIT)

unsigned int __ro_after_init kvm_sve_max_vl;

int __init kvm_arm_init_sve(void)
{
	if (system_supports_sve()) {
		kvm_sve_max_vl = sve_max_virtualisable_vl();

		/*
		 * The get_sve_reg()/set_sve_reg() ioctl interface will need
		 * to be extended with multiple register slice support in
		 * order to support vector lengths greater than
		 * VL_ARCH_MAX:
		 */
		if (WARN_ON(kvm_sve_max_vl > VL_ARCH_MAX))
			kvm_sve_max_vl = VL_ARCH_MAX;

		/*
		 * Don't even try to make use of vector lengths that
		 * aren't available on all CPUs, for now:
		 */
		if (kvm_sve_max_vl < sve_max_vl())
			pr_warn("KVM: SVE vector length for guests limited to %u bytes\n",
				kvm_sve_max_vl);
	}

	return 0;
}

static void kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sve_max_vl = kvm_sve_max_vl;

	/*
	 * Userspace can still customize the vector lengths by writing
	 * KVM_REG_ARM64_SVE_VLS.  Allocation is deferred until
	 * kvm_arm_vcpu_finalize(), which freezes the configuration.
	 */
	vcpu_set_flag(vcpu, GUEST_HAS_SVE);
}

/*
 * Finalize vcpu's maximum SVE vector length, allocating
 * vcpu->arch.sve_state as necessary.
 */
static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
{
	void *buf;
	unsigned int vl;
	size_t reg_sz;
	int ret;

	vl = vcpu->arch.sve_max_vl;

	/*
	 * Responsibility for these properties is shared between
	 * kvm_arm_init_sve(), kvm_vcpu_enable_sve() and
	 * set_sve_vls().  Double-check here just to be sure:
	 */
	if (WARN_ON(!sve_vl_valid(vl) || vl > sve_max_virtualisable_vl() ||
		    vl > VL_ARCH_MAX))
		return -EIO;

	reg_sz = vcpu_sve_state_size(vcpu);
	buf = kzalloc(reg_sz, GFP_KERNEL_ACCOUNT);
	if (!buf)
		return -ENOMEM;

	ret = kvm_share_hyp(buf, buf + reg_sz);
	if (ret) {
		kfree(buf);
		return ret;
	}

	vcpu->arch.sve_state = buf;
	vcpu_set_flag(vcpu, VCPU_SVE_FINALIZED);
	return 0;
}

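/*
 * Illustrative userspace flow (not kernel code): a VMM that enabled
 * KVM_ARM_VCPU_SVE is expected to finalize the vCPU once it is done
 * writing KVM_REG_ARM64_SVE_VLS, along the lines of:
 *
 *	int feature = KVM_ARM_VCPU_SVE;
 *	ioctl(vcpu_fd, KVM_ARM_VCPU_FINALIZE, &feature);
 *
 * after which the vector length configuration is frozen and the SVE
 * registers become accessible.
 */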
int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature)
{
	switch (feature) {
	case KVM_ARM_VCPU_SVE:
		if (!vcpu_has_sve(vcpu))
			return -EINVAL;

		if (kvm_arm_vcpu_sve_finalized(vcpu))
			return -EPERM;

		return kvm_vcpu_finalize_sve(vcpu);
	}

	return -EINVAL;
}

bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
{
	if (vcpu_has_sve(vcpu) && !kvm_arm_vcpu_sve_finalized(vcpu))
		return false;

	return true;
}

void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	void *sve_state = vcpu->arch.sve_state;

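	/*
	 * Unshare everything previously shared with the hypervisor
	 * before freeing it: once freed, the pages can be reused and
	 * must no longer be reachable at EL2.
	 */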
	kvm_vcpu_unshare_task_fp(vcpu);
	kvm_unshare_hyp(vcpu, vcpu + 1);
	if (sve_state)
		kvm_unshare_hyp(sve_state, sve_state + vcpu_sve_state_size(vcpu));
	kfree(sve_state);
	kfree(vcpu->arch.ccsidr);
}

static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)
{
	if (vcpu_has_sve(vcpu))
		memset(vcpu->arch.sve_state, 0, vcpu_sve_state_size(vcpu));
}

static void kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
{
	vcpu_set_flag(vcpu, GUEST_HAS_PTRAUTH);
}

/**
 * kvm_reset_vcpu - sets core registers and sys_regs to reset value
 * @vcpu: The VCPU pointer
 *
 * This function sets the registers on the virtual CPU struct to their
 * architecturally defined reset values, except for registers whose reset is
 * deferred until kvm_arm_vcpu_finalize().
 *
 * Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT
 * ioctl or as part of handling a request issued by another VCPU in the PSCI
 * handling code.  In the first case, the VCPU will not be loaded, and in the
 * second case the VCPU will be loaded.  Because this function operates purely
 * on the memory-backed values of system registers, we want to do a full put if
 * we were loaded (handling a request) and load the values back at the end of
 * the function.  Otherwise we leave the state alone.  In both cases, we
 * disable preemption around the vcpu reset as we would otherwise race with
 * preempt notifiers which also call put/load.
 */
void kvm_reset_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_reset_state reset_state;
	bool loaded;
	u32 pstate;

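	/*
	 * Atomically snapshot and consume any reset state (entry PC,
	 * r0, endianness) that a PSCI CPU_ON may have staged for this
	 * vCPU.
	 */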
	spin_lock(&vcpu->arch.mp_state_lock);
	reset_state = vcpu->arch.reset_state;
	vcpu->arch.reset_state.reset = false;
	spin_unlock(&vcpu->arch.mp_state_lock);

	/* Reset PMU outside of the non-preemptible section */
	kvm_pmu_vcpu_reset(vcpu);

	preempt_disable();
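	/* vcpu->cpu is only valid (!= -1) while the vCPU is loaded */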
	loaded = (vcpu->cpu != -1);
	if (loaded)
		kvm_arch_vcpu_put(vcpu);

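	/*
	 * Until userspace finalizes the vCPU, reset only (re)advertises
	 * the SVE feature; the backing storage is allocated at
	 * finalization time and merely zeroed on later resets.
	 */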
	if (!kvm_arm_vcpu_sve_finalized(vcpu)) {
		if (vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE))
			kvm_vcpu_enable_sve(vcpu);
	} else {
		kvm_vcpu_reset_sve(vcpu);
	}

	if (vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_ADDRESS) ||
	    vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_GENERIC))
		kvm_vcpu_enable_ptrauth(vcpu);

	if (vcpu_el1_is_32bit(vcpu))
		pstate = VCPU_RESET_PSTATE_SVC;
	else if (vcpu_has_nv(vcpu))
		pstate = VCPU_RESET_PSTATE_EL2;
	else
		pstate = VCPU_RESET_PSTATE_EL1;

	/* Reset core registers */
	memset(vcpu_gp_regs(vcpu), 0, sizeof(*vcpu_gp_regs(vcpu)));
	memset(&vcpu->arch.ctxt.fp_regs, 0, sizeof(vcpu->arch.ctxt.fp_regs));
	vcpu->arch.ctxt.spsr_abt = 0;
	vcpu->arch.ctxt.spsr_und = 0;
	vcpu->arch.ctxt.spsr_irq = 0;
	vcpu->arch.ctxt.spsr_fiq = 0;
	vcpu_gp_regs(vcpu)->pstate = pstate;

	/* Reset system registers */
	kvm_reset_sys_regs(vcpu);

	/*
	 * Additional reset state handling that PSCI may have imposed on us.
	 * Must be done after all the sys_reg reset.
	 */
	if (reset_state.reset) {
		unsigned long target_pc = reset_state.pc;

		/* Gracefully handle Thumb2 entry point */
		if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
			target_pc &= ~1UL;
			vcpu_set_thumb(vcpu);
		}

		/* Propagate caller endianness */
		if (reset_state.be)
			kvm_vcpu_set_be(vcpu);

		*vcpu_pc(vcpu) = target_pc;
		vcpu_set_reg(vcpu, 0, reset_state.r0);
	}

	/* Reset timer */
	kvm_timer_vcpu_reset(vcpu);

	if (loaded)
		kvm_arch_vcpu_load(vcpu, smp_processor_id());
	preempt_enable();
}

u32 get_kvm_ipa_limit(void)
{
	return kvm_ipa_limit;
}

int __init kvm_set_ipa_limit(void)
{
	unsigned int parange;
	u64 mmfr0;

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	parange = cpuid_feature_extract_unsigned_field(mmfr0,
				ID_AA64MMFR0_EL1_PARANGE_SHIFT);
	/*
	 * IPA size beyond 48 bits for 4K and 16K page size is only supported
	 * when LPA2 is available. So if we have LPA2, enable it, else cap to 48
	 * bits, in case it's reported as larger on the system.
	 */
	if (!kvm_lpa2_is_enabled() && PAGE_SIZE != SZ_64K)
		parange = min(parange, (unsigned int)ID_AA64MMFR0_EL1_PARANGE_48);

	/*
	 * Check with ARMv8.5-GTG that our PAGE_SIZE is supported at
	 * Stage-2. If not, things will stop very quickly.
	 */
	switch (cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_EL1_TGRAN_2_SHIFT)) {
	case ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_NONE:
		kvm_err("PAGE_SIZE not supported at Stage-2, giving up\n");
		return -EINVAL;
	case ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_DEFAULT:
		kvm_debug("PAGE_SIZE supported at Stage-2 (default)\n");
		break;
	case ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MIN ... ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MAX:
		kvm_debug("PAGE_SIZE supported at Stage-2 (advertised)\n");
		break;
	default:
		kvm_err("Unsupported value for TGRAN_2, giving up\n");
		return -EINVAL;
	}

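	/* Translate the sanitised PARange field into an IPA width in bits */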
	kvm_ipa_limit = id_aa64mmfr0_parange_to_phys_shift(parange);
	kvm_info("IPA Size Limit: %d bits%s\n", kvm_ipa_limit,
		 ((kvm_ipa_limit < KVM_PHYS_SHIFT) ?
		  " (Reduced IPA size, limited VM/VMM compatibility)" : ""));

	return 0;
}