// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/reset.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/hw_breakpoint.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

#include <kvm/arm_arch_timer.h>

#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/fpsimd.h>
#include <asm/ptrace.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/virt.h>

/* Maximum phys_shift supported for any VM on this host */
static u32 kvm_ipa_limit;

/*
 * ARMv8 Reset Values
 */
#define VCPU_RESET_PSTATE_EL1	(PSR_MODE_EL1h | PSR_A_BIT | PSR_I_BIT | \
				 PSR_F_BIT | PSR_D_BIT)

#define VCPU_RESET_PSTATE_SVC	(PSR_AA32_MODE_SVC | PSR_AA32_A_BIT | \
				 PSR_AA32_I_BIT | PSR_AA32_F_BIT)
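/*
 * Both reset states start with all maskable exceptions masked (AArch64:
 * D, A, I and F; AArch32: A, I and F) and in a privileged mode: EL1 with
 * SP_EL1 (EL1h) for AArch64 guests, SVC mode for AArch32 guests.
 */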

unsigned int kvm_sve_max_vl;

int kvm_arm_init_sve(void)
{
	if (system_supports_sve()) {
		kvm_sve_max_vl = sve_max_virtualisable_vl;

		/*
		 * The get_sve_reg()/set_sve_reg() ioctl interface will need
		 * to be extended with multiple register slice support in
		 * order to support vector lengths greater than
		 * SVE_VL_ARCH_MAX:
		 */
		if (WARN_ON(kvm_sve_max_vl > SVE_VL_ARCH_MAX))
			kvm_sve_max_vl = SVE_VL_ARCH_MAX;

		/*
		 * Don't even try to make use of vector lengths that
		 * aren't available on all CPUs, for now:
		 */
		if (kvm_sve_max_vl < sve_max_vl)
			pr_warn("KVM: SVE vector length for guests limited to %u bytes\n",
				kvm_sve_max_vl);
	}

	return 0;
}

static int kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu)
{
	if (!system_supports_sve())
		return -EINVAL;

	vcpu->arch.sve_max_vl = kvm_sve_max_vl;

	/*
	 * Userspace can still customize the vector lengths by writing
	 * KVM_REG_ARM64_SVE_VLS.  Allocation is deferred until
	 * kvm_arm_vcpu_finalize(), which freezes the configuration.
	 */
	vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_SVE;

	return 0;
}

/*
 * Finalize vcpu's maximum SVE vector length, allocating
 * vcpu->arch.sve_state as necessary.
 */
static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
{
	void *buf;
	unsigned int vl;

	vl = vcpu->arch.sve_max_vl;

	/*
	 * Responsibility for these properties is shared between
	 * kvm_arm_init_sve(), kvm_vcpu_enable_sve() and
	 * set_sve_vls().  Double-check here just to be sure:
	 */
	if (WARN_ON(!sve_vl_valid(vl) || vl > sve_max_virtualisable_vl ||
		    vl > SVE_VL_ARCH_MAX))
		return -EIO;

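	/*
	 * Illustrative sizing, assuming the uapi sigcontext layout: for
	 * vl = 64 bytes (512-bit vectors), sve_vq_from_vl() gives vq = 4,
	 * and SVE_SIG_REGS_SIZE(vq) covers 32 Z regs of vq * 16 bytes plus
	 * 16 P regs and FFR of vq * 2 bytes each, i.e. 546 * vq = 2184
	 * bytes here.
	 */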
	buf = kzalloc(SVE_SIG_REGS_SIZE(sve_vq_from_vl(vl)), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	vcpu->arch.sve_state = buf;
	vcpu->arch.flags |= KVM_ARM64_VCPU_SVE_FINALIZED;
	return 0;
}

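/*
 * Sketch of the expected userspace ordering (illustrative only, fds and
 * error handling omitted):
 *
 *	init.features[0] |= 1 << KVM_ARM_VCPU_SVE;
 *	ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
 *	(optionally write KVM_REG_ARM64_SVE_VLS here)
 *	feature = KVM_ARM_VCPU_SVE;
 *	ioctl(vcpu_fd, KVM_ARM_VCPU_FINALIZE, &feature);
 *
 * Until the finalize call succeeds, the SVE registers are inaccessible
 * and the vcpu cannot be run.
 */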
int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature)
{
	switch (feature) {
	case KVM_ARM_VCPU_SVE:
		if (!vcpu_has_sve(vcpu))
			return -EINVAL;

		if (kvm_arm_vcpu_sve_finalized(vcpu))
			return -EPERM;

		return kvm_vcpu_finalize_sve(vcpu);
	}

	return -EINVAL;
}

bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
{
	if (vcpu_has_sve(vcpu) && !kvm_arm_vcpu_sve_finalized(vcpu))
		return false;

	return true;
}

void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kfree(vcpu->arch.sve_state);
}

static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)
{
	if (vcpu_has_sve(vcpu))
		memset(vcpu->arch.sve_state, 0, vcpu_sve_state_size(vcpu));
}

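/*
 * Illustrative only: userspace requests both properties together at
 * KVM_ARM_VCPU_INIT time, e.g.
 *
 *	init.features[0] |= (1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS) |
 *			    (1 << KVM_ARM_VCPU_PTRAUTH_GENERIC);
 *
 * Requesting only one of the two (or running on a host without full
 * pointer authentication) makes the init fail with -EINVAL below.
 */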
static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
{
	/*
	 * For now make sure that both address/generic pointer authentication
	 * features are requested by userspace together and that the system
	 * supports these capabilities.
	 */
	if (!test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
	    !test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features) ||
	    !system_has_full_ptr_auth())
		return -EINVAL;

	vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_PTRAUTH;
	return 0;
}

/**
 * kvm_reset_vcpu - sets core registers and sys_regs to reset value
 * @vcpu: The VCPU pointer
 *
 * This function sets the registers on the virtual CPU struct to their
 * architecturally defined reset values, except for registers whose reset is
 * deferred until kvm_arm_vcpu_finalize().
 *
 * Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT
 * ioctl or as part of handling a request issued by another VCPU in the PSCI
 * handling code.  In the first case, the VCPU will not be loaded, and in the
 * second case the VCPU will be loaded.  Because this function operates purely
 * on the memory-backed values of system registers, we want to do a full put if
 * we were loaded (handling a request) and load the values back at the end of
 * the function.  Otherwise we leave the state alone.  In both cases, we
 * disable preemption around the vcpu reset as we would otherwise race with
 * preempt notifiers which also call put/load.
 */
int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
{
	int ret;
	bool loaded;
	u32 pstate;

	/* Reset PMU outside of the non-preemptible section */
	kvm_pmu_vcpu_reset(vcpu);

	preempt_disable();
	loaded = (vcpu->cpu != -1);
	if (loaded)
		kvm_arch_vcpu_put(vcpu);

	if (!kvm_arm_vcpu_sve_finalized(vcpu)) {
		if (test_bit(KVM_ARM_VCPU_SVE, vcpu->arch.features)) {
			ret = kvm_vcpu_enable_sve(vcpu);
			if (ret)
				goto out;
		}
	} else {
		kvm_vcpu_reset_sve(vcpu);
	}

	if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
	    test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features)) {
		if (kvm_vcpu_enable_ptrauth(vcpu)) {
			ret = -EINVAL;
			goto out;
		}
	}

	switch (vcpu->arch.target) {
	default:
		if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
			if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1)) {
				ret = -EINVAL;
				goto out;
			}
			pstate = VCPU_RESET_PSTATE_SVC;
		} else {
			pstate = VCPU_RESET_PSTATE_EL1;
		}

		if (kvm_vcpu_has_pmu(vcpu) && !kvm_arm_support_pmu_v3()) {
			ret = -EINVAL;
			goto out;
		}
		break;
	}

	/* Reset core registers */
	memset(vcpu_gp_regs(vcpu), 0, sizeof(*vcpu_gp_regs(vcpu)));
	memset(&vcpu->arch.ctxt.fp_regs, 0, sizeof(vcpu->arch.ctxt.fp_regs));
	vcpu->arch.ctxt.spsr_abt = 0;
	vcpu->arch.ctxt.spsr_und = 0;
	vcpu->arch.ctxt.spsr_irq = 0;
	vcpu->arch.ctxt.spsr_fiq = 0;
	vcpu_gp_regs(vcpu)->pstate = pstate;

	/* Reset system registers */
	kvm_reset_sys_regs(vcpu);

	/*
	 * Additional reset state handling that PSCI may have imposed on us.
	 * Must be done after all the sys_reg reset.
	 */
	if (vcpu->arch.reset_state.reset) {
		unsigned long target_pc = vcpu->arch.reset_state.pc;

		/* Gracefully handle Thumb2 entry point */
		if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
			target_pc &= ~1UL;
			vcpu_set_thumb(vcpu);
		}

		/* Propagate caller endianness */
		if (vcpu->arch.reset_state.be)
			kvm_vcpu_set_be(vcpu);

		*vcpu_pc(vcpu) = target_pc;
		vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);

		vcpu->arch.reset_state.reset = false;
	}

	/* Reset timer */
	ret = kvm_timer_vcpu_reset(vcpu);
out:
	if (loaded)
		kvm_arch_vcpu_load(vcpu, smp_processor_id());
	preempt_enable();
	return ret;
}

u32 get_kvm_ipa_limit(void)
{
	return kvm_ipa_limit;
}

int kvm_set_ipa_limit(void)
{
	unsigned int parange, tgran_2;
	u64 mmfr0;

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	parange = cpuid_feature_extract_unsigned_field(mmfr0,
				ID_AA64MMFR0_PARANGE_SHIFT);

	/*
	 * Check with ARMv8.5-GTG that our PAGE_SIZE is supported at
	 * Stage-2. If not, things will stop very quickly.
	 */
	switch (PAGE_SIZE) {
	default:
	case SZ_4K:
		tgran_2 = ID_AA64MMFR0_TGRAN4_2_SHIFT;
		break;
	case SZ_16K:
		tgran_2 = ID_AA64MMFR0_TGRAN16_2_SHIFT;
		break;
	case SZ_64K:
		tgran_2 = ID_AA64MMFR0_TGRAN64_2_SHIFT;
		break;
	}

	switch (cpuid_feature_extract_unsigned_field(mmfr0, tgran_2)) {
	case ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE:
		kvm_err("PAGE_SIZE not supported at Stage-2, giving up\n");
		return -EINVAL;
	case ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT:
		kvm_debug("PAGE_SIZE supported at Stage-2 (default)\n");
		break;
	case ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN ... ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX:
		kvm_debug("PAGE_SIZE supported at Stage-2 (advertised)\n");
		break;
	default:
		kvm_err("Unsupported value for TGRAN_2, giving up\n");
		return -EINVAL;
	}

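	/*
	 * For reference, ID_AA64MMFR0_EL1.PARange encodes the physical
	 * address range as 0b0000 = 32 bits, 0b0001 = 36, 0b0010 = 40,
	 * 0b0011 = 42, 0b0100 = 44, 0b0101 = 48 and 0b0110 = 52;
	 * id_aa64mmfr0_parange_to_phys_shift() maps these encodings to
	 * the bit counts used below.
	 */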
	kvm_ipa_limit = id_aa64mmfr0_parange_to_phys_shift(parange);
	kvm_info("IPA Size Limit: %d bits%s\n", kvm_ipa_limit,
		 ((kvm_ipa_limit < KVM_PHYS_SHIFT) ?
		  " (Reduced IPA size, limited VM/VMM compatibility)" : ""));

	return 0;
}

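/*
 * Illustrative VMM usage (not part of this file): the IPA size is chosen
 * at VM creation time through the machine type argument, e.g.
 *
 *	vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_TYPE_ARM_IPA_SIZE(40));
 *
 * A type of 0 selects the default KVM_PHYS_SHIFT of 40 bits; explicit
 * values are accepted between 32 bits and kvm_ipa_limit, as checked below.
 */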
int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
{
	u64 mmfr0, mmfr1;
	u32 phys_shift;

	if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK)
		return -EINVAL;

	phys_shift = KVM_VM_TYPE_ARM_IPA_SIZE(type);
	if (phys_shift) {
		if (phys_shift > kvm_ipa_limit ||
		    phys_shift < 32)
			return -EINVAL;
	} else {
		phys_shift = KVM_PHYS_SHIFT;
		if (phys_shift > kvm_ipa_limit) {
			pr_warn_once("%s using unsupported default IPA limit, upgrade your VMM\n",
				     current->comm);
			return -EINVAL;
		}
	}

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
	kvm->arch.vtcr = kvm_get_vtcr(mmfr0, mmfr1, phys_shift);

	return 0;
}
365