// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/reset.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/hw_breakpoint.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

#include <kvm/arm_arch_timer.h>

#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/fpsimd.h>
#include <asm/ptrace.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nested.h>
#include <asm/virt.h>

/* Maximum phys_shift supported for any VM on this host */
static u32 __ro_after_init kvm_ipa_limit;

/*
 * ARMv8 Reset Values
 */
#define VCPU_RESET_PSTATE_EL1	(PSR_MODE_EL1h | PSR_A_BIT | PSR_I_BIT | \
				 PSR_F_BIT | PSR_D_BIT)

#define VCPU_RESET_PSTATE_EL2	(PSR_MODE_EL2h | PSR_A_BIT | PSR_I_BIT | \
				 PSR_F_BIT | PSR_D_BIT)

#define VCPU_RESET_PSTATE_SVC	(PSR_AA32_MODE_SVC | PSR_AA32_A_BIT | \
				 PSR_AA32_I_BIT | PSR_AA32_F_BIT)
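
/*
 * Note (descriptive, not from the original file): each of these reset
 * values starts the vcpu with PSTATE.{A,I,F} set (SError, IRQ and FIQ
 * masked).  The AArch64 variants additionally set PSTATE.D to mask
 * debug exceptions and select the "h" form of the mode, i.e. the
 * SP_ELx stack pointer of the target exception level.
 */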

unsigned int __ro_after_init kvm_sve_max_vl;

int __init kvm_arm_init_sve(void)
{
	if (system_supports_sve()) {
		kvm_sve_max_vl = sve_max_virtualisable_vl();

		/*
		 * The get_sve_reg()/set_sve_reg() ioctl interface will need
		 * to be extended with multiple register slice support in
		 * order to support vector lengths greater than
		 * VL_ARCH_MAX:
		 */
		if (WARN_ON(kvm_sve_max_vl > VL_ARCH_MAX))
			kvm_sve_max_vl = VL_ARCH_MAX;

		/*
		 * Don't even try to make use of vector lengths that
		 * aren't available on all CPUs, for now:
		 */
		if (kvm_sve_max_vl < sve_max_vl())
			pr_warn("KVM: SVE vector length for guests limited to %u bytes\n",
				kvm_sve_max_vl);
	}

	return 0;
}
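
/*
 * Worked example (illustrative figures, not from the original file): on
 * a mismatched big.LITTLE system where one cluster implements up to a
 * 256-byte (2048-bit) vector length but only 64 bytes (512 bits) is
 * usable on every CPU, sve_max_virtualisable_vl() returns 64, so guests
 * are capped at 64-byte vectors and the pr_warn() above fires.
 */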

static int kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu)
{
	if (!system_supports_sve())
		return -EINVAL;

	vcpu->arch.sve_max_vl = kvm_sve_max_vl;

	/*
	 * Userspace can still customize the vector lengths by writing
	 * KVM_REG_ARM64_SVE_VLS.  Allocation is deferred until
	 * kvm_arm_vcpu_finalize(), which freezes the configuration.
	 */
	vcpu_set_flag(vcpu, GUEST_HAS_SVE);

	return 0;
}
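
/*
 * Illustrative userspace sketch (not part of this file): a VMM opts a
 * vcpu into SVE at init time, before any finalization:
 *
 *	struct kvm_vcpu_init init;
 *
 *	ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init);
 *	init.features[0] |= 1 << KVM_ARM_VCPU_SVE;
 *	ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
 */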

/*
 * Finalize vcpu's maximum SVE vector length, allocating
 * vcpu->arch.sve_state as necessary.
 */
static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
{
	void *buf;
	unsigned int vl;
	size_t reg_sz;
	int ret;

	vl = vcpu->arch.sve_max_vl;

	/*
	 * Responsibility for these properties is shared between
	 * kvm_arm_init_sve(), kvm_vcpu_enable_sve() and
	 * set_sve_vls().  Double-check here just to be sure:
	 */
	if (WARN_ON(!sve_vl_valid(vl) || vl > sve_max_virtualisable_vl() ||
		    vl > VL_ARCH_MAX))
		return -EIO;

	reg_sz = vcpu_sve_state_size(vcpu);
	buf = kzalloc(reg_sz, GFP_KERNEL_ACCOUNT);
	if (!buf)
		return -ENOMEM;

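	/*
	 * Descriptive note (not from the original file): the buffer must
	 * also be visible at EL2.  With the nVHE or protected
	 * hypervisor, EL2 runs with its own mappings, so the SVE
	 * register storage has to be shared with hyp explicitly.
	 */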
	ret = kvm_share_hyp(buf, buf + reg_sz);
	if (ret) {
		kfree(buf);
		return ret;
	}

	vcpu->arch.sve_state = buf;
	vcpu_set_flag(vcpu, VCPU_SVE_FINALIZED);
	return 0;
}

int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature)
{
	switch (feature) {
	case KVM_ARM_VCPU_SVE:
		if (!vcpu_has_sve(vcpu))
			return -EINVAL;

		if (kvm_arm_vcpu_sve_finalized(vcpu))
			return -EPERM;

		return kvm_vcpu_finalize_sve(vcpu);
	}

	return -EINVAL;
}
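
/*
 * Illustrative userspace sketch (not part of this file): once any
 * KVM_REG_ARM64_SVE_VLS configuration is done, the VMM locks the SVE
 * setup down with:
 *
 *	int feature = KVM_ARM_VCPU_SVE;
 *
 *	ioctl(vcpu_fd, KVM_ARM_VCPU_FINALIZE, &feature);
 */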

bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
{
	if (vcpu_has_sve(vcpu) && !kvm_arm_vcpu_sve_finalized(vcpu))
		return false;

	return true;
}

void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	void *sve_state = vcpu->arch.sve_state;

	kvm_vcpu_unshare_task_fp(vcpu);
	kvm_unshare_hyp(vcpu, vcpu + 1);
	if (sve_state)
		kvm_unshare_hyp(sve_state, sve_state + vcpu_sve_state_size(vcpu));
	kfree(sve_state);
	kfree(vcpu->arch.ccsidr);
}

static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)
{
	if (vcpu_has_sve(vcpu))
		memset(vcpu->arch.sve_state, 0, vcpu_sve_state_size(vcpu));
}

static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
{
	/*
	 * For now, insist that userspace requests both the address and
	 * generic pointer authentication features together, and that the
	 * system actually supports both capabilities.
	 */
	if (!test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
	    !test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features) ||
	    !system_has_full_ptr_auth())
		return -EINVAL;

	vcpu_set_flag(vcpu, GUEST_HAS_PTRAUTH);
	return 0;
}
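
/*
 * Illustrative (not part of this file): the matching userspace request
 * sets both feature bits in the same KVM_ARM_VCPU_INIT call:
 *
 *	init.features[0] |= 1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS;
 *	init.features[0] |= 1 << KVM_ARM_VCPU_PTRAUTH_GENERIC;
 */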

/**
 * kvm_reset_vcpu - sets core registers and sys_regs to reset values
 * @vcpu: The VCPU pointer
 *
 * This function sets the registers on the virtual CPU struct to their
 * architecturally defined reset values, except for registers whose reset is
 * deferred until kvm_arm_vcpu_finalize().
 *
 * Note: This function can be called from two paths: the KVM_ARM_VCPU_INIT
 * ioctl, or as part of handling a request issued by another VCPU in the PSCI
 * handling code.  In the first case, the VCPU will not be loaded, and in the
 * second case it will be.  Because this function operates purely on the
 * memory-backed values of system registers, we want to do a full put if we
 * were loaded (handling a request) and load the values back at the end of
 * the function.  Otherwise we leave the state alone.  In both cases, we
 * disable preemption around the vcpu reset as we would otherwise race with
 * preempt notifiers which also call put/load.
 */
int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_reset_state reset_state;
	int ret;
	bool loaded;
	u32 pstate;

	spin_lock(&vcpu->arch.mp_state_lock);
	reset_state = vcpu->arch.reset_state;
	vcpu->arch.reset_state.reset = false;
	spin_unlock(&vcpu->arch.mp_state_lock);

	/* Reset PMU outside of the non-preemptible section */
	kvm_pmu_vcpu_reset(vcpu);

	preempt_disable();
	loaded = (vcpu->cpu != -1);
	if (loaded)
		kvm_arch_vcpu_put(vcpu);

	/* Disallow NV+SVE for the time being */
	if (vcpu_has_nv(vcpu) && vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE)) {
		ret = -EINVAL;
		goto out;
	}

	if (!kvm_arm_vcpu_sve_finalized(vcpu)) {
		if (test_bit(KVM_ARM_VCPU_SVE, vcpu->arch.features)) {
			ret = kvm_vcpu_enable_sve(vcpu);
			if (ret)
				goto out;
		}
	} else {
		kvm_vcpu_reset_sve(vcpu);
	}

	if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
	    test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features)) {
		if (kvm_vcpu_enable_ptrauth(vcpu)) {
			ret = -EINVAL;
			goto out;
		}
	}

	switch (vcpu->arch.target) {
	default:
		if (vcpu_el1_is_32bit(vcpu)) {
			pstate = VCPU_RESET_PSTATE_SVC;
		} else if (vcpu_has_nv(vcpu)) {
			pstate = VCPU_RESET_PSTATE_EL2;
		} else {
			pstate = VCPU_RESET_PSTATE_EL1;
		}

		if (kvm_vcpu_has_pmu(vcpu) && !kvm_arm_support_pmu_v3()) {
			ret = -EINVAL;
			goto out;
		}
		break;
	}

	/* Reset core registers */
	memset(vcpu_gp_regs(vcpu), 0, sizeof(*vcpu_gp_regs(vcpu)));
	memset(&vcpu->arch.ctxt.fp_regs, 0, sizeof(vcpu->arch.ctxt.fp_regs));
	vcpu->arch.ctxt.spsr_abt = 0;
	vcpu->arch.ctxt.spsr_und = 0;
	vcpu->arch.ctxt.spsr_irq = 0;
	vcpu->arch.ctxt.spsr_fiq = 0;
	vcpu_gp_regs(vcpu)->pstate = pstate;

	/* Reset system registers */
	kvm_reset_sys_regs(vcpu);

	/*
	 * Additional reset state handling that PSCI may have imposed on us.
	 * Must be done after all the sys_reg resets.
	 */
	if (reset_state.reset) {
		unsigned long target_pc = reset_state.pc;

		/* Gracefully handle Thumb2 entry point */
		if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
			target_pc &= ~1UL;
			vcpu_set_thumb(vcpu);
		}

		/* Propagate caller endianness */
		if (reset_state.be)
			kvm_vcpu_set_be(vcpu);

		*vcpu_pc(vcpu) = target_pc;
		vcpu_set_reg(vcpu, 0, reset_state.r0);
	}

	/* Reset timer */
	ret = kvm_timer_vcpu_reset(vcpu);
out:
	if (loaded)
		kvm_arch_vcpu_load(vcpu, smp_processor_id());
	preempt_enable();
	return ret;
}
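
/*
 * Context note (not from the original file): the reset_state consumed
 * above is typically populated by the PSCI CPU_ON handler, which
 * records the requested entry point (pc), the context ID destined for
 * x0/r0, and the caller's endianness before raising a reset request
 * against the target vcpu.
 */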

u32 get_kvm_ipa_limit(void)
{
	return kvm_ipa_limit;
}
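
/*
 * Illustrative userspace sketch (not part of this file): the limit is
 * exposed via KVM_CAP_ARM_VM_IPA_SIZE, and a VM with a matching (or
 * smaller) IPA space is created with:
 *
 *	ipa = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_VM_IPA_SIZE);
 *	vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_TYPE_ARM_IPA_SIZE(ipa));
 */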

int __init kvm_set_ipa_limit(void)
{
	unsigned int parange;
	u64 mmfr0;

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	parange = cpuid_feature_extract_unsigned_field(mmfr0,
				ID_AA64MMFR0_EL1_PARANGE_SHIFT);
	/*
	 * An IPA size beyond 48 bits cannot be supported with either 4K
	 * or 16K pages, so cap the limit at 48 bits in case the hardware
	 * reports a larger range.
	 */
	if (PAGE_SIZE != SZ_64K)
		parange = min(parange, (unsigned int)ID_AA64MMFR0_EL1_PARANGE_48);

	/*
	 * Check with ARMv8.5-GTG that our PAGE_SIZE is supported at
	 * Stage-2. If not, things will stop very quickly.
	 */
	switch (cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_EL1_TGRAN_2_SHIFT)) {
	case ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_NONE:
		kvm_err("PAGE_SIZE not supported at Stage-2, giving up\n");
		return -EINVAL;
	case ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_DEFAULT:
		kvm_debug("PAGE_SIZE supported at Stage-2 (default)\n");
		break;
	case ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MIN ... ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MAX:
		kvm_debug("PAGE_SIZE supported at Stage-2 (advertised)\n");
		break;
	default:
		kvm_err("Unsupported value for TGRAN_2, giving up\n");
		return -EINVAL;
	}

	kvm_ipa_limit = id_aa64mmfr0_parange_to_phys_shift(parange);
	kvm_info("IPA Size Limit: %d bits%s\n", kvm_ipa_limit,
		 ((kvm_ipa_limit < KVM_PHYS_SHIFT) ?
		  " (Reduced IPA size, limited VM/VMM compatibility)" : ""));

	return 0;
}
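
/*
 * Worked example (using the architected ID_AA64MMFR0_EL1.PARange
 * encodings, not from the original file): PARange 0 maps to 32 bits,
 * 1 to 36, 2 to 40, 3 to 42, 4 to 44, 5 to 48 and 6 to 52.  A host
 * reporting PARange 5 with 4K pages therefore ends up with a 48-bit
 * IPA limit, and anything above 5 is first capped to 48 bits by the
 * PAGE_SIZE check above.
 */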