xref: /linux/arch/arm64/kvm/arm.c (revision 949d0a46ad1b9ab3450fb6ed69ff1e3e13c657bd)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2012 - Virtual Open Systems and Columbia University
4  * Author: Christoffer Dall <c.dall@virtualopensystems.com>
5  */
6 
7 #include <linux/bug.h>
8 #include <linux/cpu_pm.h>
9 #include <linux/errno.h>
10 #include <linux/err.h>
11 #include <linux/kvm_host.h>
12 #include <linux/list.h>
13 #include <linux/module.h>
14 #include <linux/vmalloc.h>
15 #include <linux/fs.h>
16 #include <linux/mman.h>
17 #include <linux/sched.h>
18 #include <linux/kvm.h>
19 #include <linux/kvm_irqfd.h>
20 #include <linux/irqbypass.h>
21 #include <linux/sched/stat.h>
22 #include <linux/psci.h>
23 #include <trace/events/kvm.h>
24 
25 #define CREATE_TRACE_POINTS
26 #include "trace_arm.h"
27 
28 #include <linux/uaccess.h>
29 #include <asm/ptrace.h>
30 #include <asm/mman.h>
31 #include <asm/tlbflush.h>
32 #include <asm/cacheflush.h>
33 #include <asm/cpufeature.h>
34 #include <asm/virt.h>
35 #include <asm/kvm_arm.h>
36 #include <asm/kvm_asm.h>
37 #include <asm/kvm_emulate.h>
38 #include <asm/kvm_mmu.h>
39 #include <asm/kvm_nested.h>
40 #include <asm/kvm_pkvm.h>
41 #include <asm/kvm_ptrauth.h>
42 #include <asm/sections.h>
43 #include <asm/stacktrace/nvhe.h>
44 
45 #include <kvm/arm_hypercalls.h>
46 #include <kvm/arm_pmu.h>
47 #include <kvm/arm_psci.h>
48 
49 #include "sys_regs.h"
50 
51 static enum kvm_mode kvm_mode = KVM_MODE_DEFAULT;
52 
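/*
 * WFI/WFE trapping policy. With the default NOTRAP_SINGLE_TASK policy, the
 * trap is only disabled while the vCPU thread is the sole runnable task on
 * its physical CPU (see kvm_vcpu_should_clear_twi/twe() below); NOTRAP and
 * TRAP force the behaviour unconditionally. The policy is normally chosen at
 * boot, e.g. via the kvm-arm.wfi_trap_policy / wfe_trap_policy parameters.
 */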
53 enum kvm_wfx_trap_policy {
54 	KVM_WFX_NOTRAP_SINGLE_TASK, /* Default option */
55 	KVM_WFX_NOTRAP,
56 	KVM_WFX_TRAP,
57 };
58 
59 static enum kvm_wfx_trap_policy kvm_wfi_trap_policy __read_mostly = KVM_WFX_NOTRAP_SINGLE_TASK;
60 static enum kvm_wfx_trap_policy kvm_wfe_trap_policy __read_mostly = KVM_WFX_NOTRAP_SINGLE_TASK;
61 
62 /*
63  * Tracks KVM IOCTLs and their associated KVM capabilities.
64  */
65 struct kvm_ioctl_cap_map {
66 	unsigned int ioctl;
67 	long ext;
68 };
69 
70 /* Make KVM_CAP_NR_VCPUS the reference for features we have always supported */
71 #define KVM_CAP_ARM_BASIC	KVM_CAP_NR_VCPUS
72 
73 /*
74  * Sorted by ioctl to allow for potential binary search,
75  * though linear scan is sufficient for this size.
76  */
77 static const struct kvm_ioctl_cap_map vm_ioctl_caps[] = {
78 	{ KVM_CREATE_IRQCHIP, KVM_CAP_IRQCHIP },
79 	{ KVM_ARM_SET_DEVICE_ADDR, KVM_CAP_ARM_SET_DEVICE_ADDR },
80 	{ KVM_ARM_MTE_COPY_TAGS, KVM_CAP_ARM_MTE },
81 	{ KVM_SET_DEVICE_ATTR, KVM_CAP_DEVICE_CTRL },
82 	{ KVM_GET_DEVICE_ATTR, KVM_CAP_DEVICE_CTRL },
83 	{ KVM_HAS_DEVICE_ATTR, KVM_CAP_DEVICE_CTRL },
84 	{ KVM_ARM_SET_COUNTER_OFFSET, KVM_CAP_COUNTER_OFFSET },
85 	{ KVM_ARM_GET_REG_WRITABLE_MASKS, KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES },
86 	{ KVM_ARM_PREFERRED_TARGET, KVM_CAP_ARM_BASIC },
87 };
88 
89 /*
90  * Look up the capability backing @ioctl and store it in *ext.
91  * Return 0 if found, or -EINVAL if no ioctl matches.
92  */
93 long kvm_get_cap_for_kvm_ioctl(unsigned int ioctl, long *ext)
94 {
95 	int i;
96 
97 	for (i = 0; i < ARRAY_SIZE(vm_ioctl_caps); i++) {
98 		if (vm_ioctl_caps[i].ioctl == ioctl) {
99 			*ext = vm_ioctl_caps[i].ext;
100 			return 0;
101 		}
102 	}
103 
104 	return -EINVAL;
105 }
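/*
 * Illustrative use of the mapping above (not part of the original flow): a
 * VM ioctl handler could gate an ioctl on its backing capability with
 * something like:
 *
 *	long ext;
 *
 *	if (!kvm_get_cap_for_kvm_ioctl(ioctl, &ext) &&
 *	    !kvm_vm_ioctl_check_extension(kvm, ext))
 *		return -EINVAL;
 */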
106 
107 DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);
108 
109 DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_base);
110 DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
111 
112 DECLARE_KVM_NVHE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
113 
114 static bool vgic_present, kvm_arm_initialised;
115 
116 static DEFINE_PER_CPU(unsigned char, kvm_hyp_initialized);
117 
118 bool is_kvm_arm_initialised(void)
119 {
120 	return kvm_arm_initialised;
121 }
122 
123 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
124 {
125 	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
126 }
127 
128 int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
129 			    struct kvm_enable_cap *cap)
130 {
131 	int r = -EINVAL;
132 
133 	if (cap->flags)
134 		return -EINVAL;
135 
136 	if (is_protected_kvm_enabled() && !kvm_pkvm_ext_allowed(kvm, cap->cap))
137 		return -EINVAL;
138 
139 	switch (cap->cap) {
140 	case KVM_CAP_ARM_NISV_TO_USER:
141 		r = 0;
142 		set_bit(KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER,
143 			&kvm->arch.flags);
144 		break;
145 	case KVM_CAP_ARM_MTE:
146 		mutex_lock(&kvm->lock);
147 		if (system_supports_mte() && !kvm->created_vcpus) {
148 			r = 0;
149 			set_bit(KVM_ARCH_FLAG_MTE_ENABLED, &kvm->arch.flags);
150 		}
151 		mutex_unlock(&kvm->lock);
152 		break;
153 	case KVM_CAP_ARM_SYSTEM_SUSPEND:
154 		r = 0;
155 		set_bit(KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED, &kvm->arch.flags);
156 		break;
157 	case KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE:
158 		mutex_lock(&kvm->slots_lock);
159 		/*
160 		 * To keep things simple, allow changing the chunk
161 		 * size only when no memory slots have been created.
162 		 */
163 		if (kvm_are_all_memslots_empty(kvm)) {
164 			u64 new_cap = cap->args[0];
165 
166 			if (!new_cap || kvm_is_block_size_supported(new_cap)) {
167 				r = 0;
168 				kvm->arch.mmu.split_page_chunk_size = new_cap;
169 			}
170 		}
171 		mutex_unlock(&kvm->slots_lock);
172 		break;
173 	case KVM_CAP_ARM_WRITABLE_IMP_ID_REGS:
174 		mutex_lock(&kvm->lock);
175 		if (!kvm->created_vcpus) {
176 			r = 0;
177 			set_bit(KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS, &kvm->arch.flags);
178 		}
179 		mutex_unlock(&kvm->lock);
180 		break;
181 	case KVM_CAP_ARM_SEA_TO_USER:
182 		r = 0;
183 		set_bit(KVM_ARCH_FLAG_EXIT_SEA, &kvm->arch.flags);
184 		break;
185 	default:
186 		break;
187 	}
188 
189 	return r;
190 }
191 
192 static int kvm_arm_default_max_vcpus(void)
193 {
194 	return vgic_present ? kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;
195 }
196 
197 /**
198  * kvm_arch_init_vm - initializes a VM data structure
199  * @kvm:	pointer to the KVM struct
200  * @type:	kvm device type
201  */
202 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
203 {
204 	int ret;
205 
206 	mutex_init(&kvm->arch.config_lock);
207 
208 #ifdef CONFIG_LOCKDEP
209 	/* Clue in lockdep that the config_lock must be taken inside kvm->lock */
210 	mutex_lock(&kvm->lock);
211 	mutex_lock(&kvm->arch.config_lock);
212 	mutex_unlock(&kvm->arch.config_lock);
213 	mutex_unlock(&kvm->lock);
214 #endif
215 
216 	kvm_init_nested(kvm);
217 
218 	ret = kvm_share_hyp(kvm, kvm + 1);
219 	if (ret)
220 		return ret;
221 
222 	if (!zalloc_cpumask_var(&kvm->arch.supported_cpus, GFP_KERNEL_ACCOUNT)) {
223 		ret = -ENOMEM;
224 		goto err_unshare_kvm;
225 	}
226 	cpumask_copy(kvm->arch.supported_cpus, cpu_possible_mask);
227 
228 	ret = kvm_init_stage2_mmu(kvm, &kvm->arch.mmu, type);
229 	if (ret)
230 		goto err_free_cpumask;
231 
232 	if (is_protected_kvm_enabled()) {
233 		/*
234 		 * If any failures occur after this is successful, make sure to
235 		 * call __pkvm_unreserve_vm to unreserve the VM in hyp.
236 		 */
237 		ret = pkvm_init_host_vm(kvm);
238 		if (ret)
239 			goto err_free_cpumask;
240 	}
241 
242 	kvm_vgic_early_init(kvm);
243 
244 	kvm_timer_init_vm(kvm);
245 
246 	/* The maximum number of VCPUs is limited by the host's GIC model */
247 	kvm->max_vcpus = kvm_arm_default_max_vcpus();
248 
249 	kvm_arm_init_hypercalls(kvm);
250 
251 	bitmap_zero(kvm->arch.vcpu_features, KVM_VCPU_MAX_FEATURES);
252 
253 	return 0;
254 
255 err_free_cpumask:
256 	free_cpumask_var(kvm->arch.supported_cpus);
257 err_unshare_kvm:
258 	kvm_unshare_hyp(kvm, kvm + 1);
259 	return ret;
260 }
261 
262 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
263 {
264 	return VM_FAULT_SIGBUS;
265 }
266 
267 void kvm_arch_create_vm_debugfs(struct kvm *kvm)
268 {
269 	kvm_sys_regs_create_debugfs(kvm);
270 	kvm_s2_ptdump_create_debugfs(kvm);
271 }
272 
273 static void kvm_destroy_mpidr_data(struct kvm *kvm)
274 {
275 	struct kvm_mpidr_data *data;
276 
277 	mutex_lock(&kvm->arch.config_lock);
278 
279 	data = rcu_dereference_protected(kvm->arch.mpidr_data,
280 					 lockdep_is_held(&kvm->arch.config_lock));
281 	if (data) {
282 		rcu_assign_pointer(kvm->arch.mpidr_data, NULL);
283 		synchronize_rcu();
284 		kfree(data);
285 	}
286 
287 	mutex_unlock(&kvm->arch.config_lock);
288 }
289 
290 /**
291  * kvm_arch_destroy_vm - destroy the VM data structure
292  * @kvm:	pointer to the KVM struct
293  */
294 void kvm_arch_destroy_vm(struct kvm *kvm)
295 {
296 	bitmap_free(kvm->arch.pmu_filter);
297 	free_cpumask_var(kvm->arch.supported_cpus);
298 
299 	kvm_vgic_destroy(kvm);
300 
301 	if (is_protected_kvm_enabled())
302 		pkvm_destroy_hyp_vm(kvm);
303 
304 	kvm_destroy_mpidr_data(kvm);
305 
306 	kfree(kvm->arch.sysreg_masks);
307 	kvm_destroy_vcpus(kvm);
308 
309 	kvm_unshare_hyp(kvm, kvm + 1);
310 
311 	kvm_arm_teardown_hypercalls(kvm);
312 }
313 
314 static bool kvm_has_full_ptr_auth(void)
315 {
316 	bool apa, gpa, api, gpi, apa3, gpa3;
317 	u64 isar1, isar2, val;
318 
319 	/*
320 	 * Check that:
321 	 *
322 	 * - both Address and Generic auth are implemented for a given
323 	 *   algorithm (Q5, IMPDEF or Q3)
324 	 * - only a single algorithm is implemented.
325 	 */
326 	if (!system_has_full_ptr_auth())
327 		return false;
328 
329 	isar1 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR1_EL1);
330 	isar2 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1);
331 
332 	apa = !!FIELD_GET(ID_AA64ISAR1_EL1_APA_MASK, isar1);
333 	val = FIELD_GET(ID_AA64ISAR1_EL1_GPA_MASK, isar1);
334 	gpa = (val == ID_AA64ISAR1_EL1_GPA_IMP);
335 
336 	api = !!FIELD_GET(ID_AA64ISAR1_EL1_API_MASK, isar1);
337 	val = FIELD_GET(ID_AA64ISAR1_EL1_GPI_MASK, isar1);
338 	gpi = (val == ID_AA64ISAR1_EL1_GPI_IMP);
339 
340 	apa3 = !!FIELD_GET(ID_AA64ISAR2_EL1_APA3_MASK, isar2);
341 	val  = FIELD_GET(ID_AA64ISAR2_EL1_GPA3_MASK, isar2);
342 	gpa3 = (val == ID_AA64ISAR2_EL1_GPA3_IMP);
343 
344 	return (apa == gpa && api == gpi && apa3 == gpa3 &&
345 		(apa + api + apa3) == 1);
346 }
347 
348 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
349 {
350 	int r;
351 
352 	if (is_protected_kvm_enabled() && !kvm_pkvm_ext_allowed(kvm, ext))
353 		return 0;
354 
355 	switch (ext) {
356 	case KVM_CAP_IRQCHIP:
357 		r = vgic_present;
358 		break;
359 	case KVM_CAP_IOEVENTFD:
360 	case KVM_CAP_USER_MEMORY:
361 	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
362 	case KVM_CAP_ONE_REG:
363 	case KVM_CAP_ARM_PSCI:
364 	case KVM_CAP_ARM_PSCI_0_2:
365 	case KVM_CAP_READONLY_MEM:
366 	case KVM_CAP_MP_STATE:
367 	case KVM_CAP_IMMEDIATE_EXIT:
368 	case KVM_CAP_VCPU_EVENTS:
369 	case KVM_CAP_ARM_IRQ_LINE_LAYOUT_2:
370 	case KVM_CAP_ARM_NISV_TO_USER:
371 	case KVM_CAP_ARM_INJECT_EXT_DABT:
372 	case KVM_CAP_SET_GUEST_DEBUG:
373 	case KVM_CAP_VCPU_ATTRIBUTES:
374 	case KVM_CAP_PTP_KVM:
375 	case KVM_CAP_ARM_SYSTEM_SUSPEND:
376 	case KVM_CAP_IRQFD_RESAMPLE:
377 	case KVM_CAP_COUNTER_OFFSET:
378 	case KVM_CAP_ARM_WRITABLE_IMP_ID_REGS:
379 	case KVM_CAP_ARM_SEA_TO_USER:
380 		r = 1;
381 		break;
382 	case KVM_CAP_SET_GUEST_DEBUG2:
383 		return KVM_GUESTDBG_VALID_MASK;
384 	case KVM_CAP_ARM_SET_DEVICE_ADDR:
385 		r = 1;
386 		break;
387 	case KVM_CAP_NR_VCPUS:
388 		/*
389 		 * ARM64 treats KVM_CAP_NR_VCPUS differently from all other
390 		 * architectures, as it does not always bound it to
391 		 * KVM_CAP_MAX_VCPUS. It should not matter much because
392 		 * this is just an advisory value.
393 		 */
394 		r = min_t(unsigned int, num_online_cpus(),
395 			  kvm_arm_default_max_vcpus());
396 		break;
397 	case KVM_CAP_MAX_VCPUS:
398 	case KVM_CAP_MAX_VCPU_ID:
399 		if (kvm)
400 			r = kvm->max_vcpus;
401 		else
402 			r = kvm_arm_default_max_vcpus();
403 		break;
404 	case KVM_CAP_MSI_DEVID:
405 		if (!kvm)
406 			r = -EINVAL;
407 		else
408 			r = kvm->arch.vgic.msis_require_devid;
409 		break;
410 	case KVM_CAP_ARM_USER_IRQ:
411 		/*
412 		 * 1: EL1_VTIMER, EL1_PTIMER, and PMU.
413 		 * (bump this number if adding more devices)
414 		 */
415 		r = 1;
416 		break;
417 	case KVM_CAP_ARM_MTE:
418 		r = system_supports_mte();
419 		break;
420 	case KVM_CAP_STEAL_TIME:
421 		r = kvm_arm_pvtime_supported();
422 		break;
423 	case KVM_CAP_ARM_EL1_32BIT:
424 		r = cpus_have_final_cap(ARM64_HAS_32BIT_EL1);
425 		break;
426 	case KVM_CAP_ARM_EL2:
427 		r = cpus_have_final_cap(ARM64_HAS_NESTED_VIRT);
428 		break;
429 	case KVM_CAP_ARM_EL2_E2H0:
430 		r = cpus_have_final_cap(ARM64_HAS_HCR_NV1);
431 		break;
432 	case KVM_CAP_GUEST_DEBUG_HW_BPS:
433 		r = get_num_brps();
434 		break;
435 	case KVM_CAP_GUEST_DEBUG_HW_WPS:
436 		r = get_num_wrps();
437 		break;
438 	case KVM_CAP_ARM_PMU_V3:
439 		r = kvm_supports_guest_pmuv3();
440 		break;
441 	case KVM_CAP_ARM_INJECT_SERROR_ESR:
442 		r = cpus_have_final_cap(ARM64_HAS_RAS_EXTN);
443 		break;
444 	case KVM_CAP_ARM_VM_IPA_SIZE:
445 		r = get_kvm_ipa_limit();
446 		break;
447 	case KVM_CAP_ARM_SVE:
448 		r = system_supports_sve();
449 		break;
450 	case KVM_CAP_ARM_PTRAUTH_ADDRESS:
451 	case KVM_CAP_ARM_PTRAUTH_GENERIC:
452 		r = kvm_has_full_ptr_auth();
453 		break;
454 	case KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE:
455 		if (kvm)
456 			r = kvm->arch.mmu.split_page_chunk_size;
457 		else
458 			r = KVM_ARM_EAGER_SPLIT_CHUNK_SIZE_DEFAULT;
459 		break;
460 	case KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES:
461 		r = kvm_supported_block_sizes();
462 		break;
463 	case KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES:
464 		r = BIT(0);
465 		break;
466 	case KVM_CAP_ARM_CACHEABLE_PFNMAP_SUPPORTED:
467 		if (!kvm)
468 			r = -EINVAL;
469 		else
470 			r = kvm_supports_cacheable_pfnmap();
471 		break;
472 
473 	default:
474 		r = 0;
475 	}
476 
477 	return r;
478 }
479 
480 long kvm_arch_dev_ioctl(struct file *filp,
481 			unsigned int ioctl, unsigned long arg)
482 {
483 	return -EINVAL;
484 }
485 
486 struct kvm *kvm_arch_alloc_vm(void)
487 {
488 	size_t sz = sizeof(struct kvm);
489 
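	/*
	 * Without VHE, struct kvm is shared with the EL2 hypervisor and must
	 * come from the kernel linear map, hence kzalloc(); with VHE a
	 * potentially vmalloc'ed kvzalloc() allocation is fine.
	 */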
490 	if (!has_vhe())
491 		return kzalloc(sz, GFP_KERNEL_ACCOUNT);
492 
493 	return kvzalloc(sz, GFP_KERNEL_ACCOUNT);
494 }
495 
496 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
497 {
498 	if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
499 		return -EBUSY;
500 
501 	if (id >= kvm->max_vcpus)
502 		return -EINVAL;
503 
504 	return 0;
505 }
506 
507 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
508 {
509 	int err;
510 
511 	spin_lock_init(&vcpu->arch.mp_state_lock);
512 
513 #ifdef CONFIG_LOCKDEP
514 	/* Inform lockdep that the config_lock is acquired after vcpu->mutex */
515 	mutex_lock(&vcpu->mutex);
516 	mutex_lock(&vcpu->kvm->arch.config_lock);
517 	mutex_unlock(&vcpu->kvm->arch.config_lock);
518 	mutex_unlock(&vcpu->mutex);
519 #endif
520 
521 	/* Force users to call KVM_ARM_VCPU_INIT */
522 	vcpu_clear_flag(vcpu, VCPU_INITIALIZED);
523 
524 	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
525 
526 	/* Set up the timer */
527 	kvm_timer_vcpu_init(vcpu);
528 
529 	kvm_pmu_vcpu_init(vcpu);
530 
531 	kvm_arm_pvtime_vcpu_init(&vcpu->arch);
532 
533 	vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;
534 
535 	/*
536 	 * This vCPU may have been created after mpidr_data was initialized.
537 	 * Throw out the pre-computed mappings if that is the case, which forces
538 	 * KVM to fall back to iteratively searching the vCPUs.
539 	 */
540 	kvm_destroy_mpidr_data(vcpu->kvm);
541 
542 	err = kvm_vgic_vcpu_init(vcpu);
543 	if (err)
544 		return err;
545 
546 	err = kvm_share_hyp(vcpu, vcpu + 1);
547 	if (err)
548 		kvm_vgic_vcpu_destroy(vcpu);
549 
550 	return err;
551 }
552 
553 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
554 {
555 }
556 
557 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
558 {
559 	if (!is_protected_kvm_enabled())
560 		kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
561 	else
562 		free_hyp_memcache(&vcpu->arch.pkvm_memcache);
563 	kvm_timer_vcpu_terminate(vcpu);
564 	kvm_pmu_vcpu_destroy(vcpu);
565 	kvm_vgic_vcpu_destroy(vcpu);
566 	kvm_arm_vcpu_destroy(vcpu);
567 }
568 
569 void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
570 {
571 
572 }
573 
574 void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
575 {
576 
577 }
578 
579 static void vcpu_set_pauth_traps(struct kvm_vcpu *vcpu)
580 {
581 	if (vcpu_has_ptrauth(vcpu) && !is_protected_kvm_enabled()) {
582 		/*
583 		 * Either we're running an L2 guest, and the API/APK bits come
584 		 * from L1's HCR_EL2, or API/APK are both set.
585 		 */
586 		if (unlikely(is_nested_ctxt(vcpu))) {
587 			u64 val;
588 
589 			val = __vcpu_sys_reg(vcpu, HCR_EL2);
590 			val &= (HCR_API | HCR_APK);
591 			vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
592 			vcpu->arch.hcr_el2 |= val;
593 		} else {
594 			vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
595 		}
596 
597 		/*
598 		 * Save the host keys if there is any chance for the guest
599 		 * to use pauth, as the entry code will reload the guest
600 		 * keys in that case.
601 		 */
602 		if (vcpu->arch.hcr_el2 & (HCR_API | HCR_APK)) {
603 			struct kvm_cpu_context *ctxt;
604 
605 			ctxt = this_cpu_ptr_hyp_sym(kvm_hyp_ctxt);
606 			ptrauth_save_keys(ctxt);
607 		}
608 	}
609 }
610 
611 static bool kvm_vcpu_should_clear_twi(struct kvm_vcpu *vcpu)
612 {
613 	if (unlikely(kvm_wfi_trap_policy != KVM_WFX_NOTRAP_SINGLE_TASK))
614 		return kvm_wfi_trap_policy == KVM_WFX_NOTRAP;
615 
616 	return single_task_running() &&
617 	       vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 &&
618 	       (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) ||
619 		vcpu->kvm->arch.vgic.nassgireq);
620 }
621 
622 static bool kvm_vcpu_should_clear_twe(struct kvm_vcpu *vcpu)
623 {
624 	if (unlikely(kvm_wfe_trap_policy != KVM_WFX_NOTRAP_SINGLE_TASK))
625 		return kvm_wfe_trap_policy == KVM_WFX_NOTRAP;
626 
627 	return single_task_running();
628 }
629 
630 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
631 {
632 	struct kvm_s2_mmu *mmu;
633 	int *last_ran;
634 
635 	if (is_protected_kvm_enabled())
636 		goto nommu;
637 
638 	if (vcpu_has_nv(vcpu))
639 		kvm_vcpu_load_hw_mmu(vcpu);
640 
641 	mmu = vcpu->arch.hw_mmu;
642 	last_ran = this_cpu_ptr(mmu->last_vcpu_ran);
643 
644 	/*
645 	 * Ensure a VMID is allocated for the MMU before programming VTTBR_EL2,
646 	 * which happens eagerly in VHE.
647 	 *
648 	 * Also, the VMID allocator only preserves VMIDs that are active at the
649 	 * time of rollover, so KVM might need to grab a new VMID for the MMU if
650 	 * this is called from kvm_sched_in().
651 	 */
652 	kvm_arm_vmid_update(&mmu->vmid);
653 
654 	/*
655 	 * We guarantee that both TLBs and I-cache are private to each
656 	 * vcpu. If detecting that a vcpu from the same VM has
657 	 * previously run on the same physical CPU, call into the
658 	 * hypervisor code to nuke the relevant contexts.
659 	 *
660 	 * We might get preempted before the vCPU actually runs, but
661 	 * over-invalidation doesn't affect correctness.
662 	 */
663 	if (*last_ran != vcpu->vcpu_idx) {
664 		kvm_call_hyp(__kvm_flush_cpu_context, mmu);
665 		*last_ran = vcpu->vcpu_idx;
666 	}
667 
668 nommu:
669 	vcpu->cpu = cpu;
670 
671 	/*
672 	 * The timer must be loaded before the vgic to correctly set up physical
673 	 * interrupt deactivation in nested state (e.g. timer interrupt).
674 	 */
675 	kvm_timer_vcpu_load(vcpu);
676 	kvm_vgic_load(vcpu);
677 	kvm_vcpu_load_debug(vcpu);
678 	kvm_vcpu_load_fgt(vcpu);
679 	if (has_vhe())
680 		kvm_vcpu_load_vhe(vcpu);
681 	kvm_arch_vcpu_load_fp(vcpu);
682 	kvm_vcpu_pmu_restore_guest(vcpu);
683 	if (kvm_arm_is_pvtime_enabled(&vcpu->arch))
684 		kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu);
685 
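	/*
	 * Decide whether WFE/WFI executed by the guest should trap to KVM
	 * (HCR_EL2.TWE/TWI) based on the configured trap policy and the
	 * current scheduling situation.
	 */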
686 	if (kvm_vcpu_should_clear_twe(vcpu))
687 		vcpu->arch.hcr_el2 &= ~HCR_TWE;
688 	else
689 		vcpu->arch.hcr_el2 |= HCR_TWE;
690 
691 	if (kvm_vcpu_should_clear_twi(vcpu))
692 		vcpu->arch.hcr_el2 &= ~HCR_TWI;
693 	else
694 		vcpu->arch.hcr_el2 |= HCR_TWI;
695 
696 	vcpu_set_pauth_traps(vcpu);
697 
698 	if (is_protected_kvm_enabled()) {
699 		kvm_call_hyp_nvhe(__pkvm_vcpu_load,
700 				  vcpu->kvm->arch.pkvm.handle,
701 				  vcpu->vcpu_idx, vcpu->arch.hcr_el2);
702 		kvm_call_hyp(__vgic_v3_restore_vmcr_aprs,
703 			     &vcpu->arch.vgic_cpu.vgic_v3);
704 	}
705 
706 	if (!cpumask_test_cpu(cpu, vcpu->kvm->arch.supported_cpus))
707 		vcpu_set_on_unsupported_cpu(vcpu);
708 }
709 
710 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
711 {
712 	if (is_protected_kvm_enabled()) {
713 		kvm_call_hyp(__vgic_v3_save_aprs, &vcpu->arch.vgic_cpu.vgic_v3);
714 		kvm_call_hyp_nvhe(__pkvm_vcpu_put);
715 	}
716 
717 	kvm_vcpu_put_debug(vcpu);
718 	kvm_arch_vcpu_put_fp(vcpu);
719 	if (has_vhe())
720 		kvm_vcpu_put_vhe(vcpu);
721 	kvm_timer_vcpu_put(vcpu);
722 	kvm_vgic_put(vcpu);
723 	kvm_vcpu_pmu_restore_host(vcpu);
724 	if (vcpu_has_nv(vcpu))
725 		kvm_vcpu_put_hw_mmu(vcpu);
726 	kvm_arm_vmid_clear_active();
727 
728 	vcpu_clear_on_unsupported_cpu(vcpu);
729 	vcpu->cpu = -1;
730 }
731 
732 static void __kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu)
733 {
734 	WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
735 	kvm_make_request(KVM_REQ_SLEEP, vcpu);
736 	kvm_vcpu_kick(vcpu);
737 }
738 
739 void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu)
740 {
741 	spin_lock(&vcpu->arch.mp_state_lock);
742 	__kvm_arm_vcpu_power_off(vcpu);
743 	spin_unlock(&vcpu->arch.mp_state_lock);
744 }
745 
746 bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu)
747 {
748 	return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_STOPPED;
749 }
750 
751 static void kvm_arm_vcpu_suspend(struct kvm_vcpu *vcpu)
752 {
753 	WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_SUSPENDED);
754 	kvm_make_request(KVM_REQ_SUSPEND, vcpu);
755 	kvm_vcpu_kick(vcpu);
756 }
757 
758 static bool kvm_arm_vcpu_suspended(struct kvm_vcpu *vcpu)
759 {
760 	return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_SUSPENDED;
761 }
762 
763 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
764 				    struct kvm_mp_state *mp_state)
765 {
766 	*mp_state = READ_ONCE(vcpu->arch.mp_state);
767 
768 	return 0;
769 }
770 
771 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
772 				    struct kvm_mp_state *mp_state)
773 {
774 	int ret = 0;
775 
776 	spin_lock(&vcpu->arch.mp_state_lock);
777 
778 	switch (mp_state->mp_state) {
779 	case KVM_MP_STATE_RUNNABLE:
780 		WRITE_ONCE(vcpu->arch.mp_state, *mp_state);
781 		break;
782 	case KVM_MP_STATE_STOPPED:
783 		__kvm_arm_vcpu_power_off(vcpu);
784 		break;
785 	case KVM_MP_STATE_SUSPENDED:
786 		kvm_arm_vcpu_suspend(vcpu);
787 		break;
788 	default:
789 		ret = -EINVAL;
790 	}
791 
792 	spin_unlock(&vcpu->arch.mp_state_lock);
793 
794 	return ret;
795 }
796 
797 /**
798  * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled
799  * @v:		The VCPU pointer
800  *
801  * If the guest CPU is not waiting for interrupts or an interrupt line is
802  * asserted, the CPU is by definition runnable.
803  */
804 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
805 {
806 	bool irq_lines = *vcpu_hcr(v) & (HCR_VI | HCR_VF | HCR_VSE);
807 
808 	return ((irq_lines || kvm_vgic_vcpu_pending_irq(v))
809 		&& !kvm_arm_vcpu_stopped(v) && !v->arch.pause);
810 }
811 
812 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
813 {
814 	return vcpu_mode_priv(vcpu);
815 }
816 
817 #ifdef CONFIG_GUEST_PERF_EVENTS
818 unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
819 {
820 	return *vcpu_pc(vcpu);
821 }
822 #endif
823 
824 static void kvm_init_mpidr_data(struct kvm *kvm)
825 {
826 	struct kvm_mpidr_data *data = NULL;
827 	unsigned long c, mask, nr_entries;
828 	u64 aff_set = 0, aff_clr = ~0UL;
829 	struct kvm_vcpu *vcpu;
830 
831 	mutex_lock(&kvm->arch.config_lock);
832 
833 	if (rcu_access_pointer(kvm->arch.mpidr_data) ||
834 	    atomic_read(&kvm->online_vcpus) == 1)
835 		goto out;
836 
837 	kvm_for_each_vcpu(c, vcpu, kvm) {
838 		u64 aff = kvm_vcpu_get_mpidr_aff(vcpu);
839 		aff_set |= aff;
840 		aff_clr &= aff;
841 	}
842 
843 	/*
844 	 * A significant bit can be either 0 or 1, and will only appear in
845 	 * aff_set. Use aff_clr to weed out the useless stuff.
846 	 */
847 	mask = aff_set ^ aff_clr;
848 	nr_entries = BIT_ULL(hweight_long(mask));
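	/*
	 * For example, if the vCPUs' MPIDRs differ only in Aff0[1:0], mask
	 * ends up as 0x3 and the compressed index table needs 4 entries.
	 */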
849 
850 	/*
851 	 * Don't let userspace fool us. If we need more than a single page
852 	 * to describe the compressed MPIDR array, just fall back to the
853 	 * iterative method. Single vcpu VMs do not need this either.
854 	 */
855 	if (struct_size(data, cmpidr_to_idx, nr_entries) <= PAGE_SIZE)
856 		data = kzalloc_flex(*data, cmpidr_to_idx, nr_entries,
857 				    GFP_KERNEL_ACCOUNT);
858 
859 	if (!data)
860 		goto out;
861 
862 	data->mpidr_mask = mask;
863 
864 	kvm_for_each_vcpu(c, vcpu, kvm) {
865 		u64 aff = kvm_vcpu_get_mpidr_aff(vcpu);
866 		u16 index = kvm_mpidr_index(data, aff);
867 
868 		data->cmpidr_to_idx[index] = c;
869 	}
870 
871 	rcu_assign_pointer(kvm->arch.mpidr_data, data);
872 out:
873 	mutex_unlock(&kvm->arch.config_lock);
874 }
875 
876 /*
877  * Handle both the initialisation that is being done when the vcpu is
878  * run for the first time, as well as the updates that must be
879  * performed each time we get a new thread dealing with this vcpu.
880  */
881 int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
882 {
883 	struct kvm *kvm = vcpu->kvm;
884 	int ret;
885 
886 	if (!kvm_vcpu_initialized(vcpu))
887 		return -ENOEXEC;
888 
889 	if (!kvm_arm_vcpu_is_finalized(vcpu))
890 		return -EPERM;
891 
892 	if (likely(vcpu_has_run_once(vcpu)))
893 		return 0;
894 
895 	kvm_init_mpidr_data(kvm);
896 
897 	if (likely(irqchip_in_kernel(kvm))) {
898 		/*
899 		 * Map the VGIC hardware resources before running a vcpu the
900 		 * first time on this VM.
901 		 */
902 		ret = kvm_vgic_map_resources(kvm);
903 		if (ret)
904 			return ret;
905 	}
906 
907 	ret = kvm_finalize_sys_regs(vcpu);
908 	if (ret)
909 		return ret;
910 
911 	if (vcpu_has_nv(vcpu)) {
912 		ret = kvm_vcpu_allocate_vncr_tlb(vcpu);
913 		if (ret)
914 			return ret;
915 
916 		ret = kvm_vgic_vcpu_nv_init(vcpu);
917 		if (ret)
918 			return ret;
919 	}
920 
921 	/*
922 	 * This needs to happen after any restriction has been applied
923 	 * to the feature set.
924 	 */
925 	kvm_calculate_traps(vcpu);
926 
927 	ret = kvm_timer_enable(vcpu);
928 	if (ret)
929 		return ret;
930 
931 	if (kvm_vcpu_has_pmu(vcpu)) {
932 		ret = kvm_arm_pmu_v3_enable(vcpu);
933 		if (ret)
934 			return ret;
935 	}
936 
937 	if (is_protected_kvm_enabled()) {
938 		ret = pkvm_create_hyp_vm(kvm);
939 		if (ret)
940 			return ret;
941 
942 		ret = pkvm_create_hyp_vcpu(vcpu);
943 		if (ret)
944 			return ret;
945 	}
946 
947 	mutex_lock(&kvm->arch.config_lock);
948 	set_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags);
949 	mutex_unlock(&kvm->arch.config_lock);
950 
951 	return ret;
952 }
953 
954 bool kvm_arch_intc_initialized(struct kvm *kvm)
955 {
956 	return vgic_initialized(kvm);
957 }
958 
959 void kvm_arm_halt_guest(struct kvm *kvm)
960 {
961 	unsigned long i;
962 	struct kvm_vcpu *vcpu;
963 
964 	kvm_for_each_vcpu(i, vcpu, kvm)
965 		vcpu->arch.pause = true;
966 	kvm_make_all_cpus_request(kvm, KVM_REQ_SLEEP);
967 }
968 
969 void kvm_arm_resume_guest(struct kvm *kvm)
970 {
971 	unsigned long i;
972 	struct kvm_vcpu *vcpu;
973 
974 	kvm_for_each_vcpu(i, vcpu, kvm) {
975 		vcpu->arch.pause = false;
976 		__kvm_vcpu_wake_up(vcpu);
977 	}
978 }
979 
980 static void kvm_vcpu_sleep(struct kvm_vcpu *vcpu)
981 {
982 	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);
983 
984 	rcuwait_wait_event(wait,
985 			   (!kvm_arm_vcpu_stopped(vcpu)) && (!vcpu->arch.pause),
986 			   TASK_INTERRUPTIBLE);
987 
988 	if (kvm_arm_vcpu_stopped(vcpu) || vcpu->arch.pause) {
989 		/* Awaken to handle a signal, request we sleep again later. */
990 		kvm_make_request(KVM_REQ_SLEEP, vcpu);
991 	}
992 
993 	/*
994 	 * Make sure we will observe a potential reset request if we've
995 	 * observed a change to the power state. Pairs with the smp_wmb() in
996 	 * kvm_psci_vcpu_on().
997 	 */
998 	smp_rmb();
999 }
1000 
1001 /**
1002  * kvm_vcpu_wfi - emulate Wait-For-Interrupt behavior
1003  * @vcpu:	The VCPU pointer
1004  *
1005  * Suspend execution of a vCPU until a valid wake event is detected, i.e. until
1006  * the vCPU is runnable.  The vCPU may or may not be scheduled out, depending
1007  * on when a wake event arrives, e.g. there may already be a pending wake event.
1008  */
1009 void kvm_vcpu_wfi(struct kvm_vcpu *vcpu)
1010 {
1011 	/*
1012 	 * Sync back the state of the GIC CPU interface so that we have
1013 	 * the latest PMR and group enables. This ensures that
1014 	 * kvm_arch_vcpu_runnable has up-to-date data to decide whether
1015 	 * we have pending interrupts, e.g. when determining if the
1016 	 * vCPU should block.
1017 	 *
1018 	 * For the same reason, we want to tell GICv4 that we need
1019 	 * doorbells to be signalled, should an interrupt become pending.
1020 	 */
1021 	preempt_disable();
1022 	vcpu_set_flag(vcpu, IN_WFI);
1023 	kvm_vgic_put(vcpu);
1024 	preempt_enable();
1025 
1026 	kvm_vcpu_halt(vcpu);
1027 	vcpu_clear_flag(vcpu, IN_WFIT);
1028 
1029 	preempt_disable();
1030 	vcpu_clear_flag(vcpu, IN_WFI);
1031 	kvm_vgic_load(vcpu);
1032 	preempt_enable();
1033 }
1034 
1035 static int kvm_vcpu_suspend(struct kvm_vcpu *vcpu)
1036 {
1037 	if (!kvm_arm_vcpu_suspended(vcpu))
1038 		return 1;
1039 
1040 	kvm_vcpu_wfi(vcpu);
1041 
1042 	/*
1043 	 * The suspend state is sticky; we do not leave it until userspace
1044 	 * explicitly marks the vCPU as runnable. Request that we suspend again
1045 	 * later.
1046 	 */
1047 	kvm_make_request(KVM_REQ_SUSPEND, vcpu);
1048 
1049 	/*
1050 	 * Check to make sure the vCPU is actually runnable. If so, exit to
1051 	 * userspace informing it of the wakeup condition.
1052 	 */
1053 	if (kvm_arch_vcpu_runnable(vcpu)) {
1054 		memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
1055 		vcpu->run->system_event.type = KVM_SYSTEM_EVENT_WAKEUP;
1056 		vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
1057 		return 0;
1058 	}
1059 
1060 	/*
1061 	 * Otherwise, we were unblocked to process a different event, such as a
1062 	 * pending signal. Return 1 and allow kvm_arch_vcpu_ioctl_run() to
1063 	 * process the event.
1064 	 */
1065 	return 1;
1066 }
1067 
1068 /**
1069  * check_vcpu_requests - check and handle pending vCPU requests
1070  * @vcpu:	the VCPU pointer
1071  *
1072  * Return: 1 if we should enter the guest
1073  *	   0 if we should exit to userspace
1074  *	   < 0 if we should exit to userspace, where the return value indicates
1075  *	   an error
1076  */
1077 static int check_vcpu_requests(struct kvm_vcpu *vcpu)
1078 {
1079 	if (kvm_request_pending(vcpu)) {
1080 		if (kvm_check_request(KVM_REQ_VM_DEAD, vcpu))
1081 			return -EIO;
1082 
1083 		if (kvm_check_request(KVM_REQ_SLEEP, vcpu))
1084 			kvm_vcpu_sleep(vcpu);
1085 
1086 		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
1087 			kvm_reset_vcpu(vcpu);
1088 
1089 		/*
1090 		 * Clear IRQ_PENDING requests that were made to guarantee
1091 		 * that a VCPU sees new virtual interrupts.
1092 		 */
1093 		kvm_check_request(KVM_REQ_IRQ_PENDING, vcpu);
1094 
1095 		/* Process interrupts deactivated through a trap */
1096 		if (kvm_check_request(KVM_REQ_VGIC_PROCESS_UPDATE, vcpu))
1097 			kvm_vgic_process_async_update(vcpu);
1098 
1099 		if (kvm_check_request(KVM_REQ_RECORD_STEAL, vcpu))
1100 			kvm_update_stolen_time(vcpu);
1101 
1102 		if (kvm_check_request(KVM_REQ_RELOAD_GICv4, vcpu)) {
1103 			/* The distributor enable bits were changed */
1104 			preempt_disable();
1105 			vgic_v4_put(vcpu);
1106 			vgic_v4_load(vcpu);
1107 			preempt_enable();
1108 		}
1109 
1110 		if (kvm_check_request(KVM_REQ_RELOAD_PMU, vcpu))
1111 			kvm_vcpu_reload_pmu(vcpu);
1112 
1113 		if (kvm_check_request(KVM_REQ_RESYNC_PMU_EL0, vcpu))
1114 			kvm_vcpu_pmu_restore_guest(vcpu);
1115 
1116 		if (kvm_check_request(KVM_REQ_SUSPEND, vcpu))
1117 			return kvm_vcpu_suspend(vcpu);
1118 
1119 		if (kvm_dirty_ring_check_request(vcpu))
1120 			return 0;
1121 
1122 		check_nested_vcpu_requests(vcpu);
1123 	}
1124 
1125 	return 1;
1126 }
1127 
1128 static bool vcpu_mode_is_bad_32bit(struct kvm_vcpu *vcpu)
1129 {
1130 	if (likely(!vcpu_mode_is_32bit(vcpu)))
1131 		return false;
1132 
1133 	if (vcpu_has_nv(vcpu))
1134 		return true;
1135 
1136 	return !kvm_supports_32bit_el0();
1137 }
1138 
1139 /**
1140  * kvm_vcpu_exit_request - returns true if the VCPU should *not* enter the guest
1141  * @vcpu:	The VCPU pointer
1142  * @ret:	Pointer to write optional return code
1143  *
1144  * Returns: true if the VCPU needs to return to a preemptible + interruptible
1145  *	    kernel context and skip guest entry.
1146  *
1147  * This function disambiguates between two different types of exits: exits to a
1148  * preemptible + interruptible kernel context and exits to userspace. For an
1149  * exit to userspace, this function will write the return code to ret and return
1150  * true. For an exit to preemptible + interruptible kernel context (i.e. check
1151  * for pending work and re-enter), return true without writing to ret.
1152  */
1153 static bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu, int *ret)
1154 {
1155 	struct kvm_run *run = vcpu->run;
1156 
1157 	/*
1158 	 * If we're using a userspace irqchip, then check if we need
1159 	 * to tell a userspace irqchip about timer or PMU level
1160 	 * changes and if so, exit to userspace (the actual level
1161 	 * state gets updated in kvm_timer_update_run and
1162 	 * kvm_pmu_update_run below).
1163 	 */
1164 	if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
1165 		if (kvm_timer_should_notify_user(vcpu) ||
1166 		    kvm_pmu_should_notify_user(vcpu)) {
1167 			*ret = -EINTR;
1168 			run->exit_reason = KVM_EXIT_INTR;
1169 			return true;
1170 		}
1171 	}
1172 
1173 	if (unlikely(vcpu_on_unsupported_cpu(vcpu))) {
1174 		run->exit_reason = KVM_EXIT_FAIL_ENTRY;
1175 		run->fail_entry.hardware_entry_failure_reason = KVM_EXIT_FAIL_ENTRY_CPU_UNSUPPORTED;
1176 		run->fail_entry.cpu = smp_processor_id();
1177 		*ret = 0;
1178 		return true;
1179 	}
1180 
1181 	return kvm_request_pending(vcpu) ||
1182 			xfer_to_guest_mode_work_pending();
1183 }
1184 
1185 /*
1186  * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
1187  * the vCPU is running.
1188  *
1189  * This must be noinstr as instrumentation may make use of RCU, and this is not
1190  * safe during the EQS.
1191  */
1192 static int noinstr kvm_arm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
1193 {
1194 	int ret;
1195 
1196 	guest_state_enter_irqoff();
1197 	ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
1198 	guest_state_exit_irqoff();
1199 
1200 	return ret;
1201 }
1202 
1203 /**
1204  * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
1205  * @vcpu:	The VCPU pointer
1206  *
1207  * This function is called through the VCPU_RUN ioctl called from user space. It
1208  * will execute VM code in a loop until the time slice for the process is used
1209  * or some emulation is needed from user space in which case the function will
1210  * return with return value 0 and with the kvm_run structure filled in with the
1211  * required data for the requested emulation.
1212  */
1213 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
1214 {
1215 	struct kvm_run *run = vcpu->run;
1216 	int ret;
1217 
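	/*
	 * If the previous exit was for MMIO emulation, complete it (e.g.
	 * commit the data returned by userspace for a load) before entering
	 * the guest again.
	 */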
1218 	if (run->exit_reason == KVM_EXIT_MMIO) {
1219 		ret = kvm_handle_mmio_return(vcpu);
1220 		if (ret <= 0)
1221 			return ret;
1222 	}
1223 
1224 	vcpu_load(vcpu);
1225 
1226 	if (!vcpu->wants_to_run) {
1227 		ret = -EINTR;
1228 		goto out;
1229 	}
1230 
1231 	kvm_sigset_activate(vcpu);
1232 
1233 	ret = 1;
1234 	run->exit_reason = KVM_EXIT_UNKNOWN;
1235 	run->flags = 0;
1236 	while (ret > 0) {
1237 		/*
1238 		 * Check conditions before entering the guest
1239 		 */
1240 		ret = kvm_xfer_to_guest_mode_handle_work(vcpu);
1241 		if (!ret)
1242 			ret = 1;
1243 
1244 		if (ret > 0)
1245 			ret = check_vcpu_requests(vcpu);
1246 
1247 		/*
1248 		 * Preparing the interrupts to be injected also
1249 		 * involves poking the GIC, which must be done in a
1250 		 * non-preemptible context.
1251 		 */
1252 		preempt_disable();
1253 
1254 		kvm_nested_flush_hwstate(vcpu);
1255 
1256 		if (kvm_vcpu_has_pmu(vcpu))
1257 			kvm_pmu_flush_hwstate(vcpu);
1258 
1259 		local_irq_disable();
1260 
1261 		kvm_vgic_flush_hwstate(vcpu);
1262 
1263 		kvm_pmu_update_vcpu_events(vcpu);
1264 
1265 		/*
1266 		 * Ensure we set mode to IN_GUEST_MODE after we disable
1267 		 * interrupts and before the final VCPU requests check.
1268 		 * See the comment in kvm_vcpu_exiting_guest_mode() and
1269 		 * Documentation/virt/kvm/vcpu-requests.rst
1270 		 */
1271 		smp_store_mb(vcpu->mode, IN_GUEST_MODE);
1272 
1273 		if (ret <= 0 || kvm_vcpu_exit_request(vcpu, &ret)) {
1274 			vcpu->mode = OUTSIDE_GUEST_MODE;
1275 			isb(); /* Ensure work in x_flush_hwstate is committed */
1276 			if (kvm_vcpu_has_pmu(vcpu))
1277 				kvm_pmu_sync_hwstate(vcpu);
1278 			if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
1279 				kvm_timer_sync_user(vcpu);
1280 			kvm_vgic_sync_hwstate(vcpu);
1281 			local_irq_enable();
1282 			preempt_enable();
1283 			continue;
1284 		}
1285 
1286 		kvm_arch_vcpu_ctxflush_fp(vcpu);
1287 
1288 		/**************************************************************
1289 		 * Enter the guest
1290 		 */
1291 		trace_kvm_entry(*vcpu_pc(vcpu));
1292 		guest_timing_enter_irqoff();
1293 
1294 		ret = kvm_arm_vcpu_enter_exit(vcpu);
1295 
1296 		vcpu->mode = OUTSIDE_GUEST_MODE;
1297 		vcpu->stat.exits++;
1298 		/*
1299 		 * Back from guest
1300 		 *************************************************************/
1301 
1302 		/*
1303 		 * We must sync the PMU state before the vgic state so
1304 		 * that the vgic can properly sample the updated state of the
1305 		 * interrupt line.
1306 		 */
1307 		if (kvm_vcpu_has_pmu(vcpu))
1308 			kvm_pmu_sync_hwstate(vcpu);
1309 
1310 		/*
1311 		 * Sync the vgic state before syncing the timer state because
1312 		 * the timer code needs to know if the virtual timer
1313 		 * interrupts are active.
1314 		 */
1315 		kvm_vgic_sync_hwstate(vcpu);
1316 
1317 		/*
1318 		 * Sync the timer hardware state before enabling interrupts as
1319 		 * we don't want vtimer interrupts to race with syncing the
1320 		 * timer virtual interrupt state.
1321 		 */
1322 		if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
1323 			kvm_timer_sync_user(vcpu);
1324 
1325 		if (is_hyp_ctxt(vcpu))
1326 			kvm_timer_sync_nested(vcpu);
1327 
1328 		kvm_arch_vcpu_ctxsync_fp(vcpu);
1329 
1330 		/*
1331 		 * We must ensure that any pending interrupts are taken before
1332 		 * we exit guest timing so that timer ticks are accounted as
1333 		 * guest time. Transiently unmask interrupts so that any
1334 		 * pending interrupts are taken.
1335 		 *
1336 		 * Per ARM DDI 0487G.b section D1.13.4, an ISB (or other
1337 		 * context synchronization event) is necessary to ensure that
1338 		 * pending interrupts are taken.
1339 		 */
1340 		if (ARM_EXCEPTION_CODE(ret) == ARM_EXCEPTION_IRQ) {
1341 			local_irq_enable();
1342 			isb();
1343 			local_irq_disable();
1344 		}
1345 
1346 		guest_timing_exit_irqoff();
1347 
1348 		local_irq_enable();
1349 
1350 		trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
1351 
1352 		/* Exit types that need handling before we can be preempted */
1353 		handle_exit_early(vcpu, ret);
1354 
1355 		kvm_nested_sync_hwstate(vcpu);
1356 
1357 		preempt_enable();
1358 
1359 		/*
1360 		 * The ARMv8 architecture doesn't give the hypervisor
1361 		 * a mechanism to prevent a guest from dropping to AArch32 EL0
1362 		 * if implemented by the CPU. If we spot the guest in such
1363 			 * a state after deciding it wasn't supposed to do so (like
1364 		 * with the asymmetric AArch32 case), return to userspace with
1365 		 * a fatal error.
1366 		 */
1367 		if (vcpu_mode_is_bad_32bit(vcpu)) {
1368 			/*
1369 			 * As we have caught the guest red-handed, decide that
1370 			 * it isn't fit for purpose anymore by making the vcpu
1371 			 * invalid. The VMM can try and fix it by issuing a
1372 			 * KVM_ARM_VCPU_INIT if it really wants to.
1373 			 */
1374 			vcpu_clear_flag(vcpu, VCPU_INITIALIZED);
1375 			ret = ARM_EXCEPTION_IL;
1376 		}
1377 
1378 		ret = handle_exit(vcpu, ret);
1379 	}
1380 
1381 	/* Tell userspace about in-kernel device output levels */
1382 	if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
1383 		kvm_timer_update_run(vcpu);
1384 		kvm_pmu_update_run(vcpu);
1385 	}
1386 
1387 	kvm_sigset_deactivate(vcpu);
1388 
1389 out:
1390 	/*
1391 	 * In the unlikely event that we are returning to userspace
1392 	 * with pending exceptions or PC adjustment, commit these
1393 	 * adjustments in order to give userspace a consistent view of
1394 	 * the vcpu state. Note that this relies on __kvm_adjust_pc()
1395 	 * being preempt-safe on VHE.
1396 	 */
1397 	if (unlikely(vcpu_get_flag(vcpu, PENDING_EXCEPTION) ||
1398 		     vcpu_get_flag(vcpu, INCREMENT_PC)))
1399 		kvm_call_hyp(__kvm_adjust_pc, vcpu);
1400 
1401 	vcpu_put(vcpu);
1402 	return ret;
1403 }
1404 
1405 static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
1406 {
1407 	int bit_index;
1408 	bool set;
1409 	unsigned long *hcr;
1410 
1411 	if (number == KVM_ARM_IRQ_CPU_IRQ)
1412 		bit_index = __ffs(HCR_VI);
1413 	else /* KVM_ARM_IRQ_CPU_FIQ */
1414 		bit_index = __ffs(HCR_VF);
1415 
1416 	hcr = vcpu_hcr(vcpu);
1417 	if (level)
1418 		set = test_and_set_bit(bit_index, hcr);
1419 	else
1420 		set = test_and_clear_bit(bit_index, hcr);
1421 
1422 	/*
1423 	 * If we didn't change anything, no need to wake up or kick other CPUs
1424 	 */
1425 	if (set == level)
1426 		return 0;
1427 
1428 	/*
1429 	 * The vcpu irq_lines field was updated, wake up sleeping VCPUs and
1430 	 * trigger a world-switch round on the running physical CPU to set the
1431 	 * virtual IRQ/FIQ fields in the HCR appropriately.
1432 	 */
1433 	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
1434 	kvm_vcpu_kick(vcpu);
1435 
1436 	return 0;
1437 }
1438 
1439 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
1440 			  bool line_status)
1441 {
1442 	u32 irq = irq_level->irq;
1443 	unsigned int irq_type, vcpu_id, irq_num;
1444 	struct kvm_vcpu *vcpu = NULL;
1445 	bool level = irq_level->level;
1446 
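	/*
	 * The irq value packs the interrupt type, the target vcpu index
	 * (split across two fields for large VMs) and the interrupt number;
	 * the exact bit layout is documented with KVM_IRQ_LINE in
	 * Documentation/virt/kvm/api.rst.
	 */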
1447 	irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
1448 	vcpu_id = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
1449 	vcpu_id += ((irq >> KVM_ARM_IRQ_VCPU2_SHIFT) & KVM_ARM_IRQ_VCPU2_MASK) * (KVM_ARM_IRQ_VCPU_MASK + 1);
1450 	irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;
1451 
1452 	trace_kvm_irq_line(irq_type, vcpu_id, irq_num, irq_level->level);
1453 
1454 	switch (irq_type) {
1455 	case KVM_ARM_IRQ_TYPE_CPU:
1456 		if (irqchip_in_kernel(kvm))
1457 			return -ENXIO;
1458 
1459 		vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
1460 		if (!vcpu)
1461 			return -EINVAL;
1462 
1463 		if (irq_num > KVM_ARM_IRQ_CPU_FIQ)
1464 			return -EINVAL;
1465 
1466 		return vcpu_interrupt_line(vcpu, irq_num, level);
1467 	case KVM_ARM_IRQ_TYPE_PPI:
1468 		if (!irqchip_in_kernel(kvm))
1469 			return -ENXIO;
1470 
1471 		vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
1472 		if (!vcpu)
1473 			return -EINVAL;
1474 
1475 		if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS)
1476 			return -EINVAL;
1477 
1478 		return kvm_vgic_inject_irq(kvm, vcpu, irq_num, level, NULL);
1479 	case KVM_ARM_IRQ_TYPE_SPI:
1480 		if (!irqchip_in_kernel(kvm))
1481 			return -ENXIO;
1482 
1483 		if (irq_num < VGIC_NR_PRIVATE_IRQS)
1484 			return -EINVAL;
1485 
1486 		return kvm_vgic_inject_irq(kvm, NULL, irq_num, level, NULL);
1487 	}
1488 
1489 	return -EINVAL;
1490 }
1491 
1492 static unsigned long system_supported_vcpu_features(void)
1493 {
1494 	unsigned long features = KVM_VCPU_VALID_FEATURES;
1495 
1496 	if (!cpus_have_final_cap(ARM64_HAS_32BIT_EL1))
1497 		clear_bit(KVM_ARM_VCPU_EL1_32BIT, &features);
1498 
1499 	if (!kvm_supports_guest_pmuv3())
1500 		clear_bit(KVM_ARM_VCPU_PMU_V3, &features);
1501 
1502 	if (!system_supports_sve())
1503 		clear_bit(KVM_ARM_VCPU_SVE, &features);
1504 
1505 	if (!kvm_has_full_ptr_auth()) {
1506 		clear_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, &features);
1507 		clear_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, &features);
1508 	}
1509 
1510 	if (!cpus_have_final_cap(ARM64_HAS_NESTED_VIRT))
1511 		clear_bit(KVM_ARM_VCPU_HAS_EL2, &features);
1512 
1513 	return features;
1514 }
1515 
1516 static int kvm_vcpu_init_check_features(struct kvm_vcpu *vcpu,
1517 					const struct kvm_vcpu_init *init)
1518 {
1519 	unsigned long features = init->features[0];
1520 	int i;
1521 
1522 	if (features & ~KVM_VCPU_VALID_FEATURES)
1523 		return -ENOENT;
1524 
1525 	for (i = 1; i < ARRAY_SIZE(init->features); i++) {
1526 		if (init->features[i])
1527 			return -ENOENT;
1528 	}
1529 
1530 	if (features & ~system_supported_vcpu_features())
1531 		return -EINVAL;
1532 
1533 	/*
1534 	 * For now make sure that both address/generic pointer authentication
1535 	 * features are requested by the userspace together.
1536 	 */
1537 	if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, &features) !=
1538 	    test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, &features))
1539 		return -EINVAL;
1540 
1541 	if (!test_bit(KVM_ARM_VCPU_EL1_32BIT, &features))
1542 		return 0;
1543 
1544 	/* MTE is incompatible with AArch32 */
1545 	if (kvm_has_mte(vcpu->kvm))
1546 		return -EINVAL;
1547 
1548 	/* NV is incompatible with AArch32 */
1549 	if (test_bit(KVM_ARM_VCPU_HAS_EL2, &features))
1550 		return -EINVAL;
1551 
1552 	return 0;
1553 }
1554 
1555 static bool kvm_vcpu_init_changed(struct kvm_vcpu *vcpu,
1556 				  const struct kvm_vcpu_init *init)
1557 {
1558 	unsigned long features = init->features[0];
1559 
1560 	return !bitmap_equal(vcpu->kvm->arch.vcpu_features, &features,
1561 			     KVM_VCPU_MAX_FEATURES);
1562 }
1563 
1564 static int kvm_setup_vcpu(struct kvm_vcpu *vcpu)
1565 {
1566 	struct kvm *kvm = vcpu->kvm;
1567 	int ret = 0;
1568 
1569 	/*
1570 	 * When the vCPU has a PMU, but no PMU is set for the guest
1571 	 * yet, set the default one.
1572 	 */
1573 	if (kvm_vcpu_has_pmu(vcpu) && !kvm->arch.arm_pmu)
1574 		ret = kvm_arm_set_default_pmu(kvm);
1575 
1576 	/* Prepare for nested if required */
1577 	if (!ret && vcpu_has_nv(vcpu))
1578 		ret = kvm_vcpu_init_nested(vcpu);
1579 
1580 	return ret;
1581 }
1582 
1583 static int __kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
1584 				 const struct kvm_vcpu_init *init)
1585 {
1586 	unsigned long features = init->features[0];
1587 	struct kvm *kvm = vcpu->kvm;
1588 	int ret = -EINVAL;
1589 
1590 	mutex_lock(&kvm->arch.config_lock);
1591 
1592 	if (test_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags) &&
1593 	    kvm_vcpu_init_changed(vcpu, init))
1594 		goto out_unlock;
1595 
1596 	bitmap_copy(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES);
1597 
1598 	ret = kvm_setup_vcpu(vcpu);
1599 	if (ret)
1600 		goto out_unlock;
1601 
1602 	/* Now we know what it is, we can reset it. */
1603 	kvm_reset_vcpu(vcpu);
1604 
1605 	set_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags);
1606 	vcpu_set_flag(vcpu, VCPU_INITIALIZED);
1607 	ret = 0;
1608 out_unlock:
1609 	mutex_unlock(&kvm->arch.config_lock);
1610 	return ret;
1611 }
1612 
1613 static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
1614 			       const struct kvm_vcpu_init *init)
1615 {
1616 	int ret;
1617 
1618 	if (init->target != KVM_ARM_TARGET_GENERIC_V8 &&
1619 	    init->target != kvm_target_cpu())
1620 		return -EINVAL;
1621 
1622 	ret = kvm_vcpu_init_check_features(vcpu, init);
1623 	if (ret)
1624 		return ret;
1625 
1626 	if (!kvm_vcpu_initialized(vcpu))
1627 		return __kvm_vcpu_set_target(vcpu, init);
1628 
1629 	if (kvm_vcpu_init_changed(vcpu, init))
1630 		return -EINVAL;
1631 
1632 	kvm_reset_vcpu(vcpu);
1633 	return 0;
1634 }
1635 
1636 static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
1637 					 struct kvm_vcpu_init *init)
1638 {
1639 	bool power_off = false;
1640 	int ret;
1641 
1642 	/*
1643 	 * Treat the power-off vCPU feature as ephemeral. Clear the bit to avoid
1644 	 * reflecting it in the finalized feature set, thus limiting its scope
1645 	 * to a single KVM_ARM_VCPU_INIT call.
1646 	 */
1647 	if (init->features[0] & BIT(KVM_ARM_VCPU_POWER_OFF)) {
1648 		init->features[0] &= ~BIT(KVM_ARM_VCPU_POWER_OFF);
1649 		power_off = true;
1650 	}
1651 
1652 	ret = kvm_vcpu_set_target(vcpu, init);
1653 	if (ret)
1654 		return ret;
1655 
1656 	/*
1657 	 * Ensure a rebooted VM will fault in RAM pages and detect if the
1658 	 * guest MMU is turned off and flush the caches as needed.
1659 	 *
1660 	 * S2FWB enforces all memory accesses to RAM being cacheable,
1661 	 * ensuring that the data side is always coherent. We still
1662 	 * need to invalidate the I-cache though, as FWB does *not*
1663 	 * imply CTR_EL0.DIC.
1664 	 */
1665 	if (vcpu_has_run_once(vcpu)) {
1666 		if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
1667 			stage2_unmap_vm(vcpu->kvm);
1668 		else
1669 			icache_inval_all_pou();
1670 	}
1671 
1672 	vcpu_reset_hcr(vcpu);
1673 
1674 	/*
1675 	 * Handle the "start in power-off" case.
1676 	 */
1677 	spin_lock(&vcpu->arch.mp_state_lock);
1678 
1679 	if (power_off)
1680 		__kvm_arm_vcpu_power_off(vcpu);
1681 	else
1682 		WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_RUNNABLE);
1683 
1684 	spin_unlock(&vcpu->arch.mp_state_lock);
1685 
1686 	return 0;
1687 }
1688 
1689 static int kvm_arm_vcpu_set_attr(struct kvm_vcpu *vcpu,
1690 				 struct kvm_device_attr *attr)
1691 {
1692 	int ret = -ENXIO;
1693 
1694 	switch (attr->group) {
1695 	default:
1696 		ret = kvm_arm_vcpu_arch_set_attr(vcpu, attr);
1697 		break;
1698 	}
1699 
1700 	return ret;
1701 }
1702 
1703 static int kvm_arm_vcpu_get_attr(struct kvm_vcpu *vcpu,
1704 				 struct kvm_device_attr *attr)
1705 {
1706 	int ret = -ENXIO;
1707 
1708 	switch (attr->group) {
1709 	default:
1710 		ret = kvm_arm_vcpu_arch_get_attr(vcpu, attr);
1711 		break;
1712 	}
1713 
1714 	return ret;
1715 }
1716 
1717 static int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu,
1718 				 struct kvm_device_attr *attr)
1719 {
1720 	int ret = -ENXIO;
1721 
1722 	switch (attr->group) {
1723 	default:
1724 		ret = kvm_arm_vcpu_arch_has_attr(vcpu, attr);
1725 		break;
1726 	}
1727 
1728 	return ret;
1729 }
1730 
1731 static int kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
1732 				   struct kvm_vcpu_events *events)
1733 {
1734 	memset(events, 0, sizeof(*events));
1735 
1736 	return __kvm_arm_vcpu_get_events(vcpu, events);
1737 }
1738 
1739 static int kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
1740 				   struct kvm_vcpu_events *events)
1741 {
1742 	int i;
1743 
1744 	/* check whether the reserved field is zero */
1745 	for (i = 0; i < ARRAY_SIZE(events->reserved); i++)
1746 		if (events->reserved[i])
1747 			return -EINVAL;
1748 
1749 	/* check whether the pad field is zero */
1750 	for (i = 0; i < ARRAY_SIZE(events->exception.pad); i++)
1751 		if (events->exception.pad[i])
1752 			return -EINVAL;
1753 
1754 	return __kvm_arm_vcpu_set_events(vcpu, events);
1755 }
1756 
1757 long kvm_arch_vcpu_ioctl(struct file *filp,
1758 			 unsigned int ioctl, unsigned long arg)
1759 {
1760 	struct kvm_vcpu *vcpu = filp->private_data;
1761 	void __user *argp = (void __user *)arg;
1762 	struct kvm_device_attr attr;
1763 	long r;
1764 
1765 	switch (ioctl) {
1766 	case KVM_ARM_VCPU_INIT: {
1767 		struct kvm_vcpu_init init;
1768 
1769 		r = -EFAULT;
1770 		if (copy_from_user(&init, argp, sizeof(init)))
1771 			break;
1772 
1773 		r = kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init);
1774 		break;
1775 	}
1776 	case KVM_SET_ONE_REG:
1777 	case KVM_GET_ONE_REG: {
1778 		struct kvm_one_reg reg;
1779 
1780 		r = -ENOEXEC;
1781 		if (unlikely(!kvm_vcpu_initialized(vcpu)))
1782 			break;
1783 
1784 		r = -EFAULT;
1785 		if (copy_from_user(&reg, argp, sizeof(reg)))
1786 			break;
1787 
1788 		/*
1789 		 * We could owe a reset due to PSCI. Handle the pending reset
1790 		 * here to ensure userspace register accesses are ordered after
1791 		 * the reset.
1792 		 */
1793 		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
1794 			kvm_reset_vcpu(vcpu);
1795 
1796 		if (ioctl == KVM_SET_ONE_REG)
1797 			r = kvm_arm_set_reg(vcpu, &reg);
1798 		else
1799 			r = kvm_arm_get_reg(vcpu, &reg);
1800 		break;
1801 	}
1802 	case KVM_GET_REG_LIST: {
1803 		struct kvm_reg_list __user *user_list = argp;
1804 		struct kvm_reg_list reg_list;
1805 		unsigned n;
1806 
1807 		r = -ENOEXEC;
1808 		if (unlikely(!kvm_vcpu_initialized(vcpu)))
1809 			break;
1810 
1811 		r = -EPERM;
1812 		if (!kvm_arm_vcpu_is_finalized(vcpu))
1813 			break;
1814 
1815 		r = -EFAULT;
1816 		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
1817 			break;
1818 		n = reg_list.n;
1819 		reg_list.n = kvm_arm_num_regs(vcpu);
1820 		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
1821 			break;
1822 		r = -E2BIG;
1823 		if (n < reg_list.n)
1824 			break;
1825 		r = kvm_arm_copy_reg_indices(vcpu, user_list->reg);
1826 		break;
1827 	}
1828 	case KVM_SET_DEVICE_ATTR: {
1829 		r = -EFAULT;
1830 		if (copy_from_user(&attr, argp, sizeof(attr)))
1831 			break;
1832 		r = kvm_arm_vcpu_set_attr(vcpu, &attr);
1833 		break;
1834 	}
1835 	case KVM_GET_DEVICE_ATTR: {
1836 		r = -EFAULT;
1837 		if (copy_from_user(&attr, argp, sizeof(attr)))
1838 			break;
1839 		r = kvm_arm_vcpu_get_attr(vcpu, &attr);
1840 		break;
1841 	}
1842 	case KVM_HAS_DEVICE_ATTR: {
1843 		r = -EFAULT;
1844 		if (copy_from_user(&attr, argp, sizeof(attr)))
1845 			break;
1846 		r = kvm_arm_vcpu_has_attr(vcpu, &attr);
1847 		break;
1848 	}
1849 	case KVM_GET_VCPU_EVENTS: {
1850 		struct kvm_vcpu_events events;
1851 
1852 		if (!kvm_vcpu_initialized(vcpu))
1853 			return -ENOEXEC;
1854 
1855 		if (kvm_arm_vcpu_get_events(vcpu, &events))
1856 			return -EINVAL;
1857 
1858 		if (copy_to_user(argp, &events, sizeof(events)))
1859 			return -EFAULT;
1860 
1861 		return 0;
1862 	}
1863 	case KVM_SET_VCPU_EVENTS: {
1864 		struct kvm_vcpu_events events;
1865 
1866 		if (!kvm_vcpu_initialized(vcpu))
1867 			return -ENOEXEC;
1868 
1869 		if (copy_from_user(&events, argp, sizeof(events)))
1870 			return -EFAULT;
1871 
1872 		return kvm_arm_vcpu_set_events(vcpu, &events);
1873 	}
1874 	case KVM_ARM_VCPU_FINALIZE: {
1875 		int what;
1876 
1877 		if (!kvm_vcpu_initialized(vcpu))
1878 			return -ENOEXEC;
1879 
1880 		if (get_user(what, (const int __user *)argp))
1881 			return -EFAULT;
1882 
1883 		return kvm_arm_vcpu_finalize(vcpu, what);
1884 	}
1885 	default:
1886 		r = -EINVAL;
1887 	}
1888 
1889 	return r;
1890 }
1891 
1892 long kvm_arch_vcpu_unlocked_ioctl(struct file *filp, unsigned int ioctl,
1893 				  unsigned long arg)
1894 {
1895 	return -ENOIOCTLCMD;
1896 }
1897 
1898 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
1899 {
1900 
1901 }
1902 
1903 static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
1904 					struct kvm_arm_device_addr *dev_addr)
1905 {
1906 	switch (FIELD_GET(KVM_ARM_DEVICE_ID_MASK, dev_addr->id)) {
1907 	case KVM_ARM_DEVICE_VGIC_V2:
1908 		if (!vgic_present)
1909 			return -ENXIO;
1910 		return kvm_set_legacy_vgic_v2_addr(kvm, dev_addr);
1911 	default:
1912 		return -ENODEV;
1913 	}
1914 }
1915 
1916 static int kvm_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1917 {
1918 	switch (attr->group) {
1919 	case KVM_ARM_VM_SMCCC_CTRL:
1920 		return kvm_vm_smccc_has_attr(kvm, attr);
1921 	default:
1922 		return -ENXIO;
1923 	}
1924 }
1925 
1926 static int kvm_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1927 {
1928 	switch (attr->group) {
1929 	case KVM_ARM_VM_SMCCC_CTRL:
1930 		return kvm_vm_smccc_set_attr(kvm, attr);
1931 	default:
1932 		return -ENXIO;
1933 	}
1934 }
1935 
1936 int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
1937 {
1938 	struct kvm *kvm = filp->private_data;
1939 	void __user *argp = (void __user *)arg;
1940 	struct kvm_device_attr attr;
1941 
1942 	if (is_protected_kvm_enabled() && !kvm_pkvm_ioctl_allowed(kvm, ioctl))
1943 		return -EINVAL;
1944 
1945 	switch (ioctl) {
1946 	case KVM_CREATE_IRQCHIP: {
1947 		int ret;
1948 		if (!vgic_present)
1949 			return -ENXIO;
1950 		mutex_lock(&kvm->lock);
1951 		ret = kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
1952 		mutex_unlock(&kvm->lock);
1953 		return ret;
1954 	}
1955 	case KVM_ARM_SET_DEVICE_ADDR: {
1956 		struct kvm_arm_device_addr dev_addr;
1957 
1958 		if (copy_from_user(&dev_addr, argp, sizeof(dev_addr)))
1959 			return -EFAULT;
1960 		return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
1961 	}
1962 	case KVM_ARM_PREFERRED_TARGET: {
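		/*
		 * Only the generic ARMv8 target is advertised these days;
		 * the rest of the init struct (including the feature bits)
		 * is deliberately left zeroed.
		 */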
1963 		struct kvm_vcpu_init init = {
1964 			.target = KVM_ARM_TARGET_GENERIC_V8,
1965 		};
1966 
1967 		if (copy_to_user(argp, &init, sizeof(init)))
1968 			return -EFAULT;
1969 
1970 		return 0;
1971 	}
1972 	case KVM_ARM_MTE_COPY_TAGS: {
1973 		struct kvm_arm_copy_mte_tags copy_tags;
1974 
1975 		if (copy_from_user(&copy_tags, argp, sizeof(copy_tags)))
1976 			return -EFAULT;
1977 		return kvm_vm_ioctl_mte_copy_tags(kvm, &copy_tags);
1978 	}
1979 	case KVM_ARM_SET_COUNTER_OFFSET: {
1980 		struct kvm_arm_counter_offset offset;
1981 
1982 		if (copy_from_user(&offset, argp, sizeof(offset)))
1983 			return -EFAULT;
1984 		return kvm_vm_ioctl_set_counter_offset(kvm, &offset);
1985 	}
1986 	case KVM_HAS_DEVICE_ATTR: {
1987 		if (copy_from_user(&attr, argp, sizeof(attr)))
1988 			return -EFAULT;
1989 
1990 		return kvm_vm_has_attr(kvm, &attr);
1991 	}
1992 	case KVM_SET_DEVICE_ATTR: {
1993 		if (copy_from_user(&attr, argp, sizeof(attr)))
1994 			return -EFAULT;
1995 
1996 		return kvm_vm_set_attr(kvm, &attr);
1997 	}
1998 	case KVM_ARM_GET_REG_WRITABLE_MASKS: {
1999 		struct reg_mask_range range;
2000 
2001 		if (copy_from_user(&range, argp, sizeof(range)))
2002 			return -EFAULT;
2003 		return kvm_vm_ioctl_get_reg_writable_masks(kvm, &range);
2004 	}
2005 	default:
2006 		return -EINVAL;
2007 	}
2008 }
2009 
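/*
 * Size of the nVHE hypervisor's own per-CPU region, delimited by the EL2
 * object's __per_cpu_start/__per_cpu_end symbols.
 */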
2010 static unsigned long nvhe_percpu_size(void)
2011 {
2012 	return (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_end) -
2013 		(unsigned long)CHOOSE_NVHE_SYM(__per_cpu_start);
2014 }
2015 
2016 static unsigned long nvhe_percpu_order(void)
2017 {
2018 	unsigned long size = nvhe_percpu_size();
2019 
2020 	return size ? get_order(size) : 0;
2021 }
2022 
2023 static size_t pkvm_host_sve_state_order(void)
2024 {
2025 	return get_order(pkvm_host_sve_state_size());
2026 }
2027 
2028 /* A lookup table holding the hypervisor VA for each vector slot */
2029 static void *hyp_spectre_vector_selector[BP_HARDEN_EL2_SLOTS];
2030 
2031 static void kvm_init_vector_slot(void *base, enum arm64_hyp_spectre_vector slot)
2032 {
2033 	hyp_spectre_vector_selector[slot] = __kvm_vector_slot2addr(base, slot);
2034 }
2035 
2036 static int kvm_init_vector_slots(void)
2037 {
2038 	int err;
2039 	void *base;
2040 
2041 	base = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
2042 	kvm_init_vector_slot(base, HYP_VECTOR_DIRECT);
2043 
2044 	base = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
2045 	kvm_init_vector_slot(base, HYP_VECTOR_SPECTRE_DIRECT);
2046 
2047 	if (kvm_system_needs_idmapped_vectors() &&
2048 	    !is_protected_kvm_enabled()) {
2049 		err = create_hyp_exec_mappings(__pa_symbol(__bp_harden_hyp_vecs),
2050 					       __BP_HARDEN_HYP_VECS_SZ, &base);
2051 		if (err)
2052 			return err;
2053 	}
2054 
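	/*
	 * If no idmapped mapping was created above (or in protected mode),
	 * the indirect slots simply alias the Spectre-hardened vectors.
	 */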
2055 	kvm_init_vector_slot(base, HYP_VECTOR_INDIRECT);
2056 	kvm_init_vector_slot(base, HYP_VECTOR_SPECTRE_INDIRECT);
2057 	return 0;
2058 }
2059 
2060 static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
2061 {
2062 	struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
2063 	unsigned long tcr;
2064 
2065 	/*
2066 	 * Calculate the raw per-cpu offset without a translation from the
2067 	 * kernel's mapping to the linear mapping, and store it in tpidr_el2
2068 	 * so that we can use adr_l to access per-cpu variables in EL2.
2069 	 * Also drop the KASAN tag which gets in the way...
2070 	 */
2071 	params->tpidr_el2 = (unsigned long)kasan_reset_tag(per_cpu_ptr_nvhe_sym(__per_cpu_start, cpu)) -
2072 			    (unsigned long)kvm_ksym_ref(CHOOSE_NVHE_SYM(__per_cpu_start));
2073 
2074 	params->mair_el2 = read_sysreg(mair_el1);
2075 
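	/*
	 * Derive TCR_EL2 from the host's TCR_EL1. With hVHE (E2H=1), EL2
	 * uses an EL1-like translation regime, so keep the EL1 layout but
	 * disable TTBR1 walks and HW access/dirty updates. Otherwise, build
	 * a classic nVHE TCR_EL2 value from scratch.
	 */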
2076 	tcr = read_sysreg(tcr_el1);
2077 	if (cpus_have_final_cap(ARM64_KVM_HVHE)) {
2078 		tcr &= ~(TCR_HD | TCR_HA | TCR_A1 | TCR_T0SZ_MASK);
2079 		tcr |= TCR_EPD1_MASK;
2080 	} else {
2081 		unsigned long ips = FIELD_GET(TCR_IPS_MASK, tcr);
2082 
2083 		tcr &= TCR_EL2_MASK;
2084 		tcr |= TCR_EL2_RES1 | FIELD_PREP(TCR_EL2_PS_MASK, ips);
2085 		if (lpa2_is_enabled())
2086 			tcr |= TCR_EL2_DS;
2087 	}
2088 	tcr |= TCR_T0SZ(hyp_va_bits);
2089 	params->tcr_el2 = tcr;
2090 
2091 	params->pgd_pa = kvm_mmu_get_httbr();
2092 	if (is_protected_kvm_enabled())
2093 		params->hcr_el2 = HCR_HOST_NVHE_PROTECTED_FLAGS;
2094 	else
2095 		params->hcr_el2 = HCR_HOST_NVHE_FLAGS;
2096 
2097 	if (system_supports_mte())
2098 		params->hcr_el2 |= HCR_ATA;
2099 	else
2100 		params->hcr_el2 |= HCR_TID5;
2101 
2102 	if (cpus_have_final_cap(ARM64_KVM_HVHE))
2103 		params->hcr_el2 |= HCR_E2H;
2104 	params->vttbr = params->vtcr = 0;
2105 
2106 	/*
2107 	 * Flush the init params from the data cache because the struct will
2108 	 * be read while the MMU is off.
2109 	 */
2110 	kvm_flush_dcache_to_poc(params, sizeof(*params));
2111 }
2112 
2113 static void hyp_install_host_vector(void)
2114 {
2115 	struct kvm_nvhe_init_params *params;
2116 	struct arm_smccc_res res;
2117 
2118 	/* Switch from the HYP stub to our own HYP init vector */
2119 	__hyp_set_vectors(kvm_get_idmap_vector());
2120 
2121 	/*
2122 	 * Call initialization code, and switch to the full blown HYP code.
2123 	 * If the cpucaps haven't been finalized yet, something has gone very
2124 	 * wrong, and hyp will crash and burn when it uses any
2125 	 * cpus_have_*_cap() wrapper.
2126 	 */
2127 	BUG_ON(!system_capabilities_finalized());
2128 	params = this_cpu_ptr_nvhe_sym(kvm_init_params);
2129 	arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_hyp_init), virt_to_phys(params), &res);
2130 	WARN_ON(res.a0 != SMCCC_RET_SUCCESS);
2131 }
2132 
2133 static void cpu_init_hyp_mode(void)
2134 {
2135 	hyp_install_host_vector();
2136 
2137 	/*
2138 	 * Disabling SSBD on a non-VHE system requires us to enable SSBS
2139 	 * at EL2.
2140 	 */
2141 	if (this_cpu_has_cap(ARM64_SSBS) &&
2142 	    arm64_get_spectre_v4_state() == SPECTRE_VULNERABLE) {
2143 		kvm_call_hyp_nvhe(__kvm_enable_ssbs);
2144 	}
2145 }
2146 
2147 static void cpu_hyp_reset(void)
2148 {
2149 	if (!is_kernel_in_hyp_mode())
2150 		__hyp_reset_vectors();
2151 }
2152 
2153 /*
2154  * EL2 vectors can be mapped and rerouted in a number of ways,
2155  * depending on the kernel configuration and CPU present:
2156  *
2157  * - If the CPU is affected by Spectre-v2, the hardening sequence is
2158  *   placed in one of the vector slots, which is executed before jumping
2159  *   to the real vectors.
2160  *
2161  * - If the CPU also has the ARM64_SPECTRE_V3A cap, the slot
2162  *   containing the hardening sequence is mapped next to the idmap page,
2163  *   and executed before jumping to the real vectors.
2164  *
2165  * - If the CPU only has the ARM64_SPECTRE_V3A cap, then an
2166  *   empty slot is selected, mapped next to the idmap page, and
2167  *   executed before jumping to the real vectors.
2168  *
2169  * Note that ARM64_SPECTRE_V3A is somewhat incompatible with
2170  * VHE, as we don't have hypervisor-specific mappings. If the system
2171  * is VHE and yet selects this capability, it will be ignored.
2172  */
2173 static void cpu_set_hyp_vector(void)
2174 {
2175 	struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);
2176 	void *vector = hyp_spectre_vector_selector[data->slot];
2177 
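	/*
	 * In protected mode the hypervisor resolves the vector address
	 * itself; the host only hands over the slot index by hypercall.
	 */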
2178 	if (!is_protected_kvm_enabled())
2179 		*this_cpu_ptr_hyp_sym(kvm_hyp_vector) = (unsigned long)vector;
2180 	else
2181 		kvm_call_hyp_nvhe(__pkvm_cpu_set_vector, data->slot);
2182 }
2183 
2184 static void cpu_hyp_init_context(void)
2185 {
2186 	kvm_init_host_cpu_context(host_data_ptr(host_ctxt));
2187 	kvm_init_host_debug_data();
2188 
2189 	if (!is_kernel_in_hyp_mode())
2190 		cpu_init_hyp_mode();
2191 }
2192 
2193 static void cpu_hyp_init_features(void)
2194 {
2195 	cpu_set_hyp_vector();
2196 
2197 	if (is_kernel_in_hyp_mode()) {
2198 		kvm_timer_init_vhe();
2199 		kvm_debug_init_vhe();
2200 	}
2201 
2202 	if (vgic_present)
2203 		kvm_vgic_init_cpu_hardware();
2204 }
2205 
2206 static void cpu_hyp_reinit(void)
2207 {
2208 	cpu_hyp_reset();
2209 	cpu_hyp_init_context();
2210 	cpu_hyp_init_features();
2211 }
2212 
2213 static void cpu_hyp_init(void *discard)
2214 {
2215 	if (!__this_cpu_read(kvm_hyp_initialized)) {
2216 		cpu_hyp_reinit();
2217 		__this_cpu_write(kvm_hyp_initialized, 1);
2218 	}
2219 }
2220 
2221 static void cpu_hyp_uninit(void *discard)
2222 {
2223 	if (!is_protected_kvm_enabled() && __this_cpu_read(kvm_hyp_initialized)) {
2224 		cpu_hyp_reset();
2225 		__this_cpu_write(kvm_hyp_initialized, 0);
2226 	}
2227 }
2228 
2229 int kvm_arch_enable_virtualization_cpu(void)
2230 {
2231 	/*
2232 	 * Most calls to this function are made with migration
2233 	 * disabled, but not with preemption disabled. The former is
2234 	 * enough to ensure correctness, but most of the helpers
2235 	 * expect the latter and will throw a tantrum otherwise.
2236 	 */
2237 	preempt_disable();
2238 
2239 	cpu_hyp_init(NULL);
2240 
2241 	kvm_vgic_cpu_up();
2242 	kvm_timer_cpu_up();
2243 
2244 	preempt_enable();
2245 
2246 	return 0;
2247 }
2248 
2249 void kvm_arch_disable_virtualization_cpu(void)
2250 {
2251 	kvm_timer_cpu_down();
2252 	kvm_vgic_cpu_down();
2253 
2254 	if (!is_protected_kvm_enabled())
2255 		cpu_hyp_uninit(NULL);
2256 }
2257 
2258 #ifdef CONFIG_CPU_PM
2259 static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
2260 				    unsigned long cmd,
2261 				    void *v)
2262 {
2263 	/*
2264 	 * kvm_hyp_initialized is left with its old value over
2265 	 * PM_ENTER->PM_EXIT. It is used to indicate PM_EXIT should
2266 	 * re-enable hyp.
2267 	 */
2268 	switch (cmd) {
2269 	case CPU_PM_ENTER:
2270 		if (__this_cpu_read(kvm_hyp_initialized))
2271 			/*
2272 			 * don't update kvm_hyp_initialized here
2273 			 * so that the hyp will be re-enabled
2274 			 * when we resume. See below.
2275 			 */
2276 			cpu_hyp_reset();
2277 
2278 		return NOTIFY_OK;
2279 	case CPU_PM_ENTER_FAILED:
2280 	case CPU_PM_EXIT:
2281 		if (__this_cpu_read(kvm_hyp_initialized))
2282 			/* The hyp was enabled before suspend. */
2283 			cpu_hyp_reinit();
2284 
2285 		return NOTIFY_OK;
2286 
2287 	default:
2288 		return NOTIFY_DONE;
2289 	}
2290 }
2291 
2292 static struct notifier_block hyp_init_cpu_pm_nb = {
2293 	.notifier_call = hyp_init_cpu_pm_notifier,
2294 };
2295 
2296 static void __init hyp_cpu_pm_init(void)
2297 {
2298 	if (!is_protected_kvm_enabled())
2299 		cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
2300 }
2301 static void __init hyp_cpu_pm_exit(void)
2302 {
2303 	if (!is_protected_kvm_enabled())
2304 		cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb);
2305 }
2306 #else
2307 static inline void __init hyp_cpu_pm_init(void)
2308 {
2309 }
2310 static inline void __init hyp_cpu_pm_exit(void)
2311 {
2312 }
2313 #endif
2314 
2315 static void __init init_cpu_logical_map(void)
2316 {
2317 	unsigned int cpu;
2318 
2319 	/*
2320 	 * Copy the MPIDR <-> logical CPU ID mapping to hyp.
2321 	 * Only copy the set of online CPUs whose features have been checked
2322 	 * against the finalized system capabilities. The hypervisor will not
2323 	 * allow any other CPUs from the `possible` set to boot.
2324 	 */
2325 	for_each_online_cpu(cpu)
2326 		hyp_cpu_logical_map[cpu] = cpu_logical_map(cpu);
2327 }
2328 
2329 #define init_psci_0_1_impl_state(config, what)	\
2330 	config.psci_0_1_ ## what ## _implemented = psci_ops.what
2331 
2332 static bool __init init_psci_relay(void)
2333 {
2334 	/*
2335 	 * If PSCI has not been initialized, protected KVM cannot install
2336 	 * itself on newly booted CPUs.
2337 	 */
2338 	if (!psci_ops.get_version) {
2339 		kvm_err("Cannot initialize protected mode without PSCI\n");
2340 		return false;
2341 	}
2342 
2343 	kvm_host_psci_config.version = psci_ops.get_version();
2344 	kvm_host_psci_config.smccc_version = arm_smccc_get_version();
2345 
2346 	if (kvm_host_psci_config.version == PSCI_VERSION(0, 1)) {
2347 		kvm_host_psci_config.function_ids_0_1 = get_psci_0_1_function_ids();
2348 		init_psci_0_1_impl_state(kvm_host_psci_config, cpu_suspend);
2349 		init_psci_0_1_impl_state(kvm_host_psci_config, cpu_on);
2350 		init_psci_0_1_impl_state(kvm_host_psci_config, cpu_off);
2351 		init_psci_0_1_impl_state(kvm_host_psci_config, migrate);
2352 	}
2353 	return true;
2354 }
2355 
2356 static int __init init_subsystems(void)
2357 {
2358 	int err = 0;
2359 
2360 	/*
2361 	 * Enable hardware so that subsystem initialisation can access EL2.
2362 	 */
2363 	on_each_cpu(cpu_hyp_init, NULL, 1);
2364 
2365 	/*
2366 	 * Register CPU low-power notifier
2367 	 */
2368 	hyp_cpu_pm_init();
2369 
2370 	/*
2371 	 * Init HYP view of VGIC
2372 	 */
2373 	err = kvm_vgic_hyp_init();
2374 	switch (err) {
2375 	case 0:
2376 		vgic_present = true;
2377 		break;
2378 	case -ENODEV:
2379 	case -ENXIO:
2380 		/*
2381 		 * No VGIC? No pKVM for you.
2382 		 *
2383 		 * Protected mode assumes that VGICv3 is present, so no point
2384 		 * in trying to hobble along if vgic initialization fails.
2385 		 */
2386 		if (is_protected_kvm_enabled())
2387 			goto out;
2388 
2389 		/*
2390 		 * Otherwise, userspace could choose to implement a GIC for its
2391 		 * guest on non-cooperative hardware.
2392 		 */
2393 		vgic_present = false;
2394 		err = 0;
2395 		break;
2396 	default:
2397 		goto out;
2398 	}
2399 
2400 	if (kvm_mode == KVM_MODE_NV &&
2401 		!(vgic_present && (kvm_vgic_global_state.type == VGIC_V3 ||
2402 				   kvm_vgic_global_state.has_gcie_v3_compat))) {
2403 		kvm_err("NV support requires GICv3 or GICv5 with legacy support, giving up\n");
2404 		err = -EINVAL;
2405 		goto out;
2406 	}
2407 
2408 	/*
2409 	 * Init HYP architected timer support
2410 	 */
2411 	err = kvm_timer_hyp_init(vgic_present);
2412 	if (err)
2413 		goto out;
2414 
2415 	kvm_register_perf_callbacks();
2416 
2417 out:
2418 	if (err)
2419 		hyp_cpu_pm_exit();
2420 
2421 	if (err || !is_protected_kvm_enabled())
2422 		on_each_cpu(cpu_hyp_uninit, NULL, 1);
2423 
2424 	return err;
2425 }
2426 
2427 static void __init teardown_subsystems(void)
2428 {
2429 	kvm_unregister_perf_callbacks();
2430 	hyp_cpu_pm_exit();
2431 }
2432 
2433 static void __init teardown_hyp_mode(void)
2434 {
2435 	bool free_sve = system_supports_sve() && is_protected_kvm_enabled();
2436 	int cpu;
2437 
2438 	free_hyp_pgds();
2439 	for_each_possible_cpu(cpu) {
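		/*
		 * Skip any CPU that already made it into hyp: its stack and
		 * per-CPU pages are in use at EL2 and must not be handed back
		 * to the page allocator.
		 */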
2440 		if (per_cpu(kvm_hyp_initialized, cpu))
2441 			continue;
2442 
2443 		free_pages(per_cpu(kvm_arm_hyp_stack_base, cpu), NVHE_STACK_SHIFT - PAGE_SHIFT);
2444 
2445 		if (!kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu])
2446 			continue;
2447 
2448 		if (free_sve) {
2449 			struct cpu_sve_state *sve_state;
2450 
2451 			sve_state = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state;
2452 			free_pages((unsigned long) sve_state, pkvm_host_sve_state_order());
2453 		}
2454 
2455 		free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order());
2456 
2457 	}
2458 }
2459 
2460 static int __init do_pkvm_init(u32 hyp_va_bits)
2461 {
2462 	void *per_cpu_base = kvm_ksym_ref(kvm_nvhe_sym(kvm_arm_hyp_percpu_base));
2463 	int ret;
2464 
2465 	preempt_disable();
2466 	cpu_hyp_init_context();
2467 	ret = kvm_call_hyp_nvhe(__pkvm_init, hyp_mem_base, hyp_mem_size,
2468 				num_possible_cpus(), kern_hyp_va(per_cpu_base),
2469 				hyp_va_bits);
2470 	cpu_hyp_init_features();
2471 
2472 	/*
2473 	 * The stub hypercalls are now disabled, so set our local flag to
2474 	 * prevent a later re-init attempt in kvm_arch_enable_virtualization_cpu().
2475 	 */
2476 	__this_cpu_write(kvm_hyp_initialized, 1);
2477 	preempt_enable();
2478 
2479 	return ret;
2480 }
2481 
2482 static u64 get_hyp_id_aa64pfr0_el1(void)
2483 {
2484 	/*
2485 	 * Track whether the system isn't affected by spectre/meltdown in the
2486 	 * hypervisor's view of id_aa64pfr0_el1, used for protected VMs.
2487 	 * Although this is per-CPU, we make it global for simplicity, e.g., not
2488 	 * to have to worry about vcpu migration.
2489 	 *
2490 	 * Unlike for non-protected VMs, userspace cannot override this for
2491 	 * protected VMs.
2492 	 */
2493 	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
2494 
2495 	val &= ~(ID_AA64PFR0_EL1_CSV2 |
2496 		 ID_AA64PFR0_EL1_CSV3);
2497 
2498 	val |= FIELD_PREP(ID_AA64PFR0_EL1_CSV2,
2499 			  arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED);
2500 	val |= FIELD_PREP(ID_AA64PFR0_EL1_CSV3,
2501 			  arm64_get_meltdown_state() == SPECTRE_UNAFFECTED);
2502 
2503 	return val;
2504 }
2505 
2506 static void kvm_hyp_init_symbols(void)
2507 {
2508 	kvm_nvhe_sym(id_aa64pfr0_el1_sys_val) = get_hyp_id_aa64pfr0_el1();
2509 	kvm_nvhe_sym(id_aa64pfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
2510 	kvm_nvhe_sym(id_aa64isar0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR0_EL1);
2511 	kvm_nvhe_sym(id_aa64isar1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR1_EL1);
2512 	kvm_nvhe_sym(id_aa64isar2_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1);
2513 	kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
2514 	kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
2515 	kvm_nvhe_sym(id_aa64mmfr2_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR2_EL1);
2516 	kvm_nvhe_sym(id_aa64smfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64SMFR0_EL1);
2517 	kvm_nvhe_sym(__icache_flags) = __icache_flags;
2518 	kvm_nvhe_sym(kvm_arm_vmid_bits) = kvm_arm_vmid_bits;
2519 
2520 	/* Propagate the FGT state to the nVHE side */
2521 	kvm_nvhe_sym(hfgrtr_masks)  = hfgrtr_masks;
2522 	kvm_nvhe_sym(hfgwtr_masks)  = hfgwtr_masks;
2523 	kvm_nvhe_sym(hfgitr_masks)  = hfgitr_masks;
2524 	kvm_nvhe_sym(hdfgrtr_masks) = hdfgrtr_masks;
2525 	kvm_nvhe_sym(hdfgwtr_masks) = hdfgwtr_masks;
2526 	kvm_nvhe_sym(hafgrtr_masks) = hafgrtr_masks;
2527 	kvm_nvhe_sym(hfgrtr2_masks) = hfgrtr2_masks;
2528 	kvm_nvhe_sym(hfgwtr2_masks) = hfgwtr2_masks;
2529 	kvm_nvhe_sym(hfgitr2_masks) = hfgitr2_masks;
2530 	kvm_nvhe_sym(hdfgrtr2_masks) = hdfgrtr2_masks;
2531 	kvm_nvhe_sym(hdfgwtr2_masks) = hdfgwtr2_masks;
2532 
2533 	/*
2534 	 * Flush entire BSS since part of its data containing init symbols is read
2535 	 * while the MMU is off.
2536 	 */
2537 	kvm_flush_dcache_to_poc(kvm_ksym_ref(__hyp_bss_start),
2538 				kvm_ksym_ref(__hyp_bss_end) - kvm_ksym_ref(__hyp_bss_start));
2539 }
2540 
2541 static int __init kvm_hyp_init_protection(u32 hyp_va_bits)
2542 {
2543 	void *addr = phys_to_virt(hyp_mem_base);
2544 	int ret;
2545 
2546 	ret = create_hyp_mappings(addr, addr + hyp_mem_size, PAGE_HYP);
2547 	if (ret)
2548 		return ret;
2549 
2550 	ret = do_pkvm_init(hyp_va_bits);
2551 	if (ret)
2552 		return ret;
2553 
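	/*
	 * EL2 now runs on the page tables installed by __pkvm_init, so the
	 * host-managed hyp page tables are no longer needed.
	 */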
2554 	free_hyp_pgds();
2555 
2556 	return 0;
2557 }
2558 
2559 static int init_pkvm_host_sve_state(void)
2560 {
2561 	int cpu;
2562 
2563 	if (!system_supports_sve())
2564 		return 0;
2565 
2566 	/* Allocate pages for host sve state in protected mode. */
2567 	for_each_possible_cpu(cpu) {
2568 		struct page *page = alloc_pages(GFP_KERNEL, pkvm_host_sve_state_order());
2569 
2570 		if (!page)
2571 			return -ENOMEM;
2572 
2573 		per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state = page_address(page);
2574 	}
2575 
2576 	/*
2577 	 * Don't map the pages in hyp since these are only used in protected
2578 	 * mode, which will (re)create its own mapping when initialized.
2579 	 */
2580 
2581 	return 0;
2582 }
2583 
2584 /*
2585  * Finalizes the initialization of hyp mode, once everything else is initialized
2586  * and the initialization process cannot fail.
2587  */
2588 static void finalize_init_hyp_mode(void)
2589 {
2590 	int cpu;
2591 
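	/*
	 * Convert the host SVE state pointers to hyp VAs now that the hyp
	 * mappings are final; EL2 dereferences them directly.
	 */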
2592 	if (system_supports_sve() && is_protected_kvm_enabled()) {
2593 		for_each_possible_cpu(cpu) {
2594 			struct cpu_sve_state *sve_state;
2595 
2596 			sve_state = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state;
2597 			per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state =
2598 				kern_hyp_va(sve_state);
2599 		}
2600 	}
2601 }
2602 
2603 static void pkvm_hyp_init_ptrauth(void)
2604 {
2605 	struct kvm_cpu_context *hyp_ctxt;
2606 	int cpu;
2607 
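	/*
	 * Give each CPU's hyp context its own randomly generated pointer
	 * authentication keys.
	 */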
2608 	for_each_possible_cpu(cpu) {
2609 		hyp_ctxt = per_cpu_ptr_nvhe_sym(kvm_hyp_ctxt, cpu);
2610 		hyp_ctxt->sys_regs[APIAKEYLO_EL1] = get_random_long();
2611 		hyp_ctxt->sys_regs[APIAKEYHI_EL1] = get_random_long();
2612 		hyp_ctxt->sys_regs[APIBKEYLO_EL1] = get_random_long();
2613 		hyp_ctxt->sys_regs[APIBKEYHI_EL1] = get_random_long();
2614 		hyp_ctxt->sys_regs[APDAKEYLO_EL1] = get_random_long();
2615 		hyp_ctxt->sys_regs[APDAKEYHI_EL1] = get_random_long();
2616 		hyp_ctxt->sys_regs[APDBKEYLO_EL1] = get_random_long();
2617 		hyp_ctxt->sys_regs[APDBKEYHI_EL1] = get_random_long();
2618 		hyp_ctxt->sys_regs[APGAKEYLO_EL1] = get_random_long();
2619 		hyp_ctxt->sys_regs[APGAKEYHI_EL1] = get_random_long();
2620 	}
2621 }
2622 
2623 /* Inits Hyp-mode on all online CPUs */
2624 static int __init init_hyp_mode(void)
2625 {
2626 	u32 hyp_va_bits = kvm_hyp_va_bits();
2627 	int cpu;
2628 	int err = -ENOMEM;
2629 
2630 	/*
2631 	 * The protected Hyp-mode cannot be initialized if the memory pool
2632 	 * allocation has failed.
2633 	 */
2634 	if (is_protected_kvm_enabled() && !hyp_mem_base)
2635 		goto out_err;
2636 
2637 	/*
2638 	 * Allocate Hyp PGD and setup Hyp identity mapping
2639 	 */
2640 	err = kvm_mmu_init(hyp_va_bits);
2641 	if (err)
2642 		goto out_err;
2643 
2644 	/*
2645 	 * Allocate stack pages for Hypervisor-mode
2646 	 */
2647 	for_each_possible_cpu(cpu) {
2648 		unsigned long stack_base;
2649 
2650 		stack_base = __get_free_pages(GFP_KERNEL, NVHE_STACK_SHIFT - PAGE_SHIFT);
2651 		if (!stack_base) {
2652 			err = -ENOMEM;
2653 			goto out_err;
2654 		}
2655 
2656 		per_cpu(kvm_arm_hyp_stack_base, cpu) = stack_base;
2657 	}
2658 
2659 	/*
2660 	 * Allocate and initialize pages for Hypervisor-mode percpu regions.
2661 	 */
2662 	for_each_possible_cpu(cpu) {
2663 		struct page *page;
2664 		void *page_addr;
2665 
2666 		page = alloc_pages(GFP_KERNEL, nvhe_percpu_order());
2667 		if (!page) {
2668 			err = -ENOMEM;
2669 			goto out_err;
2670 		}
2671 
2672 		page_addr = page_address(page);
2673 		memcpy(page_addr, CHOOSE_NVHE_SYM(__per_cpu_start), nvhe_percpu_size());
2674 		kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu] = (unsigned long)page_addr;
2675 	}
2676 
2677 	/*
2678 	 * Map the Hyp-code called directly from the host
2679 	 */
2680 	err = create_hyp_mappings(kvm_ksym_ref(__hyp_text_start),
2681 				  kvm_ksym_ref(__hyp_text_end), PAGE_HYP_EXEC);
2682 	if (err) {
2683 		kvm_err("Cannot map world-switch code\n");
2684 		goto out_err;
2685 	}
2686 
2687 	err = create_hyp_mappings(kvm_ksym_ref(__hyp_data_start),
2688 				  kvm_ksym_ref(__hyp_data_end), PAGE_HYP);
2689 	if (err) {
2690 		kvm_err("Cannot map .hyp.data section\n");
2691 		goto out_err;
2692 	}
2693 
2694 	err = create_hyp_mappings(kvm_ksym_ref(__hyp_rodata_start),
2695 				  kvm_ksym_ref(__hyp_rodata_end), PAGE_HYP_RO);
2696 	if (err) {
2697 		kvm_err("Cannot map .hyp.rodata section\n");
2698 		goto out_err;
2699 	}
2700 
2701 	err = create_hyp_mappings(kvm_ksym_ref(__start_rodata),
2702 				  kvm_ksym_ref(__end_rodata), PAGE_HYP_RO);
2703 	if (err) {
2704 		kvm_err("Cannot map rodata section\n");
2705 		goto out_err;
2706 	}
2707 
2708 	/*
2709 	 * .hyp.bss is guaranteed to be placed at the beginning of the .bss
2710 	 * section thanks to an assertion in the linker script. Map it RW and
2711 	 * the rest of .bss RO.
2712 	 */
2713 	err = create_hyp_mappings(kvm_ksym_ref(__hyp_bss_start),
2714 				  kvm_ksym_ref(__hyp_bss_end), PAGE_HYP);
2715 	if (err) {
2716 		kvm_err("Cannot map hyp bss section: %d\n", err);
2717 		goto out_err;
2718 	}
2719 
2720 	err = create_hyp_mappings(kvm_ksym_ref(__hyp_bss_end),
2721 				  kvm_ksym_ref(__bss_stop), PAGE_HYP_RO);
2722 	if (err) {
2723 		kvm_err("Cannot map bss section\n");
2724 		goto out_err;
2725 	}
2726 
2727 	/*
2728 	 * Map the Hyp stack pages
2729 	 */
2730 	for_each_possible_cpu(cpu) {
2731 		struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
2732 		char *stack_base = (char *)per_cpu(kvm_arm_hyp_stack_base, cpu);
2733 
2734 		err = create_hyp_stack(__pa(stack_base), &params->stack_hyp_va);
2735 		if (err) {
2736 			kvm_err("Cannot map hyp stack\n");
2737 			goto out_err;
2738 		}
2739 
2740 		/*
2741 		 * Save the stack PA in nvhe_init_params. This will be needed
2742 		 * to recreate the stack mapping in protected nVHE mode.
2743 		 * __hyp_pa() won't do the right thing there, since the stack
2744 		 * has been mapped in the flexible private VA space.
2745 		 */
2746 		params->stack_pa = __pa(stack_base);
2747 	}
2748 
2749 	for_each_possible_cpu(cpu) {
2750 		char *percpu_begin = (char *)kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu];
2751 		char *percpu_end = percpu_begin + nvhe_percpu_size();
2752 
2753 		/* Map Hyp percpu pages */
2754 		err = create_hyp_mappings(percpu_begin, percpu_end, PAGE_HYP);
2755 		if (err) {
2756 			kvm_err("Cannot map hyp percpu region\n");
2757 			goto out_err;
2758 		}
2759 
2760 		/* Prepare the CPU initialization parameters */
2761 		cpu_prepare_hyp_mode(cpu, hyp_va_bits);
2762 	}
2763 
2764 	kvm_hyp_init_symbols();
2765 
2766 	if (is_protected_kvm_enabled()) {
2767 		if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL) &&
2768 		    cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH))
2769 			pkvm_hyp_init_ptrauth();
2770 
2771 		init_cpu_logical_map();
2772 
2773 		if (!init_psci_relay()) {
2774 			err = -ENODEV;
2775 			goto out_err;
2776 		}
2777 
2778 		err = init_pkvm_host_sve_state();
2779 		if (err)
2780 			goto out_err;
2781 
2782 		err = kvm_hyp_init_protection(hyp_va_bits);
2783 		if (err) {
2784 			kvm_err("Failed to init hyp memory protection\n");
2785 			goto out_err;
2786 		}
2787 	}
2788 
2789 	return 0;
2790 
2791 out_err:
2792 	teardown_hyp_mode();
2793 	kvm_err("error initializing Hyp mode: %d\n", err);
2794 	return err;
2795 }
2796 
2797 struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
2798 {
2799 	struct kvm_vcpu *vcpu = NULL;
2800 	struct kvm_mpidr_data *data;
2801 	unsigned long i;
2802 
2803 	mpidr &= MPIDR_HWID_BITMASK;
2804 
2805 	rcu_read_lock();
2806 	data = rcu_dereference(kvm->arch.mpidr_data);
2807 
2808 	if (data) {
2809 		u16 idx = kvm_mpidr_index(data, mpidr);
2810 
2811 		vcpu = kvm_get_vcpu(kvm, data->cmpidr_to_idx[idx]);
2812 		if (mpidr != kvm_vcpu_get_mpidr_aff(vcpu))
2813 			vcpu = NULL;
2814 	}
2815 
2816 	rcu_read_unlock();
2817 
2818 	if (vcpu)
2819 		return vcpu;
2820 
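	/*
	 * Fall back to a linear scan if no MPIDR index has been built yet,
	 * or if the indexed entry did not match.
	 */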
2821 	kvm_for_each_vcpu(i, vcpu, kvm) {
2822 		if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
2823 			return vcpu;
2824 	}
2825 	return NULL;
2826 }
2827 
2828 bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
2829 {
2830 	return irqchip_in_kernel(kvm);
2831 }
2832 
2833 int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
2834 				      struct irq_bypass_producer *prod)
2835 {
2836 	struct kvm_kernel_irqfd *irqfd =
2837 		container_of(cons, struct kvm_kernel_irqfd, consumer);
2838 	struct kvm_kernel_irq_routing_entry *irq_entry = &irqfd->irq_entry;
2839 
2840 	/*
2841 	 * The only thing we have a chance of directly-injecting is LPIs. Maybe
2842 	 * one day...
2843 	 */
2844 	if (irq_entry->type != KVM_IRQ_ROUTING_MSI)
2845 		return 0;
2846 
2847 	return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq,
2848 					  &irqfd->irq_entry);
2849 }
2850 
2851 void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
2852 				      struct irq_bypass_producer *prod)
2853 {
2854 	struct kvm_kernel_irqfd *irqfd =
2855 		container_of(cons, struct kvm_kernel_irqfd, consumer);
2856 	struct kvm_kernel_irq_routing_entry *irq_entry = &irqfd->irq_entry;
2857 
2858 	if (irq_entry->type != KVM_IRQ_ROUTING_MSI)
2859 		return;
2860 
2861 	kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq);
2862 }
2863 
2864 void kvm_arch_update_irqfd_routing(struct kvm_kernel_irqfd *irqfd,
2865 				   struct kvm_kernel_irq_routing_entry *old,
2866 				   struct kvm_kernel_irq_routing_entry *new)
2867 {
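	/*
	 * Nothing to do if the MSI description is unchanged.
	 */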
2868 	if (old->type == KVM_IRQ_ROUTING_MSI &&
2869 	    new->type == KVM_IRQ_ROUTING_MSI &&
2870 	    !memcmp(&old->msi, &new->msi, sizeof(new->msi)))
2871 		return;
2872 
2873 	/*
2874 	 * Remapping the vLPI requires taking the its_lock mutex to resolve
2875 	 * the new translation. We're in spinlock land at this point, so no
2876 	 * chance of resolving the translation.
2877 	 *
2878 	 * Unmap the vLPI and fall back to software LPI injection.
2879 	 */
2880 	return kvm_vgic_v4_unset_forwarding(irqfd->kvm, irqfd->producer->irq);
2881 }
2882 
2883 void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *cons)
2884 {
2885 	struct kvm_kernel_irqfd *irqfd =
2886 		container_of(cons, struct kvm_kernel_irqfd, consumer);
2887 
2888 	kvm_arm_halt_guest(irqfd->kvm);
2889 }
2890 
2891 void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *cons)
2892 {
2893 	struct kvm_kernel_irqfd *irqfd =
2894 		container_of(cons, struct kvm_kernel_irqfd, consumer);
2895 
2896 	kvm_arm_resume_guest(irqfd->kvm);
2897 }
2898 
2899 /* Initialize Hyp-mode and memory mappings on all CPUs */
2900 static __init int kvm_arm_init(void)
2901 {
2902 	int err;
2903 	bool in_hyp_mode;
2904 
2905 	if (!is_hyp_mode_available()) {
2906 		kvm_info("HYP mode not available\n");
2907 		return -ENODEV;
2908 	}
2909 
2910 	if (kvm_get_mode() == KVM_MODE_NONE) {
2911 		kvm_info("KVM disabled from command line\n");
2912 		return -ENODEV;
2913 	}
2914 
2915 	err = kvm_sys_reg_table_init();
2916 	if (err) {
2917 		kvm_info("Error initializing system register tables\n");
2918 		return err;
2919 	}
2920 
2921 	in_hyp_mode = is_kernel_in_hyp_mode();
2922 
2923 	if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) ||
2924 	    cpus_have_final_cap(ARM64_WORKAROUND_1508412))
2925 		kvm_info("Guests without required CPU erratum workarounds can deadlock system!\n" \
2926 			 "Only trusted guests should be used on this system.\n");
2927 
2928 	err = kvm_set_ipa_limit();
2929 	if (err)
2930 		return err;
2931 
2932 	err = kvm_arm_init_sve();
2933 	if (err)
2934 		return err;
2935 
2936 	err = kvm_arm_vmid_alloc_init();
2937 	if (err) {
2938 		kvm_err("Failed to initialize VMID allocator.\n");
2939 		return err;
2940 	}
2941 
2942 	if (!in_hyp_mode) {
2943 		err = init_hyp_mode();
2944 		if (err)
2945 			goto out_err;
2946 	}
2947 
2948 	err = kvm_init_vector_slots();
2949 	if (err) {
2950 		kvm_err("Cannot initialise vector slots\n");
2951 		goto out_hyp;
2952 	}
2953 
2954 	err = init_subsystems();
2955 	if (err)
2956 		goto out_hyp;
2957 
2958 	kvm_info("%s%sVHE%s mode initialized successfully\n",
2959 		 in_hyp_mode ? "" : (is_protected_kvm_enabled() ?
2960 				     "Protected " : "Hyp "),
2961 		 in_hyp_mode ? "" : (cpus_have_final_cap(ARM64_KVM_HVHE) ?
2962 				     "h" : "n"),
2963 		 cpus_have_final_cap(ARM64_HAS_NESTED_VIRT) ? "+NV2" : "");
2964 
2965 	/*
2966 	 * FIXME: Do something reasonable if kvm_init() fails after pKVM
2967 	 * hypervisor protection is finalized.
2968 	 */
2969 	err = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
2970 	if (err)
2971 		goto out_subs;
2972 
2973 	/*
2974 	 * This should be called after initialization is done and failure isn't
2975 	 * possible anymore.
2976 	 */
2977 	if (!in_hyp_mode)
2978 		finalize_init_hyp_mode();
2979 
2980 	kvm_arm_initialised = true;
2981 
2982 	return 0;
2983 
2984 out_subs:
2985 	teardown_subsystems();
2986 out_hyp:
2987 	if (!in_hyp_mode)
2988 		teardown_hyp_mode();
2989 out_err:
2990 	kvm_arm_vmid_alloc_free();
2991 	return err;
2992 }
2993 
2994 static int __init early_kvm_mode_cfg(char *arg)
2995 {
2996 	if (!arg)
2997 		return -EINVAL;
2998 
2999 	if (strcmp(arg, "none") == 0) {
3000 		kvm_mode = KVM_MODE_NONE;
3001 		return 0;
3002 	}
3003 
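	/*
	 * Without EL2 the mode choice is irrelevant: warn once and keep the
	 * default, as kvm_arm_init() will refuse to initialize anyway.
	 */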
3004 	if (!is_hyp_mode_available()) {
3005 		pr_warn_once("KVM is not available. Ignoring kvm-arm.mode\n");
3006 		return 0;
3007 	}
3008 
3009 	if (strcmp(arg, "protected") == 0) {
3010 		if (!is_kernel_in_hyp_mode())
3011 			kvm_mode = KVM_MODE_PROTECTED;
3012 		else
3013 			pr_warn_once("Protected KVM not available with VHE\n");
3014 
3015 		return 0;
3016 	}
3017 
3018 	if (strcmp(arg, "nvhe") == 0 && !WARN_ON(is_kernel_in_hyp_mode())) {
3019 		kvm_mode = KVM_MODE_DEFAULT;
3020 		return 0;
3021 	}
3022 
3023 	if (strcmp(arg, "nested") == 0 && !WARN_ON(!is_kernel_in_hyp_mode())) {
3024 		kvm_mode = KVM_MODE_NV;
3025 		return 0;
3026 	}
3027 
3028 	return -EINVAL;
3029 }
3030 early_param("kvm-arm.mode", early_kvm_mode_cfg);
3031 
3032 static int __init early_kvm_wfx_trap_policy_cfg(char *arg, enum kvm_wfx_trap_policy *p)
3033 {
3034 	if (!arg)
3035 		return -EINVAL;
3036 
3037 	if (strcmp(arg, "trap") == 0) {
3038 		*p = KVM_WFX_TRAP;
3039 		return 0;
3040 	}
3041 
3042 	if (strcmp(arg, "notrap") == 0) {
3043 		*p = KVM_WFX_NOTRAP;
3044 		return 0;
3045 	}
3046 
3047 	return -EINVAL;
3048 }
3049 
3050 static int __init early_kvm_wfi_trap_policy_cfg(char *arg)
3051 {
3052 	return early_kvm_wfx_trap_policy_cfg(arg, &kvm_wfi_trap_policy);
3053 }
3054 early_param("kvm-arm.wfi_trap_policy", early_kvm_wfi_trap_policy_cfg);
3055 
3056 static int __init early_kvm_wfe_trap_policy_cfg(char *arg)
3057 {
3058 	return early_kvm_wfx_trap_policy_cfg(arg, &kvm_wfe_trap_policy);
3059 }
3060 early_param("kvm-arm.wfe_trap_policy", early_kvm_wfe_trap_policy_cfg);
3061 
3062 enum kvm_mode kvm_get_mode(void)
3063 {
3064 	return kvm_mode;
3065 }
3066 
3067 module_init(kvm_arm_init);
3068