xref: /linux/arch/arm64/kvm/arm.c (revision 8540bd1b990bad7f7e95b5bf1adf30bfaf2e38c9)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2012 - Virtual Open Systems and Columbia University
4  * Author: Christoffer Dall <c.dall@virtualopensystems.com>
5  */
6 
7 #include <linux/bug.h>
8 #include <linux/cpu_pm.h>
9 #include <linux/entry-kvm.h>
10 #include <linux/errno.h>
11 #include <linux/err.h>
12 #include <linux/kvm_host.h>
13 #include <linux/list.h>
14 #include <linux/module.h>
15 #include <linux/vmalloc.h>
16 #include <linux/fs.h>
17 #include <linux/mman.h>
18 #include <linux/sched.h>
19 #include <linux/kvm.h>
20 #include <linux/kvm_irqfd.h>
21 #include <linux/irqbypass.h>
22 #include <linux/sched/stat.h>
23 #include <linux/psci.h>
24 #include <trace/events/kvm.h>
25 
26 #define CREATE_TRACE_POINTS
27 #include "trace_arm.h"
28 
29 #include <linux/uaccess.h>
30 #include <asm/ptrace.h>
31 #include <asm/mman.h>
32 #include <asm/tlbflush.h>
33 #include <asm/cacheflush.h>
34 #include <asm/cpufeature.h>
35 #include <asm/virt.h>
36 #include <asm/kvm_arm.h>
37 #include <asm/kvm_asm.h>
38 #include <asm/kvm_emulate.h>
39 #include <asm/kvm_mmu.h>
40 #include <asm/kvm_nested.h>
41 #include <asm/kvm_pkvm.h>
42 #include <asm/kvm_ptrauth.h>
43 #include <asm/sections.h>
44 
45 #include <kvm/arm_hypercalls.h>
46 #include <kvm/arm_pmu.h>
47 #include <kvm/arm_psci.h>
48 
49 static enum kvm_mode kvm_mode = KVM_MODE_DEFAULT;
50 
51 DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);
52 
53 DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
54 DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
55 
56 DECLARE_KVM_NVHE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
57 
58 static bool vgic_present, kvm_arm_initialised;
59 
60 static DEFINE_PER_CPU(unsigned char, kvm_hyp_initialized);
61 DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
62 
63 bool is_kvm_arm_initialised(void)
64 {
65 	return kvm_arm_initialised;
66 }
67 
68 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
69 {
70 	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
71 }
72 
73 /*
74  * This functions as an allow-list of protected VM capabilities.
75  * Features not explicitly allowed by this function are denied.
76  */
77 static bool pkvm_ext_allowed(struct kvm *kvm, long ext)
78 {
79 	switch (ext) {
80 	case KVM_CAP_IRQCHIP:
81 	case KVM_CAP_ARM_PSCI:
82 	case KVM_CAP_ARM_PSCI_0_2:
83 	case KVM_CAP_NR_VCPUS:
84 	case KVM_CAP_MAX_VCPUS:
85 	case KVM_CAP_MAX_VCPU_ID:
86 	case KVM_CAP_MSI_DEVID:
87 	case KVM_CAP_ARM_VM_IPA_SIZE:
88 	case KVM_CAP_ARM_PMU_V3:
89 	case KVM_CAP_ARM_SVE:
90 	case KVM_CAP_ARM_PTRAUTH_ADDRESS:
91 	case KVM_CAP_ARM_PTRAUTH_GENERIC:
92 		return true;
93 	default:
94 		return false;
95 	}
96 }
97 
98 int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
99 			    struct kvm_enable_cap *cap)
100 {
101 	int r = -EINVAL;
102 
103 	if (cap->flags)
104 		return -EINVAL;
105 
106 	if (kvm_vm_is_protected(kvm) && !pkvm_ext_allowed(kvm, cap->cap))
107 		return -EINVAL;
108 
109 	switch (cap->cap) {
110 	case KVM_CAP_ARM_NISV_TO_USER:
111 		r = 0;
112 		set_bit(KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER,
113 			&kvm->arch.flags);
114 		break;
115 	case KVM_CAP_ARM_MTE:
116 		mutex_lock(&kvm->lock);
117 		if (system_supports_mte() && !kvm->created_vcpus) {
118 			r = 0;
119 			set_bit(KVM_ARCH_FLAG_MTE_ENABLED, &kvm->arch.flags);
120 		}
121 		mutex_unlock(&kvm->lock);
122 		break;
123 	case KVM_CAP_ARM_SYSTEM_SUSPEND:
124 		r = 0;
125 		set_bit(KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED, &kvm->arch.flags);
126 		break;
127 	case KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE:
128 		mutex_lock(&kvm->slots_lock);
129 		/*
130 		 * To keep things simple, allow changing the chunk
131 		 * size only when no memory slots have been created.
132 		 */
133 		if (kvm_are_all_memslots_empty(kvm)) {
134 			u64 new_cap = cap->args[0];
135 
136 			if (!new_cap || kvm_is_block_size_supported(new_cap)) {
137 				r = 0;
138 				kvm->arch.mmu.split_page_chunk_size = new_cap;
139 			}
140 		}
141 		mutex_unlock(&kvm->slots_lock);
142 		break;
143 	default:
144 		break;
145 	}
146 
147 	return r;
148 }
149 
150 static int kvm_arm_default_max_vcpus(void)
151 {
152 	return vgic_present ? kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;
153 }
154 
155 /**
156  * kvm_arch_init_vm - initializes a VM data structure
157  * @kvm:	pointer to the KVM struct
158  */
159 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
160 {
161 	int ret;
162 
163 	mutex_init(&kvm->arch.config_lock);
164 
165 #ifdef CONFIG_LOCKDEP
166 	/* Clue in lockdep that the config_lock must be taken inside kvm->lock */
167 	mutex_lock(&kvm->lock);
168 	mutex_lock(&kvm->arch.config_lock);
169 	mutex_unlock(&kvm->arch.config_lock);
170 	mutex_unlock(&kvm->lock);
171 #endif
172 
173 	ret = kvm_share_hyp(kvm, kvm + 1);
174 	if (ret)
175 		return ret;
176 
177 	ret = pkvm_init_host_vm(kvm);
178 	if (ret)
179 		goto err_unshare_kvm;
180 
181 	if (!zalloc_cpumask_var(&kvm->arch.supported_cpus, GFP_KERNEL_ACCOUNT)) {
182 		ret = -ENOMEM;
183 		goto err_unshare_kvm;
184 	}
185 	cpumask_copy(kvm->arch.supported_cpus, cpu_possible_mask);
186 
187 	ret = kvm_init_stage2_mmu(kvm, &kvm->arch.mmu, type);
188 	if (ret)
189 		goto err_free_cpumask;
190 
191 	kvm_vgic_early_init(kvm);
192 
193 	kvm_timer_init_vm(kvm);
194 
195 	/* The maximum number of VCPUs is limited by the host's GIC model */
196 	kvm->max_vcpus = kvm_arm_default_max_vcpus();
197 
198 	kvm_arm_init_hypercalls(kvm);
199 
200 	bitmap_zero(kvm->arch.vcpu_features, KVM_VCPU_MAX_FEATURES);
201 
202 	return 0;
203 
204 err_free_cpumask:
205 	free_cpumask_var(kvm->arch.supported_cpus);
206 err_unshare_kvm:
207 	kvm_unshare_hyp(kvm, kvm + 1);
208 	return ret;
209 }
210 
211 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
212 {
213 	return VM_FAULT_SIGBUS;
214 }
215 
216 void kvm_arch_create_vm_debugfs(struct kvm *kvm)
217 {
218 	kvm_sys_regs_create_debugfs(kvm);
219 }
220 
221 /**
222  * kvm_arch_destroy_vm - destroy the VM data structure
223  * @kvm:	pointer to the KVM struct
224  */
225 void kvm_arch_destroy_vm(struct kvm *kvm)
226 {
227 	bitmap_free(kvm->arch.pmu_filter);
228 	free_cpumask_var(kvm->arch.supported_cpus);
229 
230 	kvm_vgic_destroy(kvm);
231 
232 	if (is_protected_kvm_enabled())
233 		pkvm_destroy_hyp_vm(kvm);
234 
235 	kfree(kvm->arch.mpidr_data);
236 	kfree(kvm->arch.sysreg_masks);
237 	kvm_destroy_vcpus(kvm);
238 
239 	kvm_unshare_hyp(kvm, kvm + 1);
240 
241 	kvm_arm_teardown_hypercalls(kvm);
242 }
243 
244 static bool kvm_has_full_ptr_auth(void)
245 {
246 	bool apa, gpa, api, gpi, apa3, gpa3;
247 	u64 isar1, isar2, val;
248 
249 	/*
250 	 * Check that:
251 	 *
252 	 * - both Address and Generic auth are implemented for a given
253 	 *   algorithm (Q5, IMPDEF or Q3)
254 	 * - only a single algorithm is implemented.
255 	 */
256 	if (!system_has_full_ptr_auth())
257 		return false;
258 
259 	isar1 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR1_EL1);
260 	isar2 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1);
261 
262 	apa = !!FIELD_GET(ID_AA64ISAR1_EL1_APA_MASK, isar1);
263 	val = FIELD_GET(ID_AA64ISAR1_EL1_GPA_MASK, isar1);
264 	gpa = (val == ID_AA64ISAR1_EL1_GPA_IMP);
265 
266 	api = !!FIELD_GET(ID_AA64ISAR1_EL1_API_MASK, isar1);
267 	val = FIELD_GET(ID_AA64ISAR1_EL1_GPI_MASK, isar1);
268 	gpi = (val == ID_AA64ISAR1_EL1_GPI_IMP);
269 
270 	apa3 = !!FIELD_GET(ID_AA64ISAR2_EL1_APA3_MASK, isar2);
271 	val  = FIELD_GET(ID_AA64ISAR2_EL1_GPA3_MASK, isar2);
272 	gpa3 = (val == ID_AA64ISAR2_EL1_GPA3_IMP);
273 
274 	return (apa == gpa && api == gpi && apa3 == gpa3 &&
275 		(apa + api + apa3) == 1);
276 }
277 
278 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
279 {
280 	int r;
281 
282 	if (kvm && kvm_vm_is_protected(kvm) && !pkvm_ext_allowed(kvm, ext))
283 		return 0;
284 
285 	switch (ext) {
286 	case KVM_CAP_IRQCHIP:
287 		r = vgic_present;
288 		break;
289 	case KVM_CAP_IOEVENTFD:
290 	case KVM_CAP_USER_MEMORY:
291 	case KVM_CAP_SYNC_MMU:
292 	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
293 	case KVM_CAP_ONE_REG:
294 	case KVM_CAP_ARM_PSCI:
295 	case KVM_CAP_ARM_PSCI_0_2:
296 	case KVM_CAP_READONLY_MEM:
297 	case KVM_CAP_MP_STATE:
298 	case KVM_CAP_IMMEDIATE_EXIT:
299 	case KVM_CAP_VCPU_EVENTS:
300 	case KVM_CAP_ARM_IRQ_LINE_LAYOUT_2:
301 	case KVM_CAP_ARM_NISV_TO_USER:
302 	case KVM_CAP_ARM_INJECT_EXT_DABT:
303 	case KVM_CAP_SET_GUEST_DEBUG:
304 	case KVM_CAP_VCPU_ATTRIBUTES:
305 	case KVM_CAP_PTP_KVM:
306 	case KVM_CAP_ARM_SYSTEM_SUSPEND:
307 	case KVM_CAP_IRQFD_RESAMPLE:
308 	case KVM_CAP_COUNTER_OFFSET:
309 		r = 1;
310 		break;
311 	case KVM_CAP_SET_GUEST_DEBUG2:
312 		return KVM_GUESTDBG_VALID_MASK;
313 	case KVM_CAP_ARM_SET_DEVICE_ADDR:
314 		r = 1;
315 		break;
316 	case KVM_CAP_NR_VCPUS:
317 		/*
318 		 * ARM64 treats KVM_CAP_NR_VCPUS differently from all other
319 		 * architectures, as it does not always bound it to
320 		 * KVM_CAP_MAX_VCPUS. It should not matter much because
321 		 * this is just an advisory value.
322 		 */
323 		r = min_t(unsigned int, num_online_cpus(),
324 			  kvm_arm_default_max_vcpus());
325 		break;
326 	case KVM_CAP_MAX_VCPUS:
327 	case KVM_CAP_MAX_VCPU_ID:
328 		if (kvm)
329 			r = kvm->max_vcpus;
330 		else
331 			r = kvm_arm_default_max_vcpus();
332 		break;
333 	case KVM_CAP_MSI_DEVID:
334 		if (!kvm)
335 			r = -EINVAL;
336 		else
337 			r = kvm->arch.vgic.msis_require_devid;
338 		break;
339 	case KVM_CAP_ARM_USER_IRQ:
340 		/*
341 		 * 1: EL1_VTIMER, EL1_PTIMER, and PMU.
342 		 * (bump this number if adding more devices)
343 		 */
344 		r = 1;
345 		break;
346 	case KVM_CAP_ARM_MTE:
347 		r = system_supports_mte();
348 		break;
349 	case KVM_CAP_STEAL_TIME:
350 		r = kvm_arm_pvtime_supported();
351 		break;
352 	case KVM_CAP_ARM_EL1_32BIT:
353 		r = cpus_have_final_cap(ARM64_HAS_32BIT_EL1);
354 		break;
355 	case KVM_CAP_GUEST_DEBUG_HW_BPS:
356 		r = get_num_brps();
357 		break;
358 	case KVM_CAP_GUEST_DEBUG_HW_WPS:
359 		r = get_num_wrps();
360 		break;
361 	case KVM_CAP_ARM_PMU_V3:
362 		r = kvm_arm_support_pmu_v3();
363 		break;
364 	case KVM_CAP_ARM_INJECT_SERROR_ESR:
365 		r = cpus_have_final_cap(ARM64_HAS_RAS_EXTN);
366 		break;
367 	case KVM_CAP_ARM_VM_IPA_SIZE:
368 		r = get_kvm_ipa_limit();
369 		break;
370 	case KVM_CAP_ARM_SVE:
371 		r = system_supports_sve();
372 		break;
373 	case KVM_CAP_ARM_PTRAUTH_ADDRESS:
374 	case KVM_CAP_ARM_PTRAUTH_GENERIC:
375 		r = kvm_has_full_ptr_auth();
376 		break;
377 	case KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE:
378 		if (kvm)
379 			r = kvm->arch.mmu.split_page_chunk_size;
380 		else
381 			r = KVM_ARM_EAGER_SPLIT_CHUNK_SIZE_DEFAULT;
382 		break;
383 	case KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES:
384 		r = kvm_supported_block_sizes();
385 		break;
386 	case KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES:
387 		r = BIT(0);
388 		break;
389 	default:
390 		r = 0;
391 	}
392 
393 	return r;
394 }
395 
396 long kvm_arch_dev_ioctl(struct file *filp,
397 			unsigned int ioctl, unsigned long arg)
398 {
399 	return -EINVAL;
400 }
401 
402 struct kvm *kvm_arch_alloc_vm(void)
403 {
404 	size_t sz = sizeof(struct kvm);
405 
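	/*
	 * Without VHE, struct kvm is shared with the EL2 hypervisor via
	 * kvm_share_hyp(), which requires a physically contiguous,
	 * linear-map allocation - hence kzalloc() rather than vmalloc().
	 */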
406 	if (!has_vhe())
407 		return kzalloc(sz, GFP_KERNEL_ACCOUNT);
408 
409 	return __vmalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_HIGHMEM | __GFP_ZERO);
410 }
411 
412 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
413 {
414 	if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
415 		return -EBUSY;
416 
417 	if (id >= kvm->max_vcpus)
418 		return -EINVAL;
419 
420 	return 0;
421 }
422 
423 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
424 {
425 	int err;
426 
427 	spin_lock_init(&vcpu->arch.mp_state_lock);
428 
429 #ifdef CONFIG_LOCKDEP
430 	/* Inform lockdep that the config_lock is acquired after vcpu->mutex */
431 	mutex_lock(&vcpu->mutex);
432 	mutex_lock(&vcpu->kvm->arch.config_lock);
433 	mutex_unlock(&vcpu->kvm->arch.config_lock);
434 	mutex_unlock(&vcpu->mutex);
435 #endif
436 
437 	/* Force users to call KVM_ARM_VCPU_INIT */
438 	vcpu_clear_flag(vcpu, VCPU_INITIALIZED);
439 
440 	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
441 
442 	/* Set up the timer */
443 	kvm_timer_vcpu_init(vcpu);
444 
445 	kvm_pmu_vcpu_init(vcpu);
446 
447 	kvm_arm_reset_debug_ptr(vcpu);
448 
449 	kvm_arm_pvtime_vcpu_init(&vcpu->arch);
450 
451 	vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;
452 
453 	err = kvm_vgic_vcpu_init(vcpu);
454 	if (err)
455 		return err;
456 
457 	return kvm_share_hyp(vcpu, vcpu + 1);
458 }
459 
460 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
461 {
462 }
463 
464 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
465 {
466 	if (vcpu_has_run_once(vcpu) && unlikely(!irqchip_in_kernel(vcpu->kvm)))
467 		static_branch_dec(&userspace_irqchip_in_use);
468 
469 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
470 	kvm_timer_vcpu_terminate(vcpu);
471 	kvm_pmu_vcpu_destroy(vcpu);
472 	kvm_vgic_vcpu_destroy(vcpu);
473 	kvm_arm_vcpu_destroy(vcpu);
474 }
475 
476 void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
477 {
478 
479 }
480 
481 void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
482 {
483 
484 }
485 
486 static void vcpu_set_pauth_traps(struct kvm_vcpu *vcpu)
487 {
488 	if (vcpu_has_ptrauth(vcpu)) {
489 		/*
490 		 * Either we're running an L2 guest, and the API/APK
491 		 * bits come from L1's HCR_EL2, or API/APK are both set.
492 		 */
493 		if (unlikely(vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu))) {
494 			u64 val;
495 
496 			val = __vcpu_sys_reg(vcpu, HCR_EL2);
497 			val &= (HCR_API | HCR_APK);
498 			vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
499 			vcpu->arch.hcr_el2 |= val;
500 		} else {
501 			vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
502 		}
503 
504 		/*
505 		 * Save the host keys if there is any chance for the guest
506 		 * to use pauth, as the entry code will reload the guest
507 		 * keys in that case.
508 		 * Protected mode is the exception to that rule, as the
509 		 * entry into the EL2 code eagerly switches back and forth
510 		 * between host and hyp keys (and kvm_hyp_ctxt is out of
511 		 * reach anyway).
512 		 */
513 		if (is_protected_kvm_enabled())
514 			return;
515 
516 		if (vcpu->arch.hcr_el2 & (HCR_API | HCR_APK)) {
517 			struct kvm_cpu_context *ctxt;
518 			ctxt = this_cpu_ptr_hyp_sym(kvm_hyp_ctxt);
519 			ptrauth_save_keys(ctxt);
520 		}
521 	}
522 }
523 
524 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
525 {
526 	struct kvm_s2_mmu *mmu;
527 	int *last_ran;
528 
529 	mmu = vcpu->arch.hw_mmu;
530 	last_ran = this_cpu_ptr(mmu->last_vcpu_ran);
531 
532 	/*
533 	 * We guarantee that both TLBs and I-cache are private to each
534 	 * vcpu. If we detect that a vcpu from the same VM has
535 	 * previously run on the same physical CPU, call into the
536 	 * hypervisor code to nuke the relevant contexts.
537 	 *
538 	 * We might get preempted before the vCPU actually runs, but
539 	 * over-invalidation doesn't affect correctness.
540 	 */
541 	if (*last_ran != vcpu->vcpu_idx) {
542 		kvm_call_hyp(__kvm_flush_cpu_context, mmu);
543 		*last_ran = vcpu->vcpu_idx;
544 	}
545 
546 	vcpu->cpu = cpu;
547 
548 	kvm_vgic_load(vcpu);
549 	kvm_timer_vcpu_load(vcpu);
550 	if (has_vhe())
551 		kvm_vcpu_load_vhe(vcpu);
552 	kvm_arch_vcpu_load_fp(vcpu);
553 	kvm_vcpu_pmu_restore_guest(vcpu);
554 	if (kvm_arm_is_pvtime_enabled(&vcpu->arch))
555 		kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu);
556 
557 	if (single_task_running())
558 		vcpu_clear_wfx_traps(vcpu);
559 	else
560 		vcpu_set_wfx_traps(vcpu);
561 
562 	vcpu_set_pauth_traps(vcpu);
563 
564 	kvm_arch_vcpu_load_debug_state_flags(vcpu);
565 
566 	if (!cpumask_test_cpu(cpu, vcpu->kvm->arch.supported_cpus))
567 		vcpu_set_on_unsupported_cpu(vcpu);
568 }
569 
570 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
571 {
572 	kvm_arch_vcpu_put_debug_state_flags(vcpu);
573 	kvm_arch_vcpu_put_fp(vcpu);
574 	if (has_vhe())
575 		kvm_vcpu_put_vhe(vcpu);
576 	kvm_timer_vcpu_put(vcpu);
577 	kvm_vgic_put(vcpu);
578 	kvm_vcpu_pmu_restore_host(vcpu);
579 	kvm_arm_vmid_clear_active();
580 
581 	vcpu_clear_on_unsupported_cpu(vcpu);
582 	vcpu->cpu = -1;
583 }
584 
585 static void __kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu)
586 {
587 	WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
588 	kvm_make_request(KVM_REQ_SLEEP, vcpu);
589 	kvm_vcpu_kick(vcpu);
590 }
591 
592 void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu)
593 {
594 	spin_lock(&vcpu->arch.mp_state_lock);
595 	__kvm_arm_vcpu_power_off(vcpu);
596 	spin_unlock(&vcpu->arch.mp_state_lock);
597 }
598 
599 bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu)
600 {
601 	return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_STOPPED;
602 }
603 
604 static void kvm_arm_vcpu_suspend(struct kvm_vcpu *vcpu)
605 {
606 	WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_SUSPENDED);
607 	kvm_make_request(KVM_REQ_SUSPEND, vcpu);
608 	kvm_vcpu_kick(vcpu);
609 }
610 
611 static bool kvm_arm_vcpu_suspended(struct kvm_vcpu *vcpu)
612 {
613 	return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_SUSPENDED;
614 }
615 
616 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
617 				    struct kvm_mp_state *mp_state)
618 {
619 	*mp_state = READ_ONCE(vcpu->arch.mp_state);
620 
621 	return 0;
622 }
623 
624 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
625 				    struct kvm_mp_state *mp_state)
626 {
627 	int ret = 0;
628 
629 	spin_lock(&vcpu->arch.mp_state_lock);
630 
631 	switch (mp_state->mp_state) {
632 	case KVM_MP_STATE_RUNNABLE:
633 		WRITE_ONCE(vcpu->arch.mp_state, *mp_state);
634 		break;
635 	case KVM_MP_STATE_STOPPED:
636 		__kvm_arm_vcpu_power_off(vcpu);
637 		break;
638 	case KVM_MP_STATE_SUSPENDED:
639 		kvm_arm_vcpu_suspend(vcpu);
640 		break;
641 	default:
642 		ret = -EINVAL;
643 	}
644 
645 	spin_unlock(&vcpu->arch.mp_state_lock);
646 
647 	return ret;
648 }
649 
650 /**
651  * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled
652  * @v:		The VCPU pointer
653  *
654  * If the guest CPU is not waiting for interrupts or an interrupt line is
655  * asserted, the CPU is by definition runnable.
656  */
657 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
658 {
659 	bool irq_lines = *vcpu_hcr(v) & (HCR_VI | HCR_VF);
660 	return ((irq_lines || kvm_vgic_vcpu_pending_irq(v))
661 		&& !kvm_arm_vcpu_stopped(v) && !v->arch.pause);
662 }
663 
664 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
665 {
666 	return vcpu_mode_priv(vcpu);
667 }
668 
669 #ifdef CONFIG_GUEST_PERF_EVENTS
670 unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
671 {
672 	return *vcpu_pc(vcpu);
673 }
674 #endif
675 
676 static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
677 {
678 	return vcpu_get_flag(vcpu, VCPU_INITIALIZED);
679 }
680 
681 static void kvm_init_mpidr_data(struct kvm *kvm)
682 {
683 	struct kvm_mpidr_data *data = NULL;
684 	unsigned long c, mask, nr_entries;
685 	u64 aff_set = 0, aff_clr = ~0UL;
686 	struct kvm_vcpu *vcpu;
687 
688 	mutex_lock(&kvm->arch.config_lock);
689 
690 	if (kvm->arch.mpidr_data || atomic_read(&kvm->online_vcpus) == 1)
691 		goto out;
692 
693 	kvm_for_each_vcpu(c, vcpu, kvm) {
694 		u64 aff = kvm_vcpu_get_mpidr_aff(vcpu);
695 		aff_set |= aff;
696 		aff_clr &= aff;
697 	}
698 
699 	/*
700 	 * A significant bit can be either 0 or 1, and will only appear in
701 	 * aff_set. Use aff_clr to weed out the useless stuff.
702 	 */
703 	mask = aff_set ^ aff_clr;
704 	nr_entries = BIT_ULL(hweight_long(mask));
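	/*
	 * Example: two vCPUs at affinities 0x0000 and 0x0100 give
	 * aff_set = 0x0100 and aff_clr = 0x0000, so mask = 0x0100 and
	 * the compressed table needs BIT(1) = 2 entries.
	 */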
705 
706 	/*
707 	 * Don't let userspace fool us. If we need more than a single page
708 	 * to describe the compressed MPIDR array, just fall back to the
709 	 * iterative method. Single vcpu VMs do not need this either.
710 	 */
711 	if (struct_size(data, cmpidr_to_idx, nr_entries) <= PAGE_SIZE)
712 		data = kzalloc(struct_size(data, cmpidr_to_idx, nr_entries),
713 			       GFP_KERNEL_ACCOUNT);
714 
715 	if (!data)
716 		goto out;
717 
718 	data->mpidr_mask = mask;
719 
720 	kvm_for_each_vcpu(c, vcpu, kvm) {
721 		u64 aff = kvm_vcpu_get_mpidr_aff(vcpu);
722 		u16 index = kvm_mpidr_index(data, aff);
723 
724 		data->cmpidr_to_idx[index] = c;
725 	}
726 
727 	kvm->arch.mpidr_data = data;
728 out:
729 	mutex_unlock(&kvm->arch.config_lock);
730 }
731 
732 /*
733  * Handle both the initialisation that is being done when the vcpu is
734  * run for the first time, as well as the updates that must be
735  * performed each time we get a new thread dealing with this vcpu.
736  */
737 int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
738 {
739 	struct kvm *kvm = vcpu->kvm;
740 	int ret;
741 
742 	if (!kvm_vcpu_initialized(vcpu))
743 		return -ENOEXEC;
744 
745 	if (!kvm_arm_vcpu_is_finalized(vcpu))
746 		return -EPERM;
747 
748 	ret = kvm_arch_vcpu_run_map_fp(vcpu);
749 	if (ret)
750 		return ret;
751 
752 	if (likely(vcpu_has_run_once(vcpu)))
753 		return 0;
754 
755 	kvm_init_mpidr_data(kvm);
756 
757 	kvm_arm_vcpu_init_debug(vcpu);
758 
759 	if (likely(irqchip_in_kernel(kvm))) {
760 		/*
761 		 * Map the VGIC hardware resources before running a vcpu the
762 		 * first time on this VM.
763 		 */
764 		ret = kvm_vgic_map_resources(kvm);
765 		if (ret)
766 			return ret;
767 	}
768 
769 	if (vcpu_has_nv(vcpu)) {
770 		ret = kvm_init_nv_sysregs(vcpu->kvm);
771 		if (ret)
772 			return ret;
773 	}
774 
775 	/*
776 	 * This needs to happen after NV has imposed its own restrictions on
777 	 * the feature set
778 	 */
779 	kvm_init_sysreg(vcpu);
780 
781 	ret = kvm_timer_enable(vcpu);
782 	if (ret)
783 		return ret;
784 
785 	ret = kvm_arm_pmu_v3_enable(vcpu);
786 	if (ret)
787 		return ret;
788 
789 	if (is_protected_kvm_enabled()) {
790 		ret = pkvm_create_hyp_vm(kvm);
791 		if (ret)
792 			return ret;
793 	}
794 
795 	if (!irqchip_in_kernel(kvm)) {
796 		/*
797 		 * Tell the rest of the code that there are userspace irqchip
798 		 * VMs in the wild.
799 		 */
800 		static_branch_inc(&userspace_irqchip_in_use);
801 	}
802 
803 	/*
804 	 * Initialize traps for protected VMs.
805 	 * NOTE: Move to run in EL2 directly, rather than via a hypercall, once
806 	 * the code is in place for first run initialization at EL2.
807 	 */
808 	if (kvm_vm_is_protected(kvm))
809 		kvm_call_hyp_nvhe(__pkvm_vcpu_init_traps, vcpu);
810 
811 	mutex_lock(&kvm->arch.config_lock);
812 	set_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags);
813 	mutex_unlock(&kvm->arch.config_lock);
814 
815 	return ret;
816 }
817 
818 bool kvm_arch_intc_initialized(struct kvm *kvm)
819 {
820 	return vgic_initialized(kvm);
821 }
822 
823 void kvm_arm_halt_guest(struct kvm *kvm)
824 {
825 	unsigned long i;
826 	struct kvm_vcpu *vcpu;
827 
828 	kvm_for_each_vcpu(i, vcpu, kvm)
829 		vcpu->arch.pause = true;
830 	kvm_make_all_cpus_request(kvm, KVM_REQ_SLEEP);
831 }
832 
833 void kvm_arm_resume_guest(struct kvm *kvm)
834 {
835 	unsigned long i;
836 	struct kvm_vcpu *vcpu;
837 
838 	kvm_for_each_vcpu(i, vcpu, kvm) {
839 		vcpu->arch.pause = false;
840 		__kvm_vcpu_wake_up(vcpu);
841 	}
842 }
843 
844 static void kvm_vcpu_sleep(struct kvm_vcpu *vcpu)
845 {
846 	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);
847 
848 	rcuwait_wait_event(wait,
849 			   (!kvm_arm_vcpu_stopped(vcpu)) && (!vcpu->arch.pause),
850 			   TASK_INTERRUPTIBLE);
851 
852 	if (kvm_arm_vcpu_stopped(vcpu) || vcpu->arch.pause) {
853 		/* Awaken to handle a signal, request we sleep again later. */
854 		kvm_make_request(KVM_REQ_SLEEP, vcpu);
855 	}
856 
857 	/*
858 	 * Make sure we will observe a potential reset request if we've
859 	 * observed a change to the power state. Pairs with the smp_wmb() in
860 	 * kvm_psci_vcpu_on().
861 	 */
862 	smp_rmb();
863 }
864 
865 /**
866  * kvm_vcpu_wfi - emulate Wait-For-Interrupt behavior
867  * @vcpu:	The VCPU pointer
868  *
869  * Suspend execution of a vCPU until a valid wake event is detected, i.e. until
870  * the vCPU is runnable.  The vCPU may or may not be scheduled out, depending
871  * on when a wake event arrives, e.g. there may already be a pending wake event.
872  */
873 void kvm_vcpu_wfi(struct kvm_vcpu *vcpu)
874 {
875 	/*
876 	 * Sync back the state of the GIC CPU interface so that we have
877 	 * the latest PMR and group enables. This ensures that
878 	 * kvm_arch_vcpu_runnable has up-to-date data to decide whether
879 	 * we have pending interrupts, e.g. when determining if the
880 	 * vCPU should block.
881 	 *
882 	 * For the same reason, we want to tell GICv4 that we need
883 	 * doorbells to be signalled, should an interrupt become pending.
884 	 */
885 	preempt_disable();
886 	vcpu_set_flag(vcpu, IN_WFI);
887 	kvm_vgic_put(vcpu);
888 	preempt_enable();
889 
890 	kvm_vcpu_halt(vcpu);
891 	vcpu_clear_flag(vcpu, IN_WFIT);
892 
893 	preempt_disable();
894 	vcpu_clear_flag(vcpu, IN_WFI);
895 	kvm_vgic_load(vcpu);
896 	preempt_enable();
897 }
898 
899 static int kvm_vcpu_suspend(struct kvm_vcpu *vcpu)
900 {
901 	if (!kvm_arm_vcpu_suspended(vcpu))
902 		return 1;
903 
904 	kvm_vcpu_wfi(vcpu);
905 
906 	/*
907 	 * The suspend state is sticky; we do not leave it until userspace
908 	 * explicitly marks the vCPU as runnable. Request that we suspend again
909 	 * later.
910 	 */
911 	kvm_make_request(KVM_REQ_SUSPEND, vcpu);
912 
913 	/*
914 	 * Check to make sure the vCPU is actually runnable. If so, exit to
915 	 * userspace informing it of the wakeup condition.
916 	 */
917 	if (kvm_arch_vcpu_runnable(vcpu)) {
918 		memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
919 		vcpu->run->system_event.type = KVM_SYSTEM_EVENT_WAKEUP;
920 		vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
921 		return 0;
922 	}
923 
924 	/*
925 	 * Otherwise, we were unblocked to process a different event, such as a
926 	 * pending signal. Return 1 and allow kvm_arch_vcpu_ioctl_run() to
927 	 * process the event.
928 	 */
929 	return 1;
930 }
931 
932 /**
933  * check_vcpu_requests - check and handle pending vCPU requests
934  * @vcpu:	the VCPU pointer
935  *
936  * Return: 1 if we should enter the guest
937  *	   0 if we should exit to userspace
938  *	   < 0 if we should exit to userspace, where the return value indicates
939  *	   an error
940  */
941 static int check_vcpu_requests(struct kvm_vcpu *vcpu)
942 {
943 	if (kvm_request_pending(vcpu)) {
944 		if (kvm_check_request(KVM_REQ_SLEEP, vcpu))
945 			kvm_vcpu_sleep(vcpu);
946 
947 		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
948 			kvm_reset_vcpu(vcpu);
949 
950 		/*
951 		 * Clear IRQ_PENDING requests that were made to guarantee
952 		 * that a VCPU sees new virtual interrupts.
953 		 */
954 		kvm_check_request(KVM_REQ_IRQ_PENDING, vcpu);
955 
956 		if (kvm_check_request(KVM_REQ_RECORD_STEAL, vcpu))
957 			kvm_update_stolen_time(vcpu);
958 
959 		if (kvm_check_request(KVM_REQ_RELOAD_GICv4, vcpu)) {
960 			/* The distributor enable bits were changed */
961 			preempt_disable();
962 			vgic_v4_put(vcpu);
963 			vgic_v4_load(vcpu);
964 			preempt_enable();
965 		}
966 
967 		if (kvm_check_request(KVM_REQ_RELOAD_PMU, vcpu))
968 			kvm_vcpu_reload_pmu(vcpu);
969 
970 		if (kvm_check_request(KVM_REQ_RESYNC_PMU_EL0, vcpu))
971 			kvm_vcpu_pmu_restore_guest(vcpu);
972 
973 		if (kvm_check_request(KVM_REQ_SUSPEND, vcpu))
974 			return kvm_vcpu_suspend(vcpu);
975 
976 		if (kvm_dirty_ring_check_request(vcpu))
977 			return 0;
978 	}
979 
980 	return 1;
981 }
982 
983 static bool vcpu_mode_is_bad_32bit(struct kvm_vcpu *vcpu)
984 {
985 	if (likely(!vcpu_mode_is_32bit(vcpu)))
986 		return false;
987 
988 	if (vcpu_has_nv(vcpu))
989 		return true;
990 
991 	return !kvm_supports_32bit_el0();
992 }
993 
994 /**
995  * kvm_vcpu_exit_request - returns true if the VCPU should *not* enter the guest
996  * @vcpu:	The VCPU pointer
997  * @ret:	Pointer to write optional return code
998  *
999  * Returns: true if the VCPU needs to return to a preemptible + interruptible
1000  *	    kernel context and skip guest entry.
1001  *
1002  * This function disambiguates between two different types of exits: exits to a
1003  * preemptible + interruptible kernel context and exits to userspace. For an
1004  * exit to userspace, this function will write the return code to ret and return
1005  * true. For an exit to preemptible + interruptible kernel context (i.e. check
1006  * for pending work and re-enter), return true without writing to ret.
1007  */
1008 static bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu, int *ret)
1009 {
1010 	struct kvm_run *run = vcpu->run;
1011 
1012 	/*
1013 	 * If we're using a userspace irqchip, then check if we need
1014 	 * to tell a userspace irqchip about timer or PMU level
1015 	 * changes and if so, exit to userspace (the actual level
1016 	 * state gets updated in kvm_timer_update_run and
1017 	 * kvm_pmu_update_run below).
1018 	 */
1019 	if (static_branch_unlikely(&userspace_irqchip_in_use)) {
1020 		if (kvm_timer_should_notify_user(vcpu) ||
1021 		    kvm_pmu_should_notify_user(vcpu)) {
1022 			*ret = -EINTR;
1023 			run->exit_reason = KVM_EXIT_INTR;
1024 			return true;
1025 		}
1026 	}
1027 
1028 	if (unlikely(vcpu_on_unsupported_cpu(vcpu))) {
1029 		run->exit_reason = KVM_EXIT_FAIL_ENTRY;
1030 		run->fail_entry.hardware_entry_failure_reason = KVM_EXIT_FAIL_ENTRY_CPU_UNSUPPORTED;
1031 		run->fail_entry.cpu = smp_processor_id();
1032 		*ret = 0;
1033 		return true;
1034 	}
1035 
1036 	return kvm_request_pending(vcpu) ||
1037 			xfer_to_guest_mode_work_pending();
1038 }
1039 
1040 /*
1041  * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
1042  * the vCPU is running.
1043  *
1044  * This must be noinstr as instrumentation may make use of RCU, and this is not
1045  * safe during the EQS.
1046  */
1047 static int noinstr kvm_arm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
1048 {
1049 	int ret;
1050 
1051 	guest_state_enter_irqoff();
1052 	ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
1053 	guest_state_exit_irqoff();
1054 
1055 	return ret;
1056 }
1057 
1058 /**
1059  * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
1060  * @vcpu:	The VCPU pointer
1061  *
1062  * This function is called through the KVM_RUN ioctl from user space. It
1063  * will execute VM code in a loop until the time slice for the process is used
1064  * or some emulation is needed from user space in which case the function will
1065  * return with return value 0 and with the kvm_run structure filled in with the
1066  * required data for the requested emulation.
1067  */
1068 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
1069 {
1070 	struct kvm_run *run = vcpu->run;
1071 	int ret;
1072 
1073 	if (run->exit_reason == KVM_EXIT_MMIO) {
1074 		ret = kvm_handle_mmio_return(vcpu);
1075 		if (ret <= 0)
1076 			return ret;
1077 	}
1078 
1079 	vcpu_load(vcpu);
1080 
1081 	if (run->immediate_exit) {
1082 		ret = -EINTR;
1083 		goto out;
1084 	}
1085 
1086 	kvm_sigset_activate(vcpu);
1087 
1088 	ret = 1;
1089 	run->exit_reason = KVM_EXIT_UNKNOWN;
1090 	run->flags = 0;
1091 	while (ret > 0) {
1092 		/*
1093 		 * Check conditions before entering the guest
1094 		 */
1095 		ret = xfer_to_guest_mode_handle_work(vcpu);
1096 		if (!ret)
1097 			ret = 1;
1098 
1099 		if (ret > 0)
1100 			ret = check_vcpu_requests(vcpu);
1101 
1102 		/*
1103 		 * Preparing the interrupts to be injected also
1104 		 * involves poking the GIC, which must be done in a
1105 		 * non-preemptible context.
1106 		 */
1107 		preempt_disable();
1108 
1109 		/*
1110 		 * The VMID allocator only tracks active VMIDs per
1111 		 * physical CPU, and therefore the VMID allocated may not be
1112 		 * preserved on VMID roll-over if the task was preempted,
1113 		 * making a thread's VMID inactive. So we need to call
1114 		 * kvm_arm_vmid_update() in non-preemptible context.
1115 		 */
1116 		if (kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid) &&
1117 		    has_vhe())
1118 			__load_stage2(vcpu->arch.hw_mmu,
1119 				      vcpu->arch.hw_mmu->arch);
1120 
1121 		kvm_pmu_flush_hwstate(vcpu);
1122 
1123 		local_irq_disable();
1124 
1125 		kvm_vgic_flush_hwstate(vcpu);
1126 
1127 		kvm_pmu_update_vcpu_events(vcpu);
1128 
1129 		/*
1130 		 * Ensure we set mode to IN_GUEST_MODE after we disable
1131 		 * interrupts and before the final VCPU requests check.
1132 		 * See the comment in kvm_vcpu_exiting_guest_mode() and
1133 		 * Documentation/virt/kvm/vcpu-requests.rst
1134 		 */
1135 		smp_store_mb(vcpu->mode, IN_GUEST_MODE);
1136 
1137 		if (ret <= 0 || kvm_vcpu_exit_request(vcpu, &ret)) {
1138 			vcpu->mode = OUTSIDE_GUEST_MODE;
1139 			isb(); /* Ensure work in x_flush_hwstate is committed */
1140 			kvm_pmu_sync_hwstate(vcpu);
1141 			if (static_branch_unlikely(&userspace_irqchip_in_use))
1142 				kvm_timer_sync_user(vcpu);
1143 			kvm_vgic_sync_hwstate(vcpu);
1144 			local_irq_enable();
1145 			preempt_enable();
1146 			continue;
1147 		}
1148 
1149 		kvm_arm_setup_debug(vcpu);
1150 		kvm_arch_vcpu_ctxflush_fp(vcpu);
1151 
1152 		/**************************************************************
1153 		 * Enter the guest
1154 		 */
1155 		trace_kvm_entry(*vcpu_pc(vcpu));
1156 		guest_timing_enter_irqoff();
1157 
1158 		ret = kvm_arm_vcpu_enter_exit(vcpu);
1159 
1160 		vcpu->mode = OUTSIDE_GUEST_MODE;
1161 		vcpu->stat.exits++;
1162 		/*
1163 		 * Back from guest
1164 		 *************************************************************/
1165 
1166 		kvm_arm_clear_debug(vcpu);
1167 
1168 		/*
1169 		 * We must sync the PMU state before the vgic state so
1170 		 * that the vgic can properly sample the updated state of the
1171 		 * interrupt line.
1172 		 */
1173 		kvm_pmu_sync_hwstate(vcpu);
1174 
1175 		/*
1176 		 * Sync the vgic state before syncing the timer state because
1177 		 * the timer code needs to know if the virtual timer
1178 		 * interrupts are active.
1179 		 */
1180 		kvm_vgic_sync_hwstate(vcpu);
1181 
1182 		/*
1183 		 * Sync the timer hardware state before enabling interrupts as
1184 		 * we don't want vtimer interrupts to race with syncing the
1185 		 * timer virtual interrupt state.
1186 		 */
1187 		if (static_branch_unlikely(&userspace_irqchip_in_use))
1188 			kvm_timer_sync_user(vcpu);
1189 
1190 		kvm_arch_vcpu_ctxsync_fp(vcpu);
1191 
1192 		/*
1193 		 * We must ensure that any pending interrupts are taken before
1194 		 * we exit guest timing so that timer ticks are accounted as
1195 		 * guest time. Transiently unmask interrupts so that any
1196 		 * pending interrupts are taken.
1197 		 *
1198 		 * Per ARM DDI 0487G.b section D1.13.4, an ISB (or other
1199 		 * context synchronization event) is necessary to ensure that
1200 		 * pending interrupts are taken.
1201 		 */
1202 		if (ARM_EXCEPTION_CODE(ret) == ARM_EXCEPTION_IRQ) {
1203 			local_irq_enable();
1204 			isb();
1205 			local_irq_disable();
1206 		}
1207 
1208 		guest_timing_exit_irqoff();
1209 
1210 		local_irq_enable();
1211 
1212 		trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
1213 
1214 		/* Exit types that need handling before we can be preempted */
1215 		handle_exit_early(vcpu, ret);
1216 
1217 		preempt_enable();
1218 
1219 		/*
1220 		 * The ARMv8 architecture doesn't give the hypervisor
1221 		 * a mechanism to prevent a guest from dropping to AArch32 EL0
1222 		 * if implemented by the CPU. If we spot the guest in such a
1223 		 * state and decide it wasn't supposed to do so (as with the
1224 		 * asymmetric AArch32 case), return to userspace with a fatal
1225 		 * error.
1226 		 */
1227 		if (vcpu_mode_is_bad_32bit(vcpu)) {
1228 			/*
1229 			 * As we have caught the guest red-handed, decide that
1230 			 * it isn't fit for purpose anymore by making the vcpu
1231 			 * invalid. The VMM can try to fix it by issuing a
1232 			 * KVM_ARM_VCPU_INIT if it really wants to.
1233 			 */
1234 			vcpu_clear_flag(vcpu, VCPU_INITIALIZED);
1235 			ret = ARM_EXCEPTION_IL;
1236 		}
1237 
1238 		ret = handle_exit(vcpu, ret);
1239 	}
1240 
1241 	/* Tell userspace about in-kernel device output levels */
1242 	if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
1243 		kvm_timer_update_run(vcpu);
1244 		kvm_pmu_update_run(vcpu);
1245 	}
1246 
1247 	kvm_sigset_deactivate(vcpu);
1248 
1249 out:
1250 	/*
1251 	 * In the unlikely event that we are returning to userspace
1252 	 * with pending exceptions or PC adjustment, commit these
1253 	 * adjustments in order to give userspace a consistent view of
1254 	 * the vcpu state. Note that this relies on __kvm_adjust_pc()
1255 	 * being preempt-safe on VHE.
1256 	 */
1257 	if (unlikely(vcpu_get_flag(vcpu, PENDING_EXCEPTION) ||
1258 		     vcpu_get_flag(vcpu, INCREMENT_PC)))
1259 		kvm_call_hyp(__kvm_adjust_pc, vcpu);
1260 
1261 	vcpu_put(vcpu);
1262 	return ret;
1263 }
1264 
1265 static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
1266 {
1267 	int bit_index;
1268 	bool set;
1269 	unsigned long *hcr;
1270 
1271 	if (number == KVM_ARM_IRQ_CPU_IRQ)
1272 		bit_index = __ffs(HCR_VI);
1273 	else /* KVM_ARM_IRQ_CPU_FIQ */
1274 		bit_index = __ffs(HCR_VF);
1275 
1276 	hcr = vcpu_hcr(vcpu);
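	/* test_and_{set,clear}_bit() hand back the previous value of the bit */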
1277 	if (level)
1278 		set = test_and_set_bit(bit_index, hcr);
1279 	else
1280 		set = test_and_clear_bit(bit_index, hcr);
1281 
1282 	/*
1283 	 * If we didn't change anything, no need to wake up or kick other CPUs
1284 	 */
1285 	if (set == level)
1286 		return 0;
1287 
1288 	/*
1289 	 * The vcpu irq_lines field was updated, wake up sleeping VCPUs and
1290 	 * trigger a world-switch round on the running physical CPU to set the
1291 	 * virtual IRQ/FIQ fields in the HCR appropriately.
1292 	 */
1293 	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
1294 	kvm_vcpu_kick(vcpu);
1295 
1296 	return 0;
1297 }
1298 
1299 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
1300 			  bool line_status)
1301 {
1302 	u32 irq = irq_level->irq;
1303 	unsigned int irq_type, vcpu_id, irq_num;
1304 	struct kvm_vcpu *vcpu = NULL;
1305 	bool level = irq_level->level;
1306 
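	/*
	 * The irq value packs an irq type, a target vcpu id (extended by the
	 * VCPU2 field when KVM_CAP_ARM_IRQ_LINE_LAYOUT_2 is used) and an irq
	 * number; pull the individual fields apart before dispatching below.
	 */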
1307 	irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
1308 	vcpu_id = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
1309 	vcpu_id += ((irq >> KVM_ARM_IRQ_VCPU2_SHIFT) & KVM_ARM_IRQ_VCPU2_MASK) * (KVM_ARM_IRQ_VCPU_MASK + 1);
1310 	irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;
1311 
1312 	trace_kvm_irq_line(irq_type, vcpu_id, irq_num, irq_level->level);
1313 
1314 	switch (irq_type) {
1315 	case KVM_ARM_IRQ_TYPE_CPU:
1316 		if (irqchip_in_kernel(kvm))
1317 			return -ENXIO;
1318 
1319 		vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
1320 		if (!vcpu)
1321 			return -EINVAL;
1322 
1323 		if (irq_num > KVM_ARM_IRQ_CPU_FIQ)
1324 			return -EINVAL;
1325 
1326 		return vcpu_interrupt_line(vcpu, irq_num, level);
1327 	case KVM_ARM_IRQ_TYPE_PPI:
1328 		if (!irqchip_in_kernel(kvm))
1329 			return -ENXIO;
1330 
1331 		vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
1332 		if (!vcpu)
1333 			return -EINVAL;
1334 
1335 		if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS)
1336 			return -EINVAL;
1337 
1338 		return kvm_vgic_inject_irq(kvm, vcpu, irq_num, level, NULL);
1339 	case KVM_ARM_IRQ_TYPE_SPI:
1340 		if (!irqchip_in_kernel(kvm))
1341 			return -ENXIO;
1342 
1343 		if (irq_num < VGIC_NR_PRIVATE_IRQS)
1344 			return -EINVAL;
1345 
1346 		return kvm_vgic_inject_irq(kvm, NULL, irq_num, level, NULL);
1347 	}
1348 
1349 	return -EINVAL;
1350 }
1351 
1352 static unsigned long system_supported_vcpu_features(void)
1353 {
1354 	unsigned long features = KVM_VCPU_VALID_FEATURES;
1355 
1356 	if (!cpus_have_final_cap(ARM64_HAS_32BIT_EL1))
1357 		clear_bit(KVM_ARM_VCPU_EL1_32BIT, &features);
1358 
1359 	if (!kvm_arm_support_pmu_v3())
1360 		clear_bit(KVM_ARM_VCPU_PMU_V3, &features);
1361 
1362 	if (!system_supports_sve())
1363 		clear_bit(KVM_ARM_VCPU_SVE, &features);
1364 
1365 	if (!kvm_has_full_ptr_auth()) {
1366 		clear_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, &features);
1367 		clear_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, &features);
1368 	}
1369 
1370 	if (!cpus_have_final_cap(ARM64_HAS_NESTED_VIRT))
1371 		clear_bit(KVM_ARM_VCPU_HAS_EL2, &features);
1372 
1373 	return features;
1374 }
1375 
1376 static int kvm_vcpu_init_check_features(struct kvm_vcpu *vcpu,
1377 					const struct kvm_vcpu_init *init)
1378 {
1379 	unsigned long features = init->features[0];
1380 	int i;
1381 
1382 	if (features & ~KVM_VCPU_VALID_FEATURES)
1383 		return -ENOENT;
1384 
1385 	for (i = 1; i < ARRAY_SIZE(init->features); i++) {
1386 		if (init->features[i])
1387 			return -ENOENT;
1388 	}
1389 
1390 	if (features & ~system_supported_vcpu_features())
1391 		return -EINVAL;
1392 
1393 	/*
1394 	 * For now make sure that both address/generic pointer authentication
1395 	 * features are requested by the userspace together.
1396 	 */
1397 	if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, &features) !=
1398 	    test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, &features))
1399 		return -EINVAL;
1400 
1401 	/* Disallow NV+SVE for the time being */
1402 	if (test_bit(KVM_ARM_VCPU_HAS_EL2, &features) &&
1403 	    test_bit(KVM_ARM_VCPU_SVE, &features))
1404 		return -EINVAL;
1405 
1406 	if (!test_bit(KVM_ARM_VCPU_EL1_32BIT, &features))
1407 		return 0;
1408 
1409 	/* MTE is incompatible with AArch32 */
1410 	if (kvm_has_mte(vcpu->kvm))
1411 		return -EINVAL;
1412 
1413 	/* NV is incompatible with AArch32 */
1414 	if (test_bit(KVM_ARM_VCPU_HAS_EL2, &features))
1415 		return -EINVAL;
1416 
1417 	return 0;
1418 }
1419 
1420 static bool kvm_vcpu_init_changed(struct kvm_vcpu *vcpu,
1421 				  const struct kvm_vcpu_init *init)
1422 {
1423 	unsigned long features = init->features[0];
1424 
1425 	return !bitmap_equal(vcpu->kvm->arch.vcpu_features, &features,
1426 			     KVM_VCPU_MAX_FEATURES);
1427 }
1428 
1429 static int kvm_setup_vcpu(struct kvm_vcpu *vcpu)
1430 {
1431 	struct kvm *kvm = vcpu->kvm;
1432 	int ret = 0;
1433 
1434 	/*
1435 	 * When the vCPU has a PMU, but no PMU is set for the guest
1436 	 * yet, set the default one.
1437 	 */
1438 	if (kvm_vcpu_has_pmu(vcpu) && !kvm->arch.arm_pmu)
1439 		ret = kvm_arm_set_default_pmu(kvm);
1440 
1441 	return ret;
1442 }
1443 
1444 static int __kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
1445 				 const struct kvm_vcpu_init *init)
1446 {
1447 	unsigned long features = init->features[0];
1448 	struct kvm *kvm = vcpu->kvm;
1449 	int ret = -EINVAL;
1450 
1451 	mutex_lock(&kvm->arch.config_lock);
1452 
1453 	if (test_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags) &&
1454 	    kvm_vcpu_init_changed(vcpu, init))
1455 		goto out_unlock;
1456 
1457 	bitmap_copy(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES);
1458 
1459 	ret = kvm_setup_vcpu(vcpu);
1460 	if (ret)
1461 		goto out_unlock;
1462 
1463 	/* Now we know what it is, we can reset it. */
1464 	kvm_reset_vcpu(vcpu);
1465 
1466 	set_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags);
1467 	vcpu_set_flag(vcpu, VCPU_INITIALIZED);
1468 	ret = 0;
1469 out_unlock:
1470 	mutex_unlock(&kvm->arch.config_lock);
1471 	return ret;
1472 }
1473 
1474 static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
1475 			       const struct kvm_vcpu_init *init)
1476 {
1477 	int ret;
1478 
1479 	if (init->target != KVM_ARM_TARGET_GENERIC_V8 &&
1480 	    init->target != kvm_target_cpu())
1481 		return -EINVAL;
1482 
1483 	ret = kvm_vcpu_init_check_features(vcpu, init);
1484 	if (ret)
1485 		return ret;
1486 
1487 	if (!kvm_vcpu_initialized(vcpu))
1488 		return __kvm_vcpu_set_target(vcpu, init);
1489 
1490 	if (kvm_vcpu_init_changed(vcpu, init))
1491 		return -EINVAL;
1492 
1493 	kvm_reset_vcpu(vcpu);
1494 	return 0;
1495 }
1496 
1497 static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
1498 					 struct kvm_vcpu_init *init)
1499 {
1500 	bool power_off = false;
1501 	int ret;
1502 
1503 	/*
1504 	 * Treat the power-off vCPU feature as ephemeral. Clear the bit to avoid
1505 	 * reflecting it in the finalized feature set, thus limiting its scope
1506 	 * to a single KVM_ARM_VCPU_INIT call.
1507 	 */
1508 	if (init->features[0] & BIT(KVM_ARM_VCPU_POWER_OFF)) {
1509 		init->features[0] &= ~BIT(KVM_ARM_VCPU_POWER_OFF);
1510 		power_off = true;
1511 	}
1512 
1513 	ret = kvm_vcpu_set_target(vcpu, init);
1514 	if (ret)
1515 		return ret;
1516 
1517 	/*
1518 	 * Ensure a rebooted VM will fault in RAM pages and detect if the
1519 	 * guest MMU is turned off and flush the caches as needed.
1520 	 *
1521 	 * S2FWB enforces all memory accesses to RAM being cacheable,
1522 	 * ensuring that the data side is always coherent. We still
1523 	 * need to invalidate the I-cache though, as FWB does *not*
1524 	 * imply CTR_EL0.DIC.
1525 	 */
1526 	if (vcpu_has_run_once(vcpu)) {
1527 		if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
1528 			stage2_unmap_vm(vcpu->kvm);
1529 		else
1530 			icache_inval_all_pou();
1531 	}
1532 
1533 	vcpu_reset_hcr(vcpu);
1534 	vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu);
1535 
1536 	/*
1537 	 * Handle the "start in power-off" case.
1538 	 */
1539 	spin_lock(&vcpu->arch.mp_state_lock);
1540 
1541 	if (power_off)
1542 		__kvm_arm_vcpu_power_off(vcpu);
1543 	else
1544 		WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_RUNNABLE);
1545 
1546 	spin_unlock(&vcpu->arch.mp_state_lock);
1547 
1548 	return 0;
1549 }
1550 
1551 static int kvm_arm_vcpu_set_attr(struct kvm_vcpu *vcpu,
1552 				 struct kvm_device_attr *attr)
1553 {
1554 	int ret = -ENXIO;
1555 
1556 	switch (attr->group) {
1557 	default:
1558 		ret = kvm_arm_vcpu_arch_set_attr(vcpu, attr);
1559 		break;
1560 	}
1561 
1562 	return ret;
1563 }
1564 
1565 static int kvm_arm_vcpu_get_attr(struct kvm_vcpu *vcpu,
1566 				 struct kvm_device_attr *attr)
1567 {
1568 	int ret = -ENXIO;
1569 
1570 	switch (attr->group) {
1571 	default:
1572 		ret = kvm_arm_vcpu_arch_get_attr(vcpu, attr);
1573 		break;
1574 	}
1575 
1576 	return ret;
1577 }
1578 
1579 static int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu,
1580 				 struct kvm_device_attr *attr)
1581 {
1582 	int ret = -ENXIO;
1583 
1584 	switch (attr->group) {
1585 	default:
1586 		ret = kvm_arm_vcpu_arch_has_attr(vcpu, attr);
1587 		break;
1588 	}
1589 
1590 	return ret;
1591 }
1592 
1593 static int kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
1594 				   struct kvm_vcpu_events *events)
1595 {
1596 	memset(events, 0, sizeof(*events));
1597 
1598 	return __kvm_arm_vcpu_get_events(vcpu, events);
1599 }
1600 
1601 static int kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
1602 				   struct kvm_vcpu_events *events)
1603 {
1604 	int i;
1605 
1606 	/* check whether the reserved field is zero */
1607 	for (i = 0; i < ARRAY_SIZE(events->reserved); i++)
1608 		if (events->reserved[i])
1609 			return -EINVAL;
1610 
1611 	/* check whether the pad field is zero */
1612 	for (i = 0; i < ARRAY_SIZE(events->exception.pad); i++)
1613 		if (events->exception.pad[i])
1614 			return -EINVAL;
1615 
1616 	return __kvm_arm_vcpu_set_events(vcpu, events);
1617 }
1618 
1619 long kvm_arch_vcpu_ioctl(struct file *filp,
1620 			 unsigned int ioctl, unsigned long arg)
1621 {
1622 	struct kvm_vcpu *vcpu = filp->private_data;
1623 	void __user *argp = (void __user *)arg;
1624 	struct kvm_device_attr attr;
1625 	long r;
1626 
1627 	switch (ioctl) {
1628 	case KVM_ARM_VCPU_INIT: {
1629 		struct kvm_vcpu_init init;
1630 
1631 		r = -EFAULT;
1632 		if (copy_from_user(&init, argp, sizeof(init)))
1633 			break;
1634 
1635 		r = kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init);
1636 		break;
1637 	}
1638 	case KVM_SET_ONE_REG:
1639 	case KVM_GET_ONE_REG: {
1640 		struct kvm_one_reg reg;
1641 
1642 		r = -ENOEXEC;
1643 		if (unlikely(!kvm_vcpu_initialized(vcpu)))
1644 			break;
1645 
1646 		r = -EFAULT;
1647 		if (copy_from_user(&reg, argp, sizeof(reg)))
1648 			break;
1649 
1650 		/*
1651 		 * We could owe a reset due to PSCI. Handle the pending reset
1652 		 * here to ensure userspace register accesses are ordered after
1653 		 * the reset.
1654 		 */
1655 		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
1656 			kvm_reset_vcpu(vcpu);
1657 
1658 		if (ioctl == KVM_SET_ONE_REG)
1659 			r = kvm_arm_set_reg(vcpu, &reg);
1660 		else
1661 			r = kvm_arm_get_reg(vcpu, &reg);
1662 		break;
1663 	}
1664 	case KVM_GET_REG_LIST: {
1665 		struct kvm_reg_list __user *user_list = argp;
1666 		struct kvm_reg_list reg_list;
1667 		unsigned n;
1668 
1669 		r = -ENOEXEC;
1670 		if (unlikely(!kvm_vcpu_initialized(vcpu)))
1671 			break;
1672 
1673 		r = -EPERM;
1674 		if (!kvm_arm_vcpu_is_finalized(vcpu))
1675 			break;
1676 
1677 		r = -EFAULT;
1678 		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
1679 			break;
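		/*
		 * Always report the current number of registers; only copy
		 * the indices if the user-supplied buffer is large enough.
		 */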
1680 		n = reg_list.n;
1681 		reg_list.n = kvm_arm_num_regs(vcpu);
1682 		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
1683 			break;
1684 		r = -E2BIG;
1685 		if (n < reg_list.n)
1686 			break;
1687 		r = kvm_arm_copy_reg_indices(vcpu, user_list->reg);
1688 		break;
1689 	}
1690 	case KVM_SET_DEVICE_ATTR: {
1691 		r = -EFAULT;
1692 		if (copy_from_user(&attr, argp, sizeof(attr)))
1693 			break;
1694 		r = kvm_arm_vcpu_set_attr(vcpu, &attr);
1695 		break;
1696 	}
1697 	case KVM_GET_DEVICE_ATTR: {
1698 		r = -EFAULT;
1699 		if (copy_from_user(&attr, argp, sizeof(attr)))
1700 			break;
1701 		r = kvm_arm_vcpu_get_attr(vcpu, &attr);
1702 		break;
1703 	}
1704 	case KVM_HAS_DEVICE_ATTR: {
1705 		r = -EFAULT;
1706 		if (copy_from_user(&attr, argp, sizeof(attr)))
1707 			break;
1708 		r = kvm_arm_vcpu_has_attr(vcpu, &attr);
1709 		break;
1710 	}
1711 	case KVM_GET_VCPU_EVENTS: {
1712 		struct kvm_vcpu_events events;
1713 
1714 		if (kvm_arm_vcpu_get_events(vcpu, &events))
1715 			return -EINVAL;
1716 
1717 		if (copy_to_user(argp, &events, sizeof(events)))
1718 			return -EFAULT;
1719 
1720 		return 0;
1721 	}
1722 	case KVM_SET_VCPU_EVENTS: {
1723 		struct kvm_vcpu_events events;
1724 
1725 		if (copy_from_user(&events, argp, sizeof(events)))
1726 			return -EFAULT;
1727 
1728 		return kvm_arm_vcpu_set_events(vcpu, &events);
1729 	}
1730 	case KVM_ARM_VCPU_FINALIZE: {
1731 		int what;
1732 
1733 		if (!kvm_vcpu_initialized(vcpu))
1734 			return -ENOEXEC;
1735 
1736 		if (get_user(what, (const int __user *)argp))
1737 			return -EFAULT;
1738 
1739 		return kvm_arm_vcpu_finalize(vcpu, what);
1740 	}
1741 	default:
1742 		r = -EINVAL;
1743 	}
1744 
1745 	return r;
1746 }
1747 
1748 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
1749 {
1750 
1751 }
1752 
1753 static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
1754 					struct kvm_arm_device_addr *dev_addr)
1755 {
1756 	switch (FIELD_GET(KVM_ARM_DEVICE_ID_MASK, dev_addr->id)) {
1757 	case KVM_ARM_DEVICE_VGIC_V2:
1758 		if (!vgic_present)
1759 			return -ENXIO;
1760 		return kvm_set_legacy_vgic_v2_addr(kvm, dev_addr);
1761 	default:
1762 		return -ENODEV;
1763 	}
1764 }
1765 
1766 static int kvm_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1767 {
1768 	switch (attr->group) {
1769 	case KVM_ARM_VM_SMCCC_CTRL:
1770 		return kvm_vm_smccc_has_attr(kvm, attr);
1771 	default:
1772 		return -ENXIO;
1773 	}
1774 }
1775 
1776 static int kvm_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1777 {
1778 	switch (attr->group) {
1779 	case KVM_ARM_VM_SMCCC_CTRL:
1780 		return kvm_vm_smccc_set_attr(kvm, attr);
1781 	default:
1782 		return -ENXIO;
1783 	}
1784 }
1785 
1786 int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
1787 {
1788 	struct kvm *kvm = filp->private_data;
1789 	void __user *argp = (void __user *)arg;
1790 	struct kvm_device_attr attr;
1791 
1792 	switch (ioctl) {
1793 	case KVM_CREATE_IRQCHIP: {
1794 		int ret;
1795 		if (!vgic_present)
1796 			return -ENXIO;
1797 		mutex_lock(&kvm->lock);
1798 		ret = kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
1799 		mutex_unlock(&kvm->lock);
1800 		return ret;
1801 	}
1802 	case KVM_ARM_SET_DEVICE_ADDR: {
1803 		struct kvm_arm_device_addr dev_addr;
1804 
1805 		if (copy_from_user(&dev_addr, argp, sizeof(dev_addr)))
1806 			return -EFAULT;
1807 		return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
1808 	}
1809 	case KVM_ARM_PREFERRED_TARGET: {
1810 		struct kvm_vcpu_init init = {
1811 			.target = KVM_ARM_TARGET_GENERIC_V8,
1812 		};
1813 
1814 		if (copy_to_user(argp, &init, sizeof(init)))
1815 			return -EFAULT;
1816 
1817 		return 0;
1818 	}
1819 	case KVM_ARM_MTE_COPY_TAGS: {
1820 		struct kvm_arm_copy_mte_tags copy_tags;
1821 
1822 		if (copy_from_user(&copy_tags, argp, sizeof(copy_tags)))
1823 			return -EFAULT;
1824 		return kvm_vm_ioctl_mte_copy_tags(kvm, &copy_tags);
1825 	}
1826 	case KVM_ARM_SET_COUNTER_OFFSET: {
1827 		struct kvm_arm_counter_offset offset;
1828 
1829 		if (copy_from_user(&offset, argp, sizeof(offset)))
1830 			return -EFAULT;
1831 		return kvm_vm_ioctl_set_counter_offset(kvm, &offset);
1832 	}
1833 	case KVM_HAS_DEVICE_ATTR: {
1834 		if (copy_from_user(&attr, argp, sizeof(attr)))
1835 			return -EFAULT;
1836 
1837 		return kvm_vm_has_attr(kvm, &attr);
1838 	}
1839 	case KVM_SET_DEVICE_ATTR: {
1840 		if (copy_from_user(&attr, argp, sizeof(attr)))
1841 			return -EFAULT;
1842 
1843 		return kvm_vm_set_attr(kvm, &attr);
1844 	}
1845 	case KVM_ARM_GET_REG_WRITABLE_MASKS: {
1846 		struct reg_mask_range range;
1847 
1848 		if (copy_from_user(&range, argp, sizeof(range)))
1849 			return -EFAULT;
1850 		return kvm_vm_ioctl_get_reg_writable_masks(kvm, &range);
1851 	}
1852 	default:
1853 		return -EINVAL;
1854 	}
1855 }
1856 
1857 /* unlocks vcpus from @vcpu_lock_idx and smaller */
1858 static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
1859 {
1860 	struct kvm_vcpu *tmp_vcpu;
1861 
1862 	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
1863 		tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
1864 		mutex_unlock(&tmp_vcpu->mutex);
1865 	}
1866 }
1867 
1868 void unlock_all_vcpus(struct kvm *kvm)
1869 {
1870 	lockdep_assert_held(&kvm->lock);
1871 
1872 	unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
1873 }
1874 
1875 /* Returns true if all vcpus were locked, false otherwise */
1876 bool lock_all_vcpus(struct kvm *kvm)
1877 {
1878 	struct kvm_vcpu *tmp_vcpu;
1879 	unsigned long c;
1880 
1881 	lockdep_assert_held(&kvm->lock);
1882 
1883 	/*
1884 	 * Any time a vcpu is in an ioctl (including running), the
1885 	 * core KVM code tries to grab the vcpu->mutex.
1886 	 *
1887 	 * By grabbing the vcpu->mutex of all VCPUs we ensure that no
1888 	 * other VCPUs can fiddle with the state while we access it.
1889 	 */
1890 	kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
1891 		if (!mutex_trylock(&tmp_vcpu->mutex)) {
1892 			unlock_vcpus(kvm, c - 1);
1893 			return false;
1894 		}
1895 	}
1896 
1897 	return true;
1898 }
1899 
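/* Size of the nVHE hypervisor's per-CPU region, as bounded by the linker symbols */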
1900 static unsigned long nvhe_percpu_size(void)
1901 {
1902 	return (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_end) -
1903 		(unsigned long)CHOOSE_NVHE_SYM(__per_cpu_start);
1904 }
1905 
1906 static unsigned long nvhe_percpu_order(void)
1907 {
1908 	unsigned long size = nvhe_percpu_size();
1909 
1910 	return size ? get_order(size) : 0;
1911 }
1912 
1913 /* A lookup table holding the hypervisor VA for each vector slot */
1914 static void *hyp_spectre_vector_selector[BP_HARDEN_EL2_SLOTS];
1915 
1916 static void kvm_init_vector_slot(void *base, enum arm64_hyp_spectre_vector slot)
1917 {
1918 	hyp_spectre_vector_selector[slot] = __kvm_vector_slot2addr(base, slot);
1919 }
1920 
1921 static int kvm_init_vector_slots(void)
1922 {
1923 	int err;
1924 	void *base;
1925 
1926 	base = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
1927 	kvm_init_vector_slot(base, HYP_VECTOR_DIRECT);
1928 
1929 	base = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
1930 	kvm_init_vector_slot(base, HYP_VECTOR_SPECTRE_DIRECT);
1931 
1932 	if (kvm_system_needs_idmapped_vectors() &&
1933 	    !is_protected_kvm_enabled()) {
1934 		err = create_hyp_exec_mappings(__pa_symbol(__bp_harden_hyp_vecs),
1935 					       __BP_HARDEN_HYP_VECS_SZ, &base);
1936 		if (err)
1937 			return err;
1938 	}
1939 
1940 	kvm_init_vector_slot(base, HYP_VECTOR_INDIRECT);
1941 	kvm_init_vector_slot(base, HYP_VECTOR_SPECTRE_INDIRECT);
1942 	return 0;
1943 }
1944 
1945 static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
1946 {
1947 	struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
1948 	u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
1949 	unsigned long tcr;
1950 
1951 	/*
1952 	 * Calculate the raw per-cpu offset without a translation from the
1953 	 * kernel's mapping to the linear mapping, and store it in tpidr_el2
1954 	 * so that we can use adr_l to access per-cpu variables in EL2.
1955 	 * Also drop the KASAN tag which gets in the way...
1956 	 */
1957 	params->tpidr_el2 = (unsigned long)kasan_reset_tag(per_cpu_ptr_nvhe_sym(__per_cpu_start, cpu)) -
1958 			    (unsigned long)kvm_ksym_ref(CHOOSE_NVHE_SYM(__per_cpu_start));
1959 
1960 	params->mair_el2 = read_sysreg(mair_el1);
1961 
1962 	tcr = read_sysreg(tcr_el1);
1963 	if (cpus_have_final_cap(ARM64_KVM_HVHE)) {
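		/*
		 * hVHE runs EL2 with E2H set, so TCR_EL2 uses the VHE (TCR_EL1)
		 * layout; EPD1 disables walks via the unused TTBR1_EL2.
		 */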
1964 		tcr |= TCR_EPD1_MASK;
1965 	} else {
1966 		tcr &= TCR_EL2_MASK;
1967 		tcr |= TCR_EL2_RES1;
1968 	}
1969 	tcr &= ~TCR_T0SZ_MASK;
1970 	tcr |= TCR_T0SZ(hyp_va_bits);
1971 	tcr &= ~TCR_EL2_PS_MASK;
1972 	tcr |= FIELD_PREP(TCR_EL2_PS_MASK, kvm_get_parange(mmfr0));
1973 	if (kvm_lpa2_is_enabled())
1974 		tcr |= TCR_EL2_DS;
1975 	params->tcr_el2 = tcr;
1976 
1977 	params->pgd_pa = kvm_mmu_get_httbr();
1978 	if (is_protected_kvm_enabled())
1979 		params->hcr_el2 = HCR_HOST_NVHE_PROTECTED_FLAGS;
1980 	else
1981 		params->hcr_el2 = HCR_HOST_NVHE_FLAGS;
1982 	if (cpus_have_final_cap(ARM64_KVM_HVHE))
1983 		params->hcr_el2 |= HCR_E2H;
1984 	params->vttbr = params->vtcr = 0;
1985 
1986 	/*
1987 	 * Flush the init params from the data cache because the struct will
1988 	 * be read while the MMU is off.
1989 	 */
1990 	kvm_flush_dcache_to_poc(params, sizeof(*params));
1991 }
1992 
1993 static void hyp_install_host_vector(void)
1994 {
1995 	struct kvm_nvhe_init_params *params;
1996 	struct arm_smccc_res res;
1997 
1998 	/* Switch from the HYP stub to our own HYP init vector */
1999 	__hyp_set_vectors(kvm_get_idmap_vector());
2000 
2001 	/*
2002 	 * Call initialization code, and switch to the full-blown HYP code.
2003 	 * If the cpucaps haven't been finalized yet, something has gone very
2004 	 * wrong, and hyp will crash and burn when it uses any
2005 	 * cpus_have_*_cap() wrapper.
2006 	 */
2007 	BUG_ON(!system_capabilities_finalized());
2008 	params = this_cpu_ptr_nvhe_sym(kvm_init_params);
2009 	arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_hyp_init), virt_to_phys(params), &res);
2010 	WARN_ON(res.a0 != SMCCC_RET_SUCCESS);
2011 }
2012 
2013 static void cpu_init_hyp_mode(void)
2014 {
2015 	hyp_install_host_vector();
2016 
2017 	/*
2018 	 * Disabling SSBD on a non-VHE system requires us to enable SSBS
2019 	 * at EL2.
2020 	 */
2021 	if (this_cpu_has_cap(ARM64_SSBS) &&
2022 	    arm64_get_spectre_v4_state() == SPECTRE_VULNERABLE) {
2023 		kvm_call_hyp_nvhe(__kvm_enable_ssbs);
2024 	}
2025 }
2026 
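/* On nVHE, put this CPU's EL2 vectors back to the hyp stub. */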
2027 static void cpu_hyp_reset(void)
2028 {
2029 	if (!is_kernel_in_hyp_mode())
2030 		__hyp_reset_vectors();
2031 }
2032 
2033 /*
2034  * EL2 vectors can be mapped and rerouted in a number of ways,
2035  * depending on the kernel configuration and the CPUs present:
2036  *
2037  * - If the CPU is affected by Spectre-v2, the hardening sequence is
2038  *   placed in one of the vector slots, which is executed before jumping
2039  *   to the real vectors.
2040  *
2041  * - If the CPU also has the ARM64_SPECTRE_V3A cap, the slot
2042  *   containing the hardening sequence is mapped next to the idmap page,
2043  *   and executed before jumping to the real vectors.
2044  *
2045  * - If the CPU only has the ARM64_SPECTRE_V3A cap, then an
2046  *   empty slot is selected, mapped next to the idmap page, and
2047  *   executed before jumping to the real vectors.
2048  *
2049  * Note that ARM64_SPECTRE_V3A is somewhat incompatible with
2050  * VHE, as we don't have hypervisor-specific mappings. If the system
2051  * is VHE and yet selects this capability, it will be ignored.
2052  */
2053 static void cpu_set_hyp_vector(void)
2054 {
2055 	struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);
2056 	void *vector = hyp_spectre_vector_selector[data->slot];
2057 
2058 	if (!is_protected_kvm_enabled())
2059 		*this_cpu_ptr_hyp_sym(kvm_hyp_vector) = (unsigned long)vector;
2060 	else
2061 		kvm_call_hyp_nvhe(__pkvm_cpu_set_vector, data->slot);
2062 }
2063 
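/*
 * Per-CPU hyp initialisation is split in two: cpu_hyp_init_context()
 * sets up the host context (and, on nVHE, installs the hyp vectors and
 * init parameters), while cpu_hyp_init_features() configures the
 * run-time features (vector slot, debug, VHE timer, vgic).
 */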
2064 static void cpu_hyp_init_context(void)
2065 {
2066 	kvm_init_host_cpu_context(host_data_ptr(host_ctxt));
2067 
2068 	if (!is_kernel_in_hyp_mode())
2069 		cpu_init_hyp_mode();
2070 }
2071 
2072 static void cpu_hyp_init_features(void)
2073 {
2074 	cpu_set_hyp_vector();
2075 	kvm_arm_init_debug();
2076 
2077 	if (is_kernel_in_hyp_mode())
2078 		kvm_timer_init_vhe();
2079 
2080 	if (vgic_present)
2081 		kvm_vgic_init_cpu_hardware();
2082 }
2083 
2084 static void cpu_hyp_reinit(void)
2085 {
2086 	cpu_hyp_reset();
2087 	cpu_hyp_init_context();
2088 	cpu_hyp_init_features();
2089 }
2090 
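/*
 * kvm_hyp_initialized tracks, per CPU, whether hyp has been set up, so
 * that cpu_hyp_init()/cpu_hyp_uninit() are safe to call more than once.
 */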
2091 static void cpu_hyp_init(void *discard)
2092 {
2093 	if (!__this_cpu_read(kvm_hyp_initialized)) {
2094 		cpu_hyp_reinit();
2095 		__this_cpu_write(kvm_hyp_initialized, 1);
2096 	}
2097 }
2098 
2099 static void cpu_hyp_uninit(void *discard)
2100 {
2101 	if (__this_cpu_read(kvm_hyp_initialized)) {
2102 		cpu_hyp_reset();
2103 		__this_cpu_write(kvm_hyp_initialized, 0);
2104 	}
2105 }
2106 
2107 int kvm_arch_hardware_enable(void)
2108 {
2109 	/*
2110 	 * Most calls to this function are made with migration
2111 	 * disabled, but not with preemption disabled. The former is
2112 	 * enough to ensure correctness, but most of the helpers
2113 	 * expect the later and will throw a tantrum otherwise.
2114 	 * expect the latter and will throw a tantrum otherwise.
2115 	preempt_disable();
2116 
2117 	cpu_hyp_init(NULL);
2118 
2119 	kvm_vgic_cpu_up();
2120 	kvm_timer_cpu_up();
2121 
2122 	preempt_enable();
2123 
2124 	return 0;
2125 }
2126 
2127 void kvm_arch_hardware_disable(void)
2128 {
2129 	kvm_timer_cpu_down();
2130 	kvm_vgic_cpu_down();
2131 
2132 	if (!is_protected_kvm_enabled())
2133 		cpu_hyp_uninit(NULL);
2134 }
2135 
2136 #ifdef CONFIG_CPU_PM
2137 static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
2138 				    unsigned long cmd,
2139 				    void *v)
2140 {
2141 	/*
2142 	 * kvm_hyp_initialized is left with its old value over
2143 	 * PM_ENTER->PM_EXIT. It is used to indicate PM_EXIT should
2144 	 * re-enable hyp.
2145 	 */
2146 	switch (cmd) {
2147 	case CPU_PM_ENTER:
2148 		if (__this_cpu_read(kvm_hyp_initialized))
2149 			/*
2150 			 * don't update kvm_hyp_initialized here
2151 			 * so that the hyp will be re-enabled
2152 			 * when we resume. See below.
2153 			 */
2154 			cpu_hyp_reset();
2155 
2156 		return NOTIFY_OK;
2157 	case CPU_PM_ENTER_FAILED:
2158 	case CPU_PM_EXIT:
2159 		if (__this_cpu_read(kvm_hyp_initialized))
2160 			/* The hyp was enabled before suspend. */
2161 			cpu_hyp_reinit();
2162 
2163 		return NOTIFY_OK;
2164 
2165 	default:
2166 		return NOTIFY_DONE;
2167 	}
2168 }
2169 
2170 static struct notifier_block hyp_init_cpu_pm_nb = {
2171 	.notifier_call = hyp_init_cpu_pm_notifier,
2172 };
2173 
2174 static void __init hyp_cpu_pm_init(void)
2175 {
2176 	if (!is_protected_kvm_enabled())
2177 		cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
2178 }
2179 static void __init hyp_cpu_pm_exit(void)
2180 {
2181 	if (!is_protected_kvm_enabled())
2182 		cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb);
2183 }
2184 #else
2185 static inline void __init hyp_cpu_pm_init(void)
2186 {
2187 }
2188 static inline void __init hyp_cpu_pm_exit(void)
2189 {
2190 }
2191 #endif
2192 
2193 static void __init init_cpu_logical_map(void)
2194 {
2195 	unsigned int cpu;
2196 
2197 	/*
2198 	 * Copy the MPIDR <-> logical CPU ID mapping to hyp.
2199 	 * Only copy the set of online CPUs whose features have been checked
2200 	 * against the finalized system capabilities. The hypervisor will not
2201 	 * allow any other CPUs from the `possible` set to boot.
2202 	 */
2203 	for_each_online_cpu(cpu)
2204 		hyp_cpu_logical_map[cpu] = cpu_logical_map(cpu);
2205 }
2206 
2207 #define init_psci_0_1_impl_state(config, what)	\
2208 	config.psci_0_1_ ## what ## _implemented = psci_ops.what
2209 
2210 static bool __init init_psci_relay(void)
2211 {
2212 	/*
2213 	 * If PSCI has not been initialized, protected KVM cannot install
2214 	 * itself on newly booted CPUs.
2215 	 */
2216 	if (!psci_ops.get_version) {
2217 		kvm_err("Cannot initialize protected mode without PSCI\n");
2218 		return false;
2219 	}
2220 
2221 	kvm_host_psci_config.version = psci_ops.get_version();
2222 	kvm_host_psci_config.smccc_version = arm_smccc_get_version();
2223 
2224 	if (kvm_host_psci_config.version == PSCI_VERSION(0, 1)) {
2225 		kvm_host_psci_config.function_ids_0_1 = get_psci_0_1_function_ids();
2226 		init_psci_0_1_impl_state(kvm_host_psci_config, cpu_suspend);
2227 		init_psci_0_1_impl_state(kvm_host_psci_config, cpu_on);
2228 		init_psci_0_1_impl_state(kvm_host_psci_config, cpu_off);
2229 		init_psci_0_1_impl_state(kvm_host_psci_config, migrate);
2230 	}
2231 	return true;
2232 }
2233 
2234 static int __init init_subsystems(void)
2235 {
2236 	int err = 0;
2237 
2238 	/*
2239 	 * Enable hardware so that subsystem initialisation can access EL2.
2240 	 */
2241 	on_each_cpu(cpu_hyp_init, NULL, 1);
2242 
2243 	/*
2244 	 * Register CPU lower-power notifier
2245 	 * Register the CPU low-power (CPU_PM) notifier
2246 	hyp_cpu_pm_init();
2247 
2248 	/*
2249 	 * Init HYP view of VGIC
2250 	 */
2251 	err = kvm_vgic_hyp_init();
2252 	switch (err) {
2253 	case 0:
2254 		vgic_present = true;
2255 		break;
2256 	case -ENODEV:
2257 	case -ENXIO:
2258 		vgic_present = false;
2259 		err = 0;
2260 		break;
2261 	default:
2262 		goto out;
2263 	}
2264 
2265 	/*
2266 	 * Init HYP architected timer support
2267 	 */
2268 	err = kvm_timer_hyp_init(vgic_present);
2269 	if (err)
2270 		goto out;
2271 
2272 	kvm_register_perf_callbacks(NULL);
2273 
2274 out:
2275 	if (err)
2276 		hyp_cpu_pm_exit();
2277 
2278 	if (err || !is_protected_kvm_enabled())
2279 		on_each_cpu(cpu_hyp_uninit, NULL, 1);
2280 
2281 	return err;
2282 }
2283 
2284 static void __init teardown_subsystems(void)
2285 {
2286 	kvm_unregister_perf_callbacks();
2287 	hyp_cpu_pm_exit();
2288 }
2289 
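/*
 * Undo init_hyp_mode(): free the hyp page tables, the per-CPU stack
 * pages and the nVHE per-CPU regions.
 */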
2290 static void __init teardown_hyp_mode(void)
2291 {
2292 	int cpu;
2293 
2294 	free_hyp_pgds();
2295 	for_each_possible_cpu(cpu) {
2296 		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
2297 		free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order());
2298 	}
2299 }
2300 
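/*
 * Hand the hyp memory pool and per-CPU areas over to the pKVM hypervisor
 * via the __pkvm_init hypercall, with preemption disabled on the
 * initialising CPU.
 */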
2301 static int __init do_pkvm_init(u32 hyp_va_bits)
2302 {
2303 	void *per_cpu_base = kvm_ksym_ref(kvm_nvhe_sym(kvm_arm_hyp_percpu_base));
2304 	int ret;
2305 
2306 	preempt_disable();
2307 	cpu_hyp_init_context();
2308 	ret = kvm_call_hyp_nvhe(__pkvm_init, hyp_mem_base, hyp_mem_size,
2309 				num_possible_cpus(), kern_hyp_va(per_cpu_base),
2310 				hyp_va_bits);
2311 	cpu_hyp_init_features();
2312 
2313 	/*
2314 	 * The stub hypercalls are now disabled, so set our local flag to
2315 	 * prevent a later re-init attempt in kvm_arch_hardware_enable().
2316 	 */
2317 	__this_cpu_write(kvm_hyp_initialized, 1);
2318 	preempt_enable();
2319 
2320 	return ret;
2321 }
2322 
2323 static u64 get_hyp_id_aa64pfr0_el1(void)
2324 {
2325 	/*
2326 	 * Track whether the system isn't affected by spectre/meltdown in the
2327 	 * hypervisor's view of id_aa64pfr0_el1, used for protected VMs.
2328 	 * Although this is per-CPU, we make it global for simplicity, e.g., not
2329 	 * to have to worry about vcpu migration.
2330 	 *
2331 	 * Unlike for non-protected VMs, userspace cannot override this for
2332 	 * protected VMs.
2333 	 */
2334 	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
2335 
2336 	val &= ~(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) |
2337 		 ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3));
2338 
2339 	val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2),
2340 			  arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED);
2341 	val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3),
2342 			  arm64_get_meltdown_state() == SPECTRE_UNAFFECTED);
2343 
2344 	return val;
2345 }
2346 
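/*
 * Mirror the sanitised ID register values (plus a couple of other
 * globals) into the nVHE hypervisor's own copies of those variables.
 */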
2347 static void kvm_hyp_init_symbols(void)
2348 {
2349 	kvm_nvhe_sym(id_aa64pfr0_el1_sys_val) = get_hyp_id_aa64pfr0_el1();
2350 	kvm_nvhe_sym(id_aa64pfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
2351 	kvm_nvhe_sym(id_aa64isar0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR0_EL1);
2352 	kvm_nvhe_sym(id_aa64isar1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR1_EL1);
2353 	kvm_nvhe_sym(id_aa64isar2_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1);
2354 	kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
2355 	kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
2356 	kvm_nvhe_sym(id_aa64mmfr2_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR2_EL1);
2357 	kvm_nvhe_sym(id_aa64smfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64SMFR0_EL1);
2358 	kvm_nvhe_sym(__icache_flags) = __icache_flags;
2359 	kvm_nvhe_sym(kvm_arm_vmid_bits) = kvm_arm_vmid_bits;
2360 }
2361 
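/*
 * Map the reserved hyp memory pool at EL2 and run __pkvm_init. Once the
 * hypervisor has taken over its own page tables, the host-managed hyp
 * page tables are no longer needed and are freed.
 */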
2362 static int __init kvm_hyp_init_protection(u32 hyp_va_bits)
2363 {
2364 	void *addr = phys_to_virt(hyp_mem_base);
2365 	int ret;
2366 
2367 	ret = create_hyp_mappings(addr, addr + hyp_mem_size, PAGE_HYP);
2368 	if (ret)
2369 		return ret;
2370 
2371 	ret = do_pkvm_init(hyp_va_bits);
2372 	if (ret)
2373 		return ret;
2374 
2375 	free_hyp_pgds();
2376 
2377 	return 0;
2378 }
2379 
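/*
 * Seed each CPU's hyp context with random pointer authentication keys,
 * for use by the protected-mode hypervisor itself.
 */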
2380 static void pkvm_hyp_init_ptrauth(void)
2381 {
2382 	struct kvm_cpu_context *hyp_ctxt;
2383 	int cpu;
2384 
2385 	for_each_possible_cpu(cpu) {
2386 		hyp_ctxt = per_cpu_ptr_nvhe_sym(kvm_hyp_ctxt, cpu);
2387 		hyp_ctxt->sys_regs[APIAKEYLO_EL1] = get_random_long();
2388 		hyp_ctxt->sys_regs[APIAKEYHI_EL1] = get_random_long();
2389 		hyp_ctxt->sys_regs[APIBKEYLO_EL1] = get_random_long();
2390 		hyp_ctxt->sys_regs[APIBKEYHI_EL1] = get_random_long();
2391 		hyp_ctxt->sys_regs[APDAKEYLO_EL1] = get_random_long();
2392 		hyp_ctxt->sys_regs[APDAKEYHI_EL1] = get_random_long();
2393 		hyp_ctxt->sys_regs[APDBKEYLO_EL1] = get_random_long();
2394 		hyp_ctxt->sys_regs[APDBKEYHI_EL1] = get_random_long();
2395 		hyp_ctxt->sys_regs[APGAKEYLO_EL1] = get_random_long();
2396 		hyp_ctxt->sys_regs[APGAKEYHI_EL1] = get_random_long();
2397 	}
2398 }
2399 
2400 /* Inits Hyp-mode on all online CPUs */
2401 static int __init init_hyp_mode(void)
2402 {
2403 	u32 hyp_va_bits;
2404 	int cpu;
2405 	int err = -ENOMEM;
2406 
2407 	/*
2408 	 * The protected Hyp-mode cannot be initialized if the memory pool
2409 	 * allocation has failed.
2410 	 */
2411 	if (is_protected_kvm_enabled() && !hyp_mem_base)
2412 		goto out_err;
2413 
2414 	/*
2415 	 * Allocate Hyp PGD and setup Hyp identity mapping
2416 	 */
2417 	err = kvm_mmu_init(&hyp_va_bits);
2418 	if (err)
2419 		goto out_err;
2420 
2421 	/*
2422 	 * Allocate stack pages for Hypervisor-mode
2423 	 */
2424 	for_each_possible_cpu(cpu) {
2425 		unsigned long stack_page;
2426 
2427 		stack_page = __get_free_page(GFP_KERNEL);
2428 		if (!stack_page) {
2429 			err = -ENOMEM;
2430 			goto out_err;
2431 		}
2432 
2433 		per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
2434 	}
2435 
2436 	/*
2437 	 * Allocate and initialize pages for Hypervisor-mode percpu regions.
2438 	 */
2439 	for_each_possible_cpu(cpu) {
2440 		struct page *page;
2441 		void *page_addr;
2442 
2443 		page = alloc_pages(GFP_KERNEL, nvhe_percpu_order());
2444 		if (!page) {
2445 			err = -ENOMEM;
2446 			goto out_err;
2447 		}
2448 
2449 		page_addr = page_address(page);
2450 		memcpy(page_addr, CHOOSE_NVHE_SYM(__per_cpu_start), nvhe_percpu_size());
2451 		kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu] = (unsigned long)page_addr;
2452 	}
2453 
2454 	/*
2455 	 * Map the Hyp-code called directly from the host
2456 	 */
2457 	err = create_hyp_mappings(kvm_ksym_ref(__hyp_text_start),
2458 				  kvm_ksym_ref(__hyp_text_end), PAGE_HYP_EXEC);
2459 	if (err) {
2460 		kvm_err("Cannot map world-switch code\n");
2461 		goto out_err;
2462 	}
2463 
2464 	err = create_hyp_mappings(kvm_ksym_ref(__hyp_rodata_start),
2465 				  kvm_ksym_ref(__hyp_rodata_end), PAGE_HYP_RO);
2466 	if (err) {
2467 		kvm_err("Cannot map .hyp.rodata section\n");
2468 		goto out_err;
2469 	}
2470 
2471 	err = create_hyp_mappings(kvm_ksym_ref(__start_rodata),
2472 				  kvm_ksym_ref(__end_rodata), PAGE_HYP_RO);
2473 	if (err) {
2474 		kvm_err("Cannot map rodata section\n");
2475 		goto out_err;
2476 	}
2477 
2478 	/*
2479 	 * .hyp.bss is guaranteed to be placed at the beginning of the .bss
2480 	 * section thanks to an assertion in the linker script. Map it RW and
2481 	 * the rest of .bss RO.
2482 	 */
2483 	err = create_hyp_mappings(kvm_ksym_ref(__hyp_bss_start),
2484 				  kvm_ksym_ref(__hyp_bss_end), PAGE_HYP);
2485 	if (err) {
2486 		kvm_err("Cannot map hyp bss section: %d\n", err);
2487 		goto out_err;
2488 	}
2489 
2490 	err = create_hyp_mappings(kvm_ksym_ref(__hyp_bss_end),
2491 				  kvm_ksym_ref(__bss_stop), PAGE_HYP_RO);
2492 	if (err) {
2493 		kvm_err("Cannot map bss section\n");
2494 		goto out_err;
2495 	}
2496 
2497 	/*
2498 	 * Map the Hyp stack pages
2499 	 */
2500 	for_each_possible_cpu(cpu) {
2501 		struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
2502 		char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
2503 
2504 		err = create_hyp_stack(__pa(stack_page), &params->stack_hyp_va);
2505 		if (err) {
2506 			kvm_err("Cannot map hyp stack\n");
2507 			goto out_err;
2508 		}
2509 
2510 		/*
2511 		 * Save the stack PA in nvhe_init_params. This will be needed
2512 		 * to recreate the stack mapping in protected nVHE mode.
2513 		 * __hyp_pa() won't do the right thing there, since the stack
2514 		 * has been mapped in the flexible private VA space.
2515 		 */
2516 		params->stack_pa = __pa(stack_page);
2517 	}
2518 
2519 	for_each_possible_cpu(cpu) {
2520 		char *percpu_begin = (char *)kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu];
2521 		char *percpu_end = percpu_begin + nvhe_percpu_size();
2522 
2523 		/* Map Hyp percpu pages */
2524 		err = create_hyp_mappings(percpu_begin, percpu_end, PAGE_HYP);
2525 		if (err) {
2526 			kvm_err("Cannot map hyp percpu region\n");
2527 			goto out_err;
2528 		}
2529 
2530 		/* Prepare the CPU initialization parameters */
2531 		cpu_prepare_hyp_mode(cpu, hyp_va_bits);
2532 	}
2533 
2534 	kvm_hyp_init_symbols();
2535 
2536 	if (is_protected_kvm_enabled()) {
2537 		if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL) &&
2538 		    cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH))
2539 			pkvm_hyp_init_ptrauth();
2540 
2541 		init_cpu_logical_map();
2542 
2543 		if (!init_psci_relay()) {
2544 			err = -ENODEV;
2545 			goto out_err;
2546 		}
2547 
2548 		err = kvm_hyp_init_protection(hyp_va_bits);
2549 		if (err) {
2550 			kvm_err("Failed to init hyp memory protection\n");
2551 			goto out_err;
2552 		}
2553 	}
2554 
2555 	return 0;
2556 
2557 out_err:
2558 	teardown_hyp_mode();
2559 	kvm_err("error initializing Hyp mode: %d\n", err);
2560 	return err;
2561 }
2562 
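/*
 * Look up the vcpu whose MPIDR affinity matches @mpidr: use the
 * compressed MPIDR-to-index table when one has been built for the VM,
 * and fall back to a linear scan of the vcpus otherwise.
 */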
2563 struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
2564 {
2565 	struct kvm_vcpu *vcpu;
2566 	unsigned long i;
2567 
2568 	mpidr &= MPIDR_HWID_BITMASK;
2569 
2570 	if (kvm->arch.mpidr_data) {
2571 		u16 idx = kvm_mpidr_index(kvm->arch.mpidr_data, mpidr);
2572 
2573 		vcpu = kvm_get_vcpu(kvm,
2574 				    kvm->arch.mpidr_data->cmpidr_to_idx[idx]);
2575 		if (mpidr != kvm_vcpu_get_mpidr_aff(vcpu))
2576 			vcpu = NULL;
2577 
2578 		return vcpu;
2579 	}
2580 
2581 	kvm_for_each_vcpu(i, vcpu, kvm) {
2582 		if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
2583 			return vcpu;
2584 	}
2585 	return NULL;
2586 }
2587 
2588 bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
2589 {
2590 	return irqchip_in_kernel(kvm);
2591 }
2592 
2593 bool kvm_arch_has_irq_bypass(void)
2594 {
2595 	return true;
2596 }
2597 
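/*
 * IRQ bypass support: when an irqfd is paired with a producer, forward
 * it to the vgic so that, where the hardware allows it (e.g. GICv4),
 * the interrupt can be injected directly into the guest.
 */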
2598 int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
2599 				      struct irq_bypass_producer *prod)
2600 {
2601 	struct kvm_kernel_irqfd *irqfd =
2602 		container_of(cons, struct kvm_kernel_irqfd, consumer);
2603 
2604 	return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq,
2605 					  &irqfd->irq_entry);
2606 }
2607 void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
2608 				      struct irq_bypass_producer *prod)
2609 {
2610 	struct kvm_kernel_irqfd *irqfd =
2611 		container_of(cons, struct kvm_kernel_irqfd, consumer);
2612 
2613 	kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq,
2614 				     &irqfd->irq_entry);
2615 }
2616 
2617 void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *cons)
2618 {
2619 	struct kvm_kernel_irqfd *irqfd =
2620 		container_of(cons, struct kvm_kernel_irqfd, consumer);
2621 
2622 	kvm_arm_halt_guest(irqfd->kvm);
2623 }
2624 
2625 void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *cons)
2626 {
2627 	struct kvm_kernel_irqfd *irqfd =
2628 		container_of(cons, struct kvm_kernel_irqfd, consumer);
2629 
2630 	kvm_arm_resume_guest(irqfd->kvm);
2631 }
2632 
2633 /* Initialize Hyp-mode and memory mappings on all CPUs */
2634 static __init int kvm_arm_init(void)
2635 {
2636 	int err;
2637 	bool in_hyp_mode;
2638 
2639 	if (!is_hyp_mode_available()) {
2640 		kvm_info("HYP mode not available\n");
2641 		return -ENODEV;
2642 	}
2643 
2644 	if (kvm_get_mode() == KVM_MODE_NONE) {
2645 		kvm_info("KVM disabled from command line\n");
2646 		return -ENODEV;
2647 	}
2648 
2649 	err = kvm_sys_reg_table_init();
2650 	if (err) {
2651 		kvm_info("Error initializing system register tables\n");
2652 		return err;
2653 	}
2654 
2655 	in_hyp_mode = is_kernel_in_hyp_mode();
2656 
2657 	if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) ||
2658 	    cpus_have_final_cap(ARM64_WORKAROUND_1508412))
2659 		kvm_info("Guests without required CPU erratum workarounds can deadlock system!\n" \
2660 			 "Only trusted guests should be used on this system.\n");
2661 
2662 	err = kvm_set_ipa_limit();
2663 	if (err)
2664 		return err;
2665 
2666 	err = kvm_arm_init_sve();
2667 	if (err)
2668 		return err;
2669 
2670 	err = kvm_arm_vmid_alloc_init();
2671 	if (err) {
2672 		kvm_err("Failed to initialize VMID allocator.\n");
2673 		return err;
2674 	}
2675 
2676 	if (!in_hyp_mode) {
2677 		err = init_hyp_mode();
2678 		if (err)
2679 			goto out_err;
2680 	}
2681 
2682 	err = kvm_init_vector_slots();
2683 	if (err) {
2684 		kvm_err("Cannot initialise vector slots\n");
2685 		goto out_hyp;
2686 	}
2687 
2688 	err = init_subsystems();
2689 	if (err)
2690 		goto out_hyp;
2691 
2692 	kvm_info("%s%sVHE mode initialized successfully\n",
2693 		 in_hyp_mode ? "" : (is_protected_kvm_enabled() ?
2694 				     "Protected " : "Hyp "),
2695 		 in_hyp_mode ? "" : (cpus_have_final_cap(ARM64_KVM_HVHE) ?
2696 				     "h" : "n"));
2697 
2698 	/*
2699 	 * FIXME: Do something reasonable if kvm_init() fails after pKVM
2700 	 * hypervisor protection is finalized.
2701 	 */
2702 	err = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
2703 	if (err)
2704 		goto out_subs;
2705 
2706 	kvm_arm_initialised = true;
2707 
2708 	return 0;
2709 
2710 out_subs:
2711 	teardown_subsystems();
2712 out_hyp:
2713 	if (!in_hyp_mode)
2714 		teardown_hyp_mode();
2715 out_err:
2716 	kvm_arm_vmid_alloc_free();
2717 	return err;
2718 }
2719 
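/*
 * Parse the kvm-arm.mode= early parameter: "none", "protected", "nvhe"
 * or "nested", subject to the mode being usable on this system.
 */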
2720 static int __init early_kvm_mode_cfg(char *arg)
2721 {
2722 	if (!arg)
2723 		return -EINVAL;
2724 
2725 	if (strcmp(arg, "none") == 0) {
2726 		kvm_mode = KVM_MODE_NONE;
2727 		return 0;
2728 	}
2729 
2730 	if (!is_hyp_mode_available()) {
2731 		pr_warn_once("KVM is not available. Ignoring kvm-arm.mode\n");
2732 		return 0;
2733 	}
2734 
2735 	if (strcmp(arg, "protected") == 0) {
2736 		if (!is_kernel_in_hyp_mode())
2737 			kvm_mode = KVM_MODE_PROTECTED;
2738 		else
2739 			pr_warn_once("Protected KVM not available with VHE\n");
2740 
2741 		return 0;
2742 	}
2743 
2744 	if (strcmp(arg, "nvhe") == 0 && !WARN_ON(is_kernel_in_hyp_mode())) {
2745 		kvm_mode = KVM_MODE_DEFAULT;
2746 		return 0;
2747 	}
2748 
2749 	if (strcmp(arg, "nested") == 0 && !WARN_ON(!is_kernel_in_hyp_mode())) {
2750 		kvm_mode = KVM_MODE_NV;
2751 		return 0;
2752 	}
2753 
2754 	return -EINVAL;
2755 }
2756 early_param("kvm-arm.mode", early_kvm_mode_cfg);
2757 
2758 enum kvm_mode kvm_get_mode(void)
2759 {
2760 	return kvm_mode;
2761 }
2762 
2763 module_init(kvm_arm_init);
2764