1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
4 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
5 */
6
7 #include <linux/arm-smccc.h>
8 #include <linux/bug.h>
9 #include <linux/cpu_pm.h>
10 #include <linux/errno.h>
11 #include <linux/err.h>
12 #include <linux/kvm_host.h>
13 #include <linux/list.h>
14 #include <linux/module.h>
15 #include <linux/vmalloc.h>
16 #include <linux/fs.h>
17 #include <linux/mman.h>
18 #include <linux/sched.h>
19 #include <linux/kvm.h>
20 #include <linux/kvm_irqfd.h>
21 #include <linux/irqbypass.h>
22 #include <linux/sched/stat.h>
23 #include <linux/psci.h>
24 #include <trace/events/kvm.h>
25
26 #define CREATE_TRACE_POINTS
27 #include "trace_arm.h"
28 #include "hyp_trace.h"
29
30 #include <linux/uaccess.h>
31 #include <asm/ptrace.h>
32 #include <asm/mman.h>
33 #include <asm/tlbflush.h>
34 #include <asm/cacheflush.h>
35 #include <asm/cpufeature.h>
36 #include <asm/virt.h>
37 #include <asm/kvm_arm.h>
38 #include <asm/kvm_asm.h>
39 #include <asm/kvm_emulate.h>
40 #include <asm/kvm_hyp.h>
41 #include <asm/kvm_mmu.h>
42 #include <asm/kvm_nested.h>
43 #include <asm/kvm_pkvm.h>
44 #include <asm/kvm_ptrauth.h>
45 #include <asm/sections.h>
46 #include <asm/stacktrace/nvhe.h>
47
48 #include <kvm/arm_hypercalls.h>
49 #include <kvm/arm_pmu.h>
50 #include <kvm/arm_psci.h>
51 #include <kvm/arm_vgic.h>
52
53 #include <linux/irqchip/arm-gic-v5.h>
54
55 #include "sys_regs.h"
56
57 static enum kvm_mode kvm_mode = KVM_MODE_DEFAULT;
58
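/*
 * Policies for trapping guest WFI/WFE. The default only leaves WFx
 * untrapped when the vCPU thread is the sole runnable task on its
 * physical CPU; the other two options force trapping on or off.
 */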
59 enum kvm_wfx_trap_policy {
60 KVM_WFX_NOTRAP_SINGLE_TASK, /* Default option */
61 KVM_WFX_NOTRAP,
62 KVM_WFX_TRAP,
63 };
64
65 static enum kvm_wfx_trap_policy kvm_wfi_trap_policy __read_mostly = KVM_WFX_NOTRAP_SINGLE_TASK;
66 static enum kvm_wfx_trap_policy kvm_wfe_trap_policy __read_mostly = KVM_WFX_NOTRAP_SINGLE_TASK;
67
68 /*
69 * Tracks KVM IOCTLs and their associated KVM capabilities.
70 */
71 struct kvm_ioctl_cap_map {
72 unsigned int ioctl;
73 long ext;
74 };
75
76 /* Make KVM_CAP_NR_VCPUS the reference for features we always supported */
77 #define KVM_CAP_ARM_BASIC KVM_CAP_NR_VCPUS
78
79 /*
80 * Sorted by ioctl to allow for potential binary search,
81 * though linear scan is sufficient for this size.
82 */
83 static const struct kvm_ioctl_cap_map vm_ioctl_caps[] = {
84 { KVM_CREATE_IRQCHIP, KVM_CAP_IRQCHIP },
85 { KVM_ARM_SET_DEVICE_ADDR, KVM_CAP_ARM_SET_DEVICE_ADDR },
86 { KVM_ARM_MTE_COPY_TAGS, KVM_CAP_ARM_MTE },
87 { KVM_SET_DEVICE_ATTR, KVM_CAP_DEVICE_CTRL },
88 { KVM_GET_DEVICE_ATTR, KVM_CAP_DEVICE_CTRL },
89 { KVM_HAS_DEVICE_ATTR, KVM_CAP_DEVICE_CTRL },
90 { KVM_ARM_SET_COUNTER_OFFSET, KVM_CAP_COUNTER_OFFSET },
91 { KVM_ARM_GET_REG_WRITABLE_MASKS, KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES },
92 { KVM_ARM_PREFERRED_TARGET, KVM_CAP_ARM_BASIC },
93 };
94
95 /*
96 * Set *ext to the capability.
97 * Return 0 if found, or -EINVAL if no IOCTL matches.
98 */
99 long kvm_get_cap_for_kvm_ioctl(unsigned int ioctl, long *ext)
100 {
101 int i;
102
103 for (i = 0; i < ARRAY_SIZE(vm_ioctl_caps); i++) {
104 if (vm_ioctl_caps[i].ioctl == ioctl) {
105 *ext = vm_ioctl_caps[i].ext;
106 return 0;
107 }
108 }
109
110 return -EINVAL;
111 }
112
113 DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);
114
115 DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_base);
116 DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
117
118 DECLARE_KVM_NVHE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
119
120 static bool vgic_present, kvm_arm_initialised;
121
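/* Per-CPU flag recording whether EL2 has been initialised for KVM on that CPU. */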
122 static DEFINE_PER_CPU(unsigned char, kvm_hyp_initialized);
123
124 bool is_kvm_arm_initialised(void)
125 {
126 return kvm_arm_initialised;
127 }
128
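/*
 * A kick (IPI) is only needed if the target vCPU is currently executing
 * guest code; kvm_vcpu_exiting_guest_mode() atomically moves it to
 * EXITING_GUEST_MODE so the pending request is seen before re-entry.
 */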
129 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
130 {
131 return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
132 }
133
134 int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
135 struct kvm_enable_cap *cap)
136 {
137 int r = -EINVAL;
138
139 if (cap->flags)
140 return -EINVAL;
141
142 if (is_protected_kvm_enabled() && !kvm_pkvm_ext_allowed(kvm, cap->cap))
143 return -EINVAL;
144
145 switch (cap->cap) {
146 case KVM_CAP_ARM_NISV_TO_USER:
147 r = 0;
148 set_bit(KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER,
149 &kvm->arch.flags);
150 break;
151 case KVM_CAP_ARM_MTE:
152 mutex_lock(&kvm->lock);
153 if (system_supports_mte() && !kvm->created_vcpus) {
154 r = 0;
155 set_bit(KVM_ARCH_FLAG_MTE_ENABLED, &kvm->arch.flags);
156 }
157 mutex_unlock(&kvm->lock);
158 break;
159 case KVM_CAP_ARM_SYSTEM_SUSPEND:
160 r = 0;
161 set_bit(KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED, &kvm->arch.flags);
162 break;
163 case KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE:
164 mutex_lock(&kvm->slots_lock);
165 /*
166 * To keep things simple, allow changing the chunk
167 * size only when no memory slots have been created.
168 */
169 if (kvm_are_all_memslots_empty(kvm)) {
170 u64 new_cap = cap->args[0];
171
172 if (!new_cap || kvm_is_block_size_supported(new_cap)) {
173 r = 0;
174 kvm->arch.mmu.split_page_chunk_size = new_cap;
175 }
176 }
177 mutex_unlock(&kvm->slots_lock);
178 break;
179 case KVM_CAP_ARM_WRITABLE_IMP_ID_REGS:
180 mutex_lock(&kvm->lock);
181 if (!kvm->created_vcpus) {
182 r = 0;
183 set_bit(KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS, &kvm->arch.flags);
184 }
185 mutex_unlock(&kvm->lock);
186 break;
187 case KVM_CAP_ARM_SEA_TO_USER:
188 r = 0;
189 set_bit(KVM_ARCH_FLAG_EXIT_SEA, &kvm->arch.flags);
190 break;
191 default:
192 break;
193 }
194
195 return r;
196 }
197
198 static int kvm_arm_default_max_vcpus(void)
199 {
200 return vgic_present ? kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;
201 }
202
203 /**
204 * kvm_arch_init_vm - initializes a VM data structure
205 * @kvm: pointer to the KVM struct
206 * @type: kvm device type
207 */
208 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
209 {
210 int ret;
211
212 if (type & ~KVM_VM_TYPE_ARM_MASK)
213 return -EINVAL;
214
215 mutex_init(&kvm->arch.config_lock);
216
217 #ifdef CONFIG_LOCKDEP
218 /* Clue in lockdep that the config_lock must be taken inside kvm->lock */
219 mutex_lock(&kvm->lock);
220 mutex_lock(&kvm->arch.config_lock);
221 mutex_unlock(&kvm->arch.config_lock);
222 mutex_unlock(&kvm->lock);
223 #endif
224
225 kvm_init_nested(kvm);
226
227 ret = kvm_share_hyp(kvm, kvm + 1);
228 if (ret)
229 return ret;
230
231 if (!zalloc_cpumask_var(&kvm->arch.supported_cpus, GFP_KERNEL_ACCOUNT)) {
232 ret = -ENOMEM;
233 goto err_unshare_kvm;
234 }
235 cpumask_copy(kvm->arch.supported_cpus, cpu_possible_mask);
236
237 ret = kvm_init_stage2_mmu(kvm, &kvm->arch.mmu, type);
238 if (ret)
239 goto err_free_cpumask;
240
241 if (is_protected_kvm_enabled()) {
242 /*
243 * If any failures occur after this is successful, make sure to
244 * call __pkvm_unreserve_vm to unreserve the VM in hyp.
245 */
246 ret = pkvm_init_host_vm(kvm, type);
247 if (ret)
248 goto err_uninit_mmu;
249 } else if (type & KVM_VM_TYPE_ARM_PROTECTED) {
250 ret = -EINVAL;
251 goto err_uninit_mmu;
252 }
253
254 kvm_vgic_early_init(kvm);
255
256 kvm_timer_init_vm(kvm);
257
258 /* The maximum number of VCPUs is limited by the host's GIC model */
259 kvm->max_vcpus = kvm_arm_default_max_vcpus();
260
261 kvm_arm_init_hypercalls(kvm);
262
263 bitmap_zero(kvm->arch.vcpu_features, KVM_VCPU_MAX_FEATURES);
264
265 return 0;
266
267 err_uninit_mmu:
268 kvm_uninit_stage2_mmu(kvm);
269 err_free_cpumask:
270 free_cpumask_var(kvm->arch.supported_cpus);
271 err_unshare_kvm:
272 kvm_unshare_hyp(kvm, kvm + 1);
273 return ret;
274 }
275
276 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
277 {
278 return VM_FAULT_SIGBUS;
279 }
280
281 void kvm_arch_create_vm_debugfs(struct kvm *kvm)
282 {
283 kvm_sys_regs_create_debugfs(kvm);
284 kvm_s2_ptdump_create_debugfs(kvm);
285 }
286
287 static void kvm_destroy_mpidr_data(struct kvm *kvm)
288 {
289 struct kvm_mpidr_data *data;
290
291 mutex_lock(&kvm->arch.config_lock);
292
293 data = rcu_dereference_protected(kvm->arch.mpidr_data,
294 lockdep_is_held(&kvm->arch.config_lock));
295 if (data) {
296 rcu_assign_pointer(kvm->arch.mpidr_data, NULL);
297 synchronize_rcu();
298 kfree(data);
299 }
300
301 mutex_unlock(&kvm->arch.config_lock);
302 }
303
304 /**
305 * kvm_arch_destroy_vm - destroy the VM data structure
306 * @kvm: pointer to the KVM struct
307 */
308 void kvm_arch_destroy_vm(struct kvm *kvm)
309 {
310 bitmap_free(kvm->arch.pmu_filter);
311 free_cpumask_var(kvm->arch.supported_cpus);
312
313 kvm_vgic_destroy(kvm);
314
315 if (is_protected_kvm_enabled())
316 pkvm_destroy_hyp_vm(kvm);
317
318 kvm_uninit_stage2_mmu(kvm);
319 kvm_destroy_mpidr_data(kvm);
320
321 kfree(kvm->arch.sysreg_masks);
322 kvm_destroy_vcpus(kvm);
323
324 kvm_unshare_hyp(kvm, kvm + 1);
325
326 kvm_arm_teardown_hypercalls(kvm);
327 }
328
329 static bool kvm_has_full_ptr_auth(void)
330 {
331 bool apa, gpa, api, gpi, apa3, gpa3;
332 u64 isar1, isar2, val;
333
334 /*
335 * Check that:
336 *
337 * - both Address and Generic auth are implemented for a given
338 * algorithm (Q5, IMPDEF or Q3)
339 * - only a single algorithm is implemented.
340 */
341 if (!system_has_full_ptr_auth())
342 return false;
343
344 isar1 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR1_EL1);
345 isar2 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1);
346
347 apa = !!FIELD_GET(ID_AA64ISAR1_EL1_APA_MASK, isar1);
348 val = FIELD_GET(ID_AA64ISAR1_EL1_GPA_MASK, isar1);
349 gpa = (val == ID_AA64ISAR1_EL1_GPA_IMP);
350
351 api = !!FIELD_GET(ID_AA64ISAR1_EL1_API_MASK, isar1);
352 val = FIELD_GET(ID_AA64ISAR1_EL1_GPI_MASK, isar1);
353 gpi = (val == ID_AA64ISAR1_EL1_GPI_IMP);
354
355 apa3 = !!FIELD_GET(ID_AA64ISAR2_EL1_APA3_MASK, isar2);
356 val = FIELD_GET(ID_AA64ISAR2_EL1_GPA3_MASK, isar2);
357 gpa3 = (val == ID_AA64ISAR2_EL1_GPA3_IMP);
358
359 return (apa == gpa && api == gpi && apa3 == gpa3 &&
360 (apa + api + apa3) == 1);
361 }
362
363 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
364 {
365 int r;
366
367 if (is_protected_kvm_enabled() && !kvm_pkvm_ext_allowed(kvm, ext))
368 return 0;
369
370 switch (ext) {
371 case KVM_CAP_IRQCHIP:
372 r = vgic_present;
373 break;
374 case KVM_CAP_IOEVENTFD:
375 case KVM_CAP_USER_MEMORY:
376 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
377 case KVM_CAP_ONE_REG:
378 case KVM_CAP_ARM_PSCI:
379 case KVM_CAP_ARM_PSCI_0_2:
380 case KVM_CAP_READONLY_MEM:
381 case KVM_CAP_MP_STATE:
382 case KVM_CAP_IMMEDIATE_EXIT:
383 case KVM_CAP_VCPU_EVENTS:
384 case KVM_CAP_ARM_IRQ_LINE_LAYOUT_2:
385 case KVM_CAP_ARM_NISV_TO_USER:
386 case KVM_CAP_ARM_INJECT_EXT_DABT:
387 case KVM_CAP_SET_GUEST_DEBUG:
388 case KVM_CAP_VCPU_ATTRIBUTES:
389 case KVM_CAP_PTP_KVM:
390 case KVM_CAP_ARM_SYSTEM_SUSPEND:
391 case KVM_CAP_IRQFD_RESAMPLE:
392 case KVM_CAP_COUNTER_OFFSET:
393 case KVM_CAP_ARM_WRITABLE_IMP_ID_REGS:
394 case KVM_CAP_ARM_SEA_TO_USER:
395 r = 1;
396 break;
397 case KVM_CAP_SET_GUEST_DEBUG2:
398 return KVM_GUESTDBG_VALID_MASK;
399 case KVM_CAP_ARM_SET_DEVICE_ADDR:
400 r = 1;
401 break;
402 case KVM_CAP_NR_VCPUS:
403 /*
404 * ARM64 treats KVM_CAP_NR_CPUS differently from all other
405 * architectures, as it does not always bound it to
406 * KVM_CAP_MAX_VCPUS. It should not matter much because
407 * this is just an advisory value.
408 */
409 r = min_t(unsigned int, num_online_cpus(),
410 kvm_arm_default_max_vcpus());
411 break;
412 case KVM_CAP_MAX_VCPUS:
413 case KVM_CAP_MAX_VCPU_ID:
414 if (kvm)
415 r = kvm->max_vcpus;
416 else
417 r = kvm_arm_default_max_vcpus();
418 break;
419 case KVM_CAP_MSI_DEVID:
420 if (!kvm)
421 r = -EINVAL;
422 else
423 r = kvm->arch.vgic.msis_require_devid;
424 break;
425 case KVM_CAP_ARM_USER_IRQ:
426 /*
427 * 1: EL1_VTIMER, EL1_PTIMER, and PMU.
428 * (bump this number if adding more devices)
429 */
430 r = 1;
431 break;
432 case KVM_CAP_ARM_MTE:
433 r = system_supports_mte();
434 break;
435 case KVM_CAP_STEAL_TIME:
436 r = kvm_arm_pvtime_supported();
437 break;
438 case KVM_CAP_ARM_EL1_32BIT:
439 r = cpus_have_final_cap(ARM64_HAS_32BIT_EL1);
440 break;
441 case KVM_CAP_ARM_EL2:
442 r = cpus_have_final_cap(ARM64_HAS_NESTED_VIRT);
443 break;
444 case KVM_CAP_ARM_EL2_E2H0:
445 r = cpus_have_final_cap(ARM64_HAS_HCR_NV1);
446 break;
447 case KVM_CAP_GUEST_DEBUG_HW_BPS:
448 r = get_num_brps();
449 break;
450 case KVM_CAP_GUEST_DEBUG_HW_WPS:
451 r = get_num_wrps();
452 break;
453 case KVM_CAP_ARM_PMU_V3:
454 r = kvm_supports_guest_pmuv3();
455 break;
456 case KVM_CAP_ARM_INJECT_SERROR_ESR:
457 r = cpus_have_final_cap(ARM64_HAS_RAS_EXTN);
458 break;
459 case KVM_CAP_ARM_VM_IPA_SIZE:
460 r = get_kvm_ipa_limit();
461 break;
462 case KVM_CAP_ARM_SVE:
463 r = system_supports_sve();
464 break;
465 case KVM_CAP_ARM_PTRAUTH_ADDRESS:
466 case KVM_CAP_ARM_PTRAUTH_GENERIC:
467 r = kvm_has_full_ptr_auth();
468 break;
469 case KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE:
470 if (kvm)
471 r = kvm->arch.mmu.split_page_chunk_size;
472 else
473 r = KVM_ARM_EAGER_SPLIT_CHUNK_SIZE_DEFAULT;
474 break;
475 case KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES:
476 r = kvm_supported_block_sizes();
477 break;
478 case KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES:
479 r = BIT(0);
480 break;
481 case KVM_CAP_ARM_CACHEABLE_PFNMAP_SUPPORTED:
482 if (!kvm)
483 r = -EINVAL;
484 else
485 r = kvm_supports_cacheable_pfnmap();
486 break;
487
488 default:
489 r = 0;
490 }
491
492 return r;
493 }
494
495 long kvm_arch_dev_ioctl(struct file *filp,
496 unsigned int ioctl, unsigned long arg)
497 {
498 return -EINVAL;
499 }
500
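/*
 * Without VHE the kvm structure is shared with the EL2 hypervisor via
 * kvm_share_hyp(), which expects linear-map memory, hence kzalloc().
 * With VHE a vmalloc'd allocation is acceptable.
 */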
501 struct kvm *kvm_arch_alloc_vm(void)
502 {
503 size_t sz = sizeof(struct kvm);
504
505 if (!has_vhe())
506 return kzalloc(sz, GFP_KERNEL_ACCOUNT);
507
508 return kvzalloc(sz, GFP_KERNEL_ACCOUNT);
509 }
510
511 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
512 {
513 if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
514 return -EBUSY;
515
516 if (id >= kvm->max_vcpus)
517 return -EINVAL;
518
519 return 0;
520 }
521
522 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
523 {
524 int err;
525
526 spin_lock_init(&vcpu->arch.mp_state_lock);
527
528 #ifdef CONFIG_LOCKDEP
529 /* Inform lockdep that the config_lock is acquired after vcpu->mutex */
530 mutex_lock(&vcpu->mutex);
531 mutex_lock(&vcpu->kvm->arch.config_lock);
532 mutex_unlock(&vcpu->kvm->arch.config_lock);
533 mutex_unlock(&vcpu->mutex);
534 #endif
535
536 /* Force users to call KVM_ARM_VCPU_INIT */
537 vcpu_clear_flag(vcpu, VCPU_INITIALIZED);
538
539 vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
540
541 /* Set up the timer */
542 kvm_timer_vcpu_init(vcpu);
543
544 kvm_pmu_vcpu_init(vcpu);
545
546 kvm_arm_pvtime_vcpu_init(&vcpu->arch);
547
548 vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;
549
550 /*
551 * This vCPU may have been created after mpidr_data was initialized.
552 * Throw out the pre-computed mappings if that is the case which forces
553 * KVM to fall back to iteratively searching the vCPUs.
554 */
555 kvm_destroy_mpidr_data(vcpu->kvm);
556
557 err = kvm_vgic_vcpu_init(vcpu);
558 if (err)
559 return err;
560
561 err = kvm_share_hyp(vcpu, vcpu + 1);
562 if (err)
563 kvm_vgic_vcpu_destroy(vcpu);
564
565 return err;
566 }
567
568 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
569 {
570 }
571
572 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
573 {
574 if (!is_protected_kvm_enabled())
575 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
576 else
577 free_hyp_memcache(&vcpu->arch.pkvm_memcache);
578 kvm_timer_vcpu_terminate(vcpu);
579 kvm_pmu_vcpu_destroy(vcpu);
580 kvm_vgic_vcpu_destroy(vcpu);
581 kvm_arm_vcpu_destroy(vcpu);
582 }
583
584 void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
585 {
586
587 }
588
589 void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
590 {
591
592 }
593
594 static void vcpu_set_pauth_traps(struct kvm_vcpu *vcpu)
595 {
596 if (vcpu_has_ptrauth(vcpu) && !is_protected_kvm_enabled()) {
597 /*
598 * Either we're running an L2 guest, and the API/APK bits come
599 * from L1's HCR_EL2, or API/APK are both set.
600 */
601 if (unlikely(is_nested_ctxt(vcpu))) {
602 u64 val;
603
604 val = __vcpu_sys_reg(vcpu, HCR_EL2);
605 val &= (HCR_API | HCR_APK);
606 vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
607 vcpu->arch.hcr_el2 |= val;
608 } else {
609 vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
610 }
611
612 /*
613 * Save the host keys if there is any chance for the guest
614 * to use pauth, as the entry code will reload the guest
615 * keys in that case.
616 */
617 if (vcpu->arch.hcr_el2 & (HCR_API | HCR_APK)) {
618 struct kvm_cpu_context *ctxt;
619
620 ctxt = this_cpu_ptr_hyp_sym(kvm_hyp_ctxt);
621 ptrauth_save_keys(ctxt);
622 }
623 }
624 }
625
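/*
 * Decide whether HCR_EL2.TWI can be cleared, i.e. whether the guest may
 * execute WFI without trapping. Under the default policy this only
 * happens when the vCPU thread is the sole runnable task and, for a
 * GICv3 VM, when direct injection (GICv4 vLPIs or vSGIs) is in use.
 */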
626 static bool kvm_vcpu_should_clear_twi(struct kvm_vcpu *vcpu)
627 {
628 if (unlikely(kvm_wfi_trap_policy != KVM_WFX_NOTRAP_SINGLE_TASK))
629 return kvm_wfi_trap_policy == KVM_WFX_NOTRAP;
630
631 if (vgic_is_v5(vcpu->kvm))
632 return single_task_running();
633
634 return single_task_running() &&
635 vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 &&
636 (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) ||
637 vcpu->kvm->arch.vgic.nassgireq);
638 }
639
640 static bool kvm_vcpu_should_clear_twe(struct kvm_vcpu *vcpu)
641 {
642 if (unlikely(kvm_wfe_trap_policy != KVM_WFX_NOTRAP_SINGLE_TASK))
643 return kvm_wfe_trap_policy == KVM_WFX_NOTRAP;
644
645 return single_task_running();
646 }
647
648 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
649 {
650 struct kvm_s2_mmu *mmu;
651 int *last_ran;
652
653 if (is_protected_kvm_enabled())
654 goto nommu;
655
656 if (vcpu_has_nv(vcpu))
657 kvm_vcpu_load_hw_mmu(vcpu);
658
659 mmu = vcpu->arch.hw_mmu;
660 last_ran = this_cpu_ptr(mmu->last_vcpu_ran);
661
662 /*
663 * Ensure a VMID is allocated for the MMU before programming VTTBR_EL2,
664 * which happens eagerly in VHE.
665 *
666 * Also, the VMID allocator only preserves VMIDs that are active at the
667 * time of rollover, so KVM might need to grab a new VMID for the MMU if
668 * this is called from kvm_sched_in().
669 */
670 kvm_arm_vmid_update(&mmu->vmid);
671
672 /*
673 * We guarantee that both TLBs and I-cache are private to each
674 * vcpu. If detecting that a vcpu from the same VM has
675 * previously run on the same physical CPU, call into the
676 * hypervisor code to nuke the relevant contexts.
677 *
678 * We might get preempted before the vCPU actually runs, but
679 * over-invalidation doesn't affect correctness.
680 */
681 if (*last_ran != vcpu->vcpu_idx) {
682 kvm_call_hyp(__kvm_flush_cpu_context, mmu);
683 *last_ran = vcpu->vcpu_idx;
684 }
685
686 nommu:
687 vcpu->cpu = cpu;
688
689 /*
690 * The timer must be loaded before the vgic to correctly set up physical
691 * interrupt deactivation in nested state (e.g. timer interrupt).
692 */
693 kvm_timer_vcpu_load(vcpu);
694 kvm_vgic_load(vcpu);
695 kvm_vcpu_load_debug(vcpu);
696 kvm_vcpu_load_fgt(vcpu);
697 if (has_vhe())
698 kvm_vcpu_load_vhe(vcpu);
699 kvm_arch_vcpu_load_fp(vcpu);
700 kvm_vcpu_pmu_restore_guest(vcpu);
701 if (kvm_arm_is_pvtime_enabled(&vcpu->arch))
702 kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu);
703
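/* Recompute the WFx traps; they depend on the run-queue state at load time. */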
704 if (kvm_vcpu_should_clear_twe(vcpu))
705 vcpu->arch.hcr_el2 &= ~HCR_TWE;
706 else
707 vcpu->arch.hcr_el2 |= HCR_TWE;
708
709 if (kvm_vcpu_should_clear_twi(vcpu))
710 vcpu->arch.hcr_el2 &= ~HCR_TWI;
711 else
712 vcpu->arch.hcr_el2 |= HCR_TWI;
713
714 vcpu_set_pauth_traps(vcpu);
715
716 if (is_protected_kvm_enabled()) {
717 kvm_call_hyp_nvhe(__pkvm_vcpu_load,
718 vcpu->kvm->arch.pkvm.handle,
719 vcpu->vcpu_idx, vcpu->arch.hcr_el2);
720 kvm_call_hyp(__vgic_v3_restore_vmcr_aprs,
721 &vcpu->arch.vgic_cpu.vgic_v3);
722 }
723
724 if (!cpumask_test_cpu(cpu, vcpu->kvm->arch.supported_cpus))
725 vcpu_set_on_unsupported_cpu(vcpu);
726
727 vcpu->arch.pid = pid_nr(vcpu->pid);
728 }
729
730 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
731 {
732 if (is_protected_kvm_enabled()) {
733 kvm_call_hyp(__vgic_v3_save_aprs, &vcpu->arch.vgic_cpu.vgic_v3);
734 kvm_call_hyp_nvhe(__pkvm_vcpu_put);
735 }
736
737 kvm_vcpu_put_debug(vcpu);
738 kvm_arch_vcpu_put_fp(vcpu);
739 if (has_vhe())
740 kvm_vcpu_put_vhe(vcpu);
741 kvm_timer_vcpu_put(vcpu);
742 kvm_vgic_put(vcpu);
743 kvm_vcpu_pmu_restore_host(vcpu);
744 if (vcpu_has_nv(vcpu))
745 kvm_vcpu_put_hw_mmu(vcpu);
746 kvm_arm_vmid_clear_active();
747
748 vcpu_clear_on_unsupported_cpu(vcpu);
749 vcpu->cpu = -1;
750 }
751
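/*
 * Mark the vCPU as stopped and make it park itself: the SLEEP request
 * combined with the kick forces an exit into kvm_vcpu_sleep().
 */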
752 static void __kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu)
753 {
754 WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
755 kvm_make_request(KVM_REQ_SLEEP, vcpu);
756 kvm_vcpu_kick(vcpu);
757 }
758
759 void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu)
760 {
761 spin_lock(&vcpu->arch.mp_state_lock);
762 __kvm_arm_vcpu_power_off(vcpu);
763 spin_unlock(&vcpu->arch.mp_state_lock);
764 }
765
766 bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu)
767 {
768 return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_STOPPED;
769 }
770
771 static void kvm_arm_vcpu_suspend(struct kvm_vcpu *vcpu)
772 {
773 WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_SUSPENDED);
774 kvm_make_request(KVM_REQ_SUSPEND, vcpu);
775 kvm_vcpu_kick(vcpu);
776 }
777
778 static bool kvm_arm_vcpu_suspended(struct kvm_vcpu *vcpu)
779 {
780 return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_SUSPENDED;
781 }
782
783 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
784 struct kvm_mp_state *mp_state)
785 {
786 *mp_state = READ_ONCE(vcpu->arch.mp_state);
787
788 return 0;
789 }
790
791 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
792 struct kvm_mp_state *mp_state)
793 {
794 int ret = 0;
795
796 spin_lock(&vcpu->arch.mp_state_lock);
797
798 switch (mp_state->mp_state) {
799 case KVM_MP_STATE_RUNNABLE:
800 WRITE_ONCE(vcpu->arch.mp_state, *mp_state);
801 break;
802 case KVM_MP_STATE_STOPPED:
803 __kvm_arm_vcpu_power_off(vcpu);
804 break;
805 case KVM_MP_STATE_SUSPENDED:
806 kvm_arm_vcpu_suspend(vcpu);
807 break;
808 default:
809 ret = -EINVAL;
810 }
811
812 spin_unlock(&vcpu->arch.mp_state_lock);
813
814 return ret;
815 }
816
817 /**
818 * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled
819 * @v: The VCPU pointer
820 *
821 * If the guest CPU is not waiting for interrupts or an interrupt line is
822 * asserted, the CPU is by definition runnable.
823 */
824 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
825 {
826 bool irq_lines = *vcpu_hcr(v) & (HCR_VI | HCR_VF | HCR_VSE);
827
828 irq_lines |= (!irqchip_in_kernel(v->kvm) &&
829 (kvm_timer_should_notify_user(v) ||
830 kvm_pmu_should_notify_user(v)));
831
832 return ((irq_lines || kvm_vgic_vcpu_pending_irq(v))
833 && !kvm_arm_vcpu_stopped(v) && !v->arch.pause);
834 }
835
836 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
837 {
838 return vcpu_mode_priv(vcpu);
839 }
840
841 #ifdef CONFIG_GUEST_PERF_EVENTS
842 unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
843 {
844 return *vcpu_pc(vcpu);
845 }
846 #endif
847
848 static void kvm_init_mpidr_data(struct kvm *kvm)
849 {
850 struct kvm_mpidr_data *data = NULL;
851 unsigned long c, mask, nr_entries;
852 u64 aff_set = 0, aff_clr = ~0UL;
853 struct kvm_vcpu *vcpu;
854
855 mutex_lock(&kvm->arch.config_lock);
856
857 if (rcu_access_pointer(kvm->arch.mpidr_data) ||
858 atomic_read(&kvm->online_vcpus) == 1)
859 goto out;
860
861 kvm_for_each_vcpu(c, vcpu, kvm) {
862 u64 aff = kvm_vcpu_get_mpidr_aff(vcpu);
863 aff_set |= aff;
864 aff_clr &= aff;
865 }
866
867 /*
868 * A significant bit can be either 0 or 1, and will only appear in
869 * aff_set. Use aff_clr to weed out the useless stuff.
870 */
871 mask = aff_set ^ aff_clr;
872 nr_entries = BIT_ULL(hweight_long(mask));
873
874 /*
875 * Don't let userspace fool us. If we need more than a single page
876 * to describe the compressed MPIDR array, just fall back to the
877 * iterative method. Single vcpu VMs do not need this either.
878 */
879 if (struct_size(data, cmpidr_to_idx, nr_entries) <= PAGE_SIZE)
880 data = kzalloc_flex(*data, cmpidr_to_idx, nr_entries,
881 GFP_KERNEL_ACCOUNT);
882
883 if (!data)
884 goto out;
885
886 data->mpidr_mask = mask;
887
888 kvm_for_each_vcpu(c, vcpu, kvm) {
889 u64 aff = kvm_vcpu_get_mpidr_aff(vcpu);
890 u16 index = kvm_mpidr_index(data, aff);
891
892 data->cmpidr_to_idx[index] = c;
893 }
894
895 rcu_assign_pointer(kvm->arch.mpidr_data, data);
896 out:
897 mutex_unlock(&kvm->arch.config_lock);
898 }
899
900 /*
901 * Handle both the initialisation that is being done when the vcpu is
902 * run for the first time, as well as the updates that must be
903 * performed each time we get a new thread dealing with this vcpu.
904 */
905 int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
906 {
907 struct kvm *kvm = vcpu->kvm;
908 int ret;
909
910 if (!kvm_vcpu_initialized(vcpu))
911 return -ENOEXEC;
912
913 if (!kvm_arm_vcpu_is_finalized(vcpu))
914 return -EPERM;
915
916 if (likely(vcpu_has_run_once(vcpu)))
917 return 0;
918
919 kvm_init_mpidr_data(kvm);
920
921 if (likely(irqchip_in_kernel(kvm))) {
922 /*
923 * Map the VGIC hardware resources before running a vcpu the
924 * first time on this VM.
925 */
926 ret = kvm_vgic_map_resources(kvm);
927 if (ret)
928 return ret;
929 }
930
931 ret = kvm_finalize_sys_regs(vcpu);
932 if (ret)
933 return ret;
934
935 if (vcpu_has_nv(vcpu)) {
936 ret = kvm_vcpu_allocate_vncr_tlb(vcpu);
937 if (ret)
938 return ret;
939
940 ret = kvm_vgic_vcpu_nv_init(vcpu);
941 if (ret)
942 return ret;
943 }
944
945 /*
946 * This needs to happen after any restriction has been applied
947 * to the feature set.
948 */
949 kvm_calculate_traps(vcpu);
950
951 ret = kvm_timer_enable(vcpu);
952 if (ret)
953 return ret;
954
955 if (kvm_vcpu_has_pmu(vcpu)) {
956 ret = kvm_arm_pmu_v3_enable(vcpu);
957 if (ret)
958 return ret;
959 }
960
961 ret = vgic_v5_finalize_ppi_state(kvm);
962 if (ret)
963 return ret;
964
965 if (is_protected_kvm_enabled()) {
966 ret = pkvm_create_hyp_vm(kvm);
967 if (ret)
968 return ret;
969
970 ret = pkvm_create_hyp_vcpu(vcpu);
971 if (ret)
972 return ret;
973 }
974
975 mutex_lock(&kvm->arch.config_lock);
976 set_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags);
977 mutex_unlock(&kvm->arch.config_lock);
978
979 return ret;
980 }
981
982 bool kvm_arch_intc_initialized(struct kvm *kvm)
983 {
984 return vgic_initialized(kvm);
985 }
986
987 void kvm_arm_halt_guest(struct kvm *kvm)
988 {
989 unsigned long i;
990 struct kvm_vcpu *vcpu;
991
992 kvm_for_each_vcpu(i, vcpu, kvm)
993 vcpu->arch.pause = true;
994 kvm_make_all_cpus_request(kvm, KVM_REQ_SLEEP);
995 }
996
997 void kvm_arm_resume_guest(struct kvm *kvm)
998 {
999 unsigned long i;
1000 struct kvm_vcpu *vcpu;
1001
1002 kvm_for_each_vcpu(i, vcpu, kvm) {
1003 vcpu->arch.pause = false;
1004 __kvm_vcpu_wake_up(vcpu);
1005 }
1006 }
1007
1008 static void kvm_vcpu_sleep(struct kvm_vcpu *vcpu)
1009 {
1010 struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);
1011
1012 rcuwait_wait_event(wait,
1013 (!kvm_arm_vcpu_stopped(vcpu)) && (!vcpu->arch.pause),
1014 TASK_INTERRUPTIBLE);
1015
1016 if (kvm_arm_vcpu_stopped(vcpu) || vcpu->arch.pause) {
1017 /* Awaken to handle a signal, request we sleep again later. */
1018 kvm_make_request(KVM_REQ_SLEEP, vcpu);
1019 }
1020
1021 /*
1022 * Make sure we will observe a potential reset request if we've
1023 * observed a change to the power state. Pairs with the smp_wmb() in
1024 * kvm_psci_vcpu_on().
1025 */
1026 smp_rmb();
1027 }
1028
1029 /**
1030 * kvm_vcpu_wfi - emulate Wait-For-Interrupt behavior
1031 * @vcpu: The VCPU pointer
1032 *
1033 * Suspend execution of a vCPU until a valid wake event is detected, i.e. until
1034 * the vCPU is runnable. The vCPU may or may not be scheduled out, depending
1035 * on when a wake event arrives, e.g. there may already be a pending wake event.
1036 */
1037 void kvm_vcpu_wfi(struct kvm_vcpu *vcpu)
1038 {
1039 /*
1040 * Sync back the state of the GIC CPU interface so that we have
1041 * the latest PMR and group enables. This ensures that
1042 * kvm_arch_vcpu_runnable has up-to-date data to decide whether
1043 * we have pending interrupts, e.g. when determining if the
1044 * vCPU should block.
1045 *
1046 * For the same reason, we want to tell GICv4 that we need
1047 * doorbells to be signalled, should an interrupt become pending.
1048 */
1049 preempt_disable();
1050 vcpu_set_flag(vcpu, IN_WFI);
1051 kvm_vgic_put(vcpu);
1052 preempt_enable();
1053
1054 kvm_vcpu_halt(vcpu);
1055 vcpu_clear_flag(vcpu, IN_WFIT);
1056
1057 preempt_disable();
1058 vcpu_clear_flag(vcpu, IN_WFI);
1059 kvm_vgic_load(vcpu);
1060 preempt_enable();
1061 }
1062
1063 static int kvm_vcpu_suspend(struct kvm_vcpu *vcpu)
1064 {
1065 if (!kvm_arm_vcpu_suspended(vcpu))
1066 return 1;
1067
1068 kvm_vcpu_wfi(vcpu);
1069
1070 /*
1071 * The suspend state is sticky; we do not leave it until userspace
1072 * explicitly marks the vCPU as runnable. Request that we suspend again
1073 * later.
1074 */
1075 kvm_make_request(KVM_REQ_SUSPEND, vcpu);
1076
1077 /*
1078 * Check to make sure the vCPU is actually runnable. If so, exit to
1079 * userspace informing it of the wakeup condition.
1080 */
1081 if (kvm_arch_vcpu_runnable(vcpu)) {
1082 memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
1083 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_WAKEUP;
1084 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
1085 return 0;
1086 }
1087
1088 /*
1089 * Otherwise, we were unblocked to process a different event, such as a
1090 * pending signal. Return 1 and allow kvm_arch_vcpu_ioctl_run() to
1091 * process the event.
1092 */
1093 return 1;
1094 }
1095
1096 /**
1097 * check_vcpu_requests - check and handle pending vCPU requests
1098 * @vcpu: the VCPU pointer
1099 *
1100 * Return: 1 if we should enter the guest
1101 * 0 if we should exit to userspace
1102 * < 0 if we should exit to userspace, where the return value indicates
1103 * an error
1104 */
1105 static int check_vcpu_requests(struct kvm_vcpu *vcpu)
1106 {
1107 if (kvm_request_pending(vcpu)) {
1108 if (kvm_check_request(KVM_REQ_VM_DEAD, vcpu))
1109 return -EIO;
1110
1111 if (kvm_check_request(KVM_REQ_SLEEP, vcpu))
1112 kvm_vcpu_sleep(vcpu);
1113
1114 if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
1115 kvm_reset_vcpu(vcpu);
1116
1117 /*
1118 * Clear IRQ_PENDING requests that were made to guarantee
1119 * that a VCPU sees new virtual interrupts.
1120 */
1121 kvm_check_request(KVM_REQ_IRQ_PENDING, vcpu);
1122
1123 /* Process interrupts deactivated through a trap */
1124 if (kvm_check_request(KVM_REQ_VGIC_PROCESS_UPDATE, vcpu))
1125 kvm_vgic_process_async_update(vcpu);
1126
1127 if (kvm_check_request(KVM_REQ_RECORD_STEAL, vcpu))
1128 kvm_update_stolen_time(vcpu);
1129
1130 if (kvm_check_request(KVM_REQ_RELOAD_GICv4, vcpu)) {
1131 /* The distributor enable bits were changed */
1132 preempt_disable();
1133 vgic_v4_put(vcpu);
1134 vgic_v4_load(vcpu);
1135 preempt_enable();
1136 }
1137
1138 if (kvm_check_request(KVM_REQ_RELOAD_PMU, vcpu))
1139 kvm_vcpu_reload_pmu(vcpu);
1140
1141 if (kvm_check_request(KVM_REQ_RESYNC_PMU_EL0, vcpu))
1142 kvm_vcpu_pmu_restore_guest(vcpu);
1143
1144 if (kvm_check_request(KVM_REQ_SUSPEND, vcpu))
1145 return kvm_vcpu_suspend(vcpu);
1146
1147 if (kvm_dirty_ring_check_request(vcpu))
1148 return 0;
1149
1150 check_nested_vcpu_requests(vcpu);
1151 }
1152
1153 return 1;
1154 }
1155
1156 static bool vcpu_mode_is_bad_32bit(struct kvm_vcpu *vcpu)
1157 {
1158 if (likely(!vcpu_mode_is_32bit(vcpu)))
1159 return false;
1160
1161 if (vcpu_has_nv(vcpu))
1162 return true;
1163
1164 return !kvm_supports_32bit_el0();
1165 }
1166
1167 /**
1168 * kvm_vcpu_exit_request - returns true if the VCPU should *not* enter the guest
1169 * @vcpu: The VCPU pointer
1170 * @ret: Pointer to write optional return code
1171 *
1172 * Returns: true if the VCPU needs to return to a preemptible + interruptible
1173 * kernel context and skip guest entry.
1174 *
1175 * This function disambiguates between two different types of exits: exits to a
1176 * preemptible + interruptible kernel context and exits to userspace. For an
1177 * exit to userspace, this function will write the return code to ret and return
1178 * true. For an exit to preemptible + interruptible kernel context (i.e. check
1179 * for pending work and re-enter), return true without writing to ret.
1180 */
1181 static bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu, int *ret)
1182 {
1183 struct kvm_run *run = vcpu->run;
1184
1185 /*
1186 * If we're using a userspace irqchip, then check if we need
1187 * to tell a userspace irqchip about timer or PMU level
1188 * changes and if so, exit to userspace (the actual level
1189 * state gets updated in kvm_timer_update_run and
1190 * kvm_pmu_update_run below).
1191 */
1192 if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
1193 if (kvm_timer_should_notify_user(vcpu) ||
1194 kvm_pmu_should_notify_user(vcpu)) {
1195 *ret = -EINTR;
1196 run->exit_reason = KVM_EXIT_INTR;
1197 return true;
1198 }
1199 }
1200
1201 if (unlikely(vcpu_on_unsupported_cpu(vcpu))) {
1202 run->exit_reason = KVM_EXIT_FAIL_ENTRY;
1203 run->fail_entry.hardware_entry_failure_reason = KVM_EXIT_FAIL_ENTRY_CPU_UNSUPPORTED;
1204 run->fail_entry.cpu = smp_processor_id();
1205 *ret = 0;
1206 return true;
1207 }
1208
1209 return kvm_request_pending(vcpu) ||
1210 xfer_to_guest_mode_work_pending();
1211 }
1212
1213 /*
1214 * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
1215 * the vCPU is running.
1216 *
1217 * This must be noinstr as instrumentation may make use of RCU, and this is not
1218 * safe during the EQS.
1219 */
1220 static int noinstr kvm_arm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
1221 {
1222 int ret;
1223
1224 guest_state_enter_irqoff();
1225 ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
1226 guest_state_exit_irqoff();
1227
1228 return ret;
1229 }
1230
1231 /**
1232 * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
1233 * @vcpu: The VCPU pointer
1234 *
1235 * This function is called through the VCPU_RUN ioctl called from user space. It
1236 * will execute VM code in a loop until the time slice for the process is used
1237 * or some emulation is needed from user space in which case the function will
1238 * return with return value 0 and with the kvm_run structure filled in with the
1239 * required data for the requested emulation.
1240 */
1241 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
1242 {
1243 struct kvm_run *run = vcpu->run;
1244 int ret;
1245
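/* Complete any in-flight MMIO emulation that userspace has just handled. */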
1246 if (run->exit_reason == KVM_EXIT_MMIO) {
1247 ret = kvm_handle_mmio_return(vcpu);
1248 if (ret <= 0)
1249 return ret;
1250 }
1251
1252 vcpu_load(vcpu);
1253
1254 if (!vcpu->wants_to_run) {
1255 ret = -EINTR;
1256 goto out;
1257 }
1258
1259 kvm_sigset_activate(vcpu);
1260
1261 ret = 1;
1262 run->exit_reason = KVM_EXIT_UNKNOWN;
1263 run->flags = 0;
1264 while (ret > 0) {
1265 /*
1266 * Check conditions before entering the guest
1267 */
1268 ret = kvm_xfer_to_guest_mode_handle_work(vcpu);
1269 if (!ret)
1270 ret = 1;
1271
1272 if (ret > 0)
1273 ret = check_vcpu_requests(vcpu);
1274
1275 /*
1276 * Preparing the interrupts to be injected also
1277 * involves poking the GIC, which must be done in a
1278 * non-preemptible context.
1279 */
1280 preempt_disable();
1281
1282 kvm_nested_flush_hwstate(vcpu);
1283
1284 if (kvm_vcpu_has_pmu(vcpu))
1285 kvm_pmu_flush_hwstate(vcpu);
1286
1287 local_irq_disable();
1288
1289 kvm_vgic_flush_hwstate(vcpu);
1290
1291 kvm_pmu_update_vcpu_events(vcpu);
1292
1293 /*
1294 * Ensure we set mode to IN_GUEST_MODE after we disable
1295 * interrupts and before the final VCPU requests check.
1296 * See the comment in kvm_vcpu_exiting_guest_mode() and
1297 * Documentation/virt/kvm/vcpu-requests.rst
1298 */
1299 smp_store_mb(vcpu->mode, IN_GUEST_MODE);
1300
1301 if (ret <= 0 || kvm_vcpu_exit_request(vcpu, &ret)) {
1302 vcpu->mode = OUTSIDE_GUEST_MODE;
1303 isb(); /* Ensure work in x_flush_hwstate is committed */
1304 if (kvm_vcpu_has_pmu(vcpu))
1305 kvm_pmu_sync_hwstate(vcpu);
1306 if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
1307 kvm_timer_sync_user(vcpu);
1308 kvm_vgic_sync_hwstate(vcpu);
1309 local_irq_enable();
1310 preempt_enable();
1311 continue;
1312 }
1313
1314 kvm_arch_vcpu_ctxflush_fp(vcpu);
1315
1316 /**************************************************************
1317 * Enter the guest
1318 */
1319 trace_kvm_entry(*vcpu_pc(vcpu));
1320 guest_timing_enter_irqoff();
1321
1322 ret = kvm_arm_vcpu_enter_exit(vcpu);
1323
1324 vcpu->mode = OUTSIDE_GUEST_MODE;
1325 vcpu->stat.exits++;
1326 /*
1327 * Back from guest
1328 *************************************************************/
1329
1330 /*
1331 * We must sync the PMU state before the vgic state so
1332 * that the vgic can properly sample the updated state of the
1333 * interrupt line.
1334 */
1335 if (kvm_vcpu_has_pmu(vcpu))
1336 kvm_pmu_sync_hwstate(vcpu);
1337
1338 /*
1339 * Sync the vgic state before syncing the timer state because
1340 * the timer code needs to know if the virtual timer
1341 * interrupts are active.
1342 */
1343 kvm_vgic_sync_hwstate(vcpu);
1344
1345 /*
1346 * Sync the timer hardware state before enabling interrupts as
1347 * we don't want vtimer interrupts to race with syncing the
1348 * timer virtual interrupt state.
1349 */
1350 if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
1351 kvm_timer_sync_user(vcpu);
1352
1353 if (is_hyp_ctxt(vcpu))
1354 kvm_timer_sync_nested(vcpu);
1355
1356 kvm_arch_vcpu_ctxsync_fp(vcpu);
1357
1358 /*
1359 * We must ensure that any pending interrupts are taken before
1360 * we exit guest timing so that timer ticks are accounted as
1361 * guest time. Transiently unmask interrupts so that any
1362 * pending interrupts are taken.
1363 *
1364 * Per ARM DDI 0487G.b section D1.13.4, an ISB (or other
1365 * context synchronization event) is necessary to ensure that
1366 * pending interrupts are taken.
1367 */
1368 if (ARM_EXCEPTION_CODE(ret) == ARM_EXCEPTION_IRQ) {
1369 local_irq_enable();
1370 isb();
1371 local_irq_disable();
1372 }
1373
1374 guest_timing_exit_irqoff();
1375
1376 local_irq_enable();
1377
1378 trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
1379
1380 /* Exit types that need handling before we can be preempted */
1381 handle_exit_early(vcpu, ret);
1382
1383 kvm_nested_sync_hwstate(vcpu);
1384
1385 preempt_enable();
1386
1387 /*
1388 * The ARMv8 architecture doesn't give the hypervisor
1389 * a mechanism to prevent a guest from dropping to AArch32 EL0
1390 * if implemented by the CPU. If we spot the guest in such
1391 * state and that we decided it wasn't supposed to do so (like
1392 * with the asymmetric AArch32 case), return to userspace with
1393 * a fatal error.
1394 */
1395 if (vcpu_mode_is_bad_32bit(vcpu)) {
1396 /*
1397 * As we have caught the guest red-handed, decide that
1398 * it isn't fit for purpose anymore by making the vcpu
1399 * invalid. The VMM can try and fix it by issuing a
1400 * KVM_ARM_VCPU_INIT if it really wants to.
1401 */
1402 vcpu_clear_flag(vcpu, VCPU_INITIALIZED);
1403 ret = ARM_EXCEPTION_IL;
1404 }
1405
1406 ret = handle_exit(vcpu, ret);
1407 }
1408
1409 /* Tell userspace about in-kernel device output levels */
1410 if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
1411 kvm_timer_update_run(vcpu);
1412 kvm_pmu_update_run(vcpu);
1413 }
1414
1415 kvm_sigset_deactivate(vcpu);
1416
1417 out:
1418 /*
1419 * In the unlikely event that we are returning to userspace
1420 * with pending exceptions or PC adjustment, commit these
1421 * adjustments in order to give userspace a consistent view of
1422 * the vcpu state. Note that this relies on __kvm_adjust_pc()
1423 * being preempt-safe on VHE.
1424 */
1425 if (unlikely(vcpu_get_flag(vcpu, PENDING_EXCEPTION) ||
1426 vcpu_get_flag(vcpu, INCREMENT_PC)))
1427 kvm_call_hyp(__kvm_adjust_pc, vcpu);
1428
1429 vcpu_put(vcpu);
1430 return ret;
1431 }
1432
1433 static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
1434 {
1435 int bit_index;
1436 bool set;
1437 unsigned long *hcr;
1438
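/* Userspace IRQ/FIQ lines are modelled as the VI/VF bits of the shadow HCR_EL2. */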
1439 if (number == KVM_ARM_IRQ_CPU_IRQ)
1440 bit_index = __ffs(HCR_VI);
1441 else /* KVM_ARM_IRQ_CPU_FIQ */
1442 bit_index = __ffs(HCR_VF);
1443
1444 hcr = vcpu_hcr(vcpu);
1445 if (level)
1446 set = test_and_set_bit(bit_index, hcr);
1447 else
1448 set = test_and_clear_bit(bit_index, hcr);
1449
1450 /*
1451 * If we didn't change anything, no need to wake up or kick other CPUs
1452 */
1453 if (set == level)
1454 return 0;
1455
1456 /*
1457 * The vcpu irq_lines field was updated, wake up sleeping VCPUs and
1458 * trigger a world-switch round on the running physical CPU to set the
1459 * virtual IRQ/FIQ fields in the HCR appropriately.
1460 */
1461 kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
1462 kvm_vcpu_kick(vcpu);
1463
1464 return 0;
1465 }
1466
1467 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
1468 bool line_status)
1469 {
1470 unsigned int irq_type, vcpu_id, irq_num;
1471 struct kvm_vcpu *vcpu = NULL;
1472 bool level = irq_level->level;
1473 u32 irq = irq_level->irq;
1474 unsigned long *mask;
1475
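/*
 * Decode the packed KVM_IRQ_LINE value: interrupt type, target vCPU
 * (split across two fields to address large numbers of vCPUs) and the
 * interrupt number.
 */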
1476 irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
1477 vcpu_id = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
1478 vcpu_id += ((irq >> KVM_ARM_IRQ_VCPU2_SHIFT) & KVM_ARM_IRQ_VCPU2_MASK) * (KVM_ARM_IRQ_VCPU_MASK + 1);
1479 irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;
1480
1481 trace_kvm_irq_line(irq_type, vcpu_id, irq_num, irq_level->level);
1482
1483 switch (irq_type) {
1484 case KVM_ARM_IRQ_TYPE_CPU:
1485 if (irqchip_in_kernel(kvm))
1486 return -ENXIO;
1487
1488 vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
1489 if (!vcpu)
1490 return -EINVAL;
1491
1492 if (irq_num > KVM_ARM_IRQ_CPU_FIQ)
1493 return -EINVAL;
1494
1495 return vcpu_interrupt_line(vcpu, irq_num, level);
1496 case KVM_ARM_IRQ_TYPE_PPI:
1497 if (!irqchip_in_kernel(kvm))
1498 return -ENXIO;
1499
1500 vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
1501 if (!vcpu)
1502 return -EINVAL;
1503
1504 if (vgic_is_v5(kvm)) {
1505 if (irq_num >= VGIC_V5_NR_PRIVATE_IRQS)
1506 return -EINVAL;
1507
1508 /*
1509 * Only allow PPIs that are explicitly exposed to
1510 * userspace to be driven via KVM_IRQ_LINE
1511 */
1512 mask = kvm->arch.vgic.gicv5_vm.userspace_ppis;
1513 if (!test_bit(irq_num, mask))
1514 return -EINVAL;
1515
1516 /* Build a GICv5-style IntID here */
1517 irq_num = vgic_v5_make_ppi(irq_num);
1518 } else if (irq_num < VGIC_NR_SGIS ||
1519 irq_num >= VGIC_NR_PRIVATE_IRQS) {
1520 return -EINVAL;
1521 }
1522
1523 return kvm_vgic_inject_irq(kvm, vcpu, irq_num, level, NULL);
1524 case KVM_ARM_IRQ_TYPE_SPI:
1525 if (!irqchip_in_kernel(kvm))
1526 return -ENXIO;
1527
1528 if (vgic_is_v5(kvm)) {
1529 /* Build a GICv5-style IntID here */
1530 irq_num = vgic_v5_make_spi(irq_num);
1531 } else {
1532 if (irq_num < VGIC_NR_PRIVATE_IRQS)
1533 return -EINVAL;
1534 }
1535
1536 return kvm_vgic_inject_irq(kvm, NULL, irq_num, level, NULL);
1537 }
1538
1539 return -EINVAL;
1540 }
1541
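/*
 * Start from every vCPU feature KVM knows about and strip out the ones
 * this host cannot support.
 */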
1542 static unsigned long system_supported_vcpu_features(void)
1543 {
1544 unsigned long features = KVM_VCPU_VALID_FEATURES;
1545
1546 if (!cpus_have_final_cap(ARM64_HAS_32BIT_EL1))
1547 clear_bit(KVM_ARM_VCPU_EL1_32BIT, &features);
1548
1549 if (!kvm_supports_guest_pmuv3())
1550 clear_bit(KVM_ARM_VCPU_PMU_V3, &features);
1551
1552 if (!system_supports_sve())
1553 clear_bit(KVM_ARM_VCPU_SVE, &features);
1554
1555 if (!kvm_has_full_ptr_auth()) {
1556 clear_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, &features);
1557 clear_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, &features);
1558 }
1559
1560 if (!cpus_have_final_cap(ARM64_HAS_NESTED_VIRT))
1561 clear_bit(KVM_ARM_VCPU_HAS_EL2, &features);
1562
1563 return features;
1564 }
1565
1566 static int kvm_vcpu_init_check_features(struct kvm_vcpu *vcpu,
1567 const struct kvm_vcpu_init *init)
1568 {
1569 unsigned long features = init->features[0];
1570 int i;
1571
1572 if (features & ~KVM_VCPU_VALID_FEATURES)
1573 return -ENOENT;
1574
1575 for (i = 1; i < ARRAY_SIZE(init->features); i++) {
1576 if (init->features[i])
1577 return -ENOENT;
1578 }
1579
1580 if (features & ~system_supported_vcpu_features())
1581 return -EINVAL;
1582
1583 /*
1584 * For now make sure that both address/generic pointer authentication
1585 * features are requested by the userspace together.
1586 */
1587 if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, &features) !=
1588 test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, &features))
1589 return -EINVAL;
1590
1591 if (!test_bit(KVM_ARM_VCPU_EL1_32BIT, &features))
1592 return 0;
1593
1594 /* MTE is incompatible with AArch32 */
1595 if (kvm_has_mte(vcpu->kvm))
1596 return -EINVAL;
1597
1598 /* NV is incompatible with AArch32 */
1599 if (test_bit(KVM_ARM_VCPU_HAS_EL2, &features))
1600 return -EINVAL;
1601
1602 return 0;
1603 }
1604
1605 static bool kvm_vcpu_init_changed(struct kvm_vcpu *vcpu,
1606 const struct kvm_vcpu_init *init)
1607 {
1608 unsigned long features = init->features[0];
1609
1610 return !bitmap_equal(vcpu->kvm->arch.vcpu_features, &features,
1611 KVM_VCPU_MAX_FEATURES);
1612 }
1613
1614 static int kvm_setup_vcpu(struct kvm_vcpu *vcpu)
1615 {
1616 struct kvm *kvm = vcpu->kvm;
1617 int ret = 0;
1618
1619 /*
1620 * When the vCPU has a PMU, but no PMU is set for the guest
1621 * yet, set the default one.
1622 */
1623 if (kvm_vcpu_has_pmu(vcpu) && !kvm->arch.arm_pmu)
1624 ret = kvm_arm_set_default_pmu(kvm);
1625
1626 /* Prepare for nested if required */
1627 if (!ret && vcpu_has_nv(vcpu))
1628 ret = kvm_vcpu_init_nested(vcpu);
1629
1630 return ret;
1631 }
1632
1633 static int __kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
1634 const struct kvm_vcpu_init *init)
1635 {
1636 unsigned long features = init->features[0];
1637 struct kvm *kvm = vcpu->kvm;
1638 int ret = -EINVAL;
1639
1640 mutex_lock(&kvm->arch.config_lock);
1641
1642 if (test_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags) &&
1643 kvm_vcpu_init_changed(vcpu, init))
1644 goto out_unlock;
1645
1646 bitmap_copy(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES);
1647
1648 ret = kvm_setup_vcpu(vcpu);
1649 if (ret)
1650 goto out_unlock;
1651
1652 /* Now we know what it is, we can reset it. */
1653 kvm_reset_vcpu(vcpu);
1654
1655 set_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags);
1656 vcpu_set_flag(vcpu, VCPU_INITIALIZED);
1657 ret = 0;
1658 out_unlock:
1659 mutex_unlock(&kvm->arch.config_lock);
1660 return ret;
1661 }
1662
1663 static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
1664 const struct kvm_vcpu_init *init)
1665 {
1666 int ret;
1667
1668 if (init->target != KVM_ARM_TARGET_GENERIC_V8 &&
1669 init->target != kvm_target_cpu())
1670 return -EINVAL;
1671
1672 ret = kvm_vcpu_init_check_features(vcpu, init);
1673 if (ret)
1674 return ret;
1675
1676 if (!kvm_vcpu_initialized(vcpu))
1677 return __kvm_vcpu_set_target(vcpu, init);
1678
1679 if (kvm_vcpu_init_changed(vcpu, init))
1680 return -EINVAL;
1681
1682 kvm_reset_vcpu(vcpu);
1683 return 0;
1684 }
1685
1686 static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
1687 struct kvm_vcpu_init *init)
1688 {
1689 bool power_off = false;
1690 int ret;
1691
1692 /*
1693 * Treat the power-off vCPU feature as ephemeral. Clear the bit to avoid
1694 * reflecting it in the finalized feature set, thus limiting its scope
1695 * to a single KVM_ARM_VCPU_INIT call.
1696 */
1697 if (init->features[0] & BIT(KVM_ARM_VCPU_POWER_OFF)) {
1698 init->features[0] &= ~BIT(KVM_ARM_VCPU_POWER_OFF);
1699 power_off = true;
1700 }
1701
1702 ret = kvm_vcpu_set_target(vcpu, init);
1703 if (ret)
1704 return ret;
1705
1706 /*
1707 * Ensure a rebooted VM will fault in RAM pages and detect if the
1708 * guest MMU is turned off and flush the caches as needed.
1709 *
1710 * S2FWB enforces all memory accesses to RAM being cacheable,
1711 * ensuring that the data side is always coherent. We still
1712 * need to invalidate the I-cache though, as FWB does *not*
1713 * imply CTR_EL0.DIC.
1714 */
1715 if (vcpu_has_run_once(vcpu)) {
1716 if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
1717 stage2_unmap_vm(vcpu->kvm);
1718 else
1719 icache_inval_all_pou();
1720 }
1721
1722 vcpu_reset_hcr(vcpu);
1723
1724 /*
1725 * Handle the "start in power-off" case.
1726 */
1727 spin_lock(&vcpu->arch.mp_state_lock);
1728
1729 if (power_off)
1730 __kvm_arm_vcpu_power_off(vcpu);
1731 else
1732 WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_RUNNABLE);
1733
1734 spin_unlock(&vcpu->arch.mp_state_lock);
1735
1736 return 0;
1737 }
1738
1739 static int kvm_arm_vcpu_set_attr(struct kvm_vcpu *vcpu,
1740 struct kvm_device_attr *attr)
1741 {
1742 int ret = -ENXIO;
1743
1744 switch (attr->group) {
1745 default:
1746 ret = kvm_arm_vcpu_arch_set_attr(vcpu, attr);
1747 break;
1748 }
1749
1750 return ret;
1751 }
1752
1753 static int kvm_arm_vcpu_get_attr(struct kvm_vcpu *vcpu,
1754 struct kvm_device_attr *attr)
1755 {
1756 int ret = -ENXIO;
1757
1758 switch (attr->group) {
1759 default:
1760 ret = kvm_arm_vcpu_arch_get_attr(vcpu, attr);
1761 break;
1762 }
1763
1764 return ret;
1765 }
1766
1767 static int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu,
1768 struct kvm_device_attr *attr)
1769 {
1770 int ret = -ENXIO;
1771
1772 switch (attr->group) {
1773 default:
1774 ret = kvm_arm_vcpu_arch_has_attr(vcpu, attr);
1775 break;
1776 }
1777
1778 return ret;
1779 }
1780
1781 static int kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
1782 struct kvm_vcpu_events *events)
1783 {
1784 memset(events, 0, sizeof(*events));
1785
1786 return __kvm_arm_vcpu_get_events(vcpu, events);
1787 }
1788
1789 static int kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
1790 struct kvm_vcpu_events *events)
1791 {
1792 int i;
1793
1794 /* check whether the reserved field is zero */
1795 for (i = 0; i < ARRAY_SIZE(events->reserved); i++)
1796 if (events->reserved[i])
1797 return -EINVAL;
1798
1799 /* check whether the pad field is zero */
1800 for (i = 0; i < ARRAY_SIZE(events->exception.pad); i++)
1801 if (events->exception.pad[i])
1802 return -EINVAL;
1803
1804 return __kvm_arm_vcpu_set_events(vcpu, events);
1805 }
1806
1807 long kvm_arch_vcpu_ioctl(struct file *filp,
1808 unsigned int ioctl, unsigned long arg)
1809 {
1810 struct kvm_vcpu *vcpu = filp->private_data;
1811 void __user *argp = (void __user *)arg;
1812 struct kvm_device_attr attr;
1813 long r;
1814
1815 switch (ioctl) {
1816 case KVM_ARM_VCPU_INIT: {
1817 struct kvm_vcpu_init init;
1818
1819 r = -EFAULT;
1820 if (copy_from_user(&init, argp, sizeof(init)))
1821 break;
1822
1823 r = kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init);
1824 break;
1825 }
1826 case KVM_SET_ONE_REG:
1827 case KVM_GET_ONE_REG: {
1828 struct kvm_one_reg reg;
1829
1830 r = -ENOEXEC;
1831 if (unlikely(!kvm_vcpu_initialized(vcpu)))
1832 break;
1833
1834 r = -EFAULT;
1835 if (copy_from_user(&reg, argp, sizeof(reg)))
1836 break;
1837
1838 /*
1839 * We could owe a reset due to PSCI. Handle the pending reset
1840 * here to ensure userspace register accesses are ordered after
1841 * the reset.
1842 */
1843 if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
1844 kvm_reset_vcpu(vcpu);
1845
1846 if (ioctl == KVM_SET_ONE_REG)
1847 r = kvm_arm_set_reg(vcpu, &reg);
1848 else
1849 r = kvm_arm_get_reg(vcpu, &reg);
1850 break;
1851 }
1852 case KVM_GET_REG_LIST: {
1853 struct kvm_reg_list __user *user_list = argp;
1854 struct kvm_reg_list reg_list;
1855 unsigned n;
1856
1857 r = -ENOEXEC;
1858 if (unlikely(!kvm_vcpu_initialized(vcpu)))
1859 break;
1860
1861 r = -EPERM;
1862 if (!kvm_arm_vcpu_is_finalized(vcpu))
1863 break;
1864
1865 r = -EFAULT;
1866 if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
1867 break;
1868 n = reg_list.n;
1869 reg_list.n = kvm_arm_num_regs(vcpu);
1870 if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
1871 break;
1872 r = -E2BIG;
1873 if (n < reg_list.n)
1874 break;
1875 r = kvm_arm_copy_reg_indices(vcpu, user_list->reg);
1876 break;
1877 }
1878 case KVM_SET_DEVICE_ATTR: {
1879 r = -EFAULT;
1880 if (copy_from_user(&attr, argp, sizeof(attr)))
1881 break;
1882 r = kvm_arm_vcpu_set_attr(vcpu, &attr);
1883 break;
1884 }
1885 case KVM_GET_DEVICE_ATTR: {
1886 r = -EFAULT;
1887 if (copy_from_user(&attr, argp, sizeof(attr)))
1888 break;
1889 r = kvm_arm_vcpu_get_attr(vcpu, &attr);
1890 break;
1891 }
1892 case KVM_HAS_DEVICE_ATTR: {
1893 r = -EFAULT;
1894 if (copy_from_user(&attr, argp, sizeof(attr)))
1895 break;
1896 r = kvm_arm_vcpu_has_attr(vcpu, &attr);
1897 break;
1898 }
1899 case KVM_GET_VCPU_EVENTS: {
1900 struct kvm_vcpu_events events;
1901
1902 if (!kvm_vcpu_initialized(vcpu))
1903 return -ENOEXEC;
1904
1905 if (kvm_arm_vcpu_get_events(vcpu, &events))
1906 return -EINVAL;
1907
1908 if (copy_to_user(argp, &events, sizeof(events)))
1909 return -EFAULT;
1910
1911 return 0;
1912 }
1913 case KVM_SET_VCPU_EVENTS: {
1914 struct kvm_vcpu_events events;
1915
1916 if (!kvm_vcpu_initialized(vcpu))
1917 return -ENOEXEC;
1918
1919 if (copy_from_user(&events, argp, sizeof(events)))
1920 return -EFAULT;
1921
1922 return kvm_arm_vcpu_set_events(vcpu, &events);
1923 }
1924 case KVM_ARM_VCPU_FINALIZE: {
1925 int what;
1926
1927 if (!kvm_vcpu_initialized(vcpu))
1928 return -ENOEXEC;
1929
1930 if (get_user(what, (const int __user *)argp))
1931 return -EFAULT;
1932
1933 return kvm_arm_vcpu_finalize(vcpu, what);
1934 }
1935 default:
1936 r = -EINVAL;
1937 }
1938
1939 return r;
1940 }
1941
1942 long kvm_arch_vcpu_unlocked_ioctl(struct file *filp, unsigned int ioctl,
1943 unsigned long arg)
1944 {
1945 return -ENOIOCTLCMD;
1946 }
1947
1948 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
1949 {
1950
1951 }
1952
1953 static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
1954 struct kvm_arm_device_addr *dev_addr)
1955 {
1956 switch (FIELD_GET(KVM_ARM_DEVICE_ID_MASK, dev_addr->id)) {
1957 case KVM_ARM_DEVICE_VGIC_V2:
1958 if (!vgic_present)
1959 return -ENXIO;
1960 return kvm_set_legacy_vgic_v2_addr(kvm, dev_addr);
1961 default:
1962 return -ENODEV;
1963 }
1964 }
1965
1966 static int kvm_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1967 {
1968 switch (attr->group) {
1969 case KVM_ARM_VM_SMCCC_CTRL:
1970 return kvm_vm_smccc_has_attr(kvm, attr);
1971 default:
1972 return -ENXIO;
1973 }
1974 }
1975
1976 static int kvm_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1977 {
1978 switch (attr->group) {
1979 case KVM_ARM_VM_SMCCC_CTRL:
1980 return kvm_vm_smccc_set_attr(kvm, attr);
1981 default:
1982 return -ENXIO;
1983 }
1984 }
1985
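/*
 * Dispatch the arm64-specific VM ioctls. When protected KVM (pKVM) is
 * enabled, any ioctl that has not been explicitly allowed for protected
 * guests is rejected up front with -EINVAL.
 */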
1986 int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
1987 {
1988 struct kvm *kvm = filp->private_data;
1989 void __user *argp = (void __user *)arg;
1990 struct kvm_device_attr attr;
1991
1992 if (is_protected_kvm_enabled() && !kvm_pkvm_ioctl_allowed(kvm, ioctl))
1993 return -EINVAL;
1994
1995 switch (ioctl) {
1996 case KVM_CREATE_IRQCHIP: {
1997 int ret;
1998 if (!vgic_present)
1999 return -ENXIO;
2000 mutex_lock(&kvm->lock);
2001 ret = kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
2002 mutex_unlock(&kvm->lock);
2003 return ret;
2004 }
2005 case KVM_ARM_SET_DEVICE_ADDR: {
2006 struct kvm_arm_device_addr dev_addr;
2007
2008 if (copy_from_user(&dev_addr, argp, sizeof(dev_addr)))
2009 return -EFAULT;
2010 return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
2011 }
2012 case KVM_ARM_PREFERRED_TARGET: {
2013 struct kvm_vcpu_init init = {
2014 .target = KVM_ARM_TARGET_GENERIC_V8,
2015 };
2016
2017 if (copy_to_user(argp, &init, sizeof(init)))
2018 return -EFAULT;
2019
2020 return 0;
2021 }
2022 case KVM_ARM_MTE_COPY_TAGS: {
2023 struct kvm_arm_copy_mte_tags copy_tags;
2024
2025 if (copy_from_user(&copy_tags, argp, sizeof(copy_tags)))
2026 return -EFAULT;
2027 return kvm_vm_ioctl_mte_copy_tags(kvm, &copy_tags);
2028 }
2029 case KVM_ARM_SET_COUNTER_OFFSET: {
2030 struct kvm_arm_counter_offset offset;
2031
2032 if (copy_from_user(&offset, argp, sizeof(offset)))
2033 return -EFAULT;
2034 return kvm_vm_ioctl_set_counter_offset(kvm, &offset);
2035 }
2036 case KVM_HAS_DEVICE_ATTR: {
2037 if (copy_from_user(&attr, argp, sizeof(attr)))
2038 return -EFAULT;
2039
2040 return kvm_vm_has_attr(kvm, &attr);
2041 }
2042 case KVM_SET_DEVICE_ATTR: {
2043 if (copy_from_user(&attr, argp, sizeof(attr)))
2044 return -EFAULT;
2045
2046 return kvm_vm_set_attr(kvm, &attr);
2047 }
2048 case KVM_ARM_GET_REG_WRITABLE_MASKS: {
2049 struct reg_mask_range range;
2050
2051 if (copy_from_user(&range, argp, sizeof(range)))
2052 return -EFAULT;
2053 return kvm_vm_ioctl_get_reg_writable_masks(kvm, &range);
2054 }
2055 default:
2056 return -EINVAL;
2057 }
2058 }
2059
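/*
 * Size (and allocation order) of the nVHE hypervisor's per-CPU region,
 * derived from the __per_cpu_start/__per_cpu_end symbols of the nVHE
 * object. One copy of this region is allocated for each possible CPU.
 */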
2060 static unsigned long nvhe_percpu_size(void)
2061 {
2062 return (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_end) -
2063 (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_start);
2064 }
2065
2066 static unsigned long nvhe_percpu_order(void)
2067 {
2068 unsigned long size = nvhe_percpu_size();
2069
2070 return size ? get_order(size) : 0;
2071 }
2072
2073 static size_t pkvm_host_sve_state_order(void)
2074 {
2075 return get_order(pkvm_host_sve_state_size());
2076 }
2077
2078 /* A lookup table holding the hypervisor VA for each vector slot */
2079 static void *hyp_spectre_vector_selector[BP_HARDEN_EL2_SLOTS];
2080
2081 static void kvm_init_vector_slot(void *base, enum arm64_hyp_spectre_vector slot)
2082 {
2083 hyp_spectre_vector_selector[slot] = __kvm_vector_slot2addr(base, slot);
2084 }
2085
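/*
 * Populate the vector slot lookup table: the direct slots point at the hyp
 * mapping of the regular and Spectre-hardened vectors. If the system needs
 * idmapped vectors (and pKVM is not in use), the hardened vectors also get
 * an extra executable mapping, and the two indirect slots are initialised
 * from whichever mapping results.
 */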
2086 static int kvm_init_vector_slots(void)
2087 {
2088 int err;
2089 void *base;
2090
2091 base = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
2092 kvm_init_vector_slot(base, HYP_VECTOR_DIRECT);
2093
2094 base = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
2095 kvm_init_vector_slot(base, HYP_VECTOR_SPECTRE_DIRECT);
2096
2097 if (kvm_system_needs_idmapped_vectors() &&
2098 !is_protected_kvm_enabled()) {
2099 err = create_hyp_exec_mappings(__pa_symbol(__bp_harden_hyp_vecs),
2100 __BP_HARDEN_HYP_VECS_SZ, &base);
2101 if (err)
2102 return err;
2103 }
2104
2105 kvm_init_vector_slot(base, HYP_VECTOR_INDIRECT);
2106 kvm_init_vector_slot(base, HYP_VECTOR_SPECTRE_INDIRECT);
2107 return 0;
2108 }
2109
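/*
 * Fill in the kvm_nvhe_init_params for this CPU: the tpidr_el2 offset used
 * for EL2 per-cpu accesses, the EL2 MAIR/TCR/HCR values and the hyp page
 * table base. The structure is cleaned to the PoC since EL2 reads it with
 * the MMU off.
 */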
2110 static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
2111 {
2112 struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
2113 unsigned long tcr;
2114
2115 /*
2116 * Calculate the raw per-cpu offset without a translation from the
2117 * kernel's mapping to the linear mapping, and store it in tpidr_el2
2118 * so that we can use adr_l to access per-cpu variables in EL2.
2119 * Also drop the KASAN tag which gets in the way...
2120 */
2121 params->tpidr_el2 = (unsigned long)kasan_reset_tag(per_cpu_ptr_nvhe_sym(__per_cpu_start, cpu)) -
2122 (unsigned long)kvm_ksym_ref(CHOOSE_NVHE_SYM(__per_cpu_start));
2123
2124 params->mair_el2 = read_sysreg(mair_el1);
2125
2126 tcr = read_sysreg(tcr_el1);
2127 if (cpus_have_final_cap(ARM64_KVM_HVHE)) {
2128 tcr &= ~(TCR_HD | TCR_HA | TCR_A1 | TCR_T0SZ_MASK);
2129 tcr |= TCR_EPD1_MASK;
2130 } else {
2131 unsigned long ips = FIELD_GET(TCR_IPS_MASK, tcr);
2132
2133 tcr &= TCR_EL2_MASK;
2134 tcr |= TCR_EL2_RES1 | FIELD_PREP(TCR_EL2_PS_MASK, ips);
2135 if (lpa2_is_enabled())
2136 tcr |= TCR_EL2_DS;
2137 }
2138 tcr |= TCR_T0SZ(hyp_va_bits);
2139 params->tcr_el2 = tcr;
2140
2141 params->pgd_pa = kvm_mmu_get_httbr();
2142 if (is_protected_kvm_enabled())
2143 params->hcr_el2 = HCR_HOST_NVHE_PROTECTED_FLAGS;
2144 else
2145 params->hcr_el2 = HCR_HOST_NVHE_FLAGS;
2146
2147 if (system_supports_mte())
2148 params->hcr_el2 |= HCR_ATA;
2149 else
2150 params->hcr_el2 |= HCR_TID5;
2151
2152 if (cpus_have_final_cap(ARM64_KVM_HVHE))
2153 params->hcr_el2 |= HCR_E2H;
2154 params->vttbr = params->vtcr = 0;
2155
2156 /*
2157 * Flush the init params from the data cache because the struct will
2158 * be read while the MMU is off.
2159 */
2160 kvm_flush_dcache_to_poc(params, sizeof(*params));
2161 }
2162
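/*
 * Replace the HYP stub vectors with KVM's init vector and call
 * __kvm_hyp_init with this CPU's init params to take over EL2.
 */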
2163 static void hyp_install_host_vector(void)
2164 {
2165 struct kvm_nvhe_init_params *params;
2166 struct arm_smccc_res res;
2167
2168 /* Switch from the HYP stub to our own HYP init vector */
2169 __hyp_set_vectors(kvm_get_idmap_vector());
2170
2171 /*
2172 * Call initialization code, and switch to the full blown HYP code.
2173 * If the cpucaps haven't been finalized yet, something has gone very
2174 * wrong, and hyp will crash and burn when it uses any
2175 * cpus_have_*_cap() wrapper.
2176 */
2177 BUG_ON(!system_capabilities_finalized());
2178 params = this_cpu_ptr_nvhe_sym(kvm_init_params);
2179 arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_hyp_init), virt_to_phys(params), &res);
2180 WARN_ON(res.a0 != SMCCC_RET_SUCCESS);
2181 }
2182
2183 static void cpu_init_hyp_mode(void)
2184 {
2185 hyp_install_host_vector();
2186
2187 /*
2188 * Disabling SSBD on a non-VHE system requires us to enable SSBS
2189 * at EL2.
2190 */
2191 if (this_cpu_has_cap(ARM64_SSBS) &&
2192 arm64_get_spectre_v4_state() == SPECTRE_VULNERABLE) {
2193 kvm_call_hyp_nvhe(__kvm_enable_ssbs);
2194 }
2195 }
2196
2197 static void cpu_hyp_reset(void)
2198 {
2199 if (!is_kernel_in_hyp_mode())
2200 __hyp_reset_vectors();
2201 }
2202
2203 /*
2204 * EL2 vectors can be mapped and rerouted in a number of ways,
2205 * depending on the kernel configuration and CPU present:
2206 *
2207 * - If the CPU is affected by Spectre-v2, the hardening sequence is
2208 * placed in one of the vector slots, which is executed before jumping
2209 * to the real vectors.
2210 *
2211 * - If the CPU also has the ARM64_SPECTRE_V3A cap, the slot
2212 * containing the hardening sequence is mapped next to the idmap page,
2213 * and executed before jumping to the real vectors.
2214 *
2215 * - If the CPU only has the ARM64_SPECTRE_V3A cap, then an
2216 * empty slot is selected, mapped next to the idmap page, and
2217 * executed before jumping to the real vectors.
2218 *
2219 * Note that ARM64_SPECTRE_V3A is somewhat incompatible with
2220 * VHE, as we don't have hypervisor-specific mappings. If the system
2221 * is VHE and yet selects this capability, it will be ignored.
2222 */
2223 static void cpu_set_hyp_vector(void)
2224 {
2225 struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);
2226 void *vector = hyp_spectre_vector_selector[data->slot];
2227
2228 if (!is_protected_kvm_enabled())
2229 *this_cpu_ptr_hyp_sym(kvm_hyp_vector) = (unsigned long)vector;
2230 else
2231 kvm_call_hyp_nvhe(__pkvm_cpu_set_vector, data->slot);
2232 }
2233
2234 static void cpu_hyp_init_context(void)
2235 {
2236 kvm_init_host_cpu_context(host_data_ptr(host_ctxt));
2237 kvm_init_host_debug_data();
2238
2239 if (!is_kernel_in_hyp_mode())
2240 cpu_init_hyp_mode();
2241 }
2242
2243 static void cpu_hyp_init_features(void)
2244 {
2245 cpu_set_hyp_vector();
2246
2247 if (is_kernel_in_hyp_mode()) {
2248 kvm_timer_init_vhe();
2249 kvm_debug_init_vhe();
2250 }
2251
2252 if (vgic_present)
2253 kvm_vgic_init_cpu_hardware();
2254 }
2255
2256 static void cpu_hyp_reinit(void)
2257 {
2258 cpu_hyp_reset();
2259 cpu_hyp_init_context();
2260 cpu_hyp_init_features();
2261 }
2262
2263 static void cpu_hyp_init(void *discard)
2264 {
2265 if (!__this_cpu_read(kvm_hyp_initialized)) {
2266 cpu_hyp_reinit();
2267 __this_cpu_write(kvm_hyp_initialized, 1);
2268 }
2269 }
2270
2271 static void cpu_hyp_uninit(void *discard)
2272 {
2273 if (!is_protected_kvm_enabled() && __this_cpu_read(kvm_hyp_initialized)) {
2274 cpu_hyp_reset();
2275 __this_cpu_write(kvm_hyp_initialized, 0);
2276 }
2277 }
2278
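/*
 * Per-CPU enable hook: make sure EL2 is initialised on this CPU and bring
 * up the vGIC and arch timer state.
 */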
2279 int kvm_arch_enable_virtualization_cpu(void)
2280 {
2281 /*
2282 * Most calls to this function are made with migration
2283 * disabled, but not with preemption disabled. The former is
2284 * enough to ensure correctness, but most of the helpers
2285 * expect the latter and will throw a tantrum otherwise.
2286 */
2287 preempt_disable();
2288
2289 cpu_hyp_init(NULL);
2290
2291 kvm_vgic_cpu_up();
2292 kvm_timer_cpu_up();
2293
2294 preempt_enable();
2295
2296 return 0;
2297 }
2298
2299 void kvm_arch_disable_virtualization_cpu(void)
2300 {
2301 kvm_timer_cpu_down();
2302 kvm_vgic_cpu_down();
2303
2304 if (!is_protected_kvm_enabled())
2305 cpu_hyp_uninit(NULL);
2306 }
2307
2308 #ifdef CONFIG_CPU_PM
2309 static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
2310 unsigned long cmd,
2311 void *v)
2312 {
2313 /*
2314 * kvm_hyp_initialized is left with its old value over
2315 * PM_ENTER->PM_EXIT. It is used to indicate PM_EXIT should
2316 * re-enable hyp.
2317 */
2318 switch (cmd) {
2319 case CPU_PM_ENTER:
2320 if (__this_cpu_read(kvm_hyp_initialized))
2321 /*
2322 * don't update kvm_hyp_initialized here
2323 * so that the hyp will be re-enabled
2324 * when we resume. See below.
2325 */
2326 cpu_hyp_reset();
2327
2328 return NOTIFY_OK;
2329 case CPU_PM_ENTER_FAILED:
2330 case CPU_PM_EXIT:
2331 if (__this_cpu_read(kvm_hyp_initialized))
2332 /* The hyp was enabled before suspend. */
2333 cpu_hyp_reinit();
2334
2335 return NOTIFY_OK;
2336
2337 default:
2338 return NOTIFY_DONE;
2339 }
2340 }
2341
2342 static struct notifier_block hyp_init_cpu_pm_nb = {
2343 .notifier_call = hyp_init_cpu_pm_notifier,
2344 };
2345
2346 static void __init hyp_cpu_pm_init(void)
2347 {
2348 if (!is_protected_kvm_enabled())
2349 cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
2350 }
2351 static void __init hyp_cpu_pm_exit(void)
2352 {
2353 if (!is_protected_kvm_enabled())
2354 cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb);
2355 }
2356 #else
2357 static inline void __init hyp_cpu_pm_init(void)
2358 {
2359 }
2360 static inline void __init hyp_cpu_pm_exit(void)
2361 {
2362 }
2363 #endif
2364
2365 static void __init init_cpu_logical_map(void)
2366 {
2367 unsigned int cpu;
2368
2369 /*
2370 * Copy the MPIDR <-> logical CPU ID mapping to hyp.
2371 * Only copy the set of online CPUs whose features have been checked
2372 * against the finalized system capabilities. The hypervisor will not
2373 * allow any other CPUs from the `possible` set to boot.
2374 */
2375 for_each_online_cpu(cpu)
2376 hyp_cpu_logical_map[cpu] = cpu_logical_map(cpu);
2377 }
2378
2379 #define init_psci_0_1_impl_state(config, what) \
2380 config.psci_0_1_ ## what ## _implemented = psci_ops.what
2381
2382 static bool __init init_psci_relay(void)
2383 {
2384 /*
2385 * If PSCI has not been initialized, protected KVM cannot install
2386 * itself on newly booted CPUs.
2387 */
2388 if (!psci_ops.get_version) {
2389 kvm_err("Cannot initialize protected mode without PSCI\n");
2390 return false;
2391 }
2392
2393 kvm_host_psci_config.version = psci_ops.get_version();
2394 kvm_host_psci_config.smccc_version = arm_smccc_get_version();
2395
2396 if (kvm_host_psci_config.version == PSCI_VERSION(0, 1)) {
2397 kvm_host_psci_config.function_ids_0_1 = get_psci_0_1_function_ids();
2398 init_psci_0_1_impl_state(kvm_host_psci_config, cpu_suspend);
2399 init_psci_0_1_impl_state(kvm_host_psci_config, cpu_on);
2400 init_psci_0_1_impl_state(kvm_host_psci_config, cpu_off);
2401 init_psci_0_1_impl_state(kvm_host_psci_config, migrate);
2402 }
2403 return true;
2404 }
2405
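/*
 * Bring up EL2 on all online CPUs and initialise the subsystems that need
 * it (CPU PM notifier, vGIC, arch timer, perf callbacks, hyp tracing). On
 * failure, or when not running in protected mode, EL2 is torn down again
 * before returning.
 */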
2406 static int __init init_subsystems(void)
2407 {
2408 int err = 0;
2409
2410 /*
2411 * Enable hardware so that subsystem initialisation can access EL2.
2412 */
2413 on_each_cpu(cpu_hyp_init, NULL, 1);
2414
2415 /*
2416 * Register CPU low-power notifier
2417 */
2418 hyp_cpu_pm_init();
2419
2420 /*
2421 * Init HYP view of VGIC
2422 */
2423 err = kvm_vgic_hyp_init();
2424 switch (err) {
2425 case 0:
2426 vgic_present = true;
2427 break;
2428 case -ENODEV:
2429 case -ENXIO:
2430 /*
2431 * No VGIC? No pKVM for you.
2432 *
2433 * Protected mode assumes that VGICv3 is present, so no point
2434 * in trying to hobble along if vgic initialization fails.
2435 */
2436 if (is_protected_kvm_enabled())
2437 goto out;
2438
2439 /*
2440 * Otherwise, userspace could choose to implement a GIC for its
2441 * guest on non-cooperative hardware.
2442 */
2443 vgic_present = false;
2444 err = 0;
2445 break;
2446 default:
2447 goto out;
2448 }
2449
2450 if (kvm_mode == KVM_MODE_NV &&
2451 !(vgic_present && (kvm_vgic_global_state.type == VGIC_V3 ||
2452 kvm_vgic_global_state.has_gcie_v3_compat))) {
2453 kvm_err("NV support requires GICv3 or GICv5 with legacy support, giving up\n");
2454 err = -EINVAL;
2455 goto out;
2456 }
2457
2458 /*
2459 * Init HYP architected timer support
2460 */
2461 err = kvm_timer_hyp_init(vgic_present);
2462 if (err)
2463 goto out;
2464
2465 kvm_register_perf_callbacks();
2466
2467 err = kvm_hyp_trace_init();
2468 if (err)
2469 kvm_err("Failed to initialize Hyp tracing\n");
2470
2471 out:
2472 if (err)
2473 hyp_cpu_pm_exit();
2474
2475 if (err || !is_protected_kvm_enabled())
2476 on_each_cpu(cpu_hyp_uninit, NULL, 1);
2477
2478 return err;
2479 }
2480
2481 static void __init teardown_subsystems(void)
2482 {
2483 kvm_unregister_perf_callbacks();
2484 hyp_cpu_pm_exit();
2485 }
2486
2487 static void __init teardown_hyp_mode(void)
2488 {
2489 bool free_sve = system_supports_sve() && is_protected_kvm_enabled();
2490 int cpu;
2491
2492 free_hyp_pgds();
2493 for_each_possible_cpu(cpu) {
2494 if (per_cpu(kvm_hyp_initialized, cpu))
2495 continue;
2496
2497 free_pages(per_cpu(kvm_arm_hyp_stack_base, cpu), NVHE_STACK_SHIFT - PAGE_SHIFT);
2498
2499 if (!kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu])
2500 continue;
2501
2502 if (free_sve) {
2503 struct cpu_sve_state *sve_state;
2504
2505 sve_state = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state;
2506 free_pages((unsigned long) sve_state, pkvm_host_sve_state_order());
2507 }
2508
2509 free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order());
2510
2511 }
2512 }
2513
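/*
 * Hand control of EL2 over to the protected hypervisor: pass it the
 * reserved memory pool, the per-CPU bases and the hyp VA size via the
 * __pkvm_init hypercall, issued from this CPU's freshly initialised
 * context.
 */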
2514 static int __init do_pkvm_init(u32 hyp_va_bits)
2515 {
2516 void *per_cpu_base = kvm_ksym_ref(kvm_nvhe_sym(kvm_arm_hyp_percpu_base));
2517 int ret;
2518
2519 preempt_disable();
2520 cpu_hyp_init_context();
2521 ret = kvm_call_hyp_nvhe(__pkvm_init, hyp_mem_base, hyp_mem_size,
2522 kern_hyp_va(per_cpu_base),
2523 hyp_va_bits);
2524 cpu_hyp_init_features();
2525
2526 /*
2527 * The stub hypercalls are now disabled, so set our local flag to
2528 * prevent a later re-init attempt in kvm_arch_enable_virtualization_cpu().
2529 */
2530 __this_cpu_write(kvm_hyp_initialized, 1);
2531 preempt_enable();
2532
2533 return ret;
2534 }
2535
2536 static u64 get_hyp_id_aa64pfr0_el1(void)
2537 {
2538 /*
2539 * Track whether the system isn't affected by spectre/meltdown in the
2540 * hypervisor's view of id_aa64pfr0_el1, used for protected VMs.
2541 * Although this is per-CPU, we make it global for simplicity, e.g., not
2542 * to have to worry about vcpu migration.
2543 *
2544 * Unlike for non-protected VMs, userspace cannot override this for
2545 * protected VMs.
2546 */
2547 u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
2548
2549 val &= ~(ID_AA64PFR0_EL1_CSV2 |
2550 ID_AA64PFR0_EL1_CSV3);
2551
2552 val |= FIELD_PREP(ID_AA64PFR0_EL1_CSV2,
2553 arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED);
2554 val |= FIELD_PREP(ID_AA64PFR0_EL1_CSV3,
2555 arm64_get_meltdown_state() == SPECTRE_UNAFFECTED);
2556
2557 return val;
2558 }
2559
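/*
 * Copy the sanitised ID register values and the fine-grained trap masks
 * into the nVHE hypervisor's own symbols, then flush the hyp BSS so the
 * values are visible before the EL2 MMU is enabled.
 */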
2560 static void kvm_hyp_init_symbols(void)
2561 {
2562 kvm_nvhe_sym(id_aa64pfr0_el1_sys_val) = get_hyp_id_aa64pfr0_el1();
2563 kvm_nvhe_sym(id_aa64pfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
2564 kvm_nvhe_sym(id_aa64pfr2_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR2_EL1);
2565 kvm_nvhe_sym(id_aa64isar0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR0_EL1);
2566 kvm_nvhe_sym(id_aa64isar1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR1_EL1);
2567 kvm_nvhe_sym(id_aa64isar2_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1);
2568 kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
2569 kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
2570 kvm_nvhe_sym(id_aa64mmfr2_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR2_EL1);
2571 kvm_nvhe_sym(id_aa64smfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64SMFR0_EL1);
2572 kvm_nvhe_sym(__icache_flags) = __icache_flags;
2573 kvm_nvhe_sym(kvm_arm_vmid_bits) = kvm_arm_vmid_bits;
2574
2575 /* Propagate the FGT state to the nVHE side */
2576 kvm_nvhe_sym(hfgrtr_masks) = hfgrtr_masks;
2577 kvm_nvhe_sym(hfgwtr_masks) = hfgwtr_masks;
2578 kvm_nvhe_sym(hfgitr_masks) = hfgitr_masks;
2579 kvm_nvhe_sym(hdfgrtr_masks) = hdfgrtr_masks;
2580 kvm_nvhe_sym(hdfgwtr_masks) = hdfgwtr_masks;
2581 kvm_nvhe_sym(hafgrtr_masks) = hafgrtr_masks;
2582 kvm_nvhe_sym(hfgrtr2_masks) = hfgrtr2_masks;
2583 kvm_nvhe_sym(hfgwtr2_masks) = hfgwtr2_masks;
2584 kvm_nvhe_sym(hfgitr2_masks) = hfgitr2_masks;
2585 kvm_nvhe_sym(hdfgrtr2_masks) = hdfgrtr2_masks;
2586 kvm_nvhe_sym(hdfgwtr2_masks) = hdfgwtr2_masks;
2587 kvm_nvhe_sym(ich_hfgrtr_masks) = ich_hfgrtr_masks;
2588 kvm_nvhe_sym(ich_hfgwtr_masks) = ich_hfgwtr_masks;
2589 kvm_nvhe_sym(ich_hfgitr_masks) = ich_hfgitr_masks;
2590
2591 /*
2592 * Flush entire BSS since part of its data containing init symbols is read
2593 * while the MMU is off.
2594 */
2595 kvm_flush_dcache_to_poc(kvm_ksym_ref(__hyp_bss_start),
2596 kvm_ksym_ref(__hyp_bss_end) - kvm_ksym_ref(__hyp_bss_start));
2597 }
2598
2599 static int __init kvm_hyp_init_protection(u32 hyp_va_bits)
2600 {
2601 void *addr = phys_to_virt(hyp_mem_base);
2602 int ret;
2603
2604 ret = create_hyp_mappings(addr, addr + hyp_mem_size, PAGE_HYP);
2605 if (ret)
2606 return ret;
2607
2608 ret = do_pkvm_init(hyp_va_bits);
2609 if (ret)
2610 return ret;
2611
2612 free_hyp_pgds();
2613
2614 return 0;
2615 }
2616
2617 static int init_pkvm_host_sve_state(void)
2618 {
2619 int cpu;
2620
2621 if (!system_supports_sve())
2622 return 0;
2623
2624 /* Allocate pages for host sve state in protected mode. */
2625 for_each_possible_cpu(cpu) {
2626 struct page *page = alloc_pages(GFP_KERNEL, pkvm_host_sve_state_order());
2627
2628 if (!page)
2629 return -ENOMEM;
2630
2631 per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state = page_address(page);
2632 }
2633
2634 /*
2635 * Don't map the pages in hyp since these are only used in protected
2636 * mode, which will (re)create its own mapping when initialized.
2637 */
2638
2639 return 0;
2640 }
2641
2642 static int pkvm_check_sme_dvmsync_fw_call(void)
2643 {
2644 struct arm_smccc_res res;
2645
2646 if (!cpus_have_final_cap(ARM64_WORKAROUND_4193714))
2647 return 0;
2648
2649 arm_smccc_1_1_smc(ARM_SMCCC_CPU_WORKAROUND_4193714, &res);
2650 if (res.a0) {
2651 kvm_err("pKVM requires firmware support for C1-Pro erratum 4193714\n");
2652 return -ENODEV;
2653 }
2654
2655 return 0;
2656 }
2657
2658 /*
2659 * Finalizes the initialization of hyp mode, once everything else is initialized
2660 * and the initialization process cannot fail.
2661 */
2662 static void finalize_init_hyp_mode(void)
2663 {
2664 int cpu;
2665
2666 if (system_supports_sve() && is_protected_kvm_enabled()) {
2667 for_each_possible_cpu(cpu) {
2668 struct cpu_sve_state *sve_state;
2669
2670 sve_state = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state;
2671 per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state =
2672 kern_hyp_va(sve_state);
2673 }
2674 }
2675 }
2676
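/*
 * Seed the per-CPU pointer-authentication keys of the hypervisor's own EL2
 * context with random values.
 */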
2677 static void pkvm_hyp_init_ptrauth(void)
2678 {
2679 struct kvm_cpu_context *hyp_ctxt;
2680 int cpu;
2681
2682 for_each_possible_cpu(cpu) {
2683 hyp_ctxt = per_cpu_ptr_nvhe_sym(kvm_hyp_ctxt, cpu);
2684 hyp_ctxt->sys_regs[APIAKEYLO_EL1] = get_random_long();
2685 hyp_ctxt->sys_regs[APIAKEYHI_EL1] = get_random_long();
2686 hyp_ctxt->sys_regs[APIBKEYLO_EL1] = get_random_long();
2687 hyp_ctxt->sys_regs[APIBKEYHI_EL1] = get_random_long();
2688 hyp_ctxt->sys_regs[APDAKEYLO_EL1] = get_random_long();
2689 hyp_ctxt->sys_regs[APDAKEYHI_EL1] = get_random_long();
2690 hyp_ctxt->sys_regs[APDBKEYLO_EL1] = get_random_long();
2691 hyp_ctxt->sys_regs[APDBKEYHI_EL1] = get_random_long();
2692 hyp_ctxt->sys_regs[APGAKEYLO_EL1] = get_random_long();
2693 hyp_ctxt->sys_regs[APGAKEYHI_EL1] = get_random_long();
2694 }
2695 }
2696
2697 /* Inits Hyp-mode on all online CPUs */
2698 static int __init init_hyp_mode(void)
2699 {
2700 u32 hyp_va_bits = kvm_hyp_va_bits();
2701 int cpu;
2702 int err = -ENOMEM;
2703
2704 /*
2705 * The protected Hyp-mode cannot be initialized if the memory pool
2706 * allocation has failed.
2707 */
2708 if (is_protected_kvm_enabled() && !hyp_mem_base)
2709 goto out_err;
2710
2711 /*
2712 * Allocate Hyp PGD and setup Hyp identity mapping
2713 */
2714 err = kvm_mmu_init(hyp_va_bits);
2715 if (err)
2716 goto out_err;
2717
2718 /*
2719 * Allocate stack pages for Hypervisor-mode
2720 */
2721 for_each_possible_cpu(cpu) {
2722 unsigned long stack_base;
2723
2724 stack_base = __get_free_pages(GFP_KERNEL, NVHE_STACK_SHIFT - PAGE_SHIFT);
2725 if (!stack_base) {
2726 err = -ENOMEM;
2727 goto out_err;
2728 }
2729
2730 per_cpu(kvm_arm_hyp_stack_base, cpu) = stack_base;
2731 }
2732
2733 /*
2734 * Allocate and initialize pages for Hypervisor-mode percpu regions.
2735 */
2736 for_each_possible_cpu(cpu) {
2737 struct page *page;
2738 void *page_addr;
2739
2740 page = alloc_pages(GFP_KERNEL, nvhe_percpu_order());
2741 if (!page) {
2742 err = -ENOMEM;
2743 goto out_err;
2744 }
2745
2746 page_addr = page_address(page);
2747 memcpy(page_addr, CHOOSE_NVHE_SYM(__per_cpu_start), nvhe_percpu_size());
2748 kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu] = (unsigned long)page_addr;
2749 }
2750
2751 kvm_nvhe_sym(hyp_nr_cpus) = num_possible_cpus();
2752
2753 /*
2754 * Map the Hyp-code called directly from the host
2755 */
2756 err = create_hyp_mappings(kvm_ksym_ref(__hyp_text_start),
2757 kvm_ksym_ref(__hyp_text_end), PAGE_HYP_EXEC);
2758 if (err) {
2759 kvm_err("Cannot map world-switch code\n");
2760 goto out_err;
2761 }
2762
2763 err = create_hyp_mappings(kvm_ksym_ref(__hyp_data_start),
2764 kvm_ksym_ref(__hyp_data_end), PAGE_HYP);
2765 if (err) {
2766 kvm_err("Cannot map .hyp.data section\n");
2767 goto out_err;
2768 }
2769
2770 err = create_hyp_mappings(kvm_ksym_ref(__hyp_rodata_start),
2771 kvm_ksym_ref(__hyp_rodata_end), PAGE_HYP_RO);
2772 if (err) {
2773 kvm_err("Cannot map .hyp.rodata section\n");
2774 goto out_err;
2775 }
2776
2777 err = create_hyp_mappings(kvm_ksym_ref(__start_rodata),
2778 kvm_ksym_ref(__end_rodata), PAGE_HYP_RO);
2779 if (err) {
2780 kvm_err("Cannot map rodata section\n");
2781 goto out_err;
2782 }
2783
2784 /*
2785 * .hyp.bss is guaranteed to be placed at the beginning of the .bss
2786 * section thanks to an assertion in the linker script. Map it RW and
2787 * the rest of .bss RO.
2788 */
2789 err = create_hyp_mappings(kvm_ksym_ref(__hyp_bss_start),
2790 kvm_ksym_ref(__hyp_bss_end), PAGE_HYP);
2791 if (err) {
2792 kvm_err("Cannot map hyp bss section: %d\n", err);
2793 goto out_err;
2794 }
2795
2796 err = create_hyp_mappings(kvm_ksym_ref(__hyp_bss_end),
2797 kvm_ksym_ref(__bss_stop), PAGE_HYP_RO);
2798 if (err) {
2799 kvm_err("Cannot map bss section\n");
2800 goto out_err;
2801 }
2802
2803 /*
2804 * Map the Hyp stack pages
2805 */
2806 for_each_possible_cpu(cpu) {
2807 struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
2808 char *stack_base = (char *)per_cpu(kvm_arm_hyp_stack_base, cpu);
2809
2810 err = create_hyp_stack(__pa(stack_base), &params->stack_hyp_va);
2811 if (err) {
2812 kvm_err("Cannot map hyp stack\n");
2813 goto out_err;
2814 }
2815
2816 /*
2817 * Save the stack PA in nvhe_init_params. This will be needed
2818 * to recreate the stack mapping in protected nVHE mode.
2819 * __hyp_pa() won't do the right thing there, since the stack
2820 * has been mapped in the flexible private VA space.
2821 */
2822 params->stack_pa = __pa(stack_base);
2823 }
2824
2825 for_each_possible_cpu(cpu) {
2826 char *percpu_begin = (char *)kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu];
2827 char *percpu_end = percpu_begin + nvhe_percpu_size();
2828
2829 /* Map Hyp percpu pages */
2830 err = create_hyp_mappings(percpu_begin, percpu_end, PAGE_HYP);
2831 if (err) {
2832 kvm_err("Cannot map hyp percpu region\n");
2833 goto out_err;
2834 }
2835
2836 /* Prepare the CPU initialization parameters */
2837 cpu_prepare_hyp_mode(cpu, hyp_va_bits);
2838 }
2839
2840 kvm_hyp_init_symbols();
2841
2842 if (is_protected_kvm_enabled()) {
2843 if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL) &&
2844 cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH))
2845 pkvm_hyp_init_ptrauth();
2846
2847 init_cpu_logical_map();
2848
2849 if (!init_psci_relay()) {
2850 err = -ENODEV;
2851 goto out_err;
2852 }
2853
2854 err = init_pkvm_host_sve_state();
2855 if (err)
2856 goto out_err;
2857
2858 err = pkvm_check_sme_dvmsync_fw_call();
2859 if (err)
2860 goto out_err;
2861
2862 err = kvm_hyp_init_protection(hyp_va_bits);
2863 if (err) {
2864 kvm_err("Failed to init hyp memory protection\n");
2865 goto out_err;
2866 }
2867 }
2868
2869 return 0;
2870
2871 out_err:
2872 teardown_hyp_mode();
2873 kvm_err("error initializing Hyp mode: %d\n", err);
2874 return err;
2875 }
2876
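/*
 * Resolve an MPIDR affinity value to a vCPU: try the (optional) compressed
 * MPIDR lookup table first, and fall back to a linear scan of the vCPUs.
 */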
2877 struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
2878 {
2879 struct kvm_vcpu *vcpu = NULL;
2880 struct kvm_mpidr_data *data;
2881 unsigned long i;
2882
2883 mpidr &= MPIDR_HWID_BITMASK;
2884
2885 rcu_read_lock();
2886 data = rcu_dereference(kvm->arch.mpidr_data);
2887
2888 if (data) {
2889 u16 idx = kvm_mpidr_index(data, mpidr);
2890
2891 vcpu = kvm_get_vcpu(kvm, data->cmpidr_to_idx[idx]);
2892 if (mpidr != kvm_vcpu_get_mpidr_aff(vcpu))
2893 vcpu = NULL;
2894 }
2895
2896 rcu_read_unlock();
2897
2898 if (vcpu)
2899 return vcpu;
2900
2901 kvm_for_each_vcpu(i, vcpu, kvm) {
2902 if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
2903 return vcpu;
2904 }
2905 return NULL;
2906 }
2907
2908 bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
2909 {
2910 return irqchip_in_kernel(kvm);
2911 }
2912
2913 int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
2914 struct irq_bypass_producer *prod)
2915 {
2916 struct kvm_kernel_irqfd *irqfd =
2917 container_of(cons, struct kvm_kernel_irqfd, consumer);
2918 struct kvm_kernel_irq_routing_entry *irq_entry = &irqfd->irq_entry;
2919
2920 /*
2921 * The only thing we have a chance of directly-injecting is LPIs. Maybe
2922 * one day...
2923 */
2924 if (irq_entry->type != KVM_IRQ_ROUTING_MSI)
2925 return 0;
2926
2927 return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq,
2928 &irqfd->irq_entry);
2929 }
2930
2931 void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
2932 struct irq_bypass_producer *prod)
2933 {
2934 struct kvm_kernel_irqfd *irqfd =
2935 container_of(cons, struct kvm_kernel_irqfd, consumer);
2936 struct kvm_kernel_irq_routing_entry *irq_entry = &irqfd->irq_entry;
2937
2938 if (irq_entry->type != KVM_IRQ_ROUTING_MSI)
2939 return;
2940
2941 kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq);
2942 }
2943
2944 void kvm_arch_update_irqfd_routing(struct kvm_kernel_irqfd *irqfd,
2945 struct kvm_kernel_irq_routing_entry *old,
2946 struct kvm_kernel_irq_routing_entry *new)
2947 {
2948 if (old->type == KVM_IRQ_ROUTING_MSI &&
2949 new->type == KVM_IRQ_ROUTING_MSI &&
2950 !memcmp(&old->msi, &new->msi, sizeof(new->msi)))
2951 return;
2952
2953 /*
2954 * Remapping the vLPI requires taking the its_lock mutex to resolve
2955 * the new translation. We're in spinlock land at this point, so no
2956 * chance of resolving the translation.
2957 *
2958 * Unmap the vLPI and fall back to software LPI injection.
2959 */
2960 return kvm_vgic_v4_unset_forwarding(irqfd->kvm, irqfd->producer->irq);
2961 }
2962
2963 void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *cons)
2964 {
2965 struct kvm_kernel_irqfd *irqfd =
2966 container_of(cons, struct kvm_kernel_irqfd, consumer);
2967
2968 kvm_arm_halt_guest(irqfd->kvm);
2969 }
2970
2971 void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *cons)
2972 {
2973 struct kvm_kernel_irqfd *irqfd =
2974 container_of(cons, struct kvm_kernel_irqfd, consumer);
2975
2976 kvm_arm_resume_guest(irqfd->kvm);
2977 }
2978
2979 /* Initialize Hyp-mode and memory mappings on all CPUs */
2980 static __init int kvm_arm_init(void)
2981 {
2982 int err;
2983 bool in_hyp_mode;
2984
2985 if (!is_hyp_mode_available()) {
2986 kvm_info("HYP mode not available\n");
2987 return -ENODEV;
2988 }
2989
2990 if (kvm_get_mode() == KVM_MODE_NONE) {
2991 kvm_info("KVM disabled from command line\n");
2992 return -ENODEV;
2993 }
2994
2995 err = kvm_sys_reg_table_init();
2996 if (err) {
2997 kvm_info("Error initializing system register tables");
2998 return err;
2999 }
3000
3001 in_hyp_mode = is_kernel_in_hyp_mode();
3002
3003 if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) ||
3004 cpus_have_final_cap(ARM64_WORKAROUND_1508412))
3005 kvm_info("Guests without required CPU erratum workarounds can deadlock system!\n" \
3006 "Only trusted guests should be used on this system.\n");
3007
3008 err = kvm_set_ipa_limit();
3009 if (err)
3010 return err;
3011
3012 err = kvm_arm_init_sve();
3013 if (err)
3014 return err;
3015
3016 err = kvm_arm_vmid_alloc_init();
3017 if (err) {
3018 kvm_err("Failed to initialize VMID allocator.\n");
3019 return err;
3020 }
3021
3022 if (!in_hyp_mode) {
3023 err = init_hyp_mode();
3024 if (err)
3025 goto out_err;
3026 }
3027
3028 err = kvm_init_vector_slots();
3029 if (err) {
3030 kvm_err("Cannot initialise vector slots\n");
3031 goto out_hyp;
3032 }
3033
3034 err = init_subsystems();
3035 if (err)
3036 goto out_hyp;
3037
3038 kvm_info("%s%sVHE%s mode initialized successfully\n",
3039 in_hyp_mode ? "" : (is_protected_kvm_enabled() ?
3040 "Protected " : "Hyp "),
3041 in_hyp_mode ? "" : (cpus_have_final_cap(ARM64_KVM_HVHE) ?
3042 "h" : "n"),
3043 cpus_have_final_cap(ARM64_HAS_NESTED_VIRT) ? "+NV2": "");
3044
3045 /*
3046 * FIXME: Do something reasonable if kvm_init() fails after pKVM
3047 * hypervisor protection is finalized.
3048 */
3049 err = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
3050 if (err)
3051 goto out_subs;
3052
3053 /*
3054 * This should be called after initialization is done and failure isn't
3055 * possible anymore.
3056 */
3057 if (!in_hyp_mode)
3058 finalize_init_hyp_mode();
3059
3060 kvm_arm_initialised = true;
3061
3062 return 0;
3063
3064 out_subs:
3065 teardown_subsystems();
3066 out_hyp:
3067 if (!in_hyp_mode)
3068 teardown_hyp_mode();
3069 out_err:
3070 kvm_arm_vmid_alloc_free();
3071 return err;
3072 }
3073
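/*
 * Parse the kvm-arm.mode command line parameter. Accepted values are
 * "none", "protected", "nvhe" and "nested"; for example, booting with
 * kvm-arm.mode=protected selects pKVM when the kernel is not running at
 * EL2 (i.e. not VHE).
 */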
3074 static int __init early_kvm_mode_cfg(char *arg)
3075 {
3076 if (!arg)
3077 return -EINVAL;
3078
3079 if (strcmp(arg, "none") == 0) {
3080 kvm_mode = KVM_MODE_NONE;
3081 return 0;
3082 }
3083
3084 if (!is_hyp_mode_available()) {
3085 pr_warn_once("KVM is not available. Ignoring kvm-arm.mode\n");
3086 return 0;
3087 }
3088
3089 if (strcmp(arg, "protected") == 0) {
3090 if (!is_kernel_in_hyp_mode())
3091 kvm_mode = KVM_MODE_PROTECTED;
3092 else
3093 pr_warn_once("Protected KVM not available with VHE\n");
3094
3095 return 0;
3096 }
3097
3098 if (strcmp(arg, "nvhe") == 0 && !WARN_ON(is_kernel_in_hyp_mode())) {
3099 kvm_mode = KVM_MODE_DEFAULT;
3100 return 0;
3101 }
3102
3103 if (strcmp(arg, "nested") == 0 && !WARN_ON(!is_kernel_in_hyp_mode())) {
3104 kvm_mode = KVM_MODE_NV;
3105 return 0;
3106 }
3107
3108 return -EINVAL;
3109 }
3110 early_param("kvm-arm.mode", early_kvm_mode_cfg);
3111
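/*
 * Shared parser for the kvm-arm.wfi_trap_policy and kvm-arm.wfe_trap_policy
 * parameters. Accepted values are "trap" and "notrap"; when neither is
 * given, the default KVM_WFX_NOTRAP_SINGLE_TASK policy is kept.
 */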
3112 static int __init early_kvm_wfx_trap_policy_cfg(char *arg, enum kvm_wfx_trap_policy *p)
3113 {
3114 if (!arg)
3115 return -EINVAL;
3116
3117 if (strcmp(arg, "trap") == 0) {
3118 *p = KVM_WFX_TRAP;
3119 return 0;
3120 }
3121
3122 if (strcmp(arg, "notrap") == 0) {
3123 *p = KVM_WFX_NOTRAP;
3124 return 0;
3125 }
3126
3127 return -EINVAL;
3128 }
3129
3130 static int __init early_kvm_wfi_trap_policy_cfg(char *arg)
3131 {
3132 return early_kvm_wfx_trap_policy_cfg(arg, &kvm_wfi_trap_policy);
3133 }
3134 early_param("kvm-arm.wfi_trap_policy", early_kvm_wfi_trap_policy_cfg);
3135
3136 static int __init early_kvm_wfe_trap_policy_cfg(char *arg)
3137 {
3138 return early_kvm_wfx_trap_policy_cfg(arg, &kvm_wfe_trap_policy);
3139 }
3140 early_param("kvm-arm.wfe_trap_policy", early_kvm_wfe_trap_policy_cfg);
3141
3142 enum kvm_mode kvm_get_mode(void)
3143 {
3144 return kvm_mode;
3145 }
3146
3147 module_init(kvm_arm_init);
3148