// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/bug.h>
#include <linux/cpu_pm.h>
#include <linux/entry-kvm.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/kvm.h>
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
#include <linux/sched/stat.h>
#include <linux/psci.h>
#include <trace/events/kvm.h>

#define CREATE_TRACE_POINTS
#include "trace_arm.h"

#include <linux/uaccess.h>
#include <asm/ptrace.h>
#include <asm/mman.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/virt.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nested.h>
#include <asm/kvm_pkvm.h>
#include <asm/kvm_ptrauth.h>
#include <asm/sections.h>

#include <kvm/arm_hypercalls.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_psci.h>

#include "sys_regs.h"

static enum kvm_mode kvm_mode = KVM_MODE_DEFAULT;

enum kvm_wfx_trap_policy {
	KVM_WFX_NOTRAP_SINGLE_TASK, /* Default option */
	KVM_WFX_NOTRAP,
	KVM_WFX_TRAP,
};

static enum kvm_wfx_trap_policy kvm_wfi_trap_policy __read_mostly = KVM_WFX_NOTRAP_SINGLE_TASK;
static enum kvm_wfx_trap_policy kvm_wfe_trap_policy __read_mostly = KVM_WFX_NOTRAP_SINGLE_TASK;

DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);

DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);

DECLARE_KVM_NVHE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);

static bool vgic_present, kvm_arm_initialised;

static DEFINE_PER_CPU(unsigned char, kvm_hyp_initialized);
DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

bool is_kvm_arm_initialised(void)
{
	return kvm_arm_initialised;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

/*
 * This functions as an allow-list of protected VM capabilities.
 * Features not explicitly allowed by this function are denied.
 */
static bool pkvm_ext_allowed(struct kvm *kvm, long ext)
{
	switch (ext) {
	case KVM_CAP_IRQCHIP:
	case KVM_CAP_ARM_PSCI:
	case KVM_CAP_ARM_PSCI_0_2:
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
	case KVM_CAP_MSI_DEVID:
	case KVM_CAP_ARM_VM_IPA_SIZE:
	case KVM_CAP_ARM_PMU_V3:
	case KVM_CAP_ARM_SVE:
	case KVM_CAP_ARM_PTRAUTH_ADDRESS:
	case KVM_CAP_ARM_PTRAUTH_GENERIC:
		return true;
	default:
		return false;
	}
}

int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
			    struct kvm_enable_cap *cap)
{
	int r = -EINVAL;

	if (cap->flags)
		return -EINVAL;

	if (kvm_vm_is_protected(kvm) && !pkvm_ext_allowed(kvm, cap->cap))
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_ARM_NISV_TO_USER:
		r = 0;
		set_bit(KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER,
			&kvm->arch.flags);
		break;
	case KVM_CAP_ARM_MTE:
		mutex_lock(&kvm->lock);
		if (system_supports_mte() && !kvm->created_vcpus) {
			r = 0;
			set_bit(KVM_ARCH_FLAG_MTE_ENABLED, &kvm->arch.flags);
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_CAP_ARM_SYSTEM_SUSPEND:
		r = 0;
		set_bit(KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED, &kvm->arch.flags);
		break;
	case KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE:
		mutex_lock(&kvm->slots_lock);
		/*
		 * To keep things simple, allow changing the chunk
		 * size only when no memory slots have been created.
		 */
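		/*
		 * Illustrative note, based on the check below: a value of 0
		 * is accepted and effectively disables eager page splitting,
		 * while any non-zero value must be one of the block sizes
		 * advertised through KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES.
		 */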
		if (kvm_are_all_memslots_empty(kvm)) {
			u64 new_cap = cap->args[0];

			if (!new_cap || kvm_is_block_size_supported(new_cap)) {
				r = 0;
				kvm->arch.mmu.split_page_chunk_size = new_cap;
			}
		}
		mutex_unlock(&kvm->slots_lock);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arm_default_max_vcpus(void)
{
	return vgic_present ? kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;
}

/**
 * kvm_arch_init_vm - initializes a VM data structure
 * @kvm:	pointer to the KVM struct
 * @type:	kvm device type
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int ret;

	mutex_init(&kvm->arch.config_lock);

#ifdef CONFIG_LOCKDEP
	/* Clue in lockdep that the config_lock must be taken inside kvm->lock */
	mutex_lock(&kvm->lock);
	mutex_lock(&kvm->arch.config_lock);
	mutex_unlock(&kvm->arch.config_lock);
	mutex_unlock(&kvm->lock);
#endif

	kvm_init_nested(kvm);

	ret = kvm_share_hyp(kvm, kvm + 1);
	if (ret)
		return ret;

	ret = pkvm_init_host_vm(kvm);
	if (ret)
		goto err_unshare_kvm;

	if (!zalloc_cpumask_var(&kvm->arch.supported_cpus, GFP_KERNEL_ACCOUNT)) {
		ret = -ENOMEM;
		goto err_unshare_kvm;
	}
	cpumask_copy(kvm->arch.supported_cpus, cpu_possible_mask);

	ret = kvm_init_stage2_mmu(kvm, &kvm->arch.mmu, type);
	if (ret)
		goto err_free_cpumask;

	kvm_vgic_early_init(kvm);

	kvm_timer_init_vm(kvm);

	/* The maximum number of VCPUs is limited by the host's GIC model */
	kvm->max_vcpus = kvm_arm_default_max_vcpus();

	kvm_arm_init_hypercalls(kvm);

	bitmap_zero(kvm->arch.vcpu_features, KVM_VCPU_MAX_FEATURES);

	return 0;

err_free_cpumask:
	free_cpumask_var(kvm->arch.supported_cpus);
err_unshare_kvm:
	kvm_unshare_hyp(kvm, kvm + 1);
	return ret;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

void kvm_arch_create_vm_debugfs(struct kvm *kvm)
{
	kvm_sys_regs_create_debugfs(kvm);
	kvm_s2_ptdump_create_debugfs(kvm);
}

static void kvm_destroy_mpidr_data(struct kvm *kvm)
{
	struct kvm_mpidr_data *data;

	mutex_lock(&kvm->arch.config_lock);

	data = rcu_dereference_protected(kvm->arch.mpidr_data,
					 lockdep_is_held(&kvm->arch.config_lock));
	if (data) {
		rcu_assign_pointer(kvm->arch.mpidr_data, NULL);
		synchronize_rcu();
		kfree(data);
	}

	mutex_unlock(&kvm->arch.config_lock);
}

/**
 * kvm_arch_destroy_vm - destroy the VM data structure
 * @kvm:	pointer to the KVM struct
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	bitmap_free(kvm->arch.pmu_filter);
	free_cpumask_var(kvm->arch.supported_cpus);

	kvm_vgic_destroy(kvm);

	if (is_protected_kvm_enabled())
		pkvm_destroy_hyp_vm(kvm);

	kvm_destroy_mpidr_data(kvm);

	kfree(kvm->arch.sysreg_masks);
	kvm_destroy_vcpus(kvm);

	kvm_unshare_hyp(kvm, kvm + 1);

	kvm_arm_teardown_hypercalls(kvm);
}

static bool kvm_has_full_ptr_auth(void)
{
	bool apa, gpa, api, gpi, apa3, gpa3;
	u64 isar1, isar2, val;

	/*
	 * Check that:
	 *
	 * - both Address and Generic auth are implemented for a given
	 *   algorithm (Q5, IMPDEF or Q3)
	 * - only a single algorithm is implemented.
	 */
	if (!system_has_full_ptr_auth())
		return false;

	isar1 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR1_EL1);
	isar2 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1);

	apa = !!FIELD_GET(ID_AA64ISAR1_EL1_APA_MASK, isar1);
	val = FIELD_GET(ID_AA64ISAR1_EL1_GPA_MASK, isar1);
	gpa = (val == ID_AA64ISAR1_EL1_GPA_IMP);

	api = !!FIELD_GET(ID_AA64ISAR1_EL1_API_MASK, isar1);
	val = FIELD_GET(ID_AA64ISAR1_EL1_GPI_MASK, isar1);
	gpi = (val == ID_AA64ISAR1_EL1_GPI_IMP);

	apa3 = !!FIELD_GET(ID_AA64ISAR2_EL1_APA3_MASK, isar2);
	val = FIELD_GET(ID_AA64ISAR2_EL1_GPA3_MASK, isar2);
	gpa3 = (val == ID_AA64ISAR2_EL1_GPA3_IMP);

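	/*
	 * Illustrative reading of the check below: a CPU that implements
	 * only QARMA5 address *and* generic auth has apa && gpa with
	 * api/gpi/apa3/gpa3 all clear, and is accepted; a CPU exposing
	 * address auth for one algorithm but generic auth for another, or
	 * more than one algorithm at once, is rejected.
	 */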
	return (apa == gpa && api == gpi && apa3 == gpa3 &&
		(apa + api + apa3) == 1);
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	if (kvm && kvm_vm_is_protected(kvm) && !pkvm_ext_allowed(kvm, ext))
		return 0;

	switch (ext) {
	case KVM_CAP_IRQCHIP:
		r = vgic_present;
		break;
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ARM_PSCI:
	case KVM_CAP_ARM_PSCI_0_2:
	case KVM_CAP_READONLY_MEM:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_VCPU_EVENTS:
	case KVM_CAP_ARM_IRQ_LINE_LAYOUT_2:
	case KVM_CAP_ARM_NISV_TO_USER:
	case KVM_CAP_ARM_INJECT_EXT_DABT:
	case KVM_CAP_SET_GUEST_DEBUG:
	case KVM_CAP_VCPU_ATTRIBUTES:
	case KVM_CAP_PTP_KVM:
	case KVM_CAP_ARM_SYSTEM_SUSPEND:
	case KVM_CAP_IRQFD_RESAMPLE:
	case KVM_CAP_COUNTER_OFFSET:
		r = 1;
		break;
	case KVM_CAP_SET_GUEST_DEBUG2:
		return KVM_GUESTDBG_VALID_MASK;
	case KVM_CAP_ARM_SET_DEVICE_ADDR:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
		/*
		 * ARM64 treats KVM_CAP_NR_VCPUS differently from all other
		 * architectures, as it does not always bound it to
		 * KVM_CAP_MAX_VCPUS. It should not matter much because
		 * this is just an advisory value.
		 */
		r = min_t(unsigned int, num_online_cpus(),
			  kvm_arm_default_max_vcpus());
		break;
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
		if (kvm)
			r = kvm->max_vcpus;
		else
			r = kvm_arm_default_max_vcpus();
		break;
	case KVM_CAP_MSI_DEVID:
		if (!kvm)
			r = -EINVAL;
		else
			r = kvm->arch.vgic.msis_require_devid;
		break;
	case KVM_CAP_ARM_USER_IRQ:
		/*
		 * 1: EL1_VTIMER, EL1_PTIMER, and PMU.
		 * (bump this number if adding more devices)
		 */
		r = 1;
		break;
	case KVM_CAP_ARM_MTE:
		r = system_supports_mte();
		break;
	case KVM_CAP_STEAL_TIME:
		r = kvm_arm_pvtime_supported();
		break;
	case KVM_CAP_ARM_EL1_32BIT:
		r = cpus_have_final_cap(ARM64_HAS_32BIT_EL1);
		break;
	case KVM_CAP_GUEST_DEBUG_HW_BPS:
		r = get_num_brps();
		break;
	case KVM_CAP_GUEST_DEBUG_HW_WPS:
		r = get_num_wrps();
		break;
	case KVM_CAP_ARM_PMU_V3:
		r = kvm_arm_support_pmu_v3();
		break;
	case KVM_CAP_ARM_INJECT_SERROR_ESR:
		r = cpus_have_final_cap(ARM64_HAS_RAS_EXTN);
		break;
	case KVM_CAP_ARM_VM_IPA_SIZE:
		r = get_kvm_ipa_limit();
		break;
	case KVM_CAP_ARM_SVE:
		r = system_supports_sve();
		break;
	case KVM_CAP_ARM_PTRAUTH_ADDRESS:
	case KVM_CAP_ARM_PTRAUTH_GENERIC:
		r = kvm_has_full_ptr_auth();
		break;
	case KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE:
		if (kvm)
			r = kvm->arch.mmu.split_page_chunk_size;
		else
			r = KVM_ARM_EAGER_SPLIT_CHUNK_SIZE_DEFAULT;
		break;
	case KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES:
		r = kvm_supported_block_sizes();
		break;
	case KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES:
		r = BIT(0);
		break;
	default:
		r = 0;
	}

	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

struct kvm *kvm_arch_alloc_vm(void)
{
	size_t sz = sizeof(struct kvm);

	if (!has_vhe())
		return kzalloc(sz, GFP_KERNEL_ACCOUNT);

	return __vmalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_HIGHMEM | __GFP_ZERO);
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
		return -EBUSY;

	if (id >= kvm->max_vcpus)
		return -EINVAL;

	return 0;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	int err;

	spin_lock_init(&vcpu->arch.mp_state_lock);

#ifdef CONFIG_LOCKDEP
	/* Inform lockdep that the config_lock is acquired after vcpu->mutex */
	mutex_lock(&vcpu->mutex);
	mutex_lock(&vcpu->kvm->arch.config_lock);
	mutex_unlock(&vcpu->kvm->arch.config_lock);
	mutex_unlock(&vcpu->mutex);
#endif

	/* Force users to call KVM_ARM_VCPU_INIT */
	vcpu_clear_flag(vcpu, VCPU_INITIALIZED);

	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;

	/* Set up the timer */
	kvm_timer_vcpu_init(vcpu);

	kvm_pmu_vcpu_init(vcpu);

	kvm_arm_reset_debug_ptr(vcpu);

	kvm_arm_pvtime_vcpu_init(&vcpu->arch);

	vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;

	/*
	 * This vCPU may have been created after mpidr_data was initialized.
	 * Throw out the pre-computed mappings if that is the case, which forces
	 * KVM to fall back to iteratively searching the vCPUs.
	 */
	kvm_destroy_mpidr_data(vcpu->kvm);

	err = kvm_vgic_vcpu_init(vcpu);
	if (err)
		return err;

	return kvm_share_hyp(vcpu, vcpu + 1);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	if (vcpu_has_run_once(vcpu) && unlikely(!irqchip_in_kernel(vcpu->kvm)))
		static_branch_dec(&userspace_irqchip_in_use);

	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
	kvm_timer_vcpu_terminate(vcpu);
	kvm_pmu_vcpu_destroy(vcpu);
	kvm_vgic_vcpu_destroy(vcpu);
	kvm_arm_vcpu_destroy(vcpu);
}

void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{

}

void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{

}

static void vcpu_set_pauth_traps(struct kvm_vcpu *vcpu)
{
	if (vcpu_has_ptrauth(vcpu) && !is_protected_kvm_enabled()) {
		/*
		 * Either we're running an L2 guest, and the API/APK bits come
		 * from L1's HCR_EL2, or API/APK are both set.
		 */
		if (unlikely(vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu))) {
			u64 val;

			val = __vcpu_sys_reg(vcpu, HCR_EL2);
			val &= (HCR_API | HCR_APK);
			vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
			vcpu->arch.hcr_el2 |= val;
		} else {
			vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
		}

		/*
		 * Save the host keys if there is any chance for the guest
		 * to use pauth, as the entry code will reload the guest
		 * keys in that case.
		 */
		if (vcpu->arch.hcr_el2 & (HCR_API | HCR_APK)) {
			struct kvm_cpu_context *ctxt;

			ctxt = this_cpu_ptr_hyp_sym(kvm_hyp_ctxt);
			ptrauth_save_keys(ctxt);
		}
	}
}

static bool kvm_vcpu_should_clear_twi(struct kvm_vcpu *vcpu)
{
	if (unlikely(kvm_wfi_trap_policy != KVM_WFX_NOTRAP_SINGLE_TASK))
		return kvm_wfi_trap_policy == KVM_WFX_NOTRAP;

	return single_task_running() &&
	       (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) ||
		vcpu->kvm->arch.vgic.nassgireq);
}

static bool kvm_vcpu_should_clear_twe(struct kvm_vcpu *vcpu)
{
	if (unlikely(kvm_wfe_trap_policy != KVM_WFX_NOTRAP_SINGLE_TASK))
		return kvm_wfe_trap_policy == KVM_WFX_NOTRAP;

	return single_task_running();
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvm_s2_mmu *mmu;
	int *last_ran;

	if (vcpu_has_nv(vcpu))
		kvm_vcpu_load_hw_mmu(vcpu);

	mmu = vcpu->arch.hw_mmu;
	last_ran = this_cpu_ptr(mmu->last_vcpu_ran);

	/*
	 * We guarantee that both TLBs and I-cache are private to each
	 * vcpu. If detecting that a vcpu from the same VM has
	 * previously run on the same physical CPU, call into the
	 * hypervisor code to nuke the relevant contexts.
	 *
	 * We might get preempted before the vCPU actually runs, but
	 * over-invalidation doesn't affect correctness.
	 */
	if (*last_ran != vcpu->vcpu_idx) {
		kvm_call_hyp(__kvm_flush_cpu_context, mmu);
		*last_ran = vcpu->vcpu_idx;
	}

	vcpu->cpu = cpu;

	kvm_vgic_load(vcpu);
	kvm_timer_vcpu_load(vcpu);
	if (has_vhe())
		kvm_vcpu_load_vhe(vcpu);
	kvm_arch_vcpu_load_fp(vcpu);
	kvm_vcpu_pmu_restore_guest(vcpu);
	if (kvm_arm_is_pvtime_enabled(&vcpu->arch))
		kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu);

	if (kvm_vcpu_should_clear_twe(vcpu))
		vcpu->arch.hcr_el2 &= ~HCR_TWE;
	else
		vcpu->arch.hcr_el2 |= HCR_TWE;

	if (kvm_vcpu_should_clear_twi(vcpu))
		vcpu->arch.hcr_el2 &= ~HCR_TWI;
	else
		vcpu->arch.hcr_el2 |= HCR_TWI;

	vcpu_set_pauth_traps(vcpu);

	kvm_arch_vcpu_load_debug_state_flags(vcpu);

	if (!cpumask_test_cpu(cpu, vcpu->kvm->arch.supported_cpus))
		vcpu_set_on_unsupported_cpu(vcpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_put_debug_state_flags(vcpu);
	kvm_arch_vcpu_put_fp(vcpu);
	if (has_vhe())
		kvm_vcpu_put_vhe(vcpu);
	kvm_timer_vcpu_put(vcpu);
	kvm_vgic_put(vcpu);
	kvm_vcpu_pmu_restore_host(vcpu);
	if (vcpu_has_nv(vcpu))
		kvm_vcpu_put_hw_mmu(vcpu);
	kvm_arm_vmid_clear_active();

	vcpu_clear_on_unsupported_cpu(vcpu);
	vcpu->cpu = -1;
}

static void __kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu)
{
	WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
	kvm_make_request(KVM_REQ_SLEEP, vcpu);
	kvm_vcpu_kick(vcpu);
}

void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu)
{
	spin_lock(&vcpu->arch.mp_state_lock);
	__kvm_arm_vcpu_power_off(vcpu);
	spin_unlock(&vcpu->arch.mp_state_lock);
}

bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu)
{
	return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_STOPPED;
}

static void kvm_arm_vcpu_suspend(struct kvm_vcpu *vcpu)
{
	WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_SUSPENDED);
	kvm_make_request(KVM_REQ_SUSPEND, vcpu);
	kvm_vcpu_kick(vcpu);
}

static bool kvm_arm_vcpu_suspended(struct kvm_vcpu *vcpu)
{
	return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_SUSPENDED;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	*mp_state = READ_ONCE(vcpu->arch.mp_state);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int ret = 0;

	spin_lock(&vcpu->arch.mp_state_lock);

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_RUNNABLE:
		WRITE_ONCE(vcpu->arch.mp_state, *mp_state);
		break;
	case KVM_MP_STATE_STOPPED:
		__kvm_arm_vcpu_power_off(vcpu);
		break;
	case KVM_MP_STATE_SUSPENDED:
		kvm_arm_vcpu_suspend(vcpu);
		break;
	default:
		ret = -EINVAL;
	}

	spin_unlock(&vcpu->arch.mp_state_lock);

	return ret;
}

/**
 * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled
 * @v:		The VCPU pointer
 *
 * If the guest CPU is not waiting for interrupts or an interrupt line is
 * asserted, the CPU is by definition runnable.
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	bool irq_lines = *vcpu_hcr(v) & (HCR_VI | HCR_VF);
	return ((irq_lines || kvm_vgic_vcpu_pending_irq(v))
		&& !kvm_arm_vcpu_stopped(v) && !v->arch.pause);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return vcpu_mode_priv(vcpu);
}

#ifdef CONFIG_GUEST_PERF_EVENTS
unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
{
	return *vcpu_pc(vcpu);
}
#endif

static void kvm_init_mpidr_data(struct kvm *kvm)
{
	struct kvm_mpidr_data *data = NULL;
	unsigned long c, mask, nr_entries;
	u64 aff_set = 0, aff_clr = ~0UL;
	struct kvm_vcpu *vcpu;

	mutex_lock(&kvm->arch.config_lock);

	if (rcu_access_pointer(kvm->arch.mpidr_data) ||
	    atomic_read(&kvm->online_vcpus) == 1)
		goto out;

	kvm_for_each_vcpu(c, vcpu, kvm) {
		u64 aff = kvm_vcpu_get_mpidr_aff(vcpu);
		aff_set |= aff;
		aff_clr &= aff;
	}

	/*
	 * A significant bit can be either 0 or 1, and will only appear in
	 * aff_set. Use aff_clr to weed out the useless stuff.
	 */
	mask = aff_set ^ aff_clr;
	nr_entries = BIT_ULL(hweight_long(mask));
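
	/*
	 * Worked example (illustrative): for a 4-vCPU VM whose affinities
	 * are 0x000, 0x001, 0x100 and 0x101, aff_set is 0x101 and aff_clr
	 * is 0x000, so mask ends up as 0x101 (two significant bits) and the
	 * compressed table needs nr_entries = 4 slots.
	 */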

	/*
	 * Don't let userspace fool us. If we need more than a single page
	 * to describe the compressed MPIDR array, just fall back to the
	 * iterative method. Single vcpu VMs do not need this either.
	 */
	if (struct_size(data, cmpidr_to_idx, nr_entries) <= PAGE_SIZE)
		data = kzalloc(struct_size(data, cmpidr_to_idx, nr_entries),
			       GFP_KERNEL_ACCOUNT);

	if (!data)
		goto out;

	data->mpidr_mask = mask;

	kvm_for_each_vcpu(c, vcpu, kvm) {
		u64 aff = kvm_vcpu_get_mpidr_aff(vcpu);
		u16 index = kvm_mpidr_index(data, aff);

		data->cmpidr_to_idx[index] = c;
	}

	rcu_assign_pointer(kvm->arch.mpidr_data, data);
out:
	mutex_unlock(&kvm->arch.config_lock);
}

/*
 * Handle both the initialisation that is being done when the vcpu is
 * run for the first time, as well as the updates that must be
 * performed each time we get a new thread dealing with this vcpu.
 */
int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	int ret;

	if (!kvm_vcpu_initialized(vcpu))
		return -ENOEXEC;

	if (!kvm_arm_vcpu_is_finalized(vcpu))
		return -EPERM;

	ret = kvm_arch_vcpu_run_map_fp(vcpu);
	if (ret)
		return ret;

	if (likely(vcpu_has_run_once(vcpu)))
		return 0;

	kvm_init_mpidr_data(kvm);

	kvm_arm_vcpu_init_debug(vcpu);

	if (likely(irqchip_in_kernel(kvm))) {
		/*
		 * Map the VGIC hardware resources before running a vcpu the
		 * first time on this VM.
		 */
		ret = kvm_vgic_map_resources(kvm);
		if (ret)
			return ret;
	}

	ret = kvm_finalize_sys_regs(vcpu);
	if (ret)
		return ret;

	/*
	 * This needs to happen after any restriction has been applied
	 * to the feature set.
	 */
	kvm_calculate_traps(vcpu);

	ret = kvm_timer_enable(vcpu);
	if (ret)
		return ret;

	ret = kvm_arm_pmu_v3_enable(vcpu);
	if (ret)
		return ret;

	if (is_protected_kvm_enabled()) {
		ret = pkvm_create_hyp_vm(kvm);
		if (ret)
			return ret;
	}

	if (!irqchip_in_kernel(kvm)) {
		/*
		 * Tell the rest of the code that there are userspace irqchip
		 * VMs in the wild.
		 */
		static_branch_inc(&userspace_irqchip_in_use);
	}

	/*
	 * Initialize traps for protected VMs.
	 * NOTE: Move to run in EL2 directly, rather than via a hypercall, once
	 * the code is in place for first run initialization at EL2.
	 */
	if (kvm_vm_is_protected(kvm))
		kvm_call_hyp_nvhe(__pkvm_vcpu_init_traps, vcpu);

	mutex_lock(&kvm->arch.config_lock);
	set_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags);
	mutex_unlock(&kvm->arch.config_lock);

	return ret;
}

bool kvm_arch_intc_initialized(struct kvm *kvm)
{
	return vgic_initialized(kvm);
}

void kvm_arm_halt_guest(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		vcpu->arch.pause = true;
	kvm_make_all_cpus_request(kvm, KVM_REQ_SLEEP);
}

void kvm_arm_resume_guest(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		vcpu->arch.pause = false;
		__kvm_vcpu_wake_up(vcpu);
	}
}

static void kvm_vcpu_sleep(struct kvm_vcpu *vcpu)
{
	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);

	rcuwait_wait_event(wait,
			   (!kvm_arm_vcpu_stopped(vcpu)) && (!vcpu->arch.pause),
			   TASK_INTERRUPTIBLE);

	if (kvm_arm_vcpu_stopped(vcpu) || vcpu->arch.pause) {
		/* Awaken to handle a signal, request we sleep again later. */
		kvm_make_request(KVM_REQ_SLEEP, vcpu);
	}

	/*
	 * Make sure we will observe a potential reset request if we've
	 * observed a change to the power state. Pairs with the smp_wmb() in
	 * kvm_psci_vcpu_on().
	 */
	smp_rmb();
}

/**
 * kvm_vcpu_wfi - emulate Wait-For-Interrupt behavior
 * @vcpu:	The VCPU pointer
 *
 * Suspend execution of a vCPU until a valid wake event is detected, i.e. until
 * the vCPU is runnable. The vCPU may or may not be scheduled out, depending
 * on when a wake event arrives, e.g. there may already be a pending wake event.
 */
void kvm_vcpu_wfi(struct kvm_vcpu *vcpu)
{
	/*
	 * Sync back the state of the GIC CPU interface so that we have
	 * the latest PMR and group enables. This ensures that
	 * kvm_arch_vcpu_runnable has up-to-date data to decide whether
	 * we have pending interrupts, e.g. when determining if the
	 * vCPU should block.
	 *
	 * For the same reason, we want to tell GICv4 that we need
	 * doorbells to be signalled, should an interrupt become pending.
	 */
	preempt_disable();
	vcpu_set_flag(vcpu, IN_WFI);
	kvm_vgic_put(vcpu);
	preempt_enable();

	kvm_vcpu_halt(vcpu);
	vcpu_clear_flag(vcpu, IN_WFIT);

	preempt_disable();
	vcpu_clear_flag(vcpu, IN_WFI);
	kvm_vgic_load(vcpu);
	preempt_enable();
}

static int kvm_vcpu_suspend(struct kvm_vcpu *vcpu)
{
	if (!kvm_arm_vcpu_suspended(vcpu))
		return 1;

	kvm_vcpu_wfi(vcpu);

	/*
	 * The suspend state is sticky; we do not leave it until userspace
	 * explicitly marks the vCPU as runnable. Request that we suspend again
	 * later.
	 */
	kvm_make_request(KVM_REQ_SUSPEND, vcpu);

	/*
	 * Check to make sure the vCPU is actually runnable. If so, exit to
	 * userspace informing it of the wakeup condition.
	 */
	if (kvm_arch_vcpu_runnable(vcpu)) {
		memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
		vcpu->run->system_event.type = KVM_SYSTEM_EVENT_WAKEUP;
		vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
		return 0;
	}

	/*
	 * Otherwise, we were unblocked to process a different event, such as a
	 * pending signal. Return 1 and allow kvm_arch_vcpu_ioctl_run() to
	 * process the event.
	 */
	return 1;
}

/**
 * check_vcpu_requests - check and handle pending vCPU requests
 * @vcpu:	the VCPU pointer
 *
 * Return: 1 if we should enter the guest
 *	   0 if we should exit to userspace
 *	   < 0 if we should exit to userspace, where the return value indicates
 *	   an error
 */
static int check_vcpu_requests(struct kvm_vcpu *vcpu)
{
	if (kvm_request_pending(vcpu)) {
		if (kvm_check_request(KVM_REQ_SLEEP, vcpu))
			kvm_vcpu_sleep(vcpu);

		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
			kvm_reset_vcpu(vcpu);

		/*
		 * Clear IRQ_PENDING requests that were made to guarantee
		 * that a VCPU sees new virtual interrupts.
		 */
		kvm_check_request(KVM_REQ_IRQ_PENDING, vcpu);

		if (kvm_check_request(KVM_REQ_RECORD_STEAL, vcpu))
			kvm_update_stolen_time(vcpu);

		if (kvm_check_request(KVM_REQ_RELOAD_GICv4, vcpu)) {
			/* The distributor enable bits were changed */
			preempt_disable();
			vgic_v4_put(vcpu);
			vgic_v4_load(vcpu);
			preempt_enable();
		}

		if (kvm_check_request(KVM_REQ_RELOAD_PMU, vcpu))
			kvm_vcpu_reload_pmu(vcpu);

		if (kvm_check_request(KVM_REQ_RESYNC_PMU_EL0, vcpu))
			kvm_vcpu_pmu_restore_guest(vcpu);

		if (kvm_check_request(KVM_REQ_SUSPEND, vcpu))
			return kvm_vcpu_suspend(vcpu);

		if (kvm_dirty_ring_check_request(vcpu))
			return 0;
	}

	return 1;
}

static bool vcpu_mode_is_bad_32bit(struct kvm_vcpu *vcpu)
{
	if (likely(!vcpu_mode_is_32bit(vcpu)))
		return false;

	if (vcpu_has_nv(vcpu))
		return true;

	return !kvm_supports_32bit_el0();
}

/**
 * kvm_vcpu_exit_request - returns true if the VCPU should *not* enter the guest
 * @vcpu:	The VCPU pointer
 * @ret:	Pointer to write optional return code
 *
 * Returns: true if the VCPU needs to return to a preemptible + interruptible
 *	    kernel context and skip guest entry.
 *
1057 *
1058 * This function disambiguates between two different types of exits: exits to a
1059 * preemptible + interruptible kernel context and exits to userspace. For an
1060 * exit to userspace, this function will write the return code to ret and return
1061 * true. For an exit to preemptible + interruptible kernel context (i.e. check
1062 * for pending work and re-enter), return true without writing to ret.
1063 */
kvm_vcpu_exit_request(struct kvm_vcpu * vcpu,int * ret)1064 static bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu, int *ret)
1065 {
1066 struct kvm_run *run = vcpu->run;
1067
1068 /*
1069 * If we're using a userspace irqchip, then check if we need
1070 * to tell a userspace irqchip about timer or PMU level
1071 * changes and if so, exit to userspace (the actual level
1072 * state gets updated in kvm_timer_update_run and
1073 * kvm_pmu_update_run below).
1074 */
1075 if (static_branch_unlikely(&userspace_irqchip_in_use)) {
1076 if (kvm_timer_should_notify_user(vcpu) ||
1077 kvm_pmu_should_notify_user(vcpu)) {
1078 *ret = -EINTR;
1079 run->exit_reason = KVM_EXIT_INTR;
1080 return true;
1081 }
1082 }
1083
1084 if (unlikely(vcpu_on_unsupported_cpu(vcpu))) {
1085 run->exit_reason = KVM_EXIT_FAIL_ENTRY;
1086 run->fail_entry.hardware_entry_failure_reason = KVM_EXIT_FAIL_ENTRY_CPU_UNSUPPORTED;
1087 run->fail_entry.cpu = smp_processor_id();
1088 *ret = 0;
1089 return true;
1090 }
1091
1092 return kvm_request_pending(vcpu) ||
1093 xfer_to_guest_mode_work_pending();
1094 }
1095
1096 /*
1097 * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
1098 * the vCPU is running.
1099 *
1100 * This must be noinstr as instrumentation may make use of RCU, and this is not
1101 * safe during the EQS.
1102 */
kvm_arm_vcpu_enter_exit(struct kvm_vcpu * vcpu)1103 static int noinstr kvm_arm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
1104 {
1105 int ret;
1106
1107 guest_state_enter_irqoff();
1108 ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
1109 guest_state_exit_irqoff();
1110
1111 return ret;
1112 }
1113
1114 /**
1115 * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
1116 * @vcpu: The VCPU pointer
1117 *
 * This function is called through the KVM_RUN ioctl from user space. It
 * will execute VM code in a loop until the time slice for the process is used
 * or some emulation is needed from user space in which case the function will
 * return with return value 0 and with the kvm_run structure filled in with the
 * required data for the requested emulation.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	int ret;

	if (run->exit_reason == KVM_EXIT_MMIO) {
		ret = kvm_handle_mmio_return(vcpu);
		if (ret <= 0)
			return ret;
	}

	vcpu_load(vcpu);

	if (!vcpu->wants_to_run) {
		ret = -EINTR;
		goto out;
	}

	kvm_sigset_activate(vcpu);

	ret = 1;
	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->flags = 0;
	while (ret > 0) {
		/*
		 * Check conditions before entering the guest
		 */
		ret = xfer_to_guest_mode_handle_work(vcpu);
		if (!ret)
			ret = 1;

		if (ret > 0)
			ret = check_vcpu_requests(vcpu);

		/*
		 * Preparing the interrupts to be injected also
		 * involves poking the GIC, which must be done in a
		 * non-preemptible context.
		 */
		preempt_disable();

		/*
		 * The VMID allocator only tracks active VMIDs per
		 * physical CPU, and therefore the VMID allocated may not be
		 * preserved on VMID roll-over if the task was preempted,
		 * making a thread's VMID inactive. So we need to call
		 * kvm_arm_vmid_update() in non-preemptible context.
		 */
		if (kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid) &&
		    has_vhe())
			__load_stage2(vcpu->arch.hw_mmu,
				      vcpu->arch.hw_mmu->arch);

		kvm_pmu_flush_hwstate(vcpu);

		local_irq_disable();

		kvm_vgic_flush_hwstate(vcpu);

		kvm_pmu_update_vcpu_events(vcpu);

		/*
		 * Ensure we set mode to IN_GUEST_MODE after we disable
		 * interrupts and before the final VCPU requests check.
		 * See the comment in kvm_vcpu_exiting_guest_mode() and
		 * Documentation/virt/kvm/vcpu-requests.rst
		 */
		smp_store_mb(vcpu->mode, IN_GUEST_MODE);

		if (ret <= 0 || kvm_vcpu_exit_request(vcpu, &ret)) {
			vcpu->mode = OUTSIDE_GUEST_MODE;
			isb(); /* Ensure work in x_flush_hwstate is committed */
			kvm_pmu_sync_hwstate(vcpu);
			if (static_branch_unlikely(&userspace_irqchip_in_use))
				kvm_timer_sync_user(vcpu);
			kvm_vgic_sync_hwstate(vcpu);
			local_irq_enable();
			preempt_enable();
			continue;
		}

		kvm_arm_setup_debug(vcpu);
		kvm_arch_vcpu_ctxflush_fp(vcpu);

		/**************************************************************
		 * Enter the guest
		 */
		trace_kvm_entry(*vcpu_pc(vcpu));
		guest_timing_enter_irqoff();

		ret = kvm_arm_vcpu_enter_exit(vcpu);

		vcpu->mode = OUTSIDE_GUEST_MODE;
		vcpu->stat.exits++;
		/*
		 * Back from guest
		 *************************************************************/

		kvm_arm_clear_debug(vcpu);

		/*
		 * We must sync the PMU state before the vgic state so
		 * that the vgic can properly sample the updated state of the
		 * interrupt line.
		 */
		kvm_pmu_sync_hwstate(vcpu);

		/*
		 * Sync the vgic state before syncing the timer state because
		 * the timer code needs to know if the virtual timer
		 * interrupts are active.
		 */
		kvm_vgic_sync_hwstate(vcpu);

		/*
		 * Sync the timer hardware state before enabling interrupts as
		 * we don't want vtimer interrupts to race with syncing the
		 * timer virtual interrupt state.
		 */
		if (static_branch_unlikely(&userspace_irqchip_in_use))
			kvm_timer_sync_user(vcpu);

		kvm_arch_vcpu_ctxsync_fp(vcpu);

		/*
		 * We must ensure that any pending interrupts are taken before
		 * we exit guest timing so that timer ticks are accounted as
		 * guest time. Transiently unmask interrupts so that any
		 * pending interrupts are taken.
		 *
		 * Per ARM DDI 0487G.b section D1.13.4, an ISB (or other
		 * context synchronization event) is necessary to ensure that
		 * pending interrupts are taken.
		 */
		if (ARM_EXCEPTION_CODE(ret) == ARM_EXCEPTION_IRQ) {
			local_irq_enable();
			isb();
			local_irq_disable();
		}

		guest_timing_exit_irqoff();

		local_irq_enable();

		trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));

		/* Exit types that need handling before we can be preempted */
		handle_exit_early(vcpu, ret);

		preempt_enable();

		/*
		 * The ARMv8 architecture doesn't give the hypervisor
		 * a mechanism to prevent a guest from dropping to AArch32 EL0
		 * if implemented by the CPU. If we spot the guest in such
		 * state and that we decided it wasn't supposed to do so (like
		 * with the asymmetric AArch32 case), return to userspace with
		 * a fatal error.
		 */
		if (vcpu_mode_is_bad_32bit(vcpu)) {
			/*
			 * As we have caught the guest red-handed, decide that
			 * it isn't fit for purpose anymore by making the vcpu
			 * invalid. The VMM can try and fix it by issuing a
			 * KVM_ARM_VCPU_INIT if it really wants to.
			 */
			vcpu_clear_flag(vcpu, VCPU_INITIALIZED);
			ret = ARM_EXCEPTION_IL;
		}

		ret = handle_exit(vcpu, ret);
	}

	/* Tell userspace about in-kernel device output levels */
	if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
		kvm_timer_update_run(vcpu);
		kvm_pmu_update_run(vcpu);
	}

	kvm_sigset_deactivate(vcpu);

out:
	/*
	 * In the unlikely event that we are returning to userspace
	 * with pending exceptions or PC adjustment, commit these
	 * adjustments in order to give userspace a consistent view of
	 * the vcpu state. Note that this relies on __kvm_adjust_pc()
	 * being preempt-safe on VHE.
	 */
	if (unlikely(vcpu_get_flag(vcpu, PENDING_EXCEPTION) ||
		     vcpu_get_flag(vcpu, INCREMENT_PC)))
		kvm_call_hyp(__kvm_adjust_pc, vcpu);

	vcpu_put(vcpu);
	return ret;
}

static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
{
	int bit_index;
	bool set;
	unsigned long *hcr;

	if (number == KVM_ARM_IRQ_CPU_IRQ)
		bit_index = __ffs(HCR_VI);
	else /* KVM_ARM_IRQ_CPU_FIQ */
		bit_index = __ffs(HCR_VF);

	hcr = vcpu_hcr(vcpu);
	if (level)
		set = test_and_set_bit(bit_index, hcr);
	else
		set = test_and_clear_bit(bit_index, hcr);

	/*
	 * If we didn't change anything, no need to wake up or kick other CPUs
	 */
	if (set == level)
		return 0;

	/*
	 * The vcpu irq_lines field was updated, wake up sleeping VCPUs and
	 * trigger a world-switch round on the running physical CPU to set the
	 * virtual IRQ/FIQ fields in the HCR appropriately.
	 */
	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
	kvm_vcpu_kick(vcpu);

	return 0;
}

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
			  bool line_status)
{
	u32 irq = irq_level->irq;
	unsigned int irq_type, vcpu_id, irq_num;
	struct kvm_vcpu *vcpu = NULL;
	bool level = irq_level->level;

	irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
	vcpu_id = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
	vcpu_id += ((irq >> KVM_ARM_IRQ_VCPU2_SHIFT) & KVM_ARM_IRQ_VCPU2_MASK) * (KVM_ARM_IRQ_VCPU_MASK + 1);
	irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;
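
	/*
	 * Illustrative decoding, assuming the usual field widths from the
	 * uapi header (where KVM_ARM_IRQ_VCPU_MASK is 0xff): if userspace
	 * encodes vcpu2 = 1 and vcpu = 3, the resulting vcpu_id is
	 * 3 + 1 * (0xff + 1) = 259, while irq_num selects the interrupt
	 * within that target (a PPI or SPI number, or IRQ/FIQ for the
	 * userspace-irqchip case).
	 */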

	trace_kvm_irq_line(irq_type, vcpu_id, irq_num, irq_level->level);

	switch (irq_type) {
	case KVM_ARM_IRQ_TYPE_CPU:
		if (irqchip_in_kernel(kvm))
			return -ENXIO;

		vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
		if (!vcpu)
			return -EINVAL;

		if (irq_num > KVM_ARM_IRQ_CPU_FIQ)
			return -EINVAL;

		return vcpu_interrupt_line(vcpu, irq_num, level);
	case KVM_ARM_IRQ_TYPE_PPI:
		if (!irqchip_in_kernel(kvm))
			return -ENXIO;

		vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
		if (!vcpu)
			return -EINVAL;

		if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS)
			return -EINVAL;

		return kvm_vgic_inject_irq(kvm, vcpu, irq_num, level, NULL);
	case KVM_ARM_IRQ_TYPE_SPI:
		if (!irqchip_in_kernel(kvm))
			return -ENXIO;

		if (irq_num < VGIC_NR_PRIVATE_IRQS)
			return -EINVAL;

		return kvm_vgic_inject_irq(kvm, NULL, irq_num, level, NULL);
	}

	return -EINVAL;
}

static unsigned long system_supported_vcpu_features(void)
{
	unsigned long features = KVM_VCPU_VALID_FEATURES;

	if (!cpus_have_final_cap(ARM64_HAS_32BIT_EL1))
		clear_bit(KVM_ARM_VCPU_EL1_32BIT, &features);

	if (!kvm_arm_support_pmu_v3())
		clear_bit(KVM_ARM_VCPU_PMU_V3, &features);

	if (!system_supports_sve())
		clear_bit(KVM_ARM_VCPU_SVE, &features);

	if (!kvm_has_full_ptr_auth()) {
		clear_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, &features);
		clear_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, &features);
	}

	if (!cpus_have_final_cap(ARM64_HAS_NESTED_VIRT))
		clear_bit(KVM_ARM_VCPU_HAS_EL2, &features);

	return features;
}

static int kvm_vcpu_init_check_features(struct kvm_vcpu *vcpu,
					const struct kvm_vcpu_init *init)
{
	unsigned long features = init->features[0];
	int i;

	if (features & ~KVM_VCPU_VALID_FEATURES)
		return -ENOENT;

	for (i = 1; i < ARRAY_SIZE(init->features); i++) {
		if (init->features[i])
			return -ENOENT;
	}

	if (features & ~system_supported_vcpu_features())
		return -EINVAL;

	/*
	 * For now make sure that both address/generic pointer authentication
	 * features are requested by the userspace together.
	 */
	if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, &features) !=
	    test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, &features))
		return -EINVAL;

	if (!test_bit(KVM_ARM_VCPU_EL1_32BIT, &features))
		return 0;

	/* MTE is incompatible with AArch32 */
	if (kvm_has_mte(vcpu->kvm))
		return -EINVAL;

	/* NV is incompatible with AArch32 */
	if (test_bit(KVM_ARM_VCPU_HAS_EL2, &features))
		return -EINVAL;

	return 0;
}

static bool kvm_vcpu_init_changed(struct kvm_vcpu *vcpu,
				  const struct kvm_vcpu_init *init)
{
	unsigned long features = init->features[0];

	return !bitmap_equal(vcpu->kvm->arch.vcpu_features, &features,
			     KVM_VCPU_MAX_FEATURES);
}

static int kvm_setup_vcpu(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	int ret = 0;

	/*
	 * When the vCPU has a PMU, but no PMU is set for the guest
	 * yet, set the default one.
	 */
	if (kvm_vcpu_has_pmu(vcpu) && !kvm->arch.arm_pmu)
		ret = kvm_arm_set_default_pmu(kvm);

	/* Prepare for nested if required */
	if (!ret && vcpu_has_nv(vcpu))
		ret = kvm_vcpu_init_nested(vcpu);

	return ret;
}

static int __kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
				 const struct kvm_vcpu_init *init)
{
	unsigned long features = init->features[0];
	struct kvm *kvm = vcpu->kvm;
	int ret = -EINVAL;

	mutex_lock(&kvm->arch.config_lock);

	if (test_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags) &&
	    kvm_vcpu_init_changed(vcpu, init))
		goto out_unlock;

	bitmap_copy(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES);

	ret = kvm_setup_vcpu(vcpu);
	if (ret)
		goto out_unlock;

	/* Now we know what it is, we can reset it. */
	kvm_reset_vcpu(vcpu);

	set_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags);
	vcpu_set_flag(vcpu, VCPU_INITIALIZED);
	ret = 0;
out_unlock:
	mutex_unlock(&kvm->arch.config_lock);
	return ret;
}

static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
			       const struct kvm_vcpu_init *init)
{
	int ret;

	if (init->target != KVM_ARM_TARGET_GENERIC_V8 &&
	    init->target != kvm_target_cpu())
		return -EINVAL;

	ret = kvm_vcpu_init_check_features(vcpu, init);
	if (ret)
		return ret;

	if (!kvm_vcpu_initialized(vcpu))
		return __kvm_vcpu_set_target(vcpu, init);

	if (kvm_vcpu_init_changed(vcpu, init))
		return -EINVAL;

	kvm_reset_vcpu(vcpu);
	return 0;
}

static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
					 struct kvm_vcpu_init *init)
{
	bool power_off = false;
	int ret;

	/*
	 * Treat the power-off vCPU feature as ephemeral. Clear the bit to avoid
	 * reflecting it in the finalized feature set, thus limiting its scope
	 * to a single KVM_ARM_VCPU_INIT call.
	 */
	if (init->features[0] & BIT(KVM_ARM_VCPU_POWER_OFF)) {
		init->features[0] &= ~BIT(KVM_ARM_VCPU_POWER_OFF);
		power_off = true;
	}

	ret = kvm_vcpu_set_target(vcpu, init);
	if (ret)
		return ret;

	/*
	 * Ensure a rebooted VM will fault in RAM pages and detect if the
	 * guest MMU is turned off and flush the caches as needed.
	 *
	 * S2FWB enforces all memory accesses to RAM being cacheable,
	 * ensuring that the data side is always coherent. We still
	 * need to invalidate the I-cache though, as FWB does *not*
	 * imply CTR_EL0.DIC.
	 */
	if (vcpu_has_run_once(vcpu)) {
		if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
			stage2_unmap_vm(vcpu->kvm);
		else
			icache_inval_all_pou();
	}

	vcpu_reset_hcr(vcpu);
	vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu);

	/*
	 * Handle the "start in power-off" case.
	 */
	spin_lock(&vcpu->arch.mp_state_lock);

	if (power_off)
		__kvm_arm_vcpu_power_off(vcpu);
	else
		WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_RUNNABLE);

	spin_unlock(&vcpu->arch.mp_state_lock);

	return 0;
}

static int kvm_arm_vcpu_set_attr(struct kvm_vcpu *vcpu,
				 struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->group) {
	default:
		ret = kvm_arm_vcpu_arch_set_attr(vcpu, attr);
		break;
	}

	return ret;
}

static int kvm_arm_vcpu_get_attr(struct kvm_vcpu *vcpu,
				 struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->group) {
	default:
		ret = kvm_arm_vcpu_arch_get_attr(vcpu, attr);
		break;
	}

	return ret;
}

static int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu,
				 struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->group) {
	default:
		ret = kvm_arm_vcpu_arch_has_attr(vcpu, attr);
		break;
	}

	return ret;
}

static int kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu_events *events)
{
	memset(events, 0, sizeof(*events));

	return __kvm_arm_vcpu_get_events(vcpu, events);
}

static int kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu_events *events)
{
	int i;

	/* check whether the reserved field is zero */
	for (i = 0; i < ARRAY_SIZE(events->reserved); i++)
		if (events->reserved[i])
			return -EINVAL;

	/* check whether the pad field is zero */
	for (i = 0; i < ARRAY_SIZE(events->exception.pad); i++)
		if (events->exception.pad[i])
			return -EINVAL;

	return __kvm_arm_vcpu_set_events(vcpu, events);
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	long r;

	switch (ioctl) {
	case KVM_ARM_VCPU_INIT: {
		struct kvm_vcpu_init init;

		r = -EFAULT;
		if (copy_from_user(&init, argp, sizeof(init)))
			break;

		r = kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init);
		break;
	}
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -ENOEXEC;
		if (unlikely(!kvm_vcpu_initialized(vcpu)))
			break;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;

		/*
		 * We could owe a reset due to PSCI. Handle the pending reset
		 * here to ensure userspace register accesses are ordered after
		 * the reset.
		 */
		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
			kvm_reset_vcpu(vcpu);

		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arm_set_reg(vcpu, &reg);
		else
			r = kvm_arm_get_reg(vcpu, &reg);
		break;
	}
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		struct kvm_reg_list reg_list;
		unsigned n;

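		/*
		 * Usage note (based on the checks below): userspace typically
		 * calls this twice - first with a small or zero reg_list.n to
		 * learn the register count (that call fails with -E2BIG after
		 * the real count has been written back), then again with a
		 * buffer large enough to hold all the indices.
		 */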
1724 r = -ENOEXEC;
1725 if (unlikely(!kvm_vcpu_initialized(vcpu)))
1726 break;
1727
1728 r = -EPERM;
1729 if (!kvm_arm_vcpu_is_finalized(vcpu))
1730 break;
1731
1732 r = -EFAULT;
1733 if (copy_from_user(®_list, user_list, sizeof(reg_list)))
1734 break;
1735 n = reg_list.n;
1736 reg_list.n = kvm_arm_num_regs(vcpu);
1737 if (copy_to_user(user_list, ®_list, sizeof(reg_list)))
1738 break;
1739 r = -E2BIG;
1740 if (n < reg_list.n)
1741 break;
1742 r = kvm_arm_copy_reg_indices(vcpu, user_list->reg);
1743 break;
1744 }
1745 case KVM_SET_DEVICE_ATTR: {
1746 r = -EFAULT;
1747 if (copy_from_user(&attr, argp, sizeof(attr)))
1748 break;
1749 r = kvm_arm_vcpu_set_attr(vcpu, &attr);
1750 break;
1751 }
1752 case KVM_GET_DEVICE_ATTR: {
1753 r = -EFAULT;
1754 if (copy_from_user(&attr, argp, sizeof(attr)))
1755 break;
1756 r = kvm_arm_vcpu_get_attr(vcpu, &attr);
1757 break;
1758 }
1759 case KVM_HAS_DEVICE_ATTR: {
1760 r = -EFAULT;
1761 if (copy_from_user(&attr, argp, sizeof(attr)))
1762 break;
1763 r = kvm_arm_vcpu_has_attr(vcpu, &attr);
1764 break;
1765 }
1766 case KVM_GET_VCPU_EVENTS: {
1767 struct kvm_vcpu_events events;
1768
1769 if (kvm_arm_vcpu_get_events(vcpu, &events))
1770 return -EINVAL;
1771
1772 if (copy_to_user(argp, &events, sizeof(events)))
1773 return -EFAULT;
1774
1775 return 0;
1776 }
1777 case KVM_SET_VCPU_EVENTS: {
1778 struct kvm_vcpu_events events;
1779
1780 if (copy_from_user(&events, argp, sizeof(events)))
1781 return -EFAULT;
1782
1783 return kvm_arm_vcpu_set_events(vcpu, &events);
1784 }
1785 case KVM_ARM_VCPU_FINALIZE: {
1786 int what;
1787
1788 if (!kvm_vcpu_initialized(vcpu))
1789 return -ENOEXEC;
1790
1791 if (get_user(what, (const int __user *)argp))
1792 return -EFAULT;
1793
1794 return kvm_arm_vcpu_finalize(vcpu, what);
1795 }
1796 default:
1797 r = -EINVAL;
1798 }
1799
1800 return r;
1801 }
1802
kvm_arch_sync_dirty_log(struct kvm * kvm,struct kvm_memory_slot * memslot)1803 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
1804 {
1805
1806 }
1807
kvm_vm_ioctl_set_device_addr(struct kvm * kvm,struct kvm_arm_device_addr * dev_addr)1808 static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
1809 struct kvm_arm_device_addr *dev_addr)
1810 {
1811 switch (FIELD_GET(KVM_ARM_DEVICE_ID_MASK, dev_addr->id)) {
1812 case KVM_ARM_DEVICE_VGIC_V2:
1813 if (!vgic_present)
1814 return -ENXIO;
1815 return kvm_set_legacy_vgic_v2_addr(kvm, dev_addr);
1816 default:
1817 return -ENODEV;
1818 }
1819 }
1820
kvm_vm_has_attr(struct kvm * kvm,struct kvm_device_attr * attr)1821 static int kvm_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1822 {
1823 switch (attr->group) {
1824 case KVM_ARM_VM_SMCCC_CTRL:
1825 return kvm_vm_smccc_has_attr(kvm, attr);
1826 default:
1827 return -ENXIO;
1828 }
1829 }
1830
kvm_vm_set_attr(struct kvm * kvm,struct kvm_device_attr * attr)1831 static int kvm_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1832 {
1833 switch (attr->group) {
1834 case KVM_ARM_VM_SMCCC_CTRL:
1835 return kvm_vm_smccc_set_attr(kvm, attr);
1836 default:
1837 return -ENXIO;
1838 }
1839 }
1840
kvm_arch_vm_ioctl(struct file * filp,unsigned int ioctl,unsigned long arg)1841 int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
1842 {
1843 struct kvm *kvm = filp->private_data;
1844 void __user *argp = (void __user *)arg;
1845 struct kvm_device_attr attr;
1846
1847 switch (ioctl) {
1848 case KVM_CREATE_IRQCHIP: {
1849 int ret;
1850 if (!vgic_present)
1851 return -ENXIO;
1852 mutex_lock(&kvm->lock);
1853 ret = kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
1854 mutex_unlock(&kvm->lock);
1855 return ret;
1856 }
1857 case KVM_ARM_SET_DEVICE_ADDR: {
1858 struct kvm_arm_device_addr dev_addr;
1859
1860 if (copy_from_user(&dev_addr, argp, sizeof(dev_addr)))
1861 return -EFAULT;
1862 return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
1863 }
1864 case KVM_ARM_PREFERRED_TARGET: {
1865 struct kvm_vcpu_init init = {
1866 .target = KVM_ARM_TARGET_GENERIC_V8,
1867 };
1868
1869 if (copy_to_user(argp, &init, sizeof(init)))
1870 return -EFAULT;
1871
1872 return 0;
1873 }
1874 case KVM_ARM_MTE_COPY_TAGS: {
1875 struct kvm_arm_copy_mte_tags copy_tags;
1876
1877 if (copy_from_user(©_tags, argp, sizeof(copy_tags)))
1878 return -EFAULT;
1879 return kvm_vm_ioctl_mte_copy_tags(kvm, ©_tags);
1880 }
1881 case KVM_ARM_SET_COUNTER_OFFSET: {
1882 struct kvm_arm_counter_offset offset;
1883
1884 if (copy_from_user(&offset, argp, sizeof(offset)))
1885 return -EFAULT;
1886 return kvm_vm_ioctl_set_counter_offset(kvm, &offset);
1887 }
1888 case KVM_HAS_DEVICE_ATTR: {
1889 if (copy_from_user(&attr, argp, sizeof(attr)))
1890 return -EFAULT;
1891
1892 return kvm_vm_has_attr(kvm, &attr);
1893 }
1894 case KVM_SET_DEVICE_ATTR: {
1895 if (copy_from_user(&attr, argp, sizeof(attr)))
1896 return -EFAULT;
1897
1898 return kvm_vm_set_attr(kvm, &attr);
1899 }
1900 case KVM_ARM_GET_REG_WRITABLE_MASKS: {
1901 struct reg_mask_range range;
1902
1903 if (copy_from_user(&range, argp, sizeof(range)))
1904 return -EFAULT;
1905 return kvm_vm_ioctl_get_reg_writable_masks(kvm, &range);
1906 }
1907 default:
1908 return -EINVAL;
1909 }
1910 }
1911
1912 /* unlocks vcpus from @vcpu_lock_idx and smaller */
unlock_vcpus(struct kvm * kvm,int vcpu_lock_idx)1913 static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
1914 {
1915 struct kvm_vcpu *tmp_vcpu;
1916
1917 for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
1918 tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
1919 mutex_unlock(&tmp_vcpu->mutex);
1920 }
1921 }
1922
unlock_all_vcpus(struct kvm * kvm)1923 void unlock_all_vcpus(struct kvm *kvm)
1924 {
1925 lockdep_assert_held(&kvm->lock);
1926
1927 unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
1928 }
1929
1930 /* Returns true if all vcpus were locked, false otherwise */
lock_all_vcpus(struct kvm * kvm)1931 bool lock_all_vcpus(struct kvm *kvm)
1932 {
1933 struct kvm_vcpu *tmp_vcpu;
1934 unsigned long c;
1935
1936 lockdep_assert_held(&kvm->lock);
1937
1938 /*
1939 * Any time a vcpu is in an ioctl (including running), the
1940 * core KVM code tries to grab the vcpu->mutex.
1941 *
1942 * By grabbing the vcpu->mutex of all VCPUs we ensure that no
1943 * other VCPUs can fiddle with the state while we access it.
1944 */
1945 kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
1946 if (!mutex_trylock(&tmp_vcpu->mutex)) {
1947 unlock_vcpus(kvm, c - 1);
1948 return false;
1949 }
1950 }
1951
1952 return true;
1953 }
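/*
 * Typical calling pattern (illustrative sketch): callers such as the vgic
 * code hold kvm->lock and then try to pin every vCPU before touching
 * VM-wide state, bailing out if any vCPU is busy in an ioctl:
 *
 *	mutex_lock(&kvm->lock);
 *	if (!lock_all_vcpus(kvm)) {
 *		mutex_unlock(&kvm->lock);
 *		return -EBUSY;
 *	}
 *	... modify state no vCPU may observe half-updated ...
 *	unlock_all_vcpus(kvm);
 *	mutex_unlock(&kvm->lock);
 */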
1954
1955 static unsigned long nvhe_percpu_size(void)
1956 {
1957 return (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_end) -
1958 (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_start);
1959 }
1960
1961 static unsigned long nvhe_percpu_order(void)
1962 {
1963 unsigned long size = nvhe_percpu_size();
1964
1965 return size ? get_order(size) : 0;
1966 }
1967
1968 static size_t pkvm_host_sve_state_order(void)
1969 {
1970 return get_order(pkvm_host_sve_state_size());
1971 }
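/*
 * Worked example of the get_order() arithmetic used above (illustrative):
 * get_order() rounds a byte count up to a power-of-two number of pages and
 * returns the exponent, so with 4KiB pages a 5000-byte per-CPU section
 * yields get_order(5000) == 1, i.e. alloc_pages()/free_pages() operate on
 * an 8KiB block. The actual sizes depend on the build.
 */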
1972
1973 /* A lookup table holding the hypervisor VA for each vector slot */
1974 static void *hyp_spectre_vector_selector[BP_HARDEN_EL2_SLOTS];
1975
1976 static void kvm_init_vector_slot(void *base, enum arm64_hyp_spectre_vector slot)
1977 {
1978 hyp_spectre_vector_selector[slot] = __kvm_vector_slot2addr(base, slot);
1979 }
1980
1981 static int kvm_init_vector_slots(void)
1982 {
1983 int err;
1984 void *base;
1985
1986 base = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
1987 kvm_init_vector_slot(base, HYP_VECTOR_DIRECT);
1988
1989 base = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
1990 kvm_init_vector_slot(base, HYP_VECTOR_SPECTRE_DIRECT);
1991
1992 if (kvm_system_needs_idmapped_vectors() &&
1993 !is_protected_kvm_enabled()) {
1994 err = create_hyp_exec_mappings(__pa_symbol(__bp_harden_hyp_vecs),
1995 __BP_HARDEN_HYP_VECS_SZ, &base);
1996 if (err)
1997 return err;
1998 }
1999
2000 kvm_init_vector_slot(base, HYP_VECTOR_INDIRECT);
2001 kvm_init_vector_slot(base, HYP_VECTOR_SPECTRE_INDIRECT);
2002 return 0;
2003 }
2004
2005 static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
2006 {
2007 struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
2008 u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
2009 unsigned long tcr;
2010
2011 /*
2012 * Calculate the raw per-cpu offset without a translation from the
2013 * kernel's mapping to the linear mapping, and store it in tpidr_el2
2014 * so that we can use adr_l to access per-cpu variables in EL2.
2015 * Also drop the KASAN tag which gets in the way...
2016 */
2017 params->tpidr_el2 = (unsigned long)kasan_reset_tag(per_cpu_ptr_nvhe_sym(__per_cpu_start, cpu)) -
2018 (unsigned long)kvm_ksym_ref(CHOOSE_NVHE_SYM(__per_cpu_start));
2019
2020 params->mair_el2 = read_sysreg(mair_el1);
2021
2022 tcr = read_sysreg(tcr_el1);
2023 if (cpus_have_final_cap(ARM64_KVM_HVHE)) {
2024 tcr |= TCR_EPD1_MASK;
2025 } else {
2026 tcr &= TCR_EL2_MASK;
2027 tcr |= TCR_EL2_RES1;
2028 }
2029 tcr &= ~TCR_T0SZ_MASK;
2030 tcr |= TCR_T0SZ(hyp_va_bits);
2031 tcr &= ~TCR_EL2_PS_MASK;
2032 tcr |= FIELD_PREP(TCR_EL2_PS_MASK, kvm_get_parange(mmfr0));
2033 if (kvm_lpa2_is_enabled())
2034 tcr |= TCR_EL2_DS;
2035 params->tcr_el2 = tcr;
2036
2037 params->pgd_pa = kvm_mmu_get_httbr();
2038 if (is_protected_kvm_enabled())
2039 params->hcr_el2 = HCR_HOST_NVHE_PROTECTED_FLAGS;
2040 else
2041 params->hcr_el2 = HCR_HOST_NVHE_FLAGS;
2042 if (cpus_have_final_cap(ARM64_KVM_HVHE))
2043 params->hcr_el2 |= HCR_E2H;
2044 params->vttbr = params->vtcr = 0;
2045
2046 /*
2047 * Flush the init params from the data cache because the struct will
2048 * be read while the MMU is off.
2049 */
2050 kvm_flush_dcache_to_poc(params, sizeof(*params));
2051 }
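/*
 * Worked example of the TCR_EL2 arithmetic above (illustrative): TCR_T0SZ()
 * encodes 64 - VA bits, so hyp_va_bits == 48 yields T0SZ == 16 (a 48-bit
 * EL2 stage-1 VA space) and hyp_va_bits == 39 yields T0SZ == 25. The PS
 * field is derived from ID_AA64MMFR0_EL1.PARange via kvm_get_parange(),
 * limiting the output address size to what the CPU reports.
 */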
2052
2053 static void hyp_install_host_vector(void)
2054 {
2055 struct kvm_nvhe_init_params *params;
2056 struct arm_smccc_res res;
2057
2058 /* Switch from the HYP stub to our own HYP init vector */
2059 __hyp_set_vectors(kvm_get_idmap_vector());
2060
2061 /*
2062 * Call initialization code, and switch to the full blown HYP code.
2063 * If the cpucaps haven't been finalized yet, something has gone very
2064 * wrong, and hyp will crash and burn when it uses any
2065 * cpus_have_*_cap() wrapper.
2066 */
2067 BUG_ON(!system_capabilities_finalized());
2068 params = this_cpu_ptr_nvhe_sym(kvm_init_params);
2069 arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_hyp_init), virt_to_phys(params), &res);
2070 WARN_ON(res.a0 != SMCCC_RET_SUCCESS);
2071 }
2072
2073 static void cpu_init_hyp_mode(void)
2074 {
2075 hyp_install_host_vector();
2076
2077 /*
2078 * Disabling SSBD on a non-VHE system requires us to enable SSBS
2079 * at EL2.
2080 */
2081 if (this_cpu_has_cap(ARM64_SSBS) &&
2082 arm64_get_spectre_v4_state() == SPECTRE_VULNERABLE) {
2083 kvm_call_hyp_nvhe(__kvm_enable_ssbs);
2084 }
2085 }
2086
2087 static void cpu_hyp_reset(void)
2088 {
2089 if (!is_kernel_in_hyp_mode())
2090 __hyp_reset_vectors();
2091 }
2092
2093 /*
2094 * EL2 vectors can be mapped and rerouted in a number of ways,
2095 * depending on the kernel configuration and CPU present:
2096 *
2097 * - If the CPU is affected by Spectre-v2, the hardening sequence is
2098 * placed in one of the vector slots, which is executed before jumping
2099 * to the real vectors.
2100 *
2101 * - If the CPU also has the ARM64_SPECTRE_V3A cap, the slot
2102 * containing the hardening sequence is mapped next to the idmap page,
2103 * and executed before jumping to the real vectors.
2104 *
2105 * - If the CPU only has the ARM64_SPECTRE_V3A cap, then an
2106 * empty slot is selected, mapped next to the idmap page, and
2107 * executed before jumping to the real vectors.
2108 *
2109 * Note that ARM64_SPECTRE_V3A is somewhat incompatible with
2110 * VHE, as we don't have hypervisor-specific mappings. If the system
2111 * is VHE and yet selects this capability, it will be ignored.
2112 */
2113 static void cpu_set_hyp_vector(void)
2114 {
2115 struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);
2116 void *vector = hyp_spectre_vector_selector[data->slot];
2117
2118 if (!is_protected_kvm_enabled())
2119 *this_cpu_ptr_hyp_sym(kvm_hyp_vector) = (unsigned long)vector;
2120 else
2121 kvm_call_hyp_nvhe(__pkvm_cpu_set_vector, data->slot);
2122 }
2123
2124 static void cpu_hyp_init_context(void)
2125 {
2126 kvm_init_host_cpu_context(host_data_ptr(host_ctxt));
2127
2128 if (!is_kernel_in_hyp_mode())
2129 cpu_init_hyp_mode();
2130 }
2131
2132 static void cpu_hyp_init_features(void)
2133 {
2134 cpu_set_hyp_vector();
2135 kvm_arm_init_debug();
2136
2137 if (is_kernel_in_hyp_mode())
2138 kvm_timer_init_vhe();
2139
2140 if (vgic_present)
2141 kvm_vgic_init_cpu_hardware();
2142 }
2143
2144 static void cpu_hyp_reinit(void)
2145 {
2146 cpu_hyp_reset();
2147 cpu_hyp_init_context();
2148 cpu_hyp_init_features();
2149 }
2150
2151 static void cpu_hyp_init(void *discard)
2152 {
2153 if (!__this_cpu_read(kvm_hyp_initialized)) {
2154 cpu_hyp_reinit();
2155 __this_cpu_write(kvm_hyp_initialized, 1);
2156 }
2157 }
2158
2159 static void cpu_hyp_uninit(void *discard)
2160 {
2161 if (__this_cpu_read(kvm_hyp_initialized)) {
2162 cpu_hyp_reset();
2163 __this_cpu_write(kvm_hyp_initialized, 0);
2164 }
2165 }
2166
2167 int kvm_arch_hardware_enable(void)
2168 {
2169 /*
2170 * Most calls to this function are made with migration
2171 * disabled, but not with preemption disabled. The former is
2172 * enough to ensure correctness, but most of the helpers
2173 * expect the latter and will throw a tantrum otherwise.
2174 */
2175 preempt_disable();
2176
2177 cpu_hyp_init(NULL);
2178
2179 kvm_vgic_cpu_up();
2180 kvm_timer_cpu_up();
2181
2182 preempt_enable();
2183
2184 return 0;
2185 }
2186
2187 void kvm_arch_hardware_disable(void)
2188 {
2189 kvm_timer_cpu_down();
2190 kvm_vgic_cpu_down();
2191
2192 if (!is_protected_kvm_enabled())
2193 cpu_hyp_uninit(NULL);
2194 }
2195
2196 #ifdef CONFIG_CPU_PM
2197 static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
2198 unsigned long cmd,
2199 void *v)
2200 {
2201 /*
2202 * kvm_hyp_initialized is left with its old value over
2203 * PM_ENTER->PM_EXIT. It is used to indicate PM_EXIT should
2204 * re-enable hyp.
2205 */
2206 switch (cmd) {
2207 case CPU_PM_ENTER:
2208 if (__this_cpu_read(kvm_hyp_initialized))
2209 /*
2210 * don't update kvm_hyp_initialized here
2211 * so that the hyp will be re-enabled
2212 * when we resume. See below.
2213 */
2214 cpu_hyp_reset();
2215
2216 return NOTIFY_OK;
2217 case CPU_PM_ENTER_FAILED:
2218 case CPU_PM_EXIT:
2219 if (__this_cpu_read(kvm_hyp_initialized))
2220 /* The hyp was enabled before suspend. */
2221 cpu_hyp_reinit();
2222
2223 return NOTIFY_OK;
2224
2225 default:
2226 return NOTIFY_DONE;
2227 }
2228 }
2229
2230 static struct notifier_block hyp_init_cpu_pm_nb = {
2231 .notifier_call = hyp_init_cpu_pm_notifier,
2232 };
2233
2234 static void __init hyp_cpu_pm_init(void)
2235 {
2236 if (!is_protected_kvm_enabled())
2237 cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
2238 }
2239 static void __init hyp_cpu_pm_exit(void)
2240 {
2241 if (!is_protected_kvm_enabled())
2242 cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb);
2243 }
2244 #else
2245 static inline void __init hyp_cpu_pm_init(void)
2246 {
2247 }
2248 static inline void __init hyp_cpu_pm_exit(void)
2249 {
2250 }
2251 #endif
2252
2253 static void __init init_cpu_logical_map(void)
2254 {
2255 unsigned int cpu;
2256
2257 /*
2258 * Copy the MPIDR <-> logical CPU ID mapping to hyp.
2259 * Only copy the set of online CPUs whose features have been checked
2260 * against the finalized system capabilities. The hypervisor will not
2261 * allow any other CPUs from the `possible` set to boot.
2262 */
2263 for_each_online_cpu(cpu)
2264 hyp_cpu_logical_map[cpu] = cpu_logical_map(cpu);
2265 }
2266
2267 #define init_psci_0_1_impl_state(config, what) \
2268 config.psci_0_1_ ## what ## _implemented = psci_ops.what
2269
2270 static bool __init init_psci_relay(void)
2271 {
2272 /*
2273 * If PSCI has not been initialized, protected KVM cannot install
2274 * itself on newly booted CPUs.
2275 */
2276 if (!psci_ops.get_version) {
2277 kvm_err("Cannot initialize protected mode without PSCI\n");
2278 return false;
2279 }
2280
2281 kvm_host_psci_config.version = psci_ops.get_version();
2282 kvm_host_psci_config.smccc_version = arm_smccc_get_version();
2283
2284 if (kvm_host_psci_config.version == PSCI_VERSION(0, 1)) {
2285 kvm_host_psci_config.function_ids_0_1 = get_psci_0_1_function_ids();
2286 init_psci_0_1_impl_state(kvm_host_psci_config, cpu_suspend);
2287 init_psci_0_1_impl_state(kvm_host_psci_config, cpu_on);
2288 init_psci_0_1_impl_state(kvm_host_psci_config, cpu_off);
2289 init_psci_0_1_impl_state(kvm_host_psci_config, migrate);
2290 }
2291 return true;
2292 }
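/*
 * For reference (assumption based on include/uapi/linux/psci.h):
 * PSCI_VERSION() packs the major revision into bits [31:16] and the minor
 * revision into bits [15:0]. The 0.1 check above therefore matches legacy
 * firmware whose function IDs are not architecturally fixed and must be
 * taken from the firmware description (hence get_psci_0_1_function_ids()).
 */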
2293
2294 static int __init init_subsystems(void)
2295 {
2296 int err = 0;
2297
2298 /*
2299 * Enable hardware so that subsystem initialisation can access EL2.
2300 */
2301 on_each_cpu(cpu_hyp_init, NULL, 1);
2302
2303 /*
2304 * Register CPU low-power notifier
2305 */
2306 hyp_cpu_pm_init();
2307
2308 /*
2309 * Init HYP view of VGIC
2310 */
2311 err = kvm_vgic_hyp_init();
2312 switch (err) {
2313 case 0:
2314 vgic_present = true;
2315 break;
2316 case -ENODEV:
2317 case -ENXIO:
2318 vgic_present = false;
2319 err = 0;
2320 break;
2321 default:
2322 goto out;
2323 }
2324
2325 /*
2326 * Init HYP architected timer support
2327 */
2328 err = kvm_timer_hyp_init(vgic_present);
2329 if (err)
2330 goto out;
2331
2332 kvm_register_perf_callbacks(NULL);
2333
2334 out:
2335 if (err)
2336 hyp_cpu_pm_exit();
2337
2338 if (err || !is_protected_kvm_enabled())
2339 on_each_cpu(cpu_hyp_uninit, NULL, 1);
2340
2341 return err;
2342 }
2343
2344 static void __init teardown_subsystems(void)
2345 {
2346 kvm_unregister_perf_callbacks();
2347 hyp_cpu_pm_exit();
2348 }
2349
2350 static void __init teardown_hyp_mode(void)
2351 {
2352 bool free_sve = system_supports_sve() && is_protected_kvm_enabled();
2353 int cpu;
2354
2355 free_hyp_pgds();
2356 for_each_possible_cpu(cpu) {
2357 free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
2358 free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order());
2359
2360 if (free_sve) {
2361 struct cpu_sve_state *sve_state;
2362
2363 sve_state = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state;
2364 free_pages((unsigned long) sve_state, pkvm_host_sve_state_order());
2365 }
2366 }
2367 }
2368
2369 static int __init do_pkvm_init(u32 hyp_va_bits)
2370 {
2371 void *per_cpu_base = kvm_ksym_ref(kvm_nvhe_sym(kvm_arm_hyp_percpu_base));
2372 int ret;
2373
2374 preempt_disable();
2375 cpu_hyp_init_context();
2376 ret = kvm_call_hyp_nvhe(__pkvm_init, hyp_mem_base, hyp_mem_size,
2377 num_possible_cpus(), kern_hyp_va(per_cpu_base),
2378 hyp_va_bits);
2379 cpu_hyp_init_features();
2380
2381 /*
2382 * The stub hypercalls are now disabled, so set our local flag to
2383 * prevent a later re-init attempt in kvm_arch_hardware_enable().
2384 */
2385 __this_cpu_write(kvm_hyp_initialized, 1);
2386 preempt_enable();
2387
2388 return ret;
2389 }
2390
2391 static u64 get_hyp_id_aa64pfr0_el1(void)
2392 {
2393 /*
2394 * Track whether the system isn't affected by spectre/meltdown in the
2395 * hypervisor's view of id_aa64pfr0_el1, used for protected VMs.
2396 * Although this is per-CPU, we make it global for simplicity, e.g., not
2397 * to have to worry about vcpu migration.
2398 *
2399 * Unlike for non-protected VMs, userspace cannot override this for
2400 * protected VMs.
2401 */
2402 u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
2403
2404 val &= ~(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) |
2405 ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3));
2406
2407 val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2),
2408 arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED);
2409 val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3),
2410 arm64_get_meltdown_state() == SPECTRE_UNAFFECTED);
2411
2412 return val;
2413 }
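/*
 * For reference: CSV2 occupies ID_AA64PFR0_EL1 bits [59:56] and CSV3 bits
 * [63:60], so the FIELD_PREP() calls above set each field to 1 only when
 * the host is known to be unaffected by Spectre-v2 or Meltdown
 * respectively, and leave it at 0 otherwise.
 */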
2414
2415 static void kvm_hyp_init_symbols(void)
2416 {
2417 kvm_nvhe_sym(id_aa64pfr0_el1_sys_val) = get_hyp_id_aa64pfr0_el1();
2418 kvm_nvhe_sym(id_aa64pfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
2419 kvm_nvhe_sym(id_aa64isar0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR0_EL1);
2420 kvm_nvhe_sym(id_aa64isar1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR1_EL1);
2421 kvm_nvhe_sym(id_aa64isar2_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1);
2422 kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
2423 kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
2424 kvm_nvhe_sym(id_aa64mmfr2_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR2_EL1);
2425 kvm_nvhe_sym(id_aa64smfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64SMFR0_EL1);
2426 kvm_nvhe_sym(__icache_flags) = __icache_flags;
2427 kvm_nvhe_sym(kvm_arm_vmid_bits) = kvm_arm_vmid_bits;
2428 }
2429
2430 static int __init kvm_hyp_init_protection(u32 hyp_va_bits)
2431 {
2432 void *addr = phys_to_virt(hyp_mem_base);
2433 int ret;
2434
2435 ret = create_hyp_mappings(addr, addr + hyp_mem_size, PAGE_HYP);
2436 if (ret)
2437 return ret;
2438
2439 ret = do_pkvm_init(hyp_va_bits);
2440 if (ret)
2441 return ret;
2442
2443 free_hyp_pgds();
2444
2445 return 0;
2446 }
2447
2448 static int init_pkvm_host_sve_state(void)
2449 {
2450 int cpu;
2451
2452 if (!system_supports_sve())
2453 return 0;
2454
2455 /* Allocate pages for host sve state in protected mode. */
2456 for_each_possible_cpu(cpu) {
2457 struct page *page = alloc_pages(GFP_KERNEL, pkvm_host_sve_state_order());
2458
2459 if (!page)
2460 return -ENOMEM;
2461
2462 per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state = page_address(page);
2463 }
2464
2465 /*
2466 * Don't map the pages in hyp since these are only used in protected
2467 * mode, which will (re)create its own mapping when initialized.
2468 */
2469
2470 return 0;
2471 }
2472
2473 /*
2474 * Finalizes the initialization of hyp mode, once everything else is initialized
2475 * and the initialization process cannot fail.
2476 */
2477 static void finalize_init_hyp_mode(void)
2478 {
2479 int cpu;
2480
2481 if (system_supports_sve() && is_protected_kvm_enabled()) {
2482 for_each_possible_cpu(cpu) {
2483 struct cpu_sve_state *sve_state;
2484
2485 sve_state = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state;
2486 per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state =
2487 kern_hyp_va(sve_state);
2488 }
2489 } else {
2490 for_each_possible_cpu(cpu) {
2491 struct user_fpsimd_state *fpsimd_state;
2492
2493 fpsimd_state = &per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->host_ctxt.fp_regs;
2494 per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->fpsimd_state =
2495 kern_hyp_va(fpsimd_state);
2496 }
2497 }
2498 }
2499
2500 static void pkvm_hyp_init_ptrauth(void)
2501 {
2502 struct kvm_cpu_context *hyp_ctxt;
2503 int cpu;
2504
2505 for_each_possible_cpu(cpu) {
2506 hyp_ctxt = per_cpu_ptr_nvhe_sym(kvm_hyp_ctxt, cpu);
2507 hyp_ctxt->sys_regs[APIAKEYLO_EL1] = get_random_long();
2508 hyp_ctxt->sys_regs[APIAKEYHI_EL1] = get_random_long();
2509 hyp_ctxt->sys_regs[APIBKEYLO_EL1] = get_random_long();
2510 hyp_ctxt->sys_regs[APIBKEYHI_EL1] = get_random_long();
2511 hyp_ctxt->sys_regs[APDAKEYLO_EL1] = get_random_long();
2512 hyp_ctxt->sys_regs[APDAKEYHI_EL1] = get_random_long();
2513 hyp_ctxt->sys_regs[APDBKEYLO_EL1] = get_random_long();
2514 hyp_ctxt->sys_regs[APDBKEYHI_EL1] = get_random_long();
2515 hyp_ctxt->sys_regs[APGAKEYLO_EL1] = get_random_long();
2516 hyp_ctxt->sys_regs[APGAKEYHI_EL1] = get_random_long();
2517 }
2518 }
2519
2520 /* Inits Hyp-mode on all online CPUs */
2521 static int __init init_hyp_mode(void)
2522 {
2523 u32 hyp_va_bits;
2524 int cpu;
2525 int err = -ENOMEM;
2526
2527 /*
2528 * The protected Hyp-mode cannot be initialized if the memory pool
2529 * allocation has failed.
2530 */
2531 if (is_protected_kvm_enabled() && !hyp_mem_base)
2532 goto out_err;
2533
2534 /*
2535 * Allocate Hyp PGD and setup Hyp identity mapping
2536 */
2537 err = kvm_mmu_init(&hyp_va_bits);
2538 if (err)
2539 goto out_err;
2540
2541 /*
2542 * Allocate stack pages for Hypervisor-mode
2543 */
2544 for_each_possible_cpu(cpu) {
2545 unsigned long stack_page;
2546
2547 stack_page = __get_free_page(GFP_KERNEL);
2548 if (!stack_page) {
2549 err = -ENOMEM;
2550 goto out_err;
2551 }
2552
2553 per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
2554 }
2555
2556 /*
2557 * Allocate and initialize pages for Hypervisor-mode percpu regions.
2558 */
2559 for_each_possible_cpu(cpu) {
2560 struct page *page;
2561 void *page_addr;
2562
2563 page = alloc_pages(GFP_KERNEL, nvhe_percpu_order());
2564 if (!page) {
2565 err = -ENOMEM;
2566 goto out_err;
2567 }
2568
2569 page_addr = page_address(page);
2570 memcpy(page_addr, CHOOSE_NVHE_SYM(__per_cpu_start), nvhe_percpu_size());
2571 kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu] = (unsigned long)page_addr;
2572 }
2573
2574 /*
2575 * Map the Hyp-code called directly from the host
2576 */
2577 err = create_hyp_mappings(kvm_ksym_ref(__hyp_text_start),
2578 kvm_ksym_ref(__hyp_text_end), PAGE_HYP_EXEC);
2579 if (err) {
2580 kvm_err("Cannot map world-switch code\n");
2581 goto out_err;
2582 }
2583
2584 err = create_hyp_mappings(kvm_ksym_ref(__hyp_rodata_start),
2585 kvm_ksym_ref(__hyp_rodata_end), PAGE_HYP_RO);
2586 if (err) {
2587 kvm_err("Cannot map .hyp.rodata section\n");
2588 goto out_err;
2589 }
2590
2591 err = create_hyp_mappings(kvm_ksym_ref(__start_rodata),
2592 kvm_ksym_ref(__end_rodata), PAGE_HYP_RO);
2593 if (err) {
2594 kvm_err("Cannot map rodata section\n");
2595 goto out_err;
2596 }
2597
2598 /*
2599 * .hyp.bss is guaranteed to be placed at the beginning of the .bss
2600 * section thanks to an assertion in the linker script. Map it RW and
2601 * the rest of .bss RO.
2602 */
2603 err = create_hyp_mappings(kvm_ksym_ref(__hyp_bss_start),
2604 kvm_ksym_ref(__hyp_bss_end), PAGE_HYP);
2605 if (err) {
2606 kvm_err("Cannot map hyp bss section: %d\n", err);
2607 goto out_err;
2608 }
2609
2610 err = create_hyp_mappings(kvm_ksym_ref(__hyp_bss_end),
2611 kvm_ksym_ref(__bss_stop), PAGE_HYP_RO);
2612 if (err) {
2613 kvm_err("Cannot map bss section\n");
2614 goto out_err;
2615 }
2616
2617 /*
2618 * Map the Hyp stack pages
2619 */
2620 for_each_possible_cpu(cpu) {
2621 struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
2622 char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
2623
2624 err = create_hyp_stack(__pa(stack_page), &params->stack_hyp_va);
2625 if (err) {
2626 kvm_err("Cannot map hyp stack\n");
2627 goto out_err;
2628 }
2629
2630 /*
2631 * Save the stack PA in nvhe_init_params. This will be needed
2632 * to recreate the stack mapping in protected nVHE mode.
2633 * __hyp_pa() won't do the right thing there, since the stack
2634 * has been mapped in the flexible private VA space.
2635 */
2636 params->stack_pa = __pa(stack_page);
2637 }
2638
2639 for_each_possible_cpu(cpu) {
2640 char *percpu_begin = (char *)kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu];
2641 char *percpu_end = percpu_begin + nvhe_percpu_size();
2642
2643 /* Map Hyp percpu pages */
2644 err = create_hyp_mappings(percpu_begin, percpu_end, PAGE_HYP);
2645 if (err) {
2646 kvm_err("Cannot map hyp percpu region\n");
2647 goto out_err;
2648 }
2649
2650 /* Prepare the CPU initialization parameters */
2651 cpu_prepare_hyp_mode(cpu, hyp_va_bits);
2652 }
2653
2654 kvm_hyp_init_symbols();
2655
2656 if (is_protected_kvm_enabled()) {
2657 if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL) &&
2658 cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH))
2659 pkvm_hyp_init_ptrauth();
2660
2661 init_cpu_logical_map();
2662
2663 if (!init_psci_relay()) {
2664 err = -ENODEV;
2665 goto out_err;
2666 }
2667
2668 err = init_pkvm_host_sve_state();
2669 if (err)
2670 goto out_err;
2671
2672 err = kvm_hyp_init_protection(hyp_va_bits);
2673 if (err) {
2674 kvm_err("Failed to init hyp memory protection\n");
2675 goto out_err;
2676 }
2677 }
2678
2679 return 0;
2680
2681 out_err:
2682 teardown_hyp_mode();
2683 kvm_err("error initializing Hyp mode: %d\n", err);
2684 return err;
2685 }
2686
2687 struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
2688 {
2689 struct kvm_vcpu *vcpu = NULL;
2690 struct kvm_mpidr_data *data;
2691 unsigned long i;
2692
2693 mpidr &= MPIDR_HWID_BITMASK;
2694
2695 rcu_read_lock();
2696 data = rcu_dereference(kvm->arch.mpidr_data);
2697
2698 if (data) {
2699 u16 idx = kvm_mpidr_index(data, mpidr);
2700
2701 vcpu = kvm_get_vcpu(kvm, data->cmpidr_to_idx[idx]);
2702 if (mpidr != kvm_vcpu_get_mpidr_aff(vcpu))
2703 vcpu = NULL;
2704 }
2705
2706 rcu_read_unlock();
2707
2708 if (vcpu)
2709 return vcpu;
2710
2711 kvm_for_each_vcpu(i, vcpu, kvm) {
2712 if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
2713 return vcpu;
2714 }
2715 return NULL;
2716 }
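/*
 * MPIDR affinity layout, for reference when reading the lookup above:
 * MPIDR_HWID_BITMASK keeps only the affinity fields, with Aff0 in bits
 * [7:0], Aff1 in [15:8], Aff2 in [23:16] and Aff3 in [39:32]. As a worked
 * example, a vCPU with Aff1 == 1 and Aff0 == 2 is matched against the
 * value 0x102.
 */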
2717
2718 bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
2719 {
2720 return irqchip_in_kernel(kvm);
2721 }
2722
2723 bool kvm_arch_has_irq_bypass(void)
2724 {
2725 return true;
2726 }
2727
2728 int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
2729 struct irq_bypass_producer *prod)
2730 {
2731 struct kvm_kernel_irqfd *irqfd =
2732 container_of(cons, struct kvm_kernel_irqfd, consumer);
2733
2734 return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq,
2735 &irqfd->irq_entry);
2736 }
2737 void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
2738 struct irq_bypass_producer *prod)
2739 {
2740 struct kvm_kernel_irqfd *irqfd =
2741 container_of(cons, struct kvm_kernel_irqfd, consumer);
2742
2743 kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq,
2744 &irqfd->irq_entry);
2745 }
2746
2747 void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *cons)
2748 {
2749 struct kvm_kernel_irqfd *irqfd =
2750 container_of(cons, struct kvm_kernel_irqfd, consumer);
2751
2752 kvm_arm_halt_guest(irqfd->kvm);
2753 }
2754
2755 void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *cons)
2756 {
2757 struct kvm_kernel_irqfd *irqfd =
2758 container_of(cons, struct kvm_kernel_irqfd, consumer);
2759
2760 kvm_arm_resume_guest(irqfd->kvm);
2761 }
2762
2763 /* Initialize Hyp-mode and memory mappings on all CPUs */
2764 static __init int kvm_arm_init(void)
2765 {
2766 int err;
2767 bool in_hyp_mode;
2768
2769 if (!is_hyp_mode_available()) {
2770 kvm_info("HYP mode not available\n");
2771 return -ENODEV;
2772 }
2773
2774 if (kvm_get_mode() == KVM_MODE_NONE) {
2775 kvm_info("KVM disabled from command line\n");
2776 return -ENODEV;
2777 }
2778
2779 err = kvm_sys_reg_table_init();
2780 if (err) {
2781 kvm_info("Error initializing system register tables");
2782 return err;
2783 }
2784
2785 in_hyp_mode = is_kernel_in_hyp_mode();
2786
2787 if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) ||
2788 cpus_have_final_cap(ARM64_WORKAROUND_1508412))
2789 kvm_info("Guests without required CPU erratum workarounds can deadlock system!\n" \
2790 "Only trusted guests should be used on this system.\n");
2791
2792 err = kvm_set_ipa_limit();
2793 if (err)
2794 return err;
2795
2796 err = kvm_arm_init_sve();
2797 if (err)
2798 return err;
2799
2800 err = kvm_arm_vmid_alloc_init();
2801 if (err) {
2802 kvm_err("Failed to initialize VMID allocator.\n");
2803 return err;
2804 }
2805
2806 if (!in_hyp_mode) {
2807 err = init_hyp_mode();
2808 if (err)
2809 goto out_err;
2810 }
2811
2812 err = kvm_init_vector_slots();
2813 if (err) {
2814 kvm_err("Cannot initialise vector slots\n");
2815 goto out_hyp;
2816 }
2817
2818 err = init_subsystems();
2819 if (err)
2820 goto out_hyp;
2821
2822 kvm_info("%s%sVHE mode initialized successfully\n",
2823 in_hyp_mode ? "" : (is_protected_kvm_enabled() ?
2824 "Protected " : "Hyp "),
2825 in_hyp_mode ? "" : (cpus_have_final_cap(ARM64_KVM_HVHE) ?
2826 "h" : "n"));
2827
2828 /*
2829 * FIXME: Do something reasonable if kvm_init() fails after pKVM
2830 * hypervisor protection is finalized.
2831 */
2832 err = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
2833 if (err)
2834 goto out_subs;
2835
2836 /*
2837 * This should be called after initialization is done and failure isn't
2838 * possible anymore.
2839 */
2840 if (!in_hyp_mode)
2841 finalize_init_hyp_mode();
2842
2843 kvm_arm_initialised = true;
2844
2845 return 0;
2846
2847 out_subs:
2848 teardown_subsystems();
2849 out_hyp:
2850 if (!in_hyp_mode)
2851 teardown_hyp_mode();
2852 out_err:
2853 kvm_arm_vmid_alloc_free();
2854 return err;
2855 }
2856
2857 static int __init early_kvm_mode_cfg(char *arg)
2858 {
2859 if (!arg)
2860 return -EINVAL;
2861
2862 if (strcmp(arg, "none") == 0) {
2863 kvm_mode = KVM_MODE_NONE;
2864 return 0;
2865 }
2866
2867 if (!is_hyp_mode_available()) {
2868 pr_warn_once("KVM is not available. Ignoring kvm-arm.mode\n");
2869 return 0;
2870 }
2871
2872 if (strcmp(arg, "protected") == 0) {
2873 if (!is_kernel_in_hyp_mode())
2874 kvm_mode = KVM_MODE_PROTECTED;
2875 else
2876 pr_warn_once("Protected KVM not available with VHE\n");
2877
2878 return 0;
2879 }
2880
2881 if (strcmp(arg, "nvhe") == 0 && !WARN_ON(is_kernel_in_hyp_mode())) {
2882 kvm_mode = KVM_MODE_DEFAULT;
2883 return 0;
2884 }
2885
2886 if (strcmp(arg, "nested") == 0 && !WARN_ON(!is_kernel_in_hyp_mode())) {
2887 kvm_mode = KVM_MODE_NV;
2888 return 0;
2889 }
2890
2891 return -EINVAL;
2892 }
2893 early_param("kvm-arm.mode", early_kvm_mode_cfg);
2894
2895 static int __init early_kvm_wfx_trap_policy_cfg(char *arg, enum kvm_wfx_trap_policy *p)
2896 {
2897 if (!arg)
2898 return -EINVAL;
2899
2900 if (strcmp(arg, "trap") == 0) {
2901 *p = KVM_WFX_TRAP;
2902 return 0;
2903 }
2904
2905 if (strcmp(arg, "notrap") == 0) {
2906 *p = KVM_WFX_NOTRAP;
2907 return 0;
2908 }
2909
2910 return -EINVAL;
2911 }
2912
2913 static int __init early_kvm_wfi_trap_policy_cfg(char *arg)
2914 {
2915 return early_kvm_wfx_trap_policy_cfg(arg, &kvm_wfi_trap_policy);
2916 }
2917 early_param("kvm-arm.wfi_trap_policy", early_kvm_wfi_trap_policy_cfg);
2918
2919 static int __init early_kvm_wfe_trap_policy_cfg(char *arg)
2920 {
2921 return early_kvm_wfx_trap_policy_cfg(arg, &kvm_wfe_trap_policy);
2922 }
2923 early_param("kvm-arm.wfe_trap_policy", early_kvm_wfe_trap_policy_cfg);
2924
2925 enum kvm_mode kvm_get_mode(void)
2926 {
2927 return kvm_mode;
2928 }
2929
2930 module_init(kvm_arm_init);
2931