// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021 Google LLC
 * Author: Fuad Tabba <tabba@google.com>
 */

#include <linux/kvm_host.h>
#include <linux/mm.h>

#include <asm/kvm_emulate.h>

#include <nvhe/mem_protect.h>
#include <nvhe/memory.h>
#include <nvhe/pkvm.h>
#include <nvhe/trap_handler.h>

/* Used by icache_is_aliasing(). */
unsigned long __icache_flags;

/* Used by kvm_get_vttbr(). */
unsigned int kvm_arm_vmid_bits;

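/*
 * The maximum SVE vector length usable by the host, used to cap the
 * vector length of guests (see pkvm_vcpu_init_sve()).
 */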
unsigned int kvm_host_sve_max_vl;

/*
 * The currently loaded hyp vCPU for each physical CPU. Used in protected mode
 * for both protected and non-protected VMs.
 */
static DEFINE_PER_CPU(struct pkvm_hyp_vcpu *, loaded_hyp_vcpu);

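/*
 * Reset the vCPU's HCR_EL2 to the KVM guest defaults, then enable the
 * additional traps and features implied by the vCPU's configuration
 * (RAS, FWB, cache id traps, pointer auth, MTE).
 */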
static void pkvm_vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;

	if (has_hvhe())
		vcpu->arch.hcr_el2 |= HCR_E2H;

	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) {
		/* route synchronous external abort exceptions to EL2 */
		vcpu->arch.hcr_el2 |= HCR_TEA;
		/* trap error record accesses */
		vcpu->arch.hcr_el2 |= HCR_TERR;
	}

	if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
		vcpu->arch.hcr_el2 |= HCR_FWB;

	if (cpus_have_final_cap(ARM64_HAS_EVT) &&
	    !cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE) &&
	    kvm_read_vm_id_reg(vcpu->kvm, SYS_CTR_EL0) == read_cpuid(CTR_EL0))
		vcpu->arch.hcr_el2 |= HCR_TID4;
	else
		vcpu->arch.hcr_el2 |= HCR_TID2;

	if (vcpu_has_ptrauth(vcpu))
		vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);

	if (kvm_has_mte(vcpu->kvm))
		vcpu->arch.hcr_el2 |= HCR_ATA;
}

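/*
 * Set up the HCR_EL2 traps for a protected VM: force AArch64, trap the
 * feature id and implementation-defined register spaces, and clear any
 * enables for features the VM does not support.
 */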
static void pvm_init_traps_hcr(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	u64 val = vcpu->arch.hcr_el2;

	/* No support for AArch32. */
	val |= HCR_RW;

	/*
	 * Always trap:
	 * - Feature id registers: to control features exposed to guests
	 * - Implementation-defined features
	 */
	val |= HCR_TACR | HCR_TIDCP | HCR_TID3 | HCR_TID1;

	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP)) {
		val |= HCR_TERR | HCR_TEA;
		val &= ~(HCR_FIEN);
	}

	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, IMP))
		val &= ~(HCR_AMVOFFEN);

	if (!kvm_has_mte(kvm)) {
		val |= HCR_TID5;
		val &= ~(HCR_DCT | HCR_ATA);
	}

	if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, LO, IMP))
		val |= HCR_TLOR;

	vcpu->arch.hcr_el2 = val;
}

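/*
 * Set up the MDCR_EL2 debug traps for a protected VM, trapping access to
 * any debug, PMU, SPE, or trace feature that is not exposed to the VM.
 */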
static void pvm_init_traps_mdcr(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	u64 val = vcpu->arch.mdcr_el2;

	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMUVer, IMP)) {
		val |= MDCR_EL2_TPM | MDCR_EL2_TPMCR;
		val &= ~(MDCR_EL2_HPME | MDCR_EL2_MTPME | MDCR_EL2_HPMN_MASK);
	}

	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, DebugVer, IMP))
		val |= MDCR_EL2_TDRA | MDCR_EL2_TDA;

	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, DoubleLock, IMP))
		val |= MDCR_EL2_TDOSA;

	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMSVer, IMP)) {
		val |= MDCR_EL2_TPMS;
		val &= ~MDCR_EL2_E2PB_MASK;
	}

	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceFilt, IMP))
		val |= MDCR_EL2_TTRF;

	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceBuffer, IMP))
		val &= ~MDCR_EL2_E2TB_MASK;

	/* Trap Debug Communications Channel registers */
	if (!kvm_has_feat(kvm, ID_AA64MMFR0_EL1, FGT, IMP))
		val |= MDCR_EL2_TDCC;

	vcpu->arch.mdcr_el2 = val;
}

/*
 * Check that cpu features that are neither trapped nor supported are not
 * enabled for protected VMs.
 */
static int pkvm_check_pvm_cpu_features(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;

	/* No AArch32 support for protected guests. */
	if (kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL0, AARCH32) ||
	    kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL1, AARCH32))
		return -EINVAL;

	/*
	 * Linux guests assume support for floating-point and Advanced SIMD. Do
	 * not change the trapping behavior for these from the KVM default.
	 */
	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, FP, IMP) ||
	    !kvm_has_feat(kvm, ID_AA64PFR0_EL1, AdvSIMD, IMP))
		return -EINVAL;

	/* No SME support in KVM right now. Check to catch if it changes. */
	if (kvm_has_feat(kvm, ID_AA64PFR1_EL1, SME, IMP))
		return -EINVAL;

	return 0;
}

/*
 * Initialize trap register values in protected mode.
 */
static int pkvm_vcpu_init_traps(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;
	int ret;

	vcpu->arch.mdcr_el2 = 0;

	pkvm_vcpu_reset_hcr(vcpu);

	if (!pkvm_hyp_vcpu_is_protected(hyp_vcpu)) {
		struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;

		/* Trust the host for non-protected vcpu features. */
		vcpu->arch.hcrx_el2 = host_vcpu->arch.hcrx_el2;
		return 0;
	}

	ret = pkvm_check_pvm_cpu_features(vcpu);
	if (ret)
		return ret;

	pvm_init_traps_hcr(vcpu);
	pvm_init_traps_mdcr(vcpu);
	vcpu_set_hcrx(vcpu);

	return 0;
}

/*
 * Start VM table handles at this offset rather than at 0,
 * mainly for sanity checking and debugging.
 */
#define HANDLE_OFFSET 0x1000

/*
 * Marks a reserved but not yet used entry in the VM table.
 */
#define RESERVED_ENTRY ((void *)0xa110ca7ed)

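/* Convert between a VM handle and its index in the VM table. */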
static unsigned int vm_handle_to_idx(pkvm_handle_t handle)
{
	return handle - HANDLE_OFFSET;
}

static pkvm_handle_t idx_to_vm_handle(unsigned int idx)
{
	return idx + HANDLE_OFFSET;
}

/*
 * Spinlock for protecting state related to the VM table. Protects writes
 * to 'vm_table', 'nr_table_entries', and other per-vm state on initialization.
 * Also protects reads and writes to 'last_hyp_vcpu_lookup'.
 */
DEFINE_HYP_SPINLOCK(vm_table_lock);

/*
 * A table that tracks all VMs in protected mode.
 * Allocated during hyp initialization and setup.
 */
static struct pkvm_hyp_vm **vm_table;

void pkvm_hyp_vm_table_init(void *tbl)
{
	WARN_ON(vm_table);
	vm_table = tbl;
}

/*
 * Return the hyp vm structure corresponding to the handle.
 */
static struct pkvm_hyp_vm *get_vm_by_handle(pkvm_handle_t handle)
{
	unsigned int idx = vm_handle_to_idx(handle);

	if (unlikely(idx >= KVM_MAX_PVMS))
		return NULL;

	/* A reserved entry doesn't represent an initialized VM. */
	if (unlikely(vm_table[idx] == RESERVED_ENTRY))
		return NULL;

	return vm_table[idx];
}

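/*
 * Mark the given vCPU as loaded on this physical CPU and take a reference
 * on its VM. Fails (returns NULL) if another vCPU is already loaded on this
 * CPU, or if the vCPU is already loaded elsewhere.
 */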
struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
					 unsigned int vcpu_idx)
{
	struct pkvm_hyp_vcpu *hyp_vcpu = NULL;
	struct pkvm_hyp_vm *hyp_vm;

	/* Cannot load a new vcpu without putting the old one first. */
	if (__this_cpu_read(loaded_hyp_vcpu))
		return NULL;

	hyp_spin_lock(&vm_table_lock);
	hyp_vm = get_vm_by_handle(handle);
	if (!hyp_vm || hyp_vm->kvm.created_vcpus <= vcpu_idx)
		goto unlock;

	hyp_vcpu = hyp_vm->vcpus[vcpu_idx];
	if (!hyp_vcpu)
		goto unlock;

	/* Ensure vcpu isn't loaded on more than one cpu simultaneously. */
	if (unlikely(hyp_vcpu->loaded_hyp_vcpu)) {
		hyp_vcpu = NULL;
		goto unlock;
	}

	hyp_vcpu->loaded_hyp_vcpu = this_cpu_ptr(&loaded_hyp_vcpu);
	hyp_page_ref_inc(hyp_virt_to_page(hyp_vm));
unlock:
	hyp_spin_unlock(&vm_table_lock);

	if (hyp_vcpu)
		__this_cpu_write(loaded_hyp_vcpu, hyp_vcpu);
	return hyp_vcpu;
}

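/*
 * Unload the vCPU from this physical CPU and drop the VM reference taken
 * by pkvm_load_hyp_vcpu().
 */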
void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	struct pkvm_hyp_vm *hyp_vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu);

	hyp_spin_lock(&vm_table_lock);
	hyp_vcpu->loaded_hyp_vcpu = NULL;
	__this_cpu_write(loaded_hyp_vcpu, NULL);
	hyp_page_ref_dec(hyp_virt_to_page(hyp_vm));
	hyp_spin_unlock(&vm_table_lock);
}

struct pkvm_hyp_vcpu *pkvm_get_loaded_hyp_vcpu(void)
{
	return __this_cpu_read(loaded_hyp_vcpu);
}

struct pkvm_hyp_vm *get_pkvm_hyp_vm(pkvm_handle_t handle)
{
	struct pkvm_hyp_vm *hyp_vm;

	hyp_spin_lock(&vm_table_lock);
	hyp_vm = get_vm_by_handle(handle);
	if (hyp_vm)
		hyp_page_ref_inc(hyp_virt_to_page(hyp_vm));
	hyp_spin_unlock(&vm_table_lock);

	return hyp_vm;
}

void put_pkvm_hyp_vm(struct pkvm_hyp_vm *hyp_vm)
{
	hyp_spin_lock(&vm_table_lock);
	hyp_page_ref_dec(hyp_virt_to_page(hyp_vm));
	hyp_spin_unlock(&vm_table_lock);
}

struct pkvm_hyp_vm *get_np_pkvm_hyp_vm(pkvm_handle_t handle)
{
	struct pkvm_hyp_vm *hyp_vm = get_pkvm_hyp_vm(handle);

	if (hyp_vm && pkvm_hyp_vm_is_protected(hyp_vm)) {
		put_pkvm_hyp_vm(hyp_vm);
		hyp_vm = NULL;
	}

	return hyp_vm;
}

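/*
 * Initialize the hyp VM's feature flags from the host's. Non-protected VMs
 * inherit the host's configuration as-is; protected VMs are restricted to
 * the set of features pKVM allows.
 */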
static void pkvm_init_features_from_host(struct pkvm_hyp_vm *hyp_vm, const struct kvm *host_kvm)
{
	struct kvm *kvm = &hyp_vm->kvm;
	unsigned long host_arch_flags = READ_ONCE(host_kvm->arch.flags);
	DECLARE_BITMAP(allowed_features, KVM_VCPU_MAX_FEATURES);

	/* CTR_EL0 is always under host control, even for protected VMs. */
	hyp_vm->kvm.arch.ctr_el0 = host_kvm->arch.ctr_el0;

	/* Preserve the vgic model so that GICv3 emulation works */
	hyp_vm->kvm.arch.vgic.vgic_model = host_kvm->arch.vgic.vgic_model;

	/* No restrictions for non-protected VMs. */
	if (!kvm_vm_is_protected(kvm)) {
		hyp_vm->kvm.arch.flags = host_arch_flags;
		hyp_vm->kvm.arch.flags &= ~BIT_ULL(KVM_ARCH_FLAG_ID_REGS_INITIALIZED);

		bitmap_copy(kvm->arch.vcpu_features,
			    host_kvm->arch.vcpu_features,
			    KVM_VCPU_MAX_FEATURES);

		if (test_bit(KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS, &host_arch_flags))
			hyp_vm->kvm.arch.midr_el1 = host_kvm->arch.midr_el1;

		return;
	}

	if (kvm_pkvm_ext_allowed(kvm, KVM_CAP_ARM_MTE))
		kvm->arch.flags |= host_arch_flags & BIT(KVM_ARCH_FLAG_MTE_ENABLED);

	bitmap_zero(allowed_features, KVM_VCPU_MAX_FEATURES);

	set_bit(KVM_ARM_VCPU_PSCI_0_2, allowed_features);

	if (kvm_pkvm_ext_allowed(kvm, KVM_CAP_ARM_PMU_V3))
		set_bit(KVM_ARM_VCPU_PMU_V3, allowed_features);

	if (kvm_pkvm_ext_allowed(kvm, KVM_CAP_ARM_PTRAUTH_ADDRESS))
		set_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, allowed_features);

	if (kvm_pkvm_ext_allowed(kvm, KVM_CAP_ARM_PTRAUTH_GENERIC))
		set_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, allowed_features);

	if (kvm_pkvm_ext_allowed(kvm, KVM_CAP_ARM_SVE)) {
		set_bit(KVM_ARM_VCPU_SVE, allowed_features);
		kvm->arch.flags |= host_arch_flags & BIT(KVM_ARCH_FLAG_GUEST_HAS_SVE);
	}

	bitmap_and(kvm->arch.vcpu_features, host_kvm->arch.vcpu_features,
		   allowed_features, KVM_VCPU_MAX_FEATURES);
}

static void unpin_host_vcpu(struct kvm_vcpu *host_vcpu)
{
	if (host_vcpu)
		hyp_unpin_shared_mem(host_vcpu, host_vcpu + 1);
}

static void unpin_host_sve_state(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	void *sve_state;

	if (!vcpu_has_feature(&hyp_vcpu->vcpu, KVM_ARM_VCPU_SVE))
		return;

	sve_state = hyp_vcpu->vcpu.arch.sve_state;
	hyp_unpin_shared_mem(sve_state,
			     sve_state + vcpu_sve_state_size(&hyp_vcpu->vcpu));
}

static void unpin_host_vcpus(struct pkvm_hyp_vcpu *hyp_vcpus[],
			     unsigned int nr_vcpus)
{
	int i;

	for (i = 0; i < nr_vcpus; i++) {
		struct pkvm_hyp_vcpu *hyp_vcpu = hyp_vcpus[i];

		if (!hyp_vcpu)
			continue;

		unpin_host_vcpu(hyp_vcpu->host_vcpu);
		unpin_host_sve_state(hyp_vcpu);
	}
}

static void init_pkvm_hyp_vm(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm,
			     unsigned int nr_vcpus, pkvm_handle_t handle)
{
	struct kvm_s2_mmu *mmu = &hyp_vm->kvm.arch.mmu;
	int idx = vm_handle_to_idx(handle);

	hyp_vm->kvm.arch.pkvm.handle = handle;

	hyp_vm->host_kvm = host_kvm;
	hyp_vm->kvm.created_vcpus = nr_vcpus;
	hyp_vm->kvm.arch.pkvm.is_protected = READ_ONCE(host_kvm->arch.pkvm.is_protected);
	hyp_vm->kvm.arch.pkvm.is_created = true;
	hyp_vm->kvm.arch.flags = 0;
	pkvm_init_features_from_host(hyp_vm, host_kvm);

	/* VMID 0 is reserved for the host */
	atomic64_set(&mmu->vmid.id, idx + 1);

	mmu->vtcr = host_mmu.arch.mmu.vtcr;
	mmu->arch = &hyp_vm->kvm.arch;
	mmu->pgt = &hyp_vm->pgt;
}

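/*
 * Initialize the vCPU's SVE state using the buffer shared by the host,
 * pinning it so the host cannot unmap it while hyp is using it. Clears the
 * SVE feature bit for the whole VM if the host's state is unusable.
 */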
static int pkvm_vcpu_init_sve(struct pkvm_hyp_vcpu *hyp_vcpu, struct kvm_vcpu *host_vcpu)
{
	struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;
	unsigned int sve_max_vl;
	size_t sve_state_size;
	void *sve_state;
	int ret = 0;

	if (!vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE)) {
		vcpu_clear_flag(vcpu, VCPU_SVE_FINALIZED);
		return 0;
	}

	/* Limit guest vector length to the maximum supported by the host. */
	sve_max_vl = min(READ_ONCE(host_vcpu->arch.sve_max_vl), kvm_host_sve_max_vl);
	sve_state_size = sve_state_size_from_vl(sve_max_vl);
	sve_state = kern_hyp_va(READ_ONCE(host_vcpu->arch.sve_state));

	if (!sve_state || !sve_state_size) {
		ret = -EINVAL;
		goto err;
	}

	ret = hyp_pin_shared_mem(sve_state, sve_state + sve_state_size);
	if (ret)
		goto err;

	vcpu->arch.sve_state = sve_state;
	vcpu->arch.sve_max_vl = sve_max_vl;

	return 0;
err:
	clear_bit(KVM_ARM_VCPU_SVE, vcpu->kvm->arch.vcpu_features);
	return ret;
}

static int vm_copy_id_regs(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	struct pkvm_hyp_vm *hyp_vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu);
	const struct kvm *host_kvm = hyp_vm->host_kvm;
	struct kvm *kvm = &hyp_vm->kvm;

	if (!test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &host_kvm->arch.flags))
		return -EINVAL;

	if (test_and_set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags))
		return 0;

	memcpy(kvm->arch.id_regs, host_kvm->arch.id_regs, sizeof(kvm->arch.id_regs));

	return 0;
}

static int pkvm_vcpu_init_sysregs(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	int ret = 0;

	if (pkvm_hyp_vcpu_is_protected(hyp_vcpu))
		kvm_init_pvm_id_regs(&hyp_vcpu->vcpu);
	else
		ret = vm_copy_id_regs(hyp_vcpu);

	return ret;
}

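/*
 * Initialize the hyp vCPU: pin the host vCPU, copy the state hyp needs from
 * it, and set up the system registers, traps, and SVE state. Unpins the host
 * vCPU on failure.
 */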
static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
			      struct pkvm_hyp_vm *hyp_vm,
			      struct kvm_vcpu *host_vcpu)
{
	int ret = 0;

	if (hyp_pin_shared_mem(host_vcpu, host_vcpu + 1))
		return -EBUSY;

	hyp_vcpu->host_vcpu = host_vcpu;

	hyp_vcpu->vcpu.kvm = &hyp_vm->kvm;
	hyp_vcpu->vcpu.vcpu_id = READ_ONCE(host_vcpu->vcpu_id);
	hyp_vcpu->vcpu.vcpu_idx = READ_ONCE(host_vcpu->vcpu_idx);

	hyp_vcpu->vcpu.arch.hw_mmu = &hyp_vm->kvm.arch.mmu;
	hyp_vcpu->vcpu.arch.cflags = READ_ONCE(host_vcpu->arch.cflags);
	hyp_vcpu->vcpu.arch.mp_state.mp_state = KVM_MP_STATE_STOPPED;

	ret = pkvm_vcpu_init_sysregs(hyp_vcpu);
	if (ret)
		goto done;

	ret = pkvm_vcpu_init_traps(hyp_vcpu);
	if (ret)
		goto done;

	ret = pkvm_vcpu_init_sve(hyp_vcpu, host_vcpu);
done:
	if (ret)
		unpin_host_vcpu(host_vcpu);
	return ret;
}

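/* Return the index of the first free VM table entry, or -ENOMEM if full. */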
static int find_free_vm_table_entry(void)
{
	int i;

	for (i = 0; i < KVM_MAX_PVMS; ++i) {
		if (!vm_table[i])
			return i;
	}

	return -ENOMEM;
}

/*
 * Reserve a VM table entry.
 *
 * Return the index of the reserved entry on success,
 * negative error code on failure.
 */
static int allocate_vm_table_entry(void)
{
	int idx;

	hyp_assert_lock_held(&vm_table_lock);

	/*
	 * Initializing protected state might have failed, yet a malicious
	 * host could trigger this function. Thus, ensure that 'vm_table'
	 * exists.
	 */
	if (unlikely(!vm_table))
		return -EINVAL;

	idx = find_free_vm_table_entry();
	if (unlikely(idx < 0))
		return idx;

	vm_table[idx] = RESERVED_ENTRY;

	return idx;
}

static int __insert_vm_table_entry(pkvm_handle_t handle,
				   struct pkvm_hyp_vm *hyp_vm)
{
	unsigned int idx;

	hyp_assert_lock_held(&vm_table_lock);

	/*
	 * Initializing protected state might have failed, yet a malicious
	 * host could trigger this function. Thus, ensure that 'vm_table'
	 * exists.
	 */
	if (unlikely(!vm_table))
		return -EINVAL;

	idx = vm_handle_to_idx(handle);
	if (unlikely(idx >= KVM_MAX_PVMS))
		return -EINVAL;

	if (unlikely(vm_table[idx] != RESERVED_ENTRY))
		return -EINVAL;

	vm_table[idx] = hyp_vm;

	return 0;
}

605
606 /*
607 * Insert a pointer to the initialized VM into the VM table.
608 *
609 * Return 0 on success, or negative error code on failure.
610 */
insert_vm_table_entry(pkvm_handle_t handle,struct pkvm_hyp_vm * hyp_vm)611 static int insert_vm_table_entry(pkvm_handle_t handle,
612 struct pkvm_hyp_vm *hyp_vm)
613 {
614 int ret;
615
616 hyp_spin_lock(&vm_table_lock);
617 ret = __insert_vm_table_entry(handle, hyp_vm);
618 hyp_spin_unlock(&vm_table_lock);
619
620 return ret;
621 }
622
623 /*
624 * Deallocate and remove the VM table entry corresponding to the handle.
625 */
remove_vm_table_entry(pkvm_handle_t handle)626 static void remove_vm_table_entry(pkvm_handle_t handle)
627 {
628 hyp_assert_lock_held(&vm_table_lock);
629 vm_table[vm_handle_to_idx(handle)] = NULL;
630 }
631
pkvm_get_hyp_vm_size(unsigned int nr_vcpus)632 static size_t pkvm_get_hyp_vm_size(unsigned int nr_vcpus)
633 {
634 return size_add(sizeof(struct pkvm_hyp_vm),
635 size_mul(sizeof(struct pkvm_hyp_vcpu *), nr_vcpus));
636 }
637
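/*
 * Map memory donated by the host into hyp: transfer ownership of the pages
 * at stage 2 so the host can no longer access them. Returns NULL if the
 * address is not page-aligned or the donation fails.
 */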
static void *map_donated_memory_noclear(unsigned long host_va, size_t size)
{
	void *va = (void *)kern_hyp_va(host_va);

	if (!PAGE_ALIGNED(va))
		return NULL;

	if (__pkvm_host_donate_hyp(hyp_virt_to_pfn(va),
				   PAGE_ALIGN(size) >> PAGE_SHIFT))
		return NULL;

	return va;
}

static void *map_donated_memory(unsigned long host_va, size_t size)
{
	void *va = map_donated_memory_noclear(host_va, size);

	if (va)
		memset(va, 0, size);

	return va;
}

static void __unmap_donated_memory(void *va, size_t size)
{
	kvm_flush_dcache_to_poc(va, size);
	WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(va),
				       PAGE_ALIGN(size) >> PAGE_SHIFT));
}

static void unmap_donated_memory(void *va, size_t size)
{
	if (!va)
		return;

	memset(va, 0, size);
	__unmap_donated_memory(va, size);
}

static void unmap_donated_memory_noclear(void *va, size_t size)
{
	if (!va)
		return;

	__unmap_donated_memory(va, size);
}

685
686 /*
687 * Reserves an entry in the hypervisor for a new VM in protected mode.
688 *
689 * Return a unique handle to the VM on success, negative error code on failure.
690 */
__pkvm_reserve_vm(void)691 int __pkvm_reserve_vm(void)
692 {
693 int ret;
694
695 hyp_spin_lock(&vm_table_lock);
696 ret = allocate_vm_table_entry();
697 hyp_spin_unlock(&vm_table_lock);
698
699 if (ret < 0)
700 return ret;
701
702 return idx_to_vm_handle(ret);
703 }
704
/*
 * Removes a reserved entry, but only if it hasn't been used yet.
 * Otherwise, the VM needs to be destroyed.
 */
void __pkvm_unreserve_vm(pkvm_handle_t handle)
{
	unsigned int idx = vm_handle_to_idx(handle);

	if (unlikely(!vm_table))
		return;

	hyp_spin_lock(&vm_table_lock);
	if (likely(idx < KVM_MAX_PVMS && vm_table[idx] == RESERVED_ENTRY))
		remove_vm_table_entry(handle);
	hyp_spin_unlock(&vm_table_lock);
}

/*
 * Initialize the hypervisor copy of the VM state using host-donated memory.
 *
 * Unmap the donated memory from the host at stage 2.
 *
 * host_kvm: A pointer to the host's struct kvm.
 * vm_hva:   The host va of the area being donated for the VM state.
 *           Must be page aligned.
 * pgd_hva:  The host va of the area being donated for the stage-2 PGD for
 *           the VM. Must be page aligned. Its size is implied by the VM's
 *           VTCR.
 *
 * Return 0 on success, negative error code on failure.
 */
int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
		   unsigned long pgd_hva)
{
	struct pkvm_hyp_vm *hyp_vm = NULL;
	size_t vm_size, pgd_size;
	unsigned int nr_vcpus;
	pkvm_handle_t handle;
	void *pgd = NULL;
	int ret;

	ret = hyp_pin_shared_mem(host_kvm, host_kvm + 1);
	if (ret)
		return ret;

	nr_vcpus = READ_ONCE(host_kvm->created_vcpus);
	if (nr_vcpus < 1) {
		ret = -EINVAL;
		goto err_unpin_kvm;
	}

	handle = READ_ONCE(host_kvm->arch.pkvm.handle);
	if (unlikely(handle < HANDLE_OFFSET)) {
		ret = -EINVAL;
		goto err_unpin_kvm;
	}

	vm_size = pkvm_get_hyp_vm_size(nr_vcpus);
	pgd_size = kvm_pgtable_stage2_pgd_size(host_mmu.arch.mmu.vtcr);

	ret = -ENOMEM;

	hyp_vm = map_donated_memory(vm_hva, vm_size);
	if (!hyp_vm)
		goto err_remove_mappings;

	pgd = map_donated_memory_noclear(pgd_hva, pgd_size);
	if (!pgd)
		goto err_remove_mappings;

	init_pkvm_hyp_vm(host_kvm, hyp_vm, nr_vcpus, handle);

	ret = kvm_guest_prepare_stage2(hyp_vm, pgd);
	if (ret)
		goto err_remove_mappings;

	/* Must be called last since this publishes the VM. */
	ret = insert_vm_table_entry(handle, hyp_vm);
	if (ret)
		goto err_remove_mappings;

	return 0;

err_remove_mappings:
	unmap_donated_memory(hyp_vm, vm_size);
	unmap_donated_memory(pgd, pgd_size);
err_unpin_kvm:
	hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
	return ret;
}


/*
 * Initialize the hypervisor copy of the vCPU state using host-donated memory.
 *
 * handle:    The hypervisor handle for the VM.
 * host_vcpu: A pointer to the corresponding host vcpu.
 * vcpu_hva:  The host va of the area being donated for the vcpu state.
 *            Must be page aligned. The size of the area must be equal to
 *            the page-aligned size of 'struct pkvm_hyp_vcpu'.
 *
 * Return 0 on success, negative error code on failure.
 */
int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
		     unsigned long vcpu_hva)
{
	struct pkvm_hyp_vcpu *hyp_vcpu;
	struct pkvm_hyp_vm *hyp_vm;
	unsigned int idx;
	int ret;

	hyp_vcpu = map_donated_memory(vcpu_hva, sizeof(*hyp_vcpu));
	if (!hyp_vcpu)
		return -ENOMEM;

	hyp_spin_lock(&vm_table_lock);

	hyp_vm = get_vm_by_handle(handle);
	if (!hyp_vm) {
		ret = -ENOENT;
		goto unlock;
	}

	ret = init_pkvm_hyp_vcpu(hyp_vcpu, hyp_vm, host_vcpu);
	if (ret)
		goto unlock;

	idx = hyp_vcpu->vcpu.vcpu_idx;
	if (idx >= hyp_vm->kvm.created_vcpus) {
		ret = -EINVAL;
		goto unlock;
	}

	if (hyp_vm->vcpus[idx]) {
		ret = -EINVAL;
		goto unlock;
	}

	hyp_vm->vcpus[idx] = hyp_vcpu;
unlock:
	hyp_spin_unlock(&vm_table_lock);

	if (ret)
		unmap_donated_memory(hyp_vcpu, sizeof(*hyp_vcpu));
	return ret;
}

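/*
 * Wipe a range of donated memory, push its pages onto the teardown
 * memcache so the host can reclaim them, and return ownership of the
 * pages to the host.
 */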
static void
teardown_donated_memory(struct kvm_hyp_memcache *mc, void *addr, size_t size)
{
	size = PAGE_ALIGN(size);
	memset(addr, 0, size);

	for (void *start = addr; start < addr + size; start += PAGE_SIZE)
		push_hyp_memcache(mc, start, hyp_virt_to_phys);

	unmap_donated_memory_noclear(addr, size);
}

int __pkvm_teardown_vm(pkvm_handle_t handle)
{
	struct kvm_hyp_memcache *mc, *stage2_mc;
	struct pkvm_hyp_vm *hyp_vm;
	struct kvm *host_kvm;
	unsigned int idx;
	size_t vm_size;
	int err;

	hyp_spin_lock(&vm_table_lock);
	hyp_vm = get_vm_by_handle(handle);
	if (!hyp_vm) {
		err = -ENOENT;
		goto err_unlock;
	}

	if (WARN_ON(hyp_page_count(hyp_vm))) {
		err = -EBUSY;
		goto err_unlock;
	}

	host_kvm = hyp_vm->host_kvm;

	/* Ensure the VMID is clean before it can be reallocated */
	__kvm_tlb_flush_vmid(&hyp_vm->kvm.arch.mmu);
	remove_vm_table_entry(handle);
	hyp_spin_unlock(&vm_table_lock);

	/* Reclaim guest pages (including page-table pages) */
	mc = &host_kvm->arch.pkvm.teardown_mc;
	stage2_mc = &host_kvm->arch.pkvm.stage2_teardown_mc;
	reclaim_pgtable_pages(hyp_vm, stage2_mc);
	unpin_host_vcpus(hyp_vm->vcpus, hyp_vm->kvm.created_vcpus);

	/* Push the metadata pages to the teardown memcache */
	for (idx = 0; idx < hyp_vm->kvm.created_vcpus; ++idx) {
		struct pkvm_hyp_vcpu *hyp_vcpu = hyp_vm->vcpus[idx];
		struct kvm_hyp_memcache *vcpu_mc;

		if (!hyp_vcpu)
			continue;

		vcpu_mc = &hyp_vcpu->vcpu.arch.pkvm_memcache;

		while (vcpu_mc->nr_pages) {
			void *addr = pop_hyp_memcache(vcpu_mc, hyp_phys_to_virt);

			push_hyp_memcache(stage2_mc, addr, hyp_virt_to_phys);
			unmap_donated_memory_noclear(addr, PAGE_SIZE);
		}

		teardown_donated_memory(mc, hyp_vcpu, sizeof(*hyp_vcpu));
	}

	vm_size = pkvm_get_hyp_vm_size(hyp_vm->kvm.created_vcpus);
	teardown_donated_memory(mc, hyp_vm, vm_size);
	hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
	return 0;

err_unlock:
	hyp_spin_unlock(&vm_table_lock);
	return err;
}