// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021 Google LLC
 * Author: Fuad Tabba <tabba@google.com>
 */

#include <linux/kvm_host.h>
#include <linux/mm.h>

#include <asm/kvm_emulate.h>

#include <nvhe/mem_protect.h>
#include <nvhe/memory.h>
#include <nvhe/pkvm.h>
#include <nvhe/trap_handler.h>

/* Used by icache_is_aliasing(). */
unsigned long __icache_flags;

/* Used by kvm_get_vttbr(). */
unsigned int kvm_arm_vmid_bits;

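/* The host's maximum SVE vector length, used to cap guest vector lengths. */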
unsigned int kvm_host_sve_max_vl;

/*
 * The currently loaded hyp vCPU for each physical CPU. Used in protected mode
 * for both protected and non-protected VMs.
 */
static DEFINE_PER_CPU(struct pkvm_hyp_vcpu *, loaded_hyp_vcpu);

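/*
 * Reset the vCPU's HCR_EL2 to the KVM guest defaults, adjusted for the
 * capabilities of the physical CPU and the features of the VM.
 */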
static void pkvm_vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;

	if (has_hvhe())
		vcpu->arch.hcr_el2 |= HCR_E2H;

	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) {
		/* route synchronous external abort exceptions to EL2 */
		vcpu->arch.hcr_el2 |= HCR_TEA;
		/* trap error record accesses */
		vcpu->arch.hcr_el2 |= HCR_TERR;
	}

	if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
		vcpu->arch.hcr_el2 |= HCR_FWB;

	if (cpus_have_final_cap(ARM64_HAS_EVT) &&
	    !cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE) &&
	    kvm_read_vm_id_reg(vcpu->kvm, SYS_CTR_EL0) == read_cpuid(CTR_EL0))
		vcpu->arch.hcr_el2 |= HCR_TID4;
	else
		vcpu->arch.hcr_el2 |= HCR_TID2;

	if (vcpu_has_ptrauth(vcpu))
		vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);

	if (kvm_has_mte(vcpu->kvm))
		vcpu->arch.hcr_el2 |= HCR_ATA;
}

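/*
 * Additional HCR_EL2 trap configuration for protected VMs, based on the
 * features exposed in the VM's ID registers.
 */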
static void pvm_init_traps_hcr(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	u64 val = vcpu->arch.hcr_el2;

	/* No support for AArch32. */
	val |= HCR_RW;

	/*
	 * Always trap:
	 * - Feature ID registers: to control features exposed to guests
	 * - Implementation-defined features
	 */
	val |= HCR_TACR | HCR_TIDCP | HCR_TID3 | HCR_TID1;

	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP)) {
		val |= HCR_TERR | HCR_TEA;
		val &= ~(HCR_FIEN);
	}

	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, IMP))
		val &= ~(HCR_AMVOFFEN);

	if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, MTE, IMP)) {
		val |= HCR_TID5;
		val &= ~(HCR_DCT | HCR_ATA);
	}

	if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, LO, IMP))
		val |= HCR_TLOR;

	vcpu->arch.hcr_el2 = val;
}

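/*
 * MDCR_EL2 trap configuration for protected VMs: trap the debug, PMU,
 * SPE and trace facilities that are not exposed in the VM's ID registers.
 */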
static void pvm_init_traps_mdcr(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	u64 val = vcpu->arch.mdcr_el2;

	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMUVer, IMP)) {
		val |= MDCR_EL2_TPM | MDCR_EL2_TPMCR;
		val &= ~(MDCR_EL2_HPME | MDCR_EL2_MTPME | MDCR_EL2_HPMN_MASK);
	}

	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, DebugVer, IMP))
		val |= MDCR_EL2_TDRA | MDCR_EL2_TDA;

	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, DoubleLock, IMP))
		val |= MDCR_EL2_TDOSA;

	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMSVer, IMP)) {
		val |= MDCR_EL2_TPMS;
		val &= ~MDCR_EL2_E2PB_MASK;
	}

	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceFilt, IMP))
		val |= MDCR_EL2_TTRF;

	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, ExtTrcBuff, IMP))
		val |= MDCR_EL2_E2TB_MASK;

	/* Trap Debug Communications Channel registers */
	if (!kvm_has_feat(kvm, ID_AA64MMFR0_EL1, FGT, IMP))
		val |= MDCR_EL2_TDCC;

	vcpu->arch.mdcr_el2 = val;
}

/*
 * Check that CPU features that are neither trapped nor supported are not
 * enabled for protected VMs.
 */
static int pkvm_check_pvm_cpu_features(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;

	/* No AArch32 support for protected guests. */
	if (kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL0, AARCH32) ||
	    kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL1, AARCH32))
		return -EINVAL;

	/*
	 * Linux guests assume support for floating-point and Advanced SIMD. Do
	 * not change the trapping behavior for these from the KVM default.
	 */
	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, FP, IMP) ||
	    !kvm_has_feat(kvm, ID_AA64PFR0_EL1, AdvSIMD, IMP))
		return -EINVAL;

	/* No SME support in KVM right now. Check to catch if it changes. */
	if (kvm_has_feat(kvm, ID_AA64PFR1_EL1, SME, IMP))
		return -EINVAL;

	return 0;
}

/*
 * Initialize trap register values in protected mode.
 */
static int pkvm_vcpu_init_traps(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;
	int ret;

	vcpu->arch.mdcr_el2 = 0;

	pkvm_vcpu_reset_hcr(vcpu);

	if (!pkvm_hyp_vcpu_is_protected(hyp_vcpu)) {
		struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;

		/* Trust the host for non-protected vcpu features. */
		vcpu->arch.hcrx_el2 = host_vcpu->arch.hcrx_el2;
		return 0;
	}

	ret = pkvm_check_pvm_cpu_features(vcpu);
	if (ret)
		return ret;

	pvm_init_traps_hcr(vcpu);
	pvm_init_traps_mdcr(vcpu);
	vcpu_set_hcrx(vcpu);

	return 0;
}

/*
 * Start the VM table handle at the offset defined instead of at 0.
 * Mainly for sanity checking and debugging.
 */
#define HANDLE_OFFSET 0x1000

/*
 * Marks a reserved but not yet used entry in the VM table.
 */
#define RESERVED_ENTRY ((void *)0xa110ca7ed)

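/*
 * Handles are simply indices into 'vm_table', offset by HANDLE_OFFSET:
 * the first table entry (index 0) corresponds to handle 0x1000.
 */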
static unsigned int vm_handle_to_idx(pkvm_handle_t handle)
{
	return handle - HANDLE_OFFSET;
}

static pkvm_handle_t idx_to_vm_handle(unsigned int idx)
{
	return idx + HANDLE_OFFSET;
}

/*
 * Spinlock for protecting state related to the VM table. Protects writes
 * to 'vm_table' and other per-VM state on initialization.
 */
DEFINE_HYP_SPINLOCK(vm_table_lock);

/*
 * A table that tracks all VMs in protected mode.
 * Allocated during hyp initialization and setup.
 */
static struct pkvm_hyp_vm **vm_table;

void pkvm_hyp_vm_table_init(void *tbl)
{
	WARN_ON(vm_table);
	vm_table = tbl;
}

/*
 * Return the hyp vm structure corresponding to the handle.
 */
static struct pkvm_hyp_vm *get_vm_by_handle(pkvm_handle_t handle)
{
	unsigned int idx = vm_handle_to_idx(handle);

	if (unlikely(idx >= KVM_MAX_PVMS))
		return NULL;

	/* A reserved entry doesn't represent an initialized VM. */
	if (unlikely(vm_table[idx] == RESERVED_ENTRY))
		return NULL;

	return vm_table[idx];
}

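/*
 * Load the hyp vCPU for (handle, vcpu_idx) onto this physical CPU, taking
 * a reference on the VM that is dropped by pkvm_put_hyp_vcpu().
 *
 * Returns NULL if the vCPU doesn't exist, is already loaded on another
 * CPU, or if a vCPU is still loaded on this one.
 */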
struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
					 unsigned int vcpu_idx)
{
	struct pkvm_hyp_vcpu *hyp_vcpu = NULL;
	struct pkvm_hyp_vm *hyp_vm;

	/* Cannot load a new vcpu without putting the old one first. */
	if (__this_cpu_read(loaded_hyp_vcpu))
		return NULL;

	hyp_spin_lock(&vm_table_lock);
	hyp_vm = get_vm_by_handle(handle);
	if (!hyp_vm || hyp_vm->kvm.created_vcpus <= vcpu_idx)
		goto unlock;

	hyp_vcpu = hyp_vm->vcpus[vcpu_idx];
	if (!hyp_vcpu)
		goto unlock;

	/* Ensure vcpu isn't loaded on more than one cpu simultaneously. */
	if (unlikely(hyp_vcpu->loaded_hyp_vcpu)) {
		hyp_vcpu = NULL;
		goto unlock;
	}

	hyp_vcpu->loaded_hyp_vcpu = this_cpu_ptr(&loaded_hyp_vcpu);
	hyp_page_ref_inc(hyp_virt_to_page(hyp_vm));
unlock:
	hyp_spin_unlock(&vm_table_lock);

	if (hyp_vcpu)
		__this_cpu_write(loaded_hyp_vcpu, hyp_vcpu);
	return hyp_vcpu;
}

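/*
 * Put the vCPU loaded on this physical CPU, dropping the VM reference
 * taken by pkvm_load_hyp_vcpu().
 */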
void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	struct pkvm_hyp_vm *hyp_vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu);

	hyp_spin_lock(&vm_table_lock);
	hyp_vcpu->loaded_hyp_vcpu = NULL;
	__this_cpu_write(loaded_hyp_vcpu, NULL);
	hyp_page_ref_dec(hyp_virt_to_page(hyp_vm));
	hyp_spin_unlock(&vm_table_lock);
}

struct pkvm_hyp_vcpu *pkvm_get_loaded_hyp_vcpu(void)
{
	return __this_cpu_read(loaded_hyp_vcpu);
}

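/*
 * Get a reference to the VM corresponding to the handle. Must be matched
 * with a call to put_pkvm_hyp_vm().
 */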
struct pkvm_hyp_vm *get_pkvm_hyp_vm(pkvm_handle_t handle)
{
	struct pkvm_hyp_vm *hyp_vm;

	hyp_spin_lock(&vm_table_lock);
	hyp_vm = get_vm_by_handle(handle);
	if (hyp_vm)
		hyp_page_ref_inc(hyp_virt_to_page(hyp_vm));
	hyp_spin_unlock(&vm_table_lock);

	return hyp_vm;
}

void put_pkvm_hyp_vm(struct pkvm_hyp_vm *hyp_vm)
{
	hyp_spin_lock(&vm_table_lock);
	hyp_page_ref_dec(hyp_virt_to_page(hyp_vm));
	hyp_spin_unlock(&vm_table_lock);
}

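/* As get_pkvm_hyp_vm(), but only succeeds for non-protected VMs. */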
struct pkvm_hyp_vm *get_np_pkvm_hyp_vm(pkvm_handle_t handle)
{
	struct pkvm_hyp_vm *hyp_vm = get_pkvm_hyp_vm(handle);

	if (hyp_vm && pkvm_hyp_vm_is_protected(hyp_vm)) {
		put_pkvm_hyp_vm(hyp_vm);
		hyp_vm = NULL;
	}

	return hyp_vm;
}

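/*
 * Initialize the VM's feature set from the host's. Non-protected VMs
 * inherit the host's features wholesale; protected VMs are restricted to
 * an explicit allowlist.
 */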
static void pkvm_init_features_from_host(struct pkvm_hyp_vm *hyp_vm, const struct kvm *host_kvm)
{
	struct kvm *kvm = &hyp_vm->kvm;
	unsigned long host_arch_flags = READ_ONCE(host_kvm->arch.flags);
	DECLARE_BITMAP(allowed_features, KVM_VCPU_MAX_FEATURES);

	/* CTR_EL0 is always under host control, even for protected VMs. */
	hyp_vm->kvm.arch.ctr_el0 = host_kvm->arch.ctr_el0;

	/* Preserve the vgic model so that GICv3 emulation works */
	hyp_vm->kvm.arch.vgic.vgic_model = host_kvm->arch.vgic.vgic_model;

	if (test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &host_kvm->arch.flags))
		set_bit(KVM_ARCH_FLAG_MTE_ENABLED, &kvm->arch.flags);

	/* No restrictions for non-protected VMs. */
	if (!kvm_vm_is_protected(kvm)) {
		hyp_vm->kvm.arch.flags = host_arch_flags;

		bitmap_copy(kvm->arch.vcpu_features,
			    host_kvm->arch.vcpu_features,
			    KVM_VCPU_MAX_FEATURES);

		if (test_bit(KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS, &host_arch_flags))
			hyp_vm->kvm.arch.midr_el1 = host_kvm->arch.midr_el1;

		return;
	}

	bitmap_zero(allowed_features, KVM_VCPU_MAX_FEATURES);

	set_bit(KVM_ARM_VCPU_PSCI_0_2, allowed_features);

	if (kvm_pvm_ext_allowed(KVM_CAP_ARM_PMU_V3))
		set_bit(KVM_ARM_VCPU_PMU_V3, allowed_features);

	if (kvm_pvm_ext_allowed(KVM_CAP_ARM_PTRAUTH_ADDRESS))
		set_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, allowed_features);

	if (kvm_pvm_ext_allowed(KVM_CAP_ARM_PTRAUTH_GENERIC))
		set_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, allowed_features);

	if (kvm_pvm_ext_allowed(KVM_CAP_ARM_SVE)) {
		set_bit(KVM_ARM_VCPU_SVE, allowed_features);
		kvm->arch.flags |= host_arch_flags & BIT(KVM_ARCH_FLAG_GUEST_HAS_SVE);
	}

	bitmap_and(kvm->arch.vcpu_features, host_kvm->arch.vcpu_features,
		   allowed_features, KVM_VCPU_MAX_FEATURES);
}

static void unpin_host_vcpu(struct kvm_vcpu *host_vcpu)
{
	if (host_vcpu)
		hyp_unpin_shared_mem(host_vcpu, host_vcpu + 1);
}

static void unpin_host_sve_state(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	void *sve_state;

	if (!vcpu_has_feature(&hyp_vcpu->vcpu, KVM_ARM_VCPU_SVE))
		return;

	sve_state = kern_hyp_va(hyp_vcpu->vcpu.arch.sve_state);
	hyp_unpin_shared_mem(sve_state,
			     sve_state + vcpu_sve_state_size(&hyp_vcpu->vcpu));
}

static void unpin_host_vcpus(struct pkvm_hyp_vcpu *hyp_vcpus[],
			     unsigned int nr_vcpus)
{
	int i;

	for (i = 0; i < nr_vcpus; i++) {
		struct pkvm_hyp_vcpu *hyp_vcpu = hyp_vcpus[i];

		if (!hyp_vcpu)
			continue;

		unpin_host_vcpu(hyp_vcpu->host_vcpu);
		unpin_host_sve_state(hyp_vcpu);
	}
}

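/* Initialize the hypervisor copy of the VM state from the host's copy. */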
static void init_pkvm_hyp_vm(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm,
			     unsigned int nr_vcpus, pkvm_handle_t handle)
{
	struct kvm_s2_mmu *mmu = &hyp_vm->kvm.arch.mmu;
	int idx = vm_handle_to_idx(handle);

	hyp_vm->kvm.arch.pkvm.handle = handle;

	hyp_vm->host_kvm = host_kvm;
	hyp_vm->kvm.created_vcpus = nr_vcpus;
	hyp_vm->kvm.arch.pkvm.is_protected = READ_ONCE(host_kvm->arch.pkvm.is_protected);
	hyp_vm->kvm.arch.pkvm.is_created = true;
	hyp_vm->kvm.arch.flags = 0;
	pkvm_init_features_from_host(hyp_vm, host_kvm);

	/* VMID 0 is reserved for the host */
	atomic64_set(&mmu->vmid.id, idx + 1);

	mmu->vtcr = host_mmu.arch.mmu.vtcr;
	mmu->arch = &hyp_vm->kvm.arch;
	mmu->pgt = &hyp_vm->pgt;
}

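/*
 * Pin the host's SVE state and hand it to the hyp vCPU. On failure, SVE
 * is cleared from the VM's feature set.
 */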
static int pkvm_vcpu_init_sve(struct pkvm_hyp_vcpu *hyp_vcpu, struct kvm_vcpu *host_vcpu)
{
	struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;
	unsigned int sve_max_vl;
	size_t sve_state_size;
	void *sve_state;
	int ret = 0;

	if (!vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE)) {
		vcpu_clear_flag(vcpu, VCPU_SVE_FINALIZED);
		return 0;
	}

	/* Limit guest vector length to the maximum supported by the host. */
	sve_max_vl = min(READ_ONCE(host_vcpu->arch.sve_max_vl), kvm_host_sve_max_vl);
	sve_state_size = sve_state_size_from_vl(sve_max_vl);
	sve_state = kern_hyp_va(READ_ONCE(host_vcpu->arch.sve_state));

	if (!sve_state || !sve_state_size) {
		ret = -EINVAL;
		goto err;
	}

	ret = hyp_pin_shared_mem(sve_state, sve_state + sve_state_size);
	if (ret)
		goto err;

	vcpu->arch.sve_state = sve_state;
	vcpu->arch.sve_max_vl = sve_max_vl;

	return 0;
err:
	clear_bit(KVM_ARM_VCPU_SVE, vcpu->kvm->arch.vcpu_features);
	return ret;
}

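/*
 * Initialize the hypervisor copy of the vCPU state from the (pinned) host
 * vCPU, and set up its trap configuration.
 */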
static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
			      struct pkvm_hyp_vm *hyp_vm,
			      struct kvm_vcpu *host_vcpu)
{
	int ret = 0;

	if (hyp_pin_shared_mem(host_vcpu, host_vcpu + 1))
		return -EBUSY;

	hyp_vcpu->host_vcpu = host_vcpu;

	hyp_vcpu->vcpu.kvm = &hyp_vm->kvm;
	hyp_vcpu->vcpu.vcpu_id = READ_ONCE(host_vcpu->vcpu_id);
	hyp_vcpu->vcpu.vcpu_idx = READ_ONCE(host_vcpu->vcpu_idx);

	hyp_vcpu->vcpu.arch.hw_mmu = &hyp_vm->kvm.arch.mmu;
	hyp_vcpu->vcpu.arch.cflags = READ_ONCE(host_vcpu->arch.cflags);
	hyp_vcpu->vcpu.arch.mp_state.mp_state = KVM_MP_STATE_STOPPED;

	if (pkvm_hyp_vcpu_is_protected(hyp_vcpu))
		kvm_init_pvm_id_regs(&hyp_vcpu->vcpu);

	ret = pkvm_vcpu_init_traps(hyp_vcpu);
	if (ret)
		goto done;

	ret = pkvm_vcpu_init_sve(hyp_vcpu, host_vcpu);
done:
	if (ret)
		unpin_host_vcpu(host_vcpu);
	return ret;
}

static int find_free_vm_table_entry(void)
{
	int i;

	for (i = 0; i < KVM_MAX_PVMS; ++i) {
		if (!vm_table[i])
			return i;
	}

	return -ENOMEM;
}

/*
 * Reserve a VM table entry.
 *
 * Return the index of the reserved entry on success,
 * negative error code on failure.
 */
static int allocate_vm_table_entry(void)
{
	int idx;

	hyp_assert_lock_held(&vm_table_lock);

	/*
	 * Initializing protected state might have failed, yet a malicious
	 * host could trigger this function. Thus, ensure that 'vm_table'
	 * exists.
	 */
	if (unlikely(!vm_table))
		return -EINVAL;

	idx = find_free_vm_table_entry();
	if (unlikely(idx < 0))
		return idx;

	vm_table[idx] = RESERVED_ENTRY;

	return idx;
}

static int __insert_vm_table_entry(pkvm_handle_t handle,
				   struct pkvm_hyp_vm *hyp_vm)
{
	unsigned int idx;

	hyp_assert_lock_held(&vm_table_lock);

	/*
	 * Initializing protected state might have failed, yet a malicious
	 * host could trigger this function. Thus, ensure that 'vm_table'
	 * exists.
	 */
	if (unlikely(!vm_table))
		return -EINVAL;

	idx = vm_handle_to_idx(handle);
	if (unlikely(idx >= KVM_MAX_PVMS))
		return -EINVAL;

	if (unlikely(vm_table[idx] != RESERVED_ENTRY))
		return -EINVAL;

	vm_table[idx] = hyp_vm;

	return 0;
}

/*
 * Insert a pointer to the initialized VM into the VM table.
 *
 * Return 0 on success, or negative error code on failure.
 */
static int insert_vm_table_entry(pkvm_handle_t handle,
				 struct pkvm_hyp_vm *hyp_vm)
{
	int ret;

	hyp_spin_lock(&vm_table_lock);
	ret = __insert_vm_table_entry(handle, hyp_vm);
	hyp_spin_unlock(&vm_table_lock);

	return ret;
}

/*
 * Deallocate and remove the VM table entry corresponding to the handle.
 */
static void remove_vm_table_entry(pkvm_handle_t handle)
{
	hyp_assert_lock_held(&vm_table_lock);
	vm_table[vm_handle_to_idx(handle)] = NULL;
}

static size_t pkvm_get_hyp_vm_size(unsigned int nr_vcpus)
{
	return size_add(sizeof(struct pkvm_hyp_vm),
			size_mul(sizeof(struct pkvm_hyp_vcpu *), nr_vcpus));
}

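/*
 * Map memory donated by the host into the hypervisor, transferring
 * ownership of the pages away from the host. Returns NULL if the address
 * is not page-aligned or if the donation fails.
 */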
static void *map_donated_memory_noclear(unsigned long host_va, size_t size)
{
	void *va = (void *)kern_hyp_va(host_va);

	if (!PAGE_ALIGNED(va))
		return NULL;

	if (__pkvm_host_donate_hyp(hyp_virt_to_pfn(va),
				   PAGE_ALIGN(size) >> PAGE_SHIFT))
		return NULL;

	return va;
}

static void *map_donated_memory(unsigned long host_va, size_t size)
{
	void *va = map_donated_memory_noclear(host_va, size);

	if (va)
		memset(va, 0, size);

	return va;
}

static void __unmap_donated_memory(void *va, size_t size)
{
	kvm_flush_dcache_to_poc(va, size);
	WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(va),
				       PAGE_ALIGN(size) >> PAGE_SHIFT));
}

static void unmap_donated_memory(void *va, size_t size)
{
	if (!va)
		return;

	memset(va, 0, size);
	__unmap_donated_memory(va, size);
}

static void unmap_donated_memory_noclear(void *va, size_t size)
{
	if (!va)
		return;

	__unmap_donated_memory(va, size);
}

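/*
 * VM creation is driven by the host in stages: a handle is reserved with
 * __pkvm_reserve_vm(), the VM and vCPU state are donated and initialized
 * with __pkvm_init_vm() and __pkvm_init_vcpu(), and the VM is eventually
 * freed with __pkvm_teardown_vm(). A reservation that was never
 * initialized is released with __pkvm_unreserve_vm() instead.
 */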
/*
 * Reserves an entry in the hypervisor for a new VM in protected mode.
 *
 * Return a unique handle to the VM on success, negative error code on failure.
 */
int __pkvm_reserve_vm(void)
{
	int ret;

	hyp_spin_lock(&vm_table_lock);
	ret = allocate_vm_table_entry();
	hyp_spin_unlock(&vm_table_lock);

	if (ret < 0)
		return ret;

	return idx_to_vm_handle(ret);
}

/*
 * Removes a reserved entry, but only if it hasn't been used yet.
 * Otherwise, the VM needs to be destroyed.
 */
void __pkvm_unreserve_vm(pkvm_handle_t handle)
{
	unsigned int idx = vm_handle_to_idx(handle);

	if (unlikely(!vm_table))
		return;

	hyp_spin_lock(&vm_table_lock);
	if (likely(idx < KVM_MAX_PVMS && vm_table[idx] == RESERVED_ENTRY))
		remove_vm_table_entry(handle);
	hyp_spin_unlock(&vm_table_lock);
}

/*
 * Initialize the hypervisor copy of the VM state using host-donated memory.
 *
 * Unmaps the donated memory from the host at stage 2.
 *
 * host_kvm: A pointer to the host's struct kvm.
 * vm_hva: The host va of the area being donated for the VM state.
 *	   Must be page aligned.
 * pgd_hva: The host va of the area being donated for the stage-2 PGD for
 *	    the VM. Must be page aligned. Its size is implied by the VM's
 *	    VTCR.
 *
 * Return 0 on success, negative error code on failure.
 */
int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
		   unsigned long pgd_hva)
{
	struct pkvm_hyp_vm *hyp_vm = NULL;
	size_t vm_size, pgd_size;
	unsigned int nr_vcpus;
	pkvm_handle_t handle;
	void *pgd = NULL;
	int ret;

	ret = hyp_pin_shared_mem(host_kvm, host_kvm + 1);
	if (ret)
		return ret;

	nr_vcpus = READ_ONCE(host_kvm->created_vcpus);
	if (nr_vcpus < 1) {
		ret = -EINVAL;
		goto err_unpin_kvm;
	}

	handle = READ_ONCE(host_kvm->arch.pkvm.handle);
	if (unlikely(handle < HANDLE_OFFSET)) {
		ret = -EINVAL;
		goto err_unpin_kvm;
	}

	vm_size = pkvm_get_hyp_vm_size(nr_vcpus);
	pgd_size = kvm_pgtable_stage2_pgd_size(host_mmu.arch.mmu.vtcr);

	ret = -ENOMEM;

	hyp_vm = map_donated_memory(vm_hva, vm_size);
	if (!hyp_vm)
		goto err_remove_mappings;

	pgd = map_donated_memory_noclear(pgd_hva, pgd_size);
	if (!pgd)
		goto err_remove_mappings;

	init_pkvm_hyp_vm(host_kvm, hyp_vm, nr_vcpus, handle);

	ret = kvm_guest_prepare_stage2(hyp_vm, pgd);
	if (ret)
		goto err_remove_mappings;

	/* Must be called last since this publishes the VM. */
	ret = insert_vm_table_entry(handle, hyp_vm);
	if (ret)
		goto err_remove_mappings;

	return 0;

err_remove_mappings:
	unmap_donated_memory(hyp_vm, vm_size);
	unmap_donated_memory(pgd, pgd_size);
err_unpin_kvm:
	hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
	return ret;
}

/*
 * Initialize the hypervisor copy of the vCPU state using host-donated memory.
 *
 * handle: The hypervisor handle for the VM.
 * host_vcpu: A pointer to the corresponding host vcpu.
 * vcpu_hva: The host va of the area being donated for the vcpu state.
 *	     Must be page aligned. The size of the area must be equal to
 *	     the page-aligned size of 'struct pkvm_hyp_vcpu'.
 *
 * Return 0 on success, negative error code on failure.
 */
int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
		     unsigned long vcpu_hva)
{
	struct pkvm_hyp_vcpu *hyp_vcpu;
	struct pkvm_hyp_vm *hyp_vm;
	unsigned int idx;
	int ret;

	hyp_vcpu = map_donated_memory(vcpu_hva, sizeof(*hyp_vcpu));
	if (!hyp_vcpu)
		return -ENOMEM;

	hyp_spin_lock(&vm_table_lock);

	hyp_vm = get_vm_by_handle(handle);
	if (!hyp_vm) {
		ret = -ENOENT;
		goto unlock;
	}

	ret = init_pkvm_hyp_vcpu(hyp_vcpu, hyp_vm, host_vcpu);
	if (ret)
		goto unlock;

	idx = hyp_vcpu->vcpu.vcpu_idx;
	if (idx >= hyp_vm->kvm.created_vcpus) {
		ret = -EINVAL;
		goto unlock;
	}

	if (hyp_vm->vcpus[idx]) {
		ret = -EINVAL;
		goto unlock;
	}

	hyp_vm->vcpus[idx] = hyp_vcpu;
unlock:
	hyp_spin_unlock(&vm_table_lock);

	if (ret)
		unmap_donated_memory(hyp_vcpu, sizeof(*hyp_vcpu));
	return ret;
}

static void
teardown_donated_memory(struct kvm_hyp_memcache *mc, void *addr, size_t size)
{
	size = PAGE_ALIGN(size);
	memset(addr, 0, size);

	for (void *start = addr; start < addr + size; start += PAGE_SIZE)
		push_hyp_memcache(mc, start, hyp_virt_to_phys);

	unmap_donated_memory_noclear(addr, size);
}

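/*
 * Tear down the hypervisor copy of a VM, returning its pages to the host
 * via the teardown memcaches. Fails with -EBUSY if the VM still has live
 * references (e.g. a vCPU is loaded).
 */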
int __pkvm_teardown_vm(pkvm_handle_t handle)
{
	struct kvm_hyp_memcache *mc, *stage2_mc;
	struct pkvm_hyp_vm *hyp_vm;
	struct kvm *host_kvm;
	unsigned int idx;
	size_t vm_size;
	int err;

	hyp_spin_lock(&vm_table_lock);
	hyp_vm = get_vm_by_handle(handle);
	if (!hyp_vm) {
		err = -ENOENT;
		goto err_unlock;
	}

	if (WARN_ON(hyp_page_count(hyp_vm))) {
		err = -EBUSY;
		goto err_unlock;
	}

	host_kvm = hyp_vm->host_kvm;

	/* Ensure the VMID is clean before it can be reallocated */
	__kvm_tlb_flush_vmid(&hyp_vm->kvm.arch.mmu);
	remove_vm_table_entry(handle);
	hyp_spin_unlock(&vm_table_lock);

	/* Reclaim guest pages (including page-table pages) */
	mc = &host_kvm->arch.pkvm.teardown_mc;
	stage2_mc = &host_kvm->arch.pkvm.stage2_teardown_mc;
	reclaim_pgtable_pages(hyp_vm, stage2_mc);
	unpin_host_vcpus(hyp_vm->vcpus, hyp_vm->kvm.created_vcpus);

	/* Push the metadata pages to the teardown memcache */
	for (idx = 0; idx < hyp_vm->kvm.created_vcpus; ++idx) {
		struct pkvm_hyp_vcpu *hyp_vcpu = hyp_vm->vcpus[idx];
		struct kvm_hyp_memcache *vcpu_mc;

		if (!hyp_vcpu)
			continue;

		vcpu_mc = &hyp_vcpu->vcpu.arch.pkvm_memcache;

		while (vcpu_mc->nr_pages) {
			void *addr = pop_hyp_memcache(vcpu_mc, hyp_phys_to_virt);

			push_hyp_memcache(stage2_mc, addr, hyp_virt_to_phys);
			unmap_donated_memory_noclear(addr, PAGE_SIZE);
		}

		teardown_donated_memory(mc, hyp_vcpu, sizeof(*hyp_vcpu));
	}

	vm_size = pkvm_get_hyp_vm_size(hyp_vm->kvm.created_vcpus);
	teardown_donated_memory(mc, hyp_vm, vm_size);
	hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
	return 0;

err_unlock:
	hyp_spin_unlock(&vm_table_lock);
	return err;
}
