xref: /linux/arch/arm64/kvm/hyp/nvhe/pkvm.c (revision 2c1ed907520c50326b8f604907a8478b27881a2e)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021 Google LLC
 * Author: Fuad Tabba <tabba@google.com>
 */

#include <linux/kvm_host.h>
#include <linux/mm.h>

#include <asm/kvm_emulate.h>

#include <nvhe/mem_protect.h>
#include <nvhe/memory.h>
#include <nvhe/pkvm.h>
#include <nvhe/trap_handler.h>

/* Used by icache_is_aliasing(). */
unsigned long __icache_flags;

/* Used by kvm_get_vttbr(). */
unsigned int kvm_arm_vmid_bits;

unsigned int kvm_host_sve_max_vl;

/*
 * The currently loaded hyp vCPU for each physical CPU. Used only when
 * protected KVM is enabled, but for both protected and non-protected VMs.
 */
static DEFINE_PER_CPU(struct pkvm_hyp_vcpu *, loaded_hyp_vcpu);

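/*
 * Reset HCR_EL2 for the vCPU to the KVM guest defaults, with additional
 * bits set according to the CPU features detected at EL2 (RAS, FWB, EVT)
 * and the features enabled for the VM (pointer auth, MTE).
 */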
static void pkvm_vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;

	if (has_hvhe())
		vcpu->arch.hcr_el2 |= HCR_E2H;

	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) {
		/* route synchronous external abort exceptions to EL2 */
		vcpu->arch.hcr_el2 |= HCR_TEA;
		/* trap error record accesses */
		vcpu->arch.hcr_el2 |= HCR_TERR;
	}

	if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
		vcpu->arch.hcr_el2 |= HCR_FWB;

	if (cpus_have_final_cap(ARM64_HAS_EVT) &&
	    !cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE))
		vcpu->arch.hcr_el2 |= HCR_TID4;
	else
		vcpu->arch.hcr_el2 |= HCR_TID2;

	if (vcpu_has_ptrauth(vcpu))
		vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);

	if (kvm_has_mte(vcpu->kvm))
		vcpu->arch.hcr_el2 |= HCR_ATA;
}

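/*
 * Tighten the HCR_EL2 trap configuration for a protected vCPU: force
 * AArch64, always trap ID and implementation-defined registers, and
 * trap or disable features (RAS, AMU, MTE, LORegions) that are not
 * exposed to the protected guest.
 */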
static void pvm_init_traps_hcr(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	u64 val = vcpu->arch.hcr_el2;

	/* No support for AArch32. */
	val |= HCR_RW;

	/*
	 * Always trap:
	 * - Feature id registers: to control features exposed to guests
	 * - Implementation-defined features
	 */
	val |= HCR_TACR | HCR_TIDCP | HCR_TID3 | HCR_TID1;

	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP)) {
		val |= HCR_TERR | HCR_TEA;
		val &= ~(HCR_FIEN);
	}

	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, IMP))
		val &= ~(HCR_AMVOFFEN);

	if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, MTE, IMP)) {
		val |= HCR_TID5;
		val &= ~(HCR_DCT | HCR_ATA);
	}

	if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, LO, IMP))
		val |= HCR_TLOR;

	vcpu->arch.hcr_el2 = val;
}

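/*
 * Restrict MDCR_EL2 for a protected vCPU: trap debug, PMU, SPE, trace
 * and DCC accesses for any debug-related feature that is not exposed
 * to the protected guest.
 */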
static void pvm_init_traps_mdcr(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	u64 val = vcpu->arch.mdcr_el2;

	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMUVer, IMP)) {
		val |= MDCR_EL2_TPM | MDCR_EL2_TPMCR;
		val &= ~(MDCR_EL2_HPME | MDCR_EL2_MTPME | MDCR_EL2_HPMN_MASK);
	}

	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, DebugVer, IMP))
		val |= MDCR_EL2_TDRA | MDCR_EL2_TDA;

	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, DoubleLock, IMP))
		val |= MDCR_EL2_TDOSA;

	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMSVer, IMP)) {
		val |= MDCR_EL2_TPMS;
		val &= ~MDCR_EL2_E2PB_MASK;
	}

	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceFilt, IMP))
		val |= MDCR_EL2_TTRF;

	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, ExtTrcBuff, IMP))
		val |= MDCR_EL2_E2TB_MASK;

	/* Trap Debug Communications Channel registers */
	if (!kvm_has_feat(kvm, ID_AA64MMFR0_EL1, FGT, IMP))
		val |= MDCR_EL2_TDCC;

	vcpu->arch.mdcr_el2 = val;
}

/*
 * Check that cpu features that are neither trapped nor supported are not
 * enabled for protected VMs.
 */
static int pkvm_check_pvm_cpu_features(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;

	/* Protected KVM does not support AArch32 guests. */
	if (kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL0, AARCH32) ||
	    kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL1, AARCH32))
		return -EINVAL;

	/*
	 * Linux guests assume support for floating-point and Advanced SIMD. Do
	 * not change the trapping behavior for these from the KVM default.
	 */
	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, FP, IMP) ||
	    !kvm_has_feat(kvm, ID_AA64PFR0_EL1, AdvSIMD, IMP))
		return -EINVAL;

	/* No SME support in KVM right now. Check to catch if it changes. */
	if (kvm_has_feat(kvm, ID_AA64PFR1_EL1, SME, IMP))
		return -EINVAL;

	return 0;
}

/*
 * Initialize trap register values in protected mode.
 */
static int pkvm_vcpu_init_traps(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;
	int ret;

	vcpu->arch.mdcr_el2 = 0;

	pkvm_vcpu_reset_hcr(vcpu);

	if (!pkvm_hyp_vcpu_is_protected(hyp_vcpu))
		return 0;

	ret = pkvm_check_pvm_cpu_features(vcpu);
	if (ret)
		return ret;

	pvm_init_traps_hcr(vcpu);
	pvm_init_traps_mdcr(vcpu);

	return 0;
}

/*
 * Start the VM table handle at the offset defined instead of at 0.
 * Mainly for sanity checking and debugging.
 */
#define HANDLE_OFFSET 0x1000

static unsigned int vm_handle_to_idx(pkvm_handle_t handle)
{
	return handle - HANDLE_OFFSET;
}

static pkvm_handle_t idx_to_vm_handle(unsigned int idx)
{
	return idx + HANDLE_OFFSET;
}

/*
 * Spinlock for protecting state related to the VM table. Protects writes
 * to 'vm_table', 'nr_table_entries', and other per-vm state on initialization.
 * Also protects reads and writes to 'last_hyp_vcpu_lookup'.
 */
DEFINE_HYP_SPINLOCK(vm_table_lock);

/*
 * The table of VM entries for protected VMs in hyp.
 * Allocated at hyp initialization and setup.
 */
static struct pkvm_hyp_vm **vm_table;

void pkvm_hyp_vm_table_init(void *tbl)
{
	WARN_ON(vm_table);
	vm_table = tbl;
}

/*
 * Return the hyp vm structure corresponding to the handle.
 */
static struct pkvm_hyp_vm *get_vm_by_handle(pkvm_handle_t handle)
{
	unsigned int idx = vm_handle_to_idx(handle);

	if (unlikely(idx >= KVM_MAX_PVMS))
		return NULL;

	return vm_table[idx];
}

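/*
 * Load the hyp vCPU for 'handle'/'vcpu_idx' onto this physical CPU.
 *
 * Takes a reference on the hyp VM and marks the vCPU as loaded so that it
 * cannot be loaded elsewhere until pkvm_put_hyp_vcpu() is called. Returns
 * NULL if a vCPU is already loaded here, the handle or index is invalid,
 * or the vCPU is in use on another CPU.
 */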
struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
					 unsigned int vcpu_idx)
{
	struct pkvm_hyp_vcpu *hyp_vcpu = NULL;
	struct pkvm_hyp_vm *hyp_vm;

	/* Cannot load a new vcpu without putting the old one first. */
	if (__this_cpu_read(loaded_hyp_vcpu))
		return NULL;

	hyp_spin_lock(&vm_table_lock);
	hyp_vm = get_vm_by_handle(handle);
	if (!hyp_vm || hyp_vm->nr_vcpus <= vcpu_idx)
		goto unlock;

	hyp_vcpu = hyp_vm->vcpus[vcpu_idx];

	/* Ensure vcpu isn't loaded on more than one cpu simultaneously. */
	if (unlikely(hyp_vcpu->loaded_hyp_vcpu)) {
		hyp_vcpu = NULL;
		goto unlock;
	}

	hyp_vcpu->loaded_hyp_vcpu = this_cpu_ptr(&loaded_hyp_vcpu);
	hyp_page_ref_inc(hyp_virt_to_page(hyp_vm));
unlock:
	hyp_spin_unlock(&vm_table_lock);

	if (hyp_vcpu)
		__this_cpu_write(loaded_hyp_vcpu, hyp_vcpu);
	return hyp_vcpu;
}

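/*
 * Unload the currently loaded hyp vCPU from this physical CPU and drop
 * the VM reference taken by pkvm_load_hyp_vcpu().
 */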
void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	struct pkvm_hyp_vm *hyp_vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu);

	hyp_spin_lock(&vm_table_lock);
	hyp_vcpu->loaded_hyp_vcpu = NULL;
	__this_cpu_write(loaded_hyp_vcpu, NULL);
	hyp_page_ref_dec(hyp_virt_to_page(hyp_vm));
	hyp_spin_unlock(&vm_table_lock);
}

struct pkvm_hyp_vcpu *pkvm_get_loaded_hyp_vcpu(void)
{
	return __this_cpu_read(loaded_hyp_vcpu);
}

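/*
 * Look up a hyp VM by handle and take a reference on it. The caller must
 * release the reference with put_pkvm_hyp_vm().
 */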
struct pkvm_hyp_vm *get_pkvm_hyp_vm(pkvm_handle_t handle)
{
	struct pkvm_hyp_vm *hyp_vm;

	hyp_spin_lock(&vm_table_lock);
	hyp_vm = get_vm_by_handle(handle);
	if (hyp_vm)
		hyp_page_ref_inc(hyp_virt_to_page(hyp_vm));
	hyp_spin_unlock(&vm_table_lock);

	return hyp_vm;
}

void put_pkvm_hyp_vm(struct pkvm_hyp_vm *hyp_vm)
{
	hyp_spin_lock(&vm_table_lock);
	hyp_page_ref_dec(hyp_virt_to_page(hyp_vm));
	hyp_spin_unlock(&vm_table_lock);
}

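/*
 * As get_pkvm_hyp_vm(), but only returns non-protected VMs; the reference
 * is dropped and NULL is returned if the VM is protected.
 */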
struct pkvm_hyp_vm *get_np_pkvm_hyp_vm(pkvm_handle_t handle)
{
	struct pkvm_hyp_vm *hyp_vm = get_pkvm_hyp_vm(handle);

	if (hyp_vm && pkvm_hyp_vm_is_protected(hyp_vm)) {
		put_pkvm_hyp_vm(hyp_vm);
		hyp_vm = NULL;
	}

	return hyp_vm;
}

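/*
 * Populate the hyp VM's feature set from the host's copy. Non-protected
 * VMs inherit the host's flags and vCPU features unchanged; protected VMs
 * only keep the features that pKVM allows here (PSCI, and optionally PMU,
 * pointer authentication and SVE).
 */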
static void pkvm_init_features_from_host(struct pkvm_hyp_vm *hyp_vm, const struct kvm *host_kvm)
{
	struct kvm *kvm = &hyp_vm->kvm;
	unsigned long host_arch_flags = READ_ONCE(host_kvm->arch.flags);
	DECLARE_BITMAP(allowed_features, KVM_VCPU_MAX_FEATURES);

	if (test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &host_kvm->arch.flags))
		set_bit(KVM_ARCH_FLAG_MTE_ENABLED, &kvm->arch.flags);

	/* No restrictions for non-protected VMs. */
	if (!kvm_vm_is_protected(kvm)) {
		hyp_vm->kvm.arch.flags = host_arch_flags;

		bitmap_copy(kvm->arch.vcpu_features,
			    host_kvm->arch.vcpu_features,
			    KVM_VCPU_MAX_FEATURES);
		return;
	}

	bitmap_zero(allowed_features, KVM_VCPU_MAX_FEATURES);

	set_bit(KVM_ARM_VCPU_PSCI_0_2, allowed_features);

	if (kvm_pvm_ext_allowed(KVM_CAP_ARM_PMU_V3))
		set_bit(KVM_ARM_VCPU_PMU_V3, allowed_features);

	if (kvm_pvm_ext_allowed(KVM_CAP_ARM_PTRAUTH_ADDRESS))
		set_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, allowed_features);

	if (kvm_pvm_ext_allowed(KVM_CAP_ARM_PTRAUTH_GENERIC))
		set_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, allowed_features);

	if (kvm_pvm_ext_allowed(KVM_CAP_ARM_SVE)) {
		set_bit(KVM_ARM_VCPU_SVE, allowed_features);
		kvm->arch.flags |= host_arch_flags & BIT(KVM_ARCH_FLAG_GUEST_HAS_SVE);
	}

	bitmap_and(kvm->arch.vcpu_features, host_kvm->arch.vcpu_features,
		   allowed_features, KVM_VCPU_MAX_FEATURES);
}

static void unpin_host_vcpu(struct kvm_vcpu *host_vcpu)
{
	if (host_vcpu)
		hyp_unpin_shared_mem(host_vcpu, host_vcpu + 1);
}

static void unpin_host_vcpus(struct pkvm_hyp_vcpu *hyp_vcpus[],
			     unsigned int nr_vcpus)
{
	int i;

	for (i = 0; i < nr_vcpus; i++)
		unpin_host_vcpu(hyp_vcpus[i]->host_vcpu);
}

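/*
 * Initialize the hyp copy of a VM's state from the host's struct kvm,
 * including the stage-2 VTCR and the filtered feature set.
 */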
static void init_pkvm_hyp_vm(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm,
			     unsigned int nr_vcpus)
{
	hyp_vm->host_kvm = host_kvm;
	hyp_vm->kvm.created_vcpus = nr_vcpus;
	hyp_vm->kvm.arch.mmu.vtcr = host_mmu.arch.mmu.vtcr;
	hyp_vm->kvm.arch.pkvm.enabled = READ_ONCE(host_kvm->arch.pkvm.enabled);
	hyp_vm->kvm.arch.flags = 0;
	pkvm_init_features_from_host(hyp_vm, host_kvm);
}

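/*
 * Clear the SVE-finalized flag inherited from the host cflags when this
 * vCPU does not have SVE enabled.
 */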
static void pkvm_vcpu_init_sve(struct pkvm_hyp_vcpu *hyp_vcpu, struct kvm_vcpu *host_vcpu)
{
	struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;

	if (!vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE))
		vcpu_clear_flag(vcpu, VCPU_SVE_FINALIZED);
}

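/*
 * Initialize the hyp copy of a vCPU: pin the host vCPU's memory, link the
 * hyp vCPU to its VM, and set up the trap configuration. The host vCPU is
 * unpinned again on failure.
 */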
static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
			      struct pkvm_hyp_vm *hyp_vm,
			      struct kvm_vcpu *host_vcpu,
			      unsigned int vcpu_idx)
{
	int ret = 0;

	if (hyp_pin_shared_mem(host_vcpu, host_vcpu + 1))
		return -EBUSY;

	if (host_vcpu->vcpu_idx != vcpu_idx) {
		ret = -EINVAL;
		goto done;
	}

	hyp_vcpu->host_vcpu = host_vcpu;

	hyp_vcpu->vcpu.kvm = &hyp_vm->kvm;
	hyp_vcpu->vcpu.vcpu_id = READ_ONCE(host_vcpu->vcpu_id);
	hyp_vcpu->vcpu.vcpu_idx = vcpu_idx;

	hyp_vcpu->vcpu.arch.hw_mmu = &hyp_vm->kvm.arch.mmu;
	hyp_vcpu->vcpu.arch.cflags = READ_ONCE(host_vcpu->arch.cflags);
	hyp_vcpu->vcpu.arch.mp_state.mp_state = KVM_MP_STATE_STOPPED;

	if (pkvm_hyp_vcpu_is_protected(hyp_vcpu))
		kvm_init_pvm_id_regs(&hyp_vcpu->vcpu);

	ret = pkvm_vcpu_init_traps(hyp_vcpu);
	if (ret)
		goto done;

	pkvm_vcpu_init_sve(hyp_vcpu, host_vcpu);
done:
	if (ret)
		unpin_host_vcpu(host_vcpu);
	return ret;
}

static int find_free_vm_table_entry(struct kvm *host_kvm)
{
	int i;

	for (i = 0; i < KVM_MAX_PVMS; ++i) {
		if (!vm_table[i])
			return i;
	}

	return -ENOMEM;
}

/*
 * Allocate a VM table entry and insert a pointer to the new vm.
 *
 * Return a unique handle to the protected VM on success,
 * negative error code on failure.
 */
static pkvm_handle_t insert_vm_table_entry(struct kvm *host_kvm,
					   struct pkvm_hyp_vm *hyp_vm)
{
	struct kvm_s2_mmu *mmu = &hyp_vm->kvm.arch.mmu;
	int idx;

	hyp_assert_lock_held(&vm_table_lock);

	/*
	 * Initializing protected state might have failed, yet a malicious
	 * host could trigger this function. Thus, ensure that 'vm_table'
	 * exists.
	 */
	if (unlikely(!vm_table))
		return -EINVAL;

	idx = find_free_vm_table_entry(host_kvm);
	if (idx < 0)
		return idx;

	hyp_vm->kvm.arch.pkvm.handle = idx_to_vm_handle(idx);

	/* VMID 0 is reserved for the host */
	atomic64_set(&mmu->vmid.id, idx + 1);

	mmu->arch = &hyp_vm->kvm.arch;
	mmu->pgt = &hyp_vm->pgt;

	vm_table[idx] = hyp_vm;
	return hyp_vm->kvm.arch.pkvm.handle;
}

/*
 * Deallocate and remove the VM table entry corresponding to the handle.
 */
static void remove_vm_table_entry(pkvm_handle_t handle)
{
	hyp_assert_lock_held(&vm_table_lock);
	vm_table[vm_handle_to_idx(handle)] = NULL;
}

static size_t pkvm_get_hyp_vm_size(unsigned int nr_vcpus)
{
	return size_add(sizeof(struct pkvm_hyp_vm),
		size_mul(sizeof(struct pkvm_hyp_vcpu *), nr_vcpus));
}

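/*
 * Helpers for memory donated by the host: map_donated_memory*() transfer
 * ownership of the pages to hyp and return a hyp VA (optionally zeroing
 * the memory), unmap_donated_memory*() hand the pages back to the host.
 */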
static void *map_donated_memory_noclear(unsigned long host_va, size_t size)
{
	void *va = (void *)kern_hyp_va(host_va);

	if (!PAGE_ALIGNED(va))
		return NULL;

	if (__pkvm_host_donate_hyp(hyp_virt_to_pfn(va),
				   PAGE_ALIGN(size) >> PAGE_SHIFT))
		return NULL;

	return va;
}

static void *map_donated_memory(unsigned long host_va, size_t size)
{
	void *va = map_donated_memory_noclear(host_va, size);

	if (va)
		memset(va, 0, size);

	return va;
}

static void __unmap_donated_memory(void *va, size_t size)
{
	kvm_flush_dcache_to_poc(va, size);
	WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(va),
				       PAGE_ALIGN(size) >> PAGE_SHIFT));
}

static void unmap_donated_memory(void *va, size_t size)
{
	if (!va)
		return;

	memset(va, 0, size);
	__unmap_donated_memory(va, size);
}

static void unmap_donated_memory_noclear(void *va, size_t size)
{
	if (!va)
		return;

	__unmap_donated_memory(va, size);
}

/*
 * Initialize the hypervisor copy of the protected VM state using the
 * memory donated by the host.
 *
 * Unmaps the donated memory from the host at stage 2.
 *
 * host_kvm: A pointer to the host's struct kvm.
 * vm_hva: The host va of the area being donated for the VM state.
 *	   Must be page aligned.
 * pgd_hva: The host va of the area being donated for the stage-2 PGD for
 *	    the VM. Must be page aligned. Its size is implied by the VM's
 *	    VTCR.
 *
 * Return a unique handle to the protected VM on success,
 * negative error code on failure.
 */
int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
		   unsigned long pgd_hva)
{
	struct pkvm_hyp_vm *hyp_vm = NULL;
	size_t vm_size, pgd_size;
	unsigned int nr_vcpus;
	void *pgd = NULL;
	int ret;

	ret = hyp_pin_shared_mem(host_kvm, host_kvm + 1);
	if (ret)
		return ret;

	nr_vcpus = READ_ONCE(host_kvm->created_vcpus);
	if (nr_vcpus < 1) {
		ret = -EINVAL;
		goto err_unpin_kvm;
	}

	vm_size = pkvm_get_hyp_vm_size(nr_vcpus);
	pgd_size = kvm_pgtable_stage2_pgd_size(host_mmu.arch.mmu.vtcr);

	ret = -ENOMEM;

	hyp_vm = map_donated_memory(vm_hva, vm_size);
	if (!hyp_vm)
		goto err_remove_mappings;

	pgd = map_donated_memory_noclear(pgd_hva, pgd_size);
	if (!pgd)
		goto err_remove_mappings;

	init_pkvm_hyp_vm(host_kvm, hyp_vm, nr_vcpus);

	hyp_spin_lock(&vm_table_lock);
	ret = insert_vm_table_entry(host_kvm, hyp_vm);
	if (ret < 0)
		goto err_unlock;

	ret = kvm_guest_prepare_stage2(hyp_vm, pgd);
	if (ret)
		goto err_remove_vm_table_entry;
	hyp_spin_unlock(&vm_table_lock);

	return hyp_vm->kvm.arch.pkvm.handle;

err_remove_vm_table_entry:
	remove_vm_table_entry(hyp_vm->kvm.arch.pkvm.handle);
err_unlock:
	hyp_spin_unlock(&vm_table_lock);
err_remove_mappings:
	unmap_donated_memory(hyp_vm, vm_size);
	unmap_donated_memory(pgd, pgd_size);
err_unpin_kvm:
	hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
	return ret;
}

/*
 * Initialize the hypervisor copy of the protected vCPU state using the
 * memory donated by the host.
 *
 * handle: The handle for the protected vm.
 * host_vcpu: A pointer to the corresponding host vcpu.
 * vcpu_hva: The host va of the area being donated for the vcpu state.
 *	     Must be page aligned. The size of the area must be equal to
 *	     the page-aligned size of 'struct pkvm_hyp_vcpu'.
 * Return 0 on success, negative error code on failure.
 */
int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
		     unsigned long vcpu_hva)
{
	struct pkvm_hyp_vcpu *hyp_vcpu;
	struct pkvm_hyp_vm *hyp_vm;
	unsigned int idx;
	int ret;

	hyp_vcpu = map_donated_memory(vcpu_hva, sizeof(*hyp_vcpu));
	if (!hyp_vcpu)
		return -ENOMEM;

	hyp_spin_lock(&vm_table_lock);

	hyp_vm = get_vm_by_handle(handle);
	if (!hyp_vm) {
		ret = -ENOENT;
		goto unlock;
	}

	idx = hyp_vm->nr_vcpus;
	if (idx >= hyp_vm->kvm.created_vcpus) {
		ret = -EINVAL;
		goto unlock;
	}

	ret = init_pkvm_hyp_vcpu(hyp_vcpu, hyp_vm, host_vcpu, idx);
	if (ret)
		goto unlock;

	hyp_vm->vcpus[idx] = hyp_vcpu;
	hyp_vm->nr_vcpus++;
unlock:
	hyp_spin_unlock(&vm_table_lock);

	if (ret) {
		unmap_donated_memory(hyp_vcpu, sizeof(*hyp_vcpu));
		return ret;
	}

	return 0;
}

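/*
 * Wipe a range of donated memory, push its pages to the teardown memcache
 * for the host to reclaim, and unmap it from hyp.
 */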
static void
teardown_donated_memory(struct kvm_hyp_memcache *mc, void *addr, size_t size)
{
	size = PAGE_ALIGN(size);
	memset(addr, 0, size);

	for (void *start = addr; start < addr + size; start += PAGE_SIZE)
		push_hyp_memcache(mc, start, hyp_virt_to_phys);

	unmap_donated_memory_noclear(addr, size);
}

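/*
 * Tear down the hyp state of a VM: invalidate its VMID, reclaim the guest
 * and metadata pages into the host's teardown memcache, and unpin the
 * host's struct kvm and vCPUs. Fails if the VM is still in use.
 */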
int __pkvm_teardown_vm(pkvm_handle_t handle)
{
	struct kvm_hyp_memcache *mc;
	struct pkvm_hyp_vm *hyp_vm;
	struct kvm *host_kvm;
	unsigned int idx;
	size_t vm_size;
	int err;

	hyp_spin_lock(&vm_table_lock);
	hyp_vm = get_vm_by_handle(handle);
	if (!hyp_vm) {
		err = -ENOENT;
		goto err_unlock;
	}

	if (WARN_ON(hyp_page_count(hyp_vm))) {
		err = -EBUSY;
		goto err_unlock;
	}

	host_kvm = hyp_vm->host_kvm;

	/* Ensure the VMID is clean before it can be reallocated */
	__kvm_tlb_flush_vmid(&hyp_vm->kvm.arch.mmu);
	remove_vm_table_entry(handle);
	hyp_spin_unlock(&vm_table_lock);

	/* Reclaim guest pages (including page-table pages) */
	mc = &host_kvm->arch.pkvm.teardown_mc;
	reclaim_guest_pages(hyp_vm, mc);
	unpin_host_vcpus(hyp_vm->vcpus, hyp_vm->nr_vcpus);

	/* Push the metadata pages to the teardown memcache */
	for (idx = 0; idx < hyp_vm->nr_vcpus; ++idx) {
		struct pkvm_hyp_vcpu *hyp_vcpu = hyp_vm->vcpus[idx];
		struct kvm_hyp_memcache *vcpu_mc = &hyp_vcpu->vcpu.arch.pkvm_memcache;

		while (vcpu_mc->nr_pages) {
			void *addr = pop_hyp_memcache(vcpu_mc, hyp_phys_to_virt);

			push_hyp_memcache(mc, addr, hyp_virt_to_phys);
			unmap_donated_memory_noclear(addr, PAGE_SIZE);
		}

		teardown_donated_memory(mc, hyp_vcpu, sizeof(*hyp_vcpu));
	}

	vm_size = pkvm_get_hyp_vm_size(hyp_vm->kvm.created_vcpus);
	teardown_donated_memory(mc, hyp_vm, vm_size);
	hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
	return 0;

err_unlock:
	hyp_spin_unlock(&vm_table_lock);
	return err;
}