xref: /linux/arch/arm64/kvm/hyp/nvhe/pkvm.c (revision 001821b0e79716c4e17c71d8e053a23599a7a508)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021 Google LLC
 * Author: Fuad Tabba <tabba@google.com>
 */

#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <nvhe/fixed_config.h>
#include <nvhe/mem_protect.h>
#include <nvhe/memory.h>
#include <nvhe/pkvm.h>
#include <nvhe/trap_handler.h>

/* Used by icache_is_aliasing(). */
unsigned long __icache_flags;

/* Used by kvm_get_vttbr(). */
unsigned int kvm_arm_vmid_bits;

/*
 * Set trap register values based on features in ID_AA64PFR0.
 */
static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
{
	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR0_EL1);
	u64 hcr_set = HCR_RW;
	u64 hcr_clear = 0;
	u64 cptr_set = 0;
	u64 cptr_clear = 0;

	/* Protected KVM does not support AArch32 guests. */
	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0),
		PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_ELx_64BIT_ONLY);
	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1),
		PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_ELx_64BIT_ONLY);

	/*
	 * Linux guests assume support for floating-point and Advanced SIMD. Do
	 * not change the trapping behavior for these from the KVM default.
	 */
	BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP),
				PVM_ID_AA64PFR0_ALLOW));
	BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD),
				PVM_ID_AA64PFR0_ALLOW));

	if (has_hvhe())
		hcr_set |= HCR_E2H;

	/* Trap RAS unless all current versions are supported */
	if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), feature_ids) <
	    ID_AA64PFR0_EL1_RAS_V1P1) {
		hcr_set |= HCR_TERR | HCR_TEA;
		hcr_clear |= HCR_FIEN;
	}

	/* Trap AMU */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU), feature_ids)) {
		hcr_clear |= HCR_AMVOFFEN;
		cptr_set |= CPTR_EL2_TAM;
	}

	/* Trap SVE */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids)) {
		if (has_hvhe())
			cptr_clear |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN;
		else
			cptr_set |= CPTR_EL2_TZ;
	}

	vcpu->arch.hcr_el2 |= hcr_set;
	vcpu->arch.hcr_el2 &= ~hcr_clear;
	vcpu->arch.cptr_el2 |= cptr_set;
	vcpu->arch.cptr_el2 &= ~cptr_clear;
}

/*
 * Set trap register values based on features in ID_AA64PFR1.
 */
static void pvm_init_traps_aa64pfr1(struct kvm_vcpu *vcpu)
{
	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR1_EL1);
	u64 hcr_set = 0;
	u64 hcr_clear = 0;

	/* Memory Tagging: Trap and Treat as Untagged if not supported. */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE), feature_ids)) {
		hcr_set |= HCR_TID5;
		hcr_clear |= HCR_DCT | HCR_ATA;
	}

	vcpu->arch.hcr_el2 |= hcr_set;
	vcpu->arch.hcr_el2 &= ~hcr_clear;
}

/*
 * Set trap register values based on features in ID_AA64DFR0.
 */
static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)
{
	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64DFR0_EL1);
	u64 mdcr_set = 0;
	u64 mdcr_clear = 0;
	u64 cptr_set = 0;

	/* Trap/constrain PMU */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), feature_ids)) {
		mdcr_set |= MDCR_EL2_TPM | MDCR_EL2_TPMCR;
		mdcr_clear |= MDCR_EL2_HPME | MDCR_EL2_MTPME |
			      MDCR_EL2_HPMN_MASK;
	}

	/* Trap Debug */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), feature_ids))
		mdcr_set |= MDCR_EL2_TDRA | MDCR_EL2_TDA | MDCR_EL2_TDE;

	/* Trap OS Double Lock */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DoubleLock), feature_ids))
		mdcr_set |= MDCR_EL2_TDOSA;

	/* Trap SPE */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer), feature_ids)) {
		mdcr_set |= MDCR_EL2_TPMS;
		mdcr_clear |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
	}

	/* Trap Trace Filter */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceFilt), feature_ids))
		mdcr_set |= MDCR_EL2_TTRF;

	/* Trap Trace */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), feature_ids)) {
		if (has_hvhe())
			cptr_set |= CPACR_EL1_TTA;
		else
			cptr_set |= CPTR_EL2_TTA;
	}

	/* Trap External Trace */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_ExtTrcBuff), feature_ids))
		mdcr_clear |= MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT;

	vcpu->arch.mdcr_el2 |= mdcr_set;
	vcpu->arch.mdcr_el2 &= ~mdcr_clear;
	vcpu->arch.cptr_el2 |= cptr_set;
}

/*
 * Set trap register values based on features in ID_AA64MMFR0.
 */
static void pvm_init_traps_aa64mmfr0(struct kvm_vcpu *vcpu)
{
	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64MMFR0_EL1);
	u64 mdcr_set = 0;

	/* Trap Debug Communications Channel registers */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_FGT), feature_ids))
		mdcr_set |= MDCR_EL2_TDCC;

	vcpu->arch.mdcr_el2 |= mdcr_set;
}

/*
 * Set trap register values based on features in ID_AA64MMFR1.
 */
static void pvm_init_traps_aa64mmfr1(struct kvm_vcpu *vcpu)
{
	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64MMFR1_EL1);
	u64 hcr_set = 0;

	/* Trap LOR */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_LO), feature_ids))
		hcr_set |= HCR_TLOR;

	vcpu->arch.hcr_el2 |= hcr_set;
}

/*
 * Set baseline trap register values.
 */
static void pvm_init_trap_regs(struct kvm_vcpu *vcpu)
{
	const u64 hcr_trap_feat_regs = HCR_TID3;
	const u64 hcr_trap_impdef = HCR_TACR | HCR_TIDCP | HCR_TID1;

	/*
	 * Always trap:
	 * - Feature id registers: to control features exposed to guests
	 * - Implementation-defined features
	 */
	vcpu->arch.hcr_el2 |= hcr_trap_feat_regs | hcr_trap_impdef;

	/* Clear res0 and set res1 bits to trap potential new features. */
	vcpu->arch.hcr_el2 &= ~(HCR_RES0);
	vcpu->arch.mdcr_el2 &= ~(MDCR_EL2_RES0);
	if (!has_hvhe()) {
		vcpu->arch.cptr_el2 |= CPTR_NVHE_EL2_RES1;
		vcpu->arch.cptr_el2 &= ~(CPTR_NVHE_EL2_RES0);
	}
}

/*
 * Initialize trap register values in protected mode.
 */
void __pkvm_vcpu_init_traps(struct kvm_vcpu *vcpu)
{
	pvm_init_trap_regs(vcpu);
	pvm_init_traps_aa64pfr0(vcpu);
	pvm_init_traps_aa64pfr1(vcpu);
	pvm_init_traps_aa64dfr0(vcpu);
	pvm_init_traps_aa64mmfr0(vcpu);
	pvm_init_traps_aa64mmfr1(vcpu);
}

/*
 * Start VM table handles at HANDLE_OFFSET rather than at 0, mainly as a
 * sanity-checking and debugging aid.
 */
#define HANDLE_OFFSET 0x1000

static unsigned int vm_handle_to_idx(pkvm_handle_t handle)
{
	return handle - HANDLE_OFFSET;
}

static pkvm_handle_t idx_to_vm_handle(unsigned int idx)
{
	return idx + HANDLE_OFFSET;
}

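/* e.g. idx_to_vm_handle(0) == 0x1000 and vm_handle_to_idx(0x1000) == 0. */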
/*
 * Spinlock protecting state related to the VM table: writes to 'vm_table'
 * as well as the per-VM reference counts taken and dropped while loading
 * and putting hyp vCPUs.
 */
static DEFINE_HYP_SPINLOCK(vm_table_lock);

/*
 * The table of VM entries for protected VMs in hyp.
 * Allocated at hyp initialization.
 */
static struct pkvm_hyp_vm **vm_table;

void pkvm_hyp_vm_table_init(void *tbl)
{
	WARN_ON(vm_table);
	vm_table = tbl;
}

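/*
 * Point each CPU's fpsimd_state at the FP regs of its host context, i.e.
 * where the host's FP/SIMD state is saved by the hyp switch code.
 */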
void pkvm_host_fpsimd_state_init(void)
{
	unsigned long i;

	for (i = 0; i < hyp_nr_cpus; i++) {
		struct kvm_host_data *host_data = per_cpu_ptr(&kvm_host_data, i);

		host_data->fpsimd_state = &host_data->host_ctxt.fp_regs;
	}
}

/*
 * Return the hyp vm structure corresponding to the handle.
 */
static struct pkvm_hyp_vm *get_vm_by_handle(pkvm_handle_t handle)
{
	unsigned int idx = vm_handle_to_idx(handle);

	if (unlikely(idx >= KVM_MAX_PVMS))
		return NULL;

	return vm_table[idx];
}

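/*
 * Look up the hyp vCPU for 'handle'/'vcpu_idx' and take a reference on the
 * page backing its VM so the VM cannot be torn down while the vCPU is in
 * use. Returns NULL if the handle or vCPU index is invalid.
 */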
struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
					 unsigned int vcpu_idx)
{
	struct pkvm_hyp_vcpu *hyp_vcpu = NULL;
	struct pkvm_hyp_vm *hyp_vm;

	hyp_spin_lock(&vm_table_lock);
	hyp_vm = get_vm_by_handle(handle);
	if (!hyp_vm || hyp_vm->nr_vcpus <= vcpu_idx)
		goto unlock;

	hyp_vcpu = hyp_vm->vcpus[vcpu_idx];
	hyp_page_ref_inc(hyp_virt_to_page(hyp_vm));
unlock:
	hyp_spin_unlock(&vm_table_lock);
	return hyp_vcpu;
}

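/* Drop the VM reference taken by pkvm_load_hyp_vcpu(). */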
void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	struct pkvm_hyp_vm *hyp_vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu);

	hyp_spin_lock(&vm_table_lock);
	hyp_page_ref_dec(hyp_virt_to_page(hyp_vm));
	hyp_spin_unlock(&vm_table_lock);
}

static void unpin_host_vcpu(struct kvm_vcpu *host_vcpu)
{
	if (host_vcpu)
		hyp_unpin_shared_mem(host_vcpu, host_vcpu + 1);
}

static void unpin_host_vcpus(struct pkvm_hyp_vcpu *hyp_vcpus[],
			     unsigned int nr_vcpus)
{
	int i;

	for (i = 0; i < nr_vcpus; i++)
		unpin_host_vcpu(hyp_vcpus[i]->host_vcpu);
}

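/*
 * Initialise the hyp VM's view of the guest: record the host's 'struct kvm',
 * the number of created vCPUs, and inherit the stage-2 VTCR from the host MMU.
 */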
static void init_pkvm_hyp_vm(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm,
			     unsigned int nr_vcpus)
{
	hyp_vm->host_kvm = host_kvm;
	hyp_vm->kvm.created_vcpus = nr_vcpus;
	hyp_vm->kvm.arch.mmu.vtcr = host_mmu.arch.mmu.vtcr;
}

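/*
 * Initialise a hyp vCPU from its host counterpart: pin the host vCPU's memory
 * as shared, check that the host's vcpu_idx matches, and copy the fields hyp
 * needs (vcpu_id, MMU pointer, configuration flags).
 */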
static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
			      struct pkvm_hyp_vm *hyp_vm,
			      struct kvm_vcpu *host_vcpu,
			      unsigned int vcpu_idx)
{
	int ret = 0;

	if (hyp_pin_shared_mem(host_vcpu, host_vcpu + 1))
		return -EBUSY;

	if (host_vcpu->vcpu_idx != vcpu_idx) {
		ret = -EINVAL;
		goto done;
	}

	hyp_vcpu->host_vcpu = host_vcpu;

	hyp_vcpu->vcpu.kvm = &hyp_vm->kvm;
	hyp_vcpu->vcpu.vcpu_id = READ_ONCE(host_vcpu->vcpu_id);
	hyp_vcpu->vcpu.vcpu_idx = vcpu_idx;

	hyp_vcpu->vcpu.arch.hw_mmu = &hyp_vm->kvm.arch.mmu;
	hyp_vcpu->vcpu.arch.cflags = READ_ONCE(host_vcpu->arch.cflags);
done:
	if (ret)
		unpin_host_vcpu(host_vcpu);
	return ret;
}

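/* Linear scan for the first free VM table slot; -ENOMEM if the table is full. */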
static int find_free_vm_table_entry(struct kvm *host_kvm)
{
	int i;

	for (i = 0; i < KVM_MAX_PVMS; ++i) {
		if (!vm_table[i])
			return i;
	}

	return -ENOMEM;
}

/*
 * Allocate a VM table entry and insert a pointer to the new vm.
 *
 * Return a unique handle to the protected VM on success,
 * negative error code on failure.
 */
static pkvm_handle_t insert_vm_table_entry(struct kvm *host_kvm,
					   struct pkvm_hyp_vm *hyp_vm)
{
	struct kvm_s2_mmu *mmu = &hyp_vm->kvm.arch.mmu;
	int idx;

	hyp_assert_lock_held(&vm_table_lock);

	/*
	 * Initializing protected state might have failed, yet a malicious
	 * host could trigger this function. Thus, ensure that 'vm_table'
	 * exists.
	 */
	if (unlikely(!vm_table))
		return -EINVAL;

	idx = find_free_vm_table_entry(host_kvm);
	if (idx < 0)
		return idx;

	hyp_vm->kvm.arch.pkvm.handle = idx_to_vm_handle(idx);

	/* VMID 0 is reserved for the host */
	atomic64_set(&mmu->vmid.id, idx + 1);

	mmu->arch = &hyp_vm->kvm.arch;
	mmu->pgt = &hyp_vm->pgt;

	vm_table[idx] = hyp_vm;
	return hyp_vm->kvm.arch.pkvm.handle;
}

/*
 * Deallocate and remove the VM table entry corresponding to the handle.
 */
static void remove_vm_table_entry(pkvm_handle_t handle)
{
	hyp_assert_lock_held(&vm_table_lock);
	vm_table[vm_handle_to_idx(handle)] = NULL;
}

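/* Size of the hyp VM structure, including its trailing array of vCPU pointers. */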
static size_t pkvm_get_hyp_vm_size(unsigned int nr_vcpus)
{
	return size_add(sizeof(struct pkvm_hyp_vm),
		size_mul(sizeof(struct pkvm_hyp_vcpu *), nr_vcpus));
}

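/*
 * Map memory donated by the host into hyp. The donation transfers ownership
 * of the pages to hyp and removes them from the host's stage-2, so the host
 * can no longer access their contents. Returns NULL if the address is not
 * page-aligned or the donation fails; map_donated_memory() additionally
 * zeroes the mapping.
 */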
static void *map_donated_memory_noclear(unsigned long host_va, size_t size)
{
	void *va = (void *)kern_hyp_va(host_va);

	if (!PAGE_ALIGNED(va))
		return NULL;

	if (__pkvm_host_donate_hyp(hyp_virt_to_pfn(va),
				   PAGE_ALIGN(size) >> PAGE_SHIFT))
		return NULL;

	return va;
}

static void *map_donated_memory(unsigned long host_va, size_t size)
{
	void *va = map_donated_memory_noclear(host_va, size);

	if (va)
		memset(va, 0, size);

	return va;
}

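/*
 * Return previously donated memory to the host. The dcache is cleaned to the
 * PoC before ownership is transferred back; unmap_donated_memory() also
 * zeroes the memory first so no hyp state leaks to the host.
 */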
static void __unmap_donated_memory(void *va, size_t size)
{
	kvm_flush_dcache_to_poc(va, size);
	WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(va),
				       PAGE_ALIGN(size) >> PAGE_SHIFT));
}

static void unmap_donated_memory(void *va, size_t size)
{
	if (!va)
		return;

	memset(va, 0, size);
	__unmap_donated_memory(va, size);
}

static void unmap_donated_memory_noclear(void *va, size_t size)
{
	if (!va)
		return;

	__unmap_donated_memory(va, size);
}

/*
 * Initialize the hypervisor copy of the protected VM state using the
 * memory donated by the host.
 *
 * Unmaps the donated memory from the host at stage 2.
 *
 * host_kvm: A pointer to the host's struct kvm.
 * vm_hva: The host va of the area being donated for the VM state.
 *	   Must be page aligned.
 * pgd_hva: The host va of the area being donated for the stage-2 PGD for
 *	    the VM. Must be page aligned. Its size is implied by the VM's
 *	    VTCR.
 *
 * Return a unique handle to the protected VM on success,
 * negative error code on failure.
 */
int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
		   unsigned long pgd_hva)
{
	struct pkvm_hyp_vm *hyp_vm = NULL;
	size_t vm_size, pgd_size;
	unsigned int nr_vcpus;
	void *pgd = NULL;
	int ret;

	ret = hyp_pin_shared_mem(host_kvm, host_kvm + 1);
	if (ret)
		return ret;

	nr_vcpus = READ_ONCE(host_kvm->created_vcpus);
	if (nr_vcpus < 1) {
		ret = -EINVAL;
		goto err_unpin_kvm;
	}

	vm_size = pkvm_get_hyp_vm_size(nr_vcpus);
	pgd_size = kvm_pgtable_stage2_pgd_size(host_mmu.arch.mmu.vtcr);

	ret = -ENOMEM;

	hyp_vm = map_donated_memory(vm_hva, vm_size);
	if (!hyp_vm)
		goto err_remove_mappings;

	pgd = map_donated_memory_noclear(pgd_hva, pgd_size);
	if (!pgd)
		goto err_remove_mappings;

	init_pkvm_hyp_vm(host_kvm, hyp_vm, nr_vcpus);

	hyp_spin_lock(&vm_table_lock);
	ret = insert_vm_table_entry(host_kvm, hyp_vm);
	if (ret < 0)
		goto err_unlock;

	ret = kvm_guest_prepare_stage2(hyp_vm, pgd);
	if (ret)
		goto err_remove_vm_table_entry;
	hyp_spin_unlock(&vm_table_lock);

	return hyp_vm->kvm.arch.pkvm.handle;

err_remove_vm_table_entry:
	remove_vm_table_entry(hyp_vm->kvm.arch.pkvm.handle);
err_unlock:
	hyp_spin_unlock(&vm_table_lock);
err_remove_mappings:
	unmap_donated_memory(hyp_vm, vm_size);
	unmap_donated_memory(pgd, pgd_size);
err_unpin_kvm:
	hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
	return ret;
}

/*
 * Initialize the hypervisor copy of the protected vCPU state using the
 * memory donated by the host.
 *
 * handle: The handle for the protected vm.
 * host_vcpu: A pointer to the corresponding host vcpu.
 * vcpu_hva: The host va of the area being donated for the vcpu state.
 *	     Must be page aligned. The size of the area must be equal to
 *	     the page-aligned size of 'struct pkvm_hyp_vcpu'.
 * Return 0 on success, negative error code on failure.
 */
int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
		     unsigned long vcpu_hva)
{
	struct pkvm_hyp_vcpu *hyp_vcpu;
	struct pkvm_hyp_vm *hyp_vm;
	unsigned int idx;
	int ret;

	hyp_vcpu = map_donated_memory(vcpu_hva, sizeof(*hyp_vcpu));
	if (!hyp_vcpu)
		return -ENOMEM;

	hyp_spin_lock(&vm_table_lock);

	hyp_vm = get_vm_by_handle(handle);
	if (!hyp_vm) {
		ret = -ENOENT;
		goto unlock;
	}

	idx = hyp_vm->nr_vcpus;
	if (idx >= hyp_vm->kvm.created_vcpus) {
		ret = -EINVAL;
		goto unlock;
	}

	ret = init_pkvm_hyp_vcpu(hyp_vcpu, hyp_vm, host_vcpu, idx);
	if (ret)
		goto unlock;

	hyp_vm->vcpus[idx] = hyp_vcpu;
	hyp_vm->nr_vcpus++;
unlock:
	hyp_spin_unlock(&vm_table_lock);

	if (ret)
		unmap_donated_memory(hyp_vcpu, sizeof(*hyp_vcpu));

	return ret;
}

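/*
 * Zero a range of donated memory, push its pages onto the teardown memcache
 * for the host to reclaim, and return ownership of the pages to the host.
 */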
static void
teardown_donated_memory(struct kvm_hyp_memcache *mc, void *addr, size_t size)
{
	size = PAGE_ALIGN(size);
	memset(addr, 0, size);

	for (void *start = addr; start < addr + size; start += PAGE_SIZE)
		push_hyp_memcache(mc, start, hyp_virt_to_phys);

	unmap_donated_memory_noclear(addr, size);
}

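/*
 * Tear down the hypervisor copy of a protected VM.
 *
 * Fails with -ENOENT if the handle is unknown and -EBUSY if any of the VM's
 * vCPUs is still loaded. On success, the VMID is flushed, host vCPUs are
 * unpinned, and all donated memory is handed back to the host via the
 * teardown memcache.
 */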
int __pkvm_teardown_vm(pkvm_handle_t handle)
{
	struct kvm_hyp_memcache *mc;
	struct pkvm_hyp_vm *hyp_vm;
	struct kvm *host_kvm;
	unsigned int idx;
	size_t vm_size;
	int err;

	hyp_spin_lock(&vm_table_lock);
	hyp_vm = get_vm_by_handle(handle);
	if (!hyp_vm) {
		err = -ENOENT;
		goto err_unlock;
	}

	if (WARN_ON(hyp_page_count(hyp_vm))) {
		err = -EBUSY;
		goto err_unlock;
	}

	host_kvm = hyp_vm->host_kvm;

	/* Ensure the VMID is clean before it can be reallocated */
	__kvm_tlb_flush_vmid(&hyp_vm->kvm.arch.mmu);
	remove_vm_table_entry(handle);
	hyp_spin_unlock(&vm_table_lock);

	/* Reclaim guest pages (including page-table pages) */
	mc = &host_kvm->arch.pkvm.teardown_mc;
	reclaim_guest_pages(hyp_vm, mc);
	unpin_host_vcpus(hyp_vm->vcpus, hyp_vm->nr_vcpus);

	/* Push the metadata pages to the teardown memcache */
	for (idx = 0; idx < hyp_vm->nr_vcpus; ++idx) {
		struct pkvm_hyp_vcpu *hyp_vcpu = hyp_vm->vcpus[idx];

		teardown_donated_memory(mc, hyp_vcpu, sizeof(*hyp_vcpu));
	}

	vm_size = pkvm_get_hyp_vm_size(hyp_vm->kvm.created_vcpus);
	teardown_donated_memory(mc, hyp_vm, vm_size);
	hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
	return 0;

err_unlock:
	hyp_spin_unlock(&vm_table_lock);
	return err;
}
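
/*
 * Illustrative only (not part of this file): a rough sketch of the order in
 * which the host side is expected to drive the hypercalls implemented above,
 * assuming the usual kvm_call_hyp_nvhe() wrappers:
 *
 *	handle = kvm_call_hyp_nvhe(__pkvm_init_vm, host_kvm, vm_hva, pgd_hva);
 *	for each vCPU:
 *		kvm_call_hyp_nvhe(__pkvm_init_vcpu, handle, host_vcpu, vcpu_hva);
 *	...
 *	kvm_call_hyp_nvhe(__pkvm_teardown_vm, handle);
 *
 * The actual host-side call sites live elsewhere in arch/arm64/kvm.
 */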
653