xref: /linux/arch/arm64/kvm/hyp/nvhe/pkvm.c (revision 9f2c9170934eace462499ba0bfe042cc72900173)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021 Google LLC
 * Author: Fuad Tabba <tabba@google.com>
 */

#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <nvhe/fixed_config.h>
#include <nvhe/mem_protect.h>
#include <nvhe/memory.h>
#include <nvhe/pkvm.h>
#include <nvhe/trap_handler.h>

/* Used by icache_is_vpipt(). */
unsigned long __icache_flags;

/* Used by kvm_get_vttbr(). */
unsigned int kvm_arm_vmid_bits;

/*
 * Set trap register values based on features in ID_AA64PFR0.
 */
static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
{
	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR0_EL1);
	u64 hcr_set = HCR_RW;
	u64 hcr_clear = 0;
	u64 cptr_set = 0;

	/* Protected KVM does not support AArch32 guests. */
	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0),
		PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_ELx_64BIT_ONLY);
	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1),
		PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_ELx_64BIT_ONLY);

	/*
	 * Linux guests assume support for floating-point and Advanced SIMD. Do
	 * not change the trapping behavior for these from the KVM default.
	 */
	BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP),
				PVM_ID_AA64PFR0_ALLOW));
	BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD),
				PVM_ID_AA64PFR0_ALLOW));

	/* Trap RAS unless all current versions are supported */
	if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), feature_ids) <
	    ID_AA64PFR0_EL1_RAS_V1P1) {
		hcr_set |= HCR_TERR | HCR_TEA;
		hcr_clear |= HCR_FIEN;
	}

	/* Trap AMU */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU), feature_ids)) {
		hcr_clear |= HCR_AMVOFFEN;
		cptr_set |= CPTR_EL2_TAM;
	}

	/* Trap SVE */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids))
		cptr_set |= CPTR_EL2_TZ;

	vcpu->arch.hcr_el2 |= hcr_set;
	vcpu->arch.hcr_el2 &= ~hcr_clear;
	vcpu->arch.cptr_el2 |= cptr_set;
}

/*
 * Set trap register values based on features in ID_AA64PFR1.
 */
static void pvm_init_traps_aa64pfr1(struct kvm_vcpu *vcpu)
{
	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR1_EL1);
	u64 hcr_set = 0;
	u64 hcr_clear = 0;

	/* Memory Tagging: Trap and Treat as Untagged if not supported. */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE), feature_ids)) {
		hcr_set |= HCR_TID5;
		hcr_clear |= HCR_DCT | HCR_ATA;
	}

	vcpu->arch.hcr_el2 |= hcr_set;
	vcpu->arch.hcr_el2 &= ~hcr_clear;
}

/*
 * Set trap register values based on features in ID_AA64DFR0.
 */
static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)
{
	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64DFR0_EL1);
	u64 mdcr_set = 0;
	u64 mdcr_clear = 0;
	u64 cptr_set = 0;

	/* Trap/constrain PMU */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), feature_ids)) {
		mdcr_set |= MDCR_EL2_TPM | MDCR_EL2_TPMCR;
		mdcr_clear |= MDCR_EL2_HPME | MDCR_EL2_MTPME |
			      MDCR_EL2_HPMN_MASK;
	}

	/* Trap Debug */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), feature_ids))
		mdcr_set |= MDCR_EL2_TDRA | MDCR_EL2_TDA | MDCR_EL2_TDE;

	/* Trap OS Double Lock */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DoubleLock), feature_ids))
		mdcr_set |= MDCR_EL2_TDOSA;

	/* Trap SPE */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer), feature_ids)) {
		mdcr_set |= MDCR_EL2_TPMS;
		mdcr_clear |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
	}

	/* Trap Trace Filter */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceFilt), feature_ids))
		mdcr_set |= MDCR_EL2_TTRF;

	/* Trap Trace */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), feature_ids))
		cptr_set |= CPTR_EL2_TTA;

	vcpu->arch.mdcr_el2 |= mdcr_set;
	vcpu->arch.mdcr_el2 &= ~mdcr_clear;
	vcpu->arch.cptr_el2 |= cptr_set;
}

/*
 * Set trap register values based on features in ID_AA64MMFR0.
 */
static void pvm_init_traps_aa64mmfr0(struct kvm_vcpu *vcpu)
{
	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64MMFR0_EL1);
	u64 mdcr_set = 0;

	/* Trap Debug Communications Channel registers */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_FGT), feature_ids))
		mdcr_set |= MDCR_EL2_TDCC;

	vcpu->arch.mdcr_el2 |= mdcr_set;
}

/*
 * Set trap register values based on features in ID_AA64MMFR1.
 */
static void pvm_init_traps_aa64mmfr1(struct kvm_vcpu *vcpu)
{
	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64MMFR1_EL1);
	u64 hcr_set = 0;

	/* Trap LOR */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_LO), feature_ids))
		hcr_set |= HCR_TLOR;

	vcpu->arch.hcr_el2 |= hcr_set;
}

/*
 * Set baseline trap register values.
 */
static void pvm_init_trap_regs(struct kvm_vcpu *vcpu)
{
	const u64 hcr_trap_feat_regs = HCR_TID3;
	const u64 hcr_trap_impdef = HCR_TACR | HCR_TIDCP | HCR_TID1;

	/*
	 * Always trap:
	 * - Feature id registers: to control features exposed to guests
	 * - Implementation-defined features
	 */
	vcpu->arch.hcr_el2 |= hcr_trap_feat_regs | hcr_trap_impdef;

	/* Clear res0 and set res1 bits to trap potential new features. */
	vcpu->arch.hcr_el2 &= ~(HCR_RES0);
	vcpu->arch.mdcr_el2 &= ~(MDCR_EL2_RES0);
	vcpu->arch.cptr_el2 |= CPTR_NVHE_EL2_RES1;
	vcpu->arch.cptr_el2 &= ~(CPTR_NVHE_EL2_RES0);
}

/*
 * Initialize trap register values for protected VMs.
 */
void __pkvm_vcpu_init_traps(struct kvm_vcpu *vcpu)
{
	pvm_init_trap_regs(vcpu);
	pvm_init_traps_aa64pfr0(vcpu);
	pvm_init_traps_aa64pfr1(vcpu);
	pvm_init_traps_aa64dfr0(vcpu);
	pvm_init_traps_aa64mmfr0(vcpu);
	pvm_init_traps_aa64mmfr1(vcpu);
}

/*
 * Start the VM table handle at the offset defined instead of at 0.
 * Mainly for sanity checking and debugging.
 */
#define HANDLE_OFFSET 0x1000

static unsigned int vm_handle_to_idx(pkvm_handle_t handle)
{
	return handle - HANDLE_OFFSET;
}

static pkvm_handle_t idx_to_vm_handle(unsigned int idx)
{
	return idx + HANDLE_OFFSET;
}

/*
 * Spinlock protecting state related to the VM table: insertion, removal
 * and lookup of 'vm_table' entries, as well as the VM reference counts
 * taken and dropped in pkvm_load_hyp_vcpu() and pkvm_put_hyp_vcpu().
 */
static DEFINE_HYP_SPINLOCK(vm_table_lock);

/*
 * The table of VM entries for protected VMs in hyp.
 * Allocated at hyp initialization and setup.
 */
static struct pkvm_hyp_vm **vm_table;

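/* Set the VM table. Called once at hyp initialization; must not already be set. */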
void pkvm_hyp_vm_table_init(void *tbl)
{
	WARN_ON(vm_table);
	vm_table = tbl;
}

/*
 * Return the hyp vm structure corresponding to the handle.
 */
static struct pkvm_hyp_vm *get_vm_by_handle(pkvm_handle_t handle)
{
	unsigned int idx = vm_handle_to_idx(handle);

	if (unlikely(idx >= KVM_MAX_PVMS))
		return NULL;

	return vm_table[idx];
}

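/*
 * Look up the hyp vcpu for the given VM handle and vcpu index, and take a
 * reference on the VM so that it cannot be torn down while the vcpu is in
 * use. Returns NULL if the handle or the vcpu index is invalid. The
 * reference is dropped by pkvm_put_hyp_vcpu().
 */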
struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
					 unsigned int vcpu_idx)
{
	struct pkvm_hyp_vcpu *hyp_vcpu = NULL;
	struct pkvm_hyp_vm *hyp_vm;

	hyp_spin_lock(&vm_table_lock);
	hyp_vm = get_vm_by_handle(handle);
	if (!hyp_vm || hyp_vm->nr_vcpus <= vcpu_idx)
		goto unlock;

	hyp_vcpu = hyp_vm->vcpus[vcpu_idx];
	hyp_page_ref_inc(hyp_virt_to_page(hyp_vm));
unlock:
	hyp_spin_unlock(&vm_table_lock);
	return hyp_vcpu;
}

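/* Drop the VM reference taken by pkvm_load_hyp_vcpu(). */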
void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	struct pkvm_hyp_vm *hyp_vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu);

	hyp_spin_lock(&vm_table_lock);
	hyp_page_ref_dec(hyp_virt_to_page(hyp_vm));
	hyp_spin_unlock(&vm_table_lock);
}

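/* Release the pin on the host vcpu taken by init_pkvm_hyp_vcpu(). */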
static void unpin_host_vcpu(struct kvm_vcpu *host_vcpu)
{
	if (host_vcpu)
		hyp_unpin_shared_mem(host_vcpu, host_vcpu + 1);
}

static void unpin_host_vcpus(struct pkvm_hyp_vcpu *hyp_vcpus[],
			     unsigned int nr_vcpus)
{
	int i;

	for (i = 0; i < nr_vcpus; i++)
		unpin_host_vcpu(hyp_vcpus[i]->host_vcpu);
}

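/* Initialize the hyp copy of the VM state from the host's 'struct kvm'. */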
static void init_pkvm_hyp_vm(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm,
			     unsigned int nr_vcpus)
{
	hyp_vm->host_kvm = host_kvm;
	hyp_vm->kvm.created_vcpus = nr_vcpus;
	hyp_vm->kvm.arch.vtcr = host_mmu.arch.vtcr;
}

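/*
 * Initialize the hyp copy of the vcpu state: pin the host vcpu so it stays
 * shared with hyp, check that the host-provided index is consistent, and
 * link the hyp vcpu to the hyp VM and its stage-2 MMU.
 */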
static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
			      struct pkvm_hyp_vm *hyp_vm,
			      struct kvm_vcpu *host_vcpu,
			      unsigned int vcpu_idx)
{
	int ret = 0;

	if (hyp_pin_shared_mem(host_vcpu, host_vcpu + 1))
		return -EBUSY;

	if (host_vcpu->vcpu_idx != vcpu_idx) {
		ret = -EINVAL;
		goto done;
	}

	hyp_vcpu->host_vcpu = host_vcpu;

	hyp_vcpu->vcpu.kvm = &hyp_vm->kvm;
	hyp_vcpu->vcpu.vcpu_id = READ_ONCE(host_vcpu->vcpu_id);
	hyp_vcpu->vcpu.vcpu_idx = vcpu_idx;

	hyp_vcpu->vcpu.arch.hw_mmu = &hyp_vm->kvm.arch.mmu;
	hyp_vcpu->vcpu.arch.cflags = READ_ONCE(host_vcpu->arch.cflags);
done:
	if (ret)
		unpin_host_vcpu(host_vcpu);
	return ret;
}

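/*
 * Return the index of the first free VM table entry,
 * or -ENOMEM if the table is full.
 */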
static int find_free_vm_table_entry(struct kvm *host_kvm)
{
	int i;

	for (i = 0; i < KVM_MAX_PVMS; ++i) {
		if (!vm_table[i])
			return i;
	}

	return -ENOMEM;
}

/*
 * Allocate a VM table entry and insert a pointer to the new vm.
 *
 * Return a unique handle to the protected VM on success,
 * negative error code on failure.
 */
static pkvm_handle_t insert_vm_table_entry(struct kvm *host_kvm,
					   struct pkvm_hyp_vm *hyp_vm)
{
	struct kvm_s2_mmu *mmu = &hyp_vm->kvm.arch.mmu;
	int idx;

	hyp_assert_lock_held(&vm_table_lock);

	/*
	 * Initializing protected state might have failed, yet a malicious
	 * host could trigger this function. Thus, ensure that 'vm_table'
	 * exists.
	 */
	if (unlikely(!vm_table))
		return -EINVAL;

	idx = find_free_vm_table_entry(host_kvm);
	if (idx < 0)
		return idx;

	hyp_vm->kvm.arch.pkvm.handle = idx_to_vm_handle(idx);

	/* VMID 0 is reserved for the host */
	atomic64_set(&mmu->vmid.id, idx + 1);

	mmu->arch = &hyp_vm->kvm.arch;
	mmu->pgt = &hyp_vm->pgt;

	vm_table[idx] = hyp_vm;
	return hyp_vm->kvm.arch.pkvm.handle;
}

/*
 * Deallocate and remove the VM table entry corresponding to the handle.
 */
static void remove_vm_table_entry(pkvm_handle_t handle)
{
	hyp_assert_lock_held(&vm_table_lock);
	vm_table[vm_handle_to_idx(handle)] = NULL;
}

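/*
 * Size of the hyp VM structure, including the trailing array of
 * 'nr_vcpus' vcpu pointers.
 */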
static size_t pkvm_get_hyp_vm_size(unsigned int nr_vcpus)
{
	return size_add(sizeof(struct pkvm_hyp_vm),
		size_mul(sizeof(struct pkvm_hyp_vcpu *), nr_vcpus));
}

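/*
 * Accept memory donated by the host: transfer ownership of the pages
 * backing 'host_va' to hyp and return the corresponding hyp VA, or NULL
 * if the address is not page aligned or the donation fails.
 */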
static void *map_donated_memory_noclear(unsigned long host_va, size_t size)
{
	void *va = (void *)kern_hyp_va(host_va);

	if (!PAGE_ALIGNED(va))
		return NULL;

	if (__pkvm_host_donate_hyp(hyp_virt_to_pfn(va),
				   PAGE_ALIGN(size) >> PAGE_SHIFT))
		return NULL;

	return va;
}

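/* As above, but also zero the donated memory before use. */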
static void *map_donated_memory(unsigned long host_va, size_t size)
{
	void *va = map_donated_memory_noclear(host_va, size);

	if (va)
		memset(va, 0, size);

	return va;
}

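/* Return ownership of previously donated pages to the host. */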
static void __unmap_donated_memory(void *va, size_t size)
{
	WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(va),
				       PAGE_ALIGN(size) >> PAGE_SHIFT));
}

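/*
 * Zero the memory before handing it back so that no hyp state is leaked
 * to the host.
 */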
static void unmap_donated_memory(void *va, size_t size)
{
	if (!va)
		return;

	memset(va, 0, size);
	__unmap_donated_memory(va, size);
}

static void unmap_donated_memory_noclear(void *va, size_t size)
{
	if (!va)
		return;

	__unmap_donated_memory(va, size);
}

/*
 * Initialize the hypervisor copy of the protected VM state using the
 * memory donated by the host.
 *
 * Unmaps the donated memory from the host at stage 2.
 *
 * host_kvm: A pointer to the host's struct kvm.
 * vm_hva: The host va of the area being donated for the VM state.
 *	   Must be page aligned.
 * pgd_hva: The host va of the area being donated for the stage-2 PGD for
 *	    the VM. Must be page aligned. Its size is implied by the VM's
 *	    VTCR.
 *
 * Return a unique handle to the protected VM on success,
 * negative error code on failure.
 */
int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
		   unsigned long pgd_hva)
{
	struct pkvm_hyp_vm *hyp_vm = NULL;
	size_t vm_size, pgd_size;
	unsigned int nr_vcpus;
	void *pgd = NULL;
	int ret;

	ret = hyp_pin_shared_mem(host_kvm, host_kvm + 1);
	if (ret)
		return ret;

	nr_vcpus = READ_ONCE(host_kvm->created_vcpus);
	if (nr_vcpus < 1) {
		ret = -EINVAL;
		goto err_unpin_kvm;
	}

	vm_size = pkvm_get_hyp_vm_size(nr_vcpus);
	pgd_size = kvm_pgtable_stage2_pgd_size(host_mmu.arch.vtcr);

	ret = -ENOMEM;

	hyp_vm = map_donated_memory(vm_hva, vm_size);
	if (!hyp_vm)
		goto err_remove_mappings;

	pgd = map_donated_memory_noclear(pgd_hva, pgd_size);
	if (!pgd)
		goto err_remove_mappings;

	init_pkvm_hyp_vm(host_kvm, hyp_vm, nr_vcpus);

	hyp_spin_lock(&vm_table_lock);
	ret = insert_vm_table_entry(host_kvm, hyp_vm);
	if (ret < 0)
		goto err_unlock;

	ret = kvm_guest_prepare_stage2(hyp_vm, pgd);
	if (ret)
		goto err_remove_vm_table_entry;
	hyp_spin_unlock(&vm_table_lock);

	return hyp_vm->kvm.arch.pkvm.handle;

err_remove_vm_table_entry:
	remove_vm_table_entry(hyp_vm->kvm.arch.pkvm.handle);
err_unlock:
	hyp_spin_unlock(&vm_table_lock);
err_remove_mappings:
	unmap_donated_memory(hyp_vm, vm_size);
	unmap_donated_memory(pgd, pgd_size);
err_unpin_kvm:
	hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
	return ret;
}

/*
 * Initialize the hypervisor copy of the protected vCPU state using the
 * memory donated by the host.
 *
 * handle: The handle for the protected vm.
 * host_vcpu: A pointer to the corresponding host vcpu.
 * vcpu_hva: The host va of the area being donated for the vcpu state.
 *	     Must be page aligned. The size of the area must be equal to
 *	     the page-aligned size of 'struct pkvm_hyp_vcpu'.
 * Return 0 on success, negative error code on failure.
 */
int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
		     unsigned long vcpu_hva)
{
	struct pkvm_hyp_vcpu *hyp_vcpu;
	struct pkvm_hyp_vm *hyp_vm;
	unsigned int idx;
	int ret;

	hyp_vcpu = map_donated_memory(vcpu_hva, sizeof(*hyp_vcpu));
	if (!hyp_vcpu)
		return -ENOMEM;

	hyp_spin_lock(&vm_table_lock);

	hyp_vm = get_vm_by_handle(handle);
	if (!hyp_vm) {
		ret = -ENOENT;
		goto unlock;
	}

	idx = hyp_vm->nr_vcpus;
	if (idx >= hyp_vm->kvm.created_vcpus) {
		ret = -EINVAL;
		goto unlock;
	}

	ret = init_pkvm_hyp_vcpu(hyp_vcpu, hyp_vm, host_vcpu, idx);
	if (ret)
		goto unlock;

	hyp_vm->vcpus[idx] = hyp_vcpu;
	hyp_vm->nr_vcpus++;
unlock:
	hyp_spin_unlock(&vm_table_lock);

	if (ret)
		unmap_donated_memory(hyp_vcpu, sizeof(*hyp_vcpu));

	return ret;
}

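/*
 * Scrub previously donated memory, push its pages onto the host's teardown
 * memcache so the host can reclaim them, and return ownership to the host.
 */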
static void
teardown_donated_memory(struct kvm_hyp_memcache *mc, void *addr, size_t size)
{
	size = PAGE_ALIGN(size);
	memset(addr, 0, size);

	for (void *start = addr; start < addr + size; start += PAGE_SIZE)
		push_hyp_memcache(mc, start, hyp_virt_to_phys);

	unmap_donated_memory_noclear(addr, size);
}

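/*
 * Tear down the protected VM identified by the handle: fails with -EBUSY if
 * any of its vcpus is still loaded. Flushes the VM's TLB entries so the VMID
 * can be reused, reclaims the guest's pages, and returns all donated metadata
 * pages to the host via the teardown memcache.
 */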
int __pkvm_teardown_vm(pkvm_handle_t handle)
{
	struct kvm_hyp_memcache *mc;
	struct pkvm_hyp_vm *hyp_vm;
	struct kvm *host_kvm;
	unsigned int idx;
	size_t vm_size;
	int err;

	hyp_spin_lock(&vm_table_lock);
	hyp_vm = get_vm_by_handle(handle);
	if (!hyp_vm) {
		err = -ENOENT;
		goto err_unlock;
	}

	if (WARN_ON(hyp_page_count(hyp_vm))) {
		err = -EBUSY;
		goto err_unlock;
	}

	host_kvm = hyp_vm->host_kvm;

	/* Ensure the VMID is clean before it can be reallocated */
	__kvm_tlb_flush_vmid(&hyp_vm->kvm.arch.mmu);
	remove_vm_table_entry(handle);
	hyp_spin_unlock(&vm_table_lock);

	/* Reclaim guest pages (including page-table pages) */
	mc = &host_kvm->arch.pkvm.teardown_mc;
	reclaim_guest_pages(hyp_vm, mc);
	unpin_host_vcpus(hyp_vm->vcpus, hyp_vm->nr_vcpus);

	/* Push the metadata pages to the teardown memcache */
	for (idx = 0; idx < hyp_vm->nr_vcpus; ++idx) {
		struct pkvm_hyp_vcpu *hyp_vcpu = hyp_vm->vcpus[idx];

		teardown_donated_memory(mc, hyp_vcpu, sizeof(*hyp_vcpu));
	}

	vm_size = pkvm_get_hyp_vm_size(hyp_vm->kvm.created_vcpus);
	teardown_donated_memory(mc, hyp_vm, vm_size);
	hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
	return 0;

err_unlock:
	hyp_spin_unlock(&vm_table_lock);
	return err;
}