// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021 Google LLC
 * Author: Fuad Tabba <tabba@google.com>
 */

#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <nvhe/fixed_config.h>
#include <nvhe/mem_protect.h>
#include <nvhe/memory.h>
#include <nvhe/pkvm.h>
#include <nvhe/trap_handler.h>

/* Used by icache_is_aliasing(). */
unsigned long __icache_flags;

/* Used by kvm_get_vttbr(). */
unsigned int kvm_arm_vmid_bits;

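/* The host's maximum SVE vector length, used when sizing host SVE state. */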
unsigned int kvm_host_sve_max_vl;

/*
 * Set trap register values based on features in ID_AA64PFR0.
 */
static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
{
	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR0_EL1);
	u64 hcr_set = HCR_RW;
	u64 hcr_clear = 0;
	u64 cptr_set = 0;
	u64 cptr_clear = 0;

	/* Protected KVM does not support AArch32 guests. */
	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0),
		PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_EL0_IMP);
	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1),
		PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_EL1_IMP);

	/*
	 * Linux guests assume support for floating-point and Advanced SIMD. Do
	 * not change the trapping behavior for these from the KVM default.
	 */
	BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP),
				PVM_ID_AA64PFR0_ALLOW));
	BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD),
				PVM_ID_AA64PFR0_ALLOW));

	if (has_hvhe())
		hcr_set |= HCR_E2H;

	/* Trap RAS unless all current versions are supported */
	if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), feature_ids) <
	    ID_AA64PFR0_EL1_RAS_V1P1) {
		hcr_set |= HCR_TERR | HCR_TEA;
		hcr_clear |= HCR_FIEN;
	}

	/* Trap AMU */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU), feature_ids)) {
		hcr_clear |= HCR_AMVOFFEN;
		cptr_set |= CPTR_EL2_TAM;
	}

	/* Trap SVE */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids)) {
		if (has_hvhe())
			cptr_clear |= CPACR_ELx_ZEN;
		else
			cptr_set |= CPTR_EL2_TZ;
	}

	vcpu->arch.hcr_el2 |= hcr_set;
	vcpu->arch.hcr_el2 &= ~hcr_clear;
	vcpu->arch.cptr_el2 |= cptr_set;
	vcpu->arch.cptr_el2 &= ~cptr_clear;
}

/*
 * Set trap register values based on features in ID_AA64PFR1.
 */
static void pvm_init_traps_aa64pfr1(struct kvm_vcpu *vcpu)
{
	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR1_EL1);
	u64 hcr_set = 0;
	u64 hcr_clear = 0;

	/* Memory Tagging: Trap and Treat as Untagged if not supported. */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE), feature_ids)) {
		hcr_set |= HCR_TID5;
		hcr_clear |= HCR_DCT | HCR_ATA;
	}

	vcpu->arch.hcr_el2 |= hcr_set;
	vcpu->arch.hcr_el2 &= ~hcr_clear;
}

/*
 * Set trap register values based on features in ID_AA64DFR0.
 */
static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)
{
	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64DFR0_EL1);
	u64 mdcr_set = 0;
	u64 mdcr_clear = 0;
	u64 cptr_set = 0;

	/* Trap/constrain PMU */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), feature_ids)) {
		mdcr_set |= MDCR_EL2_TPM | MDCR_EL2_TPMCR;
		mdcr_clear |= MDCR_EL2_HPME | MDCR_EL2_MTPME |
			      MDCR_EL2_HPMN_MASK;
	}

	/* Trap Debug */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), feature_ids))
		mdcr_set |= MDCR_EL2_TDRA | MDCR_EL2_TDA | MDCR_EL2_TDE;

	/* Trap OS Double Lock */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DoubleLock), feature_ids))
		mdcr_set |= MDCR_EL2_TDOSA;

	/* Trap SPE */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer), feature_ids)) {
		mdcr_set |= MDCR_EL2_TPMS;
		mdcr_clear |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
	}

	/* Trap Trace Filter */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceFilt), feature_ids))
		mdcr_set |= MDCR_EL2_TTRF;

	/* Trap Trace */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), feature_ids)) {
		if (has_hvhe())
			cptr_set |= CPACR_EL1_TTA;
		else
			cptr_set |= CPTR_EL2_TTA;
	}

	/* Trap External Trace */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_ExtTrcBuff), feature_ids))
		mdcr_clear |= MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT;

	vcpu->arch.mdcr_el2 |= mdcr_set;
	vcpu->arch.mdcr_el2 &= ~mdcr_clear;
	vcpu->arch.cptr_el2 |= cptr_set;
}

/*
 * Set trap register values based on features in ID_AA64MMFR0.
 */
static void pvm_init_traps_aa64mmfr0(struct kvm_vcpu *vcpu)
{
	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64MMFR0_EL1);
	u64 mdcr_set = 0;

	/* Trap Debug Communications Channel registers */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_FGT), feature_ids))
		mdcr_set |= MDCR_EL2_TDCC;

	vcpu->arch.mdcr_el2 |= mdcr_set;
}

/*
 * Set trap register values based on features in ID_AA64MMFR1.
 */
static void pvm_init_traps_aa64mmfr1(struct kvm_vcpu *vcpu)
{
	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64MMFR1_EL1);
	u64 hcr_set = 0;

	/* Trap LOR */
	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_LO), feature_ids))
		hcr_set |= HCR_TLOR;

	vcpu->arch.hcr_el2 |= hcr_set;
}

/*
 * Set baseline trap register values.
 */
static void pvm_init_trap_regs(struct kvm_vcpu *vcpu)
{
	const u64 hcr_trap_feat_regs = HCR_TID3;
	const u64 hcr_trap_impdef = HCR_TACR | HCR_TIDCP | HCR_TID1;

	/*
	 * Always trap:
	 * - Feature id registers: to control features exposed to guests
	 * - Implementation-defined features
	 */
	vcpu->arch.hcr_el2 |= hcr_trap_feat_regs | hcr_trap_impdef;

	/* Clear res0 and set res1 bits to trap potential new features. */
	vcpu->arch.hcr_el2 &= ~(HCR_RES0);
	vcpu->arch.mdcr_el2 &= ~(MDCR_EL2_RES0);
	if (!has_hvhe()) {
		vcpu->arch.cptr_el2 |= CPTR_NVHE_EL2_RES1;
		vcpu->arch.cptr_el2 &= ~(CPTR_NVHE_EL2_RES0);
	}
}

/*
 * Initialize trap register values in protected mode.
 */
void __pkvm_vcpu_init_traps(struct kvm_vcpu *vcpu)
{
	pvm_init_trap_regs(vcpu);
	pvm_init_traps_aa64pfr0(vcpu);
	pvm_init_traps_aa64pfr1(vcpu);
	pvm_init_traps_aa64dfr0(vcpu);
	pvm_init_traps_aa64mmfr0(vcpu);
	pvm_init_traps_aa64mmfr1(vcpu);
}

/*
 * Handles for protected VMs start at this offset rather than at 0,
 * mainly as a sanity check and as a debugging aid.
 */
#define HANDLE_OFFSET 0x1000

static unsigned int vm_handle_to_idx(pkvm_handle_t handle)
{
	return handle - HANDLE_OFFSET;
}

static pkvm_handle_t idx_to_vm_handle(unsigned int idx)
{
	return idx + HANDLE_OFFSET;
}

/*
 * Spinlock for protecting state related to the VM table. Protects writes
 * to 'vm_table', as well as the VM reference counts and the per-VM vCPU
 * bookkeeping done while holding it.
 */
static DEFINE_HYP_SPINLOCK(vm_table_lock);

/*
 * The table of VM entries for protected VMs in hyp.
 * Allocated at hyp initialization and setup.
 */
static struct pkvm_hyp_vm **vm_table;

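/*
 * Set the VM table used to track protected VMs at hyp. Expected to be
 * called once, during hyp initialization; the WARN_ON() guards against
 * the table being set twice.
 */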
void pkvm_hyp_vm_table_init(void *tbl)
{
	WARN_ON(vm_table);
	vm_table = tbl;
}

/*
 * Return the hyp vm structure corresponding to the handle.
 */
static struct pkvm_hyp_vm *get_vm_by_handle(pkvm_handle_t handle)
{
	unsigned int idx = vm_handle_to_idx(handle);

	if (unlikely(idx >= KVM_MAX_PVMS))
		return NULL;

	return vm_table[idx];
}

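/*
 * Return the hyp vCPU corresponding to (handle, vcpu_idx), or NULL if
 * either is invalid. Takes a reference on the page backing the VM to keep
 * it alive until pkvm_put_hyp_vcpu() is called.
 */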
struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
					 unsigned int vcpu_idx)
{
	struct pkvm_hyp_vcpu *hyp_vcpu = NULL;
	struct pkvm_hyp_vm *hyp_vm;

	hyp_spin_lock(&vm_table_lock);
	hyp_vm = get_vm_by_handle(handle);
	if (!hyp_vm || hyp_vm->nr_vcpus <= vcpu_idx)
		goto unlock;

	hyp_vcpu = hyp_vm->vcpus[vcpu_idx];
	hyp_page_ref_inc(hyp_virt_to_page(hyp_vm));
unlock:
	hyp_spin_unlock(&vm_table_lock);
	return hyp_vcpu;
}

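/* Drop the reference taken on the VM by pkvm_load_hyp_vcpu(). */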
void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	struct pkvm_hyp_vm *hyp_vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu);

	hyp_spin_lock(&vm_table_lock);
	hyp_page_ref_dec(hyp_virt_to_page(hyp_vm));
	hyp_spin_unlock(&vm_table_lock);
}

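/* Undo the pinning of the host vCPU memory done by init_pkvm_hyp_vcpu(). */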
static void unpin_host_vcpu(struct kvm_vcpu *host_vcpu)
{
	if (host_vcpu)
		hyp_unpin_shared_mem(host_vcpu, host_vcpu + 1);
}

static void unpin_host_vcpus(struct pkvm_hyp_vcpu *hyp_vcpus[],
			     unsigned int nr_vcpus)
{
	int i;

	for (i = 0; i < nr_vcpus; i++)
		unpin_host_vcpu(hyp_vcpus[i]->host_vcpu);
}

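/* Initialize the hyp copy of the VM state from the host's 'struct kvm'. */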
static void init_pkvm_hyp_vm(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm,
			     unsigned int nr_vcpus)
{
	hyp_vm->host_kvm = host_kvm;
	hyp_vm->kvm.created_vcpus = nr_vcpus;
	hyp_vm->kvm.arch.mmu.vtcr = host_mmu.arch.mmu.vtcr;
}

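/*
 * Initialize the hyp vCPU state, pinning the host's vCPU structure so that
 * it cannot be unshared while hyp holds a pointer to it. Fails if the host
 * vCPU's index does not match the expected one.
 */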
static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
			      struct pkvm_hyp_vm *hyp_vm,
			      struct kvm_vcpu *host_vcpu,
			      unsigned int vcpu_idx)
{
	int ret = 0;

	if (hyp_pin_shared_mem(host_vcpu, host_vcpu + 1))
		return -EBUSY;

	if (host_vcpu->vcpu_idx != vcpu_idx) {
		ret = -EINVAL;
		goto done;
	}

	hyp_vcpu->host_vcpu = host_vcpu;

	hyp_vcpu->vcpu.kvm = &hyp_vm->kvm;
	hyp_vcpu->vcpu.vcpu_id = READ_ONCE(host_vcpu->vcpu_id);
	hyp_vcpu->vcpu.vcpu_idx = vcpu_idx;

	hyp_vcpu->vcpu.arch.hw_mmu = &hyp_vm->kvm.arch.mmu;
	hyp_vcpu->vcpu.arch.cflags = READ_ONCE(host_vcpu->arch.cflags);
done:
	if (ret)
		unpin_host_vcpu(host_vcpu);
	return ret;
}

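/* Return the index of a free entry in the VM table, or -ENOMEM if it is full. */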
static int find_free_vm_table_entry(struct kvm *host_kvm)
{
	int i;

	for (i = 0; i < KVM_MAX_PVMS; ++i) {
		if (!vm_table[i])
			return i;
	}

	return -ENOMEM;
}

/*
 * Allocate a VM table entry and insert a pointer to the new vm.
 *
 * Return a unique handle to the protected VM on success,
 * negative error code on failure.
 */
static pkvm_handle_t insert_vm_table_entry(struct kvm *host_kvm,
					   struct pkvm_hyp_vm *hyp_vm)
{
	struct kvm_s2_mmu *mmu = &hyp_vm->kvm.arch.mmu;
	int idx;

	hyp_assert_lock_held(&vm_table_lock);

	/*
	 * Initializing protected state might have failed, yet a malicious
	 * host could trigger this function. Thus, ensure that 'vm_table'
	 * exists.
	 */
	if (unlikely(!vm_table))
		return -EINVAL;

	idx = find_free_vm_table_entry(host_kvm);
	if (idx < 0)
		return idx;

	hyp_vm->kvm.arch.pkvm.handle = idx_to_vm_handle(idx);

	/* VMID 0 is reserved for the host */
	atomic64_set(&mmu->vmid.id, idx + 1);

	mmu->arch = &hyp_vm->kvm.arch;
	mmu->pgt = &hyp_vm->pgt;

	vm_table[idx] = hyp_vm;
	return hyp_vm->kvm.arch.pkvm.handle;
}

/*
 * Deallocate and remove the VM table entry corresponding to the handle.
 */
static void remove_vm_table_entry(pkvm_handle_t handle)
{
	hyp_assert_lock_held(&vm_table_lock);
	vm_table[vm_handle_to_idx(handle)] = NULL;
}

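/* Size of the hyp VM state: the VM structure plus one vCPU pointer per vCPU. */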
static size_t pkvm_get_hyp_vm_size(unsigned int nr_vcpus)
{
	return size_add(sizeof(struct pkvm_hyp_vm),
		size_mul(sizeof(struct pkvm_hyp_vcpu *), nr_vcpus));
}

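/*
 * Accept memory donated by the host and return its hyp va without zeroing
 * it. The donation unmaps the memory from the host at stage 2. Returns
 * NULL if the va is not page-aligned or if the donation fails.
 */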
static void *map_donated_memory_noclear(unsigned long host_va, size_t size)
{
	void *va = (void *)kern_hyp_va(host_va);

	if (!PAGE_ALIGNED(va))
		return NULL;

	if (__pkvm_host_donate_hyp(hyp_virt_to_pfn(va),
				   PAGE_ALIGN(size) >> PAGE_SHIFT))
		return NULL;

	return va;
}

static void *map_donated_memory(unsigned long host_va, size_t size)
{
	void *va = map_donated_memory_noclear(host_va, size);

	if (va)
		memset(va, 0, size);

	return va;
}

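/*
 * Return previously donated memory to the host. The cache is flushed to the
 * PoC first so that the host observes the current contents: zeroes for the
 * clearing variant below, the unmodified contents for the _noclear one.
 */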
static void __unmap_donated_memory(void *va, size_t size)
{
	kvm_flush_dcache_to_poc(va, size);
	WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(va),
				       PAGE_ALIGN(size) >> PAGE_SHIFT));
}

static void unmap_donated_memory(void *va, size_t size)
{
	if (!va)
		return;

	memset(va, 0, size);
	__unmap_donated_memory(va, size);
}

static void unmap_donated_memory_noclear(void *va, size_t size)
{
	if (!va)
		return;

	__unmap_donated_memory(va, size);
}

/*
 * Initialize the hypervisor copy of the protected VM state using the
 * memory donated by the host.
 *
 * Unmaps the donated memory from the host at stage 2.
 *
 * host_kvm: A pointer to the host's struct kvm.
 * vm_hva: The host va of the area being donated for the VM state.
 *	   Must be page aligned.
 * pgd_hva: The host va of the area being donated for the stage-2 PGD for
 *	    the VM. Must be page aligned. Its size is implied by the VM's
 *	    VTCR.
 *
 * Return a unique handle to the protected VM on success,
 * negative error code on failure.
 */
int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
		   unsigned long pgd_hva)
{
	struct pkvm_hyp_vm *hyp_vm = NULL;
	size_t vm_size, pgd_size;
	unsigned int nr_vcpus;
	void *pgd = NULL;
	int ret;

	ret = hyp_pin_shared_mem(host_kvm, host_kvm + 1);
	if (ret)
		return ret;

	nr_vcpus = READ_ONCE(host_kvm->created_vcpus);
	if (nr_vcpus < 1) {
		ret = -EINVAL;
		goto err_unpin_kvm;
	}

	vm_size = pkvm_get_hyp_vm_size(nr_vcpus);
	pgd_size = kvm_pgtable_stage2_pgd_size(host_mmu.arch.mmu.vtcr);

	ret = -ENOMEM;

	hyp_vm = map_donated_memory(vm_hva, vm_size);
	if (!hyp_vm)
		goto err_remove_mappings;

	pgd = map_donated_memory_noclear(pgd_hva, pgd_size);
	if (!pgd)
		goto err_remove_mappings;

	init_pkvm_hyp_vm(host_kvm, hyp_vm, nr_vcpus);

	hyp_spin_lock(&vm_table_lock);
	ret = insert_vm_table_entry(host_kvm, hyp_vm);
	if (ret < 0)
		goto err_unlock;

	ret = kvm_guest_prepare_stage2(hyp_vm, pgd);
	if (ret)
		goto err_remove_vm_table_entry;
	hyp_spin_unlock(&vm_table_lock);

	return hyp_vm->kvm.arch.pkvm.handle;

err_remove_vm_table_entry:
	remove_vm_table_entry(hyp_vm->kvm.arch.pkvm.handle);
err_unlock:
	hyp_spin_unlock(&vm_table_lock);
err_remove_mappings:
	unmap_donated_memory(hyp_vm, vm_size);
	unmap_donated_memory(pgd, pgd_size);
err_unpin_kvm:
	hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
	return ret;
}

/*
 * Initialize the hypervisor copy of the protected vCPU state using the
 * memory donated by the host.
 *
 * handle: The handle for the protected vm.
 * host_vcpu: A pointer to the corresponding host vcpu.
 * vcpu_hva: The host va of the area being donated for the vcpu state.
 *	     Must be page aligned. The size of the area must be equal to
 *	     the page-aligned size of 'struct pkvm_hyp_vcpu'.
 * Return 0 on success, negative error code on failure.
 */
int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
		     unsigned long vcpu_hva)
{
	struct pkvm_hyp_vcpu *hyp_vcpu;
	struct pkvm_hyp_vm *hyp_vm;
	unsigned int idx;
	int ret;

	hyp_vcpu = map_donated_memory(vcpu_hva, sizeof(*hyp_vcpu));
	if (!hyp_vcpu)
		return -ENOMEM;

	hyp_spin_lock(&vm_table_lock);

	hyp_vm = get_vm_by_handle(handle);
	if (!hyp_vm) {
		ret = -ENOENT;
		goto unlock;
	}

	idx = hyp_vm->nr_vcpus;
	if (idx >= hyp_vm->kvm.created_vcpus) {
		ret = -EINVAL;
		goto unlock;
	}

	ret = init_pkvm_hyp_vcpu(hyp_vcpu, hyp_vm, host_vcpu, idx);
	if (ret)
		goto unlock;

	hyp_vm->vcpus[idx] = hyp_vcpu;
	hyp_vm->nr_vcpus++;
unlock:
	hyp_spin_unlock(&vm_table_lock);

	/* Do not touch 'hyp_vcpu' past this point if it has been unmapped. */
	if (ret) {
		unmap_donated_memory(hyp_vcpu, sizeof(*hyp_vcpu));
		return ret;
	}

	hyp_vcpu->vcpu.arch.cptr_el2 = kvm_get_reset_cptr_el2(&hyp_vcpu->vcpu);

	return 0;
}

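/*
 * Zero a range of donated hyp memory and push its pages to the teardown
 * memcache for the host to reclaim, then remove the hyp mapping without
 * touching the (already zeroed) contents again.
 */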
static void
teardown_donated_memory(struct kvm_hyp_memcache *mc, void *addr, size_t size)
{
	size = PAGE_ALIGN(size);
	memset(addr, 0, size);

	for (void *start = addr; start < addr + size; start += PAGE_SIZE)
		push_hyp_memcache(mc, start, hyp_virt_to_phys);

	unmap_donated_memory_noclear(addr, size);
}

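/*
 * Tear down the protected VM associated with the handle: invalidate its
 * VMID, reclaim the guest's pages, and return all donated VM and vCPU
 * metadata pages to the host. Fails if the handle is invalid or the VM is
 * still in use (i.e. one of its vCPUs is currently loaded).
 */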
int __pkvm_teardown_vm(pkvm_handle_t handle)
{
	struct kvm_hyp_memcache *mc;
	struct pkvm_hyp_vm *hyp_vm;
	struct kvm *host_kvm;
	unsigned int idx;
	size_t vm_size;
	int err;

	hyp_spin_lock(&vm_table_lock);
	hyp_vm = get_vm_by_handle(handle);
	if (!hyp_vm) {
		err = -ENOENT;
		goto err_unlock;
	}

	if (WARN_ON(hyp_page_count(hyp_vm))) {
		err = -EBUSY;
		goto err_unlock;
	}

	host_kvm = hyp_vm->host_kvm;

	/* Ensure the VMID is clean before it can be reallocated */
	__kvm_tlb_flush_vmid(&hyp_vm->kvm.arch.mmu);
	remove_vm_table_entry(handle);
	hyp_spin_unlock(&vm_table_lock);

	/* Reclaim guest pages (including page-table pages) */
	mc = &host_kvm->arch.pkvm.teardown_mc;
	reclaim_guest_pages(hyp_vm, mc);
	unpin_host_vcpus(hyp_vm->vcpus, hyp_vm->nr_vcpus);

	/* Push the metadata pages to the teardown memcache */
	for (idx = 0; idx < hyp_vm->nr_vcpus; ++idx) {
		struct pkvm_hyp_vcpu *hyp_vcpu = hyp_vm->vcpus[idx];

		teardown_donated_memory(mc, hyp_vcpu, sizeof(*hyp_vcpu));
	}

	vm_size = pkvm_get_hyp_vm_size(hyp_vm->kvm.created_vcpus);
	teardown_donated_memory(mc, hyp_vm, vm_size);
	hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
	return 0;

err_unlock:
	hyp_spin_unlock(&vm_table_lock);
	return err;
}