xref: /linux/arch/arm64/kvm/hyp/nvhe/pkvm.c (revision 02e5f74ef08d3e6afec438d571487d0d0cec3c48)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021 Google LLC
 * Author: Fuad Tabba <tabba@google.com>
 */

#include <linux/kvm_host.h>
#include <linux/mm.h>

#include <asm/kvm_emulate.h>

#include <nvhe/mem_protect.h>
#include <nvhe/memory.h>
#include <nvhe/pkvm.h>
#include <nvhe/trap_handler.h>

/* Used by icache_is_aliasing(). */
unsigned long __icache_flags;

/* Used by kvm_get_vttbr(). */
unsigned int kvm_arm_vmid_bits;

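/*
 * Maximum SVE vector length supported by the host. Used to cap the vector
 * length of SVE-enabled guest vCPUs.
 */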
unsigned int kvm_host_sve_max_vl;

/*
 * The currently loaded hyp vCPU for each physical CPU. Used in protected mode
 * for both protected and non-protected VMs.
 */
static DEFINE_PER_CPU(struct pkvm_hyp_vcpu *, loaded_hyp_vcpu);

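/*
 * Reset the vCPU's HCR_EL2 to the KVM guest defaults, adjusted for the CPU
 * features that are present and for the VM's configuration.
 */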
static void pkvm_vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;

	if (has_hvhe())
		vcpu->arch.hcr_el2 |= HCR_E2H;

	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) {
		/* route synchronous external abort exceptions to EL2 */
		vcpu->arch.hcr_el2 |= HCR_TEA;
		/* trap error record accesses */
		vcpu->arch.hcr_el2 |= HCR_TERR;
	}

	if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
		vcpu->arch.hcr_el2 |= HCR_FWB;

	if (cpus_have_final_cap(ARM64_HAS_EVT) &&
	    !cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE) &&
	    kvm_read_vm_id_reg(vcpu->kvm, SYS_CTR_EL0) == read_cpuid(CTR_EL0))
		vcpu->arch.hcr_el2 |= HCR_TID4;
	else
		vcpu->arch.hcr_el2 |= HCR_TID2;

	if (vcpu_has_ptrauth(vcpu))
		vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);

	if (kvm_has_mte(vcpu->kvm))
		vcpu->arch.hcr_el2 |= HCR_ATA;
}

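/*
 * Restrict HCR_EL2 for a protected VM: enforce AArch64-only, trap accesses
 * the guest must not make directly, and clear bits for features the VM does
 * not have.
 */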
static void pvm_init_traps_hcr(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	u64 val = vcpu->arch.hcr_el2;

	/* No support for AArch32. */
	val |= HCR_RW;

	/*
	 * Always trap:
	 * - Feature id registers: to control features exposed to guests
	 * - Implementation-defined features
	 */
	val |= HCR_TACR | HCR_TIDCP | HCR_TID3 | HCR_TID1;

	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP)) {
		val |= HCR_TERR | HCR_TEA;
		val &= ~(HCR_FIEN);
	}

	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, IMP))
		val &= ~(HCR_AMVOFFEN);

	if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, MTE, IMP)) {
		val |= HCR_TID5;
		val &= ~(HCR_DCT | HCR_ATA);
	}

	if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, LO, IMP))
		val |= HCR_TLOR;

	vcpu->arch.hcr_el2 = val;
}

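/*
 * Restrict MDCR_EL2 for a protected VM: trap debug, PMU, profiling and trace
 * functionality that is not exposed to the guest.
 */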
static void pvm_init_traps_mdcr(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	u64 val = vcpu->arch.mdcr_el2;

	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMUVer, IMP)) {
		val |= MDCR_EL2_TPM | MDCR_EL2_TPMCR;
		val &= ~(MDCR_EL2_HPME | MDCR_EL2_MTPME | MDCR_EL2_HPMN_MASK);
	}

	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, DebugVer, IMP))
		val |= MDCR_EL2_TDRA | MDCR_EL2_TDA;

	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, DoubleLock, IMP))
		val |= MDCR_EL2_TDOSA;

	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMSVer, IMP)) {
		val |= MDCR_EL2_TPMS;
		val &= ~MDCR_EL2_E2PB_MASK;
	}

	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceFilt, IMP))
		val |= MDCR_EL2_TTRF;

	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, ExtTrcBuff, IMP))
		val |= MDCR_EL2_E2TB_MASK;

	/* Trap Debug Communications Channel registers */
	if (!kvm_has_feat(kvm, ID_AA64MMFR0_EL1, FGT, IMP))
		val |= MDCR_EL2_TDCC;

	vcpu->arch.mdcr_el2 = val;
}

/*
 * Check that cpu features that are neither trapped nor supported are not
 * enabled for protected VMs.
 */
static int pkvm_check_pvm_cpu_features(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;

	/* No AArch32 support for protected guests. */
	if (kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL0, AARCH32) ||
	    kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL1, AARCH32))
		return -EINVAL;

	/*
	 * Linux guests assume support for floating-point and Advanced SIMD. Do
	 * not change the trapping behavior for these from the KVM default.
	 */
	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, FP, IMP) ||
	    !kvm_has_feat(kvm, ID_AA64PFR0_EL1, AdvSIMD, IMP))
		return -EINVAL;

	/* No SME support in KVM right now. Check to catch if it changes. */
	if (kvm_has_feat(kvm, ID_AA64PFR1_EL1, SME, IMP))
		return -EINVAL;

	return 0;
}

/*
 * Initialize trap register values in protected mode.
 */
static int pkvm_vcpu_init_traps(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;
	int ret;

	vcpu->arch.mdcr_el2 = 0;

	pkvm_vcpu_reset_hcr(vcpu);

	if (!pkvm_hyp_vcpu_is_protected(hyp_vcpu)) {
		struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;

		/* Trust the host for non-protected vcpu features. */
		vcpu->arch.hcrx_el2 = host_vcpu->arch.hcrx_el2;
		memcpy(vcpu->arch.fgt, host_vcpu->arch.fgt, sizeof(vcpu->arch.fgt));
		return 0;
	}

	ret = pkvm_check_pvm_cpu_features(vcpu);
	if (ret)
		return ret;

	pvm_init_traps_hcr(vcpu);
	pvm_init_traps_mdcr(vcpu);
	vcpu_set_hcrx(vcpu);

	return 0;
}

/*
 * Start the VM table handle at the offset defined instead of at 0.
 * Mainly for sanity checking and debugging.
 */
#define HANDLE_OFFSET 0x1000

/*
 * Marks a reserved but not yet used entry in the VM table.
 */
#define RESERVED_ENTRY ((void *)0xa110ca7ed)

static unsigned int vm_handle_to_idx(pkvm_handle_t handle)
{
	return handle - HANDLE_OFFSET;
}

static pkvm_handle_t idx_to_vm_handle(unsigned int idx)
{
	return idx + HANDLE_OFFSET;
}

/*
 * Spinlock for protecting state related to the VM table. Protects writes
 * to 'vm_table', 'nr_table_entries', and other per-vm state on initialization.
 * Also protects reads and writes to 'last_hyp_vcpu_lookup'.
 */
DEFINE_HYP_SPINLOCK(vm_table_lock);

/*
 * A table that tracks all VMs in protected mode.
 * Allocated during hyp initialization and setup.
 */
static struct pkvm_hyp_vm **vm_table;

void pkvm_hyp_vm_table_init(void *tbl)
{
	WARN_ON(vm_table);
	vm_table = tbl;
}

/*
 * Return the hyp vm structure corresponding to the handle.
 */
static struct pkvm_hyp_vm *get_vm_by_handle(pkvm_handle_t handle)
{
	unsigned int idx = vm_handle_to_idx(handle);

	if (unlikely(idx >= KVM_MAX_PVMS))
		return NULL;

	/* A reserved entry doesn't represent an initialized VM. */
	if (unlikely(vm_table[idx] == RESERVED_ENTRY))
		return NULL;

	return vm_table[idx];
}

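/*
 * Load the hyp vCPU 'vcpu_idx' of the VM identified by 'handle' onto this
 * physical CPU, taking a reference on the hyp VM. Returns NULL if the vCPU
 * cannot be loaded.
 */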
struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
					 unsigned int vcpu_idx)
{
	struct pkvm_hyp_vcpu *hyp_vcpu = NULL;
	struct pkvm_hyp_vm *hyp_vm;

	/* Cannot load a new vcpu without putting the old one first. */
	if (__this_cpu_read(loaded_hyp_vcpu))
		return NULL;

	hyp_spin_lock(&vm_table_lock);
	hyp_vm = get_vm_by_handle(handle);
	if (!hyp_vm || hyp_vm->kvm.created_vcpus <= vcpu_idx)
		goto unlock;

	hyp_vcpu = hyp_vm->vcpus[vcpu_idx];
	if (!hyp_vcpu)
		goto unlock;

	/* Ensure vcpu isn't loaded on more than one cpu simultaneously. */
	if (unlikely(hyp_vcpu->loaded_hyp_vcpu)) {
		hyp_vcpu = NULL;
		goto unlock;
	}

	hyp_vcpu->loaded_hyp_vcpu = this_cpu_ptr(&loaded_hyp_vcpu);
	hyp_page_ref_inc(hyp_virt_to_page(hyp_vm));
unlock:
	hyp_spin_unlock(&vm_table_lock);

	if (hyp_vcpu)
		__this_cpu_write(loaded_hyp_vcpu, hyp_vcpu);
	return hyp_vcpu;
}

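/*
 * Put the given hyp vCPU, clearing it as the loaded vCPU for this physical
 * CPU and dropping the VM reference taken by pkvm_load_hyp_vcpu().
 */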
void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	struct pkvm_hyp_vm *hyp_vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu);

	hyp_spin_lock(&vm_table_lock);
	hyp_vcpu->loaded_hyp_vcpu = NULL;
	__this_cpu_write(loaded_hyp_vcpu, NULL);
	hyp_page_ref_dec(hyp_virt_to_page(hyp_vm));
	hyp_spin_unlock(&vm_table_lock);
}

struct pkvm_hyp_vcpu *pkvm_get_loaded_hyp_vcpu(void)
{
	return __this_cpu_read(loaded_hyp_vcpu);
}

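/*
 * Look up the hyp VM for 'handle' and take a reference on it. The reference
 * is dropped by put_pkvm_hyp_vm().
 */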
struct pkvm_hyp_vm *get_pkvm_hyp_vm(pkvm_handle_t handle)
{
	struct pkvm_hyp_vm *hyp_vm;

	hyp_spin_lock(&vm_table_lock);
	hyp_vm = get_vm_by_handle(handle);
	if (hyp_vm)
		hyp_page_ref_inc(hyp_virt_to_page(hyp_vm));
	hyp_spin_unlock(&vm_table_lock);

	return hyp_vm;
}

void put_pkvm_hyp_vm(struct pkvm_hyp_vm *hyp_vm)
{
	hyp_spin_lock(&vm_table_lock);
	hyp_page_ref_dec(hyp_virt_to_page(hyp_vm));
	hyp_spin_unlock(&vm_table_lock);
}

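/*
 * As get_pkvm_hyp_vm(), but only returns non-protected VMs; a protected VM
 * is released again and NULL is returned instead.
 */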
struct pkvm_hyp_vm *get_np_pkvm_hyp_vm(pkvm_handle_t handle)
{
	struct pkvm_hyp_vm *hyp_vm = get_pkvm_hyp_vm(handle);

	if (hyp_vm && pkvm_hyp_vm_is_protected(hyp_vm)) {
		put_pkvm_hyp_vm(hyp_vm);
		hyp_vm = NULL;
	}

	return hyp_vm;
}

static void pkvm_init_features_from_host(struct pkvm_hyp_vm *hyp_vm, const struct kvm *host_kvm)
{
	struct kvm *kvm = &hyp_vm->kvm;
	unsigned long host_arch_flags = READ_ONCE(host_kvm->arch.flags);
	DECLARE_BITMAP(allowed_features, KVM_VCPU_MAX_FEATURES);

	/* CTR_EL0 is always under host control, even for protected VMs. */
	hyp_vm->kvm.arch.ctr_el0 = host_kvm->arch.ctr_el0;

	if (test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &host_kvm->arch.flags))
		set_bit(KVM_ARCH_FLAG_MTE_ENABLED, &kvm->arch.flags);

	/* No restrictions for non-protected VMs. */
	if (!kvm_vm_is_protected(kvm)) {
		hyp_vm->kvm.arch.flags = host_arch_flags;

		bitmap_copy(kvm->arch.vcpu_features,
			    host_kvm->arch.vcpu_features,
			    KVM_VCPU_MAX_FEATURES);

		if (test_bit(KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS, &host_arch_flags))
			hyp_vm->kvm.arch.midr_el1 = host_kvm->arch.midr_el1;

		return;
	}

	bitmap_zero(allowed_features, KVM_VCPU_MAX_FEATURES);

	set_bit(KVM_ARM_VCPU_PSCI_0_2, allowed_features);

	if (kvm_pvm_ext_allowed(KVM_CAP_ARM_PMU_V3))
		set_bit(KVM_ARM_VCPU_PMU_V3, allowed_features);

	if (kvm_pvm_ext_allowed(KVM_CAP_ARM_PTRAUTH_ADDRESS))
		set_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, allowed_features);

	if (kvm_pvm_ext_allowed(KVM_CAP_ARM_PTRAUTH_GENERIC))
		set_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, allowed_features);

	if (kvm_pvm_ext_allowed(KVM_CAP_ARM_SVE)) {
		set_bit(KVM_ARM_VCPU_SVE, allowed_features);
		kvm->arch.flags |= host_arch_flags & BIT(KVM_ARCH_FLAG_GUEST_HAS_SVE);
	}

	bitmap_and(kvm->arch.vcpu_features, host_kvm->arch.vcpu_features,
		   allowed_features, KVM_VCPU_MAX_FEATURES);
}

static void unpin_host_vcpu(struct kvm_vcpu *host_vcpu)
{
	if (host_vcpu)
		hyp_unpin_shared_mem(host_vcpu, host_vcpu + 1);
}

static void unpin_host_sve_state(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	void *sve_state;

	if (!vcpu_has_feature(&hyp_vcpu->vcpu, KVM_ARM_VCPU_SVE))
		return;

	sve_state = kern_hyp_va(hyp_vcpu->vcpu.arch.sve_state);
	hyp_unpin_shared_mem(sve_state,
			     sve_state + vcpu_sve_state_size(&hyp_vcpu->vcpu));
}

static void unpin_host_vcpus(struct pkvm_hyp_vcpu *hyp_vcpus[],
			     unsigned int nr_vcpus)
{
	int i;

	for (i = 0; i < nr_vcpus; i++) {
		struct pkvm_hyp_vcpu *hyp_vcpu = hyp_vcpus[i];

		if (!hyp_vcpu)
			continue;

		unpin_host_vcpu(hyp_vcpu->host_vcpu);
		unpin_host_sve_state(hyp_vcpu);
	}
}

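/*
 * Initialize the hyp VM state: handle, vCPU count, protection status, the
 * features inherited from the host, and the stage-2 MMU.
 */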
static void init_pkvm_hyp_vm(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm,
			     unsigned int nr_vcpus, pkvm_handle_t handle)
{
	struct kvm_s2_mmu *mmu = &hyp_vm->kvm.arch.mmu;
	int idx = vm_handle_to_idx(handle);

	hyp_vm->kvm.arch.pkvm.handle = handle;

	hyp_vm->host_kvm = host_kvm;
	hyp_vm->kvm.created_vcpus = nr_vcpus;
	hyp_vm->kvm.arch.pkvm.is_protected = READ_ONCE(host_kvm->arch.pkvm.is_protected);
	hyp_vm->kvm.arch.pkvm.is_created = true;
	hyp_vm->kvm.arch.flags = 0;
	pkvm_init_features_from_host(hyp_vm, host_kvm);

	/* VMID 0 is reserved for the host */
	atomic64_set(&mmu->vmid.id, idx + 1);

	mmu->vtcr = host_mmu.arch.mmu.vtcr;
	mmu->arch = &hyp_vm->kvm.arch;
	mmu->pgt = &hyp_vm->pgt;
}

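/*
 * For an SVE-enabled vCPU, pin the host-allocated SVE state and adopt it for
 * the hyp vCPU. On failure, the SVE feature bit is cleared for the VM.
 */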
static int pkvm_vcpu_init_sve(struct pkvm_hyp_vcpu *hyp_vcpu, struct kvm_vcpu *host_vcpu)
{
	struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;
	unsigned int sve_max_vl;
	size_t sve_state_size;
	void *sve_state;
	int ret = 0;

	if (!vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE)) {
		vcpu_clear_flag(vcpu, VCPU_SVE_FINALIZED);
		return 0;
	}

	/* Limit guest vector length to the maximum supported by the host. */
	sve_max_vl = min(READ_ONCE(host_vcpu->arch.sve_max_vl), kvm_host_sve_max_vl);
	sve_state_size = sve_state_size_from_vl(sve_max_vl);
	sve_state = kern_hyp_va(READ_ONCE(host_vcpu->arch.sve_state));

	if (!sve_state || !sve_state_size) {
		ret = -EINVAL;
		goto err;
	}

	ret = hyp_pin_shared_mem(sve_state, sve_state + sve_state_size);
	if (ret)
		goto err;

	vcpu->arch.sve_state = sve_state;
	vcpu->arch.sve_max_vl = sve_max_vl;

	return 0;
err:
	clear_bit(KVM_ARM_VCPU_SVE, vcpu->kvm->arch.vcpu_features);
	return ret;
}

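/*
 * Initialize the hyp vCPU: pin the host vCPU, copy the identifiers and flags
 * the hypervisor relies on, and set up the trap and SVE state.
 */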
static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
			      struct pkvm_hyp_vm *hyp_vm,
			      struct kvm_vcpu *host_vcpu)
{
	int ret = 0;

	if (hyp_pin_shared_mem(host_vcpu, host_vcpu + 1))
		return -EBUSY;

	hyp_vcpu->host_vcpu = host_vcpu;

	hyp_vcpu->vcpu.kvm = &hyp_vm->kvm;
	hyp_vcpu->vcpu.vcpu_id = READ_ONCE(host_vcpu->vcpu_id);
	hyp_vcpu->vcpu.vcpu_idx = READ_ONCE(host_vcpu->vcpu_idx);

	hyp_vcpu->vcpu.arch.hw_mmu = &hyp_vm->kvm.arch.mmu;
	hyp_vcpu->vcpu.arch.cflags = READ_ONCE(host_vcpu->arch.cflags);
	hyp_vcpu->vcpu.arch.mp_state.mp_state = KVM_MP_STATE_STOPPED;

	if (pkvm_hyp_vcpu_is_protected(hyp_vcpu))
		kvm_init_pvm_id_regs(&hyp_vcpu->vcpu);

	ret = pkvm_vcpu_init_traps(hyp_vcpu);
	if (ret)
		goto done;

	ret = pkvm_vcpu_init_sve(hyp_vcpu, host_vcpu);
done:
	if (ret)
		unpin_host_vcpu(host_vcpu);
	return ret;
}

static int find_free_vm_table_entry(void)
{
	int i;

	for (i = 0; i < KVM_MAX_PVMS; ++i) {
		if (!vm_table[i])
			return i;
	}

	return -ENOMEM;
}

/*
 * Reserve a VM table entry.
 *
 * Return the index of the reserved entry on success,
 * negative error code on failure.
 */
static int allocate_vm_table_entry(void)
{
	int idx;

	hyp_assert_lock_held(&vm_table_lock);

	/*
	 * Initializing protected state might have failed, yet a malicious
	 * host could trigger this function. Thus, ensure that 'vm_table'
	 * exists.
	 */
	if (unlikely(!vm_table))
		return -EINVAL;

	idx = find_free_vm_table_entry();
	if (unlikely(idx < 0))
		return idx;

	vm_table[idx] = RESERVED_ENTRY;

	return idx;
}

static int __insert_vm_table_entry(pkvm_handle_t handle,
				   struct pkvm_hyp_vm *hyp_vm)
{
	unsigned int idx;

	hyp_assert_lock_held(&vm_table_lock);

	/*
	 * Initializing protected state might have failed, yet a malicious
	 * host could trigger this function. Thus, ensure that 'vm_table'
	 * exists.
	 */
	if (unlikely(!vm_table))
		return -EINVAL;

	idx = vm_handle_to_idx(handle);
	if (unlikely(idx >= KVM_MAX_PVMS))
		return -EINVAL;

	if (unlikely(vm_table[idx] != RESERVED_ENTRY))
		return -EINVAL;

	vm_table[idx] = hyp_vm;

	return 0;
}

/*
 * Insert a pointer to the initialized VM into the VM table.
 *
 * Return 0 on success, or negative error code on failure.
 */
static int insert_vm_table_entry(pkvm_handle_t handle,
				 struct pkvm_hyp_vm *hyp_vm)
{
	int ret;

	hyp_spin_lock(&vm_table_lock);
	ret = __insert_vm_table_entry(handle, hyp_vm);
	hyp_spin_unlock(&vm_table_lock);

	return ret;
}

/*
 * Deallocate and remove the VM table entry corresponding to the handle.
 */
static void remove_vm_table_entry(pkvm_handle_t handle)
{
	hyp_assert_lock_held(&vm_table_lock);
	vm_table[vm_handle_to_idx(handle)] = NULL;
}

static size_t pkvm_get_hyp_vm_size(unsigned int nr_vcpus)
{
	return size_add(sizeof(struct pkvm_hyp_vm),
		size_mul(sizeof(struct pkvm_hyp_vcpu *), nr_vcpus));
}

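/*
 * Map memory donated by the host into the hypervisor. Donation unmaps the
 * pages from the host at stage 2; map_donated_memory() additionally zeroes
 * them.
 */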
static void *map_donated_memory_noclear(unsigned long host_va, size_t size)
{
	void *va = (void *)kern_hyp_va(host_va);

	if (!PAGE_ALIGNED(va))
		return NULL;

	if (__pkvm_host_donate_hyp(hyp_virt_to_pfn(va),
				   PAGE_ALIGN(size) >> PAGE_SHIFT))
		return NULL;

	return va;
}

static void *map_donated_memory(unsigned long host_va, size_t size)
{
	void *va = map_donated_memory_noclear(host_va, size);

	if (va)
		memset(va, 0, size);

	return va;
}

static void __unmap_donated_memory(void *va, size_t size)
{
	kvm_flush_dcache_to_poc(va, size);
	WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(va),
				       PAGE_ALIGN(size) >> PAGE_SHIFT));
}

static void unmap_donated_memory(void *va, size_t size)
{
	if (!va)
		return;

	memset(va, 0, size);
	__unmap_donated_memory(va, size);
}

static void unmap_donated_memory_noclear(void *va, size_t size)
{
	if (!va)
		return;

	__unmap_donated_memory(va, size);
}

/*
 * Reserves an entry in the hypervisor for a new VM in protected mode.
 *
 * Return a unique handle to the VM on success, negative error code on failure.
 */
int __pkvm_reserve_vm(void)
{
	int ret;

	hyp_spin_lock(&vm_table_lock);
	ret = allocate_vm_table_entry();
	hyp_spin_unlock(&vm_table_lock);

	if (ret < 0)
		return ret;

	return idx_to_vm_handle(ret);
}

/*
 * Removes a reserved entry, but only if it hasn't been used yet.
 * Otherwise, the VM needs to be destroyed.
 */
void __pkvm_unreserve_vm(pkvm_handle_t handle)
{
	unsigned int idx = vm_handle_to_idx(handle);

	if (unlikely(!vm_table))
		return;

	hyp_spin_lock(&vm_table_lock);
	if (likely(idx < KVM_MAX_PVMS && vm_table[idx] == RESERVED_ENTRY))
		remove_vm_table_entry(handle);
	hyp_spin_unlock(&vm_table_lock);
}

/*
 * Initialize the hypervisor copy of the VM state using host-donated memory.
 *
 * Unmap the donated memory from the host at stage 2.
 *
 * host_kvm: A pointer to the host's struct kvm.
 * vm_hva: The host va of the area being donated for the VM state.
 *	   Must be page aligned.
 * pgd_hva: The host va of the area being donated for the stage-2 PGD for
 *	    the VM. Must be page aligned. Its size is implied by the VM's
 *	    VTCR.
 *
 * Return 0 on success, negative error code on failure.
 */
int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
		   unsigned long pgd_hva)
{
	struct pkvm_hyp_vm *hyp_vm = NULL;
	size_t vm_size, pgd_size;
	unsigned int nr_vcpus;
	pkvm_handle_t handle;
	void *pgd = NULL;
	int ret;

	ret = hyp_pin_shared_mem(host_kvm, host_kvm + 1);
	if (ret)
		return ret;

	nr_vcpus = READ_ONCE(host_kvm->created_vcpus);
	if (nr_vcpus < 1) {
		ret = -EINVAL;
		goto err_unpin_kvm;
	}

	handle = READ_ONCE(host_kvm->arch.pkvm.handle);
	if (unlikely(handle < HANDLE_OFFSET)) {
		ret = -EINVAL;
		goto err_unpin_kvm;
	}

	vm_size = pkvm_get_hyp_vm_size(nr_vcpus);
	pgd_size = kvm_pgtable_stage2_pgd_size(host_mmu.arch.mmu.vtcr);

	ret = -ENOMEM;

	hyp_vm = map_donated_memory(vm_hva, vm_size);
	if (!hyp_vm)
		goto err_remove_mappings;

	pgd = map_donated_memory_noclear(pgd_hva, pgd_size);
	if (!pgd)
		goto err_remove_mappings;

	init_pkvm_hyp_vm(host_kvm, hyp_vm, nr_vcpus, handle);

	ret = kvm_guest_prepare_stage2(hyp_vm, pgd);
	if (ret)
		goto err_remove_mappings;

	/* Must be called last since this publishes the VM. */
	ret = insert_vm_table_entry(handle, hyp_vm);
	if (ret)
		goto err_remove_mappings;

	return 0;

err_remove_mappings:
	unmap_donated_memory(hyp_vm, vm_size);
	unmap_donated_memory(pgd, pgd_size);
err_unpin_kvm:
	hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
	return ret;
}

/*
 * Initialize the hypervisor copy of the vCPU state using host-donated memory.
 *
 * handle: The hypervisor handle for the vm.
 * host_vcpu: A pointer to the corresponding host vcpu.
 * vcpu_hva: The host va of the area being donated for the vcpu state.
 *	     Must be page aligned. The size of the area must be equal to
 *	     the page-aligned size of 'struct pkvm_hyp_vcpu'.
 * Return 0 on success, negative error code on failure.
 */
int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
		     unsigned long vcpu_hva)
{
	struct pkvm_hyp_vcpu *hyp_vcpu;
	struct pkvm_hyp_vm *hyp_vm;
	unsigned int idx;
	int ret;

	hyp_vcpu = map_donated_memory(vcpu_hva, sizeof(*hyp_vcpu));
	if (!hyp_vcpu)
		return -ENOMEM;

	hyp_spin_lock(&vm_table_lock);

	hyp_vm = get_vm_by_handle(handle);
	if (!hyp_vm) {
		ret = -ENOENT;
		goto unlock;
	}

	ret = init_pkvm_hyp_vcpu(hyp_vcpu, hyp_vm, host_vcpu);
	if (ret)
		goto unlock;

	idx = hyp_vcpu->vcpu.vcpu_idx;
	if (idx >= hyp_vm->kvm.created_vcpus) {
		ret = -EINVAL;
		goto unlock;
	}

	if (hyp_vm->vcpus[idx]) {
		ret = -EINVAL;
		goto unlock;
	}

	hyp_vm->vcpus[idx] = hyp_vcpu;
unlock:
	hyp_spin_unlock(&vm_table_lock);

	if (ret)
		unmap_donated_memory(hyp_vcpu, sizeof(*hyp_vcpu));
	return ret;
}

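/*
 * Wipe previously donated memory and push its pages onto the teardown
 * memcache so that the host can reclaim them.
 */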
static void
teardown_donated_memory(struct kvm_hyp_memcache *mc, void *addr, size_t size)
{
	size = PAGE_ALIGN(size);
	memset(addr, 0, size);

	for (void *start = addr; start < addr + size; start += PAGE_SIZE)
		push_hyp_memcache(mc, start, hyp_virt_to_phys);

	unmap_donated_memory_noclear(addr, size);
}

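/*
 * Tear down the VM identified by 'handle': invalidate its TLB entries, remove
 * it from the VM table, reclaim its stage-2 and metadata pages into the
 * host's teardown memcaches, and unpin the host structures it was using.
 */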
int __pkvm_teardown_vm(pkvm_handle_t handle)
{
	struct kvm_hyp_memcache *mc, *stage2_mc;
	struct pkvm_hyp_vm *hyp_vm;
	struct kvm *host_kvm;
	unsigned int idx;
	size_t vm_size;
	int err;

	hyp_spin_lock(&vm_table_lock);
	hyp_vm = get_vm_by_handle(handle);
	if (!hyp_vm) {
		err = -ENOENT;
		goto err_unlock;
	}

	if (WARN_ON(hyp_page_count(hyp_vm))) {
		err = -EBUSY;
		goto err_unlock;
	}

	host_kvm = hyp_vm->host_kvm;

	/* Ensure the VMID is clean before it can be reallocated */
	__kvm_tlb_flush_vmid(&hyp_vm->kvm.arch.mmu);
	remove_vm_table_entry(handle);
	hyp_spin_unlock(&vm_table_lock);

	/* Reclaim guest pages (including page-table pages) */
	mc = &host_kvm->arch.pkvm.teardown_mc;
	stage2_mc = &host_kvm->arch.pkvm.stage2_teardown_mc;
	reclaim_pgtable_pages(hyp_vm, stage2_mc);
	unpin_host_vcpus(hyp_vm->vcpus, hyp_vm->kvm.created_vcpus);

	/* Push the metadata pages to the teardown memcache */
	for (idx = 0; idx < hyp_vm->kvm.created_vcpus; ++idx) {
		struct pkvm_hyp_vcpu *hyp_vcpu = hyp_vm->vcpus[idx];
		struct kvm_hyp_memcache *vcpu_mc;

		if (!hyp_vcpu)
			continue;

		vcpu_mc = &hyp_vcpu->vcpu.arch.pkvm_memcache;

		while (vcpu_mc->nr_pages) {
			void *addr = pop_hyp_memcache(vcpu_mc, hyp_phys_to_virt);

			push_hyp_memcache(stage2_mc, addr, hyp_virt_to_phys);
			unmap_donated_memory_noclear(addr, PAGE_SIZE);
		}

		teardown_donated_memory(mc, hyp_vcpu, sizeof(*hyp_vcpu));
	}

	vm_size = pkvm_get_hyp_vm_size(hyp_vm->kvm.created_vcpus);
	teardown_donated_memory(mc, hyp_vm, vm_size);
	hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
	return 0;

err_unlock:
	hyp_spin_unlock(&vm_table_lock);
	return err;
}