xref: /linux/arch/arm64/kvm/hyp/nvhe/hyp-main.c (revision 51d90a15fedf8366cb96ef68d0ea2d0bf15417d2)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 - Google Inc
 * Author: Andrew Scull <ascull@google.com>
 */

#include <hyp/adjust_pc.h>
#include <hyp/switch.h>

#include <asm/pgtable-types.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>

#include <nvhe/ffa.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
#include <nvhe/pkvm.h>
#include <nvhe/trap_handler.h>

DEFINE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);

void __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt);

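/*
 * Save the guest's SVE state. Callers must ensure that the guest
 * currently owns the FP regs and that the CPTR traps have been
 * disabled.
 */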
static void __hyp_sve_save_guest(struct kvm_vcpu *vcpu)
{
	__vcpu_assign_sys_reg(vcpu, ZCR_EL1, read_sysreg_el1(SYS_ZCR));
	/*
	 * On saving/restoring guest sve state, always use the maximum VL for
	 * the guest. The layout of the data when saving the sve state depends
	 * on the VL, so use a consistent (i.e., the maximum) guest VL.
	 */
	sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
	__sve_save_state(vcpu_sve_pffr(vcpu), &vcpu->arch.ctxt.fp_regs.fpsr, true);
	write_sysreg_s(sve_vq_from_vl(kvm_host_sve_max_vl) - 1, SYS_ZCR_EL2);
}

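/*
 * Restore the host's SVE state from the per-CPU host_data area, along
 * with the host's EL1 vector length configuration.
 */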
static void __hyp_sve_restore_host(void)
{
	struct cpu_sve_state *sve_state = *host_data_ptr(sve_state);

	/*
	 * On saving/restoring host sve state, always use the maximum VL for
	 * the host. The layout of the data when saving the sve state depends
	 * on the VL, so use a consistent (i.e., the maximum) host VL.
	 *
	 * Note that this constrains the PE to the maximum shared VL
	 * that was discovered; if we wish to use larger VLs this will
	 * need to be revisited.
	 */
	write_sysreg_s(sve_vq_from_vl(kvm_host_sve_max_vl) - 1, SYS_ZCR_EL2);
	__sve_restore_state(sve_state->sve_regs + sve_ffr_offset(kvm_host_sve_max_vl),
			    &sve_state->fpsr,
			    true);
	write_sysreg_el1(sve_state->zcr_el1, SYS_ZCR);
}

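/*
 * Hand ownership of the FP regs back to the host before running a
 * vCPU. The guest's state, if any, is expected to be faulted back in
 * via the FP/SIMD trap handler on first use.
 */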
static void fpsimd_sve_flush(void)
{
	*host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
}

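/*
 * If the guest ended up owning the FP regs, save its FP/SVE (and,
 * where present, FPMR) state back into the vCPU and restore the host's
 * state before returning to the host.
 */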
static void fpsimd_sve_sync(struct kvm_vcpu *vcpu)
{
	bool has_fpmr;

	if (!guest_owns_fp_regs())
		return;

	/*
	 * Traps have been disabled by __deactivate_cptr_traps(), but there
	 * hasn't necessarily been a context synchronization event yet.
	 */
	isb();

	if (vcpu_has_sve(vcpu))
		__hyp_sve_save_guest(vcpu);
	else
		__fpsimd_save_state(&vcpu->arch.ctxt.fp_regs);

	has_fpmr = kvm_has_fpmr(kern_hyp_va(vcpu->kvm));
	if (has_fpmr)
		__vcpu_assign_sys_reg(vcpu, FPMR, read_sysreg_s(SYS_FPMR));

	if (system_supports_sve())
		__hyp_sve_restore_host();
	else
		__fpsimd_restore_state(host_data_ptr(host_ctxt.fp_regs));

	if (has_fpmr)
		write_sysreg_s(*host_data_ptr(fpmr), SYS_FPMR);

	*host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
}

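/*
 * flush_debug_state()/sync_debug_state(): propagate debug register
 * ownership and the owner's register state between the host's vCPU and
 * its hyp shadow, in each direction respectively.
 */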
static void flush_debug_state(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;

	hyp_vcpu->vcpu.arch.debug_owner = host_vcpu->arch.debug_owner;

	if (kvm_guest_owns_debug_regs(&hyp_vcpu->vcpu))
		hyp_vcpu->vcpu.arch.vcpu_debug_state = host_vcpu->arch.vcpu_debug_state;
	else if (kvm_host_owns_debug_regs(&hyp_vcpu->vcpu))
		hyp_vcpu->vcpu.arch.external_debug_state = host_vcpu->arch.external_debug_state;
}

static void sync_debug_state(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;

	if (kvm_guest_owns_debug_regs(&hyp_vcpu->vcpu))
		host_vcpu->arch.vcpu_debug_state = hyp_vcpu->vcpu.arch.vcpu_debug_state;
	else if (kvm_host_owns_debug_regs(&hyp_vcpu->vcpu))
		host_vcpu->arch.external_debug_state = hyp_vcpu->vcpu.arch.external_debug_state;
}

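/*
 * flush_hyp_vcpu() copies the host's view of the vCPU into the hyp
 * shadow vCPU before it runs; note that only the WFx trapping bits of
 * HCR_EL2 are taken from the host, everything else remains under hyp's
 * control. sync_hyp_vcpu() mirrors the results back afterwards.
 */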
static void flush_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;

	fpsimd_sve_flush();
	flush_debug_state(hyp_vcpu);

	hyp_vcpu->vcpu.arch.ctxt	= host_vcpu->arch.ctxt;

	hyp_vcpu->vcpu.arch.mdcr_el2	= host_vcpu->arch.mdcr_el2;
	hyp_vcpu->vcpu.arch.hcr_el2 &= ~(HCR_TWI | HCR_TWE);
	hyp_vcpu->vcpu.arch.hcr_el2 |= READ_ONCE(host_vcpu->arch.hcr_el2) &
						 (HCR_TWI | HCR_TWE);

	hyp_vcpu->vcpu.arch.iflags	= host_vcpu->arch.iflags;

	hyp_vcpu->vcpu.arch.vsesr_el2	= host_vcpu->arch.vsesr_el2;

	hyp_vcpu->vcpu.arch.vgic_cpu.vgic_v3 = host_vcpu->arch.vgic_cpu.vgic_v3;
}

static void sync_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
	struct vgic_v3_cpu_if *hyp_cpu_if = &hyp_vcpu->vcpu.arch.vgic_cpu.vgic_v3;
	struct vgic_v3_cpu_if *host_cpu_if = &host_vcpu->arch.vgic_cpu.vgic_v3;
	unsigned int i;

	fpsimd_sve_sync(&hyp_vcpu->vcpu);
	sync_debug_state(hyp_vcpu);

	host_vcpu->arch.ctxt		= hyp_vcpu->vcpu.arch.ctxt;

	host_vcpu->arch.hcr_el2		= hyp_vcpu->vcpu.arch.hcr_el2;

	host_vcpu->arch.fault		= hyp_vcpu->vcpu.arch.fault;

	host_vcpu->arch.iflags		= hyp_vcpu->vcpu.arch.iflags;

	host_cpu_if->vgic_hcr		= hyp_cpu_if->vgic_hcr;
	host_cpu_if->vgic_vmcr		= hyp_cpu_if->vgic_vmcr;
	for (i = 0; i < hyp_cpu_if->used_lrs; ++i)
		host_cpu_if->vgic_lr[i] = hyp_cpu_if->vgic_lr[i];
}

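/*
 * Host hypercall handlers, below: arguments arrive in the host's saved
 * GPRs and are fetched with DECLARE_REG(); any return value is written
 * back to the host's x1 via cpu_reg(host_ctxt, 1). For example, the
 * host side of __pkvm_vcpu_load looks roughly like:
 *
 *	kvm_call_hyp_nvhe(__pkvm_vcpu_load, handle, vcpu_idx, hcr_el2);
 */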
static void handle___pkvm_vcpu_load(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);
	DECLARE_REG(unsigned int, vcpu_idx, host_ctxt, 2);
	DECLARE_REG(u64, hcr_el2, host_ctxt, 3);
	struct pkvm_hyp_vcpu *hyp_vcpu;

	if (!is_protected_kvm_enabled())
		return;

	hyp_vcpu = pkvm_load_hyp_vcpu(handle, vcpu_idx);
	if (!hyp_vcpu)
		return;

	if (pkvm_hyp_vcpu_is_protected(hyp_vcpu)) {
		/* Propagate WFx trapping flags */
		hyp_vcpu->vcpu.arch.hcr_el2 &= ~(HCR_TWE | HCR_TWI);
		hyp_vcpu->vcpu.arch.hcr_el2 |= hcr_el2 & (HCR_TWE | HCR_TWI);
	}
}

static void handle___pkvm_vcpu_put(struct kvm_cpu_context *host_ctxt)
{
	struct pkvm_hyp_vcpu *hyp_vcpu;

	if (!is_protected_kvm_enabled())
		return;

	hyp_vcpu = pkvm_get_loaded_hyp_vcpu();
	if (hyp_vcpu)
		pkvm_put_hyp_vcpu(hyp_vcpu);
}

static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct kvm_vcpu *, host_vcpu, host_ctxt, 1);
	int ret;

	if (unlikely(is_protected_kvm_enabled())) {
		struct pkvm_hyp_vcpu *hyp_vcpu = pkvm_get_loaded_hyp_vcpu();

		/*
		 * KVM (and pKVM) doesn't support SME guests for now, and
		 * ensures that SME features aren't enabled in pstate when
		 * loading a vcpu. Therefore, if SME features are enabled,
		 * the host is misbehaving.
		 */
		if (unlikely(system_supports_sme() && read_sysreg_s(SYS_SVCR))) {
			ret = -EINVAL;
			goto out;
		}

		if (!hyp_vcpu) {
			ret = -EINVAL;
			goto out;
		}

		flush_hyp_vcpu(hyp_vcpu);

		ret = __kvm_vcpu_run(&hyp_vcpu->vcpu);

		sync_hyp_vcpu(hyp_vcpu);
	} else {
		struct kvm_vcpu *vcpu = kern_hyp_va(host_vcpu);

		/* The host is fully trusted, run its vCPU directly. */
		fpsimd_lazy_switch_to_guest(vcpu);
		ret = __kvm_vcpu_run(vcpu);
		fpsimd_lazy_switch_to_host(vcpu);
	}
out:
	cpu_reg(host_ctxt, 1) = ret;
}

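/*
 * Top up the hyp vCPU's memcache from the pages the host has queued in
 * its own vCPU memcache, so that stage-2 mappings created on the
 * guest's behalf can allocate page-table pages from a donated pool.
 */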
static int pkvm_refill_memcache(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;

	return refill_memcache(&hyp_vcpu->vcpu.arch.pkvm_memcache,
			       host_vcpu->arch.pkvm_memcache.nr_pages,
			       &host_vcpu->arch.pkvm_memcache);
}

static void handle___pkvm_host_share_guest(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(u64, pfn, host_ctxt, 1);
	DECLARE_REG(u64, gfn, host_ctxt, 2);
	DECLARE_REG(u64, nr_pages, host_ctxt, 3);
	DECLARE_REG(enum kvm_pgtable_prot, prot, host_ctxt, 4);
	struct pkvm_hyp_vcpu *hyp_vcpu;
	int ret = -EINVAL;

	if (!is_protected_kvm_enabled())
		goto out;

	hyp_vcpu = pkvm_get_loaded_hyp_vcpu();
	if (!hyp_vcpu || pkvm_hyp_vcpu_is_protected(hyp_vcpu))
		goto out;

	ret = pkvm_refill_memcache(hyp_vcpu);
	if (ret)
		goto out;

	ret = __pkvm_host_share_guest(pfn, gfn, nr_pages, hyp_vcpu, prot);
out:
	cpu_reg(host_ctxt, 1) = ret;
}

static void handle___pkvm_host_unshare_guest(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);
	DECLARE_REG(u64, gfn, host_ctxt, 2);
	DECLARE_REG(u64, nr_pages, host_ctxt, 3);
	struct pkvm_hyp_vm *hyp_vm;
	int ret = -EINVAL;

	if (!is_protected_kvm_enabled())
		goto out;

	hyp_vm = get_np_pkvm_hyp_vm(handle);
	if (!hyp_vm)
		goto out;

	ret = __pkvm_host_unshare_guest(gfn, nr_pages, hyp_vm);
	put_pkvm_hyp_vm(hyp_vm);
out:
	cpu_reg(host_ctxt, 1) = ret;
}

static void handle___pkvm_host_relax_perms_guest(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(u64, gfn, host_ctxt, 1);
	DECLARE_REG(enum kvm_pgtable_prot, prot, host_ctxt, 2);
	struct pkvm_hyp_vcpu *hyp_vcpu;
	int ret = -EINVAL;

	if (!is_protected_kvm_enabled())
		goto out;

	hyp_vcpu = pkvm_get_loaded_hyp_vcpu();
	if (!hyp_vcpu || pkvm_hyp_vcpu_is_protected(hyp_vcpu))
		goto out;

	ret = __pkvm_host_relax_perms_guest(gfn, hyp_vcpu, prot);
out:
	cpu_reg(host_ctxt, 1) = ret;
}

static void handle___pkvm_host_wrprotect_guest(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);
	DECLARE_REG(u64, gfn, host_ctxt, 2);
	DECLARE_REG(u64, nr_pages, host_ctxt, 3);
	struct pkvm_hyp_vm *hyp_vm;
	int ret = -EINVAL;

	if (!is_protected_kvm_enabled())
		goto out;

	hyp_vm = get_np_pkvm_hyp_vm(handle);
	if (!hyp_vm)
		goto out;

	ret = __pkvm_host_wrprotect_guest(gfn, nr_pages, hyp_vm);
	put_pkvm_hyp_vm(hyp_vm);
out:
	cpu_reg(host_ctxt, 1) = ret;
}

static void handle___pkvm_host_test_clear_young_guest(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);
	DECLARE_REG(u64, gfn, host_ctxt, 2);
	DECLARE_REG(u64, nr_pages, host_ctxt, 3);
	DECLARE_REG(bool, mkold, host_ctxt, 4);
	struct pkvm_hyp_vm *hyp_vm;
	int ret = -EINVAL;

	if (!is_protected_kvm_enabled())
		goto out;

	hyp_vm = get_np_pkvm_hyp_vm(handle);
	if (!hyp_vm)
		goto out;

	ret = __pkvm_host_test_clear_young_guest(gfn, nr_pages, mkold, hyp_vm);
	put_pkvm_hyp_vm(hyp_vm);
out:
	cpu_reg(host_ctxt, 1) = ret;
}

static void handle___pkvm_host_mkyoung_guest(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(u64, gfn, host_ctxt, 1);
	struct pkvm_hyp_vcpu *hyp_vcpu;
	int ret = -EINVAL;

	if (!is_protected_kvm_enabled())
		goto out;

	hyp_vcpu = pkvm_get_loaded_hyp_vcpu();
	if (!hyp_vcpu || pkvm_hyp_vcpu_is_protected(hyp_vcpu))
		goto out;

	ret = __pkvm_host_mkyoung_guest(gfn, hyp_vcpu);
out:
	cpu_reg(host_ctxt, 1) = ret;
}

static void handle___kvm_adjust_pc(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);

	__kvm_adjust_pc(kern_hyp_va(vcpu));
}

static void handle___kvm_flush_vm_context(struct kvm_cpu_context *host_ctxt)
{
	__kvm_flush_vm_context();
}

static void handle___kvm_tlb_flush_vmid_ipa(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
	DECLARE_REG(phys_addr_t, ipa, host_ctxt, 2);
	DECLARE_REG(int, level, host_ctxt, 3);

	__kvm_tlb_flush_vmid_ipa(kern_hyp_va(mmu), ipa, level);
}

static void handle___kvm_tlb_flush_vmid_ipa_nsh(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
	DECLARE_REG(phys_addr_t, ipa, host_ctxt, 2);
	DECLARE_REG(int, level, host_ctxt, 3);

	__kvm_tlb_flush_vmid_ipa_nsh(kern_hyp_va(mmu), ipa, level);
}

static void
handle___kvm_tlb_flush_vmid_range(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
	DECLARE_REG(phys_addr_t, start, host_ctxt, 2);
	DECLARE_REG(unsigned long, pages, host_ctxt, 3);

	__kvm_tlb_flush_vmid_range(kern_hyp_va(mmu), start, pages);
}

static void handle___kvm_tlb_flush_vmid(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);

	__kvm_tlb_flush_vmid(kern_hyp_va(mmu));
}

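/*
 * Unlike handle___kvm_tlb_flush_vmid(), the host identifies the VM by
 * its pKVM handle rather than by an mmu pointer, and hyp resolves the
 * handle to its own copy of the stage-2 mmu.
 */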
static void handle___pkvm_tlb_flush_vmid(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);
	struct pkvm_hyp_vm *hyp_vm;

	if (!is_protected_kvm_enabled())
		return;

	hyp_vm = get_np_pkvm_hyp_vm(handle);
	if (!hyp_vm)
		return;

	__kvm_tlb_flush_vmid(&hyp_vm->kvm.arch.mmu);
	put_pkvm_hyp_vm(hyp_vm);
}

static void handle___kvm_flush_cpu_context(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);

	__kvm_flush_cpu_context(kern_hyp_va(mmu));
}

static void handle___kvm_timer_set_cntvoff(struct kvm_cpu_context *host_ctxt)
{
	__kvm_timer_set_cntvoff(cpu_reg(host_ctxt, 1));
}

static void handle___kvm_enable_ssbs(struct kvm_cpu_context *host_ctxt)
{
	u64 tmp;

	tmp = read_sysreg_el2(SYS_SCTLR);
	tmp |= SCTLR_ELx_DSSBS;
	write_sysreg_el2(tmp, SYS_SCTLR);
}

static void handle___vgic_v3_get_gic_config(struct kvm_cpu_context *host_ctxt)
{
	cpu_reg(host_ctxt, 1) = __vgic_v3_get_gic_config();
}

static void handle___vgic_v3_init_lrs(struct kvm_cpu_context *host_ctxt)
{
	__vgic_v3_init_lrs();
}

static void handle___vgic_v3_save_aprs(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct vgic_v3_cpu_if *, cpu_if, host_ctxt, 1);

	__vgic_v3_save_aprs(kern_hyp_va(cpu_if));
}

static void handle___vgic_v3_restore_vmcr_aprs(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct vgic_v3_cpu_if *, cpu_if, host_ctxt, 1);

	__vgic_v3_restore_vmcr_aprs(kern_hyp_va(cpu_if));
}

static void handle___pkvm_init(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(phys_addr_t, phys, host_ctxt, 1);
	DECLARE_REG(unsigned long, size, host_ctxt, 2);
	DECLARE_REG(unsigned long, nr_cpus, host_ctxt, 3);
	DECLARE_REG(unsigned long *, per_cpu_base, host_ctxt, 4);
	DECLARE_REG(u32, hyp_va_bits, host_ctxt, 5);

	/*
	 * __pkvm_init() will return only if an error occurred, otherwise it
	 * will tail-call into __pkvm_init_finalise() which will have to deal
	 * with the host context directly.
	 */
	cpu_reg(host_ctxt, 1) = __pkvm_init(phys, size, nr_cpus, per_cpu_base,
					    hyp_va_bits);
}

static void handle___pkvm_cpu_set_vector(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(enum arm64_hyp_spectre_vector, slot, host_ctxt, 1);

	cpu_reg(host_ctxt, 1) = pkvm_cpu_set_vector(slot);
}

static void handle___pkvm_host_share_hyp(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(u64, pfn, host_ctxt, 1);

	cpu_reg(host_ctxt, 1) = __pkvm_host_share_hyp(pfn);
}

static void handle___pkvm_host_unshare_hyp(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(u64, pfn, host_ctxt, 1);

	cpu_reg(host_ctxt, 1) = __pkvm_host_unshare_hyp(pfn);
}

static void handle___pkvm_create_private_mapping(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(phys_addr_t, phys, host_ctxt, 1);
	DECLARE_REG(size_t, size, host_ctxt, 2);
	DECLARE_REG(enum kvm_pgtable_prot, prot, host_ctxt, 3);

	/*
	 * __pkvm_create_private_mapping() populates a pointer with the
	 * hypervisor start address of the allocation.
	 *
	 * However, the handle___pkvm_create_private_mapping() hypercall
	 * crosses the EL1/EL2 boundary, so the pointer would not be valid
	 * in this context.
	 *
	 * Instead, pass the allocation address as the return value (or
	 * return ERR_PTR() on failure).
	 */
	unsigned long haddr;
	int err = __pkvm_create_private_mapping(phys, size, prot, &haddr);

	if (err)
		haddr = (unsigned long)ERR_PTR(err);

	cpu_reg(host_ctxt, 1) = haddr;
}

static void handle___pkvm_prot_finalize(struct kvm_cpu_context *host_ctxt)
{
	cpu_reg(host_ctxt, 1) = __pkvm_prot_finalize();
}

static void handle___pkvm_reserve_vm(struct kvm_cpu_context *host_ctxt)
{
	cpu_reg(host_ctxt, 1) = __pkvm_reserve_vm();
}

static void handle___pkvm_unreserve_vm(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);

	__pkvm_unreserve_vm(handle);
}

static void handle___pkvm_init_vm(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct kvm *, host_kvm, host_ctxt, 1);
	DECLARE_REG(unsigned long, vm_hva, host_ctxt, 2);
	DECLARE_REG(unsigned long, pgd_hva, host_ctxt, 3);

	host_kvm = kern_hyp_va(host_kvm);
	cpu_reg(host_ctxt, 1) = __pkvm_init_vm(host_kvm, vm_hva, pgd_hva);
}

static void handle___pkvm_init_vcpu(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);
	DECLARE_REG(struct kvm_vcpu *, host_vcpu, host_ctxt, 2);
	DECLARE_REG(unsigned long, vcpu_hva, host_ctxt, 3);

	host_vcpu = kern_hyp_va(host_vcpu);
	cpu_reg(host_ctxt, 1) = __pkvm_init_vcpu(handle, host_vcpu, vcpu_hva);
}

static void handle___pkvm_teardown_vm(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);

	cpu_reg(host_ctxt, 1) = __pkvm_teardown_vm(handle);
}

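/*
 * Host hypercall dispatch table, indexed by __KVM_HOST_SMCCC_FUNC_* ID.
 * Entries before __pkvm_prot_finalize are the early "privileged" calls
 * that handle_host_hcall() rejects once protected mode is initialised.
 * Slot 0 (___kvm_hyp_init) is presumably consumed by the hyp init code
 * before this table is ever reachable, hence the placeholder comment.
 */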
typedef void (*hcall_t)(struct kvm_cpu_context *);

#define HANDLE_FUNC(x)	[__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x

static const hcall_t host_hcall[] = {
	/* ___kvm_hyp_init */
	HANDLE_FUNC(__pkvm_init),
	HANDLE_FUNC(__pkvm_create_private_mapping),
	HANDLE_FUNC(__pkvm_cpu_set_vector),
	HANDLE_FUNC(__kvm_enable_ssbs),
	HANDLE_FUNC(__vgic_v3_init_lrs),
	HANDLE_FUNC(__vgic_v3_get_gic_config),
	HANDLE_FUNC(__pkvm_prot_finalize),

	HANDLE_FUNC(__pkvm_host_share_hyp),
	HANDLE_FUNC(__pkvm_host_unshare_hyp),
	HANDLE_FUNC(__pkvm_host_share_guest),
	HANDLE_FUNC(__pkvm_host_unshare_guest),
	HANDLE_FUNC(__pkvm_host_relax_perms_guest),
	HANDLE_FUNC(__pkvm_host_wrprotect_guest),
	HANDLE_FUNC(__pkvm_host_test_clear_young_guest),
	HANDLE_FUNC(__pkvm_host_mkyoung_guest),
	HANDLE_FUNC(__kvm_adjust_pc),
	HANDLE_FUNC(__kvm_vcpu_run),
	HANDLE_FUNC(__kvm_flush_vm_context),
	HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
	HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa_nsh),
	HANDLE_FUNC(__kvm_tlb_flush_vmid),
	HANDLE_FUNC(__kvm_tlb_flush_vmid_range),
	HANDLE_FUNC(__kvm_flush_cpu_context),
	HANDLE_FUNC(__kvm_timer_set_cntvoff),
	HANDLE_FUNC(__vgic_v3_save_aprs),
	HANDLE_FUNC(__vgic_v3_restore_vmcr_aprs),
	HANDLE_FUNC(__pkvm_reserve_vm),
	HANDLE_FUNC(__pkvm_unreserve_vm),
	HANDLE_FUNC(__pkvm_init_vm),
	HANDLE_FUNC(__pkvm_init_vcpu),
	HANDLE_FUNC(__pkvm_teardown_vm),
	HANDLE_FUNC(__pkvm_vcpu_load),
	HANDLE_FUNC(__pkvm_vcpu_put),
	HANDLE_FUNC(__pkvm_tlb_flush_vmid),
};

static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(unsigned long, id, host_ctxt, 0);
	unsigned long hcall_min = 0;
	hcall_t hfn;

	/*
	 * If pKVM has been initialised then reject any calls to the
	 * early "privileged" hypercalls. Note that we cannot reject
	 * calls to __pkvm_prot_finalize for two reasons: (1) The static
	 * key used to determine initialisation must be toggled prior to
	 * finalisation and (2) finalisation is performed on a per-CPU
	 * basis. This is all fine, however, since __pkvm_prot_finalize
	 * returns -EPERM after the first call for a given CPU.
	 */
	if (static_branch_unlikely(&kvm_protected_mode_initialized))
		hcall_min = __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize;

	id &= ~ARM_SMCCC_CALL_HINTS;
	id -= KVM_HOST_SMCCC_ID(0);

	if (unlikely(id < hcall_min || id >= ARRAY_SIZE(host_hcall)))
		goto inval;

	hfn = host_hcall[id];
	if (unlikely(!hfn))
		goto inval;

	cpu_reg(host_ctxt, 0) = SMCCC_RET_SUCCESS;
	hfn(host_ctxt);

	return;
inval:
	cpu_reg(host_ctxt, 0) = SMCCC_RET_NOT_SUPPORTED;
}

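/*
 * SMCs that neither the PSCI nor the FF-A handler claims are forwarded
 * to EL3 on the host's behalf.
 */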
static void default_host_smc_handler(struct kvm_cpu_context *host_ctxt)
{
	__kvm_hyp_host_forward_smc(host_ctxt);
}

static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(u64, func_id, host_ctxt, 0);
	bool handled;

	func_id &= ~ARM_SMCCC_CALL_HINTS;

	handled = kvm_host_psci_handler(host_ctxt, func_id);
	if (!handled)
		handled = kvm_host_ffa_handler(host_ctxt, func_id);
	if (!handled)
		default_host_smc_handler(host_ctxt);

	/* SMC was trapped, move ELR past the current PC. */
	kvm_skip_host_instr();
}

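/*
 * Main trap handler for exceptions taken from the host, called from
 * the hyp exception vectors: HVCs, trapped SMCs and host stage-2
 * memory aborts.
 */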
void handle_trap(struct kvm_cpu_context *host_ctxt)
{
	u64 esr = read_sysreg_el2(SYS_ESR);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_HVC64:
		handle_host_hcall(host_ctxt);
		break;
	case ESR_ELx_EC_SMC64:
		handle_host_smc(host_ctxt);
		break;
	case ESR_ELx_EC_IABT_LOW:
	case ESR_ELx_EC_DABT_LOW:
		handle_host_mem_abort(host_ctxt);
		break;
	default:
		BUG();
	}
}