xref: /linux/arch/arm64/kvm/hyp/nvhe/hyp-main.c (revision ec8a42e7343234802b9054874fe01810880289ce)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 - Google Inc
 * Author: Andrew Scull <ascull@google.com>
 */

#include <hyp/switch.h>

#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>

#include <nvhe/trap_handler.h>

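/* Per-CPU copy of the parameters used to initialise EL2 on this CPU. */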
DEFINE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);

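/* Re-issue a trapped SMC to EL3 on the host's behalf (implemented in assembly). */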
void __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt);

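/*
 * Each handle___*() wrapper unpacks its arguments from the saved host
 * context with DECLARE_REG() and writes any return value back into x1.
 * Pointers passed by the host are kernel VAs and must be converted with
 * kern_hyp_va() before being dereferenced at EL2.
 */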
static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);

	cpu_reg(host_ctxt, 1) = __kvm_vcpu_run(kern_hyp_va(vcpu));
}

static void handle___kvm_flush_vm_context(struct kvm_cpu_context *host_ctxt)
{
	__kvm_flush_vm_context();
}

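/* Stage-2 TLB maintenance, performed at EL2 on behalf of the host. */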
static void handle___kvm_tlb_flush_vmid_ipa(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
	DECLARE_REG(phys_addr_t, ipa, host_ctxt, 2);
	DECLARE_REG(int, level, host_ctxt, 3);

	__kvm_tlb_flush_vmid_ipa(kern_hyp_va(mmu), ipa, level);
}

static void handle___kvm_tlb_flush_vmid(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);

	__kvm_tlb_flush_vmid(kern_hyp_va(mmu));
}

static void handle___kvm_tlb_flush_local_vmid(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);

	__kvm_tlb_flush_local_vmid(kern_hyp_va(mmu));
}

static void handle___kvm_timer_set_cntvoff(struct kvm_cpu_context *host_ctxt)
{
	__kvm_timer_set_cntvoff(cpu_reg(host_ctxt, 1));
}

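/*
 * Set SCTLR_EL2.DSSBS so that PSTATE.SSBS defaults to 1 on exceptions
 * taken to EL2, i.e. EL2 runs with the speculative store bypass
 * mitigation disabled.
 */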
static void handle___kvm_enable_ssbs(struct kvm_cpu_context *host_ctxt)
{
	u64 tmp;

	tmp = read_sysreg_el2(SYS_SCTLR);
	tmp |= SCTLR_ELx_DSSBS;
	write_sysreg_el2(tmp, SYS_SCTLR);
}

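/*
 * GICv3 (ICH_*) and debug (MDCR_EL2) registers are only accessible at
 * EL2, so the host asks the hypervisor to access them on its behalf.
 */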
static void handle___vgic_v3_get_ich_vtr_el2(struct kvm_cpu_context *host_ctxt)
{
	cpu_reg(host_ctxt, 1) = __vgic_v3_get_ich_vtr_el2();
}

static void handle___vgic_v3_read_vmcr(struct kvm_cpu_context *host_ctxt)
{
	cpu_reg(host_ctxt, 1) = __vgic_v3_read_vmcr();
}

static void handle___vgic_v3_write_vmcr(struct kvm_cpu_context *host_ctxt)
{
	__vgic_v3_write_vmcr(cpu_reg(host_ctxt, 1));
}

static void handle___vgic_v3_init_lrs(struct kvm_cpu_context *host_ctxt)
{
	__vgic_v3_init_lrs();
}

static void handle___kvm_get_mdcr_el2(struct kvm_cpu_context *host_ctxt)
{
	cpu_reg(host_ctxt, 1) = __kvm_get_mdcr_el2();
}

static void handle___vgic_v3_save_aprs(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct vgic_v3_cpu_if *, cpu_if, host_ctxt, 1);

	__vgic_v3_save_aprs(kern_hyp_va(cpu_if));
}

static void handle___vgic_v3_restore_aprs(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct vgic_v3_cpu_if *, cpu_if, host_ctxt, 1);

	__vgic_v3_restore_aprs(kern_hyp_va(cpu_if));
}

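/* Signature shared by all of the host hypercall handlers above. */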
typedef void (*hcall_t)(struct kvm_cpu_context *);

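/*
 * The table is indexed by SMCCC function ID and stores kernel-image
 * addresses (kimg_fn_ptr()); these are converted to hyp VAs with
 * kimg_fn_hyp_va() at call time, once the index has been validated.
 */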
#define HANDLE_FUNC(x)	[__KVM_HOST_SMCCC_FUNC_##x] = kimg_fn_ptr(handle_##x)

static const hcall_t *host_hcall[] = {
	HANDLE_FUNC(__kvm_vcpu_run),
	HANDLE_FUNC(__kvm_flush_vm_context),
	HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
	HANDLE_FUNC(__kvm_tlb_flush_vmid),
	HANDLE_FUNC(__kvm_tlb_flush_local_vmid),
	HANDLE_FUNC(__kvm_timer_set_cntvoff),
	HANDLE_FUNC(__kvm_enable_ssbs),
	HANDLE_FUNC(__vgic_v3_get_ich_vtr_el2),
	HANDLE_FUNC(__vgic_v3_read_vmcr),
	HANDLE_FUNC(__vgic_v3_write_vmcr),
	HANDLE_FUNC(__vgic_v3_init_lrs),
	HANDLE_FUNC(__kvm_get_mdcr_el2),
	HANDLE_FUNC(__vgic_v3_save_aprs),
	HANDLE_FUNC(__vgic_v3_restore_aprs),
};

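/*
 * Dispatch a host hypercall: the host issues an HVC with an SMCCC
 * function ID in x0 and arguments in x1..x7; on the kernel side this is
 * wrapped by kvm_call_hyp(), e.g. kvm_call_hyp(__kvm_tlb_flush_vmid, mmu).
 * x0 carries the SMCCC status back to the host, x1 the handler's result.
 */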
static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(unsigned long, id, host_ctxt, 0);
	const hcall_t *kfn;
	hcall_t hfn;

	/* Rebase the SMCCC function ID to an index into host_hcall[]. */
	id -= KVM_HOST_SMCCC_ID(0);

	if (unlikely(id >= ARRAY_SIZE(host_hcall)))
		goto inval;

	kfn = host_hcall[id];
	if (unlikely(!kfn))
		goto inval;

	cpu_reg(host_ctxt, 0) = SMCCC_RET_SUCCESS;

	/* Convert the kernel-image address to a hyp VA before calling. */
	hfn = kimg_fn_hyp_va(kfn);
	hfn(host_ctxt);

	return;
inval:
	cpu_reg(host_ctxt, 0) = SMCCC_RET_NOT_SUPPORTED;
}

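/* Any SMC not handled by hyp itself (currently only PSCI) is forwarded to EL3. */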
static void default_host_smc_handler(struct kvm_cpu_context *host_ctxt)
{
	__kvm_hyp_host_forward_smc(host_ctxt);
}

static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
{
	bool handled;

	handled = kvm_host_psci_handler(host_ctxt);
	if (!handled)
		default_host_smc_handler(host_ctxt);

	/* SMC was trapped, move ELR past the current PC. */
	kvm_skip_host_instr();
}

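/*
 * Entry point for traps taken from the host, called from the EL2
 * exception vectors with the saved host context.
 */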
void handle_trap(struct kvm_cpu_context *host_ctxt)
{
	u64 esr = read_sysreg_el2(SYS_ESR);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_HVC64:
		handle_host_hcall(host_ctxt);
		break;
	case ESR_ELx_EC_SMC64:
		handle_host_smc(host_ctxt);
		break;
	default:
		hyp_panic();
	}
}