/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmio.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu);

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);

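/*
 * Reset HCR_EL2 to the default set of guest trap/virtualization flags.
 * When the kernel itself runs at EL2 (VHE), E2H must stay set; for a
 * vcpu with the EL1_32BIT feature, clear HCR_EL2.RW so EL1 is AArch32.
 */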
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
	if (is_kernel_in_hyp_mode())
		vcpu->arch.hcr_el2 |= HCR_E2H;
	if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
		vcpu->arch.hcr_el2 &= ~HCR_RW;
}

static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hcr_el2;
}

static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
{
	vcpu->arch.hcr_el2 = hcr;
}

static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
}

static inline unsigned long *vcpu_elr_el1(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1;
}

static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
}

static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return kvm_condition_valid32(vcpu);

	return true;
}

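/*
 * Skip the instruction that trapped: AArch64 instructions are always
 * 4 bytes wide, while AArch32 may need a 2-byte (Thumb) adjustment,
 * which kvm_skip_instr32() derives from is_wide_instr.
 */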
static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
	if (vcpu_mode_is_32bit(vcpu))
		kvm_skip_instr32(vcpu, is_wide_instr);
	else
		*vcpu_pc(vcpu) += 4;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
}

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
 * AArch32 with banked registers.
 */
static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
					 u8 reg_num)
{
	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
}

static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
				unsigned long val)
{
	if (reg_num != 31)
		vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
}
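
/*
 * Illustrative sketch (not part of this header): completing a trapped
 * MMIO read roughly follows this pattern, with the register number and
 * access size decoded from ESR_EL2. The mmio_read_value() helper is
 * made up for the example.
 *
 *	static void example_complete_mmio_read(struct kvm_vcpu *vcpu)
 *	{
 *		int rt = kvm_vcpu_dabt_get_rd(vcpu);
 *		int len = kvm_vcpu_dabt_get_as(vcpu);
 *		unsigned long data = mmio_read_value(vcpu, len); // hypothetical
 *
 *		vcpu_set_reg(vcpu, rt, vcpu_data_host_to_guest(vcpu, data, len));
 *		kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
 *	}
 */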

/* Get vcpu SPSR for current mode */
static inline unsigned long *vcpu_spsr(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return vcpu_spsr32(vcpu);

	return (unsigned long *)&vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
}

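/* Return true unless the vcpu is in an unprivileged mode (EL0t, or USR on AArch32). */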
static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
	u32 mode;

	if (vcpu_mode_is_32bit(vcpu)) {
		mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
		return mode > COMPAT_PSR_MODE_USR;
	}

	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

	return mode != PSR_MODE_EL0t;
}

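/*
 * ESR_EL2 as saved on the last exit; the "hsr" name mirrors the 32-bit
 * port, where the equivalent register is the Hyp Syndrome Register.
 */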
static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.esr_el2;
}

static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_hsr(vcpu);

	if (esr & ESR_ELx_CV)
		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

	return -1;
}

static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.far_el2;
}

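/*
 * HPFAR_EL2 reports the faulting IPA with its FIPA field at bit 4;
 * masking the reserved low bits and shifting left by 8 moves bit 4 to
 * bit 12, reconstructing the page-aligned intermediate physical address.
 */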
static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
}

static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
}

static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
		kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
}

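/* Decode ESR_ELx.SAS into an access size in bytes: 1, 2, 4 or 8. */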
static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
	return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}

/* This one is not specific to Data Abort */
static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
}

static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
	return ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
}

static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
}

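/*
 * True for synchronous external aborts, whether taken on the access
 * itself or on a translation table walk (levels 0-3), including the
 * parity/ECC variants.
 */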
static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
{
	switch (kvm_vcpu_trap_get_fault_type(vcpu)) {
	case FSC_SEA:
	case FSC_SEA_TTW0:
	case FSC_SEA_TTW1:
	case FSC_SEA_TTW2:
	case FSC_SEA_TTW3:
	case FSC_SECC:
	case FSC_SECC_TTW0:
	case FSC_SECC_TTW1:
	case FSC_SECC_TTW2:
	case FSC_SECC_TTW3:
		return true;
	default:
		return false;
	}
}

static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_hsr(vcpu);
	return (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
}
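
/*
 * Illustrative sketch (not in this header, and not the real sys_regs.c
 * code): a system-register trap handler would pull the transfer
 * register out of the ESR and move data to or from it, e.g. emulating
 * a read as RAZ.
 *
 *	static void example_handle_sysreg_read(struct kvm_vcpu *vcpu)
 *	{
 *		int rt = kvm_vcpu_sys_get_rt(vcpu);
 *
 *		vcpu_set_reg(vcpu, rt, 0);	// read-as-zero emulation
 *		kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
 *	}
 */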

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return vcpu_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		*vcpu_cpsr(vcpu) |= COMPAT_PSR_E_BIT;
	else
		vcpu_sys_reg(vcpu, SCTLR_EL1) |= (1 << 25);	/* SCTLR_EL1.EE */
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_E_BIT);

	return !!(vcpu_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));	/* SCTLR_EL1.EE */
}

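/*
 * For MMIO emulation: convert data between the guest's current data
 * endianness (as reported by kvm_vcpu_is_be()) and the host's, for a
 * given access width.
 */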
static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		case 4:
			return be32_to_cpu(data & 0xffffffff);
		default:
			return be64_to_cpu(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return le16_to_cpu(data & 0xffff);
		case 4:
			return le32_to_cpu(data & 0xffffffff);
		default:
			return le64_to_cpu(data);
		}
	}

	return data;		/* Leave LE untouched */
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		case 4:
			return cpu_to_be32(data & 0xffffffff);
		default:
			return cpu_to_be64(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_le16(data & 0xffff);
		case 4:
			return cpu_to_le32(data & 0xffffffff);
		default:
			return cpu_to_le64(data);
		}
	}

	return data;		/* Leave LE untouched */
}
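
/*
 * Illustrative sketch (not in this header): emulating an MMIO store
 * would combine the decode and conversion helpers above roughly like
 * this; the device_write() callback is made up for the example.
 *
 *	static void example_handle_mmio_store(struct kvm_vcpu *vcpu,
 *					      phys_addr_t ipa)
 *	{
 *		int len = kvm_vcpu_dabt_get_as(vcpu);
 *		int rt = kvm_vcpu_dabt_get_rd(vcpu);
 *		unsigned long data;
 *
 *		data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt), len);
 *		device_write(ipa, data, len);	// hypothetical device model
 *	}
 */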

#endif /* __ARM64_KVM_EMULATE_H__ */