/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmio.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
unsigned long vcpu_read_spsr32(const struct kvm_vcpu *vcpu);
void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v);

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_undef32(struct kvm_vcpu *vcpu);
void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);

static inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.hcr_el2 & HCR_RW);
}

static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
	if (is_kernel_in_hyp_mode())
		vcpu->arch.hcr_el2 |= HCR_E2H;
	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) {
		/* route synchronous external abort exceptions to EL2 */
		vcpu->arch.hcr_el2 |= HCR_TEA;
		/* trap error record accesses */
		vcpu->arch.hcr_el2 |= HCR_TERR;
	}

	if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
		vcpu->arch.hcr_el2 &= ~HCR_RW;

	/*
	 * TID3: trap feature register accesses that we virtualise.
	 * For now this is conditional, since no AArch32 feature regs
	 * are currently virtualised.
	 */
	if (!vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 |= HCR_TID3;
}
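/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * helper showing how the HCR_EL2 value built by vcpu_reset_hcr() would
 * be queried. Because HCR_TID3 is only set for AArch64 guests above,
 * a 32-bit EL1 guest is never reported as trapping its feature
 * registers.
 */
static inline bool example_vcpu_traps_feature_regs(struct kvm_vcpu *vcpu)
{
	/* example_* is a hypothetical name, not an in-tree helper */
	return !!(vcpu->arch.hcr_el2 & HCR_TID3);
}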
static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu->arch.hcr_el2;
}

static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
{
	vcpu->arch.vsesr_el2 = vsesr;
}

static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
}

static inline unsigned long *__vcpu_elr_el1(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1;
}

static inline unsigned long vcpu_read_elr_el1(const struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.sysregs_loaded_on_cpu)
		return read_sysreg_el1(elr);
	else
		return *__vcpu_elr_el1(vcpu);
}

static inline void vcpu_write_elr_el1(const struct kvm_vcpu *vcpu, unsigned long v)
{
	if (vcpu->arch.sysregs_loaded_on_cpu)
		write_sysreg_el1(v, elr);
	else
		*__vcpu_elr_el1(vcpu) = v;
}

static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
}

static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return kvm_condition_valid32(vcpu);

	return true;
}

static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
	if (vcpu_mode_is_32bit(vcpu))
		kvm_skip_instr32(vcpu, is_wide_instr);
	else
		*vcpu_pc(vcpu) += 4;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
}
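/*
 * Illustrative sketch, not part of the original header: the usual
 * exit-handler pattern built on the helpers above. A trapped AArch32
 * instruction whose condition code fails is skipped without being
 * emulated; otherwise it is emulated first and then skipped. The
 * example_* name is hypothetical; is_wide would come from the
 * ESR_EL2 IL bit.
 */
static inline void example_handle_trap(struct kvm_vcpu *vcpu, bool is_wide)
{
	if (kvm_condition_valid(vcpu)) {
		/* ... emulate the trapped access here ... */
	}
	kvm_skip_instr(vcpu, is_wide);
}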
/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
 * AArch32 with banked registers.
 */
static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
					 u8 reg_num)
{
	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
}

static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
				unsigned long val)
{
	if (reg_num != 31)
		vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
}

static inline unsigned long vcpu_read_spsr(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return vcpu_read_spsr32(vcpu);

	if (vcpu->arch.sysregs_loaded_on_cpu)
		return read_sysreg_el1(spsr);
	else
		return vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
}

static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		vcpu_write_spsr32(vcpu, v);
		return;
	}

	if (vcpu->arch.sysregs_loaded_on_cpu)
		write_sysreg_el1(v, spsr);
	else
		vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1] = v;
}

static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
	u32 mode;

	if (vcpu_mode_is_32bit(vcpu)) {
		mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
		return mode > COMPAT_PSR_MODE_USR;
	}

	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

	return mode != PSR_MODE_EL0t;
}

static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.esr_el2;
}

static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_hsr(vcpu);

	if (esr & ESR_ELx_CV)
		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

	return -1;
}

static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.far_el2;
}

static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}

static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.disr_el1;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
}

static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
}

static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
		kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
}

static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
	return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}

/* This one is not specific to Data Abort */
static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
}

static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
	return ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
}
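/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * predicate built on kvm_vcpu_trap_get_class(), mirroring the
 * kvm_vcpu_trap_is_iabt() helper that follows but matching data aborts
 * taken from a lower exception level instead.
 */
static inline bool example_trap_is_dabt(const struct kvm_vcpu *vcpu)
{
	/* example_* is a hypothetical name, not an in-tree helper */
	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_DABT_LOW;
}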
static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
}

static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
}

static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
{
	switch (kvm_vcpu_trap_get_fault(vcpu)) {
	case FSC_SEA:
	case FSC_SEA_TTW0:
	case FSC_SEA_TTW1:
	case FSC_SEA_TTW2:
	case FSC_SEA_TTW3:
	case FSC_SECC:
	case FSC_SECC_TTW0:
	case FSC_SECC_TTW1:
	case FSC_SECC_TTW2:
	case FSC_SECC_TTW3:
		return true;
	default:
		return false;
	}
}

static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_hsr(vcpu);

	return (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
}

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		*vcpu_cpsr(vcpu) |= COMPAT_PSR_E_BIT;
	} else {
		u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);

		sctlr |= (1 << 25);	/* SCTLR_EL1.EE: big-endian EL1 data accesses */
		vcpu_write_sys_reg(vcpu, SCTLR_EL1, sctlr);
	}
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_E_BIT);

	/* bit 25 is SCTLR_EL1.EE */
	return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
}

static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		case 4:
			return be32_to_cpu(data & 0xffffffff);
		default:
			return be64_to_cpu(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return le16_to_cpu(data & 0xffff);
		case 4:
			return le32_to_cpu(data & 0xffffffff);
		default:
			return le64_to_cpu(data);
		}
	}

	return data;		/* Leave LE untouched */
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		case 4:
			return cpu_to_be32(data & 0xffffffff);
		default:
			return cpu_to_be64(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_le16(data & 0xffff);
		case 4:
			return cpu_to_le32(data & 0xffffffff);
		default:
			return cpu_to_le64(data);
		}
	}

	return data;		/* Leave LE untouched */
}

#endif /* __ARM64_KVM_EMULATE_H__ */