/* SPDX-License-Identifier: GPL-2.0 */
/*
 * AArch64 processor specific defines
 *
 * Copyright (C) 2018, Red Hat, Inc.
 */
#ifndef SELFTEST_KVM_PROCESSOR_H
#define SELFTEST_KVM_PROCESSOR_H

#include "kvm_util.h"
#include "ucall_common.h"

#include <linux/stringify.h>
#include <linux/types.h>
#include <asm/brk-imm.h>
#include <asm/esr.h>
#include <asm/sysreg.h>


#define ARM64_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
			   KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

/*
 * KVM_ARM64_SYS_REG(sys_reg_id): Helper macro to convert
 * SYS_* register definitions in asm/sysreg.h to use in KVM
 * calls such as vcpu_get_reg() and vcpu_set_reg().
 */
#define KVM_ARM64_SYS_REG(sys_reg_id)			\
	ARM64_SYS_REG(sys_reg_Op0(sys_reg_id),		\
		      sys_reg_Op1(sys_reg_id),		\
		      sys_reg_CRn(sys_reg_id),		\
		      sys_reg_CRm(sys_reg_id),		\
		      sys_reg_Op2(sys_reg_id))

/*
 * Default MAIR
 *                  index   attribute
 * DEVICE_nGnRnE      0     0000:0000
 * DEVICE_nGnRE       1     0000:0100
 * DEVICE_GRE         2     0000:1100
 * NORMAL_NC          3     0100:0100
 * NORMAL             4     1111:1111
 * NORMAL_WT          5     1011:1011
 */

/* Linux doesn't use these memory types, so let's define them. */
#define MAIR_ATTR_DEVICE_GRE	UL(0x0c)
#define MAIR_ATTR_NORMAL_WT	UL(0xbb)

#define MT_DEVICE_nGnRnE	0
#define MT_DEVICE_nGnRE		1
#define MT_DEVICE_GRE		2
#define MT_NORMAL_NC		3
#define MT_NORMAL		4
#define MT_NORMAL_WT		5

#define DEFAULT_MAIR_EL1						\
	(MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) |	\
	 MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) |	\
	 MAIR_ATTRIDX(MAIR_ATTR_DEVICE_GRE, MT_DEVICE_GRE) |		\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) |		\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) |			\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_WT, MT_NORMAL_WT))

/* TCR_EL1 specific flags */
#define TCR_T0SZ_OFFSET		0
#define TCR_T0SZ(x)		((UL(64) - (x)) << TCR_T0SZ_OFFSET)

#define TCR_IRGN0_SHIFT		8
#define TCR_IRGN0_MASK		(UL(3) << TCR_IRGN0_SHIFT)
#define TCR_IRGN0_NC		(UL(0) << TCR_IRGN0_SHIFT)
#define TCR_IRGN0_WBWA		(UL(1) << TCR_IRGN0_SHIFT)
#define TCR_IRGN0_WT		(UL(2) << TCR_IRGN0_SHIFT)
#define TCR_IRGN0_WBnWA		(UL(3) << TCR_IRGN0_SHIFT)

#define TCR_ORGN0_SHIFT		10
#define TCR_ORGN0_MASK		(UL(3) << TCR_ORGN0_SHIFT)
#define TCR_ORGN0_NC		(UL(0) << TCR_ORGN0_SHIFT)
#define TCR_ORGN0_WBWA		(UL(1) << TCR_ORGN0_SHIFT)
#define TCR_ORGN0_WT		(UL(2) << TCR_ORGN0_SHIFT)
#define TCR_ORGN0_WBnWA		(UL(3) << TCR_ORGN0_SHIFT)

#define TCR_SH0_SHIFT		12
#define TCR_SH0_MASK		(UL(3) << TCR_SH0_SHIFT)
#define TCR_SH0_INNER		(UL(3) << TCR_SH0_SHIFT)

#define TCR_TG0_SHIFT		14
#define TCR_TG0_MASK		(UL(3) << TCR_TG0_SHIFT)
#define TCR_TG0_4K		(UL(0) << TCR_TG0_SHIFT)
#define TCR_TG0_64K		(UL(1) << TCR_TG0_SHIFT)
#define TCR_TG0_16K		(UL(2) << TCR_TG0_SHIFT)

#define TCR_EPD1_SHIFT		23
#define TCR_EPD1_MASK		(UL(1) << TCR_EPD1_SHIFT)

#define TCR_IPS_SHIFT		32
#define TCR_IPS_MASK		(UL(7) << TCR_IPS_SHIFT)
#define TCR_IPS_52_BITS		(UL(6) << TCR_IPS_SHIFT)
#define TCR_IPS_48_BITS		(UL(5) << TCR_IPS_SHIFT)
#define TCR_IPS_40_BITS		(UL(2) << TCR_IPS_SHIFT)
#define TCR_IPS_36_BITS		(UL(1) << TCR_IPS_SHIFT)

#define TCR_TBI1		(UL(1) << 38)
#define TCR_HA			(UL(1) << 39)
#define TCR_DS			(UL(1) << 59)
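/*
 * Illustrative sketch, not part of the original header: one way a test
 * might combine the TCR_EL1 flags above for a 4K granule, a 48-bit
 * physical/intermediate physical address size (IPS), a 48-bit VA range
 * (T0SZ encodes 64 - VA bits) and inner-shareable, write-back cacheable
 * table walks. The helper name is hypothetical.
 */
static inline uint64_t example_tcr_el1_4k_48bit(void)
{
	return TCR_TG0_4K | TCR_IPS_48_BITS | TCR_SH0_INNER |
	       TCR_IRGN0_WBWA | TCR_ORGN0_WBWA | TCR_T0SZ(48);
}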
/*
 * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
 */
#define PTE_ATTRINDX(t)		((t) << 2)
#define PTE_ATTRINDX_MASK	GENMASK(4, 2)
#define PTE_ATTRINDX_SHIFT	2

#define PTE_VALID		BIT(0)
#define PGD_TYPE_TABLE		BIT(1)
#define PUD_TYPE_TABLE		BIT(1)
#define PMD_TYPE_TABLE		BIT(1)
#define PTE_TYPE_PAGE		BIT(1)

#define PTE_SHARED		(UL(3) << 8) /* SH[1:0], inner shareable */
#define PTE_AF			BIT(10)

#define PTE_ADDR_MASK(page_shift)	GENMASK(47, (page_shift))
#define PTE_ADDR_51_48			GENMASK(15, 12)
#define PTE_ADDR_51_48_SHIFT		12
#define PTE_ADDR_MASK_LPA2(page_shift)	GENMASK(49, (page_shift))
#define PTE_ADDR_51_50_LPA2		GENMASK(9, 8)
#define PTE_ADDR_51_50_LPA2_SHIFT	8

void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init);
struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
				  struct kvm_vcpu_init *init, void *guest_code);

struct ex_regs {
	u64 regs[31];
	u64 sp;
	u64 pc;
	u64 pstate;
};

#define VECTOR_NUM	16

enum {
	VECTOR_SYNC_CURRENT_SP0,
	VECTOR_IRQ_CURRENT_SP0,
	VECTOR_FIQ_CURRENT_SP0,
	VECTOR_ERROR_CURRENT_SP0,

	VECTOR_SYNC_CURRENT,
	VECTOR_IRQ_CURRENT,
	VECTOR_FIQ_CURRENT,
	VECTOR_ERROR_CURRENT,

	VECTOR_SYNC_LOWER_64,
	VECTOR_IRQ_LOWER_64,
	VECTOR_FIQ_LOWER_64,
	VECTOR_ERROR_LOWER_64,

	VECTOR_SYNC_LOWER_32,
	VECTOR_IRQ_LOWER_32,
	VECTOR_FIQ_LOWER_32,
	VECTOR_ERROR_LOWER_32,
};

#define VECTOR_IS_SYNC(v) ((v) == VECTOR_SYNC_CURRENT_SP0 ||	\
			   (v) == VECTOR_SYNC_CURRENT	   ||	\
			   (v) == VECTOR_SYNC_LOWER_64	   ||	\
			   (v) == VECTOR_SYNC_LOWER_32)

void aarch64_get_supported_page_sizes(uint32_t ipa, uint32_t *ipa4k,
				      uint32_t *ipa16k, uint32_t *ipa64k);

void vm_init_descriptor_tables(struct kvm_vm *vm);
void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu);

typedef void(*handler_fn)(struct ex_regs *);
void vm_install_exception_handler(struct kvm_vm *vm,
				  int vector, handler_fn handler);
void vm_install_sync_handler(struct kvm_vm *vm,
			     int vector, int ec, handler_fn handler);
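/*
 * Illustrative sketch, not part of the original header: a guest-side
 * handler that steps over a brk instruction, and a host-side helper that
 * installs it for the synchronous vector taken from the current EL with
 * SPx. The names are hypothetical; ESR_ELx_EC_BRK64 comes from asm/esr.h.
 */
static inline void example_skip_brk_handler(struct ex_regs *regs)
{
	/* Skip the 4-byte brk instruction that raised the exception. */
	regs->pc += 4;
}

static inline void example_install_brk_handler(struct kvm_vm *vm,
					       struct kvm_vcpu *vcpu)
{
	vm_init_descriptor_tables(vm);
	vcpu_init_descriptor_tables(vcpu);
	vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT, ESR_ELx_EC_BRK64,
				example_skip_brk_handler);
}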
uint64_t *virt_get_pte_hva_at_level(struct kvm_vm *vm, vm_vaddr_t gva, int level);
uint64_t *virt_get_pte_hva(struct kvm_vm *vm, vm_vaddr_t gva);

static inline void cpu_relax(void)
{
	asm volatile("yield" ::: "memory");
}

#define isb()		asm volatile("isb" : : : "memory")
#define dsb(opt)	asm volatile("dsb " #opt : : : "memory")
#define dmb(opt)	asm volatile("dmb " #opt : : : "memory")

#define dma_wmb()	dmb(oshst)
#define __iowmb()	dma_wmb()

#define dma_rmb()	dmb(oshld)

#define __iormb(v)							\
({									\
	unsigned long tmp;						\
									\
	dma_rmb();							\
									\
	/*								\
	 * Courtesy of arch/arm64/include/asm/io.h:			\
	 * Create a dummy control dependency from the IO read to any	\
	 * later instructions. This ensures that a subsequent call	\
	 * to udelay() will be ordered due to the ISB in __delay().	\
	 */								\
	asm volatile("eor	%0, %1, %1\n"				\
		     "cbnz	%0, ."					\
		     : "=r" (tmp) : "r" ((unsigned long)(v))		\
		     : "memory");					\
})

static __always_inline void __raw_writel(u32 val, volatile void *addr)
{
	asm volatile("str %w0, [%1]" : : "rZ" (val), "r" (addr));
}

static __always_inline u32 __raw_readl(const volatile void *addr)
{
	u32 val;
	asm volatile("ldr %w0, [%1]" : "=r" (val) : "r" (addr));
	return val;
}

static __always_inline void __raw_writeq(u64 val, volatile void *addr)
{
	asm volatile("str %0, [%1]" : : "rZ" (val), "r" (addr));
}

static __always_inline u64 __raw_readq(const volatile void *addr)
{
	u64 val;
	asm volatile("ldr %0, [%1]" : "=r" (val) : "r" (addr));
	return val;
}

#define writel_relaxed(v,c)	((void)__raw_writel((__force u32)cpu_to_le32(v),(c)))
#define readl_relaxed(c)	({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; })
#define writeq_relaxed(v,c)	((void)__raw_writeq((__force u64)cpu_to_le64(v),(c)))
#define readq_relaxed(c)	({ u64 __r = le64_to_cpu((__force __le64)__raw_readq(c)); __r; })

#define writel(v,c)		({ __iowmb(); writel_relaxed((v),(c));})
#define readl(c)		({ u32 __v = readl_relaxed(c); __iormb(__v); __v; })
#define writeq(v,c)		({ __iowmb(); writeq_relaxed((v),(c));})
#define readq(c)		({ u64 __v = readq_relaxed(c); __iormb(__v); __v; })


static inline void local_irq_enable(void)
{
	asm volatile("msr daifclr, #3" : : : "memory");
}

static inline void local_irq_disable(void)
{
	asm volatile("msr daifset, #3" : : : "memory");
}

static inline void local_serror_enable(void)
{
	asm volatile("msr daifclr, #4" : : : "memory");
}

static inline void local_serror_disable(void)
{
	asm volatile("msr daifset, #4" : : : "memory");
}

/**
 * struct arm_smccc_res - Result from SMC/HVC call
 * @a0-a3: result values from registers 0 to 3
 */
struct arm_smccc_res {
	unsigned long a0;
	unsigned long a1;
	unsigned long a2;
	unsigned long a3;
};

/**
 * smccc_hvc - Invoke an SMCCC function using the hvc conduit
 * @function_id: the SMCCC function to be called
 * @arg0-arg6: SMCCC function arguments, corresponding to registers x1-x7
 * @res: pointer to write the return values from registers x0-x3
 */
void smccc_hvc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
	       uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
	       uint64_t arg6, struct arm_smccc_res *res);

/**
 * smccc_smc - Invoke an SMCCC function using the smc conduit
 * @function_id: the SMCCC function to be called
 * @arg0-arg6: SMCCC function arguments, corresponding to registers x1-x7
 * @res: pointer to write the return values from registers x0-x3
 */
void smccc_smc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
	       uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
	       uint64_t arg6, struct arm_smccc_res *res);
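/*
 * Illustrative sketch, not part of the original header: querying the SMCCC
 * version from guest code running at EL1 via the hvc conduit. 0x80000000
 * is the SMCCC_VERSION function ID; the helper name is hypothetical.
 */
static inline uint64_t example_smccc_version(void)
{
	struct arm_smccc_res res;

	/* SMCCC_VERSION takes no arguments; the version is returned in x0. */
	smccc_hvc(0x80000000, 0, 0, 0, 0, 0, 0, 0, &res);

	return res.a0;
}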
/* Execute a Wait For Interrupt instruction. */
void wfi(void);

void test_wants_mte(void);
void test_disable_default_vgic(void);

bool vm_supports_el2(struct kvm_vm *vm);

static inline bool test_supports_el2(void)
{
	struct kvm_vm *vm = vm_create(1);
	bool supported = vm_supports_el2(vm);

	kvm_vm_free(vm);
	return supported;
}

static inline bool vcpu_has_el2(struct kvm_vcpu *vcpu)
{
	return vcpu->init.features[0] & BIT(KVM_ARM_VCPU_HAS_EL2);
}

#define MAPPED_EL2_SYSREG(el2, el1)		\
	case SYS_##el1:				\
		if (vcpu_has_el2(vcpu))		\
			alias = SYS_##el2;	\
		break


static __always_inline u64 ctxt_reg_alias(struct kvm_vcpu *vcpu, u32 encoding)
{
	u32 alias = encoding;

	BUILD_BUG_ON(!__builtin_constant_p(encoding));

	switch (encoding) {
	MAPPED_EL2_SYSREG(SCTLR_EL2, SCTLR_EL1);
	MAPPED_EL2_SYSREG(CPTR_EL2, CPACR_EL1);
	MAPPED_EL2_SYSREG(TTBR0_EL2, TTBR0_EL1);
	MAPPED_EL2_SYSREG(TTBR1_EL2, TTBR1_EL1);
	MAPPED_EL2_SYSREG(TCR_EL2, TCR_EL1);
	MAPPED_EL2_SYSREG(VBAR_EL2, VBAR_EL1);
	MAPPED_EL2_SYSREG(AFSR0_EL2, AFSR0_EL1);
	MAPPED_EL2_SYSREG(AFSR1_EL2, AFSR1_EL1);
	MAPPED_EL2_SYSREG(ESR_EL2, ESR_EL1);
	MAPPED_EL2_SYSREG(FAR_EL2, FAR_EL1);
	MAPPED_EL2_SYSREG(MAIR_EL2, MAIR_EL1);
	MAPPED_EL2_SYSREG(TCR2_EL2, TCR2_EL1);
	MAPPED_EL2_SYSREG(PIR_EL2, PIR_EL1);
	MAPPED_EL2_SYSREG(PIRE0_EL2, PIRE0_EL1);
	MAPPED_EL2_SYSREG(POR_EL2, POR_EL1);
	MAPPED_EL2_SYSREG(AMAIR_EL2, AMAIR_EL1);
	MAPPED_EL2_SYSREG(ELR_EL2, ELR_EL1);
	MAPPED_EL2_SYSREG(SPSR_EL2, SPSR_EL1);
	MAPPED_EL2_SYSREG(ZCR_EL2, ZCR_EL1);
	MAPPED_EL2_SYSREG(CONTEXTIDR_EL2, CONTEXTIDR_EL1);
	MAPPED_EL2_SYSREG(SCTLR2_EL2, SCTLR2_EL1);
	MAPPED_EL2_SYSREG(CNTHCTL_EL2, CNTKCTL_EL1);
	case SYS_SP_EL1:
		if (!vcpu_has_el2(vcpu))
			return ARM64_CORE_REG(sp_el1);

		alias = SYS_SP_EL2;
		break;
	default:
		BUILD_BUG();
	}

	return KVM_ARM64_SYS_REG(alias);
}

void kvm_get_default_vcpu_target(struct kvm_vm *vm, struct kvm_vcpu_init *init);

static inline unsigned int get_current_el(void)
{
	return (read_sysreg(CurrentEL) >> 2) & 0x3;
}

#define do_smccc(...)						\
do {								\
	if (get_current_el() == 2)				\
		smccc_smc(__VA_ARGS__);				\
	else							\
		smccc_hvc(__VA_ARGS__);				\
} while (0)
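/*
 * Illustrative sketch, not part of the original header: reading a system
 * register through ctxt_reg_alias() so the access is transparently
 * redirected to the EL2 counterpart when the vCPU has a virtual EL2. The
 * helper name is hypothetical, and it assumes vcpu_get_reg() returns the
 * register value, as in recent kvm_util.h.
 */
static __always_inline u64 example_read_sctlr(struct kvm_vcpu *vcpu)
{
	return vcpu_get_reg(vcpu, ctxt_reg_alias(vcpu, SYS_SCTLR_EL1));
}

#endif /* SELFTEST_KVM_PROCESSOR_H */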