/* SPDX-License-Identifier: GPL-2.0 */
/*
 * AArch64 processor specific defines
 *
 * Copyright (C) 2018, Red Hat, Inc.
 */
#ifndef SELFTEST_KVM_PROCESSOR_H
#define SELFTEST_KVM_PROCESSOR_H

#include "kvm_util.h"
#include "ucall_common.h"

#include <linux/stringify.h>
#include <linux/types.h>
#include <asm/brk-imm.h>
#include <asm/esr.h>
#include <asm/sysreg.h>

#define ARM64_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
			   KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

/*
 * KVM_ARM64_SYS_REG(sys_reg_id): Helper macro to convert
 * SYS_* register definitions in asm/sysreg.h to use in KVM
 * calls such as vcpu_get_reg() and vcpu_set_reg().
 */
#define KVM_ARM64_SYS_REG(sys_reg_id)			\
	ARM64_SYS_REG(sys_reg_Op0(sys_reg_id),		\
		      sys_reg_Op1(sys_reg_id),		\
		      sys_reg_CRn(sys_reg_id),		\
		      sys_reg_CRm(sys_reg_id),		\
		      sys_reg_Op2(sys_reg_id))

/*
 * Default MAIR
 *                  index   attribute
 * DEVICE_nGnRnE      0     0000:0000
 * DEVICE_nGnRE       1     0000:0100
 * DEVICE_GRE         2     0000:1100
 * NORMAL_NC          3     0100:0100
 * NORMAL             4     1111:1111
 * NORMAL_WT          5     1011:1011
 */

/* Linux doesn't use these memory types, so let's define them. */
#define MAIR_ATTR_DEVICE_GRE	UL(0x0c)
#define MAIR_ATTR_NORMAL_WT	UL(0xbb)

#define MT_DEVICE_nGnRnE	0
#define MT_DEVICE_nGnRE		1
#define MT_DEVICE_GRE		2
#define MT_NORMAL_NC		3
#define MT_NORMAL		4
#define MT_NORMAL_WT		5

#define DEFAULT_MAIR_EL1						\
	(MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) |	\
	 MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) |	\
	 MAIR_ATTRIDX(MAIR_ATTR_DEVICE_GRE, MT_DEVICE_GRE) |		\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) |		\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) |			\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_WT, MT_NORMAL_WT))

/* TCR_EL1 specific flags */
#define TCR_T0SZ_OFFSET		0
#define TCR_T0SZ(x)		((UL(64) - (x)) << TCR_T0SZ_OFFSET)

#define TCR_IRGN0_SHIFT		8
#define TCR_IRGN0_MASK		(UL(3) << TCR_IRGN0_SHIFT)
#define TCR_IRGN0_NC		(UL(0) << TCR_IRGN0_SHIFT)
#define TCR_IRGN0_WBWA		(UL(1) << TCR_IRGN0_SHIFT)
#define TCR_IRGN0_WT		(UL(2) << TCR_IRGN0_SHIFT)
#define TCR_IRGN0_WBnWA		(UL(3) << TCR_IRGN0_SHIFT)

#define TCR_ORGN0_SHIFT		10
#define TCR_ORGN0_MASK		(UL(3) << TCR_ORGN0_SHIFT)
#define TCR_ORGN0_NC		(UL(0) << TCR_ORGN0_SHIFT)
#define TCR_ORGN0_WBWA		(UL(1) << TCR_ORGN0_SHIFT)
#define TCR_ORGN0_WT		(UL(2) << TCR_ORGN0_SHIFT)
#define TCR_ORGN0_WBnWA		(UL(3) << TCR_ORGN0_SHIFT)

#define TCR_SH0_SHIFT		12
#define TCR_SH0_MASK		(UL(3) << TCR_SH0_SHIFT)
#define TCR_SH0_INNER		(UL(3) << TCR_SH0_SHIFT)

#define TCR_TG0_SHIFT		14
#define TCR_TG0_MASK		(UL(3) << TCR_TG0_SHIFT)
#define TCR_TG0_4K		(UL(0) << TCR_TG0_SHIFT)
#define TCR_TG0_64K		(UL(1) << TCR_TG0_SHIFT)
#define TCR_TG0_16K		(UL(2) << TCR_TG0_SHIFT)

#define TCR_IPS_SHIFT		32
#define TCR_IPS_MASK		(UL(7) << TCR_IPS_SHIFT)
#define TCR_IPS_52_BITS		(UL(6) << TCR_IPS_SHIFT)
#define TCR_IPS_48_BITS		(UL(5) << TCR_IPS_SHIFT)
#define TCR_IPS_40_BITS		(UL(2) << TCR_IPS_SHIFT)
#define TCR_IPS_36_BITS		(UL(1) << TCR_IPS_SHIFT)

#define TCR_HA			(UL(1) << 39)
#define TCR_DS			(UL(1) << 59)
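/*
 * Illustrative sketch only (not part of the selftests API; the helper
 * name is hypothetical): the TCR_EL1 flags above compose by OR. For
 * example, a stage-1 configuration with a 48-bit VA space, 4K granule,
 * inner-shareable, write-back/write-allocate cacheable walks and a
 * 48-bit IPA could be built as:
 */
static inline uint64_t example_tcr_el1_4k_48bit(void)
{
	/* TCR_T0SZ(48) encodes T0SZ = 64 - 48 = 16, i.e. a 48-bit VA. */
	return TCR_TG0_4K | TCR_SH0_INNER |
	       TCR_IRGN0_WBWA | TCR_ORGN0_WBWA |
	       TCR_IPS_48_BITS | TCR_T0SZ(48);
}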
/*
 * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
 */
#define PTE_ATTRINDX(t)		((t) << 2)
#define PTE_ATTRINDX_MASK	GENMASK(4, 2)
#define PTE_ATTRINDX_SHIFT	2

#define PTE_VALID		BIT(0)
#define PGD_TYPE_TABLE		BIT(1)
#define PUD_TYPE_TABLE		BIT(1)
#define PMD_TYPE_TABLE		BIT(1)
#define PTE_TYPE_PAGE		BIT(1)

#define PTE_SHARED		(UL(3) << 8) /* SH[1:0], inner shareable */
#define PTE_AF			BIT(10)

#define PTE_ADDR_MASK(page_shift)	GENMASK(47, (page_shift))
#define PTE_ADDR_51_48			GENMASK(15, 12)
#define PTE_ADDR_51_48_SHIFT		12
#define PTE_ADDR_MASK_LPA2(page_shift)	GENMASK(49, (page_shift))
#define PTE_ADDR_51_50_LPA2		GENMASK(9, 8)
#define PTE_ADDR_51_50_LPA2_SHIFT	8

void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init);
struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
				  struct kvm_vcpu_init *init, void *guest_code);

struct ex_regs {
	u64 regs[31];
	u64 sp;
	u64 pc;
	u64 pstate;
};

#define VECTOR_NUM	16

enum {
	VECTOR_SYNC_CURRENT_SP0,
	VECTOR_IRQ_CURRENT_SP0,
	VECTOR_FIQ_CURRENT_SP0,
	VECTOR_ERROR_CURRENT_SP0,

	VECTOR_SYNC_CURRENT,
	VECTOR_IRQ_CURRENT,
	VECTOR_FIQ_CURRENT,
	VECTOR_ERROR_CURRENT,

	VECTOR_SYNC_LOWER_64,
	VECTOR_IRQ_LOWER_64,
	VECTOR_FIQ_LOWER_64,
	VECTOR_ERROR_LOWER_64,

	VECTOR_SYNC_LOWER_32,
	VECTOR_IRQ_LOWER_32,
	VECTOR_FIQ_LOWER_32,
	VECTOR_ERROR_LOWER_32,
};

#define VECTOR_IS_SYNC(v)	((v) == VECTOR_SYNC_CURRENT_SP0 ||	\
				 (v) == VECTOR_SYNC_CURRENT ||		\
				 (v) == VECTOR_SYNC_LOWER_64 ||		\
				 (v) == VECTOR_SYNC_LOWER_32)

void aarch64_get_supported_page_sizes(uint32_t ipa, uint32_t *ipa4k,
				      uint32_t *ipa16k, uint32_t *ipa64k);

void vm_init_descriptor_tables(struct kvm_vm *vm);
void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu);

typedef void (*handler_fn)(struct ex_regs *);
void vm_install_exception_handler(struct kvm_vm *vm,
				  int vector, handler_fn handler);
void vm_install_sync_handler(struct kvm_vm *vm,
			     int vector, int ec, handler_fn handler);

uint64_t *virt_get_pte_hva(struct kvm_vm *vm, vm_vaddr_t gva);

static inline void cpu_relax(void)
{
	asm volatile("yield" ::: "memory");
}

#define isb()		asm volatile("isb" : : : "memory")
#define dsb(opt)	asm volatile("dsb " #opt : : : "memory")
#define dmb(opt)	asm volatile("dmb " #opt : : : "memory")

#define dma_wmb()	dmb(oshst)
#define __iowmb()	dma_wmb()

#define dma_rmb()	dmb(oshld)

#define __iormb(v)							\
({									\
	unsigned long tmp;						\
									\
	dma_rmb();							\
									\
	/*								\
	 * Courtesy of arch/arm64/include/asm/io.h:			\
	 * Create a dummy control dependency from the IO read to any	\
	 * later instructions. This ensures that a subsequent call	\
	 * to udelay() will be ordered due to the ISB in __delay().	\
	 */								\
	asm volatile("eor %0, %1, %1\n"					\
		     "cbnz %0, ."					\
		     : "=r" (tmp) : "r" ((unsigned long)(v))		\
		     : "memory");					\
})
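/*
 * Illustrative sketch only (the helper name is hypothetical; the
 * selftests' real page-table construction lives in the library code):
 * how the PTE field encodings above compose into a minimal valid leaf
 * PTE for inner-shareable normal memory, assuming a 4K granule
 * (page_shift == 12) and no LPA2.
 */
static inline uint64_t example_normal_leaf_pte(uint64_t pa)
{
	return (pa & PTE_ADDR_MASK(12)) | PTE_ATTRINDX(MT_NORMAL) |
	       PTE_SHARED | PTE_AF | PTE_TYPE_PAGE | PTE_VALID;
}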
\ 208 : "=r" (tmp) : "r" ((unsigned long)(v)) \ 209 : "memory"); \ 210 }) 211 212 static __always_inline void __raw_writel(u32 val, volatile void *addr) 213 { 214 asm volatile("str %w0, [%1]" : : "rZ" (val), "r" (addr)); 215 } 216 217 static __always_inline u32 __raw_readl(const volatile void *addr) 218 { 219 u32 val; 220 asm volatile("ldr %w0, [%1]" : "=r" (val) : "r" (addr)); 221 return val; 222 } 223 224 static __always_inline void __raw_writeq(u64 val, volatile void *addr) 225 { 226 asm volatile("str %0, [%1]" : : "rZ" (val), "r" (addr)); 227 } 228 229 static __always_inline u64 __raw_readq(const volatile void *addr) 230 { 231 u64 val; 232 asm volatile("ldr %0, [%1]" : "=r" (val) : "r" (addr)); 233 return val; 234 } 235 236 #define writel_relaxed(v,c) ((void)__raw_writel((__force u32)cpu_to_le32(v),(c))) 237 #define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; }) 238 #define writeq_relaxed(v,c) ((void)__raw_writeq((__force u64)cpu_to_le64(v),(c))) 239 #define readq_relaxed(c) ({ u64 __r = le64_to_cpu((__force __le64)__raw_readq(c)); __r; }) 240 241 #define writel(v,c) ({ __iowmb(); writel_relaxed((v),(c));}) 242 #define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(__v); __v; }) 243 #define writeq(v,c) ({ __iowmb(); writeq_relaxed((v),(c));}) 244 #define readq(c) ({ u64 __v = readq_relaxed(c); __iormb(__v); __v; }) 245 246 247 static inline void local_irq_enable(void) 248 { 249 asm volatile("msr daifclr, #3" : : : "memory"); 250 } 251 252 static inline void local_irq_disable(void) 253 { 254 asm volatile("msr daifset, #3" : : : "memory"); 255 } 256 257 static inline void local_serror_enable(void) 258 { 259 asm volatile("msr daifclr, #4" : : : "memory"); 260 } 261 262 static inline void local_serror_disable(void) 263 { 264 asm volatile("msr daifset, #4" : : : "memory"); 265 } 266 267 /** 268 * struct arm_smccc_res - Result from SMC/HVC call 269 * @a0-a3 result values from registers 0 to 3 270 */ 271 struct arm_smccc_res { 272 unsigned long a0; 273 unsigned long a1; 274 unsigned long a2; 275 unsigned long a3; 276 }; 277 278 /** 279 * smccc_hvc - Invoke a SMCCC function using the hvc conduit 280 * @function_id: the SMCCC function to be called 281 * @arg0-arg6: SMCCC function arguments, corresponding to registers x1-x7 282 * @res: pointer to write the return values from registers x0-x3 283 * 284 */ 285 void smccc_hvc(uint32_t function_id, uint64_t arg0, uint64_t arg1, 286 uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5, 287 uint64_t arg6, struct arm_smccc_res *res); 288 289 /** 290 * smccc_smc - Invoke a SMCCC function using the smc conduit 291 * @function_id: the SMCCC function to be called 292 * @arg0-arg6: SMCCC function arguments, corresponding to registers x1-x7 293 * @res: pointer to write the return values from registers x0-x3 294 * 295 */ 296 void smccc_smc(uint32_t function_id, uint64_t arg0, uint64_t arg1, 297 uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5, 298 uint64_t arg6, struct arm_smccc_res *res); 299 300 /* Execute a Wait For Interrupt instruction. */ 301 void wfi(void); 302 303 #endif /* SELFTEST_KVM_PROCESSOR_H */ 304