/* SPDX-License-Identifier: GPL-2.0 */
/*
 * AArch64 processor specific defines
 *
 * Copyright (C) 2018, Red Hat, Inc.
 */
#ifndef SELFTEST_KVM_PROCESSOR_H
#define SELFTEST_KVM_PROCESSOR_H

#include "kvm_util.h"
#include "ucall_common.h"

#include <linux/stringify.h>
#include <linux/types.h>
#include <asm/sysreg.h>


#define ARM64_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
			   KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

/*
 * KVM_ARM64_SYS_REG(sys_reg_id): Helper macro to convert SYS_* register
 * definitions in asm/sysreg.h into the encoding used by KVM calls such
 * as vcpu_get_reg() and vcpu_set_reg().
 */
#define KVM_ARM64_SYS_REG(sys_reg_id)			\
	ARM64_SYS_REG(sys_reg_Op0(sys_reg_id),		\
		      sys_reg_Op1(sys_reg_id),		\
		      sys_reg_CRn(sys_reg_id),		\
		      sys_reg_CRm(sys_reg_id),		\
		      sys_reg_Op2(sys_reg_id))
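
/*
 * Example (illustrative): read MPIDR_EL1 through the ONE_REG interface.
 * Assumes the vcpu_get_reg(vcpu, id, &val) helper from kvm_util.h and
 * the SYS_MPIDR_EL1 definition from asm/sysreg.h.
 *
 *	uint64_t mpidr;
 *
 *	vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1), &mpidr);
 */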

/*
 * Default MAIR
 *                  index   attribute
 * DEVICE_nGnRnE      0     0000:0000
 * DEVICE_nGnRE       1     0000:0100
 * DEVICE_GRE         2     0000:1100
 * NORMAL_NC          3     0100:0100
 * NORMAL             4     1111:1111
 * NORMAL_WT          5     1011:1011
 */

/* Linux doesn't use these memory types, so let's define them. */
#define MAIR_ATTR_DEVICE_GRE	UL(0x0c)
#define MAIR_ATTR_NORMAL_WT	UL(0xbb)

#define MT_DEVICE_nGnRnE	0
#define MT_DEVICE_nGnRE		1
#define MT_DEVICE_GRE		2
#define MT_NORMAL_NC		3
#define MT_NORMAL		4
#define MT_NORMAL_WT		5

#define DEFAULT_MAIR_EL1						\
	(MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) |	\
	 MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) |	\
	 MAIR_ATTRIDX(MAIR_ATTR_DEVICE_GRE, MT_DEVICE_GRE) |		\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) |		\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) |			\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_WT, MT_NORMAL_WT))

void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init);
struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
				  struct kvm_vcpu_init *init, void *guest_code);

/* Guest register state passed to exception handlers. */
struct ex_regs {
	u64 regs[31];
	u64 sp;
	u64 pc;
	u64 pstate;
};

#define VECTOR_NUM	16

/* Exception vector table entries, in the order they appear in the table. */
enum {
	VECTOR_SYNC_CURRENT_SP0,
	VECTOR_IRQ_CURRENT_SP0,
	VECTOR_FIQ_CURRENT_SP0,
	VECTOR_ERROR_CURRENT_SP0,

	VECTOR_SYNC_CURRENT,
	VECTOR_IRQ_CURRENT,
	VECTOR_FIQ_CURRENT,
	VECTOR_ERROR_CURRENT,

	VECTOR_SYNC_LOWER_64,
	VECTOR_IRQ_LOWER_64,
	VECTOR_FIQ_LOWER_64,
	VECTOR_ERROR_LOWER_64,

	VECTOR_SYNC_LOWER_32,
	VECTOR_IRQ_LOWER_32,
	VECTOR_FIQ_LOWER_32,
	VECTOR_ERROR_LOWER_32,
};

#define VECTOR_IS_SYNC(v) ((v) == VECTOR_SYNC_CURRENT_SP0 || \
			   (v) == VECTOR_SYNC_CURRENT     || \
			   (v) == VECTOR_SYNC_LOWER_64    || \
			   (v) == VECTOR_SYNC_LOWER_32)

/* Exception class (ESR_ELx.EC) values */
#define ESR_EC_NUM		64
#define ESR_EC_SHIFT		26
#define ESR_EC_MASK		(ESR_EC_NUM - 1)

#define ESR_EC_UNKNOWN		0x0
#define ESR_EC_SVC64		0x15
#define ESR_EC_IABT		0x21
#define ESR_EC_DABT		0x25
#define ESR_EC_HW_BP_CURRENT	0x31
#define ESR_EC_SSTEP_CURRENT	0x33
#define ESR_EC_WP_CURRENT	0x35
#define ESR_EC_BRK_INS		0x3c

/* Access flag */
#define PTE_AF			(1ULL << 10)

/* Hardware management of the Access flag (TCR_EL1.HA) */
#define TCR_EL1_HA		(1ULL << 39)

void aarch64_get_supported_page_sizes(uint32_t ipa, uint32_t *ipa4k,
				      uint32_t *ipa16k, uint32_t *ipa64k);

void vm_init_descriptor_tables(struct kvm_vm *vm);
void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu);

typedef void (*handler_fn)(struct ex_regs *);
void vm_install_exception_handler(struct kvm_vm *vm,
				  int vector, handler_fn handler);
void vm_install_sync_handler(struct kvm_vm *vm,
			     int vector, int ec, handler_fn handler);
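
/*
 * Example (illustrative): handle software breakpoints taken from the
 * guest. Assumes the usual selftest flow of vm_init_descriptor_tables()
 * and vcpu_init_descriptor_tables() before the handler is installed.
 *
 *	static void brk_handler(struct ex_regs *regs)
 *	{
 *		regs->pc += 4;	// step over the 4-byte BRK instruction
 *	}
 *
 *	vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT, ESR_EC_BRK_INS,
 *				brk_handler);
 */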

uint64_t *virt_get_pte_hva(struct kvm_vm *vm, vm_vaddr_t gva);

static inline void cpu_relax(void)
{
	asm volatile("yield" ::: "memory");
}

#define isb()		asm volatile("isb" : : : "memory")
#define dsb(opt)	asm volatile("dsb " #opt : : : "memory")
#define dmb(opt)	asm volatile("dmb " #opt : : : "memory")

#define dma_wmb()	dmb(oshst)
#define __iowmb()	dma_wmb()

#define dma_rmb()	dmb(oshld)

#define __iormb(v)							\
({									\
	unsigned long tmp;						\
									\
	dma_rmb();							\
									\
	/*								\
	 * Courtesy of arch/arm64/include/asm/io.h:			\
	 * Create a dummy control dependency from the IO read to any	\
	 * later instructions. This ensures that a subsequent call	\
	 * to udelay() will be ordered due to the ISB in __delay().	\
	 */								\
	asm volatile("eor %0, %1, %1\n"					\
		     "cbnz %0, ."					\
		     : "=r" (tmp) : "r" ((unsigned long)(v))		\
		     : "memory");					\
})

static __always_inline void __raw_writel(u32 val, volatile void *addr)
{
	asm volatile("str %w0, [%1]" : : "rZ" (val), "r" (addr));
}

static __always_inline u32 __raw_readl(const volatile void *addr)
{
	u32 val;
	asm volatile("ldr %w0, [%1]" : "=r" (val) : "r" (addr));
	return val;
}

static __always_inline void __raw_writeq(u64 val, volatile void *addr)
{
	asm volatile("str %0, [%1]" : : "rZ" (val), "r" (addr));
}

static __always_inline u64 __raw_readq(const volatile void *addr)
{
	u64 val;
	asm volatile("ldr %0, [%1]" : "=r" (val) : "r" (addr));
	return val;
}

#define writel_relaxed(v,c)	((void)__raw_writel((__force u32)cpu_to_le32(v),(c)))
#define readl_relaxed(c)	({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; })
#define writeq_relaxed(v,c)	((void)__raw_writeq((__force u64)cpu_to_le64(v),(c)))
#define readq_relaxed(c)	({ u64 __r = le64_to_cpu((__force __le64)__raw_readq(c)); __r; })

#define writel(v,c)		({ __iowmb(); writel_relaxed((v),(c));})
#define readl(c)		({ u32 __v = readl_relaxed(c); __iormb(__v); __v; })
#define writeq(v,c)		({ __iowmb(); writeq_relaxed((v),(c));})
#define readq(c)		({ u64 __v = readq_relaxed(c); __iormb(__v); __v; })

/* Unmask IRQs and FIQs (clear DAIF.I and DAIF.F) */
static inline void local_irq_enable(void)
{
	asm volatile("msr daifclr, #3" : : : "memory");
}

/* Mask IRQs and FIQs (set DAIF.I and DAIF.F) */
static inline void local_irq_disable(void)
{
	asm volatile("msr daifset, #3" : : : "memory");
}

/**
 * struct arm_smccc_res - Result from SMC/HVC call
 * @a0-a3: result values from registers x0 to x3
 */
struct arm_smccc_res {
	unsigned long a0;
	unsigned long a1;
	unsigned long a2;
	unsigned long a3;
};

/**
 * smccc_hvc - Invoke an SMCCC function using the hvc conduit
 * @function_id: the SMCCC function to be called
 * @arg0-arg6: SMCCC function arguments, corresponding to registers x1-x7
 * @res: pointer to write the return values from registers x0-x3
 */
void smccc_hvc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
	       uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
	       uint64_t arg6, struct arm_smccc_res *res);

/**
 * smccc_smc - Invoke an SMCCC function using the smc conduit
 * @function_id: the SMCCC function to be called
 * @arg0-arg6: SMCCC function arguments, corresponding to registers x1-x7
 * @res: pointer to write the return values from registers x0-x3
 */
void smccc_smc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
	       uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
	       uint64_t arg6, struct arm_smccc_res *res);

#endif /* SELFTEST_KVM_PROCESSOR_H */