/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM_KVM_ASM_H__
#define __ARM_KVM_ASM_H__

#include <asm/hyp_image.h>
#include <asm/insn.h>
#include <asm/virt.h>
#include <asm/sysreg.h>

#define ARM_EXIT_WITH_SERROR_BIT	31
#define ARM_EXCEPTION_CODE(x)		((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
#define ARM_EXCEPTION_IS_TRAP(x)	(ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_TRAP)
#define ARM_SERROR_PENDING(x)		!!((x) & (1U << ARM_EXIT_WITH_SERROR_BIT))

#define ARM_EXCEPTION_IRQ		0
#define ARM_EXCEPTION_EL1_SERROR	1
#define ARM_EXCEPTION_TRAP		2
#define ARM_EXCEPTION_IL		3
/* The hyp-stub will return this for any kvm_call_hyp() call */
#define ARM_EXCEPTION_HYP_GONE		HVC_STUB_ERR

#define kvm_arm_exception_type					\
	{ARM_EXCEPTION_IRQ,		"IRQ"		},	\
	{ARM_EXCEPTION_EL1_SERROR,	"SERROR"	},	\
	{ARM_EXCEPTION_TRAP,		"TRAP"		},	\
	{ARM_EXCEPTION_HYP_GONE,	"HYP_GONE"	}

/*
 * Size of the HYP vectors preamble. kvm_patch_vector_branch() generates code
 * that jumps over this.
 */
#define KVM_VECTOR_PREAMBLE	(2 * AARCH64_INSN_SIZE)

#define KVM_HOST_SMCCC_ID(id)						\
	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,				\
			   ARM_SMCCC_SMC_64,				\
			   ARM_SMCCC_OWNER_VENDOR_HYP,			\
			   (id))

#define KVM_HOST_SMCCC_FUNC(name) KVM_HOST_SMCCC_ID(__KVM_HOST_SMCCC_FUNC_##name)

#define __KVM_HOST_SMCCC_FUNC___kvm_hyp_init			0

#ifndef __ASSEMBLY__

#include <linux/mm.h>

enum __kvm_host_smccc_func {
	/* Hypercalls available only prior to pKVM finalisation */
	/* __KVM_HOST_SMCCC_FUNC___kvm_hyp_init */
	__KVM_HOST_SMCCC_FUNC___pkvm_init = __KVM_HOST_SMCCC_FUNC___kvm_hyp_init + 1,
	__KVM_HOST_SMCCC_FUNC___pkvm_create_private_mapping,
	__KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector,
	__KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs,
	__KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs,
	__KVM_HOST_SMCCC_FUNC___vgic_v3_get_gic_config,
	__KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize,

	/* Hypercalls available after pKVM finalisation */
	__KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_unshare_hyp,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_share_guest,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_unshare_guest,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_relax_perms_guest,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_wrprotect_guest,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_test_clear_young_guest,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_mkyoung_guest,
	__KVM_HOST_SMCCC_FUNC___kvm_adjust_pc,
	__KVM_HOST_SMCCC_FUNC___kvm_vcpu_run,
	__KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context,
	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa,
	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa_nsh,
	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid,
	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_range,
	__KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context,
	__KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff,
	__KVM_HOST_SMCCC_FUNC___vgic_v3_save_vmcr_aprs,
	__KVM_HOST_SMCCC_FUNC___vgic_v3_restore_vmcr_aprs,
	__KVM_HOST_SMCCC_FUNC___pkvm_reserve_vm,
	__KVM_HOST_SMCCC_FUNC___pkvm_unreserve_vm,
	__KVM_HOST_SMCCC_FUNC___pkvm_init_vm,
	__KVM_HOST_SMCCC_FUNC___pkvm_init_vcpu,
	__KVM_HOST_SMCCC_FUNC___pkvm_teardown_vm,
	__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_load,
	__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_put,
	__KVM_HOST_SMCCC_FUNC___pkvm_tlb_flush_vmid,
};

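/*
 * Usage sketch (illustrative, not part of this header): the host reaches the
 * handlers above by issuing an HVC whose function ID is built with
 * KVM_HOST_SMCCC_FUNC(). Assuming the usual kvm_call_hyp_nvhe() helper from
 * <asm/kvm_host.h>, flushing the VM context would look like:
 *
 *	kvm_call_hyp_nvhe(__kvm_flush_vm_context);
 *
 * i.e. an SMCCC fast call carrying
 * KVM_HOST_SMCCC_FUNC(__kvm_flush_vm_context) in x0, which the nVHE hyp
 * entry code dispatches on the matching enum value.
 */
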
#define DECLARE_KVM_VHE_SYM(sym)	extern char sym[]
#define DECLARE_KVM_NVHE_SYM(sym)	extern char kvm_nvhe_sym(sym)[]

/*
 * Define a pair of symbols sharing the same name but one defined in
 * VHE and the other in nVHE hyp implementations.
 */
#define DECLARE_KVM_HYP_SYM(sym)		\
	DECLARE_KVM_VHE_SYM(sym);		\
	DECLARE_KVM_NVHE_SYM(sym)

#define DECLARE_KVM_VHE_PER_CPU(type, sym)	\
	DECLARE_PER_CPU(type, sym)
#define DECLARE_KVM_NVHE_PER_CPU(type, sym)	\
	DECLARE_PER_CPU(type, kvm_nvhe_sym(sym))

#define DECLARE_KVM_HYP_PER_CPU(type, sym)	\
	DECLARE_KVM_VHE_PER_CPU(type, sym);	\
	DECLARE_KVM_NVHE_PER_CPU(type, sym)

/*
 * Compute pointer to a symbol defined in nVHE percpu region.
 * Returns NULL if percpu memory has not been allocated yet.
 */
#define this_cpu_ptr_nvhe_sym(sym)	per_cpu_ptr_nvhe_sym(sym, smp_processor_id())
#define per_cpu_ptr_nvhe_sym(sym, cpu)						\
	({									\
		unsigned long base, off;					\
		base = kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu];		\
		off = (unsigned long)&CHOOSE_NVHE_SYM(sym) -			\
		      (unsigned long)&CHOOSE_NVHE_SYM(__per_cpu_start);	\
		base ? (typeof(CHOOSE_NVHE_SYM(sym))*)(base + off) : NULL;	\
	})

#if defined(__KVM_NVHE_HYPERVISOR__)

#define CHOOSE_NVHE_SYM(sym)	sym
#define CHOOSE_HYP_SYM(sym)	CHOOSE_NVHE_SYM(sym)

/* The nVHE hypervisor shouldn't even try to access VHE symbols */
extern void *__nvhe_undefined_symbol;
#define CHOOSE_VHE_SYM(sym)		__nvhe_undefined_symbol
#define this_cpu_ptr_hyp_sym(sym)	(&__nvhe_undefined_symbol)
#define per_cpu_ptr_hyp_sym(sym, cpu)	(&__nvhe_undefined_symbol)

#elif defined(__KVM_VHE_HYPERVISOR__)

#define CHOOSE_VHE_SYM(sym)	sym
#define CHOOSE_HYP_SYM(sym)	CHOOSE_VHE_SYM(sym)

/* The VHE hypervisor shouldn't even try to access nVHE symbols */
extern void *__vhe_undefined_symbol;
#define CHOOSE_NVHE_SYM(sym)		__vhe_undefined_symbol
#define this_cpu_ptr_hyp_sym(sym)	(&__vhe_undefined_symbol)
#define per_cpu_ptr_hyp_sym(sym, cpu)	(&__vhe_undefined_symbol)

#else

/*
 * BIG FAT WARNINGS:
 *
 * - Don't be tempted to change the following is_kernel_in_hyp_mode()
 *   to has_vhe(). has_vhe() is implemented as a *final* capability,
 *   while this is used early at boot time, when the capabilities are
 *   not final yet....
 *
 * - Don't let the nVHE hypervisor have access to this, as it will
 *   pick the *wrong* symbol (yes, it runs at EL2...).
 */
#define CHOOSE_HYP_SYM(sym)		(is_kernel_in_hyp_mode()	\
					   ? CHOOSE_VHE_SYM(sym)	\
					   : CHOOSE_NVHE_SYM(sym))

#define this_cpu_ptr_hyp_sym(sym)	(is_kernel_in_hyp_mode()	\
					   ? this_cpu_ptr(&sym)		\
					   : this_cpu_ptr_nvhe_sym(sym))

#define per_cpu_ptr_hyp_sym(sym, cpu)	(is_kernel_in_hyp_mode()	\
					   ? per_cpu_ptr(&sym, cpu)	\
					   : per_cpu_ptr_nvhe_sym(sym, cpu))

#define CHOOSE_VHE_SYM(sym)	sym
#define CHOOSE_NVHE_SYM(sym)	kvm_nvhe_sym(sym)

#endif

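/*
 * Usage sketch (illustrative only; "example_hyp_counter" is a made-up name):
 * a per-CPU datum shared with the hypervisor is declared once and then
 * reached through the wrappers above, which pick the VHE or nVHE copy at
 * runtime:
 *
 *	DECLARE_KVM_HYP_PER_CPU(unsigned long, example_hyp_counter);
 *
 *	unsigned long *p = this_cpu_ptr_hyp_sym(example_hyp_counter);
 *
 * On a VHE host this resolves to this_cpu_ptr(); on an nVHE host it goes
 * through per_cpu_ptr_nvhe_sym(), which returns NULL until the hyp per-CPU
 * pages have been allocated, so early callers must tolerate a NULL pointer.
 */
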
struct kvm_nvhe_init_params {
	unsigned long mair_el2;
	unsigned long tcr_el2;
	unsigned long tpidr_el2;
	unsigned long stack_hyp_va;
	unsigned long stack_pa;
	phys_addr_t pgd_pa;
	unsigned long hcr_el2;
	unsigned long vttbr;
	unsigned long vtcr;
	unsigned long tmp;
};

/*
 * Used by the host in EL1 to dump the nVHE hypervisor backtrace on
 * hyp_panic() in non-protected mode.
 *
 * @stack_base:			hyp VA of the hyp_stack base.
 * @overflow_stack_base:	hyp VA of the hyp_overflow_stack base.
 * @fp:				hyp FP where the backtrace begins.
 * @pc:				hyp PC where the backtrace begins.
 */
struct kvm_nvhe_stacktrace_info {
	unsigned long stack_base;
	unsigned long overflow_stack_base;
	unsigned long fp;
	unsigned long pc;
};

/* Translate a kernel address @ptr into its equivalent linear mapping */
#define kvm_ksym_ref(ptr)						\
	({								\
		void *val = (ptr);					\
		if (!is_kernel_in_hyp_mode())				\
			val = lm_alias((ptr));				\
		val;							\
	 })
#define kvm_ksym_ref_nvhe(sym)	kvm_ksym_ref(kvm_nvhe_sym(sym))

struct kvm;
struct kvm_vcpu;
struct kvm_s2_mmu;

DECLARE_KVM_NVHE_SYM(__kvm_hyp_init);
DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
#define __kvm_hyp_init		CHOOSE_NVHE_SYM(__kvm_hyp_init)
#define __kvm_hyp_vector	CHOOSE_HYP_SYM(__kvm_hyp_vector)

extern unsigned long kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[];
DECLARE_KVM_NVHE_SYM(__per_cpu_start);
DECLARE_KVM_NVHE_SYM(__per_cpu_end);

DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
#define __bp_harden_hyp_vecs	CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)

extern void __kvm_flush_vm_context(void);
extern void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
				     int level);
extern void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
					 phys_addr_t ipa,
					 int level);
extern void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
				       phys_addr_t start, unsigned long pages);
extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);

extern int __kvm_tlbi_s1e2(struct kvm_s2_mmu *mmu, u64 va, u64 sys_encoding);

extern void __kvm_timer_set_cntvoff(u64 cntvoff);
extern void __kvm_at_s1e01(struct kvm_vcpu *vcpu, u32 op, u64 vaddr);
extern void __kvm_at_s1e2(struct kvm_vcpu *vcpu, u32 op, u64 vaddr);
extern void __kvm_at_s12(struct kvm_vcpu *vcpu, u32 op, u64 vaddr);

extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);

extern void __kvm_adjust_pc(struct kvm_vcpu *vcpu);

extern u64 __vgic_v3_get_gic_config(void);
extern void __vgic_v3_init_lrs(void);

#define __KVM_EXTABLE(from, to)						\
	"	.pushsection	__kvm_ex_table, \"a\"\n"		\
	"	.align		3\n"					\
	"	.long		(" #from " - .), (" #to " - .)\n"	\
	"	.popsection\n"

#define __kvm_at(at_op, addr)						\
( {									\
	int __kvm_at_err = 0;						\
	u64 spsr, elr;							\
	asm volatile(							\
	"	mrs	%1, spsr_el2\n"					\
	"	mrs	%2, elr_el2\n"					\
	"1:	" __msr_s(at_op, "%3") "\n"				\
	"	isb\n"							\
	"	b	9f\n"						\
	"2:	msr	spsr_el2, %1\n"					\
	"	msr	elr_el2, %2\n"					\
	"	mov	%w0, %4\n"					\
	"9:\n"								\
	__KVM_EXTABLE(1b, 2b)						\
	: "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr)		\
	: "r" (addr), "i" (-EFAULT));					\
	__kvm_at_err;							\
} )

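/*
 * Usage sketch (illustrative only): __kvm_at() runs an address translation
 * instruction at EL2 and turns an unexpected exception during the walk into
 * -EFAULT via the __kvm_ex_table fixup, instead of bringing down the
 * hypervisor. Assuming the OP_AT_* encodings and read_sysreg_par() from
 * <asm/sysreg.h>, a stage-1 EL1 read translation could look like:
 *
 *	u64 par;
 *
 *	if (__kvm_at(OP_AT_S1E1R, vaddr))
 *		return -EFAULT;
 *	par = read_sysreg_par();
 *
 * SPSR_EL2 and ELR_EL2 are snapshotted before the AT and restored on the
 * fixup path, since an exception taken at EL2 would otherwise clobber them.
 */
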
void __noreturn hyp_panic(void);
asmlinkage void kvm_unexpected_el2_exception(void);
asmlinkage void __noreturn hyp_panic(void);
asmlinkage void __noreturn hyp_panic_bad_stack(void);
asmlinkage void kvm_unexpected_el2_exception(void);
struct kvm_cpu_context;
void handle_trap(struct kvm_cpu_context *host_ctxt);
asmlinkage void __noreturn __kvm_host_psci_cpu_entry(bool is_cpu_on);
void __noreturn __pkvm_init_finalise(void);
void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);
void kvm_patch_vector_branch(struct alt_instr *alt,
			     __le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_get_kimage_voffset(struct alt_instr *alt,
			    __le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_compute_final_ctr_el0(struct alt_instr *alt,
			       __le32 *origptr, __le32 *updptr, int nr_inst);
void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr, u64 elr_virt,
					      u64 elr_phys, u64 par, uintptr_t vcpu, u64 far, u64 hpfar);

#else /* __ASSEMBLY__ */

.macro get_host_ctxt reg, tmp
	adr_this_cpu \reg, kvm_host_data, \tmp
	add	\reg, \reg, #HOST_DATA_CONTEXT
.endm

.macro get_vcpu_ptr vcpu, ctxt
	get_host_ctxt \ctxt, \vcpu
	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

.macro get_loaded_vcpu vcpu, ctxt
	adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu
	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

.macro set_loaded_vcpu vcpu, ctxt, tmp
	adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp
	str	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

/*
 * KVM extable for unexpected exceptions.
 * Create a struct kvm_exception_table_entry output to a section that can be
 * mapped by EL2. The table is not sorted.
 *
 * The caller must ensure:
 * x18 has the hypervisor value to allow any Shadow-Call-Stack instrumented
 * code to write to it, and that SPSR_EL2 and ELR_EL2 are restored by the fixup.
 */
.macro	_kvm_extable, from, to
	.pushsection	__kvm_ex_table, "a"
	.align		3
	.long		(\from - .), (\to - .)
	.popsection
.endm

#define CPU_XREG_OFFSET(x)	(CPU_USER_PT_REGS + 8*x)
#define CPU_LR_OFFSET		CPU_XREG_OFFSET(30)
#define CPU_SP_EL0_OFFSET	(CPU_LR_OFFSET + 8)

/*
 * We treat x18 as callee-saved as the host may use it as a platform
 * register (e.g. for shadow call stack).
 */
.macro save_callee_saved_regs ctxt
	str	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
	stp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	stp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	stp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	stp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	stp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	stp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

.macro restore_callee_saved_regs ctxt
	// We require \ctxt is not x18-x28
	ldr	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
	ldp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	ldp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	ldp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	ldp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	ldp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	ldp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

.macro save_sp_el0 ctxt, tmp
	mrs	\tmp,	sp_el0
	str	\tmp,	[\ctxt, #CPU_SP_EL0_OFFSET]
.endm

.macro restore_sp_el0 ctxt, tmp
	ldr	\tmp,	[\ctxt, #CPU_SP_EL0_OFFSET]
	msr	sp_el0, \tmp
.endm

#endif

#endif /* __ARM_KVM_ASM_H__ */