/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_HYP_H__
#define __ARM64_KVM_HYP_H__

#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/alternative.h>
#include <asm/sysreg.h>

DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
DECLARE_PER_CPU(unsigned long, kvm_hyp_vector);
DECLARE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
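
/*
 * Illustrative only (assumed usage, not defined here): hyp code
 * typically reaches these per-CPU objects via this_cpu_ptr(), e.g.
 *
 *	struct kvm_cpu_context *hyp_ctxt = this_cpu_ptr(&kvm_hyp_ctxt);
 */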

#define read_sysreg_elx(r,nvh,vh)					\
	({								\
		u64 reg;						\
		asm volatile(ALTERNATIVE(__mrs_s("%0", r##nvh),	\
					 __mrs_s("%0", r##vh),		\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
			     : "=r" (reg));				\
		reg;							\
	})

#define write_sysreg_elx(v,r,nvh,vh)					\
	do {								\
		u64 __val = (u64)(v);					\
		asm volatile(ALTERNATIVE(__msr_s(r##nvh, "%x0"),	\
					 __msr_s(r##vh, "%x0"),		\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
					 : : "rZ" (__val));		\
	} while (0)

/*
 * Unified accessors for registers that have a different encoding
 * between VHE and non-VHE. They must be specified without their "ELx"
 * encoding, but with the SYS_ prefix, as defined in asm/sysreg.h.
 */

#define read_sysreg_el0(r)	read_sysreg_elx(r, _EL0, _EL02)
#define write_sysreg_el0(v,r)	write_sysreg_elx(v, r, _EL0, _EL02)
#define read_sysreg_el1(r)	read_sysreg_elx(r, _EL1, _EL12)
#define write_sysreg_el1(v,r)	write_sysreg_elx(v, r, _EL1, _EL12)
#define read_sysreg_el2(r)	read_sysreg_elx(r, _EL2, _EL1)
#define write_sysreg_el2(v,r)	write_sysreg_elx(v, r, _EL2, _EL1)
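
/*
 * Example (illustrative, not part of the original header): with the
 * accessors above,
 *
 *	u64 sctlr = read_sysreg_el1(SYS_SCTLR);
 *	write_sysreg_el1(sctlr, SYS_SCTLR);
 *
 * access SCTLR_EL1 on a non-VHE host, but SCTLR_EL12 on a VHE host,
 * where the guest's EL1 register file is reached through the *_EL12
 * encodings.
 */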

/*
 * Without an __arch_swab32(), we fall back to ___constant_swab32(), but the
 * static inline can allow the compiler to out-of-line this. KVM always wants
 * the macro version as it's always inlined.
 */
#define __kvm_swab32(x)	___constant_swab32(x)
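
/*
 * Example (illustrative only): __kvm_swab32(0x12345678) evaluates to
 * 0x78563412. A plausible use, assuming an endianness check such as
 * kvm_vcpu_is_be(), is fixing up a 32-bit MMIO value for a big-endian
 * guest:
 *
 *	if (kvm_vcpu_is_be(vcpu))
 *		data = __kvm_swab32(data);
 */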

int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu);

void __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_restore_state(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if);
int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu);
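
/*
 * Assumed pairing (sketch, not mandated by this header): the world
 * switch activates traps and restores the GIC state on guest entry,
 * and reverses both on exit, roughly:
 *
 *	__vgic_v3_activate_traps(cpu_if);
 *	__vgic_v3_restore_state(cpu_if);
 *	... run the guest ...
 *	__vgic_v3_save_state(cpu_if);
 *	__vgic_v3_deactivate_traps(cpu_if);
 */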

#ifdef __KVM_NVHE_HYPERVISOR__
void __timer_enable_traps(struct kvm_vcpu *vcpu);
void __timer_disable_traps(struct kvm_vcpu *vcpu);
#endif

#ifdef __KVM_NVHE_HYPERVISOR__
void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt);
void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt);
#else
void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt);
void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt);
void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt);
void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt);
#endif
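
/*
 * Illustrative nVHE flow (assumed, simplified): the non-VHE switch
 * saves the host's EL1 state before installing the guest's, and
 * reverses that on exit:
 *
 *	__sysreg_save_state_nvhe(host_ctxt);
 *	__sysreg_restore_state_nvhe(guest_ctxt);
 *	... __guest_enter() ...
 *	__sysreg_save_state_nvhe(guest_ctxt);
 *	__sysreg_restore_state_nvhe(host_ctxt);
 */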

void __debug_switch_to_guest(struct kvm_vcpu *vcpu);
void __debug_switch_to_host(struct kvm_vcpu *vcpu);

#ifdef __KVM_NVHE_HYPERVISOR__
void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu);
void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu);
#endif

void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
void __sve_restore_state(void *sve_pffr, u32 *fpsr);
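
/*
 * Sketch (assumed usage): on a lazy FPSIMD switch the guest's state
 * would be restored with
 *
 *	__fpsimd_restore_state(&vcpu->arch.ctxt.fp_regs);
 *
 * while SVE guests go through __sve_restore_state(), passing a
 * pointer into the saved SVE register file and the saved FPSR.
 */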

#ifndef __KVM_NVHE_HYPERVISOR__
void activate_traps_vhe_load(struct kvm_vcpu *vcpu);
void deactivate_traps_vhe_put(struct kvm_vcpu *vcpu);
#endif

u64 __guest_enter(struct kvm_vcpu *vcpu);
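
/*
 * Assumed call pattern (sketch): the switch code typically runs the
 * guest in a loop until the exit can no longer be handled at hyp,
 * e.g.
 *
 *	do {
 *		exit_code = __guest_enter(vcpu);
 *	} while (fixup_guest_exit(vcpu, &exit_code));
 */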

bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt);

#ifdef __KVM_NVHE_HYPERVISOR__
void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
			       u64 elr, u64 par);
#endif

#ifdef __KVM_NVHE_HYPERVISOR__
void __pkvm_init_switch_pgd(phys_addr_t phys, unsigned long size,
			    phys_addr_t pgd, void *sp, void *cont_fn);
int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
		unsigned long *per_cpu_base, u32 hyp_va_bits);
void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
#endif

extern u64 kvm_nvhe_sym(id_aa64pfr0_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64pfr1_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64isar0_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64isar1_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64isar2_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64mmfr2_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64smfr0_el1_sys_val);

extern unsigned long kvm_nvhe_sym(__icache_flags);
extern unsigned int kvm_nvhe_sym(kvm_arm_vmid_bits);

#endif /* __ARM64_KVM_HYP_H__ */