xref: /linux/arch/arm64/include/asm/virt.h (revision 7a5f1cd22d47f8ca4b760b6334378ae42c1bd24b)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Copyright (C) 2012 ARM Ltd.
4  * Author: Marc Zyngier <marc.zyngier@arm.com>
5  */
6 
7 #ifndef __ASM__VIRT_H
8 #define __ASM__VIRT_H
9 
10 /*
11  * The arm64 hcall implementation uses x0 to specify the hcall
12  * number. A value less than HVC_STUB_HCALL_NR indicates a special
13  * hcall, such as set vector. Any other value is handled in a
14  * hypervisor specific way.
15  *
16  * The hypercall is allowed to clobber any of the caller-saved
17  * registers (x0-x18), so it is advisable to use it through the
18  * indirection of a function call (as implemented in hyp-stub.S).
19  */
20 
21 /*
22  * HVC_SET_VECTORS - Set the value of the vbar_el2 register.
23  *
24  * @x1: Physical address of the new vector table.
25  */
26 #define HVC_SET_VECTORS 0
27 
28 /*
29  * HVC_SOFT_RESTART - CPU soft reset, used by the cpu_soft_restart routine.
30  */
31 #define HVC_SOFT_RESTART 1
32 
33 /*
34  * HVC_RESET_VECTORS - Restore the vectors to the original HYP stubs
35  */
36 #define HVC_RESET_VECTORS 2
37 
38 /*
39  * HVC_FINALISE_EL2 - Upgrade the CPU from EL1 to EL2, if possible
40  */
41 #define HVC_FINALISE_EL2	3
42 
43 /*
44  * HVC_GET_ICH_VTR_EL2 - Retrieve the ICH_VTR_EL2 value
45  */
46 #define HVC_GET_ICH_VTR_EL2	4
47 
48 /* Max number of HYP stub hypercalls */
49 #define HVC_STUB_HCALL_NR 5
50 
51 /* Error returned when an invalid stub number is passed into x0 */
52 #define HVC_STUB_ERR	0xbadca11
53 
54 #define BOOT_CPU_MODE_EL1	(0xe11)
55 #define BOOT_CPU_MODE_EL2	(0xe12)
56 
57 /*
58  * Flags returned together with the boot mode, but not preserved in
59  * __boot_cpu_mode. Used by the idreg override code to work out the
60  * boot state.
61  */
62 #define BOOT_CPU_FLAG_E2H	BIT_ULL(32)
63 
64 #ifndef __ASSEMBLER__
65 
66 #include <asm/ptrace.h>
67 #include <asm/sections.h>
68 #include <asm/sysreg.h>
69 #include <asm/cpufeature.h>
70 
71 /*
72  * __boot_cpu_mode records what mode CPUs were booted in.
73  * A correctly-implemented bootloader must start all CPUs in the same mode:
74  * In this case, both 32bit halves of __boot_cpu_mode will contain the
75  * same value (either BOOT_CPU_MODE_EL1 if booted in EL1, or BOOT_CPU_MODE_EL2
76  * if booted in EL2).
77  *
78  * Should the bootloader fail to do this, the two values will be different.
79  * This allows the kernel to flag an error when the secondaries have come up.
80  */
81 extern u32 __boot_cpu_mode[2];
82 
83 #define ARM64_VECTOR_TABLE_LEN	SZ_2K
84 
85 void __hyp_set_vectors(phys_addr_t phys_vector_base);
86 void __hyp_reset_vectors(void);
87 bool is_kvm_arm_initialised(void);
88 
89 DECLARE_STATIC_KEY_FALSE(kvm_protected_mode_initialized);
90 
91 static inline bool is_pkvm_initialized(void)
92 {
93 	return IS_ENABLED(CONFIG_KVM) &&
94 	       static_branch_likely(&kvm_protected_mode_initialized);
95 }
96 
#ifdef CONFIG_KVM
bool pkvm_force_reclaim_guest_page(phys_addr_t phys);
#else
/*
 * Stub for !CONFIG_KVM builds: with KVM compiled out there are no pKVM
 * guests, so there is never a guest page to reclaim.
 */
static inline bool pkvm_force_reclaim_guest_page(phys_addr_t phys)
{
	return false;
}
#endif
105 
106 /* Reports the availability of HYP mode */
107 static inline bool is_hyp_mode_available(void)
108 {
109 	/*
110 	 * If KVM protected mode is initialized, all CPUs must have been booted
111 	 * in EL2. Avoid checking __boot_cpu_mode as CPUs now come up in EL1.
112 	 */
113 	if (is_pkvm_initialized())
114 		return true;
115 
116 	return (__boot_cpu_mode[0] == BOOT_CPU_MODE_EL2 &&
117 		__boot_cpu_mode[1] == BOOT_CPU_MODE_EL2);
118 }
119 
120 /* Check if the bootloader has booted CPUs in different modes */
121 static inline bool is_hyp_mode_mismatched(void)
122 {
123 	/*
124 	 * If KVM protected mode is initialized, all CPUs must have been booted
125 	 * in EL2. Avoid checking __boot_cpu_mode as CPUs now come up in EL1.
126 	 */
127 	if (is_pkvm_initialized())
128 		return false;
129 
130 	return __boot_cpu_mode[0] != __boot_cpu_mode[1];
131 }
132 
static __always_inline bool is_kernel_in_hyp_mode(void)
{
	/*
	 * The VHE/nVHE hypervisor objects each know their EL statically,
	 * so this runtime check must never be compiled into either of
	 * them — fail the build if it is.
	 */
	BUILD_BUG_ON(__is_defined(__KVM_NVHE_HYPERVISOR__) ||
		     __is_defined(__KVM_VHE_HYPERVISOR__));
	/* "In HYP mode" == currently executing at EL2. */
	return read_sysreg(CurrentEL) == CurrentEL_EL2;
}
139 
140 static __always_inline bool has_vhe(void)
141 {
142 	/*
143 	 * Code only run in VHE/NVHE hyp context can assume VHE is present or
144 	 * absent. Otherwise fall back to caps.
145 	 * This allows the compiler to discard VHE-specific code from the
146 	 * nVHE object, reducing the number of external symbol references
147 	 * needed to link.
148 	 */
149 	if (is_vhe_hyp_code())
150 		return true;
151 	else if (is_nvhe_hyp_code())
152 		return false;
153 	else
154 		return cpus_have_final_cap(ARM64_HAS_VIRT_HOST_EXTN);
155 }
156 
157 static __always_inline bool is_protected_kvm_enabled(void)
158 {
159 	if (is_vhe_hyp_code())
160 		return false;
161 	else
162 		return cpus_have_final_cap(ARM64_KVM_PROTECTED_MODE);
163 }
164 
165 static __always_inline bool has_hvhe(void)
166 {
167 	if (is_vhe_hyp_code())
168 		return false;
169 
170 	return cpus_have_final_cap(ARM64_KVM_HVHE);
171 }
172 
173 static inline bool is_hyp_nvhe(void)
174 {
175 	return is_hyp_mode_available() && !is_kernel_in_hyp_mode();
176 }
177 
178 #endif /* __ASSEMBLER__ */
179 
180 #endif /* ! __ASM__VIRT_H */
181