xref: /linux/arch/arm64/include/asm/virt.h (revision c48a7c44a1d02516309015b6134c9bb982e17008)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ASM__VIRT_H
#define __ASM__VIRT_H

/*
 * The arm64 hcall implementation uses x0 to specify the hcall
 * number. A value less than HVC_STUB_HCALL_NR indicates a special
 * hcall, such as set vector. Any other value is handled in a
 * hypervisor specific way.
 *
 * The hypercall is allowed to clobber any of the caller-saved
 * registers (x0-x18), so it is advisable to use it through the
 * indirection of a function call (as implemented in hyp-stub.S).
 */

/*
 * HVC_SET_VECTORS - Set the value of the vbar_el2 register.
 *
 * @x1: Physical address of the new vector table.
 */
#define HVC_SET_VECTORS 0

/*
 * HVC_SOFT_RESTART - CPU soft reset, used by the cpu_soft_restart routine.
 */
#define HVC_SOFT_RESTART 1

/*
 * HVC_RESET_VECTORS - Restore the vectors to the original HYP stubs
 */
#define HVC_RESET_VECTORS 2

/*
 * HVC_FINALISE_EL2 - Upgrade the CPU from EL1 to EL2, if possible
 */
#define HVC_FINALISE_EL2	3

/* Max number of HYP stub hypercalls */
#define HVC_STUB_HCALL_NR 4

/* Error returned when an invalid stub number is passed into x0 */
#define HVC_STUB_ERR	0xbadca11

#define BOOT_CPU_MODE_EL1	(0xe11)
#define BOOT_CPU_MODE_EL2	(0xe12)

/*
 * Flags returned together with the boot mode, but not preserved in
 * __boot_cpu_mode. Used by the idreg override code to work out the
 * boot state.
 */
#define BOOT_CPU_FLAG_E2H	BIT_ULL(32)

#ifndef __ASSEMBLY__

#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/sysreg.h>
#include <asm/cpufeature.h>

/*
 * __boot_cpu_mode records what mode CPUs were booted in.
 * A correctly-implemented bootloader must start all CPUs in the same mode:
 * in this case, both 32bit halves of __boot_cpu_mode will contain the
 * same value (BOOT_CPU_MODE_EL1 if booted in EL1, BOOT_CPU_MODE_EL2 if
 * booted in EL2).
 *
 * Should the bootloader fail to do this, the two values will be different.
 * This allows the kernel to flag an error when the secondaries have come up.
 */
extern u32 __boot_cpu_mode[2];

#define ARM64_VECTOR_TABLE_LEN	SZ_2K

void __hyp_set_vectors(phys_addr_t phys_vector_base);
void __hyp_reset_vectors(void);
bool is_kvm_arm_initialised(void);

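/*
 * Illustrative sketch only (not part of the real header): roughly what a
 * wrapper such as __hyp_set_vectors() boils down to under the hcall
 * convention documented at the top of this file. The real implementation is
 * out-of-line assembly in hyp-stub.S; the helper name and inline-asm form
 * below are editorial assumptions for illustration.
 */
#if 0
static inline unsigned long example_hvc_set_vectors(phys_addr_t vectors)
{
	/* x0 carries the hcall number, x1 the argument */
	register unsigned long x0 asm("x0") = HVC_SET_VECTORS;
	register unsigned long x1 asm("x1") = vectors;

	/* the stub may clobber any caller-saved register (x0-x18) */
	asm volatile("hvc #0"
		     : "+r" (x0), "+r" (x1)
		     :
		     : "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10",
		       "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18",
		       "memory");

	/* expected: 0 on success, HVC_STUB_ERR for an unrecognised number */
	return x0;
}
#endif
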
DECLARE_STATIC_KEY_FALSE(kvm_protected_mode_initialized);

/* Reports the availability of HYP mode */
static inline bool is_hyp_mode_available(void)
{
	/*
	 * If KVM protected mode is initialized, all CPUs must have been booted
	 * in EL2. Avoid checking __boot_cpu_mode as CPUs now come up in EL1.
	 */
	if (IS_ENABLED(CONFIG_KVM) &&
	    static_branch_likely(&kvm_protected_mode_initialized))
		return true;

	return (__boot_cpu_mode[0] == BOOT_CPU_MODE_EL2 &&
		__boot_cpu_mode[1] == BOOT_CPU_MODE_EL2);
}

/* Check if the bootloader has booted CPUs in different modes */
static inline bool is_hyp_mode_mismatched(void)
{
	/*
	 * If KVM protected mode is initialized, all CPUs must have been booted
	 * in EL2. Avoid checking __boot_cpu_mode as CPUs now come up in EL1.
	 */
	if (IS_ENABLED(CONFIG_KVM) &&
	    static_branch_likely(&kvm_protected_mode_initialized))
		return false;

	return __boot_cpu_mode[0] != __boot_cpu_mode[1];
}

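/*
 * Illustrative sketch only: how early boot code can consume the two helpers
 * above, loosely modelled on hyp_mode_check() in arch/arm64/kernel/smp.c.
 * The function name below is an editorial assumption, not part of the header.
 */
#if 0
static void example_hyp_mode_check(void)
{
	if (is_hyp_mode_available())
		pr_info("CPU: All CPU(s) started at EL2\n");
	else if (is_hyp_mode_mismatched())
		WARN(1, "CPU: CPUs started in inconsistent modes");
	else
		pr_info("CPU: All CPU(s) started at EL1\n");
}
#endif
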
static __always_inline bool is_kernel_in_hyp_mode(void)
{
	BUILD_BUG_ON(__is_defined(__KVM_NVHE_HYPERVISOR__) ||
		     __is_defined(__KVM_VHE_HYPERVISOR__));
	return read_sysreg(CurrentEL) == CurrentEL_EL2;
}

static __always_inline bool has_vhe(void)
{
	/*
	 * Code only run in VHE/NVHE hyp context can assume VHE is present or
	 * absent. Otherwise fall back to caps.
	 * This allows the compiler to discard VHE-specific code from the
	 * nVHE object, reducing the number of external symbol references
	 * needed to link.
	 */
	if (is_vhe_hyp_code())
		return true;
	else if (is_nvhe_hyp_code())
		return false;
	else
		return cpus_have_final_cap(ARM64_HAS_VIRT_HOST_EXTN);
}

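/*
 * Illustrative sketch only: because has_vhe() folds to a compile-time
 * constant when built as part of the VHE or nVHE hyp objects, a branch like
 * the one below is resolved at build time there and the dead arm (plus any
 * symbols it references) is discarded; only in regular kernel code does it
 * test the CPU capability. Both helper names are hypothetical.
 */
#if 0
static void example_configure_traps(void)
{
	if (has_vhe())
		configure_vhe_traps();	/* dropped entirely from the nVHE object */
	else
		configure_nvhe_traps();	/* dropped entirely from the VHE object */
}
#endif
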
static __always_inline bool is_protected_kvm_enabled(void)
{
	if (is_vhe_hyp_code())
		return false;
	else
		return cpus_have_final_cap(ARM64_KVM_PROTECTED_MODE);
}

static __always_inline bool has_hvhe(void)
{
	if (is_vhe_hyp_code())
		return false;

	return cpus_have_final_cap(ARM64_KVM_HVHE);
}

static inline bool is_hyp_nvhe(void)
{
	return is_hyp_mode_available() && !is_kernel_in_hyp_mode();
}

#endif /* __ASSEMBLY__ */

#endif /* ! __ASM__VIRT_H */