// SPDX-License-Identifier: GPL-2.0-only
/*
 * Contains CPU feature definitions
 *
 * Copyright (C) 2015 ARM Ltd.
 *
 * A note for the weary kernel hacker: the code here is confusing and hard to
 * follow! That's partly because it's solving a nasty problem, but also because
 * there's a little bit of over-abstraction that tends to obscure what's going
 * on behind a maze of helper functions and macros.
 *
 * The basic problem is that hardware folks have started gluing together CPUs
 * with distinct architectural features; in some cases even creating SoCs where
 * user-visible instructions are available only on a subset of the available
 * cores. We try to address this by snapshotting the feature registers of the
 * boot CPU and comparing these with the feature registers of each secondary
 * CPU when bringing them up. If there is a mismatch, then we update the
 * snapshot state to indicate the lowest common denominator of the feature,
 * known as the "safe" value. This snapshot state can be queried to view the
 * "sanitised" value of a feature register.
 *
 * The sanitised register values are used to decide which capabilities we
 * have in the system. These may be in the form of traditional "hwcaps"
 * advertised to userspace or internal "cpucaps" which are used to configure
 * things like alternative patching and static keys. While a feature mismatch
 * may result in a TAINT_CPU_OUT_OF_SPEC kernel taint, a capability mismatch
 * may prevent a CPU from being onlined at all.
 *
 * Some implementation details worth remembering:
 *
 * - Mismatched features are *always* sanitised to a "safe" value, which
 *   usually indicates that the feature is not supported.
 *
 * - A mismatched feature marked with FTR_STRICT will cause a "SANITY CHECK"
 *   warning when onlining an offending CPU and the kernel will be tainted
 *   with TAINT_CPU_OUT_OF_SPEC.
 *
 * - Features marked as FTR_VISIBLE have their sanitised value visible to
 *   userspace. FTR_VISIBLE features in registers that are only visible
 *   to EL0 by trapping *must* have a corresponding HWCAP so that late
 *   onlining of CPUs cannot lead to features disappearing at runtime.
 *
 * - A "feature" is typically a 4-bit register field. A "capability" is the
 *   high-level description derived from the sanitised field value.
 *
 * - Read the Arm ARM (DDI 0487F.a) section D13.1.3 ("Principles of the ID
 *   scheme for fields in ID registers") to understand when feature fields
 *   may be signed or unsigned (FTR_SIGNED and FTR_UNSIGNED accordingly).
 *
 * - KVM exposes its own view of the feature registers to guest operating
 *   systems regardless of FTR_VISIBLE. This is typically driven from the
 *   sanitised register values to allow virtual CPUs to be migrated between
 *   arbitrary physical CPUs, but some features not present on the host are
 *   also advertised and emulated. Look at sys_reg_descs[] for the gory
 *   details.
 *
 * - If the arm64_ftr_bits[] for a register has a missing field, then this
 *   field is treated as STRICT RES0, including for read_sanitised_ftr_reg().
 *   This is stronger than FTR_HIDDEN and can be used to hide features from
 *   KVM guests.
 */
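
/*
 * A worked example of the sanitisation above, with invented values:
 * suppose two CPUs disagree on ID_AA64ISAR0_EL1.AES, an unsigned
 * FTR_LOWER_SAFE field:
 *
 *	boot CPU:	AES = 2	(AES + PMULL instructions)
 *	secondary:	AES = 1	(AES instructions only)
 *
 * The snapshot is updated to min(2, 1) = 1, so read_sanitised_ftr_reg()
 * reports the lowest common denominator and userspace sees HWCAP_AES
 * but not HWCAP_PMULL.
 */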

#define pr_fmt(fmt) "CPU features: " fmt

#include <linux/bsearch.h>
#include <linux/cpumask.h>
#include <linux/crash_dump.h>
#include <linux/sort.h>
#include <linux/stop_machine.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/kasan.h>
#include <asm/cpu.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/fpsimd.h>
#include <asm/insn.h>
#include <asm/kvm_host.h>
#include <asm/mmu_context.h>
#include <asm/mte.h>
#include <asm/processor.h>
#include <asm/smp.h>
#include <asm/sysreg.h>
#include <asm/traps.h>
#include <asm/virt.h>

/* Kernel representation of AT_HWCAP and AT_HWCAP2 */
static unsigned long elf_hwcap __read_mostly;

#ifdef CONFIG_COMPAT
#define COMPAT_ELF_HWCAP_DEFAULT	\
				(COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
				 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
				 COMPAT_HWCAP_TLS|COMPAT_HWCAP_IDIV|\
				 COMPAT_HWCAP_LPAE)
unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
unsigned int compat_elf_hwcap2 __read_mostly;
#endif

DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
EXPORT_SYMBOL(cpu_hwcaps);
static struct arm64_cpu_capabilities const __ro_after_init *cpu_hwcaps_ptrs[ARM64_NCAPS];

/* Also need a bit for ARM64_CB_PATCH */
DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE);

bool arm64_use_ng_mappings = false;
EXPORT_SYMBOL(arm64_use_ng_mappings);

/*
 * Permit PER_LINUX32 and execve() of 32-bit binaries even if not all CPUs
 * support it?
 */
static bool __read_mostly allow_mismatched_32bit_el0;

/*
 * Static branch enabled only if allow_mismatched_32bit_el0 is set and we have
 * seen at least one CPU capable of 32-bit EL0.
 */
DEFINE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0);

/*
 * Mask of CPUs supporting 32-bit EL0.
 * Only valid if arm64_mismatched_32bit_el0 is enabled.
 */
static cpumask_var_t cpu_32bit_el0_mask __cpumask_var_read_mostly;

/*
 * Flag to indicate if we have computed the system-wide capabilities
 * based on the boot-time active CPUs. This is used to determine whether
 * a newly booting CPU should go through the verification process to
 * make sure that it supports the system capabilities, without using a
 * hotplug notifier. It is also used to decide whether we can use the
 * fast path for checking constant CPU caps.
 */
DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
EXPORT_SYMBOL(arm64_const_caps_ready);
static inline void finalize_system_capabilities(void)
{
	static_branch_enable(&arm64_const_caps_ready);
}

void dump_cpu_features(void)
{
	/* file-wide pr_fmt adds "CPU features: " prefix */
	pr_emerg("0x%*pb\n", ARM64_NCAPS, &cpu_hwcaps);
}

DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS);
EXPORT_SYMBOL(cpu_hwcap_keys);

#define __ARM64_FTR_BITS(SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
	{						\
		.sign = SIGNED,				\
		.visible = VISIBLE,			\
		.strict = STRICT,			\
		.type = TYPE,				\
		.shift = SHIFT,				\
		.width = WIDTH,				\
		.safe_val = SAFE_VAL,			\
	}

/* Define a feature with an unsigned value */
#define ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
	__ARM64_FTR_BITS(FTR_UNSIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)

/* Define a feature with a signed value */
#define S_ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
	__ARM64_FTR_BITS(FTR_SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)

#define ARM64_FTR_END					\
	{						\
		.width = 0,				\
	}
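
/*
 * For illustration only, an entry such as:
 *
 *	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE,
 *		       ID_AA64ISAR0_AES_SHIFT, 4, 0)
 *
 * expands to the struct arm64_ftr_bits initialiser:
 *
 *	{
 *		.sign		= FTR_UNSIGNED,
 *		.visible	= FTR_VISIBLE,
 *		.strict		= FTR_STRICT,
 *		.type		= FTR_LOWER_SAFE,
 *		.shift		= ID_AA64ISAR0_AES_SHIFT,
 *		.width		= 4,
 *		.safe_val	= 0,
 *	}
 */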

static void cpu_enable_cnp(struct arm64_cpu_capabilities const *cap);

static bool __system_matches_cap(unsigned int n);

/*
 * NOTE: Any changes to the visibility of features should be kept in
 * sync with the documentation of the CPU feature register ABI.
 */
static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RNDR_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TLB_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_FHM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM4_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM3_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA3_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_ATOMICS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_CRC32_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA1_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_AES_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_I8MM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DGH_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_BF16_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SPECRES_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SB_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FRINTTS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_GPI_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_GPA_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
		       FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_API_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
		       FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_APA_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_DIT_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_AMU_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_MPAM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SEL2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_RAS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
	S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
	S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_ELx_64BIT_ONLY),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_ELx_64BIT_ONLY),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_MPAMFRAC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_RASFRAC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_MTE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_MTE_SHIFT, 4, ID_AA64PFR1_MTE_NI),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_BTI),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_BT_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = {
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_F64MM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_F32MM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_I8MM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SM4_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SHA3_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BF16_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BITPERM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_AES_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SVEVER_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ECV_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_FGT_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EXS_SHIFT, 4, 0),
	/*
	 * Page size not being supported at Stage-2 is not fatal. You
	 * just give up KVM if PAGE_SIZE isn't supported there. Go fix
	 * your favourite nesting hypervisor.
	 *
	 * There is a small corner case where the hypervisor explicitly
	 * advertises a given granule size at Stage-2 (value 2) on some
	 * vCPUs, and uses the fallback to Stage-1 (value 0) for other
	 * vCPUs. Although this is not forbidden by the architecture, it
	 * indicates that the hypervisor is being silly (or buggy).
	 *
	 * We make no effort to cope with this and pretend that if these
	 * fields are inconsistent across vCPUs, then it isn't worth
	 * trying to bring KVM up.
	 */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN4_2_SHIFT, 4, 1),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN64_2_SHIFT, 4, 1),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN16_2_SHIFT, 4, 1),
	/*
	 * We already refuse to boot CPUs that don't support our configured
	 * page size, so we can only detect mismatches for a page size other
	 * than the one we're currently using. Unfortunately, SoCs like this
	 * exist in the wild so, even though we don't like it, we'll have to go
	 * along with it and treat them as non-strict.
	 */
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),

	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
	/* Linux shouldn't care about secure memory */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ASID_SHIFT, 4, 0),
	/*
	 * Differing PARange is fine as long as all peripherals and memory are
	 * mapped within the minimum PARange of all CPUs.
	 */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_ETS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_TWED_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_XNX_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64MMFR1_SPECSEI_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_LOR_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HPD_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VHE_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HADBS_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_E0PD_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EVT_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_BBM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_TTL_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_FWB_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IDS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_AT_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_ST_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_NV_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_CCIDX_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IESB_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LSM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_UAO_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_CNP_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_ctr[] = {
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DIC_SHIFT, 1, 1),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IDC_SHIFT, 1, 1),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_CWG_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_ERG_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1),
	/*
	 * Linux can handle differing I-cache policies. Userspace JITs will
	 * make use of *minLine.
	 * If we have differing I-cache policies, report it as the weakest - VIPT.
	 */
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, CTR_L1IP_SHIFT, 2, ICACHE_POLICY_VIPT),	/* L1Ip */
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IMINLINE_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static struct arm64_ftr_override __ro_after_init no_override = { };

struct arm64_ftr_reg arm64_ftr_reg_ctrel0 = {
	.name		= "SYS_CTR_EL0",
	.ftr_bits	= ftr_ctr,
	.override	= &no_override,
};

static const struct arm64_ftr_bits ftr_id_mmfr0[] = {
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_INNERSHR_SHIFT, 4, 0xf),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_FCSE_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_MMFR0_AUXREG_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_TCM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_SHARELVL_SHIFT, 4, 0),
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_OUTERSHR_SHIFT, 4, 0xf),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_PMSA_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_VMSA_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_DOUBLELOCK_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_PMSVER_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0),
	/*
	 * We can instantiate multiple PMU instances with different levels
	 * of support.
	 */
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_mvfr2[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR2_FPMISC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR2_SIMDMISC_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_dczid[] = {
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, DCZID_DZP_SHIFT, 1, 1),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, DCZID_BS_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_gmid[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, SYS_GMID_EL1_BS_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_isar0[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_DIVIDE_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_DEBUG_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_COPROC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_CMPBRANCH_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_BITFIELD_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_BITCOUNT_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_SWAP_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_isar5[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_RDM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_CRC32_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA1_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_AES_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SEVL_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_mmfr4[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EVT_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_CCIDX_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_LSM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_HPDS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_CNP_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_XNX_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_AC2_SHIFT, 4, 0),

	/*
	 * SpecSEI = 1 indicates that the PE might generate an SError on an
	 * external abort on speculative read. It is safer to assume that an
	 * SError might be generated than that it will not be. Hence it has
	 * been classified as FTR_HIGHER_SAFE.
	 */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_MMFR4_SPECSEI_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_isar4[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_SWP_FRAC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_PSR_M_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_SYNCH_PRIM_FRAC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_BARRIER_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_SMC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_WRITEBACK_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_WITHSHIFTS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_UNPRIV_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_mmfr5[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR5_ETS_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_isar6[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_I8MM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_BF16_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_SPECRES_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_SB_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_FHM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_DP_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_JSCVT_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_pfr0[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_DIT_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR0_CSV2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_STATE3_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_STATE2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_STATE1_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_STATE0_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_pfr1[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_GIC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_VIRT_FRAC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_SEC_FRAC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_GENTIMER_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_VIRTUALIZATION_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_MPROGMOD_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_SECURITY_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_PROGMOD_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_pfr2[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_SSBS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_CSV3_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_dfr0[] = {
	/* [31:28] TraceFilt */
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_PERFMON_SHIFT, 4, 0xf),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_MPROFDBG_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_MMAPTRC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_COPTRC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_MMAPDBG_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_COPSDBG_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_COPDBG_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_dfr1[] = {
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR1_MTPMU_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_zcr[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE,
		ZCR_ELx_LEN_SHIFT, ZCR_ELx_LEN_SIZE, 0),	/* LEN */
	ARM64_FTR_END,
};

/*
 * Common ftr bits for a 32bit register with all hidden, strict
 * attributes, with 4bit feature fields and a default safe value of
 * 0. Covers the following 32bit registers:
 * id_isar[1-3], id_mmfr[1-3], mvfr[0-1]
 */
static const struct arm64_ftr_bits ftr_generic_32bits[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
	ARM64_FTR_END,
};

/* Table for a single 32bit feature value */
static const struct arm64_ftr_bits ftr_single32[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 0, 32, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_raz[] = {
	ARM64_FTR_END,
};

#define ARM64_FTR_REG_OVERRIDE(id, table, ovr) {		\
		.sys_id = id,					\
		.reg = &(struct arm64_ftr_reg){			\
			.name = #id,				\
			.override = (ovr),			\
			.ftr_bits = &((table)[0]),		\
	}}

#define ARM64_FTR_REG(id, table) ARM64_FTR_REG_OVERRIDE(id, table, &no_override)
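
/*
 * For reference, ARM64_FTR_REG(SYS_ID_PFR0_EL1, ftr_id_pfr0) expands to:
 *
 *	{
 *		.sys_id = SYS_ID_PFR0_EL1,
 *		.reg = &(struct arm64_ftr_reg){
 *			.name = "SYS_ID_PFR0_EL1",
 *			.override = &no_override,
 *			.ftr_bits = &ftr_id_pfr0[0],
 *		},
 *	}
 *
 * i.e. each table entry points at an anonymous arm64_ftr_reg holding the
 * mutable, sanitised state for that ID register.
 */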

struct arm64_ftr_override __ro_after_init id_aa64mmfr1_override;
struct arm64_ftr_override __ro_after_init id_aa64pfr1_override;
struct arm64_ftr_override __ro_after_init id_aa64isar1_override;

static const struct __ftr_reg_entry {
	u32			sys_id;
	struct arm64_ftr_reg	*reg;
} arm64_ftr_regs[] = {

	/* Op1 = 0, CRn = 0, CRm = 1 */
	ARM64_FTR_REG(SYS_ID_PFR0_EL1, ftr_id_pfr0),
	ARM64_FTR_REG(SYS_ID_PFR1_EL1, ftr_id_pfr1),
	ARM64_FTR_REG(SYS_ID_DFR0_EL1, ftr_id_dfr0),
	ARM64_FTR_REG(SYS_ID_MMFR0_EL1, ftr_id_mmfr0),
	ARM64_FTR_REG(SYS_ID_MMFR1_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_MMFR2_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_MMFR3_EL1, ftr_generic_32bits),

	/* Op1 = 0, CRn = 0, CRm = 2 */
	ARM64_FTR_REG(SYS_ID_ISAR0_EL1, ftr_id_isar0),
	ARM64_FTR_REG(SYS_ID_ISAR1_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR2_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR3_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR4_EL1, ftr_id_isar4),
	ARM64_FTR_REG(SYS_ID_ISAR5_EL1, ftr_id_isar5),
	ARM64_FTR_REG(SYS_ID_MMFR4_EL1, ftr_id_mmfr4),
	ARM64_FTR_REG(SYS_ID_ISAR6_EL1, ftr_id_isar6),

	/* Op1 = 0, CRn = 0, CRm = 3 */
	ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_MVFR1_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_MVFR2_EL1, ftr_mvfr2),
	ARM64_FTR_REG(SYS_ID_PFR2_EL1, ftr_id_pfr2),
	ARM64_FTR_REG(SYS_ID_DFR1_EL1, ftr_id_dfr1),
	ARM64_FTR_REG(SYS_ID_MMFR5_EL1, ftr_id_mmfr5),

	/* Op1 = 0, CRn = 0, CRm = 4 */
	ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
	ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1,
			       &id_aa64pfr1_override),
	ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_id_aa64zfr0),

	/* Op1 = 0, CRn = 0, CRm = 5 */
	ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0),
	ARM64_FTR_REG(SYS_ID_AA64DFR1_EL1, ftr_raz),

	/* Op1 = 0, CRn = 0, CRm = 6 */
	ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
	ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1,
			       &id_aa64isar1_override),

	/* Op1 = 0, CRn = 0, CRm = 7 */
	ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
	ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1,
			       &id_aa64mmfr1_override),
	ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2),

	/* Op1 = 0, CRn = 1, CRm = 2 */
	ARM64_FTR_REG(SYS_ZCR_EL1, ftr_zcr),

	/* Op1 = 1, CRn = 0, CRm = 0 */
	ARM64_FTR_REG(SYS_GMID_EL1, ftr_gmid),

	/* Op1 = 3, CRn = 0, CRm = 0 */
	{ SYS_CTR_EL0, &arm64_ftr_reg_ctrel0 },
	ARM64_FTR_REG(SYS_DCZID_EL0, ftr_dczid),

	/* Op1 = 3, CRn = 14, CRm = 0 */
	ARM64_FTR_REG(SYS_CNTFRQ_EL0, ftr_single32),
};

static int search_cmp_ftr_reg(const void *id, const void *regp)
{
	return (int)(unsigned long)id - (int)((const struct __ftr_reg_entry *)regp)->sys_id;
}

/*
 * get_arm64_ftr_reg_nowarn - Looks up a feature register entry using
 * its sys_reg() encoding. With the array arm64_ftr_regs sorted in the
 * ascending order of sys_id, we use binary search to find a matching
 * entry.
 *
 * returns - Upon success, matching ftr_reg entry for id.
 *         - NULL on failure. It is up to the caller to decide
 *	     the impact of a failure.
 */
static struct arm64_ftr_reg *get_arm64_ftr_reg_nowarn(u32 sys_id)
{
	const struct __ftr_reg_entry *ret;

	ret = bsearch((const void *)(unsigned long)sys_id,
			arm64_ftr_regs,
			ARRAY_SIZE(arm64_ftr_regs),
			sizeof(arm64_ftr_regs[0]),
			search_cmp_ftr_reg);
	if (ret)
		return ret->reg;
	return NULL;
}

/*
 * get_arm64_ftr_reg - Looks up a feature register entry using
 * its sys_reg() encoding. This calls get_arm64_ftr_reg_nowarn().
 *
 * returns - Upon success, matching ftr_reg entry for id.
 *         - NULL on failure, but with a WARN_ON().
 */
static struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id)
{
	struct arm64_ftr_reg *reg;

	reg = get_arm64_ftr_reg_nowarn(sys_id);

	/*
	 * Requesting a non-existent register search is an error. Warn
	 * and let the caller handle it.
	 */
	WARN_ON(!reg);
	return reg;
}

static u64 arm64_ftr_set_value(const struct arm64_ftr_bits *ftrp, s64 reg,
			       s64 ftr_val)
{
	u64 mask = arm64_ftr_mask(ftrp);

	reg &= ~mask;
	reg |= (ftr_val << ftrp->shift) & mask;
	return reg;
}
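
/*
 * Worked example with made-up numbers: for a 4-bit field at shift 8,
 * arm64_ftr_mask() is GENMASK_ULL(11, 8) == 0xf00. Starting from
 * reg == 0x1234 and ftr_val == 0x5, the field is cleared (0x1034) and
 * the new value inserted: 0x1034 | ((0x5 << 8) & 0xf00) == 0x1534.
 */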

static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
				s64 cur)
{
	s64 ret = 0;

	switch (ftrp->type) {
	case FTR_EXACT:
		ret = ftrp->safe_val;
		break;
	case FTR_LOWER_SAFE:
		ret = min(new, cur);
		break;
	case FTR_HIGHER_OR_ZERO_SAFE:
		if (!cur || !new)
			break;
		fallthrough;
	case FTR_HIGHER_SAFE:
		ret = max(new, cur);
		break;
	default:
		BUG();
	}

	return ret;
}
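
/*
 * Illustrative outcomes for each policy (values invented):
 *
 *	FTR_LOWER_SAFE:		 new = 2, cur = 1 -> 1	(minimum wins)
 *	FTR_HIGHER_SAFE:	 new = 2, cur = 1 -> 2	(maximum wins)
 *	FTR_HIGHER_OR_ZERO_SAFE: new = 2, cur = 0 -> 0	(0 means "unknown",
 *				 so it is the only safe answer)
 *	FTR_EXACT:		 any mismatch	  -> ftrp->safe_val
 */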

static void __init sort_ftr_regs(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(arm64_ftr_regs); i++) {
		const struct arm64_ftr_reg *ftr_reg = arm64_ftr_regs[i].reg;
		const struct arm64_ftr_bits *ftr_bits = ftr_reg->ftr_bits;
		unsigned int j = 0;

		/*
		 * Features here must be sorted in descending order with respect
		 * to their shift values and should not overlap with each other.
		 */
		for (; ftr_bits->width != 0; ftr_bits++, j++) {
			unsigned int width = ftr_reg->ftr_bits[j].width;
			unsigned int shift = ftr_reg->ftr_bits[j].shift;
			unsigned int prev_shift;

			WARN((shift + width) > 64,
				"%s has invalid feature at shift %d\n",
				ftr_reg->name, shift);

			/*
			 * Skip the first feature. There is nothing to
			 * compare against for now.
			 */
			if (j == 0)
				continue;

			prev_shift = ftr_reg->ftr_bits[j - 1].shift;
			WARN((shift + width) > prev_shift,
				"%s has feature overlap at shift %d\n",
				ftr_reg->name, shift);
		}

		/*
		 * Skip the first register. There is nothing to
		 * compare against for now.
		 */
		if (i == 0)
			continue;
		/*
		 * Registers here must be sorted in ascending order with respect
		 * to sys_id for subsequent binary search in get_arm64_ftr_reg()
		 * to work correctly.
		 */
		BUG_ON(arm64_ftr_regs[i].sys_id < arm64_ftr_regs[i - 1].sys_id);
	}
}

/*
 * Initialise the CPU feature register from the boot CPU's values.
 * Also initialises the strict_mask for the register.
 * Any bits that are not covered by an arm64_ftr_bits entry are considered
 * RES0 for the system-wide value, and must strictly match.
 */
static void init_cpu_ftr_reg(u32 sys_reg, u64 new)
{
	u64 val = 0;
	u64 strict_mask = ~0x0ULL;
	u64 user_mask = 0;
	u64 valid_mask = 0;

	const struct arm64_ftr_bits *ftrp;
	struct arm64_ftr_reg *reg = get_arm64_ftr_reg(sys_reg);

	if (!reg)
		return;

	for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
		u64 ftr_mask = arm64_ftr_mask(ftrp);
		s64 ftr_new = arm64_ftr_value(ftrp, new);
		s64 ftr_ovr = arm64_ftr_value(ftrp, reg->override->val);

		if ((ftr_mask & reg->override->mask) == ftr_mask) {
			s64 tmp = arm64_ftr_safe_value(ftrp, ftr_ovr, ftr_new);
			char *str = NULL;

			if (ftr_ovr != tmp) {
				/* Unsafe, remove the override */
				reg->override->mask &= ~ftr_mask;
				reg->override->val &= ~ftr_mask;
				tmp = ftr_ovr;
				str = "ignoring override";
			} else if (ftr_new != tmp) {
				/* Override was valid */
				ftr_new = tmp;
				str = "forced";
			} else if (ftr_ovr == tmp) {
				/* Override was the safe value */
				str = "already set";
			}

			if (str)
				pr_warn("%s[%d:%d]: %s to %llx\n",
					reg->name,
					ftrp->shift + ftrp->width - 1,
					ftrp->shift, str, tmp);
		} else if ((ftr_mask & reg->override->val) == ftr_mask) {
			reg->override->val &= ~ftr_mask;
			pr_warn("%s[%d:%d]: impossible override, ignored\n",
				reg->name,
				ftrp->shift + ftrp->width - 1,
				ftrp->shift);
		}

		val = arm64_ftr_set_value(ftrp, val, ftr_new);

		valid_mask |= ftr_mask;
		if (!ftrp->strict)
			strict_mask &= ~ftr_mask;
		if (ftrp->visible)
			user_mask |= ftr_mask;
		else
			reg->user_val = arm64_ftr_set_value(ftrp,
							    reg->user_val,
							    ftrp->safe_val);
	}

	val &= valid_mask;

	reg->sys_val = val;
	reg->strict_mask = strict_mask;
	reg->user_mask = user_mask;
}
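
/*
 * The override handling above boils down to three cases. For a
 * hypothetical unsigned FTR_LOWER_SAFE field whose hardware value is 2:
 *
 *	override = 1: safe(1, 2) == 1 == override, so the field is
 *		      "forced" down to 1.
 *	override = 3: safe(3, 2) == 2 != override; honouring it would
 *		      claim support the CPU lacks, so the override is
 *		      dropped ("ignoring override").
 *	override = 2: identical to the hardware value ("already set").
 */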

extern const struct arm64_cpu_capabilities arm64_errata[];
static const struct arm64_cpu_capabilities arm64_features[];

static void __init
init_cpu_hwcaps_indirect_list_from_array(const struct arm64_cpu_capabilities *caps)
{
	for (; caps->matches; caps++) {
		if (WARN(caps->capability >= ARM64_NCAPS,
			"Invalid capability %d\n", caps->capability))
			continue;
		if (WARN(cpu_hwcaps_ptrs[caps->capability],
			"Duplicate entry for capability %d\n",
			caps->capability))
			continue;
		cpu_hwcaps_ptrs[caps->capability] = caps;
	}
}

static void __init init_cpu_hwcaps_indirect_list(void)
{
	init_cpu_hwcaps_indirect_list_from_array(arm64_features);
	init_cpu_hwcaps_indirect_list_from_array(arm64_errata);
}

static void __init setup_boot_cpu_capabilities(void);

static void init_32bit_cpu_features(struct cpuinfo_32bit *info)
{
	init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
	init_cpu_ftr_reg(SYS_ID_DFR1_EL1, info->reg_id_dfr1);
	init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
	init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
	init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
	init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
	init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
	init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
	init_cpu_ftr_reg(SYS_ID_ISAR6_EL1, info->reg_id_isar6);
	init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
	init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
	init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
	init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
	init_cpu_ftr_reg(SYS_ID_MMFR4_EL1, info->reg_id_mmfr4);
	init_cpu_ftr_reg(SYS_ID_MMFR5_EL1, info->reg_id_mmfr5);
	init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
	init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
	init_cpu_ftr_reg(SYS_ID_PFR2_EL1, info->reg_id_pfr2);
	init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
	init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
	init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
}

void __init init_cpu_features(struct cpuinfo_arm64 *info)
{
	/* Before we start using the tables, make sure they are sorted */
	sort_ftr_regs();

	init_cpu_ftr_reg(SYS_CTR_EL0, info->reg_ctr);
	init_cpu_ftr_reg(SYS_DCZID_EL0, info->reg_dczid);
	init_cpu_ftr_reg(SYS_CNTFRQ_EL0, info->reg_cntfrq);
	init_cpu_ftr_reg(SYS_ID_AA64DFR0_EL1, info->reg_id_aa64dfr0);
	init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1);
	init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0);
	init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
	init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
	init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
	init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2);
	init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0);
	init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
	init_cpu_ftr_reg(SYS_ID_AA64ZFR0_EL1, info->reg_id_aa64zfr0);

	if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0))
		init_32bit_cpu_features(&info->aarch32);

	if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
		init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr);
		vec_init_vq_map(ARM64_VEC_SVE);
	}

	if (id_aa64pfr1_mte(info->reg_id_aa64pfr1))
		init_cpu_ftr_reg(SYS_GMID_EL1, info->reg_gmid);

	/*
	 * Initialize the indirect array of CPU capabilities pointers
	 * before we handle the boot CPU below.
	 */
	init_cpu_hwcaps_indirect_list();

	/*
	 * Detect and enable early CPU capabilities based on the boot CPU,
	 * after we have initialised the CPU feature infrastructure.
	 */
	setup_boot_cpu_capabilities();
}

static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
{
	const struct arm64_ftr_bits *ftrp;

	for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
		s64 ftr_cur = arm64_ftr_value(ftrp, reg->sys_val);
		s64 ftr_new = arm64_ftr_value(ftrp, new);

		if (ftr_cur == ftr_new)
			continue;
		/* Find a safe value */
		ftr_new = arm64_ftr_safe_value(ftrp, ftr_new, ftr_cur);
		reg->sys_val = arm64_ftr_set_value(ftrp, reg->sys_val, ftr_new);
	}
}

static int check_update_ftr_reg(u32 sys_id, int cpu, u64 val, u64 boot)
{
	struct arm64_ftr_reg *regp = get_arm64_ftr_reg(sys_id);

	if (!regp)
		return 0;

	update_cpu_ftr_reg(regp, val);
	if ((boot & regp->strict_mask) == (val & regp->strict_mask))
		return 0;
	pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016llx, CPU%d: %#016llx\n",
			regp->name, boot, cpu, val);
	return 1;
}
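
/*
 * Sketch of the strict-mask comparison with invented values: if only
 * the lowest nibble of a register is FTR_NONSTRICT, strict_mask is
 * ~0xfULL. Then boot == 0x21 and val == 0x25 compare equal under the
 * mask (no taint), while boot == 0x21 and val == 0x11 differ and fire
 * the SANITY CHECK warning above.
 */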

static void relax_cpu_ftr_reg(u32 sys_id, int field)
{
	const struct arm64_ftr_bits *ftrp;
	struct arm64_ftr_reg *regp = get_arm64_ftr_reg(sys_id);

	if (!regp)
		return;

	for (ftrp = regp->ftr_bits; ftrp->width; ftrp++) {
		if (ftrp->shift == field) {
			regp->strict_mask &= ~arm64_ftr_mask(ftrp);
			break;
		}
	}

	/* Bogus field? */
	WARN_ON(!ftrp->width);
}

static void lazy_init_32bit_cpu_features(struct cpuinfo_arm64 *info,
					 struct cpuinfo_arm64 *boot)
{
	static bool boot_cpu_32bit_regs_overridden = false;

	if (!allow_mismatched_32bit_el0 || boot_cpu_32bit_regs_overridden)
		return;

	if (id_aa64pfr0_32bit_el0(boot->reg_id_aa64pfr0))
		return;

	boot->aarch32 = info->aarch32;
	init_32bit_cpu_features(&boot->aarch32);
	boot_cpu_32bit_regs_overridden = true;
}

static int update_32bit_cpu_features(int cpu, struct cpuinfo_32bit *info,
				     struct cpuinfo_32bit *boot)
{
	int taint = 0;
	u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

	/*
	 * If we don't have AArch32 at EL1, then relax the strictness of
	 * EL1-dependent register fields to avoid spurious sanity-check
	 * failures.
	 */
	if (!id_aa64pfr0_32bit_el1(pfr0)) {
		relax_cpu_ftr_reg(SYS_ID_ISAR4_EL1, ID_ISAR4_SMC_SHIFT);
		relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_VIRT_FRAC_SHIFT);
		relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_SEC_FRAC_SHIFT);
		relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_VIRTUALIZATION_SHIFT);
		relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_SECURITY_SHIFT);
		relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_PROGMOD_SHIFT);
	}

	taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu,
				      info->reg_id_dfr0, boot->reg_id_dfr0);
	taint |= check_update_ftr_reg(SYS_ID_DFR1_EL1, cpu,
				      info->reg_id_dfr1, boot->reg_id_dfr1);
	taint |= check_update_ftr_reg(SYS_ID_ISAR0_EL1, cpu,
				      info->reg_id_isar0, boot->reg_id_isar0);
	taint |= check_update_ftr_reg(SYS_ID_ISAR1_EL1, cpu,
				      info->reg_id_isar1, boot->reg_id_isar1);
	taint |= check_update_ftr_reg(SYS_ID_ISAR2_EL1, cpu,
				      info->reg_id_isar2, boot->reg_id_isar2);
	taint |= check_update_ftr_reg(SYS_ID_ISAR3_EL1, cpu,
				      info->reg_id_isar3, boot->reg_id_isar3);
	taint |= check_update_ftr_reg(SYS_ID_ISAR4_EL1, cpu,
				      info->reg_id_isar4, boot->reg_id_isar4);
	taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu,
				      info->reg_id_isar5, boot->reg_id_isar5);
	taint |= check_update_ftr_reg(SYS_ID_ISAR6_EL1, cpu,
				      info->reg_id_isar6, boot->reg_id_isar6);

	/*
	 * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and
	 * ACTLR formats could differ across CPUs and therefore would have to
	 * be trapped for virtualization anyway.
	 */
	taint |= check_update_ftr_reg(SYS_ID_MMFR0_EL1, cpu,
				      info->reg_id_mmfr0, boot->reg_id_mmfr0);
	taint |= check_update_ftr_reg(SYS_ID_MMFR1_EL1, cpu,
				      info->reg_id_mmfr1, boot->reg_id_mmfr1);
	taint |= check_update_ftr_reg(SYS_ID_MMFR2_EL1, cpu,
				      info->reg_id_mmfr2, boot->reg_id_mmfr2);
	taint |= check_update_ftr_reg(SYS_ID_MMFR3_EL1, cpu,
				      info->reg_id_mmfr3, boot->reg_id_mmfr3);
	taint |= check_update_ftr_reg(SYS_ID_MMFR4_EL1, cpu,
				      info->reg_id_mmfr4, boot->reg_id_mmfr4);
	taint |= check_update_ftr_reg(SYS_ID_MMFR5_EL1, cpu,
				      info->reg_id_mmfr5, boot->reg_id_mmfr5);
	taint |= check_update_ftr_reg(SYS_ID_PFR0_EL1, cpu,
				      info->reg_id_pfr0, boot->reg_id_pfr0);
	taint |= check_update_ftr_reg(SYS_ID_PFR1_EL1, cpu,
				      info->reg_id_pfr1, boot->reg_id_pfr1);
	taint |= check_update_ftr_reg(SYS_ID_PFR2_EL1, cpu,
				      info->reg_id_pfr2, boot->reg_id_pfr2);
	taint |= check_update_ftr_reg(SYS_MVFR0_EL1, cpu,
				      info->reg_mvfr0, boot->reg_mvfr0);
	taint |= check_update_ftr_reg(SYS_MVFR1_EL1, cpu,
				      info->reg_mvfr1, boot->reg_mvfr1);
	taint |= check_update_ftr_reg(SYS_MVFR2_EL1, cpu,
				      info->reg_mvfr2, boot->reg_mvfr2);

	return taint;
}

/*
 * Update system wide CPU feature registers with the values from a
 * non-boot CPU. Also performs SANITY checks to make sure that there
 * aren't any insane variations from that of the boot CPU.
 */
void update_cpu_features(int cpu,
			 struct cpuinfo_arm64 *info,
			 struct cpuinfo_arm64 *boot)
{
	int taint = 0;

	/*
	 * The kernel can handle differing I-cache policies, but otherwise
	 * caches should look identical. Userspace JITs will make use of
	 * *minLine.
	 */
	taint |= check_update_ftr_reg(SYS_CTR_EL0, cpu,
				      info->reg_ctr, boot->reg_ctr);

	/*
	 * Userspace may perform DC ZVA instructions. Mismatched block sizes
	 * could result in too much or too little memory being zeroed if a
	 * process is preempted and migrated between CPUs.
	 */
	taint |= check_update_ftr_reg(SYS_DCZID_EL0, cpu,
				      info->reg_dczid, boot->reg_dczid);

	/* If different, timekeeping will be broken (especially with KVM) */
	taint |= check_update_ftr_reg(SYS_CNTFRQ_EL0, cpu,
				      info->reg_cntfrq, boot->reg_cntfrq);

	/*
	 * The kernel uses self-hosted debug features and expects CPUs to
	 * support identical debug features. We presently need CTX_CMPs, WRPs,
	 * and BRPs to be identical.
	 * ID_AA64DFR1 is currently RES0.
	 */
	taint |= check_update_ftr_reg(SYS_ID_AA64DFR0_EL1, cpu,
				      info->reg_id_aa64dfr0, boot->reg_id_aa64dfr0);
	taint |= check_update_ftr_reg(SYS_ID_AA64DFR1_EL1, cpu,
				      info->reg_id_aa64dfr1, boot->reg_id_aa64dfr1);
	/*
	 * Even in big.LITTLE, processors should be identical instruction-set
	 * wise.
	 */
	taint |= check_update_ftr_reg(SYS_ID_AA64ISAR0_EL1, cpu,
				      info->reg_id_aa64isar0, boot->reg_id_aa64isar0);
	taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu,
				      info->reg_id_aa64isar1, boot->reg_id_aa64isar1);

	/*
	 * Differing PARange support is fine as long as all peripherals and
	 * memory are mapped within the minimum PARange of all CPUs.
	 * Linux should not care about secure memory.
	 */
	taint |= check_update_ftr_reg(SYS_ID_AA64MMFR0_EL1, cpu,
				      info->reg_id_aa64mmfr0, boot->reg_id_aa64mmfr0);
	taint |= check_update_ftr_reg(SYS_ID_AA64MMFR1_EL1, cpu,
				      info->reg_id_aa64mmfr1, boot->reg_id_aa64mmfr1);
	taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu,
				      info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2);

	taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
				      info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
	taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu,
				      info->reg_id_aa64pfr1, boot->reg_id_aa64pfr1);

	taint |= check_update_ftr_reg(SYS_ID_AA64ZFR0_EL1, cpu,
				      info->reg_id_aa64zfr0, boot->reg_id_aa64zfr0);

	if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
		taint |= check_update_ftr_reg(SYS_ZCR_EL1, cpu,
					info->reg_zcr, boot->reg_zcr);

		/* Probe vector lengths, unless we already gave up on SVE */
		if (id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) &&
		    !system_capabilities_finalized())
			vec_update_vq_map(ARM64_VEC_SVE);
	}

	/*
	 * The kernel uses the LDGM/STGM instructions and the number of tags
	 * they read/write depends on the GMID_EL1.BS field. Check that the
	 * value is the same on all CPUs.
	 */
	if (IS_ENABLED(CONFIG_ARM64_MTE) &&
	    id_aa64pfr1_mte(info->reg_id_aa64pfr1)) {
		taint |= check_update_ftr_reg(SYS_GMID_EL1, cpu,
					      info->reg_gmid, boot->reg_gmid);
	}

	/*
	 * If we don't have AArch32 at all then skip the checks entirely
	 * as the register values may be UNKNOWN and we're not going to be
	 * using them for anything.
	 *
	 * This relies on a sanitised view of the AArch64 ID registers
	 * (e.g. SYS_ID_AA64PFR0_EL1), so we call it last.
	 */
	if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
		lazy_init_32bit_cpu_features(info, boot);
		taint |= update_32bit_cpu_features(cpu, &info->aarch32,
						   &boot->aarch32);
	}

	/*
	 * Mismatched CPU features are a recipe for disaster. Don't even
	 * pretend to support them.
	 */
	if (taint) {
		pr_warn_once("Unsupported CPU feature variation detected.\n");
		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
	}
}

u64 read_sanitised_ftr_reg(u32 id)
{
	struct arm64_ftr_reg *regp = get_arm64_ftr_reg(id);

	if (!regp)
		return 0;
	return regp->sys_val;
}
EXPORT_SYMBOL_GPL(read_sanitised_ftr_reg);

#define read_sysreg_case(r)	\
	case r:		val = read_sysreg_s(r); break;
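
/*
 * i.e. read_sysreg_case(SYS_CTR_EL0) expands to:
 *
 *	case SYS_CTR_EL0:	val = read_sysreg_s(SYS_CTR_EL0); break;
 */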
1228 
1229 /*
1230  * __read_sysreg_by_encoding() - Used by a STARTING cpu before cpuinfo is populated.
1231  * Read the system register on the current CPU
1232  */
1233 u64 __read_sysreg_by_encoding(u32 sys_id)
1234 {
1235 	struct arm64_ftr_reg *regp;
1236 	u64 val;
1237 
1238 	switch (sys_id) {
1239 	read_sysreg_case(SYS_ID_PFR0_EL1);
1240 	read_sysreg_case(SYS_ID_PFR1_EL1);
1241 	read_sysreg_case(SYS_ID_PFR2_EL1);
1242 	read_sysreg_case(SYS_ID_DFR0_EL1);
1243 	read_sysreg_case(SYS_ID_DFR1_EL1);
1244 	read_sysreg_case(SYS_ID_MMFR0_EL1);
1245 	read_sysreg_case(SYS_ID_MMFR1_EL1);
1246 	read_sysreg_case(SYS_ID_MMFR2_EL1);
1247 	read_sysreg_case(SYS_ID_MMFR3_EL1);
1248 	read_sysreg_case(SYS_ID_MMFR4_EL1);
1249 	read_sysreg_case(SYS_ID_MMFR5_EL1);
1250 	read_sysreg_case(SYS_ID_ISAR0_EL1);
1251 	read_sysreg_case(SYS_ID_ISAR1_EL1);
1252 	read_sysreg_case(SYS_ID_ISAR2_EL1);
1253 	read_sysreg_case(SYS_ID_ISAR3_EL1);
1254 	read_sysreg_case(SYS_ID_ISAR4_EL1);
1255 	read_sysreg_case(SYS_ID_ISAR5_EL1);
1256 	read_sysreg_case(SYS_ID_ISAR6_EL1);
1257 	read_sysreg_case(SYS_MVFR0_EL1);
1258 	read_sysreg_case(SYS_MVFR1_EL1);
1259 	read_sysreg_case(SYS_MVFR2_EL1);
1260 
1261 	read_sysreg_case(SYS_ID_AA64PFR0_EL1);
1262 	read_sysreg_case(SYS_ID_AA64PFR1_EL1);
1263 	read_sysreg_case(SYS_ID_AA64ZFR0_EL1);
1264 	read_sysreg_case(SYS_ID_AA64DFR0_EL1);
1265 	read_sysreg_case(SYS_ID_AA64DFR1_EL1);
1266 	read_sysreg_case(SYS_ID_AA64MMFR0_EL1);
1267 	read_sysreg_case(SYS_ID_AA64MMFR1_EL1);
1268 	read_sysreg_case(SYS_ID_AA64MMFR2_EL1);
1269 	read_sysreg_case(SYS_ID_AA64ISAR0_EL1);
1270 	read_sysreg_case(SYS_ID_AA64ISAR1_EL1);
1271 
1272 	read_sysreg_case(SYS_CNTFRQ_EL0);
1273 	read_sysreg_case(SYS_CTR_EL0);
1274 	read_sysreg_case(SYS_DCZID_EL0);
1275 
1276 	default:
1277 		BUG();
1278 		return 0;
1279 	}
1280 
1281 	regp  = get_arm64_ftr_reg(sys_id);
1282 	if (regp) {
1283 		val &= ~regp->override->mask;
1284 		val |= (regp->override->val & regp->override->mask);
1285 	}
1286 
1287 	return val;
1288 }
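
/*
 * Worked example (illustrative only): if an early override was registered
 * for this register with mask = 0xf << 28 and val = 0, then a raw hardware
 * value of 0x30000000 comes back as (0x30000000 & ~mask) | (0 & mask) == 0,
 * i.e. the overridden field reads as zero regardless of what the hardware
 * reported.
 */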
1289 
1290 #include <linux/irqchip/arm-gic-v3.h>
1291 
1292 static bool
1293 feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
1294 {
1295 	int val = cpuid_feature_extract_field(reg, entry->field_pos, entry->sign);
1296 
1297 	return val >= entry->min_field_value;
1298 }
1299 
1300 static bool
1301 has_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope)
1302 {
1303 	u64 val;
1304 
1305 	WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
1306 	if (scope == SCOPE_SYSTEM)
1307 		val = read_sanitised_ftr_reg(entry->sys_reg);
1308 	else
1309 		val = __read_sysreg_by_encoding(entry->sys_reg);
1310 
1311 	return feature_matches(val, entry);
1312 }
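
/*
 * A minimal sketch (illustrative only; it mirrors the real ARM64_HAS_SB
 * entry further down) of how a capability entry drives the helpers above:
 * the entry names a register, a field position and a minimum value, and
 * feature_matches() performs the comparison.
 */
#if 0
static const struct arm64_cpu_capabilities example_cap = {
	.desc			= "Speculation barrier (SB)",
	.capability		= ARM64_HAS_SB,
	.type			= ARM64_CPUCAP_SYSTEM_FEATURE,
	.matches		= has_cpuid_feature,
	.sys_reg		= SYS_ID_AA64ISAR1_EL1,
	.field_pos		= ID_AA64ISAR1_SB_SHIFT,
	.sign			= FTR_UNSIGNED,
	.min_field_value	= 1,
};
#endif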
1313 
1314 const struct cpumask *system_32bit_el0_cpumask(void)
1315 {
1316 	if (!system_supports_32bit_el0())
1317 		return cpu_none_mask;
1318 
1319 	if (static_branch_unlikely(&arm64_mismatched_32bit_el0))
1320 		return cpu_32bit_el0_mask;
1321 
1322 	return cpu_possible_mask;
1323 }
1324 
1325 static int __init parse_32bit_el0_param(char *str)
1326 {
1327 	allow_mismatched_32bit_el0 = true;
1328 	return 0;
1329 }
1330 early_param("allow_mismatched_32bit_el0", parse_32bit_el0_param);
1331 
1332 static ssize_t aarch32_el0_show(struct device *dev,
1333 				struct device_attribute *attr, char *buf)
1334 {
1335 	const struct cpumask *mask = system_32bit_el0_cpumask();
1336 
1337 	return sysfs_emit(buf, "%*pbl\n", cpumask_pr_args(mask));
1338 }
1339 static const DEVICE_ATTR_RO(aarch32_el0);
1340 
1341 static int __init aarch32_el0_sysfs_init(void)
1342 {
1343 	if (!allow_mismatched_32bit_el0)
1344 		return 0;
1345 
1346 	return device_create_file(cpu_subsys.dev_root, &dev_attr_aarch32_el0);
1347 }
1348 device_initcall(aarch32_el0_sysfs_init);
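
/*
 * Example (illustrative; the CPU numbers are hypothetical): with
 * "allow_mismatched_32bit_el0" on the command line, userspace can read
 * the resulting mask of 32-bit-capable CPUs:
 *
 *	$ cat /sys/devices/system/cpu/aarch32_el0
 *	0-3
 */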
1349 
1350 static bool has_32bit_el0(const struct arm64_cpu_capabilities *entry, int scope)
1351 {
1352 	if (!has_cpuid_feature(entry, scope))
1353 		return allow_mismatched_32bit_el0;
1354 
1355 	if (scope == SCOPE_SYSTEM)
1356 		pr_info("detected: 32-bit EL0 Support\n");
1357 
1358 	return true;
1359 }
1360 
1361 static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry, int scope)
1362 {
1363 	bool has_sre;
1364 
1365 	if (!has_cpuid_feature(entry, scope))
1366 		return false;
1367 
1368 	has_sre = gic_enable_sre();
1369 	if (!has_sre)
1370 		pr_warn_once("%s present but disabled by higher exception level\n",
1371 			     entry->desc);
1372 
1373 	return has_sre;
1374 }
1375 
1376 static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int __unused)
1377 {
1378 	u32 midr = read_cpuid_id();
1379 
1380 	/* Cavium ThunderX pass 1.x and 2.x */
1381 	return midr_is_cpu_model_range(midr, MIDR_THUNDERX,
1382 		MIDR_CPU_VAR_REV(0, 0),
1383 		MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK));
1384 }
1385 
1386 static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unused)
1387 {
1388 	u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
1389 
1390 	return cpuid_feature_extract_signed_field(pfr0,
1391 					ID_AA64PFR0_FP_SHIFT) < 0;
1392 }
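
/*
 * Worked example (illustrative only): ID_AA64PFR0_EL1.FP is a signed
 * field, so a raw value of 0xf sign-extends to -1 ("FP not implemented")
 * and the check above fires, whereas 0x0 (implemented) and 0x1 (plus
 * half-precision support) extract as 0 and 1 and do not.
 */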
1393 
1394 static bool has_cache_idc(const struct arm64_cpu_capabilities *entry,
1395 			  int scope)
1396 {
1397 	u64 ctr;
1398 
1399 	if (scope == SCOPE_SYSTEM)
1400 		ctr = arm64_ftr_reg_ctrel0.sys_val;
1401 	else
1402 		ctr = read_cpuid_effective_cachetype();
1403 
1404 	return ctr & BIT(CTR_IDC_SHIFT);
1405 }
1406 
1407 static void cpu_emulate_effective_ctr(const struct arm64_cpu_capabilities *__unused)
1408 {
1409 	/*
1410 	 * If the CPU exposes raw CTR_EL0.IDC = 0 while the effective
1411 	 * value is CTR_EL0.IDC = 1 (derived from the CLIDR values), we
1412 	 * need to trap EL0 accesses to CTR_EL0 on this CPU and emulate
1413 	 * them with the real/safe value.
1414 	 */
1415 	if (!(read_cpuid_cachetype() & BIT(CTR_IDC_SHIFT)))
1416 		sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
1417 }
1418 
1419 static bool has_cache_dic(const struct arm64_cpu_capabilities *entry,
1420 			  int scope)
1421 {
1422 	u64 ctr;
1423 
1424 	if (scope == SCOPE_SYSTEM)
1425 		ctr = arm64_ftr_reg_ctrel0.sys_val;
1426 	else
1427 		ctr = read_cpuid_cachetype();
1428 
1429 	return ctr & BIT(CTR_DIC_SHIFT);
1430 }
1431 
1432 static bool __maybe_unused
1433 has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
1434 {
1435 	/*
1436 	 * Kdump isn't guaranteed to power off all secondary CPUs, so
1437 	 * CNP may share TLB entries with a CPU stuck in the crashed
1438 	 * kernel.
1439 	 */
1440 	if (is_kdump_kernel())
1441 		return false;
1442 
1443 	if (cpus_have_const_cap(ARM64_WORKAROUND_NVIDIA_CARMEL_CNP))
1444 		return false;
1445 
1446 	return has_cpuid_feature(entry, scope);
1447 }
1448 
1449 /*
1450  * This check is triggered during early boot, before the cpufeature
1451  * framework is initialised. Checking the status on the local CPU allows
1452  * the boot CPU to detect the need for non-global mappings and thus avoid
1453  * a pagetable rewrite after all the CPUs have been booted. The check is
1454  * run again on each individual CPU anyway, so we reach a consistent
1455  * state once all the SMP CPUs are up and can then switch to non-global
1456  * mappings if required.
1457  */
1458 bool kaslr_requires_kpti(void)
1459 {
1460 	if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
1461 		return false;
1462 
1463 	/*
1464 	 * E0PD does a similar job to KPTI so can be used instead
1465 	 * where available.
1466 	 */
1467 	if (IS_ENABLED(CONFIG_ARM64_E0PD)) {
1468 		u64 mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);
1469 		if (cpuid_feature_extract_unsigned_field(mmfr2,
1470 						ID_AA64MMFR2_E0PD_SHIFT))
1471 			return false;
1472 	}
1473 
1474 	/*
1475 	 * Systems affected by Cavium erratum 27456 are incompatible
1476 	 * with KPTI.
1477 	 */
1478 	if (IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) {
1479 		extern const struct midr_range cavium_erratum_27456_cpus[];
1480 
1481 		if (is_midr_in_range_list(read_cpuid_id(),
1482 					  cavium_erratum_27456_cpus))
1483 			return false;
1484 	}
1485 
1486 	return kaslr_offset() > 0;
1487 }
1488 
1489 static bool __meltdown_safe = true;
1490 static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
1491 
1492 static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
1493 				int scope)
1494 {
1495 	/* List of CPUs that are not vulnerable and don't need KPTI */
1496 	static const struct midr_range kpti_safe_list[] = {
1497 		MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
1498 		MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
1499 		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
1500 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
1501 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
1502 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
1503 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
1504 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
1505 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
1506 		MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
1507 		MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
1508 		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_GOLD),
1509 		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_SILVER),
1510 		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
1511 		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
1512 		{ /* sentinel */ }
1513 	};
1514 	char const *str = "kpti command line option";
1515 	bool meltdown_safe;
1516 
1517 	meltdown_safe = is_midr_in_range_list(read_cpuid_id(), kpti_safe_list);
1518 
1519 	/* Defer to CPU feature registers */
1520 	if (has_cpuid_feature(entry, scope))
1521 		meltdown_safe = true;
1522 
1523 	if (!meltdown_safe)
1524 		__meltdown_safe = false;
1525 
1526 	/*
1527 	 * For reasons that aren't entirely clear, enabling KPTI on Cavium
1528 	 * ThunderX leads to apparent I-cache corruption of kernel text, which
1529 	 * ends as well as you might imagine. Don't even try. We cannot rely
1530 	 * on the cpus_have_*cap() helpers here to detect the CPU erratum
1531 	 * because cpucap detection order may change. However, since we know
1532 	 * affected CPUs are always in a homogeneous configuration, it is
1533 	 * safe to rely on this_cpu_has_cap() here.
1534 	 */
1535 	if (this_cpu_has_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
1536 		str = "ARM64_WORKAROUND_CAVIUM_27456";
1537 		__kpti_forced = -1;
1538 	}
1539 
1540 	/* Useful for KASLR robustness */
1541 	if (kaslr_requires_kpti()) {
1542 		if (!__kpti_forced) {
1543 			str = "KASLR";
1544 			__kpti_forced = 1;
1545 		}
1546 	}
1547 
1548 	if (cpu_mitigations_off() && !__kpti_forced) {
1549 		str = "mitigations=off";
1550 		__kpti_forced = -1;
1551 	}
1552 
1553 	if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) {
1554 		pr_info_once("kernel page table isolation disabled by kernel configuration\n");
1555 		return false;
1556 	}
1557 
1558 	/* Forced? */
1559 	if (__kpti_forced) {
1560 		pr_info_once("kernel page table isolation forced %s by %s\n",
1561 			     __kpti_forced > 0 ? "ON" : "OFF", str);
1562 		return __kpti_forced > 0;
1563 	}
1564 
1565 	return !meltdown_safe;
1566 }
1567 
1568 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1569 static void __nocfi
1570 kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
1571 {
1572 	typedef void (kpti_remap_fn)(int, int, phys_addr_t);
1573 	extern kpti_remap_fn idmap_kpti_install_ng_mappings;
1574 	kpti_remap_fn *remap_fn;
1575 
1576 	int cpu = smp_processor_id();
1577 
1578 	/*
1579 	 * We don't need to rewrite the page-tables if either we've done
1580 	 * it already or we have KASLR enabled and therefore have not
1581 	 * created any global mappings at all.
1582 	 */
1583 	if (arm64_use_ng_mappings)
1584 		return;
1585 
1586 	remap_fn = (void *)__pa_symbol(function_nocfi(idmap_kpti_install_ng_mappings));
1587 
1588 	cpu_install_idmap();
1589 	remap_fn(cpu, num_online_cpus(), __pa_symbol(swapper_pg_dir));
1590 	cpu_uninstall_idmap();
1591 
1592 	if (!cpu)
1593 		arm64_use_ng_mappings = true;
1594 }
1595 #else
1596 static void
1597 kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
1598 {
1599 }
1600 #endif	/* CONFIG_UNMAP_KERNEL_AT_EL0 */
1601 
1602 static int __init parse_kpti(char *str)
1603 {
1604 	bool enabled;
1605 	int ret = strtobool(str, &enabled);
1606 
1607 	if (ret)
1608 		return ret;
1609 
1610 	__kpti_forced = enabled ? 1 : -1;
1611 	return 0;
1612 }
1613 early_param("kpti", parse_kpti);
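
/*
 * Usage example (illustrative): booting with "kpti=off" forces
 * __kpti_forced to -1 and "kpti=1" forces it to 1; strtobool() accepts
 * the usual 0/1, y/n and on/off spellings.
 */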
1614 
1615 #ifdef CONFIG_ARM64_HW_AFDBM
1616 static inline void __cpu_enable_hw_dbm(void)
1617 {
1618 	u64 tcr = read_sysreg(tcr_el1) | TCR_HD;
1619 
1620 	write_sysreg(tcr, tcr_el1);
1621 	isb();
1622 	local_flush_tlb_all();
1623 }
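
/*
 * Background note (an illustrative summary of the architecture, not a
 * statement about this code): with TCR_EL1.HD set, the first write to a
 * page whose PTE has the DBM bit set causes the hardware itself to clear
 * the read-only permission, marking the page dirty without taking a
 * permission fault.
 */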
1624 
1625 static bool cpu_has_broken_dbm(void)
1626 {
1627 	/* List of CPUs which have broken DBM support. */
1628 	static const struct midr_range cpus[] = {
1629 #ifdef CONFIG_ARM64_ERRATUM_1024718
1630 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
1631 		/* Kryo4xx Silver (rdpe => r1p0) */
1632 		MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
1633 #endif
1634 		{},
1635 	};
1636 
1637 	return is_midr_in_range_list(read_cpuid_id(), cpus);
1638 }
1639 
1640 static bool cpu_can_use_dbm(const struct arm64_cpu_capabilities *cap)
1641 {
1642 	return has_cpuid_feature(cap, SCOPE_LOCAL_CPU) &&
1643 	       !cpu_has_broken_dbm();
1644 }
1645 
1646 static void cpu_enable_hw_dbm(struct arm64_cpu_capabilities const *cap)
1647 {
1648 	if (cpu_can_use_dbm(cap))
1649 		__cpu_enable_hw_dbm();
1650 }
1651 
1652 static bool has_hw_dbm(const struct arm64_cpu_capabilities *cap,
1653 		       int __unused)
1654 {
1655 	static bool detected = false;
1656 	/*
1657 	 * DBM is a non-conflicting feature, i.e. the kernel can safely
1658 	 * run a mix of CPUs with and without the feature. So we
1659 	 * unconditionally enable the capability to allow any late CPU
1660 	 * to use the feature. We only enable the control bits on a
1661 	 * CPU if it actually supports the feature.
1662 	 *
1663 	 * We have to make sure we print the "feature" detection only
1664 	 * when at least one CPU actually uses it. So check if this CPU
1665 	 * can actually use it and print the message exactly once.
1666 	 *
1667 	 * This is safe as all CPUs (including secondary CPUs - due to the
1668 	 * LOCAL_CPU scope - and the hotplugged CPUs - via verification)
1669 	 * go through the "matches" check exactly once. Also, if a CPU
1670 	 * matches the criteria, it is guaranteed that the CPU will turn
1671 	 * DBM on, as the capability is unconditionally enabled.
1672 	 */
1673 	if (!detected && cpu_can_use_dbm(cap)) {
1674 		detected = true;
1675 		pr_info("detected: Hardware dirty bit management\n");
1676 	}
1677 
1678 	return true;
1679 }
1680 
1681 #endif
1682 
1683 #ifdef CONFIG_ARM64_AMU_EXTN
1684 
1685 /*
1686  * The "amu_cpus" cpumask only signals that the CPU implementations for the
1687  * flagged CPUs support the Activity Monitors Unit (AMU); it does not provide
1688  * information regarding all the events that an implementation supports. When
1689  * a CPU bit is set in the cpumask, the user of this feature can only rely on
1690  * the presence of the 4 fixed counters for that CPU. It does not guarantee
1691  * that the counters are enabled, or that access to them has been enabled by
1692  * code executed at higher exception levels (firmware).
1693  */
1694 static struct cpumask amu_cpus __read_mostly;
1695 
1696 bool cpu_has_amu_feat(int cpu)
1697 {
1698 	return cpumask_test_cpu(cpu, &amu_cpus);
1699 }
1700 
1701 int get_cpu_with_amu_feat(void)
1702 {
1703 	return cpumask_any(&amu_cpus);
1704 }
1705 
1706 static void cpu_amu_enable(struct arm64_cpu_capabilities const *cap)
1707 {
1708 	if (has_cpuid_feature(cap, SCOPE_LOCAL_CPU)) {
1709 		pr_info("detected CPU%d: Activity Monitors Unit (AMU)\n",
1710 			smp_processor_id());
1711 		cpumask_set_cpu(smp_processor_id(), &amu_cpus);
1712 		update_freq_counters_refs();
1713 	}
1714 }
1715 
1716 static bool has_amu(const struct arm64_cpu_capabilities *cap,
1717 		    int __unused)
1718 {
1719 	/*
1720 	 * The AMU extension is a non-conflicting feature: the kernel can
1721 	 * safely run a mix of CPUs with and without support for the
1722 	 * activity monitors extension. Therefore, unconditionally enable
1723 	 * the capability to allow any late CPU to use the feature.
1724 	 *
1725 	 * With this feature unconditionally enabled, the cpu_enable
1726 	 * function will be called for all CPUs that match the criteria,
1727 	 * including secondary and hotplugged, marking this feature as
1728 	 * present on that respective CPU. The enable function will also
1729 	 * print a detection message.
1730 	 */
1731 
1732 	return true;
1733 }
1734 #else
1735 int get_cpu_with_amu_feat(void)
1736 {
1737 	return nr_cpu_ids;
1738 }
1739 #endif
1740 
1741 static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused)
1742 {
1743 	return is_kernel_in_hyp_mode();
1744 }
1745 
1746 static void cpu_copy_el2regs(const struct arm64_cpu_capabilities *__unused)
1747 {
1748 	/*
1749 	 * Copy register values that aren't redirected by hardware.
1750 	 *
1751 	 * Before code patching, we only set tpidr_el1, so all CPUs need to
1752 	 * copy this value to tpidr_el2 before we patch the code. Once we've
1753 	 * done that, freshly-onlined CPUs will set tpidr_el2 themselves, so
1754 	 * we don't need to do anything here.
1755 	 */
1756 	if (!alternative_is_applied(ARM64_HAS_VIRT_HOST_EXTN))
1757 		write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
1758 }
1759 
1760 static void cpu_has_fwb(const struct arm64_cpu_capabilities *__unused)
1761 {
1762 	u64 val = read_sysreg_s(SYS_CLIDR_EL1);
1763 
1764 	/* Check that CLIDR_EL1.LOU{U,IS} are both 0 */
1765 	WARN_ON(CLIDR_LOUU(val) || CLIDR_LOUIS(val));
1766 }
1767 
1768 #ifdef CONFIG_ARM64_PAN
1769 static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
1770 {
1771 	/*
1772 	 * We modify PSTATE. This won't work from irq context as the PSTATE
1773 	 * is discarded once we return from the exception.
1774 	 */
1775 	WARN_ON_ONCE(in_interrupt());
1776 
1777 	sysreg_clear_set(sctlr_el1, SCTLR_EL1_SPAN, 0);
1778 	set_pstate_pan(1);
1779 }
1780 #endif /* CONFIG_ARM64_PAN */
1781 
1782 #ifdef CONFIG_ARM64_RAS_EXTN
1783 static void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
1784 {
1785 	/* Firmware may have left a deferred SError in this register. */
1786 	write_sysreg_s(0, SYS_DISR_EL1);
1787 }
1788 #endif /* CONFIG_ARM64_RAS_EXTN */
1789 
1790 #ifdef CONFIG_ARM64_PTR_AUTH
1791 static bool has_address_auth_cpucap(const struct arm64_cpu_capabilities *entry, int scope)
1792 {
1793 	int boot_val, sec_val;
1794 
1795 	/* We don't expect to be called with SCOPE_SYSTEM */
1796 	WARN_ON(scope == SCOPE_SYSTEM);
1797 	/*
1798 	 * The ptr-auth feature levels are not compatible with lower
1799 	 * levels. Hence we must match the ptr-auth feature level of the
1800 	 * secondary CPUs with that of the boot CPU. The boot CPU's level
1801 	 * is fetched from the sanitised register, whereas a direct
1802 	 * register read is done for the secondary CPUs.
1803 	 * The sanitised feature state is guaranteed to match that of the
1804 	 * boot CPU, as a mismatched secondary CPU is parked before it
1805 	 * gets a chance to update the state with its capability.
1806 	 */
1807 	boot_val = cpuid_feature_extract_field(read_sanitised_ftr_reg(entry->sys_reg),
1808 					       entry->field_pos, entry->sign);
1809 	if (scope & SCOPE_BOOT_CPU)
1810 		return boot_val >= entry->min_field_value;
1811 	/* Now check for the secondary CPUs with SCOPE_LOCAL_CPU scope */
1812 	sec_val = cpuid_feature_extract_field(__read_sysreg_by_encoding(entry->sys_reg),
1813 					      entry->field_pos, entry->sign);
1814 	return sec_val == boot_val;
1815 }
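
/*
 * Worked example (illustrative only): if the boot CPU's sanitised APA
 * level is 1 and a late CPU reports APA = 3, the exact-match test above
 * fails even though 3 is numerically "better", since differing ptr-auth
 * levels cannot be mixed across CPUs.
 */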
1816 
1817 static bool has_address_auth_metacap(const struct arm64_cpu_capabilities *entry,
1818 				     int scope)
1819 {
1820 	return has_address_auth_cpucap(cpu_hwcaps_ptrs[ARM64_HAS_ADDRESS_AUTH_ARCH], scope) ||
1821 	       has_address_auth_cpucap(cpu_hwcaps_ptrs[ARM64_HAS_ADDRESS_AUTH_IMP_DEF], scope);
1822 }
1823 
1824 static bool has_generic_auth(const struct arm64_cpu_capabilities *entry,
1825 			     int __unused)
1826 {
1827 	return __system_matches_cap(ARM64_HAS_GENERIC_AUTH_ARCH) ||
1828 	       __system_matches_cap(ARM64_HAS_GENERIC_AUTH_IMP_DEF);
1829 }
1830 #endif /* CONFIG_ARM64_PTR_AUTH */
1831 
1832 #ifdef CONFIG_ARM64_E0PD
1833 static void cpu_enable_e0pd(struct arm64_cpu_capabilities const *cap)
1834 {
1835 	if (this_cpu_has_cap(ARM64_HAS_E0PD))
1836 		sysreg_clear_set(tcr_el1, 0, TCR_E0PD1);
1837 }
1838 #endif /* CONFIG_ARM64_E0PD */
1839 
1840 #ifdef CONFIG_ARM64_PSEUDO_NMI
1841 static bool enable_pseudo_nmi;
1842 
1843 static int __init early_enable_pseudo_nmi(char *p)
1844 {
1845 	return strtobool(p, &enable_pseudo_nmi);
1846 }
1847 early_param("irqchip.gicv3_pseudo_nmi", early_enable_pseudo_nmi);
1848 
1849 static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry,
1850 				   int scope)
1851 {
1852 	return enable_pseudo_nmi && has_useable_gicv3_cpuif(entry, scope);
1853 }
1854 #endif
1855 
1856 #ifdef CONFIG_ARM64_BTI
1857 static void bti_enable(const struct arm64_cpu_capabilities *__unused)
1858 {
1859 	/*
1860 	 * Use of X16/X17 for tail-calls and trampolines that jump to
1861 	 * function entry points using BR is a requirement for
1862 	 * marking binaries with GNU_PROPERTY_AARCH64_FEATURE_1_BTI.
1863 	 * So, be strict and forbid BRs that use other registers to
1864 	 * jump onto a PACIxSP instruction:
1865 	 */
1866 	sysreg_clear_set(sctlr_el1, 0, SCTLR_EL1_BT0 | SCTLR_EL1_BT1);
1867 	isb();
1868 }
1869 #endif /* CONFIG_ARM64_BTI */
1870 
1871 #ifdef CONFIG_ARM64_MTE
1872 static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
1873 {
1874 	sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_ATA | SCTLR_EL1_ATA0);
1875 	isb();
1876 
1877 	/*
1878 	 * Clear the tags in the zero page. This needs to be done via the
1879 	 * linear map which has the Tagged attribute.
1880 	 */
1881 	if (!test_and_set_bit(PG_mte_tagged, &ZERO_PAGE(0)->flags))
1882 		mte_clear_page_tags(lm_alias(empty_zero_page));
1883 
1884 	kasan_init_hw_tags_cpu();
1885 }
1886 #endif /* CONFIG_ARM64_MTE */
1887 
1888 #ifdef CONFIG_KVM
1889 static bool is_kvm_protected_mode(const struct arm64_cpu_capabilities *entry, int __unused)
1890 {
1891 	if (kvm_get_mode() != KVM_MODE_PROTECTED)
1892 		return false;
1893 
1894 	if (is_kernel_in_hyp_mode()) {
1895 		pr_warn("Protected KVM not available with VHE\n");
1896 		return false;
1897 	}
1898 
1899 	return true;
1900 }
1901 #endif /* CONFIG_KVM */
1902 
1903 /* Internal helper functions to match cpu capability type */
1904 static bool
1905 cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap)
1906 {
1907 	return !!(cap->type & ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU);
1908 }
1909 
1910 static bool
1911 cpucap_late_cpu_permitted(const struct arm64_cpu_capabilities *cap)
1912 {
1913 	return !!(cap->type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU);
1914 }
1915 
1916 static bool
1917 cpucap_panic_on_conflict(const struct arm64_cpu_capabilities *cap)
1918 {
1919 	return !!(cap->type & ARM64_CPUCAP_PANIC_ON_CONFLICT);
1920 }
1921 
1922 static const struct arm64_cpu_capabilities arm64_features[] = {
1923 	{
1924 		.desc = "GIC system register CPU interface",
1925 		.capability = ARM64_HAS_SYSREG_GIC_CPUIF,
1926 		.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
1927 		.matches = has_useable_gicv3_cpuif,
1928 		.sys_reg = SYS_ID_AA64PFR0_EL1,
1929 		.field_pos = ID_AA64PFR0_GIC_SHIFT,
1930 		.sign = FTR_UNSIGNED,
1931 		.min_field_value = 1,
1932 	},
1933 	{
1934 		.desc = "Enhanced Counter Virtualization",
1935 		.capability = ARM64_HAS_ECV,
1936 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
1937 		.matches = has_cpuid_feature,
1938 		.sys_reg = SYS_ID_AA64MMFR0_EL1,
1939 		.field_pos = ID_AA64MMFR0_ECV_SHIFT,
1940 		.sign = FTR_UNSIGNED,
1941 		.min_field_value = 1,
1942 	},
1943 #ifdef CONFIG_ARM64_PAN
1944 	{
1945 		.desc = "Privileged Access Never",
1946 		.capability = ARM64_HAS_PAN,
1947 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
1948 		.matches = has_cpuid_feature,
1949 		.sys_reg = SYS_ID_AA64MMFR1_EL1,
1950 		.field_pos = ID_AA64MMFR1_PAN_SHIFT,
1951 		.sign = FTR_UNSIGNED,
1952 		.min_field_value = 1,
1953 		.cpu_enable = cpu_enable_pan,
1954 	},
1955 #endif /* CONFIG_ARM64_PAN */
1956 #ifdef CONFIG_ARM64_EPAN
1957 	{
1958 		.desc = "Enhanced Privileged Access Never",
1959 		.capability = ARM64_HAS_EPAN,
1960 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
1961 		.matches = has_cpuid_feature,
1962 		.sys_reg = SYS_ID_AA64MMFR1_EL1,
1963 		.field_pos = ID_AA64MMFR1_PAN_SHIFT,
1964 		.sign = FTR_UNSIGNED,
1965 		.min_field_value = 3,
1966 	},
1967 #endif /* CONFIG_ARM64_EPAN */
1968 #ifdef CONFIG_ARM64_LSE_ATOMICS
1969 	{
1970 		.desc = "LSE atomic instructions",
1971 		.capability = ARM64_HAS_LSE_ATOMICS,
1972 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
1973 		.matches = has_cpuid_feature,
1974 		.sys_reg = SYS_ID_AA64ISAR0_EL1,
1975 		.field_pos = ID_AA64ISAR0_ATOMICS_SHIFT,
1976 		.sign = FTR_UNSIGNED,
1977 		.min_field_value = 2,
1978 	},
1979 #endif /* CONFIG_ARM64_LSE_ATOMICS */
1980 	{
1981 		.desc = "Software prefetching using PRFM",
1982 		.capability = ARM64_HAS_NO_HW_PREFETCH,
1983 		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
1984 		.matches = has_no_hw_prefetch,
1985 	},
1986 	{
1987 		.desc = "Virtualization Host Extensions",
1988 		.capability = ARM64_HAS_VIRT_HOST_EXTN,
1989 		.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
1990 		.matches = runs_at_el2,
1991 		.cpu_enable = cpu_copy_el2regs,
1992 	},
1993 	{
1994 		.capability = ARM64_HAS_32BIT_EL0_DO_NOT_USE,
1995 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
1996 		.matches = has_32bit_el0,
1997 		.sys_reg = SYS_ID_AA64PFR0_EL1,
1998 		.sign = FTR_UNSIGNED,
1999 		.field_pos = ID_AA64PFR0_EL0_SHIFT,
2000 		.min_field_value = ID_AA64PFR0_ELx_32BIT_64BIT,
2001 	},
2002 #ifdef CONFIG_KVM
2003 	{
2004 		.desc = "32-bit EL1 Support",
2005 		.capability = ARM64_HAS_32BIT_EL1,
2006 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
2007 		.matches = has_cpuid_feature,
2008 		.sys_reg = SYS_ID_AA64PFR0_EL1,
2009 		.sign = FTR_UNSIGNED,
2010 		.field_pos = ID_AA64PFR0_EL1_SHIFT,
2011 		.min_field_value = ID_AA64PFR0_ELx_32BIT_64BIT,
2012 	},
2013 	{
2014 		.desc = "Protected KVM",
2015 		.capability = ARM64_KVM_PROTECTED_MODE,
2016 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
2017 		.matches = is_kvm_protected_mode,
2018 	},
2019 #endif
2020 	{
2021 		.desc = "Kernel page table isolation (KPTI)",
2022 		.capability = ARM64_UNMAP_KERNEL_AT_EL0,
2023 		.type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE,
2024 		/*
2025 		 * The ID feature fields below are used to indicate that
2026 		 * the CPU doesn't need KPTI. See unmap_kernel_at_el0 for
2027 		 * more details.
2028 		 */
2029 		.sys_reg = SYS_ID_AA64PFR0_EL1,
2030 		.field_pos = ID_AA64PFR0_CSV3_SHIFT,
2031 		.min_field_value = 1,
2032 		.matches = unmap_kernel_at_el0,
2033 		.cpu_enable = kpti_install_ng_mappings,
2034 	},
2035 	{
2036 		/* FP/SIMD is not implemented */
2037 		.capability = ARM64_HAS_NO_FPSIMD,
2038 		.type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE,
2039 		.min_field_value = 0,
2040 		.matches = has_no_fpsimd,
2041 	},
2042 #ifdef CONFIG_ARM64_PMEM
2043 	{
2044 		.desc = "Data cache clean to Point of Persistence",
2045 		.capability = ARM64_HAS_DCPOP,
2046 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
2047 		.matches = has_cpuid_feature,
2048 		.sys_reg = SYS_ID_AA64ISAR1_EL1,
2049 		.field_pos = ID_AA64ISAR1_DPB_SHIFT,
2050 		.min_field_value = 1,
2051 	},
2052 	{
2053 		.desc = "Data cache clean to Point of Deep Persistence",
2054 		.capability = ARM64_HAS_DCPODP,
2055 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
2056 		.matches = has_cpuid_feature,
2057 		.sys_reg = SYS_ID_AA64ISAR1_EL1,
2058 		.sign = FTR_UNSIGNED,
2059 		.field_pos = ID_AA64ISAR1_DPB_SHIFT,
2060 		.min_field_value = 2,
2061 	},
2062 #endif
2063 #ifdef CONFIG_ARM64_SVE
2064 	{
2065 		.desc = "Scalable Vector Extension",
2066 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
2067 		.capability = ARM64_SVE,
2068 		.sys_reg = SYS_ID_AA64PFR0_EL1,
2069 		.sign = FTR_UNSIGNED,
2070 		.field_pos = ID_AA64PFR0_SVE_SHIFT,
2071 		.min_field_value = ID_AA64PFR0_SVE,
2072 		.matches = has_cpuid_feature,
2073 		.cpu_enable = sve_kernel_enable,
2074 	},
2075 #endif /* CONFIG_ARM64_SVE */
2076 #ifdef CONFIG_ARM64_RAS_EXTN
2077 	{
2078 		.desc = "RAS Extension Support",
2079 		.capability = ARM64_HAS_RAS_EXTN,
2080 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
2081 		.matches = has_cpuid_feature,
2082 		.sys_reg = SYS_ID_AA64PFR0_EL1,
2083 		.sign = FTR_UNSIGNED,
2084 		.field_pos = ID_AA64PFR0_RAS_SHIFT,
2085 		.min_field_value = ID_AA64PFR0_RAS_V1,
2086 		.cpu_enable = cpu_clear_disr,
2087 	},
2088 #endif /* CONFIG_ARM64_RAS_EXTN */
2089 #ifdef CONFIG_ARM64_AMU_EXTN
2090 	{
2091 		/*
2092 		 * The feature is enabled by default if CONFIG_ARM64_AMU_EXTN=y.
2093 		 * Therefore, don't provide .desc as we don't want the detection
2094 		 * message to be shown until at least one CPU is detected to
2095 		 * support the feature.
2096 		 */
2097 		.capability = ARM64_HAS_AMU_EXTN,
2098 		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
2099 		.matches = has_amu,
2100 		.sys_reg = SYS_ID_AA64PFR0_EL1,
2101 		.sign = FTR_UNSIGNED,
2102 		.field_pos = ID_AA64PFR0_AMU_SHIFT,
2103 		.min_field_value = ID_AA64PFR0_AMU,
2104 		.cpu_enable = cpu_amu_enable,
2105 	},
2106 #endif /* CONFIG_ARM64_AMU_EXTN */
2107 	{
2108 		.desc = "Data cache clean to the PoU not required for I/D coherence",
2109 		.capability = ARM64_HAS_CACHE_IDC,
2110 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
2111 		.matches = has_cache_idc,
2112 		.cpu_enable = cpu_emulate_effective_ctr,
2113 	},
2114 	{
2115 		.desc = "Instruction cache invalidation not required for I/D coherence",
2116 		.capability = ARM64_HAS_CACHE_DIC,
2117 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
2118 		.matches = has_cache_dic,
2119 	},
2120 	{
2121 		.desc = "Stage-2 Force Write-Back",
2122 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
2123 		.capability = ARM64_HAS_STAGE2_FWB,
2124 		.sys_reg = SYS_ID_AA64MMFR2_EL1,
2125 		.sign = FTR_UNSIGNED,
2126 		.field_pos = ID_AA64MMFR2_FWB_SHIFT,
2127 		.min_field_value = 1,
2128 		.matches = has_cpuid_feature,
2129 		.cpu_enable = cpu_has_fwb,
2130 	},
2131 	{
2132 		.desc = "ARMv8.4 Translation Table Level",
2133 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
2134 		.capability = ARM64_HAS_ARMv8_4_TTL,
2135 		.sys_reg = SYS_ID_AA64MMFR2_EL1,
2136 		.sign = FTR_UNSIGNED,
2137 		.field_pos = ID_AA64MMFR2_TTL_SHIFT,
2138 		.min_field_value = 1,
2139 		.matches = has_cpuid_feature,
2140 	},
2141 	{
2142 		.desc = "TLB range maintenance instructions",
2143 		.capability = ARM64_HAS_TLB_RANGE,
2144 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
2145 		.matches = has_cpuid_feature,
2146 		.sys_reg = SYS_ID_AA64ISAR0_EL1,
2147 		.field_pos = ID_AA64ISAR0_TLB_SHIFT,
2148 		.sign = FTR_UNSIGNED,
2149 		.min_field_value = ID_AA64ISAR0_TLB_RANGE,
2150 	},
2151 #ifdef CONFIG_ARM64_HW_AFDBM
2152 	{
2153 		 * Since we always turn this on, we don't want the user to
2154 		 * Since we turn this on always, we don't want the user to
2155 		 * think that the feature is available when it may not be.
2156 		 * So hide the description.
2157 		 *
2158 		 * .desc = "Hardware pagetable Dirty Bit Management",
2159 		 *
2160 		 */
2161 		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
2162 		.capability = ARM64_HW_DBM,
2163 		.sys_reg = SYS_ID_AA64MMFR1_EL1,
2164 		.sign = FTR_UNSIGNED,
2165 		.field_pos = ID_AA64MMFR1_HADBS_SHIFT,
2166 		.min_field_value = 2,
2167 		.matches = has_hw_dbm,
2168 		.cpu_enable = cpu_enable_hw_dbm,
2169 	},
2170 #endif
2171 	{
2172 		.desc = "CRC32 instructions",
2173 		.capability = ARM64_HAS_CRC32,
2174 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
2175 		.matches = has_cpuid_feature,
2176 		.sys_reg = SYS_ID_AA64ISAR0_EL1,
2177 		.field_pos = ID_AA64ISAR0_CRC32_SHIFT,
2178 		.min_field_value = 1,
2179 	},
2180 	{
2181 		.desc = "Speculative Store Bypassing Safe (SSBS)",
2182 		.capability = ARM64_SSBS,
2183 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
2184 		.matches = has_cpuid_feature,
2185 		.sys_reg = SYS_ID_AA64PFR1_EL1,
2186 		.field_pos = ID_AA64PFR1_SSBS_SHIFT,
2187 		.sign = FTR_UNSIGNED,
2188 		.min_field_value = ID_AA64PFR1_SSBS_PSTATE_ONLY,
2189 	},
2190 #ifdef CONFIG_ARM64_CNP
2191 	{
2192 		.desc = "Common not Private translations",
2193 		.capability = ARM64_HAS_CNP,
2194 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
2195 		.matches = has_useable_cnp,
2196 		.sys_reg = SYS_ID_AA64MMFR2_EL1,
2197 		.sign = FTR_UNSIGNED,
2198 		.field_pos = ID_AA64MMFR2_CNP_SHIFT,
2199 		.min_field_value = 1,
2200 		.cpu_enable = cpu_enable_cnp,
2201 	},
2202 #endif
2203 	{
2204 		.desc = "Speculation barrier (SB)",
2205 		.capability = ARM64_HAS_SB,
2206 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
2207 		.matches = has_cpuid_feature,
2208 		.sys_reg = SYS_ID_AA64ISAR1_EL1,
2209 		.field_pos = ID_AA64ISAR1_SB_SHIFT,
2210 		.sign = FTR_UNSIGNED,
2211 		.min_field_value = 1,
2212 	},
2213 #ifdef CONFIG_ARM64_PTR_AUTH
2214 	{
2215 		.desc = "Address authentication (architected algorithm)",
2216 		.capability = ARM64_HAS_ADDRESS_AUTH_ARCH,
2217 		.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
2218 		.sys_reg = SYS_ID_AA64ISAR1_EL1,
2219 		.sign = FTR_UNSIGNED,
2220 		.field_pos = ID_AA64ISAR1_APA_SHIFT,
2221 		.min_field_value = ID_AA64ISAR1_APA_ARCHITECTED,
2222 		.matches = has_address_auth_cpucap,
2223 	},
2224 	{
2225 		.desc = "Address authentication (IMP DEF algorithm)",
2226 		.capability = ARM64_HAS_ADDRESS_AUTH_IMP_DEF,
2227 		.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
2228 		.sys_reg = SYS_ID_AA64ISAR1_EL1,
2229 		.sign = FTR_UNSIGNED,
2230 		.field_pos = ID_AA64ISAR1_API_SHIFT,
2231 		.min_field_value = ID_AA64ISAR1_API_IMP_DEF,
2232 		.matches = has_address_auth_cpucap,
2233 	},
2234 	{
2235 		.capability = ARM64_HAS_ADDRESS_AUTH,
2236 		.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
2237 		.matches = has_address_auth_metacap,
2238 	},
2239 	{
2240 		.desc = "Generic authentication (architected algorithm)",
2241 		.capability = ARM64_HAS_GENERIC_AUTH_ARCH,
2242 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
2243 		.sys_reg = SYS_ID_AA64ISAR1_EL1,
2244 		.sign = FTR_UNSIGNED,
2245 		.field_pos = ID_AA64ISAR1_GPA_SHIFT,
2246 		.min_field_value = ID_AA64ISAR1_GPA_ARCHITECTED,
2247 		.matches = has_cpuid_feature,
2248 	},
2249 	{
2250 		.desc = "Generic authentication (IMP DEF algorithm)",
2251 		.capability = ARM64_HAS_GENERIC_AUTH_IMP_DEF,
2252 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
2253 		.sys_reg = SYS_ID_AA64ISAR1_EL1,
2254 		.sign = FTR_UNSIGNED,
2255 		.field_pos = ID_AA64ISAR1_GPI_SHIFT,
2256 		.min_field_value = ID_AA64ISAR1_GPI_IMP_DEF,
2257 		.matches = has_cpuid_feature,
2258 	},
2259 	{
2260 		.capability = ARM64_HAS_GENERIC_AUTH,
2261 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
2262 		.matches = has_generic_auth,
2263 	},
2264 #endif /* CONFIG_ARM64_PTR_AUTH */
2265 #ifdef CONFIG_ARM64_PSEUDO_NMI
2266 	{
2267 		/*
2268 		 * Depends on having GICv3
2269 		 */
2270 		.desc = "IRQ priority masking",
2271 		.capability = ARM64_HAS_IRQ_PRIO_MASKING,
2272 		.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
2273 		.matches = can_use_gic_priorities,
2274 		.sys_reg = SYS_ID_AA64PFR0_EL1,
2275 		.field_pos = ID_AA64PFR0_GIC_SHIFT,
2276 		.sign = FTR_UNSIGNED,
2277 		.min_field_value = 1,
2278 	},
2279 #endif
2280 #ifdef CONFIG_ARM64_E0PD
2281 	{
2282 		.desc = "E0PD",
2283 		.capability = ARM64_HAS_E0PD,
2284 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
2285 		.sys_reg = SYS_ID_AA64MMFR2_EL1,
2286 		.sign = FTR_UNSIGNED,
2287 		.field_pos = ID_AA64MMFR2_E0PD_SHIFT,
2288 		.matches = has_cpuid_feature,
2289 		.min_field_value = 1,
2290 		.cpu_enable = cpu_enable_e0pd,
2291 	},
2292 #endif
2293 #ifdef CONFIG_ARCH_RANDOM
2294 	{
2295 		.desc = "Random Number Generator",
2296 		.capability = ARM64_HAS_RNG,
2297 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
2298 		.matches = has_cpuid_feature,
2299 		.sys_reg = SYS_ID_AA64ISAR0_EL1,
2300 		.field_pos = ID_AA64ISAR0_RNDR_SHIFT,
2301 		.sign = FTR_UNSIGNED,
2302 		.min_field_value = 1,
2303 	},
2304 #endif
2305 #ifdef CONFIG_ARM64_BTI
2306 	{
2307 		.desc = "Branch Target Identification",
2308 		.capability = ARM64_BTI,
2309 #ifdef CONFIG_ARM64_BTI_KERNEL
2310 		.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
2311 #else
2312 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
2313 #endif
2314 		.matches = has_cpuid_feature,
2315 		.cpu_enable = bti_enable,
2316 		.sys_reg = SYS_ID_AA64PFR1_EL1,
2317 		.field_pos = ID_AA64PFR1_BT_SHIFT,
2318 		.min_field_value = ID_AA64PFR1_BT_BTI,
2319 		.sign = FTR_UNSIGNED,
2320 	},
2321 #endif
2322 #ifdef CONFIG_ARM64_MTE
2323 	{
2324 		.desc = "Memory Tagging Extension",
2325 		.capability = ARM64_MTE,
2326 		.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
2327 		.matches = has_cpuid_feature,
2328 		.sys_reg = SYS_ID_AA64PFR1_EL1,
2329 		.field_pos = ID_AA64PFR1_MTE_SHIFT,
2330 		.min_field_value = ID_AA64PFR1_MTE,
2331 		.sign = FTR_UNSIGNED,
2332 		.cpu_enable = cpu_enable_mte,
2333 	},
2334 	{
2335 		.desc = "Asymmetric MTE Tag Check Fault",
2336 		.capability = ARM64_MTE_ASYMM,
2337 		.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
2338 		.matches = has_cpuid_feature,
2339 		.sys_reg = SYS_ID_AA64PFR1_EL1,
2340 		.field_pos = ID_AA64PFR1_MTE_SHIFT,
2341 		.min_field_value = ID_AA64PFR1_MTE_ASYMM,
2342 		.sign = FTR_UNSIGNED,
2343 	},
2344 #endif /* CONFIG_ARM64_MTE */
2345 	{
2346 		.desc = "RCpc load-acquire (LDAPR)",
2347 		.capability = ARM64_HAS_LDAPR,
2348 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
2349 		.sys_reg = SYS_ID_AA64ISAR1_EL1,
2350 		.sign = FTR_UNSIGNED,
2351 		.field_pos = ID_AA64ISAR1_LRCPC_SHIFT,
2352 		.matches = has_cpuid_feature,
2353 		.min_field_value = 1,
2354 	},
2355 	{},
2356 };
2357 
2358 #define HWCAP_CPUID_MATCH(reg, field, s, min_value)				\
2359 		.matches = has_cpuid_feature,					\
2360 		.sys_reg = reg,							\
2361 		.field_pos = field,						\
2362 		.sign = s,							\
2363 		.min_field_value = min_value,
2364 
2365 #define __HWCAP_CAP(name, cap_type, cap)					\
2366 		.desc = name,							\
2367 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,				\
2368 		.hwcap_type = cap_type,						\
2369 		.hwcap = cap,							\
2370 
2371 #define HWCAP_CAP(reg, field, s, min_value, cap_type, cap)			\
2372 	{									\
2373 		__HWCAP_CAP(#cap, cap_type, cap)				\
2374 		HWCAP_CPUID_MATCH(reg, field, s, min_value)			\
2375 	}
2376 
2377 #define HWCAP_MULTI_CAP(list, cap_type, cap)					\
2378 	{									\
2379 		__HWCAP_CAP(#cap, cap_type, cap)				\
2380 		.matches = cpucap_multi_entry_cap_matches,			\
2381 		.match_list = list,						\
2382 	}
2383 
2384 #define HWCAP_CAP_MATCH(match, cap_type, cap)					\
2385 	{									\
2386 		__HWCAP_CAP(#cap, cap_type, cap)				\
2387 		.matches = match,						\
2388 	}
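
/*
 * For reference (illustrative only), one HWCAP_CAP() invocation from the
 * table below expands to roughly the following initialiser:
 *
 *	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT,
 *		  FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_CRC32)
 */
#if 0
	{
		.desc			= "KERNEL_HWCAP_CRC32",
		.type			= ARM64_CPUCAP_SYSTEM_FEATURE,
		.hwcap_type		= CAP_HWCAP,
		.hwcap			= KERNEL_HWCAP_CRC32,
		.matches		= has_cpuid_feature,
		.sys_reg		= SYS_ID_AA64ISAR0_EL1,
		.field_pos		= ID_AA64ISAR0_CRC32_SHIFT,
		.sign			= FTR_UNSIGNED,
		.min_field_value	= 1,
	},
#endif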
2389 
2390 #ifdef CONFIG_ARM64_PTR_AUTH
2391 static const struct arm64_cpu_capabilities ptr_auth_hwcap_addr_matches[] = {
2392 	{
2393 		HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_APA_SHIFT,
2394 				  FTR_UNSIGNED, ID_AA64ISAR1_APA_ARCHITECTED)
2395 	},
2396 	{
2397 		HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_API_SHIFT,
2398 				  FTR_UNSIGNED, ID_AA64ISAR1_API_IMP_DEF)
2399 	},
2400 	{},
2401 };
2402 
2403 static const struct arm64_cpu_capabilities ptr_auth_hwcap_gen_matches[] = {
2404 	{
2405 		HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_GPA_SHIFT,
2406 				  FTR_UNSIGNED, ID_AA64ISAR1_GPA_ARCHITECTED)
2407 	},
2408 	{
2409 		HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_GPI_SHIFT,
2410 				  FTR_UNSIGNED, ID_AA64ISAR1_GPI_IMP_DEF)
2411 	},
2412 	{},
2413 };
2414 #endif
2415 
2416 static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
2417 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_PMULL),
2418 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AES),
2419 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA1),
2420 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA2),
2421 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_SHA512),
2422 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_CRC32),
2423 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_ATOMICS),
2424 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RDM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDRDM),
2425 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA3),
2426 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SM3),
2427 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM4_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SM4),
2428 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDDP),
2429 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_FHM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDFHM),
2430 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FLAGM),
2431 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_FLAGM2),
2432 	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RNDR_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RNG),
2433 	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_FP),
2434 	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FPHP),
2435 	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_ASIMD),
2436 	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDHP),
2437 	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_DIT_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DIT),
2438 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DCPOP),
2439 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_DCPODP),
2440 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_JSCVT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_JSCVT),
2441 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FCMA),
2442 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_LRCPC),
2443 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_ILRCPC),
2444 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FRINTTS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FRINT),
2445 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_SB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SB),
2446 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_BF16_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_BF16),
2447 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DGH_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DGH),
2448 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_I8MM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_I8MM),
2449 	HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_USCAT),
2450 #ifdef CONFIG_ARM64_SVE
2451 	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, KERNEL_HWCAP_SVE),
2452 	HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SVEVER_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_SVEVER_SVE2, CAP_HWCAP, KERNEL_HWCAP_SVE2),
2453 	HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_AES_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_AES, CAP_HWCAP, KERNEL_HWCAP_SVEAES),
2454 	HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_AES_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_AES_PMULL, CAP_HWCAP, KERNEL_HWCAP_SVEPMULL),
2455 	HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_BITPERM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_BITPERM, CAP_HWCAP, KERNEL_HWCAP_SVEBITPERM),
2456 	HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_BF16_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_BF16, CAP_HWCAP, KERNEL_HWCAP_SVEBF16),
2457 	HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SHA3_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_SHA3, CAP_HWCAP, KERNEL_HWCAP_SVESHA3),
2458 	HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SM4_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_SM4, CAP_HWCAP, KERNEL_HWCAP_SVESM4),
2459 	HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_I8MM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_I8MM, CAP_HWCAP, KERNEL_HWCAP_SVEI8MM),
2460 	HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_F32MM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_F32MM, CAP_HWCAP, KERNEL_HWCAP_SVEF32MM),
2461 	HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_F64MM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_F64MM, CAP_HWCAP, KERNEL_HWCAP_SVEF64MM),
2462 #endif
2463 	HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, KERNEL_HWCAP_SSBS),
2464 #ifdef CONFIG_ARM64_BTI
2465 	HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_BT_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_BT_BTI, CAP_HWCAP, KERNEL_HWCAP_BTI),
2466 #endif
2467 #ifdef CONFIG_ARM64_PTR_AUTH
2468 	HWCAP_MULTI_CAP(ptr_auth_hwcap_addr_matches, CAP_HWCAP, KERNEL_HWCAP_PACA),
2469 	HWCAP_MULTI_CAP(ptr_auth_hwcap_gen_matches, CAP_HWCAP, KERNEL_HWCAP_PACG),
2470 #endif
2471 #ifdef CONFIG_ARM64_MTE
2472 	HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_MTE_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_MTE, CAP_HWCAP, KERNEL_HWCAP_MTE),
2473 #endif /* CONFIG_ARM64_MTE */
2474 	HWCAP_CAP(SYS_ID_AA64MMFR0_EL1, ID_AA64MMFR0_ECV_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ECV),
2475 	{},
2476 };
2477 
2478 #ifdef CONFIG_COMPAT
2479 static bool compat_has_neon(const struct arm64_cpu_capabilities *cap, int scope)
2480 {
2481 	/*
2482 	 * Check that all of MVFR1_EL1.{SIMDSP, SIMDInt, SIMDLS} are available,
2483 	 * in line with the arm32 check in vfp_init(). We make the check
2484 	 * future-proof by requiring a non-zero field value.
2485 	 */
2486 	u32 mvfr1;
2487 
2488 	WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
2489 	if (scope == SCOPE_SYSTEM)
2490 		mvfr1 = read_sanitised_ftr_reg(SYS_MVFR1_EL1);
2491 	else
2492 		mvfr1 = read_sysreg_s(SYS_MVFR1_EL1);
2493 
2494 	return cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDSP_SHIFT) &&
2495 		cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDINT_SHIFT) &&
2496 		cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDLS_SHIFT);
2497 }
2498 #endif
2499 
2500 static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
2501 #ifdef CONFIG_COMPAT
2502 	HWCAP_CAP_MATCH(compat_has_neon, CAP_COMPAT_HWCAP, COMPAT_HWCAP_NEON),
2503 	HWCAP_CAP(SYS_MVFR1_EL1, MVFR1_SIMDFMAC_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv4),
2504 	/* Arm v8 mandates MVFR0.FPDP == {0, 2}. So, piggy-back on this for the presence of VFP support. */
2505 	HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFP),
2506 	HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv3),
2507 	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
2508 	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
2509 	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),
2510 	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA2),
2511 	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32),
2512 #endif
2513 	{},
2514 };
2515 
2516 static void cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap)
2517 {
2518 	switch (cap->hwcap_type) {
2519 	case CAP_HWCAP:
2520 		cpu_set_feature(cap->hwcap);
2521 		break;
2522 #ifdef CONFIG_COMPAT
2523 	case CAP_COMPAT_HWCAP:
2524 		compat_elf_hwcap |= (u32)cap->hwcap;
2525 		break;
2526 	case CAP_COMPAT_HWCAP2:
2527 		compat_elf_hwcap2 |= (u32)cap->hwcap;
2528 		break;
2529 #endif
2530 	default:
2531 		WARN_ON(1);
2532 		break;
2533 	}
2534 }
2535 
2536 /* Check if we have a particular HWCAP enabled */
2537 static bool cpus_have_elf_hwcap(const struct arm64_cpu_capabilities *cap)
2538 {
2539 	bool rc;
2540 
2541 	switch (cap->hwcap_type) {
2542 	case CAP_HWCAP:
2543 		rc = cpu_have_feature(cap->hwcap);
2544 		break;
2545 #ifdef CONFIG_COMPAT
2546 	case CAP_COMPAT_HWCAP:
2547 		rc = (compat_elf_hwcap & (u32)cap->hwcap) != 0;
2548 		break;
2549 	case CAP_COMPAT_HWCAP2:
2550 		rc = (compat_elf_hwcap2 & (u32)cap->hwcap) != 0;
2551 		break;
2552 #endif
2553 	default:
2554 		WARN_ON(1);
2555 		rc = false;
2556 	}
2557 
2558 	return rc;
2559 }
2560 
2561 static void setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
2562 {
2563 	/* We support emulation of accesses to CPU ID feature registers */
2564 	cpu_set_named_feature(CPUID);
2565 	for (; hwcaps->matches; hwcaps++)
2566 		if (hwcaps->matches(hwcaps, cpucap_default_scope(hwcaps)))
2567 			cap_set_elf_hwcap(hwcaps);
2568 }
2569 
2570 static void update_cpu_capabilities(u16 scope_mask)
2571 {
2572 	int i;
2573 	const struct arm64_cpu_capabilities *caps;
2574 
2575 	scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
2576 	for (i = 0; i < ARM64_NCAPS; i++) {
2577 		caps = cpu_hwcaps_ptrs[i];
2578 		if (!caps || !(caps->type & scope_mask) ||
2579 		    cpus_have_cap(caps->capability) ||
2580 		    !caps->matches(caps, cpucap_default_scope(caps)))
2581 			continue;
2582 
2583 		if (caps->desc)
2584 			pr_info("detected: %s\n", caps->desc);
2585 		cpus_set_cap(caps->capability);
2586 
2587 		if ((scope_mask & SCOPE_BOOT_CPU) && (caps->type & SCOPE_BOOT_CPU))
2588 			set_bit(caps->capability, boot_capabilities);
2589 	}
2590 }
2591 
2592 /*
2593  * Enable all the available capabilities on this CPU. The capabilities
2594  * with BOOT_CPU scope are handled separately and hence skipped here.
2595  */
2596 static int cpu_enable_non_boot_scope_capabilities(void *__unused)
2597 {
2598 	int i;
2599 	u16 non_boot_scope = SCOPE_ALL & ~SCOPE_BOOT_CPU;
2600 
2601 	for_each_available_cap(i) {
2602 		const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[i];
2603 
2604 		if (WARN_ON(!cap))
2605 			continue;
2606 
2607 		if (!(cap->type & non_boot_scope))
2608 			continue;
2609 
2610 		if (cap->cpu_enable)
2611 			cap->cpu_enable(cap);
2612 	}
2613 	return 0;
2614 }
2615 
2616 /*
2617  * Run through the enabled capabilities and call cpu_enable() for each
2618  * of them on all active CPUs.
2619  */
2620 static void __init enable_cpu_capabilities(u16 scope_mask)
2621 {
2622 	int i;
2623 	const struct arm64_cpu_capabilities *caps;
2624 	bool boot_scope;
2625 
2626 	scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
2627 	boot_scope = !!(scope_mask & SCOPE_BOOT_CPU);
2628 
2629 	for (i = 0; i < ARM64_NCAPS; i++) {
2630 		unsigned int num;
2631 
2632 		caps = cpu_hwcaps_ptrs[i];
2633 		if (!caps || !(caps->type & scope_mask))
2634 			continue;
2635 		num = caps->capability;
2636 		if (!cpus_have_cap(num))
2637 			continue;
2638 
2639 		/* Ensure cpus_have_const_cap(num) works */
2640 		static_branch_enable(&cpu_hwcap_keys[num]);
2641 
2642 		if (boot_scope && caps->cpu_enable)
2643 			/*
2644 			 * Capabilities with SCOPE_BOOT_CPU scope are finalised
2645 			 * before any secondary CPU boots. Thus, each secondary
2646 			 * will enable the capability as appropriate via
2647 			 * check_local_cpu_capabilities(). The only exception is
2648 			 * the boot CPU, for which the capability must be
2649 			 * enabled here. This approach avoids costly
2650 			 * stop_machine() calls for this case.
2651 			 */
2652 			caps->cpu_enable(caps);
2653 	}
2654 
2655 	/*
2656 	 * For all non-boot scope capabilities, use stop_machine()
2657 	 * as it schedules the work allowing us to modify PSTATE,
2658 	 * instead of on_each_cpu() which uses an IPI, giving us a
2659 	 * PSTATE that disappears when we return.
2660 	 */
2661 	if (!boot_scope)
2662 		stop_machine(cpu_enable_non_boot_scope_capabilities,
2663 			     NULL, cpu_online_mask);
2664 }
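
/*
 * Concrete example of the PSTATE point above: cpu_enable_pan() earlier in
 * this file calls set_pstate_pan(1), which modifies PSTATE and would be
 * thrown away on exception return if it ran from IPI context; running the
 * enable methods under stop_machine() avoids that.
 */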
2665 
2666 /*
2667  * Run through the list of capabilities to check for conflicts.
2668  * If the system has already detected a capability, take necessary
2669  * action on this CPU.
2670  */
2671 static void verify_local_cpu_caps(u16 scope_mask)
2672 {
2673 	int i;
2674 	bool cpu_has_cap, system_has_cap;
2675 	const struct arm64_cpu_capabilities *caps;
2676 
2677 	scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
2678 
2679 	for (i = 0; i < ARM64_NCAPS; i++) {
2680 		caps = cpu_hwcaps_ptrs[i];
2681 		if (!caps || !(caps->type & scope_mask))
2682 			continue;
2683 
2684 		cpu_has_cap = caps->matches(caps, SCOPE_LOCAL_CPU);
2685 		system_has_cap = cpus_have_cap(caps->capability);
2686 
2687 		if (system_has_cap) {
2688 			/*
2689 			 * Check if the new CPU misses an advertised feature,
2690 			 * which is not safe to miss.
2691 			 */
2692 			if (!cpu_has_cap && !cpucap_late_cpu_optional(caps))
2693 				break;
2694 			/*
2695 			 * We have to issue cpu_enable() irrespective of
2696 			 * whether the CPU has it or not, as it is enabled
2697 			 * system wide. It is up to the callback to take
2698 			 * appropriate action on this CPU.
2699 			 */
2700 			if (caps->cpu_enable)
2701 				caps->cpu_enable(caps);
2702 		} else {
2703 			/*
2704 			 * Check if the CPU has a capability that it isn't
2705 			 * safe to have when the system as a whole doesn't.
2706 			 */
2707 			if (cpu_has_cap && !cpucap_late_cpu_permitted(caps))
2708 				break;
2709 		}
2710 	}
2711 
2712 	if (i < ARM64_NCAPS) {
2713 		pr_crit("CPU%d: Detected conflict for capability %d (%s), System: %d, CPU: %d\n",
2714 			smp_processor_id(), caps->capability,
2715 			caps->desc, system_has_cap, cpu_has_cap);
2716 
2717 		if (cpucap_panic_on_conflict(caps))
2718 			cpu_panic_kernel();
2719 		else
2720 			cpu_die_early();
2721 	}
2722 }
2723 
2724 /*
2725  * Check for CPU features that are used in early boot
2726  * based on the Boot CPU value.
2727  */
2728 static void check_early_cpu_features(void)
2729 {
2730 	verify_cpu_asid_bits();
2731 
2732 	verify_local_cpu_caps(SCOPE_BOOT_CPU);
2733 }
2734 
2735 static void
2736 __verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
2737 {
2739 	for (; caps->matches; caps++)
2740 		if (cpus_have_elf_hwcap(caps) && !caps->matches(caps, SCOPE_LOCAL_CPU)) {
2741 			pr_crit("CPU%d: missing HWCAP: %s\n",
2742 					smp_processor_id(), caps->desc);
2743 			cpu_die_early();
2744 		}
2745 }
2746 
2747 static void verify_local_elf_hwcaps(void)
2748 {
2749 	__verify_local_elf_hwcaps(arm64_elf_hwcaps);
2750 
2751 	if (id_aa64pfr0_32bit_el0(read_cpuid(ID_AA64PFR0_EL1)))
2752 		__verify_local_elf_hwcaps(compat_elf_hwcaps);
2753 }
2754 
2755 static void verify_sve_features(void)
2756 {
2757 	u64 safe_zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
2758 	u64 zcr = read_zcr_features();
2759 
2760 	unsigned int safe_len = safe_zcr & ZCR_ELx_LEN_MASK;
2761 	unsigned int len = zcr & ZCR_ELx_LEN_MASK;
2762 
2763 	if (len < safe_len || vec_verify_vq_map(ARM64_VEC_SVE)) {
2764 		pr_crit("CPU%d: SVE: vector length support mismatch\n",
2765 			smp_processor_id());
2766 		cpu_die_early();
2767 	}
2768 
2769 	/* Add checks on other ZCR bits here if necessary */
2770 }
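
/*
 * Worked example (illustrative only): ZCR_ELx.LEN encodes the vector
 * length as (number of 128-bit quadwords - 1), so a system-wide safe LEN
 * of 3 means 512-bit vectors; a late CPU reporting LEN = 1 (256 bits)
 * fails the len < safe_len test above and is parked.
 */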
2771 
2772 static void verify_hyp_capabilities(void)
2773 {
2774 	u64 safe_mmfr1, mmfr0, mmfr1;
2775 	int parange, ipa_max;
2776 	unsigned int safe_vmid_bits, vmid_bits;
2777 
2778 	if (!IS_ENABLED(CONFIG_KVM))
2779 		return;
2780 
2781 	safe_mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
2782 	mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
2783 	mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
2784 
2785 	/* Verify VMID bits */
2786 	safe_vmid_bits = get_vmid_bits(safe_mmfr1);
2787 	vmid_bits = get_vmid_bits(mmfr1);
2788 	if (vmid_bits < safe_vmid_bits) {
2789 		pr_crit("CPU%d: VMID width mismatch\n", smp_processor_id());
2790 		cpu_die_early();
2791 	}
2792 
2793 	/* Verify IPA range */
2794 	parange = cpuid_feature_extract_unsigned_field(mmfr0,
2795 				ID_AA64MMFR0_PARANGE_SHIFT);
2796 	ipa_max = id_aa64mmfr0_parange_to_phys_shift(parange);
2797 	if (ipa_max < get_kvm_ipa_limit()) {
2798 		pr_crit("CPU%d: IPA range mismatch\n", smp_processor_id());
2799 		cpu_die_early();
2800 	}
2801 }
2802 
2803 /*
2804  * Run through the enabled system capabilities and enable each of them on
2805  * this CPU. The capabilities were decided based on the CPUs available at
2806  * boot time. Any new CPU should match the system-wide status of the
2807  * capability. If a new CPU doesn't have a capability which the system has
2808  * enabled, we cannot do anything to fix it up and it could cause
2809  * unexpected failures. So we park the CPU.
2810  */
2811 static void verify_local_cpu_capabilities(void)
2812 {
2813 	/*
2814 	 * The capabilities with SCOPE_BOOT_CPU are checked from
2815 	 * check_early_cpu_features(), as they need to be verified
2816 	 * on all secondary CPUs.
2817 	 */
2818 	verify_local_cpu_caps(SCOPE_ALL & ~SCOPE_BOOT_CPU);
2819 	verify_local_elf_hwcaps();
2820 
2821 	if (system_supports_sve())
2822 		verify_sve_features();
2823 
2824 	if (is_hyp_mode_available())
2825 		verify_hyp_capabilities();
2826 }
2827 
2828 void check_local_cpu_capabilities(void)
2829 {
2830 	/*
2831 	 * All secondary CPUs should conform to the early CPU features
2832 	 * in use by the kernel based on boot CPU.
2833 	 */
2834 	check_early_cpu_features();
2835 
2836 	/*
2837 	 * If we haven't finalised the system capabilities, this CPU gets
2838 	 * a chance to update the errata work arounds and local features.
2839 	 * Otherwise, this CPU should verify that it has all the system
2840 	 * advertised capabilities.
2841 	 */
2842 	if (!system_capabilities_finalized())
2843 		update_cpu_capabilities(SCOPE_LOCAL_CPU);
2844 	else
2845 		verify_local_cpu_capabilities();
2846 }
2847 
2848 static void __init setup_boot_cpu_capabilities(void)
2849 {
2850 	/* Detect capabilities with either SCOPE_BOOT_CPU or SCOPE_LOCAL_CPU */
2851 	update_cpu_capabilities(SCOPE_BOOT_CPU | SCOPE_LOCAL_CPU);
2852 	/* Enable the SCOPE_BOOT_CPU capabilities alone right away */
2853 	enable_cpu_capabilities(SCOPE_BOOT_CPU);
2854 }
2855 
2856 bool this_cpu_has_cap(unsigned int n)
2857 {
2858 	if (!WARN_ON(preemptible()) && n < ARM64_NCAPS) {
2859 		const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[n];
2860 
2861 		if (cap)
2862 			return cap->matches(cap, SCOPE_LOCAL_CPU);
2863 	}
2864 
2865 	return false;
2866 }
2867 
2868 /*
2869  * This helper function is used in a narrow window where:
2870  * - the system-wide safe registers have been set up from all the SMP CPUs, and
2871  * - the SYSTEM_FEATURE cpu_hwcaps may not yet have been set.
2872  * In all other cases cpus_have_{const_}cap() should be used.
2873  */
2874 static bool __maybe_unused __system_matches_cap(unsigned int n)
2875 {
2876 	if (n < ARM64_NCAPS) {
2877 		const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[n];
2878 
2879 		if (cap)
2880 			return cap->matches(cap, SCOPE_SYSTEM);
2881 	}
2882 	return false;
2883 }
2884 
2885 void cpu_set_feature(unsigned int num)
2886 {
2887 	WARN_ON(num >= MAX_CPU_FEATURES);
2888 	elf_hwcap |= BIT(num);
2889 }
2890 EXPORT_SYMBOL_GPL(cpu_set_feature);
2891 
2892 bool cpu_have_feature(unsigned int num)
2893 {
2894 	WARN_ON(num >= MAX_CPU_FEATURES);
2895 	return elf_hwcap & BIT(num);
2896 }
2897 EXPORT_SYMBOL_GPL(cpu_have_feature);
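
/*
 * Illustrative usage, assuming the cpu_feature()/cpu_have_named_feature()
 * wrappers from <asm/cpufeature.h>: callers usually test hwcap-backed
 * features by name rather than by raw bit number, e.g.
 *
 *	if (cpu_have_named_feature(ASIMD))
 *		pr_debug("AdvSIMD is available\n");
 */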
2898 
2899 unsigned long cpu_get_elf_hwcap(void)
2900 {
2901 	/*
2902 	 * We currently only populate the first 32 bits of AT_HWCAP. Please
2903 	 * note that for userspace compatibility we guarantee that bits 62
2904 	 * and 63 will always be returned as 0.
2905 	 */
2906 	return lower_32_bits(elf_hwcap);
2907 }
2908 
2909 unsigned long cpu_get_elf_hwcap2(void)
2910 {
2911 	return upper_32_bits(elf_hwcap);
2912 }
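
/*
 * The split above mirrors the userspace view: AT_HWCAP carries bits 0-31
 * of elf_hwcap and AT_HWCAP2 carries bits 32-63. A minimal userspace
 * consumer (illustrative, not part of this file) could be:
 *
 *	#include <stdio.h>
 *	#include <sys/auxv.h>
 *	#include <asm/hwcap.h>
 *
 *	int main(void)
 *	{
 *		if (getauxval(AT_HWCAP) & HWCAP_SVE)
 *			printf("SVE supported\n");
 *		if (getauxval(AT_HWCAP2) & HWCAP2_MTE)
 *			printf("MTE supported\n");
 *		return 0;
 *	}
 */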
2913 
2914 static void __init setup_system_capabilities(void)
2915 {
2916 	/*
2917 	 * Now that we have finalised the system-wide safe feature
2918 	 * registers, finalise the capabilities that depend on them.
2919 	 * Also enable any available capabilities that are not
2920 	 * already enabled.
2921 	 */
2922 	update_cpu_capabilities(SCOPE_SYSTEM);
2923 	enable_cpu_capabilities(SCOPE_ALL & ~SCOPE_BOOT_CPU);
2924 }
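
/*
 * Capability lifecycle at a glance (an informal summary of the calls in
 * this file, not a new interface):
 *
 *	boot CPU:	update(SCOPE_BOOT_CPU | SCOPE_LOCAL_CPU),
 *			then enable(SCOPE_BOOT_CPU)
 *	each CPU:	update(SCOPE_LOCAL_CPU) while capabilities are not
 *			finalised, verify them afterwards
 *	after SMP:	update(SCOPE_SYSTEM),
 *			then enable(SCOPE_ALL & ~SCOPE_BOOT_CPU)
 */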
2925 
2926 void __init setup_cpu_features(void)
2927 {
2928 	u32 cwg;
2929 
2930 	setup_system_capabilities();
2931 	setup_elf_hwcaps(arm64_elf_hwcaps);
2932 
2933 	if (system_supports_32bit_el0())
2934 		setup_elf_hwcaps(compat_elf_hwcaps);
2935 
2936 	if (system_uses_ttbr0_pan())
2937 		pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n");
2938 
2939 	sve_setup();
2940 	minsigstksz_setup();
2941 
2942 	/* Advertise that we have computed the system capabilities */
2943 	finalize_system_capabilities();
2944 
2945 	/*
2946 	 * Check for sane CTR_EL0.CWG value.
2947 	 */
2948 	cwg = cache_type_cwg();
2949 	if (!cwg)
2950 		pr_warn("No Cache Writeback Granule information, assuming %d\n",
2951 			ARCH_DMA_MINALIGN);
2952 }
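
/*
 * For reference: CTR_EL0.CWG (bits [27:24]) holds log2 of the Cache
 * Writeback Granule in 4-byte words, so a non-zero field encodes a
 * granule of (4 << CWG) bytes. A hedged sketch of the decode performed
 * by cache_line_size_of_cpu() in <asm/cache.h>:
 *
 *	u32 cwg = cache_type_cwg();
 *	int granule = cwg ? 4 << cwg : ARCH_DMA_MINALIGN;
 */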
2953 
2954 static int enable_mismatched_32bit_el0(unsigned int cpu)
2955 {
2956 	/*
2957 	 * The first 32-bit-capable CPU we detected, which therefore can
2958 	 * no longer be offlined by userspace. -1 indicates that we
2959 	 * haven't yet onlined a 32-bit-capable CPU.
2960 	 */
2961 	static int lucky_winner = -1;
2962 
2963 	struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);
2964 	bool cpu_32bit = id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0);
2965 
2966 	if (cpu_32bit) {
2967 		cpumask_set_cpu(cpu, cpu_32bit_el0_mask);
2968 		static_branch_enable_cpuslocked(&arm64_mismatched_32bit_el0);
2969 	}
2970 
2971 	if (cpumask_test_cpu(0, cpu_32bit_el0_mask) == cpu_32bit)
2972 		return 0;
2973 
2974 	if (lucky_winner >= 0)
2975 		return 0;
2976 
2977 	/*
2978 	 * We've detected a mismatch. We need to keep one of our CPUs with
2979 	 * 32-bit EL0 online so that is_cpu_allowed() doesn't end up rejecting
2980 	 * every CPU in the system for a 32-bit task.
2981 	 */
2982 	lucky_winner = cpu_32bit ? cpu : cpumask_any_and(cpu_32bit_el0_mask,
2983 							 cpu_active_mask);
2984 	get_cpu_device(lucky_winner)->offline_disabled = true;
2985 	setup_elf_hwcaps(compat_elf_hwcaps);
2986 	pr_info("Asymmetric 32-bit EL0 support detected on CPU %u; CPU hot-unplug disabled on CPU %u\n",
2987 		cpu, lucky_winner);
2988 	return 0;
2989 }
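
/*
 * Worked example on a hypothetical system: if CPU0 is 64-bit-only and
 * CPU2 is the first 32-bit-capable CPU to come online, the test above
 * fires (CPU0's mask bit is clear but CPU2 is 32-bit-capable), CPU2 is
 * picked as the lucky winner, its hot-unplug is disabled and the compat
 * hwcaps are finally advertised.
 */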
2990 
2991 static int __init init_32bit_el0_mask(void)
2992 {
2993 	if (!allow_mismatched_32bit_el0)
2994 		return 0;
2995 
2996 	if (!zalloc_cpumask_var(&cpu_32bit_el0_mask, GFP_KERNEL))
2997 		return -ENOMEM;
2998 
2999 	return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
3000 				 "arm64/mismatched_32bit_el0:online",
3001 				 enable_mismatched_32bit_el0, NULL);
3002 }
3003 subsys_initcall_sync(init_32bit_el0_mask);
3004 
3005 static void __maybe_unused cpu_enable_cnp(struct arm64_cpu_capabilities const *cap)
3006 {
3007 	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
3008 }
3009 
3010 /*
3011  * We emulate only the following system register space:
3012  * Op0 = 0x3, CRn = 0x0, Op1 = 0x0, CRm = [0, 4 - 7]
3013  * See Table C5-6 "System instruction encodings for System register
3014  * accesses" in the ARMv8 ARM (ARM DDI 0487A.f) for more details.
3015  */
3016 static inline bool __attribute_const__ is_emulated(u32 id)
3017 {
3018 	return (sys_reg_Op0(id) == 0x3 &&
3019 		sys_reg_CRn(id) == 0x0 &&
3020 		sys_reg_Op1(id) == 0x0 &&
3021 		(sys_reg_CRm(id) == 0 ||
3022 		 ((sys_reg_CRm(id) >= 4) && (sys_reg_CRm(id) <= 7))));
3023 }
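
/*
 * Example (illustrative): SYS_ID_AA64PFR0_EL1 encodes Op0=3, Op1=0,
 * CRn=0, CRm=4, Op2=0 and so falls inside the emulated space, whereas
 * SYS_CNTFRQ_EL0 (Op0=3, Op1=3, CRn=14) does not, so emulate_sys_reg()
 * would reject it with -EINVAL.
 */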
3024 
3025 /*
3026  * With CRm == 0, reg should be one of:
3027  * MIDR_EL1, MPIDR_EL1 or REVIDR_EL1.
3028  */
3029 static inline int emulate_id_reg(u32 id, u64 *valp)
3030 {
3031 	switch (id) {
3032 	case SYS_MIDR_EL1:
3033 		*valp = read_cpuid_id();
3034 		break;
3035 	case SYS_MPIDR_EL1:
3036 		*valp = SYS_MPIDR_SAFE_VAL;
3037 		break;
3038 	case SYS_REVIDR_EL1:
3039 		/* IMPLEMENTATION DEFINED values are emulated with 0 */
3040 		*valp = 0;
3041 		break;
3042 	default:
3043 		return -EINVAL;
3044 	}
3045 
3046 	return 0;
3047 }
3048 
3049 static int emulate_sys_reg(u32 id, u64 *valp)
3050 {
3051 	struct arm64_ftr_reg *regp;
3052 
3053 	if (!is_emulated(id))
3054 		return -EINVAL;
3055 
3056 	if (sys_reg_CRm(id) == 0)
3057 		return emulate_id_reg(id, valp);
3058 
3059 	regp = get_arm64_ftr_reg_nowarn(id);
3060 	if (regp)
3061 		*valp = arm64_ftr_reg_user_value(regp);
3062 	else
3063 		/*
3064 		 * The untracked registers are either IMPLEMENTATION DEFINED
3065 		 * (e.g., ID_AFR0_EL1) or reserved RAZ.
3066 		 */
3067 		*valp = 0;
3068 	return 0;
3069 }
3070 
3071 int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt)
3072 {
3073 	int rc;
3074 	u64 val;
3075 
3076 	rc = emulate_sys_reg(sys_reg, &val);
3077 	if (!rc) {
3078 		pt_regs_write_reg(regs, rt, val);
3079 		arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
3080 	}
3081 	return rc;
3082 }
3083 
3084 static int emulate_mrs(struct pt_regs *regs, u32 insn)
3085 {
3086 	u32 sys_reg, rt;
3087 
3088 	/*
3089 	 * sys_reg values are defined as used in mrs/msr instructions,
3090 	 * so shift the decoded imm16 left by 5 to recover the encoding.
3091 	 */
3092 	sys_reg = (u32)aarch64_insn_decode_immediate(AARCH64_INSN_IMM_16, insn) << 5;
3093 	rt = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, insn);
3094 	return do_emulate_mrs(regs, sys_reg, rt);
3095 }
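
/*
 * Worked decode (illustrative): "mrs x0, ID_AA64ISAR0_EL1" assembles to
 * 0xd5380600. Bits [20:5] carry the register encoding (Op0=3, Op1=0,
 * CRn=0, CRm=6, Op2=0), so extracting the imm16 (0xc030) and shifting it
 * left by 5 yields 0x180600 == sys_reg(3, 0, 0, 6, 0), i.e.
 * SYS_ID_AA64ISAR0_EL1, while Rt in bits [4:0] selects x0 as the
 * destination register.
 */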
3096 
3097 static struct undef_hook mrs_hook = {
3098 	.instr_mask = 0xffff0000,
3099 	.instr_val  = 0xd5380000,
3100 	.pstate_mask = PSR_AA32_MODE_MASK,
3101 	.pstate_val = PSR_MODE_EL0t,
3102 	.fn = emulate_mrs,
3103 };
3104 
3105 static int __init enable_mrs_emulation(void)
3106 {
3107 	register_undef_hook(&mrs_hook);
3108 	return 0;
3109 }
3110 
3111 core_initcall(enable_mrs_emulation);
3112 
3113 enum mitigation_state arm64_get_meltdown_state(void)
3114 {
3115 	if (__meltdown_safe)
3116 		return SPECTRE_UNAFFECTED;
3117 
3118 	if (arm64_kernel_unmapped_at_el0())
3119 		return SPECTRE_MITIGATED;
3120 
3121 	return SPECTRE_VULNERABLE;
3122 }
3123 
3124 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr,
3125 			  char *buf)
3126 {
3127 	switch (arm64_get_meltdown_state()) {
3128 	case SPECTRE_UNAFFECTED:
3129 		return sprintf(buf, "Not affected\n");
3130 
3131 	case SPECTRE_MITIGATED:
3132 		return sprintf(buf, "Mitigation: PTI\n");
3133 
3134 	default:
3135 		return sprintf(buf, "Vulnerable\n");
3136 	}
3137 }
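
/*
 * The strings returned above surface in sysfs; for example, on a machine
 * where kpti is active one would expect:
 *
 *	$ cat /sys/devices/system/cpu/vulnerabilities/meltdown
 *	Mitigation: PTI
 */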
3138