xref: /linux/arch/arm64/include/asm/cpufeature.h (revision 71dfa617ea9f18e4585fe78364217cd32b1fc382)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
 */

#ifndef __ASM_CPUFEATURE_H
#define __ASM_CPUFEATURE_H

#include <asm/alternative-macros.h>
#include <asm/cpucaps.h>
#include <asm/cputype.h>
#include <asm/hwcap.h>
#include <asm/sysreg.h>

#define MAX_CPU_FEATURES	128
#define cpu_feature(x)		KERNEL_HWCAP_ ## x

#define ARM64_SW_FEATURE_OVERRIDE_NOKASLR	0
#define ARM64_SW_FEATURE_OVERRIDE_HVHE		4
#define ARM64_SW_FEATURE_OVERRIDE_RODATA_OFF	8

#ifndef __ASSEMBLY__

#include <linux/bug.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/cpumask.h>

/*
 * CPU feature register tracking
 *
 * The safe value of a CPUID feature field depends on the implications
 * of the values assigned to it by the architecture. Based on the
 * relationship between the values, the features are classified into 3
 * types - LOWER_SAFE, HIGHER_SAFE and EXACT.
 *
 * The lowest value across all CPUs is chosen for LOWER_SAFE and the
 * highest for HIGHER_SAFE. All CPUs are expected to have the same value
 * for a field when EXACT is specified; failing that, the safe value
 * specified in the table is chosen.
 */

enum ftr_type {
	FTR_EXACT,			/* Use a predefined safe value */
	FTR_LOWER_SAFE,			/* Smaller value is safe */
	FTR_HIGHER_SAFE,		/* Bigger value is safe */
	FTR_HIGHER_OR_ZERO_SAFE,	/* Bigger value is safe, but 0 is biggest */
};
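
/*
 * Worked example (illustrative values, not from the kernel tables): for an
 * FTR_LOWER_SAFE field where one CPU reports 2 and another reports 1, the
 * sanitised system-wide value is the minimum, 1. For an FTR_EXACT field on
 * which the CPUs disagree, the predefined @safe_val from the table is used
 * instead.
 */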

#define FTR_STRICT	true	/* SANITY check strict matching required */
#define FTR_NONSTRICT	false	/* SANITY check ignored */

#define FTR_SIGNED	true	/* Value should be treated as signed */
#define FTR_UNSIGNED	false	/* Value should be treated as unsigned */

#define FTR_VISIBLE	true	/* Feature visible to userspace */
#define FTR_HIDDEN	false	/* Feature is hidden from userspace */

#define FTR_VISIBLE_IF_IS_ENABLED(config)		\
	(IS_ENABLED(config) ? FTR_VISIBLE : FTR_HIDDEN)

struct arm64_ftr_bits {
	bool		sign;	/* Value is signed? */
	bool		visible;
	bool		strict;	/* CPU sanity check: strict matching required? */
	enum ftr_type	type;
	u8		shift;
	u8		width;
	s64		safe_val; /* safe value for FTR_EXACT features */
};
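
/*
 * Sketch of how a register's fields might be described (the real tables
 * live in cpufeature.c and are usually built with helper macros; the
 * values below are illustrative only):
 *
 *	static const struct arm64_ftr_bits ftr_example[] = {
 *		{
 *			.sign		= FTR_UNSIGNED,
 *			.visible	= FTR_VISIBLE,
 *			.strict		= FTR_STRICT,
 *			.type		= FTR_LOWER_SAFE,
 *			.shift		= 4,
 *			.width		= 4,
 *			.safe_val	= 0,
 *		},
 *		{},	(the array is terminated by an empty entry)
 *	};
 */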

/*
 * Describe the early feature override to the core override code:
 *
 * @val			Values that are to be merged into the final
 *			sanitised value of the register. Only the bitfields
 *			set to 1 in @mask are valid.
 * @mask		Mask of the features that are overridden by @val.
 *
 * A @mask field set to full-1 indicates that the corresponding field
 * in @val is a valid override.
 *
 * A @mask field set to full-0 with the corresponding @val field set
 * to full-0 denotes that this field has no override.
 *
 * A @mask field set to full-0 with the corresponding @val field set
 * to full-1 denotes that this field has an invalid override.
 */
struct arm64_ftr_override {
	u64		val;
	u64		mask;
};
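
/*
 * Worked example (illustrative values): for a 4-bit field at bit 0, an
 * override forcing the field to 0 is encoded as mask = 0xf, val = 0x0;
 * mask = 0x0, val = 0x0 leaves the field alone; and mask = 0x0, val = 0xf
 * marks the requested override as invalid.
 */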

/*
 * @arm64_ftr_reg - Feature register
 * @strict_mask		Bits which should match across all CPUs for sanity.
 * @sys_val		Safe value across the CPUs (system view)
 */
struct arm64_ftr_reg {
	const char			*name;
	u64				strict_mask;
	u64				user_mask;
	u64				sys_val;
	u64				user_val;
	struct arm64_ftr_override	*override;
	const struct arm64_ftr_bits	*ftr_bits;
};

extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;

/*
 * CPU capabilities:
 *
 * We use arm64_cpu_capabilities to represent system features, errata
 * workarounds (both used internally by the kernel and tracked in
 * system_cpucaps) and ELF HWCAPs (which are exposed to userspace).
 *
 * To support systems with heterogeneous CPUs, we need to make sure that we
 * detect the capabilities correctly on the system and take appropriate
 * measures to ensure there are no incompatibilities.
 *
 * This comment tries to explain how we treat the capabilities.
 * Each capability has the following list of attributes:
 *
 * 1) Scope of Detection: The system detects a given capability by
 *    performing some checks at runtime. This could be, e.g., checking the
 *    value of a field in a CPU ID feature register or checking the CPU
 *    model. The capability provides a callback (@matches()) to perform
 *    the check. Scope defines how the checks should be performed.
 *    There are three cases:
 *
 *     a) SCOPE_LOCAL_CPU: check all the CPUs and "detect" if at least one
 *        matches. This implies we have to run the check on all the
 *        booting CPUs, until the system decides that the state of the
 *        capability is finalised. (See section 2 below.)
 *		Or
 *     b) SCOPE_SYSTEM: check all the CPUs and "detect" if all the CPUs
 *        match. This implies we run the check only once, when the
 *        system decides to finalise the state of the capability. If the
 *        capability relies on a field in one of the CPU ID feature
 *        registers, we use the sanitised value of the register from the
 *        CPU feature infrastructure to make the decision.
 *		Or
 *     c) SCOPE_BOOT_CPU: check only on the primary boot CPU to detect the
 *        feature. This category is for features that are "finalised"
 *        (or used) by the kernel very early, even before the secondary
 *        CPUs are brought up.
 *
 *    The process of detection is usually denoted by "updating" the
 *    capability state in the code.
 *
 * 2) Finalise the state: The kernel should finalise the state of a
 *    capability at some point during its execution and take necessary
 *    actions, if any. Usually, this is done after all the boot-time
 *    enabled CPUs are brought up by the kernel, so that it can make a
 *    better decision based on the available set of CPUs. However, there
 *    are some special cases, where the action is taken during early
 *    boot by the primary boot CPU (e.g., running the kernel at EL2 with
 *    Virtualisation Host Extensions). The kernel usually disallows any
 *    changes to the state of a capability once it finalises the capability
 *    and takes any action, as it may be impossible to execute the actions
 *    safely. A CPU brought up after a capability is "finalised" is
 *    referred to as a "late CPU" w.r.t. the capability. e.g., all secondary
 *    CPUs are treated as "late CPUs" for capabilities determined by the
 *    boot CPU.
 *
 *    At the moment there are two passes of finalising the capabilities:
 *      a) Boot CPU scope capabilities - Finalised by the primary boot CPU
 *         via setup_boot_cpu_capabilities().
 *      b) Everything except (a) - Run via setup_system_capabilities().
 *
 * 3) Verification: When a CPU is brought online (e.g., by the user or by
 *    the kernel), the kernel should make sure that it is safe to use the
 *    CPU, by verifying that the CPU is compliant with the state of the
 *    capabilities finalised already. This happens via:
 *
 *	secondary_start_kernel() -> check_local_cpu_capabilities()
 *
 *    As explained in (2) above, capabilities could be finalised at
 *    different points in the execution. Each newly booted CPU is verified
 *    against the capabilities that have been finalised by the time it
 *    boots.
 *
 *	a) SCOPE_BOOT_CPU: All CPUs are verified against the capability
 *	except for the primary boot CPU.
 *
 *	b) SCOPE_LOCAL_CPU, SCOPE_SYSTEM: All CPUs hotplugged on by the
 *	user after the kernel boot are verified against the capability.
 *
 *    If there is a conflict, the kernel takes an action, based on the
 *    severity (e.g., a CPU could be prevented from booting or a kernel
 *    panic could be triggered). The CPU is allowed to "affect" the state
 *    of the capability, if it has not been finalised already. See
 *    section 5 for more details on conflicts.
 *
 * 4) Action: As mentioned in (2), the kernel can take an action for each
 *    detected capability, on all CPUs on the system. Appropriate actions
 *    include turning on an architectural feature, modifying the control
 *    registers (e.g., SCTLR, TCR etc.) or patching the kernel via
 *    alternatives. The kernel patching is batched and performed at a
 *    later point. The actions are always initiated only after the
 *    capability is finalised. This is usually denoted by "enabling" the
 *    capability. The actions are initiated as follows:
 *	a) Action is triggered on all online CPUs, after the capability is
 *	finalised, invoked within the stop_machine() context from
 *	enable_cpu_capabilities().
 *
 *	b) For any late CPU brought up after (a), the action is triggered
 *	via:
 *
 *	  check_local_cpu_capabilities() -> verify_local_cpu_capabilities()
 *
 * 5) Conflicts: Based on the state of the capability on a late CPU vs.
 *    the system state, we could have the following combinations:
 *
 *		x-----------------------------x
 *		| Type  | System   | Late CPU |
 *		|-----------------------------|
 *		|  a    |   y      |    n     |
 *		|-----------------------------|
 *		|  b    |   n      |    y     |
 *		x-----------------------------x
 *
 *     Two separate flag bits are defined to indicate whether each kind of
 *     conflict can be allowed:
 *		ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU - Case (a) is allowed
 *		ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU - Case (b) is allowed
 *
 *     Case (a) is not permitted for a capability that the system requires
 *     all CPUs to have in order for the capability to be enabled. This is
 *     typical for capabilities that represent enhanced functionality.
 *
 *     Case (b) is not permitted for a capability that must be enabled
 *     during boot if any CPU in the system requires it in order to run
 *     safely. This is typical for erratum workarounds that cannot be
 *     enabled after the corresponding capability is finalised.
 *
 *     In some non-typical cases, either both (a) and (b), or neither,
 *     should be permitted. This can be described by including neither
 *     or both flags in the capability's type field.
 *
 *     In case of a conflict, the CPU is prevented from booting. If the
 *     ARM64_CPUCAP_PANIC_ON_CONFLICT flag is specified for the capability,
 *     then a kernel panic is triggered.
 */

/*
 * Decide how the capability is detected:
 * on any local CPU vs. system wide vs. the primary boot CPU.
 */
#define ARM64_CPUCAP_SCOPE_LOCAL_CPU		((u16)BIT(0))
#define ARM64_CPUCAP_SCOPE_SYSTEM		((u16)BIT(1))
/*
 * The capability is detected on the Boot CPU and is used by the kernel
 * during early boot, i.e., the capability should be "detected" and
 * "enabled" as early as possible on all booting CPUs.
 */
#define ARM64_CPUCAP_SCOPE_BOOT_CPU		((u16)BIT(2))
#define ARM64_CPUCAP_SCOPE_MASK			\
	(ARM64_CPUCAP_SCOPE_SYSTEM	|	\
	 ARM64_CPUCAP_SCOPE_LOCAL_CPU	|	\
	 ARM64_CPUCAP_SCOPE_BOOT_CPU)

#define SCOPE_SYSTEM				ARM64_CPUCAP_SCOPE_SYSTEM
#define SCOPE_LOCAL_CPU				ARM64_CPUCAP_SCOPE_LOCAL_CPU
#define SCOPE_BOOT_CPU				ARM64_CPUCAP_SCOPE_BOOT_CPU
#define SCOPE_ALL				ARM64_CPUCAP_SCOPE_MASK

/*
 * Is it permitted for a late CPU to have this capability when the
 * system hasn't already enabled it?
 */
#define ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU	((u16)BIT(4))
/* Is it safe for a late CPU to miss this capability when the system has it? */
#define ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU	((u16)BIT(5))
/* Panic when a conflict is detected */
#define ARM64_CPUCAP_PANIC_ON_CONFLICT		((u16)BIT(6))

/*
 * CPU errata workarounds that need to be enabled at boot time if one or
 * more CPUs in the system require it. When one of these capabilities
 * has been enabled, it is safe to allow any CPU to boot that doesn't
 * require the workaround. However, it is not safe if a "late" CPU
 * requires a workaround and the system hasn't enabled it already.
 */
#define ARM64_CPUCAP_LOCAL_CPU_ERRATUM		\
	(ARM64_CPUCAP_SCOPE_LOCAL_CPU | ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU)
/*
 * CPU feature detected at boot time based on the system-wide value of a
 * feature. It is safe for a late CPU to have this feature even though
 * the system hasn't enabled it, although the feature will not be used
 * by Linux in this case. If the system has enabled this feature already,
 * then every late CPU must have it.
 */
#define ARM64_CPUCAP_SYSTEM_FEATURE	\
	(ARM64_CPUCAP_SCOPE_SYSTEM | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)
/*
 * CPU feature detected at boot time based on a feature of one or more CPUs.
 * All possible conflicts for a late CPU are ignored.
 * NOTE: this means that a late CPU with the feature will *not* cause the
 * capability to be advertised by cpus_have_*cap()!
 */
#define ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE		\
	(ARM64_CPUCAP_SCOPE_LOCAL_CPU		|	\
	 ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU	|	\
	 ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)

/*
 * CPU feature detected at boot time, on one or more CPUs. A late CPU
 * is not allowed to have the capability when the system doesn't have it.
 * It is OK for a late CPU to miss the feature.
 */
#define ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE	\
	(ARM64_CPUCAP_SCOPE_LOCAL_CPU		|	\
	 ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU)

/*
 * CPU feature used early in the boot based on the boot CPU. All secondary
 * CPUs must match the state of the capability as detected by the boot CPU. In
 * case of a conflict, a kernel panic is triggered.
 */
#define ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE		\
	(ARM64_CPUCAP_SCOPE_BOOT_CPU | ARM64_CPUCAP_PANIC_ON_CONFLICT)

/*
 * CPU feature used early in the boot based on the boot CPU. It is safe for a
 * late CPU to have this feature even though the boot CPU hasn't enabled it,
 * although the feature will not be used by Linux in this case. If the boot CPU
 * has enabled this feature already, then every late CPU must have it.
 */
#define ARM64_CPUCAP_BOOT_CPU_FEATURE		\
	(ARM64_CPUCAP_SCOPE_BOOT_CPU | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)

struct arm64_cpu_capabilities {
	const char *desc;
	u16 capability;
	u16 type;
	bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
	/*
	 * Take the appropriate actions to configure this capability
	 * for this CPU. If the capability is detected by the kernel,
	 * this will be called on all the CPUs in the system,
	 * including the hotplugged CPUs, regardless of whether the
	 * capability is available on that specific CPU. This is
	 * useful for some capabilities (e.g., working around CPU
	 * errata), where all the CPUs must take some action (e.g.,
	 * changing the system control/configuration). Thus, if an
	 * action is required only if the CPU has the capability, then
	 * the routine must check it before taking any action.
	 */
	void (*cpu_enable)(const struct arm64_cpu_capabilities *cap);
	union {
		struct {	/* To be used for erratum handling only */
			struct midr_range midr_range;
			const struct arm64_midr_revidr {
				u32 midr_rv;		/* revision/variant */
				u32 revidr_mask;
			} * const fixed_revs;
		};

		const struct midr_range *midr_range_list;
		struct {	/* Feature register checking */
			u32 sys_reg;
			u8 field_pos;
			u8 field_width;
			u8 min_field_value;
			u8 max_field_value;
			u8 hwcap_type;
			bool sign;
			unsigned long hwcap;
		};
	};

	/*
	 * An optional list of "matches/cpu_enable" pairs for the same
	 * "capability" of the same "type" as described by the parent.
	 * Only matches(), cpu_enable() and fields relevant to these
	 * methods are significant in the list. cpu_enable() is
	 * invoked only if the corresponding entry matches().
	 * However, if a cpu_enable() method is associated
	 * with multiple matches(), care should be taken that either
	 * the match criteria are mutually exclusive, or that the
	 * method is robust against being called multiple times.
	 */
	const struct arm64_cpu_capabilities *match_list;
	const struct cpumask *cpus;
};
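
/*
 * Sketch of a feature-register based capability entry, modelled on the
 * tables in cpufeature.c. The capability number and both callbacks are
 * hypothetical, for illustration only:
 *
 *	static const struct arm64_cpu_capabilities example __initconst = {
 *		.desc			= "Example feature",
 *		.capability		= ARM64_HAS_EXAMPLE,
 *		.type			= ARM64_CPUCAP_SYSTEM_FEATURE,
 *		.matches		= has_example_feature,
 *		.sys_reg		= SYS_ID_AA64ISAR0_EL1,
 *		.sign			= FTR_UNSIGNED,
 *		.field_pos		= 4,
 *		.field_width		= 4,
 *		.min_field_value	= 1,
 *		.cpu_enable		= cpu_enable_example,
 *	};
 *
 * Since cpu_enable() is invoked on every CPU once the capability is
 * enabled, cpu_enable_example() would need to check this_cpu_has_cap()
 * first if its action only applies to CPUs that actually have the feature.
 */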

static inline int cpucap_default_scope(const struct arm64_cpu_capabilities *cap)
{
	return cap->type & ARM64_CPUCAP_SCOPE_MASK;
}

/*
 * Generic helper for handling capabilities with multiple (match, enable)
 * pairs of callbacks, sharing the same capability bit.
 * Iterate over each entry to see if at least one matches.
 */
static inline bool
cpucap_multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry,
			       int scope)
{
	const struct arm64_cpu_capabilities *caps;

	for (caps = entry->match_list; caps->matches; caps++)
		if (caps->matches(caps, scope))
			return true;

	return false;
}

static __always_inline bool is_vhe_hyp_code(void)
{
	/* Only defined for code run in VHE hyp context */
	return __is_defined(__KVM_VHE_HYPERVISOR__);
}

static __always_inline bool is_nvhe_hyp_code(void)
{
	/* Only defined for code run in NVHE hyp context */
	return __is_defined(__KVM_NVHE_HYPERVISOR__);
}

static __always_inline bool is_hyp_code(void)
{
	return is_vhe_hyp_code() || is_nvhe_hyp_code();
}

extern DECLARE_BITMAP(system_cpucaps, ARM64_NCAPS);

extern DECLARE_BITMAP(boot_cpucaps, ARM64_NCAPS);

#define for_each_available_cap(cap)		\
	for_each_set_bit(cap, system_cpucaps, ARM64_NCAPS)

bool this_cpu_has_cap(unsigned int cap);
void cpu_set_feature(unsigned int num);
bool cpu_have_feature(unsigned int num);
unsigned long cpu_get_elf_hwcap(void);
unsigned long cpu_get_elf_hwcap2(void);

#define cpu_set_named_feature(name) cpu_set_feature(cpu_feature(name))
#define cpu_have_named_feature(name) cpu_have_feature(cpu_feature(name))

static __always_inline bool boot_capabilities_finalized(void)
{
	return alternative_has_cap_likely(ARM64_ALWAYS_BOOT);
}

static __always_inline bool system_capabilities_finalized(void)
{
	return alternative_has_cap_likely(ARM64_ALWAYS_SYSTEM);
}

/*
 * Test for a capability with a runtime check.
 *
 * Before the capability is detected, this returns false.
 */
static __always_inline bool cpus_have_cap(unsigned int num)
{
	if (__builtin_constant_p(num) && !cpucap_is_possible(num))
		return false;
	if (num >= ARM64_NCAPS)
		return false;
	return arch_test_bit(num, system_cpucaps);
}

/*
 * Test for a capability without a runtime check.
 *
 * Before boot capabilities are finalized, this will BUG().
 * After boot capabilities are finalized, this is patched to avoid a runtime
 * check.
 *
 * @num must be a compile-time constant.
 */
static __always_inline bool cpus_have_final_boot_cap(int num)
{
	if (boot_capabilities_finalized())
		return alternative_has_cap_unlikely(num);
	else
		BUG();
}

/*
 * Test for a capability without a runtime check.
 *
 * Before system capabilities are finalized, this will BUG().
 * After system capabilities are finalized, this is patched to avoid a runtime
 * check.
 *
 * @num must be a compile-time constant.
 */
static __always_inline bool cpus_have_final_cap(int num)
{
	if (system_capabilities_finalized())
		return alternative_has_cap_unlikely(num);
	else
		BUG();
}
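
/*
 * Usage sketch (illustrative): code that can run before the capabilities
 * are finalised must use the runtime-checked form, while paths that only
 * run afterwards can rely on the patched form:
 *
 *	if (cpus_have_cap(ARM64_HAS_PAN))		(safe at any time)
 *		...
 *
 *	if (cpus_have_final_cap(ARM64_HAS_PAN))		(finalised only)
 *		...
 */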

static inline int __attribute_const__
cpuid_feature_extract_signed_field_width(u64 features, int field, int width)
{
	return (s64)(features << (64 - width - field)) >> (64 - width);
}

static inline int __attribute_const__
cpuid_feature_extract_signed_field(u64 features, int field)
{
	return cpuid_feature_extract_signed_field_width(features, field, 4);
}

static __always_inline unsigned int __attribute_const__
cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width)
{
	return (u64)(features << (64 - width - field)) >> (64 - width);
}

static __always_inline unsigned int __attribute_const__
cpuid_feature_extract_unsigned_field(u64 features, int field)
{
	return cpuid_feature_extract_unsigned_field_width(features, field, 4);
}
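
/*
 * Worked example (illustrative values): extracting the 4-bit field at
 * bits [7:4] of features = 0xa30 computes (0xa30 << 56) >> 60, i.e. the
 * field is shifted up to the top of the register and back down, yielding
 * 0x3. The signed variant uses an arithmetic right shift instead, so a
 * field value of 0xf would be returned as -1.
 */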

/*
 * Fields that identify the version of the Performance Monitors Extension do
 * not follow the standard ID scheme. See ARM DDI 0487E.a page D13-2825,
 * "Alternative ID scheme used for the Performance Monitors Extension version".
 */
static inline u64 __attribute_const__
cpuid_feature_cap_perfmon_field(u64 features, int field, u64 cap)
{
	u64 val = cpuid_feature_extract_unsigned_field(features, field);
	u64 mask = GENMASK_ULL(field + 3, field);

	/* Treat IMPLEMENTATION DEFINED functionality as unimplemented */
	if (val == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
		val = 0;

	if (val > cap) {
		features &= ~mask;
		features |= (cap << field) & mask;
	}

	return features;
}
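
/*
 * For example (illustrative values): with a PMUVer field of 0x6 and
 * cap = 0x5, the field in the returned value is clamped to 0x5. A field
 * reading IMP_DEF (0xf) is compared as if it were 0, so it is never
 * clamped by @cap.
 */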

static inline u64 arm64_ftr_mask(const struct arm64_ftr_bits *ftrp)
{
	return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift);
}

static inline u64 arm64_ftr_reg_user_value(const struct arm64_ftr_reg *reg)
{
	return (reg->user_val | (reg->sys_val & reg->user_mask));
}

static inline int __attribute_const__
cpuid_feature_extract_field_width(u64 features, int field, int width, bool sign)
{
	if (WARN_ON_ONCE(!width))
		width = 4;
	return (sign) ?
		cpuid_feature_extract_signed_field_width(features, field, width) :
		cpuid_feature_extract_unsigned_field_width(features, field, width);
}

static inline int __attribute_const__
cpuid_feature_extract_field(u64 features, int field, bool sign)
{
	return cpuid_feature_extract_field_width(features, field, 4, sign);
}

static inline s64 arm64_ftr_value(const struct arm64_ftr_bits *ftrp, u64 val)
{
	return (s64)cpuid_feature_extract_field_width(val, ftrp->shift, ftrp->width, ftrp->sign);
}

static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
{
	return cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_EL1_BIGEND_SHIFT) == 0x1 ||
		cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_EL1_BIGENDEL0_SHIFT) == 0x1;
}

static inline bool id_aa64pfr0_32bit_el1(u64 pfr0)
{
	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_EL1_SHIFT);

	return val == ID_AA64PFR0_EL1_ELx_32BIT_64BIT;
}

static inline bool id_aa64pfr0_32bit_el0(u64 pfr0)
{
	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_EL0_SHIFT);

	return val == ID_AA64PFR0_EL1_ELx_32BIT_64BIT;
}

static inline bool id_aa64pfr0_sve(u64 pfr0)
{
	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_SVE_SHIFT);

	return val > 0;
}

static inline bool id_aa64pfr1_sme(u64 pfr1)
{
	u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_EL1_SME_SHIFT);

	return val > 0;
}

static inline bool id_aa64pfr1_mte(u64 pfr1)
{
	u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_EL1_MTE_SHIFT);

	return val >= ID_AA64PFR1_EL1_MTE_MTE2;
}

void __init setup_boot_cpu_features(void);
void __init setup_system_features(void);
void __init setup_user_features(void);

void check_local_cpu_capabilities(void);

u64 read_sanitised_ftr_reg(u32 id);
u64 __read_sysreg_by_encoding(u32 sys_id);

static inline bool cpu_supports_mixed_endian_el0(void)
{
	return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
}

static inline bool supports_csv2p3(int scope)
{
	u64 pfr0;
	u8 csv2_val;

	if (scope == SCOPE_LOCAL_CPU)
		pfr0 = read_sysreg_s(SYS_ID_AA64PFR0_EL1);
	else
		pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

	csv2_val = cpuid_feature_extract_unsigned_field(pfr0,
							ID_AA64PFR0_EL1_CSV2_SHIFT);
	return csv2_val == 3;
}

static inline bool supports_clearbhb(int scope)
{
	u64 isar2;

	if (scope == SCOPE_LOCAL_CPU)
		isar2 = read_sysreg_s(SYS_ID_AA64ISAR2_EL1);
	else
		isar2 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1);

	return cpuid_feature_extract_unsigned_field(isar2,
						    ID_AA64ISAR2_EL1_CLRBHB_SHIFT);
}

const struct cpumask *system_32bit_el0_cpumask(void);
DECLARE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0);

static inline bool system_supports_32bit_el0(void)
{
	u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

	return static_branch_unlikely(&arm64_mismatched_32bit_el0) ||
	       id_aa64pfr0_32bit_el0(pfr0);
}

static inline bool system_supports_4kb_granule(void)
{
	u64 mmfr0;
	u32 val;

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	val = cpuid_feature_extract_unsigned_field(mmfr0,
						ID_AA64MMFR0_EL1_TGRAN4_SHIFT);

	return (val >= ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN) &&
	       (val <= ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MAX);
}

static inline bool system_supports_64kb_granule(void)
{
	u64 mmfr0;
	u32 val;

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	val = cpuid_feature_extract_unsigned_field(mmfr0,
						ID_AA64MMFR0_EL1_TGRAN64_SHIFT);

	return (val >= ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MIN) &&
	       (val <= ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MAX);
}

static inline bool system_supports_16kb_granule(void)
{
	u64 mmfr0;
	u32 val;

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	val = cpuid_feature_extract_unsigned_field(mmfr0,
						ID_AA64MMFR0_EL1_TGRAN16_SHIFT);

	return (val >= ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MIN) &&
	       (val <= ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MAX);
}

static inline bool system_supports_mixed_endian_el0(void)
{
	return id_aa64mmfr0_mixed_endian_el0(read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1));
}

static inline bool system_supports_mixed_endian(void)
{
	u64 mmfr0;
	u32 val;

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	val = cpuid_feature_extract_unsigned_field(mmfr0,
						ID_AA64MMFR0_EL1_BIGEND_SHIFT);

	return val == 0x1;
}

static __always_inline bool system_supports_fpsimd(void)
{
	return alternative_has_cap_likely(ARM64_HAS_FPSIMD);
}

static inline bool system_uses_hw_pan(void)
{
	return alternative_has_cap_unlikely(ARM64_HAS_PAN);
}

static inline bool system_uses_ttbr0_pan(void)
{
	return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
		!system_uses_hw_pan();
}

static __always_inline bool system_supports_sve(void)
{
	return alternative_has_cap_unlikely(ARM64_SVE);
}

static __always_inline bool system_supports_sme(void)
{
	return alternative_has_cap_unlikely(ARM64_SME);
}

static __always_inline bool system_supports_sme2(void)
{
	return alternative_has_cap_unlikely(ARM64_SME2);
}

static __always_inline bool system_supports_fa64(void)
{
	return alternative_has_cap_unlikely(ARM64_SME_FA64);
}

static __always_inline bool system_supports_tpidr2(void)
{
	return system_supports_sme();
}

static __always_inline bool system_supports_fpmr(void)
{
	return alternative_has_cap_unlikely(ARM64_HAS_FPMR);
}

static __always_inline bool system_supports_cnp(void)
{
	return alternative_has_cap_unlikely(ARM64_HAS_CNP);
}

static inline bool system_supports_address_auth(void)
{
	return cpus_have_final_boot_cap(ARM64_HAS_ADDRESS_AUTH);
}

static inline bool system_supports_generic_auth(void)
{
	return alternative_has_cap_unlikely(ARM64_HAS_GENERIC_AUTH);
}

static inline bool system_has_full_ptr_auth(void)
{
	return system_supports_address_auth() && system_supports_generic_auth();
}

static __always_inline bool system_uses_irq_prio_masking(void)
{
	return alternative_has_cap_unlikely(ARM64_HAS_GIC_PRIO_MASKING);
}

static inline bool system_supports_mte(void)
{
	return alternative_has_cap_unlikely(ARM64_MTE);
}

static inline bool system_has_prio_mask_debugging(void)
{
	return IS_ENABLED(CONFIG_ARM64_DEBUG_PRIORITY_MASKING) &&
	       system_uses_irq_prio_masking();
}

static inline bool system_supports_bti(void)
{
	return cpus_have_final_cap(ARM64_BTI);
}

static inline bool system_supports_bti_kernel(void)
{
	return IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) &&
		cpus_have_final_boot_cap(ARM64_BTI);
}

static inline bool system_supports_tlb_range(void)
{
	return alternative_has_cap_unlikely(ARM64_HAS_TLB_RANGE);
}

static inline bool system_supports_lpa2(void)
{
	return cpus_have_final_cap(ARM64_HAS_LPA2);
}

int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
bool try_emulate_mrs(struct pt_regs *regs, u32 isn);

static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
{
	switch (parange) {
	case ID_AA64MMFR0_EL1_PARANGE_32: return 32;
	case ID_AA64MMFR0_EL1_PARANGE_36: return 36;
	case ID_AA64MMFR0_EL1_PARANGE_40: return 40;
	case ID_AA64MMFR0_EL1_PARANGE_42: return 42;
	case ID_AA64MMFR0_EL1_PARANGE_44: return 44;
	case ID_AA64MMFR0_EL1_PARANGE_48: return 48;
	case ID_AA64MMFR0_EL1_PARANGE_52: return 52;
	/*
	 * A future PE could use a value unknown to the kernel.
	 * However, by "D10.1.4 Principles of the ID scheme
	 * for fields in ID registers", ARM DDI 0487C.a, any new
	 * value is guaranteed to be higher than what we know already.
	 * As a safe limit, we return the limit supported by the kernel.
	 */
	default: return CONFIG_ARM64_PA_BITS;
	}
}

/* Check whether hardware update of the Access flag is supported */
static inline bool cpu_has_hw_af(void)
{
	u64 mmfr1;

	if (!IS_ENABLED(CONFIG_ARM64_HW_AFDBM))
		return false;

	/*
	 * Use the cached version to avoid emulated MRS operations on
	 * KVM guests.
	 */
	mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
	return cpuid_feature_extract_unsigned_field(mmfr1,
						ID_AA64MMFR1_EL1_HAFDBS_SHIFT);
}

static inline bool cpu_has_pan(void)
{
	u64 mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
	return cpuid_feature_extract_unsigned_field(mmfr1,
						    ID_AA64MMFR1_EL1_PAN_SHIFT);
}

#ifdef CONFIG_ARM64_AMU_EXTN
/* Check whether the CPU supports the Activity Monitors Unit (AMU) */
extern bool cpu_has_amu_feat(int cpu);
#else
static inline bool cpu_has_amu_feat(int cpu)
{
	return false;
}
#endif

/* Get a CPU that supports the Activity Monitors Unit (AMU) */
extern int get_cpu_with_amu_feat(void);

static inline unsigned int get_vmid_bits(u64 mmfr1)
{
	int vmid_bits;

	vmid_bits = cpuid_feature_extract_unsigned_field(mmfr1,
						ID_AA64MMFR1_EL1_VMIDBits_SHIFT);
	if (vmid_bits == ID_AA64MMFR1_EL1_VMIDBits_16)
		return 16;

	/*
	 * Return the default here even if a reserved value is fetched
	 * from the system register.
	 */
	return 8;
}

s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new, s64 cur);
struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id);

extern struct arm64_ftr_override id_aa64mmfr0_override;
extern struct arm64_ftr_override id_aa64mmfr1_override;
extern struct arm64_ftr_override id_aa64mmfr2_override;
extern struct arm64_ftr_override id_aa64pfr0_override;
extern struct arm64_ftr_override id_aa64pfr1_override;
extern struct arm64_ftr_override id_aa64zfr0_override;
extern struct arm64_ftr_override id_aa64smfr0_override;
extern struct arm64_ftr_override id_aa64isar1_override;
extern struct arm64_ftr_override id_aa64isar2_override;

extern struct arm64_ftr_override arm64_sw_feature_override;

static inline
u64 arm64_apply_feature_override(u64 val, int feat, int width,
				 const struct arm64_ftr_override *override)
{
	u64 oval = override->val;

	/*
	 * When it encounters an invalid override (e.g., an override that
	 * cannot be honoured due to a missing CPU feature), the early idreg
	 * override code will set the mask to 0x0 and the value to non-zero for
	 * the field in question. In order to determine whether the override is
	 * valid or not for the field we are interested in, we first need to
	 * disregard bits belonging to other fields.
	 */
	oval &= GENMASK_ULL(feat + width - 1, feat);

	/*
	 * The override is valid if all value bits are accounted for in the
	 * mask. If so, replace the masked bits with the override value.
	 */
	if (oval == (oval & override->mask)) {
		val &= ~override->mask;
		val |= oval;
	}

	/* Extract the field from the updated value */
	return cpuid_feature_extract_unsigned_field(val, feat);
}
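
/*
 * Worked example (illustrative values): with override->mask = 0xf and
 * override->val = 0x1 for a 4-bit field at bit 0, a register value of 0x2
 * in that field is replaced and the function returns 0x1. With mask = 0x0
 * and val = 0x1 (an invalid override), the value bits are not covered by
 * the mask, so the original field value is returned unmodified.
 */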

static inline bool arm64_test_sw_feature_override(int feat)
{
	/*
	 * Software features are pseudo CPU features that have no underlying
	 * CPUID system register value to apply the override to.
	 */
	return arm64_apply_feature_override(0, feat, 4,
					    &arm64_sw_feature_override);
}

static inline bool kaslr_disabled_cmdline(void)
{
	return arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_NOKASLR);
}

u32 get_kvm_ipa_limit(void);
void dump_cpu_features(void);

static inline bool cpu_has_bti(void)
{
	if (!IS_ENABLED(CONFIG_ARM64_BTI))
		return false;

	return arm64_apply_feature_override(read_cpuid(ID_AA64PFR1_EL1),
					    ID_AA64PFR1_EL1_BT_SHIFT, 4,
					    &id_aa64pfr1_override);
}

static inline bool cpu_has_pac(void)
{
	u64 isar1, isar2;

	if (!IS_ENABLED(CONFIG_ARM64_PTR_AUTH))
		return false;

	isar1 = read_cpuid(ID_AA64ISAR1_EL1);
	isar2 = read_cpuid(ID_AA64ISAR2_EL1);

	if (arm64_apply_feature_override(isar1, ID_AA64ISAR1_EL1_APA_SHIFT, 4,
					 &id_aa64isar1_override))
		return true;

	if (arm64_apply_feature_override(isar1, ID_AA64ISAR1_EL1_API_SHIFT, 4,
					 &id_aa64isar1_override))
		return true;

	return arm64_apply_feature_override(isar2, ID_AA64ISAR2_EL1_APA3_SHIFT, 4,
					    &id_aa64isar2_override);
}

static inline bool cpu_has_lva(void)
{
	u64 mmfr2;

	mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);
	mmfr2 &= ~id_aa64mmfr2_override.mask;
	mmfr2 |= id_aa64mmfr2_override.val;
	return cpuid_feature_extract_unsigned_field(mmfr2,
						    ID_AA64MMFR2_EL1_VARange_SHIFT);
}

static inline bool cpu_has_lpa2(void)
{
#ifdef CONFIG_ARM64_LPA2
	u64 mmfr0;
	int feat;

	mmfr0 = read_sysreg(id_aa64mmfr0_el1);
	mmfr0 &= ~id_aa64mmfr0_override.mask;
	mmfr0 |= id_aa64mmfr0_override.val;
	feat = cpuid_feature_extract_signed_field(mmfr0,
						  ID_AA64MMFR0_EL1_TGRAN_SHIFT);

	return feat >= ID_AA64MMFR0_EL1_TGRAN_LPA2;
#else
	return false;
#endif
}

#endif /* __ASSEMBLY__ */

#endif