xref: /linux/arch/arm64/kernel/cpu_errata.c (revision e2683c8868d03382da7e1ce8453b543a043066d1)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Contains CPU specific errata definitions
4  *
5  * Copyright (C) 2014 ARM Ltd.
6  */
7 
8 #include <linux/arm-smccc.h>
9 #include <linux/types.h>
10 #include <linux/cpu.h>
11 #include <asm/cpu.h>
12 #include <asm/cputype.h>
13 #include <asm/cpufeature.h>
14 #include <asm/fpsimd.h>
15 #include <asm/kvm_asm.h>
16 #include <asm/smp_plat.h>
17 
/*
 * Optional list of "target implementation" CPUs: when non-empty, erratum
 * matching is performed against every entry in this list instead of the
 * local CPU's MIDR/REVIDR.  Populated at most once via
 * cpu_errata_set_target_impl() below.
 */
static u64 target_impl_cpu_num;
static struct target_impl_cpu *target_impl_cpus;
20 
21 bool cpu_errata_set_target_impl(u64 num, void *impl_cpus)
22 {
23 	if (target_impl_cpu_num || !num || !impl_cpus)
24 		return false;
25 
26 	target_impl_cpu_num = num;
27 	target_impl_cpus = impl_cpus;
28 	return true;
29 }
30 
31 static inline bool is_midr_in_range(struct midr_range const *range)
32 {
33 	int i;
34 
35 	if (!target_impl_cpu_num)
36 		return midr_is_cpu_model_range(read_cpuid_id(), range->model,
37 					       range->rv_min, range->rv_max);
38 
39 	for (i = 0; i < target_impl_cpu_num; i++) {
40 		if (midr_is_cpu_model_range(target_impl_cpus[i].midr,
41 					    range->model,
42 					    range->rv_min, range->rv_max))
43 			return true;
44 	}
45 	return false;
46 }
47 
48 bool is_midr_in_range_list(struct midr_range const *ranges)
49 {
50 	while (ranges->model)
51 		if (is_midr_in_range(ranges++))
52 			return true;
53 	return false;
54 }
55 EXPORT_SYMBOL_GPL(is_midr_in_range_list);
56 
57 static bool __maybe_unused
58 __is_affected_midr_range(const struct arm64_cpu_capabilities *entry,
59 			 u32 midr, u32 revidr)
60 {
61 	const struct arm64_midr_revidr *fix;
62 	if (!is_midr_in_range(&entry->midr_range))
63 		return false;
64 
65 	midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
66 	for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
67 		if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
68 			return false;
69 	return true;
70 }
71 
72 static bool __maybe_unused
73 is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
74 {
75 	int i;
76 
77 	if (!target_impl_cpu_num) {
78 		WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
79 		return __is_affected_midr_range(entry, read_cpuid_id(),
80 						read_cpuid(REVIDR_EL1));
81 	}
82 
83 	for (i = 0; i < target_impl_cpu_num; i++) {
84 		if (__is_affected_midr_range(entry, target_impl_cpus[i].midr,
85 					     target_impl_cpus[i].midr))
86 			return true;
87 	}
88 	return false;
89 }
90 
/*
 * .matches callback for errata described by a list of MIDR ranges.
 * Unlike is_affected_midr_range(), no fixed_revs (REVIDR) exceptions
 * are considered here.
 */
static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
			    int scope)
{
	/* Local-CPU scope only; must not be preemptible while reading MIDR. */
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(entry->midr_range_list);
}
98 
99 static bool __maybe_unused
100 is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
101 {
102 	u32 model;
103 
104 	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
105 
106 	model = read_cpuid_id();
107 	model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
108 		 MIDR_ARCHITECTURE_MASK;
109 
110 	return model == entry->midr_range.model;
111 }
112 
113 static bool
114 has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
115 			  int scope)
116 {
117 	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
118 	u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask;
119 	u64 ctr_raw, ctr_real;
120 
121 	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
122 
123 	/*
124 	 * We want to make sure that all the CPUs in the system expose
125 	 * a consistent CTR_EL0 to make sure that applications behaves
126 	 * correctly with migration.
127 	 *
128 	 * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0 :
129 	 *
130 	 * 1) It is safe if the system doesn't support IDC, as CPU anyway
131 	 *    reports IDC = 0, consistent with the rest.
132 	 *
133 	 * 2) If the system has IDC, it is still safe as we trap CTR_EL0
134 	 *    access on this CPU via the ARM64_HAS_CACHE_IDC capability.
135 	 *
136 	 * So, we need to make sure either the raw CTR_EL0 or the effective
137 	 * CTR_EL0 matches the system's copy to allow a secondary CPU to boot.
138 	 */
139 	ctr_raw = read_cpuid_cachetype() & mask;
140 	ctr_real = read_cpuid_effective_cachetype() & mask;
141 
142 	return (ctr_real != sys) && (ctr_raw != sys);
143 }
144 
#ifdef CONFIG_ARM64_ERRATUM_4311569
/* Off by default; flipped on only via the early_param handler below. */
static DEFINE_STATIC_KEY_FALSE(arm_si_l1_workaround_4311569);

/* Command-line hook: "arm_si_l1_workaround_4311569" enables the workaround. */
static int __init early_arm_si_l1_workaround_4311569_cfg(char *arg)
{
	static_branch_enable(&arm_si_l1_workaround_4311569);
	pr_info("Enabling cache maintenance workaround for ARM SI-L1 erratum 4311569\n");

	return 0;
}
early_param("arm_si_l1_workaround_4311569", early_arm_si_l1_workaround_4311569_cfg);

/*
 * We have some earlier use cases to call cache maintenance operation functions, for example,
 * dcache_inval_poc() and dcache_clean_poc() in head.S, before making decision to turn on this
 * workaround. Since the scope of this workaround is limited to non-coherent DMA agents, it's
 * safe to have the workaround off by default.
 */
static bool
need_arm_si_l1_workaround_4311569(const struct arm64_cpu_capabilities *entry, int scope)
{
	/* Purely command-line driven; no MIDR matching is performed. */
	return static_branch_unlikely(&arm_si_l1_workaround_4311569);
}
#endif
168 
169 static void
170 cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap)
171 {
172 	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
173 	bool enable_uct_trap = false;
174 
175 	/* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
176 	if ((read_cpuid_cachetype() & mask) !=
177 	    (arm64_ftr_reg_ctrel0.sys_val & mask))
178 		enable_uct_trap = true;
179 
180 	/* ... or if the system is affected by an erratum */
181 	if (cap->capability == ARM64_WORKAROUND_1542419)
182 		enable_uct_trap = true;
183 
184 	if (enable_uct_trap)
185 		sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
186 }
187 
#ifdef CONFIG_ARM64_ERRATUM_1463225
/*
 * Erratum 1463225 workaround is only applied when an affected MIDR is
 * found AND the kernel is running in hyp (VHE) mode.
 * NOTE(review): presumably the non-VHE case is handled elsewhere — confirm.
 */
static bool
has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
			       int scope)
{
	return is_affected_midr_range_list(entry, scope) && is_kernel_in_hyp_mode();
}
#endif
196 
/* Trap EL0 cache maintenance instructions by clearing SCTLR_EL1.UCI. */
static void __maybe_unused
cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{
	sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
}
202 
/*
 * Helper macros for building arm64_cpu_capabilities entries keyed on
 * MIDR values.  The CAP_* variants fill in .matches/.midr_range only;
 * the ERRATA_* variants additionally set .type to
 * ARM64_CPUCAP_LOCAL_CPU_ERRATUM.
 */
#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)					\
	.matches = is_affected_midr_range,				\
	.midr_range = MIDR_ALL_VERSIONS(model)

/* Revisions carrying the fix, as advertised by a REVIDR bit */
#define MIDR_FIXED(rev, revidr_mask) \
	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)		\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,				\
	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)				\
	.matches = is_affected_midr_range_list,			\
	.midr_range_list = list

/* Errata affecting a range of revisions of a given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)	 \
	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev)	\
	ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given model */
#define ERRATA_MIDR_ALL_VERSIONS(model)				\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with same work around */
#define ERRATA_MIDR_RANGE_LIST(midr_list)			\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE_LIST(midr_list)
239 
/* ThunderX2-family parts (Broadcom Vulcan and Cavium ThunderX2), all revs. */
static const __maybe_unused struct midr_range tx2_family_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
	{},
};
245 
/*
 * TX2 erratum 219 (TVM trapping) matcher: only relevant on TX2-family
 * parts when hyp mode is usable.
 */
static bool __maybe_unused
needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
			 int scope)
{
	int i;

	if (!is_affected_midr_range_list(entry, scope) ||
	    !is_hyp_mode_available())
		return false;

	/*
	 * Needed only when some possible CPU has a non-zero MPIDR affinity
	 * level 0 — presumably indicating SMT threads; TODO confirm against
	 * the erratum notice.
	 */
	for_each_possible_cpu(i) {
		if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
			return true;
	}

	return false;
}
263 
264 static bool __maybe_unused
265 has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
266 				int scope)
267 {
268 	bool has_dic = read_cpuid_cachetype() & BIT(CTR_EL0_DIC_SHIFT);
269 	const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);
270 
271 	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
272 	return is_midr_in_range(&range) && has_dic;
273 }
274 
/* Apple M1/M2 cores (all revisions): PMU is IMPLEMENTATION DEFINED. */
static const struct midr_range impdef_pmuv3_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM_PRO),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_PRO),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM_MAX),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_MAX),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_PRO),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_PRO),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_MAX),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_MAX),
	{},
};
290 
291 static bool has_impdef_pmuv3(const struct arm64_cpu_capabilities *entry, int scope)
292 {
293 	u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
294 	unsigned int pmuver;
295 
296 	if (!is_kernel_in_hyp_mode())
297 		return false;
298 
299 	pmuver = cpuid_feature_extract_unsigned_field(dfr0,
300 						      ID_AA64DFR0_EL1_PMUVer_SHIFT);
301 	if (pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
302 		return false;
303 
304 	return is_midr_in_range_list(impdef_pmuv3_cpus);
305 }
306 
/*
 * Enable trapping of the IMPDEF PMU registers via HACR_EL2.
 * NOTE(review): BIT(56) is an Apple IMPLEMENTATION DEFINED trap-enable
 * bit — confirm against the platform documentation.
 */
static void cpu_enable_impdef_pmuv3_traps(const struct arm64_cpu_capabilities *__unused)
{
	sysreg_clear_set_s(SYS_HACR_EL2, 0, BIT(56));
}
311 
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
/*
 * CPUs needing the "repeat TLBI" workaround; multi-entry match list,
 * one entry per config-gated erratum.
 */
static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0)
	},
	{
		/* Kryo needs custom matching, see is_kryo_midr() */
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1286807
	{
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
	},
	{
		/* Kryo4xx Gold (rcpe to rfpe) => (r0p0 to r3p0) */
		ERRATA_MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xe),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2441007
	{
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2441009
	{
		/* Cortex-A510 r0p0 -> r1p1. Fixed in r1p2 */
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1),
	},
#endif
	{},
};
#endif
346 
#ifdef CONFIG_CAVIUM_ERRATUM_23154
/* All Cavium ThunderX and Marvell OcteonTX2 parts, every revision. */
static const struct midr_range cavium_erratum_23154_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_THUNDERX),
	MIDR_ALL_VERSIONS(MIDR_THUNDERX_81XX),
	MIDR_ALL_VERSIONS(MIDR_THUNDERX_83XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_98XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_96XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXN),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXMM),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXO),
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_27456
static const struct midr_range cavium_erratum_27456_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
	/* Cavium ThunderX, T81 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_30115
static const struct midr_range cavium_erratum_30115_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.2 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 2),
	/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
	MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
	/* Cavium ThunderX, T83 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
	{},
};
#endif

#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
/* Falkor v1 r0p0, plus Kryo via the custom is_kryo_midr() matcher. */
static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = {
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
	{},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
static const struct midr_range workaround_clean_cache[] = {
#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	/* Cortex-A53 r0p[012]: ARM errata 826319, 827319, 824069 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
#endif
#ifdef	CONFIG_ARM64_ERRATUM_819472
	/* Cortex-A53 r0p[01] : ARM errata 819472 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
#endif
	{},
};
#endif
412 
#ifdef CONFIG_ARM64_ERRATUM_1418040
/*
 * - 1188873 affects r0p0 to r2p0
 * - 1418040 affects r0p0 to r3p1
 */
static const struct midr_range erratum_1418040_list[] = {
	/* Cortex-A76 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
	/* Neoverse-N1 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
	/* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
	MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_845719
static const struct midr_range erratum_845719_list[] = {
	/* Cortex-A53 r0p[01234] */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
	/* Brahma-B53 r0p[0] */
	MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
	/* Kryo2XX Silver rAp4 */
	MIDR_REV(MIDR_QCOM_KRYO_2XX_SILVER, 0xa, 0x4),
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_843419
static const struct arm64_cpu_capabilities erratum_843419_list[] = {
	{
		/* Cortex-A53 r0p[01234] */
		.matches = is_affected_midr_range,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
		/* r0p4 with REVIDR bit 8 set carries the fix */
		MIDR_FIXED(0x4, BIT(8)),
	},
	{
		/* Brahma-B53 r0p[0] */
		.matches = is_affected_midr_range,
		ERRATA_MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
	},
	{},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
static const struct midr_range erratum_speculative_at_list[] = {
#ifdef CONFIG_ARM64_ERRATUM_1165522
	/* Cortex A76 r0p0 to r2p0 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1319367
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1530923
	/* Cortex A55 r0p0 to r2p0 */
	MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 2, 0),
	/* Kryo4xx Silver (rdpe => r1p0) */
	MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
#endif
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_1463225
static const struct midr_range erratum_1463225[] = {
	/* Cortex-A76 r0p0 - r3p1 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
	/* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
	MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
	{},
};
#endif
487 
#ifdef CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE
static const struct midr_range trbe_overwrite_fill_mode_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2139208
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2119858
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
	MIDR_RANGE(MIDR_CORTEX_X2, 0, 0, 2, 0),
#endif
	{},
};
#endif	/* CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE */

#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE
static const struct midr_range tsb_flush_fail_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2067961
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2054223
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
#endif
	{},
};
#endif	/* CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE */

#ifdef CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
/* NOTE(review): could be const like the other tables — verify no writer. */
static struct midr_range trbe_write_out_of_range_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2253138
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2224489
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
	MIDR_RANGE(MIDR_CORTEX_X2, 0, 0, 2, 0),
#endif
	{},
};
#endif /* CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE */

#ifdef CONFIG_ARM64_ERRATUM_1742098
static struct midr_range broken_aarch32_aes[] = {
	MIDR_RANGE(MIDR_CORTEX_A57, 0, 1, 0xf, 0xf),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	{},
};
#endif /* CONFIG_ARM64_ERRATUM_1742098 */
536 
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
static const struct midr_range erratum_spec_unpriv_load_list[] = {
#ifdef CONFIG_ARM64_ERRATUM_3117295
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A510),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2966298
	/* Cortex-A520 r0p0 to r0p1 */
	MIDR_REV_RANGE(MIDR_CORTEX_A520, 0, 0, 1),
#endif
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_3194386
/* Cores where SSBS writes are not fully self-synchronizing (all revisions). */
static const struct midr_range erratum_spec_ssbs_list[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A715),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A720),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A720AE),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A725),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X1C),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X3),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X4),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X925),
	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N3),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3AE),
	{}
};
#endif
578 
#ifdef CONFIG_ARM64_ERRATUM_4193714
/*
 * Matches only when the sanitised ID registers report SME support AND
 * the CPU falls in the entry's MIDR range (minus fixed_revs exceptions).
 */
static bool has_sme_dvmsync_erratum(const struct arm64_cpu_capabilities *entry,
				    int scope)
{
	if (!id_aa64pfr1_sme(read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1)))
		return false;

	return is_affected_midr_range(entry, scope);
}

/* Per-CPU enable hook: apply the DVMSync workaround on affected CPUs only. */
static void cpu_enable_sme_dvmsync(const struct arm64_cpu_capabilities *__unused)
{
	if (this_cpu_has_cap(ARM64_WORKAROUND_4193714))
		sme_enable_dvmsync();
}
#endif
595 
#ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38
/* AmpereOne (AC03): both AMPERE1 and AMPERE1A cores, all revisions. */
static const struct midr_range erratum_ac03_cpu_38_list[] = {
	MIDR_ALL_VERSIONS(MIDR_AMPERE1),
	MIDR_ALL_VERSIONS(MIDR_AMPERE1A),
	{},
};
#endif

#ifdef CONFIG_AMPERE_ERRATUM_AC04_CPU_23
/* AmpereOne (AC04): AMPERE1A cores only, all revisions. */
static const struct midr_range erratum_ac04_cpu_23_list[] = {
	MIDR_ALL_VERSIONS(MIDR_AMPERE1A),
	{},
};
#endif
610 
/*
 * Master table of arm64 erratum workarounds.  Each entry binds a
 * capability bit to a detection method (MIDR range/list or a custom
 * .matches callback) and an optional .cpu_enable hook that is run on
 * affected CPUs.  The table is terminated by an empty entry.
 */
const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
	{
		.desc = "ARM errata 826319, 827319, 824069, or 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_RANGE_LIST(workaround_clean_cache),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
	/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075",
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
	/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_843419
	{
		.desc = "ARM erratum 843419",
		.capability = ARM64_WORKAROUND_843419,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = erratum_843419_list,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
		.desc = "ARM erratum 845719",
		.capability = ARM64_WORKAROUND_845719,
		ERRATA_MIDR_RANGE_LIST(erratum_845719_list),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
		.desc = "Cavium errata 23154 and 38545",
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_23154_cpus),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
	{
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus),
	},
#endif
	{
		/* Always compiled in; see has_mismatched_cache_type() */
		.desc = "Mismatched cache type (CTR_EL0)",
		.capability = ARM64_MISMATCHED_CACHE_TYPE,
		.matches = has_mismatched_cache_type,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
	{
		.desc = "Qualcomm Technologies Falkor/Kryo erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = qcom_erratum_1003_list,
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
	{
		.desc = "Qualcomm erratum 1009, or ARM erratum 1286807, 2441009",
		.capability = ARM64_WORKAROUND_REPEAT_TLBI,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = arm64_repeat_tlbi_list,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
	/* Cortex-A73 all versions */
		.desc = "ARM erratum 858921",
		.capability = ARM64_WORKAROUND_858921,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	},
#endif
	{
		.desc = "Spectre-v2",
		.capability = ARM64_SPECTRE_V2,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_spectre_v2,
		.cpu_enable = spectre_v2_enable_mitigation,
	},
#ifdef CONFIG_RANDOMIZE_BASE
	{
	/* Must come after the Spectre-v2 entry */
		.desc = "Spectre-v3a",
		.capability = ARM64_SPECTRE_V3A,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_spectre_v3a,
		.cpu_enable = spectre_v3a_enable_mitigation,
	},
#endif
	{
		.desc = "Spectre-v4",
		.capability = ARM64_SPECTRE_V4,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_spectre_v4,
		.cpu_enable = spectre_v4_enable_mitigation,
	},
	{
		.desc = "Spectre-BHB",
		.capability = ARM64_SPECTRE_BHB,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = is_spectre_bhb_affected,
		.cpu_enable = spectre_bhb_enable_mitigation,
	},
#ifdef CONFIG_ARM64_ERRATUM_1418040
	{
		.desc = "ARM erratum 1418040",
		.capability = ARM64_WORKAROUND_1418040,
		ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
		/*
		 * We need to allow affected CPUs to come in late, but
		 * also need the non-affected CPUs to be able to come
		 * in at any point in time. Wonderful.
		 */
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
	{
		.desc = "ARM errata 1165522, 1319367, or 1530923",
		.capability = ARM64_WORKAROUND_SPECULATIVE_AT,
		ERRATA_MIDR_RANGE_LIST(erratum_speculative_at_list),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1463225
	{
		.desc = "ARM erratum 1463225",
		.capability = ARM64_WORKAROUND_1463225,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_cortex_a76_erratum_1463225,
		.midr_range_list = erratum_1463225,
	},
#endif
#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
	{
		.desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)",
		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
		.matches = needs_tx2_tvm_workaround,
	},
	{
		.desc = "Cavium ThunderX2 erratum 219 (PRFM removal)",
		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM,
		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1542419
	{
		/* we depend on the firmware portion for correctness */
		.desc = "ARM erratum 1542419 (kernel portion)",
		.capability = ARM64_WORKAROUND_1542419,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_neoverse_n1_erratum_1542419,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1508412
	{
		/* we depend on the firmware portion for correctness */
		.desc = "ARM erratum 1508412 (kernel portion)",
		.capability = ARM64_WORKAROUND_1508412,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A77,
				  0, 0,
				  1, 0),
	},
#endif
#ifdef CONFIG_NVIDIA_CARMEL_CNP_ERRATUM
	{
		/* NVIDIA Carmel */
		.desc = "NVIDIA Carmel CNP erratum",
		.capability = ARM64_WORKAROUND_NVIDIA_CARMEL_CNP,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE
	{
		/*
		 * The erratum work around is handled within the TRBE
		 * driver and can be applied per-cpu. So, we can allow
		 * a late CPU to come online with this erratum.
		 */
		.desc = "ARM erratum 2119858 or 2139208",
		.capability = ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE,
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
		CAP_MIDR_RANGE_LIST(trbe_overwrite_fill_mode_cpus),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE
	{
		.desc = "ARM erratum 2067961 or 2054223",
		.capability = ARM64_WORKAROUND_TSB_FLUSH_FAILURE,
		ERRATA_MIDR_RANGE_LIST(tsb_flush_fail_cpus),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
	{
		.desc = "ARM erratum 2253138 or 2224489",
		.capability = ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE,
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
		CAP_MIDR_RANGE_LIST(trbe_write_out_of_range_cpus),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2645198
	{
		.desc = "ARM erratum 2645198",
		.capability = ARM64_WORKAROUND_2645198,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A715)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2077057
	{
		.desc = "ARM erratum 2077057",
		.capability = ARM64_WORKAROUND_2077057,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2064142
	{
		.desc = "ARM erratum 2064142",
		.capability = ARM64_WORKAROUND_2064142,

		/* Cortex-A510 r0p0 - r0p2 */
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2457168
	{
		.desc = "ARM erratum 2457168",
		.capability = ARM64_WORKAROUND_2457168,
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,

		/* Cortex-A510 r0p0-r1p1 */
		CAP_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2038923
	{
		.desc = "ARM erratum 2038923",
		.capability = ARM64_WORKAROUND_2038923,

		/* Cortex-A510 r0p0 - r0p2 */
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1902691
	{
		.desc = "ARM erratum 1902691",
		.capability = ARM64_WORKAROUND_1902691,

		/* Cortex-A510 r0p0 - r0p1 */
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 1)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1742098
	{
		.desc = "ARM erratum 1742098",
		.capability = ARM64_WORKAROUND_1742098,
		CAP_MIDR_RANGE_LIST(broken_aarch32_aes),
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2658417
	{
		.desc = "ARM erratum 2658417",
		.capability = ARM64_WORKAROUND_2658417,
		/* Cortex-A510 r0p0 - r1p1 */
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1),
		MIDR_FIXED(MIDR_CPU_VAR_REV(1,1), BIT(25)),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_3194386
	{
		.desc = "SSBS not fully self-synchronizing",
		.capability = ARM64_WORKAROUND_SPECULATIVE_SSBS,
		ERRATA_MIDR_RANGE_LIST(erratum_spec_ssbs_list),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_4311569
	{
		/* Command-line driven; see need_arm_si_l1_workaround_4311569() */
		.capability = ARM64_WORKAROUND_4311569,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = need_arm_si_l1_workaround_4311569,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_4193714
	{
		.desc = "C1-Pro SME DVMSync early acknowledgement",
		.capability = ARM64_WORKAROUND_4193714,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_sme_dvmsync_erratum,
		.cpu_enable = cpu_enable_sme_dvmsync,
		/* C1-Pro r0p0 - r1p2 (the latter only when REVIDR_EL1[0]==0) */
		.midr_range = MIDR_RANGE(MIDR_C1_PRO, 0, 0, 1, 2),
		MIDR_FIXED(MIDR_CPU_VAR_REV(1, 2), BIT(0)),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
	{
		.desc = "ARM errata 2966298, 3117295",
		.capability = ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD,
		/* Cortex-A520 r0p0 - r0p1 */
		ERRATA_MIDR_RANGE_LIST(erratum_spec_unpriv_load_list),
	},
#endif
#ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38
	{
		.desc = "AmpereOne erratum AC03_CPU_38",
		.capability = ARM64_WORKAROUND_AMPERE_AC03_CPU_38,
		ERRATA_MIDR_RANGE_LIST(erratum_ac03_cpu_38_list),
	},
#endif
#ifdef CONFIG_AMPERE_ERRATUM_AC04_CPU_23
	{
		.desc = "AmpereOne erratum AC04_CPU_23",
		.capability = ARM64_WORKAROUND_AMPERE_AC04_CPU_23,
		ERRATA_MIDR_RANGE_LIST(erratum_ac04_cpu_23_list),
	},
#endif
	{
		.desc = "Broken CNTVOFF_EL2",
		.capability = ARM64_WORKAROUND_QCOM_ORYON_CNTVOFF,
		ERRATA_MIDR_RANGE_LIST(((const struct midr_range[]) {
					MIDR_ALL_VERSIONS(MIDR_QCOM_ORYON_X1),
					{}
				})),
	},
	{
		.desc = "Apple IMPDEF PMUv3 Traps",
		.capability = ARM64_WORKAROUND_PMUV3_IMPDEF_TRAPS,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_impdef_pmuv3,
		.cpu_enable = cpu_enable_impdef_pmuv3_traps,
	},
	{
	}
};
974