// SPDX-License-Identifier: GPL-2.0-only
/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 */

#include <linux/arm-smccc.h>
#include <linux/types.h>
#include <linux/cpu.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
#include <asm/kvm_asm.h>
#include <asm/smp_plat.h>

static u64 target_impl_cpu_num;
static struct target_impl_cpu *target_impl_cpus;

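/*
 * Register the set of CPU implementations (MIDR/REVIDR pairs) this kernel may
 * run on, typically discovered from the hypervisor/firmware when a guest can
 * migrate between hosts with different CPUs. Once a list is installed, all
 * MIDR-based errata matching below considers every implementation in the
 * list rather than just the CPU we happen to be running on. The list can
 * only be set once.
 */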
bool cpu_errata_set_target_impl(u64 num, void *impl_cpus)
{
	if (target_impl_cpu_num || !num || !impl_cpus)
		return false;

	target_impl_cpu_num = num;
	target_impl_cpus = impl_cpus;
	return true;
}

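/*
 * Check a MIDR range against either the local CPU or, if a target
 * implementation list has been registered, against every CPU implementation
 * in that list.
 */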
static inline bool is_midr_in_range(struct midr_range const *range)
{
	int i;

	if (!target_impl_cpu_num)
		return midr_is_cpu_model_range(read_cpuid_id(), range->model,
					       range->rv_min, range->rv_max);

	for (i = 0; i < target_impl_cpu_num; i++) {
		if (midr_is_cpu_model_range(target_impl_cpus[i].midr,
					    range->model,
					    range->rv_min, range->rv_max))
			return true;
	}
	return false;
}

bool is_midr_in_range_list(struct midr_range const *ranges)
{
	while (ranges->model)
		if (is_midr_in_range(ranges++))
			return true;
	return false;
}
EXPORT_SYMBOL_GPL(is_midr_in_range_list);

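/*
 * A CPU is affected by an erratum entry if its MIDR falls within the entry's
 * range, unless one of the entry's fixed_revs records indicates (via a REVIDR
 * bit) that the erratum is already fixed in that particular revision.
 */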
static bool __maybe_unused
__is_affected_midr_range(const struct arm64_cpu_capabilities *entry,
			 u32 midr, u32 revidr)
{
	const struct arm64_midr_revidr *fix;
	if (!is_midr_in_range(&entry->midr_range))
		return false;

	midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
	for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
		if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
			return false;
	return true;
}

static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
	int i;

	if (!target_impl_cpu_num) {
		WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
		return __is_affected_midr_range(entry, read_cpuid_id(),
						read_cpuid(REVIDR_EL1));
	}

	for (i = 0; i < target_impl_cpu_num; i++) {
		if (__is_affected_midr_range(entry, target_impl_cpus[i].midr,
					     target_impl_cpus[i].revidr))
			return true;
	}
	return false;
}

static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
			    int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(entry->midr_range_list);
}

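/*
 * Qualcomm Kryo CPUs share a single MIDR model value; compare only the
 * implementer, architecture and the top nibble of the part number so that
 * any Kryo core matches.
 */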
static bool __maybe_unused
is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
{
	u32 model;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	model = read_cpuid_id();
	model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
		 MIDR_ARCHITECTURE_MASK;

	return model == entry->midr_range.model;
}

static bool
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
			  int scope)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
	u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask;
	u64 ctr_raw, ctr_real;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/*
	 * We want to make sure that all the CPUs in the system expose
	 * a consistent CTR_EL0 to make sure that applications behave
	 * correctly with migration.
	 *
	 * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0:
	 *
	 * 1) It is safe if the system doesn't support IDC, as CPU anyway
	 *    reports IDC = 0, consistent with the rest.
	 *
	 * 2) If the system has IDC, it is still safe as we trap CTR_EL0
	 *    access on this CPU via the ARM64_HAS_CACHE_IDC capability.
	 *
	 * So, we need to make sure either the raw CTR_EL0 or the effective
	 * CTR_EL0 matches the system's copy to allow a secondary CPU to boot.
	 */
	ctr_raw = read_cpuid_cachetype() & mask;
	ctr_real = read_cpuid_effective_cachetype() & mask;

	return (ctr_real != sys) && (ctr_raw != sys);
}

#ifdef CONFIG_ARM64_ERRATUM_4311569
static DEFINE_STATIC_KEY_FALSE(arm_si_l1_workaround_4311569);
static int __init early_arm_si_l1_workaround_4311569_cfg(char *arg)
{
	static_branch_enable(&arm_si_l1_workaround_4311569);
	pr_info("Enabling cache maintenance workaround for ARM SI-L1 erratum 4311569\n");

	return 0;
}
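/* Enabled by passing "arm_si_l1_workaround_4311569" on the kernel command line. */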
early_param("arm_si_l1_workaround_4311569", early_arm_si_l1_workaround_4311569_cfg);

/*
 * Cache maintenance functions such as dcache_inval_poc() and dcache_clean_poc()
 * are already called (e.g. from head.S) before we get the chance to decide
 * whether to enable this workaround. Since its scope is limited to
 * non-coherent DMA agents, it is safe to leave the workaround off by default.
 */
static bool
need_arm_si_l1_workaround_4311569(const struct arm64_cpu_capabilities *entry, int scope)
{
	return static_branch_unlikely(&arm_si_l1_workaround_4311569);
}
#endif

static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
	bool enable_uct_trap = false;

	/* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
	if ((read_cpuid_cachetype() & mask) !=
	    (arm64_ftr_reg_ctrel0.sys_val & mask))
		enable_uct_trap = true;

	/* ... or if the system is affected by an erratum */
	if (cap->capability == ARM64_WORKAROUND_1542419)
		enable_uct_trap = true;

	if (enable_uct_trap)
		sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}

#ifdef CONFIG_ARM64_ERRATUM_1463225
static bool
has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
			       int scope)
{
	return is_affected_midr_range_list(entry, scope) && is_kernel_in_hyp_mode();
}
#endif

static void __maybe_unused
cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{
	sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
}

#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)					\
	.matches = is_affected_midr_range,				\
	.midr_range = MIDR_ALL_VERSIONS(model)

#define MIDR_FIXED(rev, revidr_mask) \
	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)		\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,				\
	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)				\
	.matches = is_affected_midr_range_list,			\
	.midr_range_list = list

/* Errata affecting a range of revisions of a given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)	 \
	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev)	\
	ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given model */
#define ERRATA_MIDR_ALL_VERSIONS(model)				\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of MIDR ranges, with the same workaround */
#define ERRATA_MIDR_RANGE_LIST(midr_list)			\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE_LIST(midr_list)

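/*
 * Illustrative (hypothetical) use of the helpers above in an errata table
 * entry; "NNNNNN"/"ARM64_WORKAROUND_NNNNNN" are stand-ins, not a real erratum:
 *
 *	{
 *		.desc = "ARM erratum NNNNNN",
 *		.capability = ARM64_WORKAROUND_NNNNNN,
 *		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57, 0, 0, 1, 2),
 *	},
 */
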
static const __maybe_unused struct midr_range tx2_family_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
	{},
};

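/*
 * The TVM workaround is only needed when at least one CPU reports a non-zero
 * MPIDR affinity level 0, i.e. when SMT appears to be in use.
 */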
static bool __maybe_unused
needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
			 int scope)
{
	int i;

	if (!is_affected_midr_range_list(entry, scope) ||
	    !is_hyp_mode_available())
		return false;

	for_each_possible_cpu(i) {
		if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
			return true;
	}

	return false;
}

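/*
 * Kernel portion of the erratum 1542419 workaround: it only applies to
 * Neoverse N1 CPUs that advertise CTR_EL0.DIC, in which case CTR_EL0 access
 * is trapped (see cpu_enable_trap_ctr_access()) so userspace falls back to
 * explicit I-cache maintenance.
 */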
static bool __maybe_unused
has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
				int scope)
{
	bool has_dic = read_cpuid_cachetype() & BIT(CTR_EL0_DIC_SHIFT);
	const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range(&range) && has_dic;
}

static const struct midr_range impdef_pmuv3_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM_PRO),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_PRO),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM_MAX),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_MAX),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_PRO),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_PRO),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_MAX),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_MAX),
	{},
};

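/*
 * Match CPUs (currently the Apple M1/M2 parts listed above) that expose only
 * an IMP DEF PMU while the kernel is running at EL2; the IMP DEF trap set up
 * by cpu_enable_impdef_pmuv3_traps() (HACR_EL2 bit 56) is then used to catch
 * accesses to those PMU registers.
 */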
static bool has_impdef_pmuv3(const struct arm64_cpu_capabilities *entry, int scope)
{
	u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
	unsigned int pmuver;

	if (!is_kernel_in_hyp_mode())
		return false;

	pmuver = cpuid_feature_extract_unsigned_field(dfr0,
						      ID_AA64DFR0_EL1_PMUVer_SHIFT);
	if (pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
		return false;

	return is_midr_in_range_list(impdef_pmuv3_cpus);
}

static void cpu_enable_impdef_pmuv3_traps(const struct arm64_cpu_capabilities *__unused)
{
	sysreg_clear_set_s(SYS_HACR_EL2, 0, BIT(56));
}

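/*
 * CPUs on which a TLBI may not be sufficient on its own and the invalidation
 * sequence has to be repeated (ARM64_WORKAROUND_REPEAT_TLBI).
 */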
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0)
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1286807
	{
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
	},
	{
		/* Kryo4xx Gold (rcpe to rfpe) => (r0p0 to r3p0) */
		ERRATA_MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xe),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2441007
	{
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2441009
	{
		/* Cortex-A510 r0p0 -> r1p1. Fixed in r1p2 */
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1),
	},
#endif
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_23154
static const struct midr_range cavium_erratum_23154_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_THUNDERX),
	MIDR_ALL_VERSIONS(MIDR_THUNDERX_81XX),
	MIDR_ALL_VERSIONS(MIDR_THUNDERX_83XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_98XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_96XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXN),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXMM),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXO),
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_27456
static const struct midr_range cavium_erratum_27456_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
	/* Cavium ThunderX, T81 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_30115
static const struct midr_range cavium_erratum_30115_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.2 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 2),
	/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
	MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
	/* Cavium ThunderX, T83 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
	{},
};
#endif

#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = {
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
	{},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
static const struct midr_range workaround_clean_cache[] = {
#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	/* Cortex-A53 r0p[012]: ARM errata 826319, 827319, 824069 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
#endif
#ifdef	CONFIG_ARM64_ERRATUM_819472
	/* Cortex-A53 r0p[01] : ARM errata 819472 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
#endif
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_1418040
/*
 * - 1188873 affects r0p0 to r2p0
 * - 1418040 affects r0p0 to r3p1
 */
static const struct midr_range erratum_1418040_list[] = {
	/* Cortex-A76 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
	/* Neoverse-N1 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
	/* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
	MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_845719
static const struct midr_range erratum_845719_list[] = {
	/* Cortex-A53 r0p[01234] */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
	/* Brahma-B53 r0p[0] */
	MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
	/* Kryo2XX Silver rAp4 */
	MIDR_REV(MIDR_QCOM_KRYO_2XX_SILVER, 0xa, 0x4),
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_843419
static const struct arm64_cpu_capabilities erratum_843419_list[] = {
	{
		/* Cortex-A53 r0p[01234] */
		.matches = is_affected_midr_range,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
		MIDR_FIXED(0x4, BIT(8)),
	},
	{
		/* Brahma-B53 r0p[0] */
		.matches = is_affected_midr_range,
		ERRATA_MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
	},
	{},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
static const struct midr_range erratum_speculative_at_list[] = {
#ifdef CONFIG_ARM64_ERRATUM_1165522
	/* Cortex A76 r0p0 to r2p0 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1319367
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1530923
	/* Cortex A55 r0p0 to r2p0 */
	MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 2, 0),
	/* Kryo4xx Silver (rdpe => r1p0) */
	MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
#endif
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_1463225
static const struct midr_range erratum_1463225[] = {
	/* Cortex-A76 r0p0 - r3p1 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
	/* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
	MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
	{},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE
static const struct midr_range trbe_overwrite_fill_mode_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2139208
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2119858
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
	MIDR_RANGE(MIDR_CORTEX_X2, 0, 0, 2, 0),
#endif
	{},
};
#endif	/* CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE */

#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE
static const struct midr_range tsb_flush_fail_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2067961
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2054223
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
#endif
	{},
};
#endif	/* CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE */

#ifdef CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
static struct midr_range trbe_write_out_of_range_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2253138
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2224489
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
	MIDR_RANGE(MIDR_CORTEX_X2, 0, 0, 2, 0),
#endif
	{},
};
#endif /* CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE */

#ifdef CONFIG_ARM64_ERRATUM_1742098
static struct midr_range broken_aarch32_aes[] = {
	MIDR_RANGE(MIDR_CORTEX_A57, 0, 1, 0xf, 0xf),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	{},
};
#endif /* CONFIG_ARM64_ERRATUM_1742098 */

#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
static const struct midr_range erratum_spec_unpriv_load_list[] = {
#ifdef CONFIG_ARM64_ERRATUM_3117295
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A510),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2966298
	/* Cortex-A520 r0p0 to r0p1 */
	MIDR_REV_RANGE(MIDR_CORTEX_A520, 0, 0, 1),
#endif
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_3194386
static const struct midr_range erratum_spec_ssbs_list[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A715),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A720),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A720AE),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A725),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X1C),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X3),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X4),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X925),
	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N3),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3AE),
	{}
};
#endif

#ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38
static const struct midr_range erratum_ac03_cpu_38_list[] = {
	MIDR_ALL_VERSIONS(MIDR_AMPERE1),
	MIDR_ALL_VERSIONS(MIDR_AMPERE1A),
	{},
};
#endif

#ifdef CONFIG_AMPERE_ERRATUM_AC04_CPU_23
static const struct midr_range erratum_ac04_cpu_23_list[] = {
	MIDR_ALL_VERSIONS(MIDR_AMPERE1A),
	{},
};
#endif

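/*
 * Table of erratum work-arounds, processed by the cpufeature framework along
 * with the architectural feature tables. Each ARM64_WORKAROUND_* capability
 * below is detected via its .matches callback and, where present, applied
 * via .cpu_enable.
 */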
const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
	{
		.desc = "ARM errata 826319, 827319, 824069, or 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_RANGE_LIST(workaround_clean_cache),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
	/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075",
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
	/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_843419
	{
		.desc = "ARM erratum 843419",
		.capability = ARM64_WORKAROUND_843419,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = erratum_843419_list,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
		.desc = "ARM erratum 845719",
		.capability = ARM64_WORKAROUND_845719,
		ERRATA_MIDR_RANGE_LIST(erratum_845719_list),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
		.desc = "Cavium errata 23154 and 38545",
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_23154_cpus),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
	{
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus),
	},
#endif
	{
		.desc = "Mismatched cache type (CTR_EL0)",
		.capability = ARM64_MISMATCHED_CACHE_TYPE,
		.matches = has_mismatched_cache_type,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
	{
		.desc = "Qualcomm Technologies Falkor/Kryo erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = qcom_erratum_1003_list,
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
	{
		.desc = "Qualcomm erratum 1009, or ARM erratum 1286807, 2441009",
		.capability = ARM64_WORKAROUND_REPEAT_TLBI,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = arm64_repeat_tlbi_list,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
	/* Cortex-A73 all versions */
		.desc = "ARM erratum 858921",
		.capability = ARM64_WORKAROUND_858921,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	},
#endif
	{
		.desc = "Spectre-v2",
		.capability = ARM64_SPECTRE_V2,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_spectre_v2,
		.cpu_enable = spectre_v2_enable_mitigation,
	},
#ifdef CONFIG_RANDOMIZE_BASE
	{
	/* Must come after the Spectre-v2 entry */
		.desc = "Spectre-v3a",
		.capability = ARM64_SPECTRE_V3A,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_spectre_v3a,
		.cpu_enable = spectre_v3a_enable_mitigation,
	},
#endif
	{
		.desc = "Spectre-v4",
		.capability = ARM64_SPECTRE_V4,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_spectre_v4,
		.cpu_enable = spectre_v4_enable_mitigation,
	},
	{
		.desc = "Spectre-BHB",
		.capability = ARM64_SPECTRE_BHB,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = is_spectre_bhb_affected,
		.cpu_enable = spectre_bhb_enable_mitigation,
	},
#ifdef CONFIG_ARM64_ERRATUM_1418040
	{
		.desc = "ARM erratum 1418040",
		.capability = ARM64_WORKAROUND_1418040,
		ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
		/*
		 * We need to allow affected CPUs to come in late, but
		 * also need the non-affected CPUs to be able to come
		 * in at any point in time. Wonderful.
		 */
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
	{
		.desc = "ARM errata 1165522, 1319367, or 1530923",
		.capability = ARM64_WORKAROUND_SPECULATIVE_AT,
		ERRATA_MIDR_RANGE_LIST(erratum_speculative_at_list),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1463225
	{
		.desc = "ARM erratum 1463225",
		.capability = ARM64_WORKAROUND_1463225,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_cortex_a76_erratum_1463225,
		.midr_range_list = erratum_1463225,
	},
#endif
#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
	{
		.desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)",
		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
		.matches = needs_tx2_tvm_workaround,
	},
	{
		.desc = "Cavium ThunderX2 erratum 219 (PRFM removal)",
		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM,
		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1542419
	{
		/* we depend on the firmware portion for correctness */
		.desc = "ARM erratum 1542419 (kernel portion)",
		.capability = ARM64_WORKAROUND_1542419,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_neoverse_n1_erratum_1542419,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1508412
	{
		/* we depend on the firmware portion for correctness */
		.desc = "ARM erratum 1508412 (kernel portion)",
		.capability = ARM64_WORKAROUND_1508412,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A77,
				  0, 0,
				  1, 0),
	},
#endif
#ifdef CONFIG_NVIDIA_CARMEL_CNP_ERRATUM
	{
		/* NVIDIA Carmel */
		.desc = "NVIDIA Carmel CNP erratum",
		.capability = ARM64_WORKAROUND_NVIDIA_CARMEL_CNP,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE
	{
		/*
		 * The erratum work around is handled within the TRBE
		 * driver and can be applied per-cpu. So, we can allow
		 * a late CPU to come online with this erratum.
		 */
		.desc = "ARM erratum 2119858 or 2139208",
		.capability = ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE,
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
		CAP_MIDR_RANGE_LIST(trbe_overwrite_fill_mode_cpus),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE
	{
		.desc = "ARM erratum 2067961 or 2054223",
		.capability = ARM64_WORKAROUND_TSB_FLUSH_FAILURE,
		ERRATA_MIDR_RANGE_LIST(tsb_flush_fail_cpus),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
	{
		.desc = "ARM erratum 2253138 or 2224489",
		.capability = ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE,
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
		CAP_MIDR_RANGE_LIST(trbe_write_out_of_range_cpus),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2645198
	{
		.desc = "ARM erratum 2645198",
		.capability = ARM64_WORKAROUND_2645198,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A715)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2077057
	{
		.desc = "ARM erratum 2077057",
		.capability = ARM64_WORKAROUND_2077057,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2064142
	{
		.desc = "ARM erratum 2064142",
		.capability = ARM64_WORKAROUND_2064142,

		/* Cortex-A510 r0p0 - r0p2 */
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2457168
	{
		.desc = "ARM erratum 2457168",
		.capability = ARM64_WORKAROUND_2457168,
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,

		/* Cortex-A510 r0p0-r1p1 */
		CAP_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2038923
	{
		.desc = "ARM erratum 2038923",
		.capability = ARM64_WORKAROUND_2038923,

		/* Cortex-A510 r0p0 - r0p2 */
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1902691
	{
		.desc = "ARM erratum 1902691",
		.capability = ARM64_WORKAROUND_1902691,

		/* Cortex-A510 r0p0 - r0p1 */
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 1)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1742098
	{
		.desc = "ARM erratum 1742098",
		.capability = ARM64_WORKAROUND_1742098,
		CAP_MIDR_RANGE_LIST(broken_aarch32_aes),
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2658417
	{
		.desc = "ARM erratum 2658417",
		.capability = ARM64_WORKAROUND_2658417,
		/* Cortex-A510 r0p0 - r1p1 */
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1),
		MIDR_FIXED(MIDR_CPU_VAR_REV(1,1), BIT(25)),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_3194386
	{
		.desc = "SSBS not fully self-synchronizing",
		.capability = ARM64_WORKAROUND_SPECULATIVE_SSBS,
		ERRATA_MIDR_RANGE_LIST(erratum_spec_ssbs_list),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_4311569
	{
		.capability = ARM64_WORKAROUND_4311569,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = need_arm_si_l1_workaround_4311569,
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
	{
		.desc = "ARM errata 2966298, 3117295",
		.capability = ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD,
		/* Cortex-A520 r0p0 - r0p1 */
		ERRATA_MIDR_RANGE_LIST(erratum_spec_unpriv_load_list),
	},
#endif
#ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38
	{
		.desc = "AmpereOne erratum AC03_CPU_38",
		.capability = ARM64_WORKAROUND_AMPERE_AC03_CPU_38,
		ERRATA_MIDR_RANGE_LIST(erratum_ac03_cpu_38_list),
	},
#endif
#ifdef CONFIG_AMPERE_ERRATUM_AC04_CPU_23
	{
		.desc = "AmpereOne erratum AC04_CPU_23",
		.capability = ARM64_WORKAROUND_AMPERE_AC04_CPU_23,
		ERRATA_MIDR_RANGE_LIST(erratum_ac04_cpu_23_list),
	},
#endif
	{
		.desc = "Broken CNTVOFF_EL2",
		.capability = ARM64_WORKAROUND_QCOM_ORYON_CNTVOFF,
		ERRATA_MIDR_RANGE_LIST(((const struct midr_range[]) {
					MIDR_ALL_VERSIONS(MIDR_QCOM_ORYON_X1),
					{}
				})),
	},
	{
		.desc = "Apple IMPDEF PMUv3 Traps",
		.capability = ARM64_WORKAROUND_PMUV3_IMPDEF_TRAPS,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_impdef_pmuv3,
		.cpu_enable = cpu_enable_impdef_pmuv3_traps,
	},
	{
	}
};
944