// SPDX-License-Identifier: GPL-2.0-only
/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 */

#include <linux/arm-smccc.h>
#include <linux/types.h>
#include <linux/cpu.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
#include <asm/kvm_asm.h>
#include <asm/smp_plat.h>

static u64 target_impl_cpu_num;
static struct target_impl_cpu *target_impl_cpus;

bool cpu_errata_set_target_impl(u64 num, void *impl_cpus)
{
	if (target_impl_cpu_num || !num || !impl_cpus)
		return false;

	target_impl_cpu_num = num;
	target_impl_cpus = impl_cpus;
	return true;
}
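
/*
 * Editor's note -- illustrative sketch, not part of the original source:
 * cpu_errata_set_target_impl() is a write-once latch. It accepts exactly
 * one non-empty table of {MIDR, REVIDR} pairs and rejects every later
 * attempt. A hypothetical caller that has parsed such a table from
 * firmware (the names fw_num, fw_cpus and parse_fw_impl_list() are
 * invented for illustration) might do:
 *
 *	static struct target_impl_cpu fw_cpus[NR_CPUS];
 *	u64 fw_num = parse_fw_impl_list(fw_cpus);
 *
 *	if (!cpu_errata_set_target_impl(fw_num, fw_cpus))
 *		pr_warn("target implementation table already set\n");
 *
 * Once latched, the MIDR matching helpers below consult every entry in
 * the table instead of the local CPU's MIDR_EL1/REVIDR_EL1.
 */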

static inline bool is_midr_in_range(struct midr_range const *range)
{
	int i;

	if (!target_impl_cpu_num)
		return midr_is_cpu_model_range(read_cpuid_id(), range->model,
					       range->rv_min, range->rv_max);

	for (i = 0; i < target_impl_cpu_num; i++) {
		if (midr_is_cpu_model_range(target_impl_cpus[i].midr,
					    range->model,
					    range->rv_min, range->rv_max))
			return true;
	}
	return false;
}

bool is_midr_in_range_list(struct midr_range const *ranges)
{
	while (ranges->model)
		if (is_midr_in_range(ranges++))
			return true;
	return false;
}
EXPORT_SYMBOL_GPL(is_midr_in_range_list);
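
/*
 * Editor's note -- illustrative sketch, not part of the original source:
 * callers pass a zero-terminated array of midr_range entries; the empty
 * initialiser {} leaves .model == 0, which ends the walk above. A
 * hypothetical table and query would look like:
 *
 *	static const struct midr_range affected[] = {
 *		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
 *		MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),	// r0p0..r3p1
 *		{},
 *	};
 *
 *	if (is_midr_in_range_list(affected))
 *		apply_workaround();	// invented name, for illustration
 */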

static bool __maybe_unused
__is_affected_midr_range(const struct arm64_cpu_capabilities *entry,
			 u32 midr, u32 revidr)
{
	const struct arm64_midr_revidr *fix;

	if (!is_midr_in_range(&entry->midr_range))
		return false;

	midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
	for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
		if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
			return false;
	return true;
}

static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
	int i;

	if (!target_impl_cpu_num) {
		WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
		return __is_affected_midr_range(entry, read_cpuid_id(),
						read_cpuid(REVIDR_EL1));
	}

	for (i = 0; i < target_impl_cpu_num; i++) {
		if (__is_affected_midr_range(entry, target_impl_cpus[i].midr,
					     target_impl_cpus[i].revidr))
			return true;
	}
	return false;
}

static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
			    int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(entry->midr_range_list);
}

static bool __maybe_unused
is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
{
	u32 model;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	model = read_cpuid_id();
	model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
		 MIDR_ARCHITECTURE_MASK;

	return model == entry->midr_range.model;
}

static bool
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
			  int scope)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
	u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask;
	u64 ctr_raw, ctr_real;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/*
	 * We want to make sure that all the CPUs in the system expose
	 * a consistent CTR_EL0 to make sure that applications behave
	 * correctly with migration.
	 *
	 * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0:
	 *
	 * 1) It is safe if the system doesn't support IDC, as the CPU anyway
	 *    reports IDC = 0, consistent with the rest.
	 *
	 * 2) If the system has IDC, it is still safe as we trap CTR_EL0
	 *    access on this CPU via the ARM64_HAS_CACHE_IDC capability.
	 *
	 * So, we need to make sure either the raw CTR_EL0 or the effective
	 * CTR_EL0 matches the system's copy to allow a secondary CPU to boot.
	 */
	ctr_raw = read_cpuid_cachetype() & mask;
	ctr_real = read_cpuid_effective_cachetype() & mask;

	return (ctr_real != sys) && (ctr_raw != sys);
}
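
/*
 * Editor's note -- worked example with invented values: suppose the
 * strict mask covers CTR_EL0.IDC and the sanitised sys_val has IDC = 1.
 * A secondary CPU whose raw CTR_EL0 reports IDC = 0, but whose cache
 * configuration makes IDC behaviour effective anyway, gives:
 *
 *	ctr_raw  != sys		// raw IDC bit is clear
 *	ctr_real == sys		// effective view has IDC set
 *
 * so the expression above returns false and the CPU may boot; userspace
 * only ever sees the consistent, trapped CTR_EL0. Only a CPU where both
 * views disagree with sys_val is flagged as mismatched.
 */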

static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
	bool enable_uct_trap = false;

	/* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
	if ((read_cpuid_cachetype() & mask) !=
	    (arm64_ftr_reg_ctrel0.sys_val & mask))
		enable_uct_trap = true;

	/* ... or if the system is affected by an erratum */
	if (cap->capability == ARM64_WORKAROUND_1542419)
		enable_uct_trap = true;

	if (enable_uct_trap)
		sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}
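
/*
 * Editor's note -- not part of the original source: clearing
 * SCTLR_EL1.UCT makes EL0 reads of CTR_EL0 trap to EL1, where the
 * handler can emulate the sanitised system-wide value. The
 * sysreg_clear_set(reg, clear, set) helper is a read-modify-write that
 * only writes back when bits actually change, equivalent in effect to:
 *
 *	u64 old = read_sysreg(sctlr_el1);
 *	u64 new = (old & ~(u64)SCTLR_EL1_UCT) | 0;
 *
 *	if (new != old)
 *		write_sysreg(new, sctlr_el1);
 */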

#ifdef CONFIG_ARM64_ERRATUM_1463225
static bool
has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
			       int scope)
{
	return is_affected_midr_range_list(entry, scope) && is_kernel_in_hyp_mode();
}
#endif

static void __maybe_unused
cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{
	sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
}

#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)					\
	.matches = is_affected_midr_range,				\
	.midr_range = MIDR_ALL_VERSIONS(model)

#define MIDR_FIXED(rev, revidr_mask) \
	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)		\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,				\
	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)				\
	.matches = is_affected_midr_range_list,			\
	.midr_range_list = list

/* Errata affecting a range of revisions of a given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)	 \
	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev)	\
	ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given model */
#define ERRATA_MIDR_ALL_VERSIONS(model)				\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with the same workaround */
#define ERRATA_MIDR_RANGE_LIST(midr_list)			\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE_LIST(midr_list)
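
/*
 * Editor's note -- illustrative expansion, not part of the original
 * source: the helpers above only fill designated initialisers, so an
 * entry such as
 *
 *	{
 *		.desc = "ARM erratum 845719",
 *		.capability = ARM64_WORKAROUND_845719,
 *		ERRATA_MIDR_RANGE_LIST(erratum_845719_list),
 *	},
 *
 * expands to
 *
 *	{
 *		.desc = "ARM erratum 845719",
 *		.capability = ARM64_WORKAROUND_845719,
 *		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
 *		.matches = is_affected_midr_range_list,
 *		.midr_range_list = erratum_845719_list,
 *	},
 */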

static const __maybe_unused struct midr_range tx2_family_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
	{},
};

static bool __maybe_unused
needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
			 int scope)
{
	int i;

	if (!is_affected_midr_range_list(entry, scope) ||
	    !is_hyp_mode_available())
		return false;

	for_each_possible_cpu(i) {
		if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
			return true;
	}

	return false;
}
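
/*
 * Editor's note -- not part of the original source: the loop above
 * detects SMT by checking whether any CPU has a non-zero MPIDR affinity
 * level 0, i.e. is a thread within a core rather than a core itself.
 * MPIDR_AFFINITY_LEVEL() extracts one 8-bit affinity field, so for a
 * hypothetical MPIDR_EL1 value of 0x0101 (Aff1 = 1, Aff0 = 1):
 *
 *	MPIDR_AFFINITY_LEVEL(0x0101, 0) == 1	// thread within core
 *	MPIDR_AFFINITY_LEVEL(0x0101, 1) == 1	// core within cluster
 *
 * A single CPU with Aff0 != 0 is enough to require the TVM workaround.
 */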

static bool __maybe_unused
has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
				int scope)
{
	bool has_dic = read_cpuid_cachetype() & BIT(CTR_EL0_DIC_SHIFT);
	const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range(&range) && has_dic;
}

#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0)
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1286807
	{
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
	},
	{
		/* Kryo4xx Gold (rcpe to rfpe) => (r0p0 to r3p0) */
		ERRATA_MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xe),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2441007
	{
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2441009
	{
		/* Cortex-A510 r0p0 -> r1p1. Fixed in r1p2 */
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1),
	},
#endif
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_23154
static const struct midr_range cavium_erratum_23154_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_THUNDERX),
	MIDR_ALL_VERSIONS(MIDR_THUNDERX_81XX),
	MIDR_ALL_VERSIONS(MIDR_THUNDERX_83XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_98XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_96XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXN),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXMM),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXO),
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_27456
const struct midr_range cavium_erratum_27456_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
	/* Cavium ThunderX, T81 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_30115
static const struct midr_range cavium_erratum_30115_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.2 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 2),
	/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
	MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
	/* Cavium ThunderX, T83 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
	{},
};
#endif

#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = {
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
	{},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
static const struct midr_range workaround_clean_cache[] = {
#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	/* Cortex-A53 r0p[012]: ARM errata 826319, 827319, 824069 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
#endif
#ifdef	CONFIG_ARM64_ERRATUM_819472
	/* Cortex-A53 r0p[01]: ARM erratum 819472 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
#endif
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_1418040
/*
 * - 1188873 affects r0p0 to r2p0
 * - 1418040 affects r0p0 to r3p1
 */
static const struct midr_range erratum_1418040_list[] = {
	/* Cortex-A76 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
	/* Neoverse-N1 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
	/* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
	MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_845719
static const struct midr_range erratum_845719_list[] = {
	/* Cortex-A53 r0p[01234] */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
	/* Brahma-B53 r0p[0] */
	MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
	/* Kryo2XX Silver rAp4 */
	MIDR_REV(MIDR_QCOM_KRYO_2XX_SILVER, 0xa, 0x4),
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_843419
static const struct arm64_cpu_capabilities erratum_843419_list[] = {
	{
		/* Cortex-A53 r0p[01234] */
		.matches = is_affected_midr_range,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
		MIDR_FIXED(0x4, BIT(8)),
	},
	{
		/* Brahma-B53 r0p[0] */
		.matches = is_affected_midr_range,
		ERRATA_MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
	},
	{},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
static const struct midr_range erratum_speculative_at_list[] = {
#ifdef CONFIG_ARM64_ERRATUM_1165522
	/* Cortex A76 r0p0 to r2p0 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1319367
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1530923
	/* Cortex A55 r0p0 to r2p0 */
	MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 2, 0),
	/* Kryo4xx Silver (rdpe => r1p0) */
	MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
#endif
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_1463225
static const struct midr_range erratum_1463225[] = {
	/* Cortex-A76 r0p0 - r3p1 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
	/* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
	MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
	{},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE
static const struct midr_range trbe_overwrite_fill_mode_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2139208
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2119858
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
	MIDR_RANGE(MIDR_CORTEX_X2, 0, 0, 2, 0),
#endif
	{},
};
#endif	/* CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE */

#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE
static const struct midr_range tsb_flush_fail_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2067961
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2054223
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
#endif
	{},
};
#endif	/* CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE */

#ifdef CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
static struct midr_range trbe_write_out_of_range_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2253138
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2224489
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
	MIDR_RANGE(MIDR_CORTEX_X2, 0, 0, 2, 0),
#endif
	{},
};
#endif /* CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE */

#ifdef CONFIG_ARM64_ERRATUM_1742098
static struct midr_range broken_aarch32_aes[] = {
	MIDR_RANGE(MIDR_CORTEX_A57, 0, 1, 0xf, 0xf),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	{},
};
#endif /* CONFIG_ARM64_ERRATUM_1742098 */

#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
static const struct midr_range erratum_spec_unpriv_load_list[] = {
#ifdef CONFIG_ARM64_ERRATUM_3117295
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A510),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2966298
	/* Cortex-A520 r0p0 to r0p1 */
	MIDR_REV_RANGE(MIDR_CORTEX_A520, 0, 0, 1),
#endif
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_3194386
static const struct midr_range erratum_spec_ssbs_list[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A715),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A720),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A725),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X1C),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X3),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X4),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X925),
	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N3),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3),
	{}
};
#endif

#ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38
static const struct midr_range erratum_ac03_cpu_38_list[] = {
	MIDR_ALL_VERSIONS(MIDR_AMPERE1),
	MIDR_ALL_VERSIONS(MIDR_AMPERE1A),
	{},
};
#endif

const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
	{
		.desc = "ARM errata 826319, 827319, 824069, or 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_RANGE_LIST(workaround_clean_cache),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
	/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075",
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
	/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_843419
	{
		.desc = "ARM erratum 843419",
		.capability = ARM64_WORKAROUND_843419,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = erratum_843419_list,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
		.desc = "ARM erratum 845719",
		.capability = ARM64_WORKAROUND_845719,
		ERRATA_MIDR_RANGE_LIST(erratum_845719_list),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
		.desc = "Cavium errata 23154 and 38545",
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_23154_cpus),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
	{
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus),
	},
#endif
	{
		.desc = "Mismatched cache type (CTR_EL0)",
		.capability = ARM64_MISMATCHED_CACHE_TYPE,
		.matches = has_mismatched_cache_type,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
	{
		.desc = "Qualcomm Technologies Falkor/Kryo erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = qcom_erratum_1003_list,
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
	{
		.desc = "Qualcomm erratum 1009, or ARM erratum 1286807, 2441007, 2441009",
		.capability = ARM64_WORKAROUND_REPEAT_TLBI,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = arm64_repeat_tlbi_list,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
	/* Cortex-A73 all versions */
		.desc = "ARM erratum 858921",
		.capability = ARM64_WORKAROUND_858921,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	},
#endif
	{
		.desc = "Spectre-v2",
		.capability = ARM64_SPECTRE_V2,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_spectre_v2,
		.cpu_enable = spectre_v2_enable_mitigation,
	},
#ifdef CONFIG_RANDOMIZE_BASE
	{
	/* Must come after the Spectre-v2 entry */
		.desc = "Spectre-v3a",
		.capability = ARM64_SPECTRE_V3A,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_spectre_v3a,
		.cpu_enable = spectre_v3a_enable_mitigation,
	},
#endif
	{
		.desc = "Spectre-v4",
		.capability = ARM64_SPECTRE_V4,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_spectre_v4,
		.cpu_enable = spectre_v4_enable_mitigation,
	},
	{
		.desc = "Spectre-BHB",
		.capability = ARM64_SPECTRE_BHB,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = is_spectre_bhb_affected,
		.cpu_enable = spectre_bhb_enable_mitigation,
	},
#ifdef CONFIG_ARM64_ERRATUM_1418040
	{
		.desc = "ARM erratum 1418040",
		.capability = ARM64_WORKAROUND_1418040,
		ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
		/*
		 * We need to allow affected CPUs to come in late, but
		 * also need the non-affected CPUs to be able to come
		 * in at any point in time. Wonderful.
		 */
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
	{
		.desc = "ARM errata 1165522, 1319367, or 1530923",
		.capability = ARM64_WORKAROUND_SPECULATIVE_AT,
		ERRATA_MIDR_RANGE_LIST(erratum_speculative_at_list),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1463225
	{
		.desc = "ARM erratum 1463225",
		.capability = ARM64_WORKAROUND_1463225,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_cortex_a76_erratum_1463225,
		.midr_range_list = erratum_1463225,
	},
#endif
#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
	{
		.desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)",
		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
		.matches = needs_tx2_tvm_workaround,
	},
	{
		.desc = "Cavium ThunderX2 erratum 219 (PRFM removal)",
		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM,
		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1542419
	{
		/* we depend on the firmware portion for correctness */
		.desc = "ARM erratum 1542419 (kernel portion)",
		.capability = ARM64_WORKAROUND_1542419,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_neoverse_n1_erratum_1542419,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1508412
	{
		/* we depend on the firmware portion for correctness */
		.desc = "ARM erratum 1508412 (kernel portion)",
		.capability = ARM64_WORKAROUND_1508412,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A77,
				  0, 0,
				  1, 0),
	},
#endif
#ifdef CONFIG_NVIDIA_CARMEL_CNP_ERRATUM
	{
		/* NVIDIA Carmel */
		.desc = "NVIDIA Carmel CNP erratum",
		.capability = ARM64_WORKAROUND_NVIDIA_CARMEL_CNP,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE
	{
		/*
		 * The erratum workaround is handled within the TRBE
		 * driver and can be applied per-cpu. So, we can allow
		 * a late CPU to come online with this erratum.
		 */
		.desc = "ARM erratum 2119858 or 2139208",
		.capability = ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE,
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
		CAP_MIDR_RANGE_LIST(trbe_overwrite_fill_mode_cpus),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE
	{
		.desc = "ARM erratum 2067961 or 2054223",
		.capability = ARM64_WORKAROUND_TSB_FLUSH_FAILURE,
		ERRATA_MIDR_RANGE_LIST(tsb_flush_fail_cpus),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
	{
		.desc = "ARM erratum 2253138 or 2224489",
		.capability = ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE,
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
		CAP_MIDR_RANGE_LIST(trbe_write_out_of_range_cpus),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2645198
	{
		.desc = "ARM erratum 2645198",
		.capability = ARM64_WORKAROUND_2645198,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A715)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2077057
	{
		.desc = "ARM erratum 2077057",
		.capability = ARM64_WORKAROUND_2077057,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2064142
	{
		.desc = "ARM erratum 2064142",
		.capability = ARM64_WORKAROUND_2064142,

		/* Cortex-A510 r0p0 - r0p2 */
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2457168
	{
		.desc = "ARM erratum 2457168",
		.capability = ARM64_WORKAROUND_2457168,
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,

		/* Cortex-A510 r0p0-r1p1 */
		CAP_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2038923
	{
		.desc = "ARM erratum 2038923",
		.capability = ARM64_WORKAROUND_2038923,

		/* Cortex-A510 r0p0 - r0p2 */
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1902691
	{
		.desc = "ARM erratum 1902691",
		.capability = ARM64_WORKAROUND_1902691,

		/* Cortex-A510 r0p0 - r0p1 */
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 1)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1742098
	{
		.desc = "ARM erratum 1742098",
		.capability = ARM64_WORKAROUND_1742098,
		CAP_MIDR_RANGE_LIST(broken_aarch32_aes),
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2658417
	{
		.desc = "ARM erratum 2658417",
		.capability = ARM64_WORKAROUND_2658417,
		/* Cortex-A510 r0p0 - r1p1 */
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1),
		MIDR_FIXED(MIDR_CPU_VAR_REV(1,1), BIT(25)),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_3194386
	{
		.desc = "SSBS not fully self-synchronizing",
		.capability = ARM64_WORKAROUND_SPECULATIVE_SSBS,
		ERRATA_MIDR_RANGE_LIST(erratum_spec_ssbs_list),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
	{
		.desc = "ARM errata 2966298, 3117295",
		.capability = ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD,
		/* Cortex-A510 (all revisions) and Cortex-A520 r0p0 - r0p1 */
		ERRATA_MIDR_RANGE_LIST(erratum_spec_unpriv_load_list),
	},
#endif
#ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38
	{
		.desc = "AmpereOne erratum AC03_CPU_38",
		.capability = ARM64_WORKAROUND_AMPERE_AC03_CPU_38,
		ERRATA_MIDR_RANGE_LIST(erratum_ac03_cpu_38_list),
	},
#endif
	{
		.desc = "Broken CNTVOFF_EL2",
		.capability = ARM64_WORKAROUND_QCOM_ORYON_CNTVOFF,
		ERRATA_MIDR_RANGE_LIST(((const struct midr_range[]) {
					MIDR_ALL_VERSIONS(MIDR_QCOM_ORYON_X1),
					{}
				})),
	},
	{
	}
};
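
/*
 * Editor's note -- illustrative sketch, not part of the original source:
 * each entry above becomes a capability bit once the cpufeature code has
 * run ->matches() on the relevant CPUs and invoked any ->cpu_enable()
 * callback. Workaround sites elsewhere in the kernel then test the bit,
 * along the lines of:
 *
 *	if (cpus_have_final_cap(ARM64_WORKAROUND_REPEAT_TLBI))
 *		... issue the TLBI a second time ...
 *
 * cpus_have_final_cap() is the real cpufeature query helper; the
 * REPEAT_TLBI usage shown is a simplified stand-in for the actual TLB
 * flushing code, which patches this check in via alternatives.
 */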