xref: /linux/arch/arm64/kernel/cpu_errata.c (revision 3fd6c59042dbba50391e30862beac979491145fe)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 */

#include <linux/arm-smccc.h>
#include <linux/types.h>
#include <linux/cpu.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
#include <asm/kvm_asm.h>
#include <asm/smp_plat.h>

static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
	const struct arm64_midr_revidr *fix;
	u32 midr = read_cpuid_id(), revidr;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	if (!is_midr_in_range(midr, &entry->midr_range))
		return false;

	midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
	revidr = read_cpuid(REVIDR_EL1);
	for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
		if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
			return false;

	return true;
}

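/*
 * For example, the ARM64_WORKAROUND_843419 entry below pairs
 * ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4) with
 * MIDR_FIXED(0x4, BIT(8)): an r0p4 part whose REVIDR_EL1 has bit 8
 * set already carries the hardware fix, so the fixed_revs loop above
 * treats it as not affected.
 */
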
static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
			    int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
}

static bool __maybe_unused
is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
{
	u32 model;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	model = read_cpuid_id();
	model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
		 MIDR_ARCHITECTURE_MASK;

	return model == entry->midr_range.model;
}

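/*
 * Note that is_kryo_midr() masks the MIDR down to the implementor,
 * architecture, and top nibble of the part number, so any Qualcomm
 * part sharing that nibble with entry->midr_range.model (e.g. all
 * Kryo 0x2xx parts for MIDR_QCOM_KRYO) matches, regardless of
 * variant or revision.
 */
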
static bool
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
			  int scope)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
	u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask;
	u64 ctr_raw, ctr_real;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/*
	 * We want to make sure that all the CPUs in the system expose
	 * a consistent CTR_EL0 to make sure that applications behave
	 * correctly with migration.
	 *
	 * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0:
	 *
	 * 1) It is safe if the system doesn't support IDC, as the CPU
	 *    anyway reports IDC = 0, consistent with the rest.
	 *
	 * 2) If the system has IDC, it is still safe as we trap CTR_EL0
	 *    access on this CPU via the ARM64_HAS_CACHE_IDC capability.
	 *
	 * So, we need to make sure either the raw CTR_EL0 or the effective
	 * CTR_EL0 matches the system's copy to allow a secondary CPU to boot.
	 */
	ctr_raw = read_cpuid_cachetype() & mask;
	ctr_real = read_cpuid_effective_cachetype() & mask;

	return (ctr_real != sys) && (ctr_raw != sys);
}

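/*
 * A worked example of the check above: if the system-wide CTR_EL0 has
 * IDC set but this CPU's raw CTR_EL0 reports IDC = 0, then
 * ctr_raw != sys, yet ctr_real can still equal sys because
 * read_cpuid_effective_cachetype() reflects the CPU's effective IDC
 * behaviour. The CPU is then allowed to boot, and case 2) above
 * applies.
 */
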
static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
	bool enable_uct_trap = false;

	/* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
	if ((read_cpuid_cachetype() & mask) !=
	    (arm64_ftr_reg_ctrel0.sys_val & mask))
		enable_uct_trap = true;

	/* ... or if the system is affected by an erratum */
	if (cap->capability == ARM64_WORKAROUND_1542419)
		enable_uct_trap = true;

	if (enable_uct_trap)
		sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}

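/*
 * Clearing SCTLR_EL1.UCT makes EL0 reads of CTR_EL0 trap to EL1,
 * where the kernel can emulate the access and hand userspace the
 * safe, system-wide value instead of this CPU's mismatched one (the
 * trap handling itself lives outside this file).
 */
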
#ifdef CONFIG_ARM64_ERRATUM_1463225
static bool
has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
			       int scope)
{
	return is_affected_midr_range_list(entry, scope) && is_kernel_in_hyp_mode();
}
#endif

static void __maybe_unused
cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{
	sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
}

#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)					\
	.matches = is_affected_midr_range,				\
	.midr_range = MIDR_ALL_VERSIONS(model)

#define MIDR_FIXED(rev, revidr_mask) \
	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)		\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,				\
	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)				\
	.matches = is_affected_midr_range_list,			\
	.midr_range_list = list

/* Errata affecting a range of revisions of a given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)	 \
	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev)	\
	ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given model */
#define ERRATA_MIDR_ALL_VERSIONS(model)				\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with the same workaround */
#define ERRATA_MIDR_RANGE_LIST(midr_list)			\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE_LIST(midr_list)

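/*
 * Illustrative expansion of the helpers above: an initializer
 * fragment such as
 *
 *	ERRATA_MIDR_REV(MIDR_CORTEX_A53, 0, 4)
 *
 * goes through ERRATA_MIDR_RANGE() and CAP_MIDR_RANGE() to become:
 *
 *	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
 *	.matches = is_affected_midr_range,
 *	.midr_range = MIDR_RANGE(MIDR_CORTEX_A53, 0, 4, 0, 4)
 */
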
static const __maybe_unused struct midr_range tx2_family_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
	{},
};

static bool __maybe_unused
needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
			 int scope)
{
	int i;

	if (!is_affected_midr_range_list(entry, scope) ||
	    !is_hyp_mode_available())
		return false;

	for_each_possible_cpu(i) {
		if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
			return true;
	}

	return false;
}

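/*
 * The loop above treats a nonzero Aff0 on any possible CPU as a sign
 * that SMT is enabled (on these parts the thread ID is expected to
 * live in MPIDR Aff0), so the TVM trap is only engaged when sibling
 * threads are actually present.
 */
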
static bool __maybe_unused
has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
				int scope)
{
	u32 midr = read_cpuid_id();
	bool has_dic = read_cpuid_cachetype() & BIT(CTR_EL0_DIC_SHIFT);
	const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range(midr, &range) && has_dic;
}

#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0)
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1286807
	{
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
	},
	{
		/* Kryo4xx Gold (rcpe to rfpe) => (r0p0 to r3p0) */
		ERRATA_MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xe),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2441007
	{
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2441009
	{
		/* Cortex-A510 r0p0 -> r1p1. Fixed in r1p2 */
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1),
	},
#endif
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_23154
static const struct midr_range cavium_erratum_23154_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_THUNDERX),
	MIDR_ALL_VERSIONS(MIDR_THUNDERX_81XX),
	MIDR_ALL_VERSIONS(MIDR_THUNDERX_83XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_98XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_96XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXN),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXMM),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXO),
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_27456
const struct midr_range cavium_erratum_27456_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
	/* Cavium ThunderX, T81 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_30115
static const struct midr_range cavium_erratum_30115_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.2 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 2),
	/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
	MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
	/* Cavium ThunderX, T83 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
	{},
};
#endif

#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = {
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
	{},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
static const struct midr_range workaround_clean_cache[] = {
#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	/* Cortex-A53 r0p[012]: ARM errata 826319, 827319, 824069 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
#endif
#ifdef	CONFIG_ARM64_ERRATUM_819472
	/* Cortex-A53 r0p[01] : ARM errata 819472 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
#endif
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_1418040
/*
 * - 1188873 affects r0p0 to r2p0
 * - 1418040 affects r0p0 to r3p1
 */
static const struct midr_range erratum_1418040_list[] = {
	/* Cortex-A76 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
	/* Neoverse-N1 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
	/* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
	MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_845719
static const struct midr_range erratum_845719_list[] = {
	/* Cortex-A53 r0p[01234] */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
	/* Brahma-B53 r0p[0] */
	MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
	/* Kryo2XX Silver rAp4 */
	MIDR_REV(MIDR_QCOM_KRYO_2XX_SILVER, 0xa, 0x4),
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_843419
static const struct arm64_cpu_capabilities erratum_843419_list[] = {
	{
		/* Cortex-A53 r0p[01234] */
		.matches = is_affected_midr_range,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
		MIDR_FIXED(0x4, BIT(8)),
	},
	{
		/* Brahma-B53 r0p[0] */
		.matches = is_affected_midr_range,
		ERRATA_MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
	},
	{},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
static const struct midr_range erratum_speculative_at_list[] = {
#ifdef CONFIG_ARM64_ERRATUM_1165522
	/* Cortex A76 r0p0 to r2p0 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1319367
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1530923
	/* Cortex A55 r0p0 to r2p0 */
	MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 2, 0),
	/* Kryo4xx Silver (rdpe => r1p0) */
	MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
#endif
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_1463225
static const struct midr_range erratum_1463225[] = {
	/* Cortex-A76 r0p0 - r3p1 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
	/* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
	MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
	{},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE
static const struct midr_range trbe_overwrite_fill_mode_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2139208
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2119858
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
	MIDR_RANGE(MIDR_CORTEX_X2, 0, 0, 2, 0),
#endif
	{},
};
#endif	/* CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE */

#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE
static const struct midr_range tsb_flush_fail_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2067961
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2054223
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
#endif
	{},
};
#endif	/* CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE */

#ifdef CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
static struct midr_range trbe_write_out_of_range_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2253138
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2224489
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
	MIDR_RANGE(MIDR_CORTEX_X2, 0, 0, 2, 0),
#endif
	{},
};
#endif /* CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE */

#ifdef CONFIG_ARM64_ERRATUM_1742098
static struct midr_range broken_aarch32_aes[] = {
	MIDR_RANGE(MIDR_CORTEX_A57, 0, 1, 0xf, 0xf),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	{},
};
#endif /* CONFIG_ARM64_ERRATUM_1742098 */

#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
static const struct midr_range erratum_spec_unpriv_load_list[] = {
#ifdef CONFIG_ARM64_ERRATUM_3117295
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A510),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2966298
	/* Cortex-A520 r0p0 to r0p1 */
	MIDR_REV_RANGE(MIDR_CORTEX_A520, 0, 0, 1),
#endif
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_3194386
static const struct midr_range erratum_spec_ssbs_list[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A715),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A720),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A725),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X1C),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X3),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X4),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X925),
	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N3),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3),
	{}
};
#endif

#ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38
static const struct midr_range erratum_ac03_cpu_38_list[] = {
	MIDR_ALL_VERSIONS(MIDR_AMPERE1),
	MIDR_ALL_VERSIONS(MIDR_AMPERE1A),
	{},
};
#endif

const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
	{
		.desc = "ARM errata 826319, 827319, 824069, or 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_RANGE_LIST(workaround_clean_cache),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
	/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075",
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
	/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_843419
	{
		.desc = "ARM erratum 843419",
		.capability = ARM64_WORKAROUND_843419,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = erratum_843419_list,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
		.desc = "ARM erratum 845719",
		.capability = ARM64_WORKAROUND_845719,
		ERRATA_MIDR_RANGE_LIST(erratum_845719_list),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
		.desc = "Cavium errata 23154 and 38545",
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_23154_cpus),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
	{
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus),
	},
#endif
	{
		.desc = "Mismatched cache type (CTR_EL0)",
		.capability = ARM64_MISMATCHED_CACHE_TYPE,
		.matches = has_mismatched_cache_type,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
	{
		.desc = "Qualcomm Technologies Falkor/Kryo erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = qcom_erratum_1003_list,
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
	{
		.desc = "Qualcomm erratum 1009, or ARM errata 1286807, 2441007, 2441009",
		.capability = ARM64_WORKAROUND_REPEAT_TLBI,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = arm64_repeat_tlbi_list,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
	/* Cortex-A73 all versions */
		.desc = "ARM erratum 858921",
		.capability = ARM64_WORKAROUND_858921,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	},
#endif
	{
		.desc = "Spectre-v2",
		.capability = ARM64_SPECTRE_V2,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_spectre_v2,
		.cpu_enable = spectre_v2_enable_mitigation,
	},
#ifdef CONFIG_RANDOMIZE_BASE
	{
	/* Must come after the Spectre-v2 entry */
		.desc = "Spectre-v3a",
		.capability = ARM64_SPECTRE_V3A,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_spectre_v3a,
		.cpu_enable = spectre_v3a_enable_mitigation,
	},
#endif
	{
		.desc = "Spectre-v4",
		.capability = ARM64_SPECTRE_V4,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_spectre_v4,
		.cpu_enable = spectre_v4_enable_mitigation,
	},
	{
		.desc = "Spectre-BHB",
		.capability = ARM64_SPECTRE_BHB,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = is_spectre_bhb_affected,
		.cpu_enable = spectre_bhb_enable_mitigation,
	},
#ifdef CONFIG_ARM64_ERRATUM_1418040
	{
		.desc = "ARM erratum 1418040",
		.capability = ARM64_WORKAROUND_1418040,
		ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
		/*
		 * We need to allow affected CPUs to come in late, but
		 * also need the non-affected CPUs to be able to come
		 * in at any point in time. Wonderful.
		 */
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
	{
		.desc = "ARM errata 1165522, 1319367, or 1530923",
		.capability = ARM64_WORKAROUND_SPECULATIVE_AT,
		ERRATA_MIDR_RANGE_LIST(erratum_speculative_at_list),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1463225
	{
		.desc = "ARM erratum 1463225",
		.capability = ARM64_WORKAROUND_1463225,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_cortex_a76_erratum_1463225,
		.midr_range_list = erratum_1463225,
	},
#endif
#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
	{
		.desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)",
		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
		.matches = needs_tx2_tvm_workaround,
	},
	{
		.desc = "Cavium ThunderX2 erratum 219 (PRFM removal)",
		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM,
		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1542419
	{
		/* we depend on the firmware portion for correctness */
		.desc = "ARM erratum 1542419 (kernel portion)",
		.capability = ARM64_WORKAROUND_1542419,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_neoverse_n1_erratum_1542419,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1508412
	{
		/* we depend on the firmware portion for correctness */
		.desc = "ARM erratum 1508412 (kernel portion)",
		.capability = ARM64_WORKAROUND_1508412,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A77,
				  0, 0,
				  1, 0),
	},
#endif
#ifdef CONFIG_NVIDIA_CARMEL_CNP_ERRATUM
	{
		/* NVIDIA Carmel */
		.desc = "NVIDIA Carmel CNP erratum",
		.capability = ARM64_WORKAROUND_NVIDIA_CARMEL_CNP,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE
	{
		/*
		 * The erratum work around is handled within the TRBE
		 * driver and can be applied per-cpu. So, we can allow
		 * a late CPU to come online with this erratum.
		 */
		.desc = "ARM erratum 2119858 or 2139208",
		.capability = ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE,
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
		CAP_MIDR_RANGE_LIST(trbe_overwrite_fill_mode_cpus),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE
	{
		.desc = "ARM erratum 2067961 or 2054223",
		.capability = ARM64_WORKAROUND_TSB_FLUSH_FAILURE,
		ERRATA_MIDR_RANGE_LIST(tsb_flush_fail_cpus),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
	{
		.desc = "ARM erratum 2253138 or 2224489",
		.capability = ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE,
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
		CAP_MIDR_RANGE_LIST(trbe_write_out_of_range_cpus),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2645198
	{
		.desc = "ARM erratum 2645198",
		.capability = ARM64_WORKAROUND_2645198,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A715)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2077057
	{
		.desc = "ARM erratum 2077057",
		.capability = ARM64_WORKAROUND_2077057,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2064142
	{
		.desc = "ARM erratum 2064142",
		.capability = ARM64_WORKAROUND_2064142,

		/* Cortex-A510 r0p0 - r0p2 */
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2457168
	{
		.desc = "ARM erratum 2457168",
		.capability = ARM64_WORKAROUND_2457168,
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,

		/* Cortex-A510 r0p0-r1p1 */
		CAP_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2038923
	{
		.desc = "ARM erratum 2038923",
		.capability = ARM64_WORKAROUND_2038923,

		/* Cortex-A510 r0p0 - r0p2 */
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1902691
	{
		.desc = "ARM erratum 1902691",
		.capability = ARM64_WORKAROUND_1902691,

		/* Cortex-A510 r0p0 - r0p1 */
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 1)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1742098
	{
		.desc = "ARM erratum 1742098",
		.capability = ARM64_WORKAROUND_1742098,
		CAP_MIDR_RANGE_LIST(broken_aarch32_aes),
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2658417
	{
		.desc = "ARM erratum 2658417",
		.capability = ARM64_WORKAROUND_2658417,
		/* Cortex-A510 r0p0 - r1p1 */
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1),
		MIDR_FIXED(MIDR_CPU_VAR_REV(1, 1), BIT(25)),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_3194386
	{
		.desc = "SSBS not fully self-synchronizing",
		.capability = ARM64_WORKAROUND_SPECULATIVE_SSBS,
		ERRATA_MIDR_RANGE_LIST(erratum_spec_ssbs_list),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
	{
		.desc = "ARM errata 2966298, 3117295",
		.capability = ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD,
		/* Cortex-A520 r0p0 - r0p1 */
		ERRATA_MIDR_RANGE_LIST(erratum_spec_unpriv_load_list),
	},
#endif
#ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38
	{
		.desc = "AmpereOne erratum AC03_CPU_38",
		.capability = ARM64_WORKAROUND_AMPERE_AC03_CPU_38,
		ERRATA_MIDR_RANGE_LIST(erratum_ac03_cpu_38_list),
	},
#endif
	{
	}
};