/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2018 Andrew Turner
 * All rights reserved.
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_platform.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/pcpu.h>
#include <sys/systm.h>

#include <machine/cpu.h>

#include <dev/psci/psci.h>
#include <dev/psci/smccc.h>

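/*
 * Table-driven CPU quirk handling: each entry matches the local CPU's
 * MIDR against a mask/value pair and, on a match, runs its install
 * function to apply the workaround.
 */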
typedef void (cpu_quirk_install)(void);
struct cpu_quirks {
	cpu_quirk_install *quirk_install;
	u_int		midr_mask;
	u_int		midr_value;
#define	CPU_QUIRK_POST_DEVICE	(1 << 0)	/* After device attach */
						/* e.g. needs SMCCC */
	u_int		flags;
};

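/*
 * Speculative Store Bypass Disable (SSBD) policy, selected by the
 * kern.cfg.ssbd tunable: force the mitigation on, force it off, or
 * (the default) leave it under kernel control.
 */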
static enum {
	SSBD_FORCE_ON,
	SSBD_FORCE_OFF,
	SSBD_KERNEL,
} ssbd_method = SSBD_KERNEL;

static cpu_quirk_install install_psci_bp_hardening;
static cpu_quirk_install install_ssbd_workaround;
static cpu_quirk_install install_thunderx_bcast_tlbi_workaround;

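/*
 * Quirk table. Every entry whose mask/value pair matches the current
 * CPU's MIDR is applied; an all-zero mask matches every CPU. Entries
 * flagged CPU_QUIRK_POST_DEVICE are deferred until device drivers
 * (e.g. PSCI/SMCCC) have attached.
 */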
static struct cpu_quirks cpu_quirks[] = {
	{
		.midr_mask = CPU_IMPL_MASK | CPU_PART_MASK,
		.midr_value = CPU_ID_RAW(CPU_IMPL_ARM, CPU_PART_CORTEX_A57,0,0),
		.quirk_install = install_psci_bp_hardening,
		.flags = CPU_QUIRK_POST_DEVICE,
	},
	{
		.midr_mask = CPU_IMPL_MASK | CPU_PART_MASK,
		.midr_value = CPU_ID_RAW(CPU_IMPL_ARM, CPU_PART_CORTEX_A72,0,0),
		.quirk_install = install_psci_bp_hardening,
		.flags = CPU_QUIRK_POST_DEVICE,
	},
	{
		.midr_mask = CPU_IMPL_MASK | CPU_PART_MASK,
		.midr_value = CPU_ID_RAW(CPU_IMPL_ARM, CPU_PART_CORTEX_A73,0,0),
		.quirk_install = install_psci_bp_hardening,
		.flags = CPU_QUIRK_POST_DEVICE,
	},
	{
		.midr_mask = CPU_IMPL_MASK | CPU_PART_MASK,
		.midr_value = CPU_ID_RAW(CPU_IMPL_ARM, CPU_PART_CORTEX_A75,0,0),
		.quirk_install = install_psci_bp_hardening,
		.flags = CPU_QUIRK_POST_DEVICE,
	},
	{
		.midr_mask = CPU_IMPL_MASK | CPU_PART_MASK,
		.midr_value =
		    CPU_ID_RAW(CPU_IMPL_CAVIUM, CPU_PART_THUNDERX2, 0, 0),
		.quirk_install = install_psci_bp_hardening,
		.flags = CPU_QUIRK_POST_DEVICE,
	},
	{
		.midr_mask = 0,
		.midr_value = 0,
		.quirk_install = install_ssbd_workaround,
		.flags = CPU_QUIRK_POST_DEVICE,
	},
	{
		.midr_mask = CPU_IMPL_MASK | CPU_PART_MASK,
		.midr_value =
		    CPU_ID_RAW(CPU_IMPL_CAVIUM, CPU_PART_THUNDERX, 0, 0),
		.quirk_install = install_thunderx_bcast_tlbi_workaround,
	},
	{
		.midr_mask = CPU_IMPL_MASK | CPU_PART_MASK,
		.midr_value =
		    CPU_ID_RAW(CPU_IMPL_CAVIUM, CPU_PART_THUNDERX_81XX, 0, 0),
		.quirk_install = install_thunderx_bcast_tlbi_workaround,
	},
};

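/*
 * Use the SMCCC_ARCH_WORKAROUND_1 firmware call as this CPU's branch
 * predictor hardening hook when the firmware advertises support for it.
 */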
static void
install_psci_bp_hardening(void)
{
	/* SMCCC depends on PSCI. If PSCI is missing, so is SMCCC. */
	if (!psci_present)
		return;

	if (smccc_arch_features(SMCCC_ARCH_WORKAROUND_1) != SMCCC_RET_SUCCESS)
		return;

	PCPU_SET(bp_harden, smccc_arch_workaround_1);
}

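/*
 * Install the Speculative Store Bypass Disable (SSBD) workaround using
 * the SMCCC_ARCH_WORKAROUND_2 firmware call. The kern.cfg.ssbd tunable
 * is only parsed on the boot CPU.
 */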
static void
install_ssbd_workaround(void)
{
	char *env;

	if (PCPU_GET(cpuid) == 0) {
		env = kern_getenv("kern.cfg.ssbd");
		if (env != NULL) {
			if (strcmp(env, "force-on") == 0) {
				ssbd_method = SSBD_FORCE_ON;
			} else if (strcmp(env, "force-off") == 0) {
				ssbd_method = SSBD_FORCE_OFF;
			}
			freeenv(env);
		}
	}

	/* SMCCC depends on PSCI. If PSCI is missing, so is SMCCC. */
	if (!psci_present)
		return;

	/* Enable the workaround on this CPU if the firmware supports it */
	if (smccc_arch_features(SMCCC_ARCH_WORKAROUND_2) != SMCCC_RET_SUCCESS)
		return;

	switch (ssbd_method) {
	case SSBD_FORCE_ON:
		smccc_arch_workaround_2(1);
		break;
	case SSBD_FORCE_OFF:
		smccc_arch_workaround_2(0);
		break;
	case SSBD_KERNEL:
	default:
		PCPU_SET(ssbd, smccc_arch_workaround_2);
		break;
	}
}

/*
 * Work around Cavium erratum 27456.
 *
 * Flag affected CPUs so the local icache is invalidated when changing
 * address spaces.
 */
static void
install_thunderx_bcast_tlbi_workaround(void)
{
	u_int midr;

	midr = get_midr();
	if (CPU_PART(midr) == CPU_PART_THUNDERX_81XX)
		PCPU_SET(bcast_tlbi_workaround, 1);
	else if (CPU_PART(midr) == CPU_PART_THUNDERX) {
		if (CPU_VAR(midr) == 0) {
			/* ThunderX 1.x */
			PCPU_SET(bcast_tlbi_workaround, 1);
		} else if (CPU_VAR(midr) == 1 && CPU_REV(midr) <= 1) {
			/* ThunderX 2.0 - 2.1 */
			PCPU_SET(bcast_tlbi_workaround, 1);
		}
	}
}

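/*
 * Run the install routine of every quirk table entry that matches the
 * current CPU's MIDR and whose flags, masked by 'mask', equal 'flags'.
 */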
static void
install_cpu_errata_flags(u_int mask, u_int flags)
{
	u_int midr;
	size_t i;

	midr = get_midr();

	for (i = 0; i < nitems(cpu_quirks); i++) {
		if ((midr & cpu_quirks[i].midr_mask) ==
		    cpu_quirks[i].midr_value &&
		    (cpu_quirks[i].flags & mask) == flags) {
			cpu_quirks[i].quirk_install();
		}
	}
}

/*
 * Install any CPU errata we need. On CPU 0 we only install the errata that
 * don't depend on device drivers, as this is called early in the boot
 * process. On other CPUs the device drivers have already attached, so
 * install all applicable errata.
 */
void
install_cpu_errata(void)
{
	/*
	 * Only install the early CPU errata on CPU 0: device drivers may
	 * not have attached yet and some workarounds depend on them, e.g.
	 * to query SMCCC.
	 */
	if (PCPU_GET(cpuid) == 0) {
		install_cpu_errata_flags(CPU_QUIRK_POST_DEVICE, 0);
	} else {
		install_cpu_errata_flags(0, 0);
	}
}

/*
 * Install any errata workarounds that depend on device drivers, e.g. those
 * that use SMCCC to install a workaround.
 */
static void
install_cpu_errata_late(void *dummy __unused)
{
	MPASS(PCPU_GET(cpuid) == 0);
	install_cpu_errata_flags(CPU_QUIRK_POST_DEVICE, CPU_QUIRK_POST_DEVICE);
}
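/*
 * Defer the driver-dependent errata until device attach has happened on
 * the boot CPU, so firmware interfaces such as PSCI/SMCCC are available.
 */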
SYSINIT(install_cpu_errata_late, SI_SUB_CONFIGURE, SI_ORDER_MIDDLE,
    install_cpu_errata_late, NULL);