/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2025 Arm Ltd
 * Copyright (c) 2018 Andrew Turner
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/pcpu.h>
#include <sys/systm.h>

#include <machine/cpu.h>
#include <machine/cpu_feat.h>

#include <dev/psci/psci.h>
#include <dev/psci/smccc.h>

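/*
 * Policy for the Speculative Store Bypass Disable (SSBD) workaround,
 * selected with the kern.cfg.ssbd tunable: force it on, force it off, or
 * (the default) leave it for the kernel to toggle as needed.
 */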
static enum {
	SSBD_FORCE_ON,
	SSBD_FORCE_OFF,
	SSBD_KERNEL,
} ssbd_method = SSBD_KERNEL;

struct psci_bp_hardening_impl {
	u_int midr_mask;
	u_int midr_value;
};

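/*
 * CPUs matching these MIDR mask/value pairs need the firmware branch
 * predictor invalidation provided by SMCCC_ARCH_WORKAROUND_1.
 */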
static struct psci_bp_hardening_impl psci_bp_hardening_impl[] = {
	{
		.midr_mask = CPU_IMPL_MASK | CPU_PART_MASK,
		.midr_value = CPU_ID_RAW(CPU_IMPL_ARM, CPU_PART_CORTEX_A57,0,0),
	},
	{
		.midr_mask = CPU_IMPL_MASK | CPU_PART_MASK,
		.midr_value = CPU_ID_RAW(CPU_IMPL_ARM, CPU_PART_CORTEX_A72,0,0),
	},
	{
		.midr_mask = CPU_IMPL_MASK | CPU_PART_MASK,
		.midr_value = CPU_ID_RAW(CPU_IMPL_ARM, CPU_PART_CORTEX_A73,0,0),
	},
	{
		.midr_mask = CPU_IMPL_MASK | CPU_PART_MASK,
		.midr_value = CPU_ID_RAW(CPU_IMPL_ARM, CPU_PART_CORTEX_A75,0,0),
	},
	{
		.midr_mask = CPU_IMPL_MASK | CPU_PART_MASK,
		.midr_value =
		    CPU_ID_RAW(CPU_IMPL_CAVIUM, CPU_PART_THUNDERX2, 0,0),
	}
};

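/*
 * Offer the branch predictor hardening only on CPUs listed above, and only
 * when the firmware advertises SMCCC_ARCH_WORKAROUND_1.
 */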
static cpu_feat_en
psci_bp_hardening_check(const struct cpu_feat *feat __unused, u_int midr)
{
	size_t i;

	for (i = 0; i < nitems(psci_bp_hardening_impl); i++) {
		if ((midr & psci_bp_hardening_impl[i].midr_mask) ==
		    psci_bp_hardening_impl[i].midr_value) {
			/* SMCCC depends on PSCI. If PSCI is missing so is SMCCC */
			if (!psci_present)
				return (FEAT_ALWAYS_DISABLE);

			if (smccc_arch_features(SMCCC_ARCH_WORKAROUND_1) !=
			    SMCCC_RET_SUCCESS)
				return (FEAT_ALWAYS_DISABLE);

			return (FEAT_DEFAULT_ENABLE);
		}
	}

	return (FEAT_ALWAYS_DISABLE);
}

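/*
 * Install the SMCCC call as this CPU's branch predictor hardening hook.
 */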
static bool
psci_bp_hardening_enable(const struct cpu_feat *feat __unused,
    cpu_feat_errata errata_status __unused, u_int *errata_list __unused,
    u_int errata_count __unused)
{
	PCPU_SET(bp_harden, smccc_arch_workaround_1);

	return (true);
}

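/*
 * Register as a per-CPU feature, checked after device attach so the
 * PSCI/SMCCC firmware interface has been probed.
 */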
CPU_FEAT(feat_csv2_missing, "Branch Predictor Hardening",
    psci_bp_hardening_check, NULL, psci_bp_hardening_enable, NULL,
    CPU_FEAT_AFTER_DEV | CPU_FEAT_PER_CPU);

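/*
 * Select the SSBD policy from the kern.cfg.ssbd tunable on the boot CPU and
 * check that the firmware implements SMCCC_ARCH_WORKAROUND_2.
 */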
static cpu_feat_en
ssbd_workaround_check(const struct cpu_feat *feat __unused, u_int midr __unused)
{
	char *env;

	if (PCPU_GET(cpuid) == 0) {
		env = kern_getenv("kern.cfg.ssbd");
		if (env != NULL) {
			if (strcmp(env, "force-on") == 0) {
				ssbd_method = SSBD_FORCE_ON;
			} else if (strcmp(env, "force-off") == 0) {
				ssbd_method = SSBD_FORCE_OFF;
			}
			freeenv(env);
		}
	}

	/* SMCCC depends on PSCI. If PSCI is missing so is SMCCC */
	if (!psci_present)
		return (FEAT_ALWAYS_DISABLE);

	/* Enable the workaround on this CPU if it's enabled in the firmware */
	if (smccc_arch_features(SMCCC_ARCH_WORKAROUND_2) != SMCCC_RET_SUCCESS)
		return (FEAT_ALWAYS_DISABLE);

	return (FEAT_DEFAULT_ENABLE);
}

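/*
 * Apply the chosen policy: force-on/force-off make a single firmware call
 * now, while the default installs a per-CPU hook so the kernel can toggle
 * SSBD through SMCCC_ARCH_WORKAROUND_2 as needed.
 */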
static bool
ssbd_workaround_enable(const struct cpu_feat *feat __unused,
    cpu_feat_errata errata_status __unused, u_int *errata_list __unused,
    u_int errata_count __unused)
{
	switch (ssbd_method) {
	case SSBD_FORCE_ON:
		smccc_arch_workaround_2(1);
		break;
	case SSBD_FORCE_OFF:
		smccc_arch_workaround_2(0);
		break;
	case SSBD_KERNEL:
	default:
		PCPU_SET(ssbd, smccc_arch_workaround_2);
		break;
	}

	return (true);
}

CPU_FEAT(feat_ssbs_missing, "Speculative Store Bypass Disable Workaround",
    ssbd_workaround_check, NULL, ssbd_workaround_enable, NULL,
    CPU_FEAT_AFTER_DEV | CPU_FEAT_PER_CPU);