/*-
 * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
 * Copyright 2014 Michal Meloun <meloun@miracle.cz>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/pcpu.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <machine/cpu.h>
#include <machine/cpuinfo.h>
#include <machine/elf.h>
#include <machine/md_var.h>

void reinit_mmu(uint32_t ttb, uint32_t aux_clr, uint32_t aux_set);

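/*
 * disable_bp_hardening is the sysctl-visible knob; spectre_v2_safe starts
 * optimistic and is cleared whenever any core is left without a working
 * mitigation.
 */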
int disable_bp_hardening;
int spectre_v2_safe = 1;

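/*
 * Boot-time safe defaults: cache maintenance may run before cpuinfo_init()
 * parses CTR.  Assuming real lines are at least 32 bytes (which holds for
 * the ARMv6/v7 cores this file targets), a too-small assumed line size only
 * makes maintenance loops touch extra addresses, never skip a line.
 */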
struct cpuinfo cpuinfo =
{
	/* Use safe defaults for start */
	.dcache_line_size = 32,
	.dcache_line_mask = 31,
	.icache_line_size = 32,
	.icache_line_mask = 31,
};

static SYSCTL_NODE(_hw, OID_AUTO, cpu, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "CPU");
static SYSCTL_NODE(_hw_cpu, OID_AUTO, quirks, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "CPU quirks");

/*
 * Tunable CPU quirks.
 * Be careful: ACTLR cannot be changed if the CPU was not started in secure
 * mode (world), and a write to ACTLR can cause an exception!
 * These quirks are intended for optimizing CPU performance, not for
 * applying errata workarounds.  Nobody can expect that a CPU with unfixed
 * errata is stable enough to execute the kernel until quirks are applied.
 */
static uint32_t cpu_quirks_actlr_mask;
SYSCTL_INT(_hw_cpu_quirks, OID_AUTO, actlr_mask,
    CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &cpu_quirks_actlr_mask, 0,
    "Bits to be masked in ACTLR");

static uint32_t cpu_quirks_actlr_set;
SYSCTL_INT(_hw_cpu_quirks, OID_AUTO, actlr_set,
    CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &cpu_quirks_actlr_set, 0,
    "Bits to be set in ACTLR");

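/*
 * Example use of the quirk tunables above (hypothetical value), e.g. in
 * loader.conf:
 *   hw.cpu.quirks.actlr_mask="0x40"
 *   hw.cpu.quirks.actlr_set="0x40"
 * This would force bit 6 (the SMP bit on several Cortex-A cores) to be set
 * when cpuinfo_reinit_mmu() applies the quirks.
 */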
static int
sysctl_hw_cpu_quirks_actlr_value(SYSCTL_HANDLER_ARGS)
{
	uint32_t reg;

	reg = cp15_actlr_get();
	return (SYSCTL_OUT(req, &reg, sizeof(reg)));
}
SYSCTL_PROC(_hw_cpu_quirks, OID_AUTO, actlr_value,
    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_hw_cpu_quirks_actlr_value, "IU",
    "Value of ACTLR");

/* Read and parse the CPU identification registers. */
void
cpuinfo_init(void)
{
	uint32_t tmp;

	/*
	 * Prematurely fetch CPU quirks.  The standard fetch for tunable
	 * sysctls is handled by SYSINIT, which is too late for the boot CPU.
	 * Keep names in sync with the sysctls.
	 */
	TUNABLE_INT_FETCH("hw.cpu.quirks.actlr_mask", &cpu_quirks_actlr_mask);
	TUNABLE_INT_FETCH("hw.cpu.quirks.actlr_set", &cpu_quirks_actlr_set);

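	/*
	 * For reference, MIDR layout in the new-style ID scheme:
	 *   [31:24] implementer, [23:20] variant, [19:16] architecture,
	 *   [15:4] primary part number, [3:0] revision.
	 */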
	cpuinfo.midr = cp15_midr_get();
	/* Test old version id schemes first */
	if ((cpuinfo.midr & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD) {
		if (CPU_ID_ISOLD(cpuinfo.midr)) {
			/* obsolete ARMv2 or ARMv3 CPU */
			cpuinfo.midr = 0;
			return;
		}
		if (CPU_ID_IS7(cpuinfo.midr)) {
			if ((cpuinfo.midr & (1 << 23)) == 0) {
				/* obsolete ARMv3 CPU */
				cpuinfo.midr = 0;
				return;
			}
			/* ARMv4T CPU */
			cpuinfo.architecture = 1;
			cpuinfo.revision = (cpuinfo.midr >> 16) & 0x7F;
		} else {
			/* ARM new id scheme */
			cpuinfo.architecture = (cpuinfo.midr >> 16) & 0x0F;
			cpuinfo.revision = (cpuinfo.midr >> 20) & 0x0F;
		}
	} else {
		/* non-ARM -> must be new id scheme */
		cpuinfo.architecture = (cpuinfo.midr >> 16) & 0x0F;
		cpuinfo.revision = (cpuinfo.midr >> 20) & 0x0F;
	}
	/* Parse the rest of MIDR */
	cpuinfo.implementer = (cpuinfo.midr >> 24) & 0xFF;
	cpuinfo.part_number = (cpuinfo.midr >> 4) & 0xFFF;
	cpuinfo.patch = cpuinfo.midr & 0x0F;

	/* CP15 c0,c0 regs 0-7 exist on all CPUs (although aliased with MIDR) */
	cpuinfo.ctr = cp15_ctr_get();
	cpuinfo.tcmtr = cp15_tcmtr_get();
	cpuinfo.tlbtr = cp15_tlbtr_get();
	cpuinfo.mpidr = cp15_mpidr_get();
	cpuinfo.revidr = cp15_revidr_get();

	/* If the CPU does not use the ARMv7 ID scheme, nothing more to parse. */
	if (cpuinfo.architecture != 0xF)
		return;
	cpuinfo.id_pfr0 = cp15_id_pfr0_get();
	cpuinfo.id_pfr1 = cp15_id_pfr1_get();
	cpuinfo.id_dfr0 = cp15_id_dfr0_get();
	cpuinfo.id_afr0 = cp15_id_afr0_get();
	cpuinfo.id_mmfr0 = cp15_id_mmfr0_get();
	cpuinfo.id_mmfr1 = cp15_id_mmfr1_get();
	cpuinfo.id_mmfr2 = cp15_id_mmfr2_get();
	cpuinfo.id_mmfr3 = cp15_id_mmfr3_get();
	cpuinfo.id_isar0 = cp15_id_isar0_get();
	cpuinfo.id_isar1 = cp15_id_isar1_get();
	cpuinfo.id_isar2 = cp15_id_isar2_get();
	cpuinfo.id_isar3 = cp15_id_isar3_get();
	cpuinfo.id_isar4 = cp15_id_isar4_get();
	cpuinfo.id_isar5 = cp15_id_isar5_get();

	/* Not yet - CBAR exists only on ARM SMP Cortex-A CPUs:
	cpuinfo.cbar = cp15_cbar_get();
	*/
	if (CPU_CT_FORMAT(cpuinfo.ctr) == CPU_CT_ARMV7) {
		cpuinfo.ccsidr = cp15_ccsidr_get();
		cpuinfo.clidr = cp15_clidr_get();
	}

	/* Test if REVIDR is implemented */
	if (cpuinfo.revidr == cpuinfo.midr)
		cpuinfo.revidr = 0;

	/* parsed bits of the registers above */
	/* id_mmfr0 */
	cpuinfo.outermost_shareability = (cpuinfo.id_mmfr0 >> 8) & 0xF;
	cpuinfo.shareability_levels = (cpuinfo.id_mmfr0 >> 12) & 0xF;
	cpuinfo.auxiliary_registers = (cpuinfo.id_mmfr0 >> 20) & 0xF;
	cpuinfo.innermost_shareability = (cpuinfo.id_mmfr0 >> 28) & 0xF;
	/* id_mmfr2 */
	cpuinfo.mem_barrier = (cpuinfo.id_mmfr2 >> 20) & 0xF;
	/* id_mmfr3 */
	cpuinfo.coherent_walk = (cpuinfo.id_mmfr3 >> 20) & 0xF;
	cpuinfo.maintenance_broadcast = (cpuinfo.id_mmfr3 >> 12) & 0xF;
	/* id_pfr1 */
	cpuinfo.generic_timer_ext = (cpuinfo.id_pfr1 >> 16) & 0xF;
	cpuinfo.virtualization_ext = (cpuinfo.id_pfr1 >> 12) & 0xF;
	cpuinfo.security_ext = (cpuinfo.id_pfr1 >> 4) & 0xF;
	/* mpidr */
	cpuinfo.mp_ext = (cpuinfo.mpidr >> 31u) & 0x1;

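	/*
	 * For reference: the ARMv7 CTR encodes [DI]minLine as log2(words),
	 * so line bytes = 4 << minline = 1 << (minline + 2); the pre-v7 CTR
	 * format encodes line length as 1 << (len + 3) bytes.  Hence the
	 * different shifts below.
	 */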
	/* L1 cache sizes */
	if (CPU_CT_FORMAT(cpuinfo.ctr) == CPU_CT_ARMV7) {
		cpuinfo.dcache_line_size =
		    1 << (CPU_CT_DMINLINE(cpuinfo.ctr) + 2);
		cpuinfo.icache_line_size =
		    1 << (CPU_CT_IMINLINE(cpuinfo.ctr) + 2);
	} else {
		cpuinfo.dcache_line_size =
		    1 << (CPU_CT_xSIZE_LEN(CPU_CT_DSIZE(cpuinfo.ctr)) + 3);
		cpuinfo.icache_line_size =
		    1 << (CPU_CT_xSIZE_LEN(CPU_CT_ISIZE(cpuinfo.ctr)) + 3);
	}
	cpuinfo.dcache_line_mask = cpuinfo.dcache_line_size - 1;
	cpuinfo.icache_line_mask = cpuinfo.icache_line_size - 1;

	/* Fill AT_HWCAP bits. */
	elf_hwcap |= HWCAP_HALF | HWCAP_FAST_MULT;	/* Required for all CPUs */
	elf_hwcap |= HWCAP_TLS | HWCAP_EDSP;		/* Required for v6+ CPUs */

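	/*
	 * ID_ISAR0.Divide_instrs: a value of 1 means SDIV/UDIV exist in the
	 * Thumb instruction set only (IDIVT), 2 means they exist in the ARM
	 * instruction set as well (IDIVA).
	 */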
	tmp = (cpuinfo.id_isar0 >> 24) & 0xF;	/* Divide_instrs */
	if (tmp >= 1)
		elf_hwcap |= HWCAP_IDIVT;
	if (tmp >= 2)
		elf_hwcap |= HWCAP_IDIVA;

	tmp = (cpuinfo.id_pfr0 >> 4) & 0xF;	/* State1 */
	if (tmp >= 1)
		elf_hwcap |= HWCAP_THUMB;

	tmp = (cpuinfo.id_pfr0 >> 12) & 0xF;	/* State3 */
	if (tmp >= 1)
		elf_hwcap |= HWCAP_THUMBEE;

	tmp = (cpuinfo.id_mmfr0 >> 0) & 0xF;	/* VMSA */
	if (tmp >= 5)
		elf_hwcap |= HWCAP_LPAE;

	/* Fill AT_HWCAP2 bits. */
	tmp = (cpuinfo.id_isar5 >> 4) & 0xF;	/* AES */
	if (tmp >= 1)
		elf_hwcap2 |= HWCAP2_AES;
	if (tmp >= 2)
		elf_hwcap2 |= HWCAP2_PMULL;

	tmp = (cpuinfo.id_isar5 >> 8) & 0xF;	/* SHA1 */
	if (tmp >= 1)
		elf_hwcap2 |= HWCAP2_SHA1;

	tmp = (cpuinfo.id_isar5 >> 12) & 0xF;	/* SHA2 */
	if (tmp >= 1)
		elf_hwcap2 |= HWCAP2_SHA2;

	tmp = (cpuinfo.id_isar5 >> 16) & 0xF;	/* CRC32 */
	if (tmp >= 1)
		elf_hwcap2 |= HWCAP2_CRC32;
}

/*
 * Get bits that must be set or cleared in the ACTLR register.
 * Note: bits in the ACTLR register are IMPLEMENTATION DEFINED.
 * It's expected that the SCU is in an operational state before this
 * function is called.
 */
static void
cpuinfo_get_actlr_modifier(uint32_t *actlr_mask, uint32_t *actlr_set)
{

	*actlr_mask = 0;
	*actlr_set = 0;

	if (cpuinfo.implementer == CPU_IMPLEMENTER_ARM) {
		switch (cpuinfo.part_number) {
		case CPU_ARCH_CORTEX_A75:
		case CPU_ARCH_CORTEX_A73:
		case CPU_ARCH_CORTEX_A72:
		case CPU_ARCH_CORTEX_A57:
		case CPU_ARCH_CORTEX_A53:
			/* Nothing to do for AArch32 */
			break;
		case CPU_ARCH_CORTEX_A17:
		case CPU_ARCH_CORTEX_A12:	/* A12 is merged into A17 */
			/*
			 * Enable SMP mode
			 */
			*actlr_mask = (1 << 6);
			*actlr_set = (1 << 6);
			break;
		case CPU_ARCH_CORTEX_A15:
			/*
			 * Enable snoop-delayed exclusive handling
			 * Enable SMP mode
			 */
			*actlr_mask = (1U << 31) | (1 << 6);
			*actlr_set = (1U << 31) | (1 << 6);
			break;
		case CPU_ARCH_CORTEX_A9:
			/*
			 * Disable exclusive L1/L2 cache control
			 * Enable SMP mode
			 * Enable cache and TLB maintenance broadcast
			 */
			*actlr_mask = (1 << 7) | (1 << 6) | (1 << 0);
			*actlr_set = (1 << 6) | (1 << 0);
			break;
		case CPU_ARCH_CORTEX_A8:
			/*
			 * Enable L2 cache
			 * Enable L1 data cache hardware alias checks
			 */
			*actlr_mask = (1 << 1) | (1 << 0);
			*actlr_set = (1 << 1);
			break;
		case CPU_ARCH_CORTEX_A7:
			/*
			 * Enable SMP mode
			 */
			*actlr_mask = (1 << 6);
			*actlr_set = (1 << 6);
			break;
		case CPU_ARCH_CORTEX_A5:
			/*
			 * Disable exclusive L1/L2 cache control
			 * Enable SMP mode
			 * Enable cache and TLB maintenance broadcast
			 */
			*actlr_mask = (1 << 7) | (1 << 6) | (1 << 0);
			*actlr_set = (1 << 6) | (1 << 0);
			break;
		case CPU_ARCH_ARM1176:
			/*
			 * Restrict cache size to 16KB
			 * Enable the return stack
			 * Enable dynamic branch prediction
			 * Enable static branch prediction
			 */
			*actlr_mask = (1 << 6) | (1 << 2) | (1 << 1) | (1 << 0);
			*actlr_set = (1 << 6) | (1 << 2) | (1 << 1) | (1 << 0);
			break;
		}
		return;
	}
}

/* Reinitialize MMU to the final kernel mapping and apply all CPU quirks. */
void
cpuinfo_reinit_mmu(uint32_t ttb)
{
	uint32_t actlr_mask;
	uint32_t actlr_set;

	cpuinfo_get_actlr_modifier(&actlr_mask, &actlr_set);
	actlr_mask |= cpu_quirks_actlr_mask;
	actlr_set |= cpu_quirks_actlr_set;
	reinit_mmu(ttb, actlr_mask, actlr_set);
}

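/*
 * Modify ACTLR bits and verify the result by reading the register back;
 * in the non-secure world the write may be silently ignored, so success
 * cannot be assumed.
 */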
static bool
modify_actlr(uint32_t clear, uint32_t set)
{
	uint32_t reg, newreg;

	reg = cp15_actlr_get();
	newreg = reg;
	newreg &= ~clear;
	newreg |= set;
	if (reg == newreg)
		return (true);
	cp15_actlr_set(newreg);

	reg = cp15_actlr_get();
	if (reg == newreg)
		return (true);
	return (false);
}

/* Apply/restore BP hardening on the current core. */
static int
apply_bp_hardening(bool enable, int kind, bool actlr, uint32_t set_mask)
{
	if (enable) {
		if (actlr && !modify_actlr(0, set_mask))
			return (-1);
		PCPU_SET(bp_harden_kind, kind);
	} else {
		PCPU_SET(bp_harden_kind, PCPU_BP_HARDEN_KIND_NONE);
		if (actlr)
			modify_actlr(~0, PCPU_GET(original_actlr));
		spectre_v2_safe = 0;
	}
	return (0);
}

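/*
 * Select and apply a BP hardening method for the current core.  The kind
 * stored in PCPU tells the rest of the kernel which instruction (BPIALL or
 * ICIALLU) to use when it needs to flush the branch predictor.
 */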
static void
handle_bp_hardening(bool enable)
{
	int kind;
	char *kind_str;

	kind = PCPU_BP_HARDEN_KIND_NONE;
	/*
	 * Note: access to ACTLR is locked to the secure world on most boards.
	 * This means that full BP hardening depends on updated u-boot/firmware
	 * or is impossible at all (if the secure monitor is in on-chip ROM).
	 */
	if (cpuinfo.implementer == CPU_IMPLEMENTER_ARM) {
		switch (cpuinfo.part_number) {
		case CPU_ARCH_CORTEX_A8:
			/*
			 * For Cortex-A8, the IBE bit must be set, otherwise
			 * BPIALL is effectively a NOP.
			 * Unfortunately, Cortex-A8 is also affected by
			 * ARM erratum 687067, which makes BPIALL non-working
			 * if the IBE bit is set and 'Instruction L1 System
			 * Array Debug Register 0' is not 0.
			 * That register is not reset on power-up and is
			 * accessible only from the secure world, so we can
			 * neither fix nor even detect this issue.  We fear
			 * that the on-chip-ROM-based secure monitor on AM335x
			 * (BeagleBone) doesn't reset this debug register.
			 */
			kind = PCPU_BP_HARDEN_KIND_BPIALL;
			if (apply_bp_hardening(enable, kind, true, 1 << 6) != 0)
				goto actlr_err;
			break;

		case CPU_ARCH_CORTEX_A9:
		case CPU_ARCH_CORTEX_A12:
		case CPU_ARCH_CORTEX_A17:
		case CPU_ARCH_CORTEX_A57:
		case CPU_ARCH_CORTEX_A72:
		case CPU_ARCH_CORTEX_A73:
		case CPU_ARCH_CORTEX_A75:
			kind = PCPU_BP_HARDEN_KIND_BPIALL;
			if (apply_bp_hardening(enable, kind, false, 0) != 0)
				goto actlr_err;
			break;

		case CPU_ARCH_CORTEX_A15:
			/*
			 * For Cortex-A15, set the 'Enable invalidates of BTB'
			 * bit.  Even with this bit set, BPIALL is still
			 * effectively a NOP, but ICIALLU then also flushes the
			 * branch predictor as a side effect.
			 */
			kind = PCPU_BP_HARDEN_KIND_ICIALLU;
			if (apply_bp_hardening(enable, kind, true, 1 << 0) != 0)
				goto actlr_err;
			break;

		default:
			break;
		}
	} else if (cpuinfo.implementer == CPU_IMPLEMENTER_QCOM) {
		printf("!!!WARNING!!! CPU(%d) is vulnerable to speculative "
		    "branch attacks. !!!\n"
		    "Qualcomm Krait cores are known (or believed) to be "
		    "vulnerable to\n"
		    "speculative branch attacks; no mitigation exists yet.\n",
		    PCPU_GET(cpuid));
		goto unknown_mitigation;
	} else {
		goto unknown_mitigation;
	}

	if (bootverbose) {
		switch (kind) {
		case PCPU_BP_HARDEN_KIND_NONE:
			kind_str = "not necessary";
			break;
		case PCPU_BP_HARDEN_KIND_BPIALL:
			kind_str = "BPIALL";
			break;
		case PCPU_BP_HARDEN_KIND_ICIALLU:
			kind_str = "ICIALLU";
			break;
		default:
			panic("Unknown BP hardening kind (%d).", kind);
		}
		printf("CPU(%d) applied BP hardening: %s\n", PCPU_GET(cpuid),
		    kind_str);
	}

	return;

unknown_mitigation:
	PCPU_SET(bp_harden_kind, PCPU_BP_HARDEN_KIND_NONE);
	spectre_v2_safe = 0;
	return;

actlr_err:
	PCPU_SET(bp_harden_kind, PCPU_BP_HARDEN_KIND_NONE);
	spectre_v2_safe = 0;
	printf("!!!WARNING!!! CPU(%d) is vulnerable to speculative branch "
	    "attacks. !!!\n"
	    "We cannot enable the required bit(s) in the ACTLR register\n"
	    "because it is locked by the secure monitor and/or firmware.\n",
	    PCPU_GET(cpuid));
}

void
cpuinfo_init_bp_hardening(void)
{

	/*
	 * Store the original, unmodified ACTLR so we can restore it when
	 * BP hardening is disabled by sysctl.
	 */
	PCPU_SET(original_actlr, cp15_actlr_get());
	handle_bp_hardening(true);
}

static void
bp_hardening_action(void *arg)
{

	handle_bp_hardening(disable_bp_hardening == 0);
}

static int
sysctl_disable_bp_hardening(SYSCTL_HANDLER_ARGS)
{
	int rv;

	rv = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);

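	/*
	 * On a successful write, propagate the new setting to every core;
	 * each CPU must reprogram its own ACTLR and PCPU state, hence the
	 * rendezvous instead of a single local call.
	 */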
	if (!rv && req->newptr) {
		spectre_v2_safe = 1;
		dmb();
#ifdef SMP
		smp_rendezvous_cpus(all_cpus, smp_no_rendezvous_barrier,
		    bp_hardening_action, NULL, NULL);
#else
		bp_hardening_action(NULL);
#endif
	}

	return (rv);
}

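/*
 * Example usage from a root shell:
 *   sysctl machdep.disable_bp_hardening=1	# drop the mitigation
 *   sysctl machdep.disable_bp_hardening=0	# re-apply it on all cores
 */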
SYSCTL_PROC(_machdep, OID_AUTO, disable_bp_hardening,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    &disable_bp_hardening, 0, sysctl_disable_bp_hardening, "I",
    "Disable BP hardening mitigation.");

SYSCTL_INT(_machdep, OID_AUTO, spectre_v2_safe, CTLFLAG_RD,
    &spectre_v2_safe, 0, "System is safe against Spectre Version 2 attacks");