1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Handle detection, reporting and mitigation of Spectre v1, v2, v3a and v4, as
4  * detailed at:
5  *
6  *   https://developer.arm.com/support/arm-security-updates/speculative-processor-vulnerability
7  *
8  * This code was originally written hastily under an awful lot of stress and so
9  * aspects of it are somewhat hacky. Unfortunately, changing anything in here
10  * instantly makes me feel ill. Thanks, Jann. Thann.
11  *
12  * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
13  * Copyright (C) 2020 Google LLC
14  *
15  * "If there's something strange in your neighbourhood, who you gonna call?"
16  *
17  * Authors: Will Deacon <will@kernel.org> and Marc Zyngier <maz@kernel.org>
18  */
19 
20 #include <linux/arm-smccc.h>
21 #include <linux/bpf.h>
22 #include <linux/cpu.h>
23 #include <linux/device.h>
24 #include <linux/nospec.h>
25 #include <linux/prctl.h>
26 #include <linux/sched/task_stack.h>
27 
28 #include <asm/debug-monitors.h>
29 #include <asm/insn.h>
30 #include <asm/spectre.h>
31 #include <asm/traps.h>
32 #include <asm/vectors.h>
33 #include <asm/virt.h>
34 
35 /*
36  * We try to ensure that the mitigation state can never change as the result of
37  * onlining a late CPU.
38  */
39 static void update_mitigation_state(enum mitigation_state *oldp,
40 				    enum mitigation_state new)
41 {
42 	enum mitigation_state state;
43 
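	/*
	 * This assumes the enum orders UNAFFECTED < MITIGATED < VULNERABLE,
	 * so the recorded state only ever moves towards "more vulnerable";
	 * equal or better states are ignored.
	 */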
44 	do {
45 		state = READ_ONCE(*oldp);
46 		if (new <= state)
47 			break;
48 
49 		/* Userspace almost certainly can't deal with this. */
50 		if (WARN_ON(system_capabilities_finalized()))
51 			break;
52 	} while (cmpxchg_relaxed(oldp, state, new) != state);
53 }
54 
55 /*
56  * Spectre v1.
57  *
58  * The kernel can't protect userspace for this one: it's each person for
59  * themselves. Advertise what we're doing and be done with it.
60  */
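/*
 * Show routine backing the spectre_v1 entry under
 * /sys/devices/system/cpu/vulnerabilities/.
 */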
61 ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
62 			    char *buf)
63 {
64 	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
65 }
66 
67 /*
68  * Spectre v2.
69  *
70  * This one sucks. A CPU is either:
71  *
72  * - Mitigated in hardware and advertised by ID_AA64PFR0_EL1.CSV2.
73  * - Mitigated in hardware and listed in our "safe list".
74  * - Mitigated in software by firmware.
75  * - Mitigated in software by a CPU-specific dance in the kernel and a
76  *   firmware call at EL2.
77  * - Vulnerable.
78  *
79  * It's not unlikely for different CPUs in a big.LITTLE system to fall into
80  * different camps.
81  */
82 static enum mitigation_state spectre_v2_state;
83 
84 static bool __read_mostly __nospectre_v2;
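/*
 * "nospectre_v2" takes no value: its mere presence on the command line
 * disables the mitigation (the parser below ignores @str).
 */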
85 static int __init parse_spectre_v2_param(char *str)
86 {
87 	__nospectre_v2 = true;
88 	return 0;
89 }
90 early_param("nospectre_v2", parse_spectre_v2_param);
91 
92 static bool spectre_v2_mitigations_off(void)
93 {
94 	return __nospectre_v2 || cpu_mitigations_off();
95 }
96 
97 static const char *get_bhb_affected_string(enum mitigation_state bhb_state)
98 {
99 	switch (bhb_state) {
100 	case SPECTRE_UNAFFECTED:
101 		return "";
102 	default:
103 	case SPECTRE_VULNERABLE:
104 		return ", but not BHB";
105 	case SPECTRE_MITIGATED:
106 		return ", BHB";
107 	}
108 }
109 
110 static bool _unprivileged_ebpf_enabled(void)
111 {
112 #ifdef CONFIG_BPF_SYSCALL
113 	return !sysctl_unprivileged_bpf_disabled;
114 #else
115 	return false;
116 #endif
117 }
118 
119 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
120 			    char *buf)
121 {
122 	enum mitigation_state bhb_state = arm64_get_spectre_bhb_state();
123 	const char *bhb_str = get_bhb_affected_string(bhb_state);
124 	const char *v2_str = "Branch predictor hardening";
125 
126 	switch (spectre_v2_state) {
127 	case SPECTRE_UNAFFECTED:
128 		if (bhb_state == SPECTRE_UNAFFECTED)
129 			return sprintf(buf, "Not affected\n");
130 
131 		/*
132 		 * Platforms affected by Spectre-BHB can't report
133 		 * "Not affected" for Spectre-v2.
134 		 */
135 		v2_str = "CSV2";
136 		fallthrough;
137 	case SPECTRE_MITIGATED:
138 		if (bhb_state == SPECTRE_MITIGATED && _unprivileged_ebpf_enabled())
139 			return sprintf(buf, "Vulnerable: Unprivileged eBPF enabled\n");
140 
141 		return sprintf(buf, "Mitigation: %s%s\n", v2_str, bhb_str);
142 	case SPECTRE_VULNERABLE:
143 		fallthrough;
144 	default:
145 		return sprintf(buf, "Vulnerable\n");
146 	}
147 }
148 
149 static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
150 {
151 	u64 pfr0;
152 	static const struct midr_range spectre_v2_safe_list[] = {
153 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
154 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
155 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
156 		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
157 		MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
158 		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_SILVER),
159 		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
160 		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
161 		{ /* sentinel */ }
162 	};
163 
164 	/* If the CPU has CSV2 set, we're safe */
165 	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
166 	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_CSV2_SHIFT))
167 		return SPECTRE_UNAFFECTED;
168 
169 	/* Alternatively, we have a list of unaffected CPUs */
170 	if (is_midr_in_range_list(spectre_v2_safe_list))
171 		return SPECTRE_UNAFFECTED;
172 
173 	return SPECTRE_VULNERABLE;
174 }
175 
176 static enum mitigation_state spectre_v2_get_cpu_fw_mitigation_state(void)
177 {
178 	int ret;
179 	struct arm_smccc_res res;
180 
181 	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
182 			     ARM_SMCCC_ARCH_WORKAROUND_1, &res);
183 
184 	ret = res.a0;
185 	switch (ret) {
186 	case SMCCC_RET_SUCCESS:
187 		return SPECTRE_MITIGATED;
188 	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
189 		return SPECTRE_UNAFFECTED;
190 	default:
191 		fallthrough;
192 	case SMCCC_RET_NOT_SUPPORTED:
193 		return SPECTRE_VULNERABLE;
194 	}
195 }
196 
197 bool has_spectre_v2(const struct arm64_cpu_capabilities *entry, int scope)
198 {
199 	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
200 
201 	if (spectre_v2_get_cpu_hw_mitigation_state() == SPECTRE_UNAFFECTED)
202 		return false;
203 
204 	if (spectre_v2_get_cpu_fw_mitigation_state() == SPECTRE_UNAFFECTED)
205 		return false;
206 
207 	return true;
208 }
209 
210 enum mitigation_state arm64_get_spectre_v2_state(void)
211 {
212 	return spectre_v2_state;
213 }
214 
215 DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
216 
217 static void install_bp_hardening_cb(bp_hardening_cb_t fn)
218 {
219 	__this_cpu_write(bp_hardening_data.fn, fn);
220 
221 	/*
222 	 * Vinz Clortho takes the hyp_vecs start/end "keys" at
223 	 * the door when we're a guest. Skip the hyp-vectors work.
224 	 */
225 	if (!is_hyp_mode_available())
226 		return;
227 
228 	__this_cpu_write(bp_hardening_data.slot, HYP_VECTOR_SPECTRE_DIRECT);
229 }
230 
231 /* Called during entry so must be noinstr */
232 static noinstr void call_smc_arch_workaround_1(void)
233 {
234 	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
235 }
236 
237 /* Called during entry so must be noinstr */
238 static noinstr void call_hvc_arch_workaround_1(void)
239 {
240 	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
241 }
242 
243 /* Called during entry so must be noinstr */
244 static noinstr void qcom_link_stack_sanitisation(void)
245 {
246 	u64 tmp;
247 
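	/*
	 * Stash the link register, execute 16 nested "bl . + 4" calls to
	 * refill the return-address (link stack) predictor with benign,
	 * kernel-controlled entries, then restore x30.
	 */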
248 	asm volatile("mov	%0, x30		\n"
249 		     ".rept	16		\n"
250 		     "bl	. + 4		\n"
251 		     ".endr			\n"
252 		     "mov	x30, %0		\n"
253 		     : "=&r" (tmp));
254 }
255 
256 static bp_hardening_cb_t spectre_v2_get_sw_mitigation_cb(void)
257 {
258 	u32 midr = read_cpuid_id();
259 	if (((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR) &&
260 	    ((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR_V1))
261 		return NULL;
262 
263 	return qcom_link_stack_sanitisation;
264 }
265 
266 static enum mitigation_state spectre_v2_enable_fw_mitigation(void)
267 {
268 	bp_hardening_cb_t cb;
269 	enum mitigation_state state;
270 
271 	state = spectre_v2_get_cpu_fw_mitigation_state();
272 	if (state != SPECTRE_MITIGATED)
273 		return state;
274 
275 	if (spectre_v2_mitigations_off())
276 		return SPECTRE_VULNERABLE;
277 
278 	switch (arm_smccc_1_1_get_conduit()) {
279 	case SMCCC_CONDUIT_HVC:
280 		cb = call_hvc_arch_workaround_1;
281 		break;
282 
283 	case SMCCC_CONDUIT_SMC:
284 		cb = call_smc_arch_workaround_1;
285 		break;
286 
287 	default:
288 		return SPECTRE_VULNERABLE;
289 	}
290 
291 	/*
292 	 * Prefer a CPU-specific workaround if it exists. Note that we
293 	 * still rely on firmware for the mitigation at EL2.
294 	 */
295 	cb = spectre_v2_get_sw_mitigation_cb() ?: cb;
296 	install_bp_hardening_cb(cb);
297 	return SPECTRE_MITIGATED;
298 }
299 
300 void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
301 {
302 	enum mitigation_state state;
303 
304 	WARN_ON(preemptible());
305 
306 	state = spectre_v2_get_cpu_hw_mitigation_state();
307 	if (state == SPECTRE_VULNERABLE)
308 		state = spectre_v2_enable_fw_mitigation();
309 
310 	update_mitigation_state(&spectre_v2_state, state);
311 }
312 
313 /*
314  * Spectre-v3a.
315  *
316  * Phew, there's not an awful lot to do here! We just instruct EL2 to use
317  * an indirect trampoline for the hyp vectors so that guests can't read
318  * VBAR_EL2 to defeat randomisation of the hypervisor VA layout.
319  */
320 bool has_spectre_v3a(const struct arm64_cpu_capabilities *entry, int scope)
321 {
322 	static const struct midr_range spectre_v3a_unsafe_list[] = {
323 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
324 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
325 		{},
326 	};
327 
328 	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
329 	return is_midr_in_range_list(spectre_v3a_unsafe_list);
330 }
331 
332 void spectre_v3a_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
333 {
334 	struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);
335 
336 	if (this_cpu_has_cap(ARM64_SPECTRE_V3A))
337 		data->slot += HYP_VECTOR_INDIRECT;
338 }
339 
340 /*
341  * Spectre v4.
342  *
343  * If you thought Spectre v2 was nasty, wait until you see this mess. A CPU is
344  * either:
345  *
346  * - Mitigated in hardware and listed in our "safe list".
347  * - Mitigated in hardware via PSTATE.SSBS.
348  * - Mitigated in software by firmware (sometimes referred to as SSBD).
349  *
350  * Wait, that doesn't sound so bad, does it? Keep reading...
351  *
352  * A major source of headaches is that the software mitigation is enabled on
353  * a per-task basis, but can also be forced on for the kernel, necessitating
354  * both context-switch *and* entry/exit hooks. To make it even worse, some CPUs
355  * allow EL0 to toggle SSBS directly, which can end up with the prctl() state
356  * being stale when re-entering the kernel. The usual big.LITTLE caveats apply,
357  * so you can have systems that have both firmware and SSBS mitigations. This
358  * means we actually have to reject late onlining of CPUs with mitigations if
359  * all of the currently onlined CPUs are safelisted, as the mitigation tends to
360  * be opt-in for userspace. Yes, really, the cure is worse than the disease.
361  *
362  * The only good part is that if the firmware mitigation is present, then it is
363  * present for all CPUs, meaning we don't have to worry about late onlining of a
364  * vulnerable CPU if one of the boot CPUs is using the firmware mitigation.
365  *
366  * Give me a VAX-11/780 any day of the week...
367  */
368 static enum mitigation_state spectre_v4_state;
369 
370 /* This is the per-cpu state tracking whether we need to talk to firmware */
371 DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
372 
373 enum spectre_v4_policy {
374 	SPECTRE_V4_POLICY_MITIGATION_DYNAMIC,
375 	SPECTRE_V4_POLICY_MITIGATION_ENABLED,
376 	SPECTRE_V4_POLICY_MITIGATION_DISABLED,
377 };
378 
379 static enum spectre_v4_policy __read_mostly __spectre_v4_policy;
380 
381 static const struct spectre_v4_param {
382 	const char		*str;
383 	enum spectre_v4_policy	policy;
384 } spectre_v4_params[] = {
385 	{ "force-on",	SPECTRE_V4_POLICY_MITIGATION_ENABLED, },
386 	{ "force-off",	SPECTRE_V4_POLICY_MITIGATION_DISABLED, },
387 	{ "kernel",	SPECTRE_V4_POLICY_MITIGATION_DYNAMIC, },
388 };
389 static int __init parse_spectre_v4_param(char *str)
390 {
391 	int i;
392 
393 	if (!str || !str[0])
394 		return -EINVAL;
395 
396 	for (i = 0; i < ARRAY_SIZE(spectre_v4_params); i++) {
397 		const struct spectre_v4_param *param = &spectre_v4_params[i];
398 
399 		if (strncmp(str, param->str, strlen(param->str)))
400 			continue;
401 
402 		__spectre_v4_policy = param->policy;
403 		return 0;
404 	}
405 
406 	return -EINVAL;
407 }
408 early_param("ssbd", parse_spectre_v4_param);
409 
410 /*
411  * Because this was all written in a rush by people working in different silos,
412  * we've ended up with multiple command line options to control the same thing.
413  * Wrap these up in some helpers, which prefer disabling the mitigation if faced
414  * with contradictory parameters. The mitigation is always either "off",
415  * "dynamic" or "on".
416  */
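/*
 * For example, booting with "ssbd=force-off" (or "mitigations=off") makes
 * spectre_v4_mitigations_off() return true, while "ssbd=kernel" selects the
 * dynamic, per-task policy.
 */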
417 static bool spectre_v4_mitigations_off(void)
418 {
419 	return cpu_mitigations_off() ||
420 	       __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DISABLED;
421 }
422 
423 /* Do we need to toggle the mitigation state on entry to/exit from the kernel? */
424 static bool spectre_v4_mitigations_dynamic(void)
425 {
426 	return !spectre_v4_mitigations_off() &&
427 	       __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DYNAMIC;
428 }
429 
430 static bool spectre_v4_mitigations_on(void)
431 {
432 	return !spectre_v4_mitigations_off() &&
433 	       __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_ENABLED;
434 }
435 
436 ssize_t cpu_show_spec_store_bypass(struct device *dev,
437 				   struct device_attribute *attr, char *buf)
438 {
439 	switch (spectre_v4_state) {
440 	case SPECTRE_UNAFFECTED:
441 		return sprintf(buf, "Not affected\n");
442 	case SPECTRE_MITIGATED:
443 		return sprintf(buf, "Mitigation: Speculative Store Bypass disabled via prctl\n");
444 	case SPECTRE_VULNERABLE:
445 		fallthrough;
446 	default:
447 		return sprintf(buf, "Vulnerable\n");
448 	}
449 }
450 
451 enum mitigation_state arm64_get_spectre_v4_state(void)
452 {
453 	return spectre_v4_state;
454 }
455 
456 static enum mitigation_state spectre_v4_get_cpu_hw_mitigation_state(void)
457 {
458 	static const struct midr_range spectre_v4_safe_list[] = {
459 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
460 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
461 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
462 		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
463 		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
464 		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
465 		{ /* sentinel */ },
466 	};
467 
468 	if (is_midr_in_range_list(spectre_v4_safe_list))
469 		return SPECTRE_UNAFFECTED;
470 
471 	/* CPU features are detected first */
472 	if (this_cpu_has_cap(ARM64_SSBS))
473 		return SPECTRE_MITIGATED;
474 
475 	return SPECTRE_VULNERABLE;
476 }
477 
478 static enum mitigation_state spectre_v4_get_cpu_fw_mitigation_state(void)
479 {
480 	int ret;
481 	struct arm_smccc_res res;
482 
483 	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
484 			     ARM_SMCCC_ARCH_WORKAROUND_2, &res);
485 
486 	ret = res.a0;
487 	switch (ret) {
488 	case SMCCC_RET_SUCCESS:
489 		return SPECTRE_MITIGATED;
490 	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
491 		fallthrough;
492 	case SMCCC_RET_NOT_REQUIRED:
493 		return SPECTRE_UNAFFECTED;
494 	default:
495 		fallthrough;
496 	case SMCCC_RET_NOT_SUPPORTED:
497 		return SPECTRE_VULNERABLE;
498 	}
499 }
500 
501 bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope)
502 {
503 	enum mitigation_state state;
504 
505 	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
506 
507 	state = spectre_v4_get_cpu_hw_mitigation_state();
508 	if (state == SPECTRE_VULNERABLE)
509 		state = spectre_v4_get_cpu_fw_mitigation_state();
510 
511 	return state != SPECTRE_UNAFFECTED;
512 }
513 
514 bool try_emulate_el1_ssbs(struct pt_regs *regs, u32 instr)
515 {
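	/*
	 * instr_val is the "MSR SSBS, #imm" (MSR immediate) encoding with a
	 * zero immediate. instr_mask clears the immediate bit so that both
	 * the #0 and #1 forms match; the bit itself then selects whether we
	 * set or clear PSR_SSBS_BIT below.
	 */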
516 	const u32 instr_mask = ~(1U << PSTATE_Imm_shift);
517 	const u32 instr_val = 0xd500401f | PSTATE_SSBS;
518 
519 	if ((instr & instr_mask) != instr_val)
520 		return false;
521 
522 	if (instr & BIT(PSTATE_Imm_shift))
523 		regs->pstate |= PSR_SSBS_BIT;
524 	else
525 		regs->pstate &= ~PSR_SSBS_BIT;
526 
527 	arm64_skip_faulting_instruction(regs, 4);
528 	return true;
529 }
530 
531 static enum mitigation_state spectre_v4_enable_hw_mitigation(void)
532 {
533 	enum mitigation_state state;
534 
535 	/*
536 	 * If the system is mitigated but this CPU doesn't have SSBS, then
537 	 * we must be on the safelist and there's nothing more to do.
538 	 */
539 	state = spectre_v4_get_cpu_hw_mitigation_state();
540 	if (state != SPECTRE_MITIGATED || !this_cpu_has_cap(ARM64_SSBS))
541 		return state;
542 
543 	if (spectre_v4_mitigations_off()) {
544 		sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
545 		set_pstate_ssbs(1);
546 		return SPECTRE_VULNERABLE;
547 	}
548 
549 	/* SCTLR_EL1.DSSBS was initialised to 0 during boot */
550 	set_pstate_ssbs(0);
551 
552 	/*
553 	 * SSBS is self-synchronizing and is intended to affect subsequent
554 	 * speculative instructions, but some CPUs can speculate with a stale
555 	 * value of SSBS.
556 	 *
557 	 * Mitigate this with an unconditional speculation barrier, as CPUs
558 	 * could mis-speculate branches and bypass a conditional barrier.
559 	 */
560 	if (IS_ENABLED(CONFIG_ARM64_ERRATUM_3194386))
561 		spec_bar();
562 
563 	return SPECTRE_MITIGATED;
564 }
565 
566 /*
567  * Patch a branch over the Spectre-v4 mitigation code with a NOP so that
568  * we fall through and check whether firmware needs to be called on this CPU.
569  */
570 void __init spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt,
571 						  __le32 *origptr,
572 						  __le32 *updptr, int nr_inst)
573 {
574 	BUG_ON(nr_inst != 1); /* Branch -> NOP */
575 
576 	if (spectre_v4_mitigations_off())
577 		return;
578 
579 	if (cpus_have_cap(ARM64_SSBS))
580 		return;
581 
582 	if (spectre_v4_mitigations_dynamic())
583 		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
584 }
585 
586 /*
587  * Patch a NOP in the Spectre-v4 mitigation code with an SMC/HVC instruction
588  * to call into firmware to adjust the mitigation state.
589  */
590 void __init smccc_patch_fw_mitigation_conduit(struct alt_instr *alt,
591 					       __le32 *origptr,
592 					       __le32 *updptr, int nr_inst)
593 {
594 	u32 insn;
595 
596 	BUG_ON(nr_inst != 1); /* NOP -> HVC/SMC */
597 
598 	switch (arm_smccc_1_1_get_conduit()) {
599 	case SMCCC_CONDUIT_HVC:
600 		insn = aarch64_insn_get_hvc_value();
601 		break;
602 	case SMCCC_CONDUIT_SMC:
603 		insn = aarch64_insn_get_smc_value();
604 		break;
605 	default:
606 		return;
607 	}
608 
609 	*updptr = cpu_to_le32(insn);
610 }
611 
612 static enum mitigation_state spectre_v4_enable_fw_mitigation(void)
613 {
614 	enum mitigation_state state;
615 
616 	state = spectre_v4_get_cpu_fw_mitigation_state();
617 	if (state != SPECTRE_MITIGATED)
618 		return state;
619 
620 	if (spectre_v4_mitigations_off()) {
621 		arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, false, NULL);
622 		return SPECTRE_VULNERABLE;
623 	}
624 
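	/*
	 * The WA2 call takes an enable/disable state as its argument: pass
	 * true to ask firmware to switch the mitigation on for this CPU.
	 */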
625 	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, true, NULL);
626 
627 	if (spectre_v4_mitigations_dynamic())
628 		__this_cpu_write(arm64_ssbd_callback_required, 1);
629 
630 	return SPECTRE_MITIGATED;
631 }
632 
633 void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
634 {
635 	enum mitigation_state state;
636 
637 	WARN_ON(preemptible());
638 
639 	state = spectre_v4_enable_hw_mitigation();
640 	if (state == SPECTRE_VULNERABLE)
641 		state = spectre_v4_enable_fw_mitigation();
642 
643 	update_mitigation_state(&spectre_v4_state, state);
644 }
645 
646 static void __update_pstate_ssbs(struct pt_regs *regs, bool state)
647 {
648 	u64 bit = compat_user_mode(regs) ? PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;
649 
650 	if (state)
651 		regs->pstate |= bit;
652 	else
653 		regs->pstate &= ~bit;
654 }
655 
656 void spectre_v4_enable_task_mitigation(struct task_struct *tsk)
657 {
658 	struct pt_regs *regs = task_pt_regs(tsk);
659 	bool ssbs = false, kthread = tsk->flags & PF_KTHREAD;
660 
661 	if (spectre_v4_mitigations_off())
662 		ssbs = true;
663 	else if (spectre_v4_mitigations_dynamic() && !kthread)
664 		ssbs = !test_tsk_thread_flag(tsk, TIF_SSBD);
665 
666 	__update_pstate_ssbs(regs, ssbs);
667 }
668 
669 /*
670  * The Spectre-v4 mitigation can be controlled via a prctl() from userspace.
671  * This is interesting because the "speculation disabled" behaviour can be
672  * configured so that it is preserved across exec(), which means that the
673  * prctl() may be necessary even when PSTATE.SSBS can be toggled directly
674  * from userspace.
675  */
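/*
 * A minimal, illustrative userspace sketch (error handling omitted) of
 * driving this via the generic speculation-control prctl() interface:
 *
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	// Opt this task in to the Spectre-v4 mitigation...
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *	      PR_SPEC_DISABLE, 0, 0);
 *	// ...and query the resulting state.
 *	long state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *			   0, 0, 0);
 */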
676 static void ssbd_prctl_enable_mitigation(struct task_struct *task)
677 {
678 	task_clear_spec_ssb_noexec(task);
679 	task_set_spec_ssb_disable(task);
680 	set_tsk_thread_flag(task, TIF_SSBD);
681 }
682 
683 static void ssbd_prctl_disable_mitigation(struct task_struct *task)
684 {
685 	task_clear_spec_ssb_noexec(task);
686 	task_clear_spec_ssb_disable(task);
687 	clear_tsk_thread_flag(task, TIF_SSBD);
688 }
689 
690 static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
691 {
692 	switch (ctrl) {
693 	case PR_SPEC_ENABLE:
694 		/* Enable speculation: disable mitigation */
695 		/*
696 		 * Force disabled speculation prevents it from being
697 		 * re-enabled.
698 		 */
699 		if (task_spec_ssb_force_disable(task))
700 			return -EPERM;
701 
702 		/*
703 		 * If the mitigation is forced on, then speculation is forced
704 		 * off and we again prevent it from being re-enabled.
705 		 */
706 		if (spectre_v4_mitigations_on())
707 			return -EPERM;
708 
709 		ssbd_prctl_disable_mitigation(task);
710 		break;
711 	case PR_SPEC_FORCE_DISABLE:
712 		/* Force disable speculation: force enable mitigation */
713 		/*
714 		 * If the mitigation is forced off, then speculation is forced
715 		 * on and we prevent it from being disabled.
716 		 */
717 		if (spectre_v4_mitigations_off())
718 			return -EPERM;
719 
720 		task_set_spec_ssb_force_disable(task);
721 		fallthrough;
722 	case PR_SPEC_DISABLE:
723 		/* Disable speculation: enable mitigation */
724 		/* Same as PR_SPEC_FORCE_DISABLE */
725 		if (spectre_v4_mitigations_off())
726 			return -EPERM;
727 
728 		ssbd_prctl_enable_mitigation(task);
729 		break;
730 	case PR_SPEC_DISABLE_NOEXEC:
731 		/* Disable speculation until execve(): enable mitigation */
732 		/*
733 		 * If the mitigation state is forced one way or the other, then
734 		 * we must fail now before we try to toggle it on execve().
735 		 */
736 		if (task_spec_ssb_force_disable(task) ||
737 		    spectre_v4_mitigations_off() ||
738 		    spectre_v4_mitigations_on()) {
739 			return -EPERM;
740 		}
741 
742 		ssbd_prctl_enable_mitigation(task);
743 		task_set_spec_ssb_noexec(task);
744 		break;
745 	default:
746 		return -ERANGE;
747 	}
748 
749 	spectre_v4_enable_task_mitigation(task);
750 	return 0;
751 }
752 
753 int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
754 			     unsigned long ctrl)
755 {
756 	switch (which) {
757 	case PR_SPEC_STORE_BYPASS:
758 		return ssbd_prctl_set(task, ctrl);
759 	default:
760 		return -ENODEV;
761 	}
762 }
763 
764 static int ssbd_prctl_get(struct task_struct *task)
765 {
766 	switch (spectre_v4_state) {
767 	case SPECTRE_UNAFFECTED:
768 		return PR_SPEC_NOT_AFFECTED;
769 	case SPECTRE_MITIGATED:
770 		if (spectre_v4_mitigations_on())
771 			return PR_SPEC_NOT_AFFECTED;
772 
773 		if (spectre_v4_mitigations_dynamic())
774 			break;
775 
776 		/* Mitigations are disabled, so we're vulnerable. */
777 		fallthrough;
778 	case SPECTRE_VULNERABLE:
779 		fallthrough;
780 	default:
781 		return PR_SPEC_ENABLE;
782 	}
783 
784 	/* Check the mitigation state for this task */
785 	if (task_spec_ssb_force_disable(task))
786 		return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
787 
788 	if (task_spec_ssb_noexec(task))
789 		return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;
790 
791 	if (task_spec_ssb_disable(task))
792 		return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
793 
794 	return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
795 }
796 
797 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
798 {
799 	switch (which) {
800 	case PR_SPEC_STORE_BYPASS:
801 		return ssbd_prctl_get(task);
802 	default:
803 		return -ENODEV;
804 	}
805 }
806 
807 /*
808  * Spectre BHB.
809  *
810  * A CPU is either:
811  * - Mitigated by a branchy loop run a CPU-specific number of times, and
812  *   listed in our "loop mitigated list".
813  * - Mitigated in software by the firmware Spectre v2 call.
814  * - Has the ClearBHB instruction to perform the mitigation.
815  * - Has the 'Exception Clears Branch History Buffer' (ECBHB) feature, so no
816  *   software mitigation in the vectors is needed.
817  * - Has CSV2.3, so is unaffected.
818  */
819 static enum mitigation_state spectre_bhb_state;
820 
821 enum mitigation_state arm64_get_spectre_bhb_state(void)
822 {
823 	return spectre_bhb_state;
824 }
825 
826 enum bhb_mitigation_bits {
827 	BHB_LOOP,
828 	BHB_FW,
829 	BHB_HW,
830 	BHB_INSN,
831 };
832 static unsigned long system_bhb_mitigations;
833 
834 /*
835  * This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
836  * SCOPE_SYSTEM call will give the right answer.
837  */
838 static bool is_spectre_bhb_safe(int scope)
839 {
840 	static const struct midr_range spectre_bhb_safe_list[] = {
841 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
842 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
843 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
844 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A510),
845 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A520),
846 		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
847 		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_SILVER),
848 		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
849 		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
850 		{},
851 	};
852 	static bool all_safe = true;
853 
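	/*
	 * all_safe latches to false once any CPU checked with SCOPE_LOCAL_CPU
	 * falls outside the safe list; a later SCOPE_SYSTEM query returns
	 * that aggregate answer.
	 */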
854 	if (scope != SCOPE_LOCAL_CPU)
855 		return all_safe;
856 
857 	if (is_midr_in_range_list(spectre_bhb_safe_list))
858 		return true;
859 
860 	all_safe = false;
861 
862 	return false;
863 }
864 
865 static u8 spectre_bhb_loop_affected(void)
866 {
867 	u8 k = 0;
868 
869 	static const struct midr_range spectre_bhb_k132_list[] = {
870 		MIDR_ALL_VERSIONS(MIDR_CORTEX_X3),
871 		MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
872 		{},
873 	};
874 	static const struct midr_range spectre_bhb_k38_list[] = {
875 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A715),
876 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A720),
877 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A720AE),
878 		{},
879 	};
880 	static const struct midr_range spectre_bhb_k32_list[] = {
881 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
882 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE),
883 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
884 		MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
885 		MIDR_ALL_VERSIONS(MIDR_CORTEX_X1C),
886 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
887 		MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
888 		MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
889 		MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
890 		{},
891 	};
892 	static const struct midr_range spectre_bhb_k24_list[] = {
893 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
894 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A76AE),
895 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
896 		MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
897 		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_GOLD),
898 		MIDR_ALL_VERSIONS(MIDR_HISI_HIP09),
899 		{},
900 	};
901 	static const struct midr_range spectre_bhb_k11_list[] = {
902 		MIDR_ALL_VERSIONS(MIDR_AMPERE1),
903 		{},
904 	};
905 	static const struct midr_range spectre_bhb_k8_list[] = {
906 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
907 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
908 		{},
909 	};
910 
911 	if (is_midr_in_range_list(spectre_bhb_k132_list))
912 		k = 132;
913 	else if (is_midr_in_range_list(spectre_bhb_k38_list))
914 		k = 38;
915 	else if (is_midr_in_range_list(spectre_bhb_k32_list))
916 		k = 32;
917 	else if (is_midr_in_range_list(spectre_bhb_k24_list))
918 		k = 24;
919 	else if (is_midr_in_range_list(spectre_bhb_k11_list))
920 		k = 11;
921 	else if (is_midr_in_range_list(spectre_bhb_k8_list))
922 		k =  8;
923 
924 	return k;
925 }
926 
927 static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
928 {
929 	int ret;
930 	struct arm_smccc_res res;
931 
932 	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
933 			     ARM_SMCCC_ARCH_WORKAROUND_3, &res);
934 
935 	ret = res.a0;
936 	switch (ret) {
937 	case SMCCC_RET_SUCCESS:
938 		return SPECTRE_MITIGATED;
939 	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
940 		return SPECTRE_UNAFFECTED;
941 	default:
942 		fallthrough;
943 	case SMCCC_RET_NOT_SUPPORTED:
944 		return SPECTRE_VULNERABLE;
945 	}
946 }
947 
948 static bool has_spectre_bhb_fw_mitigation(void)
949 {
950 	enum mitigation_state fw_state;
951 	bool has_smccc = arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_NONE;
952 
953 	fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
954 	return has_smccc && fw_state == SPECTRE_MITIGATED;
955 }
956 
957 static bool supports_ecbhb(int scope)
958 {
959 	u64 mmfr1;
960 
961 	if (scope == SCOPE_LOCAL_CPU)
962 		mmfr1 = read_sysreg_s(SYS_ID_AA64MMFR1_EL1);
963 	else
964 		mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
965 
966 	return cpuid_feature_extract_unsigned_field(mmfr1,
967 						    ID_AA64MMFR1_EL1_ECBHB_SHIFT);
968 }
969 
970 static u8 max_bhb_k;
971 
972 bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
973 			     int scope)
974 {
975 	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
976 
977 	if (supports_csv2p3(scope))
978 		return false;
979 
980 	if (is_spectre_bhb_safe(scope))
981 		return false;
982 
983 	/*
984 	 * At this point the core isn't known to be "safe" so we're going to
985 	 * assume it's vulnerable. We still need to update `max_bhb_k`, but
986 	 * only if we aren't mitigating with clearbhb.
987 	 */
988 	if (scope == SCOPE_LOCAL_CPU && !supports_clearbhb(SCOPE_LOCAL_CPU))
989 		max_bhb_k = max(max_bhb_k, spectre_bhb_loop_affected());
990 
991 	return true;
992 }
993 
994 u8 get_spectre_bhb_loop_value(void)
995 {
996 	return max_bhb_k;
997 }
998 
999 static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
1000 {
1001 	const char *v = arm64_get_bp_hardening_vector(slot);
1002 
1003 	__this_cpu_write(this_cpu_vector, v);
1004 
1005 	/*
1006 	 * When KPTI is in use, the vectors are switched when exiting to
1007 	 * user-space.
1008 	 */
1009 	if (cpus_have_cap(ARM64_UNMAP_KERNEL_AT_EL0))
1010 		return;
1011 
1012 	write_sysreg(v, vbar_el1);
1013 	isb();
1014 }
1015 
1016 bool __read_mostly __nospectre_bhb;
1017 static int __init parse_spectre_bhb_param(char *str)
1018 {
1019 	__nospectre_bhb = true;
1020 	return 0;
1021 }
1022 early_param("nospectre_bhb", parse_spectre_bhb_param);
1023 
1024 void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
1025 {
1026 	bp_hardening_cb_t cpu_cb;
1027 	enum mitigation_state state = SPECTRE_VULNERABLE;
1028 	struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);
1029 
1030 	if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
1031 		return;
1032 
1033 	if (arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE) {
1034 		/* No point mitigating Spectre-BHB alone. */
1035 	} else if (supports_ecbhb(SCOPE_LOCAL_CPU)) {
1036 		state = SPECTRE_MITIGATED;
1037 		set_bit(BHB_HW, &system_bhb_mitigations);
1038 	} else if (supports_clearbhb(SCOPE_LOCAL_CPU)) {
1039 		/*
1040 		 * Ensure KVM uses the indirect vector which will have ClearBHB
1041 		 * added.
1042 		 */
1043 		if (!data->slot)
1044 			data->slot = HYP_VECTOR_INDIRECT;
1045 
1046 		this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN);
1047 		state = SPECTRE_MITIGATED;
1048 		set_bit(BHB_INSN, &system_bhb_mitigations);
1049 	} else if (spectre_bhb_loop_affected()) {
1050 		/*
1051 		 * Ensure KVM uses the indirect vector which will have the
1052 		 * branchy-loop added. A57/A72-r0 will already have selected
1053 		 * the spectre-indirect vector, which is sufficient for BHB
1054 		 * too.
1055 		 */
1056 		if (!data->slot)
1057 			data->slot = HYP_VECTOR_INDIRECT;
1058 
1059 		this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);
1060 		state = SPECTRE_MITIGATED;
1061 		set_bit(BHB_LOOP, &system_bhb_mitigations);
1062 	} else if (has_spectre_bhb_fw_mitigation()) {
1063 		/*
1064 		 * Ensure KVM uses one of the spectre bp_hardening
1065 		 * vectors. The indirect vector doesn't include the EL3
1066 		 * call, so needs upgrading to
1067 		 * HYP_VECTOR_SPECTRE_INDIRECT.
1068 		 */
1069 		if (!data->slot || data->slot == HYP_VECTOR_INDIRECT)
1070 			data->slot += 1;
1071 
1072 		this_cpu_set_vectors(EL1_VECTOR_BHB_FW);
1073 
1074 		/*
1075 		 * The WA3 call in the vectors supersedes the WA1 call
1076 		 * made during context-switch. Uninstall any firmware
1077 		 * bp_hardening callback.
1078 		 */
1079 		cpu_cb = spectre_v2_get_sw_mitigation_cb();
1080 		if (__this_cpu_read(bp_hardening_data.fn) != cpu_cb)
1081 			__this_cpu_write(bp_hardening_data.fn, NULL);
1082 
1083 		state = SPECTRE_MITIGATED;
1084 		set_bit(BHB_FW, &system_bhb_mitigations);
1085 	}
1086 
1087 	update_mitigation_state(&spectre_bhb_state, state);
1088 }
1089 
1090 bool is_spectre_bhb_fw_mitigated(void)
1091 {
1092 	return test_bit(BHB_FW, &system_bhb_mitigations);
1093 }
1094 
1095 /* Patched to NOP when enabled */
1096 void noinstr spectre_bhb_patch_loop_mitigation_enable(struct alt_instr *alt,
1097 						     __le32 *origptr,
1098 						      __le32 *updptr, int nr_inst)
1099 {
1100 	BUG_ON(nr_inst != 1);
1101 
1102 	if (test_bit(BHB_LOOP, &system_bhb_mitigations))
1103 		*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
1104 }
1105 
1106 /* Patched to NOP when enabled */
1107 void noinstr spectre_bhb_patch_fw_mitigation_enabled(struct alt_instr *alt,
1108 						   __le32 *origptr,
1109 						   __le32 *updptr, int nr_inst)
1110 {
1111 	BUG_ON(nr_inst != 1);
1112 
1113 	if (test_bit(BHB_FW, &system_bhb_mitigations))
1114 		*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
1115 }
1116 
1117 /* Patched to correct the immediate */
1118 void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt,
1119 				   __le32 *origptr, __le32 *updptr, int nr_inst)
1120 {
1121 	u8 rd;
1122 	u32 insn;
1123 
1124 	BUG_ON(nr_inst != 1); /* MOV -> MOV */
1125 
1126 	if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY))
1127 		return;
1128 
1129 	insn = le32_to_cpu(*origptr);
1130 	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
1131 	insn = aarch64_insn_gen_movewide(rd, max_bhb_k, 0,
1132 					 AARCH64_INSN_VARIANT_64BIT,
1133 					 AARCH64_INSN_MOVEWIDE_ZERO);
1134 	*updptr++ = cpu_to_le32(insn);
1135 }
1136 
1137 /* Patched to mov WA3 when supported */
1138 void noinstr spectre_bhb_patch_wa3(struct alt_instr *alt,
1139 				   __le32 *origptr, __le32 *updptr, int nr_inst)
1140 {
1141 	u8 rd;
1142 	u32 insn;
1143 
1144 	BUG_ON(nr_inst != 1); /* MOV -> MOV */
1145 
1146 	if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY) ||
1147 	    !test_bit(BHB_FW, &system_bhb_mitigations))
1148 		return;
1149 
1150 	insn = le32_to_cpu(*origptr);
1151 	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
1152 
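	/*
	 * ORR Wd, WZR, #imm is the MOV (bitmask immediate) alias: materialise
	 * the ARM_SMCCC_ARCH_WORKAROUND_3 function ID into the register the
	 * original MOV was writing.
	 */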
1153 	insn = aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_ORR,
1154 						  AARCH64_INSN_VARIANT_32BIT,
1155 						  AARCH64_INSN_REG_ZR, rd,
1156 						  ARM_SMCCC_ARCH_WORKAROUND_3);
1157 	if (WARN_ON_ONCE(insn == AARCH64_BREAK_FAULT))
1158 		return;
1159 
1160 	*updptr++ = cpu_to_le32(insn);
1161 }
1162 
1163 /* Patched to NOP when not supported */
1164 void __init spectre_bhb_patch_clearbhb(struct alt_instr *alt,
1165 				   __le32 *origptr, __le32 *updptr, int nr_inst)
1166 {
1167 	BUG_ON(nr_inst != 2);
1168 
1169 	if (test_bit(BHB_INSN, &system_bhb_mitigations))
1170 		return;
1171 
1172 	*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
1173 	*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
1174 }
1175 
1176 #ifdef CONFIG_BPF_SYSCALL
1177 #define EBPF_WARN "Unprivileged eBPF is enabled, data leaks possible via Spectre v2 BHB attacks!\n"
1178 void unpriv_ebpf_notify(int new_state)
1179 {
1180 	if (spectre_v2_state == SPECTRE_VULNERABLE ||
1181 	    spectre_bhb_state != SPECTRE_MITIGATED)
1182 		return;
1183 
1184 	if (!new_state)
1185 		pr_err("WARNING: %s", EBPF_WARN);
1186 }
1187 #endif
1188 
1189 void spectre_print_disabled_mitigations(void)
1190 {
1191 	/* Keep a single copy of the common message suffix to avoid duplication. */
1192 	const char *spectre_disabled_suffix = "mitigation disabled by command-line option\n";
1193 
1194 	if (spectre_v2_mitigations_off())
1195 		pr_info("spectre-v2 %s", spectre_disabled_suffix);
1196 
1197 	if (spectre_v4_mitigations_off())
1198 		pr_info("spectre-v4 %s", spectre_disabled_suffix);
1199 
1200 	if (__nospectre_bhb || cpu_mitigations_off())
1201 		pr_info("spectre-bhb %s", spectre_disabled_suffix);
1202 }
1203