xref: /linux/arch/arm64/kernel/entry-common.c (revision 6fb44438a5e1897a72dd11139274735256be8069)
// SPDX-License-Identifier: GPL-2.0
/*
 * Exception handling code
 *
 * Copyright (C) 2019 ARM Ltd.
 */

#include <linux/context_tracking.h>
#include <linux/kasan.h>
#include <linux/linkage.h>
#include <linux/livepatch.h>
#include <linux/lockdep.h>
#include <linux/ptrace.h>
#include <linux/resume_user_mode.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/thread_info.h>

#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/irq_regs.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/sdei.h>
#include <asm/stacktrace.h>
#include <asm/sysreg.h>
#include <asm/system_misc.h>

/*
 * Handle IRQ/context state management when entering from kernel mode.
 * Before this function is called it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 *
 * This is intended to match the logic in irqentry_enter(), handling the kernel
 * mode transitions only.
 */
static __always_inline void __enter_from_kernel_mode(struct pt_regs *regs)
{
	regs->exit_rcu = false;

	if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
		lockdep_hardirqs_off(CALLER_ADDR0);
		ct_irq_enter();
		trace_hardirqs_off_finish();

		regs->exit_rcu = true;
		return;
	}

	lockdep_hardirqs_off(CALLER_ADDR0);
	rcu_irq_enter_check_tick();
	trace_hardirqs_off_finish();
}

static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
{
	__enter_from_kernel_mode(regs);
	mte_check_tfsr_entry();
	mte_disable_tco_entry(current);
}

/*
 * Handle IRQ/context state management when exiting to kernel mode.
 * After this function returns it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 *
 * This is intended to match the logic in irqentry_exit(), handling the kernel
 * mode transitions only, and with preemption handled elsewhere.
 */
static __always_inline void __exit_to_kernel_mode(struct pt_regs *regs)
{
	lockdep_assert_irqs_disabled();

	if (interrupts_enabled(regs)) {
		if (regs->exit_rcu) {
			trace_hardirqs_on_prepare();
			lockdep_hardirqs_on_prepare();
			ct_irq_exit();
			lockdep_hardirqs_on(CALLER_ADDR0);
			return;
		}

		trace_hardirqs_on();
	} else {
		if (regs->exit_rcu)
			ct_irq_exit();
	}
}

static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
{
	mte_check_tfsr_exit();
	__exit_to_kernel_mode(regs);
}

/*
 * Handle IRQ/context state management when entering from user mode.
 * Before this function is called it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 */
static __always_inline void __enter_from_user_mode(void)
{
	lockdep_hardirqs_off(CALLER_ADDR0);
	CT_WARN_ON(ct_state() != CT_STATE_USER);
	user_exit_irqoff();
	trace_hardirqs_off_finish();
	mte_disable_tco_entry(current);
}

static __always_inline void enter_from_user_mode(struct pt_regs *regs)
{
	__enter_from_user_mode();
}

/*
 * Handle IRQ/context state management when exiting to user mode.
 * After this function returns it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 */
static __always_inline void __exit_to_user_mode(void)
{
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare();
	user_enter_irqoff();
	lockdep_hardirqs_on(CALLER_ADDR0);
}

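/*
 * Descriptive note (comment added for clarity, not from the original source):
 * pending work flags are processed with IRQs enabled, then re-read with IRQs
 * disabled; the loop repeats until no _TIF_WORK_MASK bits remain set.
 */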
static void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
{
	do {
		local_irq_enable();

		if (thread_flags & (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY))
			schedule();

		if (thread_flags & _TIF_UPROBE)
			uprobe_notify_resume(regs);

		if (thread_flags & _TIF_MTE_ASYNC_FAULT) {
			clear_thread_flag(TIF_MTE_ASYNC_FAULT);
			send_sig_fault(SIGSEGV, SEGV_MTEAERR,
				       (void __user *)NULL, current);
		}

		if (thread_flags & _TIF_PATCH_PENDING)
			klp_update_patch_state(current);

		if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
			do_signal(regs);

		if (thread_flags & _TIF_NOTIFY_RESUME)
			resume_user_mode_work(regs);

		if (thread_flags & _TIF_FOREIGN_FPSTATE)
			fpsimd_restore_current_state();

		local_irq_disable();
		thread_flags = read_thread_flags();
	} while (thread_flags & _TIF_WORK_MASK);
}

static __always_inline void exit_to_user_mode_prepare(struct pt_regs *regs)
{
	unsigned long flags;

	local_irq_disable();

	flags = read_thread_flags();
	if (unlikely(flags & _TIF_WORK_MASK))
		do_notify_resume(regs, flags);

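	/*
	 * Assumption (comment added for clarity, not from the original
	 * source): masking all DAIF exceptions here ensures no new work can
	 * be queued between the final flag check above and the eventual
	 * return to userspace.
	 */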
	local_daif_mask();

	lockdep_sys_exit();
}

static __always_inline void exit_to_user_mode(struct pt_regs *regs)
{
	exit_to_user_mode_prepare(regs);
	mte_check_tfsr_exit();
	__exit_to_user_mode();
}

asmlinkage void noinstr asm_exit_to_user_mode(struct pt_regs *regs)
{
	exit_to_user_mode(regs);
}

/*
 * Handle IRQ/context state management when entering an NMI from user/kernel
 * mode. Before this function is called it is not safe to call regular kernel
 * code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_enter_nmi(struct pt_regs *regs)
{
	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

	__nmi_enter();
	lockdep_hardirqs_off(CALLER_ADDR0);
	lockdep_hardirq_enter();
	ct_nmi_enter();

	trace_hardirqs_off_finish();
	ftrace_nmi_enter();
}

/*
 * Handle IRQ/context state management when exiting an NMI from user/kernel
 * mode. After this function returns it is not safe to call regular kernel
 * code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_exit_nmi(struct pt_regs *regs)
{
	bool restore = regs->lockdep_hardirqs;

	ftrace_nmi_exit();
	if (restore) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare();
	}

	ct_nmi_exit();
	lockdep_hardirq_exit();
	if (restore)
		lockdep_hardirqs_on(CALLER_ADDR0);
	__nmi_exit();
}

/*
 * Handle IRQ/context state management when entering a debug exception from
 * kernel mode. Before this function is called it is not safe to call regular
 * kernel code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
{
	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

	lockdep_hardirqs_off(CALLER_ADDR0);
	ct_nmi_enter();

	trace_hardirqs_off_finish();
}

/*
 * Handle IRQ/context state management when exiting a debug exception from
 * kernel mode. After this function returns it is not safe to call regular
 * kernel code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
{
	bool restore = regs->lockdep_hardirqs;

	if (restore) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare();
	}

	ct_nmi_exit();
	if (restore)
		lockdep_hardirqs_on(CALLER_ADDR0);
}

#ifdef CONFIG_PREEMPT_DYNAMIC
DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
#define need_irq_preemption() \
	(static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
#else
#define need_irq_preemption()	(IS_ENABLED(CONFIG_PREEMPTION))
#endif
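
/*
 * Note (assumption, not part of the original source): with
 * CONFIG_PREEMPT_DYNAMIC the static key above is expected to track the
 * preemption model selected at boot or runtime, so need_irq_preemption()
 * reflects whether preemption on IRQ exit is currently enabled.
 */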

static void __sched arm64_preempt_schedule_irq(void)
{
	if (!need_irq_preemption())
		return;

	/*
	 * Note: thread_info::preempt_count includes both thread_info::count
	 * and thread_info::need_resched, and is not equivalent to
	 * preempt_count().
	 */
	if (READ_ONCE(current_thread_info()->preempt_count) != 0)
		return;

	/*
	 * DAIF.DA are cleared at the start of IRQ/FIQ handling, and when GIC
	 * priority masking is used the GIC irqchip driver will clear DAIF.IF
	 * using gic_arch_enable_irqs() for normal IRQs. If anything is set in
	 * DAIF we must have handled an NMI, so skip preemption.
	 */
	if (system_uses_irq_prio_masking() && read_sysreg(daif))
		return;

	/*
	 * Preempting a task from an IRQ means we leave copies of PSTATE
	 * on the stack. cpufeature's enable calls may modify PSTATE, but
	 * resuming one of these preempted tasks would undo those changes.
	 *
	 * Only allow a task to be preempted once cpufeatures have been
	 * enabled.
	 */
	if (system_capabilities_finalized())
		preempt_schedule_irq();
}

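/*
 * Descriptive note (comment added for clarity, not from the original source):
 * when the exception was taken on the task stack, the handler is run on the
 * per-CPU IRQ stack via call_on_irq_stack() so that interrupt handling does
 * not eat into the task stack; otherwise the handler is called directly.
 */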
static void do_interrupt_handler(struct pt_regs *regs,
				 void (*handler)(struct pt_regs *))
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	if (on_thread_stack())
		call_on_irq_stack(regs, handler);
	else
		handler(regs);

	set_irq_regs(old_regs);
}

extern void (*handle_arch_irq)(struct pt_regs *);
extern void (*handle_arch_fiq)(struct pt_regs *);

static void noinstr __panic_unhandled(struct pt_regs *regs, const char *vector,
				      unsigned long esr)
{
	arm64_enter_nmi(regs);

	console_verbose();

	pr_crit("Unhandled %s exception on CPU%d, ESR 0x%016lx -- %s\n",
		vector, smp_processor_id(), esr,
		esr_get_class_string(esr));

	__show_regs(regs);
	panic("Unhandled exception");
}

#define UNHANDLED(el, regsize, vector)							\
asmlinkage void noinstr el##_##regsize##_##vector##_handler(struct pt_regs *regs)	\
{											\
	const char *desc = #regsize "-bit " #el " " #vector;				\
	__panic_unhandled(regs, desc, read_sysreg(esr_el1));				\
}
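
/*
 * For illustration (comment added, not in the original source): an invocation
 * such as UNHANDLED(el1t, 64, sync) expands to roughly:
 *
 *	asmlinkage void noinstr el1t_64_sync_handler(struct pt_regs *regs)
 *	{
 *		const char *desc = "64-bit el1t sync";
 *		__panic_unhandled(regs, desc, read_sysreg(esr_el1));
 *	}
 */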

#ifdef CONFIG_ARM64_ERRATUM_1463225
static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

static void cortex_a76_erratum_1463225_svc_handler(void)
{
	u64 reg, val;

	if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
		return;

	if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225)))
		return;

	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
	reg = read_sysreg(mdscr_el1);
	val = reg | MDSCR_EL1_SS | MDSCR_EL1_KDE;
	write_sysreg(val, mdscr_el1);
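	/*
	 * Descriptive note (comment added for clarity): "msr daifclr, #8"
	 * clears PSTATE.D, unmasking debug exceptions so that the dummy
	 * single-step exception described below can be taken.
	 */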
	asm volatile("msr daifclr, #8");
	isb();

	/* We will have taken a single-step exception by this point */

	write_sysreg(reg, mdscr_el1);
	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
}

static __always_inline bool
cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
	if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
		return false;

	/*
	 * We've taken a dummy step exception from the kernel to ensure
	 * that interrupts are re-enabled on the syscall path. Return back
	 * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions
	 * masked so that we can safely restore the mdscr and get on with
	 * handling the syscall.
	 */
	regs->pstate |= PSR_D_BIT;
	return true;
}
#else /* CONFIG_ARM64_ERRATUM_1463225 */
static void cortex_a76_erratum_1463225_svc_handler(void) { }
static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
	return false;
}
#endif /* CONFIG_ARM64_ERRATUM_1463225 */

/*
 * As per the ABI exit SME streaming mode and clear the SVE state not
 * shared with FPSIMD on syscall entry.
 */
static inline void fpsimd_syscall_enter(void)
{
	/* Ensure PSTATE.SM is clear, but leave PSTATE.ZA as-is. */
	if (system_supports_sme())
		sme_smstop_sm();

	/*
	 * The CPU is not in streaming mode. If non-streaming SVE is not
	 * supported, there is no SVE state that needs to be discarded.
	 */
	if (!system_supports_sve())
		return;

	if (test_thread_flag(TIF_SVE)) {
		unsigned int sve_vq_minus_one;

		sve_vq_minus_one = sve_vq_from_vl(task_get_sve_vl(current)) - 1;
		sve_flush_live(true, sve_vq_minus_one);
	}

	/*
	 * Any live non-FPSIMD SVE state has been zeroed. Allow
	 * fpsimd_save_user_state() to lazily discard SVE state until either
	 * the live state is unbound or fpsimd_syscall_exit() is called.
	 */
	__this_cpu_write(fpsimd_last_state.to_save, FP_STATE_FPSIMD);
}

static __always_inline void fpsimd_syscall_exit(void)
{
	if (!system_supports_sve())
		return;

	/*
	 * The current task's user FPSIMD/SVE/SME state is now bound to this
	 * CPU. The fpsimd_last_state.to_save value is either:
	 *
	 * - FP_STATE_FPSIMD, if the state has not been reloaded on this CPU
	 *   since fpsimd_syscall_enter().
	 *
	 * - FP_STATE_CURRENT, if the state has been reloaded on this CPU at
	 *   any point.
	 *
	 * Reset this to FP_STATE_CURRENT to stop lazy discarding.
	 */
	__this_cpu_write(fpsimd_last_state.to_save, FP_STATE_CURRENT);
}

/*
 * In debug exception context, we explicitly disable preemption despite
 * having interrupts disabled.
 * This serves two purposes: it makes it much less likely that we would
 * accidentally schedule in exception context and it will force a warning
 * if we somehow manage to schedule by accident.
 */
static void debug_exception_enter(struct pt_regs *regs)
{
	preempt_disable();

	/* This code is a bit fragile.  Test it. */
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "exception_enter didn't work");
}
NOKPROBE_SYMBOL(debug_exception_enter);

static void debug_exception_exit(struct pt_regs *regs)
{
	preempt_enable_no_resched();
}
NOKPROBE_SYMBOL(debug_exception_exit);

UNHANDLED(el1t, 64, sync)
UNHANDLED(el1t, 64, irq)
UNHANDLED(el1t, 64, fiq)
UNHANDLED(el1t, 64, error)

static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_mem_abort(far, esr, regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_sp_pc_abort(far, esr, regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_undef(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_undef(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_bti(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_bti(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_gcs(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_gcs(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_mops(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_mops(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_breakpt(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_el1_dbg(regs);
	debug_exception_enter(regs);
	do_breakpoint(esr, regs);
	debug_exception_exit(regs);
	arm64_exit_el1_dbg(regs);
}

static void noinstr el1_softstp(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_el1_dbg(regs);
	if (!cortex_a76_erratum_1463225_debug_handler(regs)) {
		debug_exception_enter(regs);
		/*
		 * After handling a breakpoint, we suspend the breakpoint
		 * and use single-step to move to the next instruction.
		 * If we are stepping a suspended breakpoint there's nothing more to do:
		 * the single-step is complete.
		 */
		if (!try_step_suspended_breakpoints(regs))
			do_el1_softstep(esr, regs);
		debug_exception_exit(regs);
	}
	arm64_exit_el1_dbg(regs);
}

static void noinstr el1_watchpt(struct pt_regs *regs, unsigned long esr)
{
	/* Watchpoints are the only debug exception to write FAR_EL1 */
	unsigned long far = read_sysreg(far_el1);

	arm64_enter_el1_dbg(regs);
	debug_exception_enter(regs);
	do_watchpoint(far, esr, regs);
	debug_exception_exit(regs);
	arm64_exit_el1_dbg(regs);
}

static void noinstr el1_brk64(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_el1_dbg(regs);
	debug_exception_enter(regs);
	do_el1_brk64(esr, regs);
	debug_exception_exit(regs);
	arm64_exit_el1_dbg(regs);
}

static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_fpac(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

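/*
 * Background note (comment added, not from the original source): ESR_ELx_EC()
 * extracts the exception class field (ESR_ELx bits [31:26]), which encodes the
 * reason the synchronous exception was taken and selects the handler below.
 */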
asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_DABT_CUR:
	case ESR_ELx_EC_IABT_CUR:
		el1_abort(regs, esr);
		break;
	/*
	 * We don't handle ESR_ELx_EC_SP_ALIGN, since we will have hit a
	 * recursive exception when trying to push the initial pt_regs.
	 */
	case ESR_ELx_EC_PC_ALIGN:
		el1_pc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_UNKNOWN:
		el1_undef(regs, esr);
		break;
	case ESR_ELx_EC_BTI:
		el1_bti(regs, esr);
		break;
	case ESR_ELx_EC_GCS:
		el1_gcs(regs, esr);
		break;
	case ESR_ELx_EC_MOPS:
		el1_mops(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_CUR:
		el1_breakpt(regs, esr);
		break;
	case ESR_ELx_EC_SOFTSTP_CUR:
		el1_softstp(regs, esr);
		break;
	case ESR_ELx_EC_WATCHPT_CUR:
		el1_watchpt(regs, esr);
		break;
	case ESR_ELx_EC_BRK64:
		el1_brk64(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el1_fpac(regs, esr);
		break;
	default:
		__panic_unhandled(regs, "64-bit el1h sync", esr);
	}
}

static __always_inline void __el1_pnmi(struct pt_regs *regs,
				       void (*handler)(struct pt_regs *))
{
	arm64_enter_nmi(regs);
	do_interrupt_handler(regs, handler);
	arm64_exit_nmi(regs);
}

static __always_inline void __el1_irq(struct pt_regs *regs,
				      void (*handler)(struct pt_regs *))
{
	enter_from_kernel_mode(regs);

	irq_enter_rcu();
	do_interrupt_handler(regs, handler);
	irq_exit_rcu();

	arm64_preempt_schedule_irq();

	exit_to_kernel_mode(regs);
}
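
/*
 * Note (assumption, comment added for clarity): with pseudo-NMIs enabled, an
 * interrupt delivered while interrupts_enabled(regs) is false must have
 * arrived as an NMI via GIC priority masking, so it is handled on the NMI
 * path; otherwise it is treated as a normal IRQ.
 */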
static void noinstr el1_interrupt(struct pt_regs *regs,
				  void (*handler)(struct pt_regs *))
{
	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);

	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
		__el1_pnmi(regs, handler);
	else
		__el1_irq(regs, handler);
}

asmlinkage void noinstr el1h_64_irq_handler(struct pt_regs *regs)
{
	el1_interrupt(regs, handle_arch_irq);
}

asmlinkage void noinstr el1h_64_fiq_handler(struct pt_regs *regs)
{
	el1_interrupt(regs, handle_arch_fiq);
}

asmlinkage void noinstr el1h_64_error_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	local_daif_restore(DAIF_ERRCTX);
	arm64_enter_nmi(regs);
	do_serror(regs, esr);
	arm64_exit_nmi(regs);
}

static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(far, esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	/*
	 * We've taken an instruction abort from userspace and not yet
	 * re-enabled IRQs. If the address is a kernel address, apply
	 * BP hardening prior to enabling IRQs and pre-emption.
	 */
	if (!is_ttbr0_addr(far))
		arm64_apply_bp_hardening();

	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(far, esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_acc(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sve_acc(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_sme_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sme_acc(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_exc(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_sys(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_sys(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	if (!is_ttbr0_addr(instruction_pointer(regs)))
		arm64_apply_bp_hardening();

	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(far, esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(regs->sp, esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_undef(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_undef(regs, esr);
	exit_to_user_mode(regs);
}

static void noinstr el0_bti(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_bti(regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_mops(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_mops(regs, esr);
	exit_to_user_mode(regs);
}

static void noinstr el0_gcs(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_gcs(regs, esr);
	exit_to_user_mode(regs);
}

static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	bad_el0_sync(regs, 0, esr);
	exit_to_user_mode(regs);
}

static void noinstr el0_breakpt(struct pt_regs *regs, unsigned long esr)
{
	if (!is_ttbr0_addr(regs->pc))
		arm64_apply_bp_hardening();

	enter_from_user_mode(regs);
	debug_exception_enter(regs);
	do_breakpoint(esr, regs);
	debug_exception_exit(regs);
	local_daif_restore(DAIF_PROCCTX);
	exit_to_user_mode(regs);
}

static void noinstr el0_softstp(struct pt_regs *regs, unsigned long esr)
{
	if (!is_ttbr0_addr(regs->pc))
		arm64_apply_bp_hardening();

	enter_from_user_mode(regs);
	/*
	 * After handling a breakpoint, we suspend the breakpoint
	 * and use single-step to move to the next instruction.
	 * If we are stepping a suspended breakpoint there's nothing more to do:
	 * the single-step is complete.
	 */
	if (!try_step_suspended_breakpoints(regs)) {
		local_daif_restore(DAIF_PROCCTX);
		do_el0_softstep(esr, regs);
	}
	exit_to_user_mode(regs);
}

static void noinstr el0_watchpt(struct pt_regs *regs, unsigned long esr)
{
	/* Watchpoints are the only debug exception to write FAR_EL1 */
	unsigned long far = read_sysreg(far_el1);

	enter_from_user_mode(regs);
	debug_exception_enter(regs);
	do_watchpoint(far, esr, regs);
	debug_exception_exit(regs);
	local_daif_restore(DAIF_PROCCTX);
	exit_to_user_mode(regs);
}

static void noinstr el0_brk64(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_brk64(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_svc(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	cortex_a76_erratum_1463225_svc_handler();
	fpsimd_syscall_enter();
	local_daif_restore(DAIF_PROCCTX);
	do_el0_svc(regs);
	exit_to_user_mode(regs);
	fpsimd_syscall_exit();
}

static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_fpac(regs, esr);
	exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC64:
		el0_svc(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_SVE:
		el0_sve_acc(regs, esr);
		break;
	case ESR_ELx_EC_SME:
		el0_sme_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC64:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_WFx:
		el0_sys(regs, esr);
		break;
	case ESR_ELx_EC_SP_ALIGN:
		el0_sp(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
		el0_undef(regs, esr);
		break;
	case ESR_ELx_EC_BTI:
		el0_bti(regs);
		break;
	case ESR_ELx_EC_MOPS:
		el0_mops(regs, esr);
		break;
	case ESR_ELx_EC_GCS:
		el0_gcs(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
		el0_breakpt(regs, esr);
		break;
	case ESR_ELx_EC_SOFTSTP_LOW:
		el0_softstp(regs, esr);
		break;
	case ESR_ELx_EC_WATCHPT_LOW:
		el0_watchpt(regs, esr);
		break;
	case ESR_ELx_EC_BRK64:
		el0_brk64(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el0_fpac(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}

static void noinstr el0_interrupt(struct pt_regs *regs,
				  void (*handler)(struct pt_regs *))
{
	enter_from_user_mode(regs);

	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);

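	/*
	 * Assumption (comment added for clarity): bit 55 of the reported PC
	 * distinguishes TTBR1 (kernel) addresses from TTBR0 (user) addresses,
	 * so branch-predictor hardening is applied when userspace appears to
	 * have branched to a kernel address.
	 */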
	if (regs->pc & BIT(55))
		arm64_apply_bp_hardening();

	irq_enter_rcu();
	do_interrupt_handler(regs, handler);
	irq_exit_rcu();

	exit_to_user_mode(regs);
}

static void noinstr __el0_irq_handler_common(struct pt_regs *regs)
{
	el0_interrupt(regs, handle_arch_irq);
}

asmlinkage void noinstr el0t_64_irq_handler(struct pt_regs *regs)
{
	__el0_irq_handler_common(regs);
}

static void noinstr __el0_fiq_handler_common(struct pt_regs *regs)
{
	el0_interrupt(regs, handle_arch_fiq);
}

asmlinkage void noinstr el0t_64_fiq_handler(struct pt_regs *regs)
{
	__el0_fiq_handler_common(regs);
}

static void noinstr __el0_error_handler_common(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	enter_from_user_mode(regs);
	local_daif_restore(DAIF_ERRCTX);
	arm64_enter_nmi(regs);
	do_serror(regs, esr);
	arm64_exit_nmi(regs);
	local_daif_restore(DAIF_PROCCTX);
	exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_64_error_handler(struct pt_regs *regs)
{
	__el0_error_handler_common(regs);
}

#ifdef CONFIG_COMPAT
static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_cp15(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_svc_compat(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	cortex_a76_erratum_1463225_svc_handler();
	local_daif_restore(DAIF_PROCCTX);
	do_el0_svc_compat(regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_bkpt32(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_bkpt32(esr, regs);
	exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC32:
		el0_svc_compat(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC32:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_LS:
	case ESR_ELx_EC_CP14_64:
		el0_undef(regs, esr);
		break;
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		el0_cp15(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
		el0_breakpt(regs, esr);
		break;
	case ESR_ELx_EC_SOFTSTP_LOW:
		el0_softstp(regs, esr);
		break;
	case ESR_ELx_EC_WATCHPT_LOW:
		el0_watchpt(regs, esr);
		break;
	case ESR_ELx_EC_BKPT32:
		el0_bkpt32(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}

asmlinkage void noinstr el0t_32_irq_handler(struct pt_regs *regs)
{
	__el0_irq_handler_common(regs);
}

asmlinkage void noinstr el0t_32_fiq_handler(struct pt_regs *regs)
{
	__el0_fiq_handler_common(regs);
}

asmlinkage void noinstr el0t_32_error_handler(struct pt_regs *regs)
{
	__el0_error_handler_common(regs);
}
#else /* CONFIG_COMPAT */
UNHANDLED(el0t, 32, sync)
UNHANDLED(el0t, 32, irq)
UNHANDLED(el0t, 32, fiq)
UNHANDLED(el0t, 32, error)
#endif /* CONFIG_COMPAT */

asmlinkage void noinstr __noreturn handle_bad_stack(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);
	unsigned long far = read_sysreg(far_el1);

	arm64_enter_nmi(regs);
	panic_bad_stack(regs, esr, far);
}

#ifdef CONFIG_ARM_SDE_INTERFACE
asmlinkage noinstr unsigned long
__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
{
	unsigned long ret;

	/*
	 * We didn't take an exception to get here, so the HW hasn't
	 * set/cleared bits in PSTATE that we may rely on.
	 *
	 * The original SDEI spec (ARM DEN 0054A) can be read ambiguously as to
	 * whether PSTATE bits are inherited unchanged or generated from
	 * scratch, and the TF-A implementation always clears PAN and always
	 * clears UAO. There are no other known implementations.
	 *
	 * Subsequent revisions (ARM DEN 0054B) follow the usual rules for how
	 * PSTATE is modified upon architectural exceptions, and so PAN is
	 * either inherited or set per SCTLR_ELx.SPAN, and UAO is always
	 * cleared.
	 *
	 * We must explicitly reset PAN to the expected state, including
	 * clearing it when the host isn't using it, in case a VM had it set.
	 */
	if (system_uses_hw_pan())
		set_pstate_pan(1);
	else if (cpu_has_pan())
		set_pstate_pan(0);

	arm64_enter_nmi(regs);
	ret = do_sdei_event(regs, arg);
	arm64_exit_nmi(regs);

	return ret;
}
#endif /* CONFIG_ARM_SDE_INTERFACE */