// SPDX-License-Identifier: GPL-2.0
/*
 * Exception handling code
 *
 * Copyright (C) 2019 ARM Ltd.
 */

#include <linux/context_tracking.h>
#include <linux/irq-entry-common.h>
#include <linux/kasan.h>
#include <linux/linkage.h>
#include <linux/livepatch.h>
#include <linux/lockdep.h>
#include <linux/ptrace.h>
#include <linux/resume_user_mode.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/thread_info.h>

#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/irq_regs.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/sdei.h>
#include <asm/stacktrace.h>
#include <asm/sysreg.h>
#include <asm/system_misc.h>

/*
 * Handle IRQ/context state management when entering from kernel mode.
 * Before this function is called it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 */
static noinstr irqentry_state_t arm64_enter_from_kernel_mode(struct pt_regs *regs)
{
	irqentry_state_t state;

	state = irqentry_enter_from_kernel_mode(regs);
	mte_check_tfsr_entry();
	mte_disable_tco_entry(current);

	return state;
}

/*
 * Handle IRQ/context state management when exiting to kernel mode.
 * After this function returns it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_exit_to_kernel_mode(struct pt_regs *regs,
					      irqentry_state_t state)
{
	local_irq_disable();
	irqentry_exit_to_kernel_mode_preempt(regs, state);
	local_daif_mask();
	mte_check_tfsr_exit();
	irqentry_exit_to_kernel_mode_after_preempt(regs, state);
}

/*
 * Handle IRQ/context state management when entering from user mode.
 * Before this function is called it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 */
static __always_inline void arm64_enter_from_user_mode(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	mte_disable_tco_entry(current);
}

/*
 * Handle IRQ/context state management when exiting to user mode.
 * After this function returns it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 */
static __always_inline void arm64_exit_to_user_mode(struct pt_regs *regs)
{
	local_irq_disable();
	exit_to_user_mode_prepare_legacy(regs);
	local_daif_mask();
	mte_check_tfsr_exit();
	exit_to_user_mode();
}

asmlinkage void noinstr asm_exit_to_user_mode(struct pt_regs *regs)
{
	arm64_exit_to_user_mode(regs);
}

/*
 * Handle IRQ/context state management when entering a debug exception from
 * kernel mode. Before this function is called it is not safe to call regular
 * kernel code, instrumentable code, or any code which may trigger an exception.
 */
static noinstr irqentry_state_t arm64_enter_el1_dbg(struct pt_regs *regs)
{
	irqentry_state_t state;

	state.lockdep = lockdep_hardirqs_enabled();

	lockdep_hardirqs_off(CALLER_ADDR0);
	ct_nmi_enter();

	trace_hardirqs_off_finish();

	return state;
}

/*
 * Handle IRQ/context state management when exiting a debug exception from
 * kernel mode. After this function returns it is not safe to call regular
 * kernel code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs,
				       irqentry_state_t state)
{
	if (state.lockdep) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare();
	}

	ct_nmi_exit();
	if (state.lockdep)
		lockdep_hardirqs_on(CALLER_ADDR0);
}

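/*
 * Run an interrupt handler on the per-CPU IRQ stack if we were interrupted
 * on the task's thread stack, so that deep handler call chains cannot
 * overflow it. If we're already off the thread stack (e.g. handling a
 * nested interrupt), invoke the handler directly.
 */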
static void do_interrupt_handler(struct pt_regs *regs,
				 void (*handler)(struct pt_regs *))
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	if (on_thread_stack())
		call_on_irq_stack(regs, handler);
	else
		handler(regs);

	set_irq_regs(old_regs);
}

extern void (*handle_arch_irq)(struct pt_regs *);
extern void (*handle_arch_fiq)(struct pt_regs *);

static void noinstr __panic_unhandled(struct pt_regs *regs, const char *vector,
				      unsigned long esr)
{
	irqentry_nmi_enter(regs);

	console_verbose();

	pr_crit("Unhandled %s exception on CPU%d, ESR 0x%016lx -- %s\n",
		vector, smp_processor_id(), esr,
		esr_get_class_string(esr));

	__show_regs(regs);
	panic("Unhandled exception");
}

#define UNHANDLED(el, regsize, vector)							\
asmlinkage void noinstr el##_##regsize##_##vector##_handler(struct pt_regs *regs)	\
{											\
	const char *desc = #regsize "-bit " #el " " #vector;				\
	__panic_unhandled(regs, desc, read_sysreg(esr_el1));				\
}
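
/*
 * For example, UNHANDLED(el1t, 64, sync) expands to:
 *
 *	asmlinkage void noinstr el1t_64_sync_handler(struct pt_regs *regs)
 *	{
 *		const char *desc = "64-bit el1t sync";
 *		__panic_unhandled(regs, desc, read_sysreg(esr_el1));
 *	}
 */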

#ifdef CONFIG_ARM64_ERRATUM_1463225
static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

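/*
 * Trigger a dummy single-step exception on SVC entry: enable single-step
 * and kernel debug in MDSCR_EL1, unmask debug exceptions, and wait for the
 * resulting step exception to be taken before restoring MDSCR_EL1. The
 * step exception itself is swallowed by
 * cortex_a76_erratum_1463225_debug_handler() below.
 */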
static void cortex_a76_erratum_1463225_svc_handler(void)
{
	u64 reg, val;

	if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
		return;

	if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225)))
		return;

	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
	reg = read_sysreg(mdscr_el1);
	val = reg | MDSCR_EL1_SS | MDSCR_EL1_KDE;
	write_sysreg(val, mdscr_el1);
	asm volatile("msr daifclr, #8");
	isb();

	/* We will have taken a single-step exception by this point */

	write_sysreg(reg, mdscr_el1);
	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
}

static __always_inline bool
cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
	if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
		return false;

	/*
	 * We've taken a dummy step exception from the kernel to ensure
	 * that interrupts are re-enabled on the syscall path. Return
	 * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions
	 * masked so that we can safely restore the mdscr and get on with
	 * handling the syscall.
	 */
	regs->pstate |= PSR_D_BIT;
	return true;
}
#else /* CONFIG_ARM64_ERRATUM_1463225 */
static void cortex_a76_erratum_1463225_svc_handler(void) { }
static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
	return false;
}
#endif /* CONFIG_ARM64_ERRATUM_1463225 */

/*
 * As per the ABI, exit SME streaming mode and clear the SVE state not
 * shared with FPSIMD on syscall entry.
 */
static inline void fpsimd_syscall_enter(void)
{
	/* Ensure PSTATE.SM is clear, but leave PSTATE.ZA as-is. */
	if (system_supports_sme())
		sme_smstop_sm();

	/*
	 * The CPU is not in streaming mode. If non-streaming SVE is not
	 * supported, there is no SVE state that needs to be discarded.
	 */
	if (!system_supports_sve())
		return;

	if (test_thread_flag(TIF_SVE)) {
		unsigned int sve_vq_minus_one;

		sve_vq_minus_one = sve_vq_from_vl(task_get_sve_vl(current)) - 1;
		sve_flush_live(true, sve_vq_minus_one);
	}

	/*
	 * Any live non-FPSIMD SVE state has been zeroed. Allow
	 * fpsimd_save_user_state() to lazily discard SVE state until either
	 * the live state is unbound or fpsimd_syscall_exit() is called.
	 */
	__this_cpu_write(fpsimd_last_state.to_save, FP_STATE_FPSIMD);
}

static __always_inline void fpsimd_syscall_exit(void)
{
	if (!system_supports_sve())
		return;

	/*
	 * The current task's user FPSIMD/SVE/SME state is now bound to this
	 * CPU. The fpsimd_last_state.to_save value is either:
	 *
	 * - FP_STATE_FPSIMD, if the state has not been reloaded on this CPU
	 *   since fpsimd_syscall_enter().
	 *
	 * - FP_STATE_CURRENT, if the state has been reloaded on this CPU at
	 *   any point.
	 *
	 * Reset this to FP_STATE_CURRENT to stop lazy discarding.
	 */
	__this_cpu_write(fpsimd_last_state.to_save, FP_STATE_CURRENT);
}

/*
 * In debug exception context, we explicitly disable preemption despite
 * having interrupts disabled.
 * This serves two purposes: it makes it much less likely that we would
 * accidentally schedule in exception context and it will force a warning
 * if we somehow manage to schedule by accident.
 */
static void debug_exception_enter(struct pt_regs *regs)
{
	preempt_disable();

	/* This code is a bit fragile.  Test it. */
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "exception_enter didn't work");
}
NOKPROBE_SYMBOL(debug_exception_enter);

static void debug_exception_exit(struct pt_regs *regs)
{
	preempt_enable_no_resched();
}
NOKPROBE_SYMBOL(debug_exception_exit);

UNHANDLED(el1t, 64, sync)
UNHANDLED(el1t, 64, irq)
UNHANDLED(el1t, 64, fiq)
UNHANDLED(el1t, 64, error)

static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);
	irqentry_state_t state;

	state = arm64_enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_mem_abort(far, esr, regs);
	arm64_exit_to_kernel_mode(regs, state);
}

static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);
	irqentry_state_t state;

	state = arm64_enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_sp_pc_abort(far, esr, regs);
	arm64_exit_to_kernel_mode(regs, state);
}

static void noinstr el1_undef(struct pt_regs *regs, unsigned long esr)
{
	irqentry_state_t state;

	state = arm64_enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_undef(regs, esr);
	arm64_exit_to_kernel_mode(regs, state);
}

static void noinstr el1_bti(struct pt_regs *regs, unsigned long esr)
{
	irqentry_state_t state;

	state = arm64_enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_bti(regs, esr);
	arm64_exit_to_kernel_mode(regs, state);
}

static void noinstr el1_gcs(struct pt_regs *regs, unsigned long esr)
{
	irqentry_state_t state;

	state = arm64_enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_gcs(regs, esr);
	arm64_exit_to_kernel_mode(regs, state);
}

static void noinstr el1_mops(struct pt_regs *regs, unsigned long esr)
{
	irqentry_state_t state;

	state = arm64_enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_mops(regs, esr);
	arm64_exit_to_kernel_mode(regs, state);
}

static void noinstr el1_breakpt(struct pt_regs *regs, unsigned long esr)
{
	irqentry_state_t state;

	state = arm64_enter_el1_dbg(regs);
	debug_exception_enter(regs);
	do_breakpoint(esr, regs);
	debug_exception_exit(regs);
	arm64_exit_el1_dbg(regs, state);
}

static void noinstr el1_softstp(struct pt_regs *regs, unsigned long esr)
{
	irqentry_state_t state;

	state = arm64_enter_el1_dbg(regs);
	if (!cortex_a76_erratum_1463225_debug_handler(regs)) {
		debug_exception_enter(regs);
		/*
		 * After handling a breakpoint, we suspend the breakpoint
		 * and use single-step to move to the next instruction.
		 * If we are stepping a suspended breakpoint there's nothing more to do:
		 * the single-step is complete.
		 */
		if (!try_step_suspended_breakpoints(regs))
			do_el1_softstep(esr, regs);
		debug_exception_exit(regs);
	}
	arm64_exit_el1_dbg(regs, state);
}

static void noinstr el1_watchpt(struct pt_regs *regs, unsigned long esr)
{
	/* Watchpoints are the only debug exception to write FAR_EL1 */
	unsigned long far = read_sysreg(far_el1);
	irqentry_state_t state;

	state = arm64_enter_el1_dbg(regs);
	debug_exception_enter(regs);
	do_watchpoint(far, esr, regs);
	debug_exception_exit(regs);
	arm64_exit_el1_dbg(regs, state);
}

static void noinstr el1_brk64(struct pt_regs *regs, unsigned long esr)
{
	irqentry_state_t state;

	state = arm64_enter_el1_dbg(regs);
	debug_exception_enter(regs);
	do_el1_brk64(esr, regs);
	debug_exception_exit(regs);
	arm64_exit_el1_dbg(regs, state);
}

static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
{
	irqentry_state_t state;

	state = arm64_enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_fpac(regs, esr);
	arm64_exit_to_kernel_mode(regs, state);
}

asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_DABT_CUR:
	case ESR_ELx_EC_IABT_CUR:
		el1_abort(regs, esr);
		break;
	/*
	 * We don't handle ESR_ELx_EC_SP_ALIGN, since we will have hit a
	 * recursive exception when trying to push the initial pt_regs.
	 */
	case ESR_ELx_EC_PC_ALIGN:
		el1_pc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_UNKNOWN:
		el1_undef(regs, esr);
		break;
	case ESR_ELx_EC_BTI:
		el1_bti(regs, esr);
		break;
	case ESR_ELx_EC_GCS:
		el1_gcs(regs, esr);
		break;
	case ESR_ELx_EC_MOPS:
		el1_mops(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_CUR:
		el1_breakpt(regs, esr);
		break;
	case ESR_ELx_EC_SOFTSTP_CUR:
		el1_softstp(regs, esr);
		break;
	case ESR_ELx_EC_WATCHPT_CUR:
		el1_watchpt(regs, esr);
		break;
	case ESR_ELx_EC_BRK64:
		el1_brk64(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el1_fpac(regs, esr);
		break;
	default:
		__panic_unhandled(regs, "64-bit el1h sync", esr);
	}
}

static __always_inline void __el1_pnmi(struct pt_regs *regs,
				       void (*handler)(struct pt_regs *))
{
	irqentry_state_t state;

	state = irqentry_nmi_enter(regs);
	do_interrupt_handler(regs, handler);
	irqentry_nmi_exit(regs, state);
}

static __always_inline void __el1_irq(struct pt_regs *regs,
				      void (*handler)(struct pt_regs *))
{
	irqentry_state_t state;

	state = arm64_enter_from_kernel_mode(regs);

	irq_enter_rcu();
	do_interrupt_handler(regs, handler);
	irq_exit_rcu();

	arm64_exit_to_kernel_mode(regs, state);
}
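
/*
 * With CONFIG_ARM64_PSEUDO_NMI, regular IRQs are masked via the GIC
 * priority mask rather than PSTATE.I, so an interrupt taken while the
 * interrupted context had IRQs masked can only be a pseudo-NMI; handle it
 * with NMI entry/exit semantics via __el1_pnmi().
 */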
static void noinstr el1_interrupt(struct pt_regs *regs,
				  void (*handler)(struct pt_regs *))
{
	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);

	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && regs_irqs_disabled(regs))
		__el1_pnmi(regs, handler);
	else
		__el1_irq(regs, handler);
}

asmlinkage void noinstr el1h_64_irq_handler(struct pt_regs *regs)
{
	el1_interrupt(regs, handle_arch_irq);
}

asmlinkage void noinstr el1h_64_fiq_handler(struct pt_regs *regs)
{
	el1_interrupt(regs, handle_arch_fiq);
}

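/*
 * SErrors taken from EL1 are treated as NMIs: do_serror() runs inside an
 * irqentry_nmi_enter()/irqentry_nmi_exit() section, since an SError can be
 * taken from almost any context, including ones where regular interrupt
 * entry accounting would be unsafe.
 */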
asmlinkage void noinstr el1h_64_error_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);
	irqentry_state_t state;

	local_daif_restore(DAIF_ERRCTX);
	state = irqentry_nmi_enter(regs);
	do_serror(regs, esr);
	irqentry_nmi_exit(regs, state);
}

static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(far, esr, regs);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	/*
	 * We've taken an instruction abort from userspace and not yet
	 * re-enabled IRQs. If the address is a kernel address, apply
	 * BP hardening prior to enabling IRQs and pre-emption.
	 */
	if (!is_ttbr0_addr(far))
		arm64_apply_bp_hardening();

	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(far, esr, regs);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_acc(esr, regs);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sve_acc(esr, regs);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_sme_acc(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sme_acc(esr, regs);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_exc(esr, regs);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_sys(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_sys(esr, regs);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	if (!is_ttbr0_addr(instruction_pointer(regs)))
		arm64_apply_bp_hardening();

	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(far, esr, regs);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(regs->sp, esr, regs);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_undef(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_undef(regs, esr);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_bti(struct pt_regs *regs)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_bti(regs);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_mops(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_mops(regs, esr);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_gcs(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_gcs(regs, esr);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	bad_el0_sync(regs, 0, esr);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_breakpt(struct pt_regs *regs, unsigned long esr)
{
	if (!is_ttbr0_addr(regs->pc))
		arm64_apply_bp_hardening();

	arm64_enter_from_user_mode(regs);
	debug_exception_enter(regs);
	do_breakpoint(esr, regs);
	debug_exception_exit(regs);
	local_daif_restore(DAIF_PROCCTX);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_softstp(struct pt_regs *regs, unsigned long esr)
{
	bool step_done;

	if (!is_ttbr0_addr(regs->pc))
		arm64_apply_bp_hardening();

	arm64_enter_from_user_mode(regs);
	/*
	 * After handling a breakpoint, we suspend the breakpoint
	 * and use single-step to move to the next instruction.
	 * If we are stepping a suspended breakpoint there's nothing more to do:
	 * the single-step is complete.
	 */
	step_done = try_step_suspended_breakpoints(regs);
	local_daif_restore(DAIF_PROCCTX);
	if (!step_done)
		do_el0_softstep(esr, regs);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_watchpt(struct pt_regs *regs, unsigned long esr)
{
	/* Watchpoints are the only debug exception to write FAR_EL1 */
	unsigned long far = read_sysreg(far_el1);

	arm64_enter_from_user_mode(regs);
	debug_exception_enter(regs);
	do_watchpoint(far, esr, regs);
	debug_exception_exit(regs);
	local_daif_restore(DAIF_PROCCTX);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_brk64(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_brk64(esr, regs);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_svc(struct pt_regs *regs)
{
	arm64_enter_from_user_mode(regs);
	cortex_a76_erratum_1463225_svc_handler();
	fpsimd_syscall_enter();
	local_daif_restore(DAIF_PROCCTX);
	do_el0_svc(regs);
	arm64_exit_to_user_mode(regs);
	fpsimd_syscall_exit();
}

static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_fpac(regs, esr);
	arm64_exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC64:
		el0_svc(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_SVE:
		el0_sve_acc(regs, esr);
		break;
	case ESR_ELx_EC_SME:
		el0_sme_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC64:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_WFx:
		el0_sys(regs, esr);
		break;
	case ESR_ELx_EC_SP_ALIGN:
		el0_sp(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
		el0_undef(regs, esr);
		break;
	case ESR_ELx_EC_BTI:
		el0_bti(regs);
		break;
	case ESR_ELx_EC_MOPS:
		el0_mops(regs, esr);
		break;
	case ESR_ELx_EC_GCS:
		el0_gcs(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
		el0_breakpt(regs, esr);
		break;
	case ESR_ELx_EC_SOFTSTP_LOW:
		el0_softstp(regs, esr);
		break;
	case ESR_ELx_EC_WATCHPT_LOW:
		el0_watchpt(regs, esr);
		break;
	case ESR_ELx_EC_BRK64:
		el0_brk64(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el0_fpac(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}

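/*
 * Bit 55 of an address selects the TTBR used to translate it, so an
 * interrupted PC with bit 55 set looks like a (tagged) kernel address.
 * Mirroring the is_ttbr0_addr() checks in the sync handlers above,
 * el0_interrupt() applies branch-predictor hardening in that case before
 * handling the interrupt.
 */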
static void noinstr el0_interrupt(struct pt_regs *regs,
				  void (*handler)(struct pt_regs *))
{
	arm64_enter_from_user_mode(regs);

	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);

	if (regs->pc & BIT(55))
		arm64_apply_bp_hardening();

	irq_enter_rcu();
	do_interrupt_handler(regs, handler);
	irq_exit_rcu();

	arm64_exit_to_user_mode(regs);
}

static void noinstr __el0_irq_handler_common(struct pt_regs *regs)
{
	el0_interrupt(regs, handle_arch_irq);
}

asmlinkage void noinstr el0t_64_irq_handler(struct pt_regs *regs)
{
	__el0_irq_handler_common(regs);
}

static void noinstr __el0_fiq_handler_common(struct pt_regs *regs)
{
	el0_interrupt(regs, handle_arch_fiq);
}

asmlinkage void noinstr el0t_64_fiq_handler(struct pt_regs *regs)
{
	__el0_fiq_handler_common(regs);
}

static void noinstr __el0_error_handler_common(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);
	irqentry_state_t state;

	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_ERRCTX);
	state = irqentry_nmi_enter(regs);
	do_serror(regs, esr);
	irqentry_nmi_exit(regs, state);
	local_daif_restore(DAIF_PROCCTX);
	arm64_exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_64_error_handler(struct pt_regs *regs)
{
	__el0_error_handler_common(regs);
}

#ifdef CONFIG_COMPAT
static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_cp15(esr, regs);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_svc_compat(struct pt_regs *regs)
{
	arm64_enter_from_user_mode(regs);
	cortex_a76_erratum_1463225_svc_handler();
	local_daif_restore(DAIF_PROCCTX);
	do_el0_svc_compat(regs);
	arm64_exit_to_user_mode(regs);
}

static void noinstr el0_bkpt32(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_bkpt32(esr, regs);
	arm64_exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC32:
		el0_svc_compat(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC32:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_LS:
	case ESR_ELx_EC_CP14_64:
		el0_undef(regs, esr);
		break;
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		el0_cp15(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
		el0_breakpt(regs, esr);
		break;
	case ESR_ELx_EC_SOFTSTP_LOW:
		el0_softstp(regs, esr);
		break;
	case ESR_ELx_EC_WATCHPT_LOW:
		el0_watchpt(regs, esr);
		break;
	case ESR_ELx_EC_BKPT32:
		el0_bkpt32(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}

asmlinkage void noinstr el0t_32_irq_handler(struct pt_regs *regs)
{
	__el0_irq_handler_common(regs);
}

asmlinkage void noinstr el0t_32_fiq_handler(struct pt_regs *regs)
{
	__el0_fiq_handler_common(regs);
}

asmlinkage void noinstr el0t_32_error_handler(struct pt_regs *regs)
{
	__el0_error_handler_common(regs);
}
#else /* CONFIG_COMPAT */
UNHANDLED(el0t, 32, sync)
UNHANDLED(el0t, 32, irq)
UNHANDLED(el0t, 32, fiq)
UNHANDLED(el0t, 32, error)
#endif /* CONFIG_COMPAT */

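/*
 * Called from the entry code on the overflow stack once stack corruption
 * (e.g. a kernel stack overflow) has been detected; reports the bad stack
 * as an NMI-style event and never returns.
 */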
asmlinkage void noinstr __noreturn handle_bad_stack(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);
	unsigned long far = read_sysreg(far_el1);

	irqentry_nmi_enter(regs);
	panic_bad_stack(regs, esr, far);
}

#ifdef CONFIG_ARM_SDE_INTERFACE
asmlinkage noinstr unsigned long
__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
{
	irqentry_state_t state;
	unsigned long ret;

	/*
	 * We didn't take an exception to get here, so the HW hasn't
	 * set/cleared bits in PSTATE that we may rely on.
	 *
	 * The original SDEI spec (ARM DEN 0054A) can be read ambiguously as to
	 * whether PSTATE bits are inherited unchanged or generated from
	 * scratch, and the TF-A implementation always clears PAN and always
	 * clears UAO. There are no other known implementations.
	 *
	 * Subsequent revisions (ARM DEN 0054B) follow the usual rules for how
	 * PSTATE is modified upon architectural exceptions, and so PAN is
	 * either inherited or set per SCTLR_ELx.SPAN, and UAO is always
	 * cleared.
	 *
	 * We must explicitly reset PAN to the expected state, including
	 * clearing it when the host isn't using it, in case a VM had it set.
	 */
	if (system_uses_hw_pan())
		set_pstate_pan(1);
	else if (cpu_has_pan())
		set_pstate_pan(0);

	state = irqentry_nmi_enter(regs);
	ret = do_sdei_event(regs, arg);
	irqentry_nmi_exit(regs, state);

	return ret;
}
#endif /* CONFIG_ARM_SDE_INTERFACE */