xref: /linux/arch/arm64/kernel/entry-common.c (revision 858fbd7248bd84b2899fb2c29bc7bc2634296edf)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Exception handling code
4  *
5  * Copyright (C) 2019 ARM Ltd.
6  */
7 
8 #include <linux/context_tracking.h>
9 #include <linux/irq-entry-common.h>
10 #include <linux/kasan.h>
11 #include <linux/linkage.h>
12 #include <linux/livepatch.h>
13 #include <linux/lockdep.h>
14 #include <linux/ptrace.h>
15 #include <linux/resume_user_mode.h>
16 #include <linux/sched.h>
17 #include <linux/sched/debug.h>
18 #include <linux/thread_info.h>
19 
20 #include <asm/cpufeature.h>
21 #include <asm/daifflags.h>
22 #include <asm/esr.h>
23 #include <asm/exception.h>
24 #include <asm/fpsimd.h>
25 #include <asm/irq_regs.h>
26 #include <asm/kprobes.h>
27 #include <asm/mmu.h>
28 #include <asm/processor.h>
29 #include <asm/sdei.h>
30 #include <asm/stacktrace.h>
31 #include <asm/sysreg.h>
32 #include <asm/system_misc.h>
33 
34 /*
35  * Handle IRQ/context state management when entering from kernel mode.
36  * Before this function is called it is not safe to call regular kernel code,
37  * instrumentable code, or any code which may trigger an exception.
38  */
static noinstr irqentry_state_t arm64_enter_from_kernel_mode(struct pt_regs *regs)
{
	irqentry_state_t state;

	/* Generic entry bookkeeping (RCU/lockdep/tracing) must come first. */
	state = irqentry_enter_from_kernel_mode(regs);
	/* Check for MTE asynchronous tag-check faults recorded in TFSR. */
	mte_check_tfsr_entry();
	/* Clear PSTATE.TCO for the incoming task so tag checks apply in-kernel. */
	mte_disable_tco_entry(current);

	return state;
}
49 
50 /*
51  * Handle IRQ/context state management when exiting to kernel mode.
52  * After this function returns it is not safe to call regular kernel code,
53  * instrumentable code, or any code which may trigger an exception.
54  */
static void noinstr arm64_exit_to_kernel_mode(struct pt_regs *regs,
					      irqentry_state_t state)
{
	local_irq_disable();
	/* Handle any pending preemption while only IRQs are disabled. */
	irqentry_exit_to_kernel_mode_preempt(regs, state);
	/* Mask all DAIF exceptions for the final, non-preemptible steps. */
	local_daif_mask();
	/* Check MTE asynchronous tag-check faults raised while in the kernel. */
	mte_check_tfsr_exit();
	irqentry_exit_to_kernel_mode_after_preempt(regs, state);
}
64 
65 /*
66  * Handle IRQ/context state management when entering from user mode.
67  * Before this function is called it is not safe to call regular kernel code,
68  * instrumentable code, or any code which may trigger an exception.
69  */
static __always_inline void arm64_enter_from_user_mode(struct pt_regs *regs)
{
	/* Generic user-entry bookkeeping (context tracking, lockdep, tracing). */
	enter_from_user_mode(regs);
	/* Clear PSTATE.TCO so MTE tag checks apply while in the kernel. */
	mte_disable_tco_entry(current);
	/* SME entry fixups for the interrupted user context. */
	sme_enter_from_user_mode();
}
76 
77 /*
78  * Handle IRQ/context state management when exiting to user mode.
79  * After this function returns it is not safe to call regular kernel code,
80  * instrumentable code, or any code which may trigger an exception.
81  */
82 
static __always_inline void arm64_exit_to_user_mode(struct pt_regs *regs)
{
	local_irq_disable();
	/* Run pending work (signals, resched, etc.) with only IRQs masked. */
	exit_to_user_mode_prepare_legacy(regs);
	/* Mask all DAIF exceptions before the final return sequence. */
	local_daif_mask();
	sme_exit_to_user_mode();
	/* Check MTE asynchronous tag-check faults raised while in the kernel. */
	mte_check_tfsr_exit();
	exit_to_user_mode();
}
92 
/* C entry point for the exit-to-user path, invoked from the asm vectors. */
asmlinkage void noinstr asm_exit_to_user_mode(struct pt_regs *regs)
{
	arm64_exit_to_user_mode(regs);
}
97 
98 /*
99  * Handle IRQ/context state management when entering a debug exception from
100  * kernel mode. Before this function is called it is not safe to call regular
101  * kernel code, instrumentable code, or any code which may trigger an exception.
102  */
static noinstr irqentry_state_t arm64_enter_el1_dbg(struct pt_regs *regs)
{
	irqentry_state_t state;

	/* Remember whether IRQs were (lockdep-)enabled so exit can restore it. */
	state.lockdep = lockdep_hardirqs_enabled();

	lockdep_hardirqs_off(CALLER_ADDR0);
	/* Debug exceptions are treated NMI-like for context tracking. */
	ct_nmi_enter();

	trace_hardirqs_off_finish();

	return state;
}
116 
117 /*
118  * Handle IRQ/context state management when exiting a debug exception from
119  * kernel mode. After this function returns it is not safe to call regular
120  * kernel code, instrumentable code, or any code which may trigger an exception.
121  */
static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs,
				       irqentry_state_t state)
{
	/* Undo the entry bookkeeping, in reverse order w.r.t. entry. */
	if (state.lockdep) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare();
	}

	ct_nmi_exit();
	/* Only tell lockdep IRQs are back on if they were on at entry. */
	if (state.lockdep)
		lockdep_hardirqs_on(CALLER_ADDR0);
}
134 
/*
 * Run @handler for the interrupt described by @regs, switching to the
 * per-CPU IRQ stack when we are currently on the task stack. The previous
 * irq_regs pointer is saved and restored around the call.
 */
static void do_interrupt_handler(struct pt_regs *regs,
				 void (*handler)(struct pt_regs *))
{
	struct pt_regs *saved_regs;

	saved_regs = set_irq_regs(regs);

	if (!on_thread_stack())
		handler(regs);
	else
		call_on_irq_stack(regs, handler);

	set_irq_regs(saved_regs);
}
147 
148 extern void (*handle_arch_irq)(struct pt_regs *);
149 extern void (*handle_arch_fiq)(struct pt_regs *);
150 
/*
 * Report an exception vector we have no handler for, then panic.
 * Entered NMI-like since we cannot trust any prior context state.
 */
static void noinstr __panic_unhandled(struct pt_regs *regs, const char *vector,
				      unsigned long esr)
{
	irqentry_nmi_enter(regs);

	console_verbose();

	pr_crit("Unhandled %s exception on CPU%d, ESR 0x%016lx -- %s\n",
		vector, smp_processor_id(), esr,
		esr_get_class_string(esr));

	__show_regs(regs);
	panic("Unhandled exception");
}
165 
/* Stamp out a panicking handler for an exception vector we never expect. */
#define UNHANDLED(el, regsize, vector)							\
asmlinkage void noinstr el##_##regsize##_##vector##_handler(struct pt_regs *regs)	\
{											\
	const char *desc = #regsize "-bit " #el " " #vector;				\
	__panic_unhandled(regs, desc, read_sysreg(esr_el1));				\
}
172 
#ifdef CONFIG_ARM64_ERRATUM_1463225
/* Set while the dummy single-step for the erratum workaround is in flight. */
static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

/*
 * Cortex-A76 erratum 1463225 SVC-path workaround: when single-stepping a
 * syscall, deliberately take a dummy step exception here (with debug
 * exceptions unmasked via "msr daifclr, #8") before handling the syscall.
 */
static void cortex_a76_erratum_1463225_svc_handler(void)
{
	u64 reg, val;

	if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
		return;

	if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225)))
		return;

	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
	reg = read_sysreg(mdscr_el1);
	val = reg | MDSCR_EL1_SS | MDSCR_EL1_KDE;
	write_sysreg(val, mdscr_el1);
	asm volatile("msr daifclr, #8");
	isb();

	/* We will have taken a single-step exception by this point */

	write_sysreg(reg, mdscr_el1);
	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
}
198 
/*
 * Returns true if this single-step exception is the dummy one raised by
 * cortex_a76_erratum_1463225_svc_handler() and has been consumed here.
 */
static __always_inline bool
cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
	if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
		return false;

	/*
	 * We've taken a dummy step exception from the kernel to ensure
	 * that interrupts are re-enabled on the syscall path. Return back
	 * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions
	 * masked so that we can safely restore the mdscr and get on with
	 * handling the syscall.
	 */
	regs->pstate |= PSR_D_BIT;
	return true;
}
#else /* CONFIG_ARM64_ERRATUM_1463225 */
/* No-op stubs when the erratum workaround is not configured. */
static void cortex_a76_erratum_1463225_svc_handler(void) { }
static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
	return false;
}
#endif /* CONFIG_ARM64_ERRATUM_1463225 */
222 
223 /*
224  * As per the ABI exit SME streaming mode and clear the SVE state not
225  * shared with FPSIMD on syscall entry.
226  */
static inline void fpsimd_syscall_enter(void)
{
	/* Ensure PSTATE.SM is clear, but leave PSTATE.ZA as-is. */
	if (system_supports_sme())
		sme_smstop_sm();

	/*
	 * The CPU is not in streaming mode. If non-streaming SVE is not
	 * supported, there is no SVE state that needs to be discarded.
	 */
	if (!system_supports_sve())
		return;

	if (test_thread_flag(TIF_SVE)) {
		unsigned int sve_vq_minus_one;

		/* Zero the SVE state not shared with FPSIMD, at the task's VL. */
		sve_vq_minus_one = sve_vq_from_vl(task_get_sve_vl(current)) - 1;
		sve_flush_live(true, sve_vq_minus_one);
	}

	/*
	 * Any live non-FPSIMD SVE state has been zeroed. Allow
	 * fpsimd_save_user_state() to lazily discard SVE state until either
	 * the live state is unbound or fpsimd_syscall_exit() is called.
	 */
	__this_cpu_write(fpsimd_last_state.to_save, FP_STATE_FPSIMD);
}
254 
/* Stop the lazy SVE-state discard window opened by fpsimd_syscall_enter(). */
static __always_inline void fpsimd_syscall_exit(void)
{
	if (!system_supports_sve())
		return;

	/*
	 * The current task's user FPSIMD/SVE/SME state is now bound to this
	 * CPU. The fpsimd_last_state.to_save value is either:
	 *
	 * - FP_STATE_FPSIMD, if the state has not been reloaded on this CPU
	 *   since fpsimd_syscall_enter().
	 *
	 * - FP_STATE_CURRENT, if the state has been reloaded on this CPU at
	 *   any point.
	 *
	 * Reset this to FP_STATE_CURRENT to stop lazy discarding.
	 */
	__this_cpu_write(fpsimd_last_state.to_save, FP_STATE_CURRENT);
}
274 
275 /*
276  * In debug exception context, we explicitly disable preemption despite
277  * having interrupts disabled.
278  * This serves two purposes: it makes it much less likely that we would
279  * accidentally schedule in exception context and it will force a warning
280  * if we somehow manage to schedule by accident.
281  */
static void debug_exception_enter(struct pt_regs *regs)
{
	/* See the comment above: force a warning if we ever schedule here. */
	preempt_disable();

	/* This code is a bit fragile.  Test it. */
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "exception_enter didn't work");
}
NOKPROBE_SYMBOL(debug_exception_enter);
290 
static void debug_exception_exit(struct pt_regs *regs)
{
	/* No resched: we are still in exception context here. */
	preempt_enable_no_resched();
}
NOKPROBE_SYMBOL(debug_exception_exit);
296 
/* EL1t (SP_EL0) vectors have no handler; any such exception panics. */
UNHANDLED(el1t, 64, sync)
UNHANDLED(el1t, 64, irq)
UNHANDLED(el1t, 64, fiq)
UNHANDLED(el1t, 64, error)
301 
/* EL1 data/instruction abort. */
static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
{
	/* Read FAR_EL1 first: taking another exception would clobber it. */
	unsigned long far = read_sysreg(far_el1);
	irqentry_state_t state;

	state = arm64_enter_from_kernel_mode(regs);
	/* Restore the DAIF mask state the interrupted context was running with. */
	local_daif_inherit(regs);
	do_mem_abort(far, esr, regs);
	arm64_exit_to_kernel_mode(regs, state);
}

/* EL1 PC alignment fault (SP alignment never reaches here; see dispatch). */
static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);
	irqentry_state_t state;

	state = arm64_enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_sp_pc_abort(far, esr, regs);
	arm64_exit_to_kernel_mode(regs, state);
}

/* EL1 undefined/unknown instruction or trapped system instruction. */
static void noinstr el1_undef(struct pt_regs *regs, unsigned long esr)
{
	irqentry_state_t state;

	state = arm64_enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_undef(regs, esr);
	arm64_exit_to_kernel_mode(regs, state);
}

/* EL1 Branch Target Identification fault. */
static void noinstr el1_bti(struct pt_regs *regs, unsigned long esr)
{
	irqentry_state_t state;

	state = arm64_enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_bti(regs, esr);
	arm64_exit_to_kernel_mode(regs, state);
}

/* EL1 Guarded Control Stack exception. */
static void noinstr el1_gcs(struct pt_regs *regs, unsigned long esr)
{
	irqentry_state_t state;

	state = arm64_enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_gcs(regs, esr);
	arm64_exit_to_kernel_mode(regs, state);
}

/* EL1 memory-copy/memory-set (MOPS) exception. */
static void noinstr el1_mops(struct pt_regs *regs, unsigned long esr)
{
	irqentry_state_t state;

	state = arm64_enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_mops(regs, esr);
	arm64_exit_to_kernel_mode(regs, state);
}
363 
/* EL1 hardware breakpoint. Debug path: no DAIF inherit, NMI-like entry. */
static void noinstr el1_breakpt(struct pt_regs *regs, unsigned long esr)
{
	irqentry_state_t state;

	state = arm64_enter_el1_dbg(regs);
	debug_exception_enter(regs);
	do_breakpoint(esr, regs);
	debug_exception_exit(regs);
	arm64_exit_el1_dbg(regs, state);
}

/* EL1 software step exception. */
static void noinstr el1_softstp(struct pt_regs *regs, unsigned long esr)
{
	irqentry_state_t state;

	state = arm64_enter_el1_dbg(regs);
	/* The erratum 1463225 dummy step, if active, is consumed here. */
	if (!cortex_a76_erratum_1463225_debug_handler(regs)) {
		debug_exception_enter(regs);
		/*
		 * After handling a breakpoint, we suspend the breakpoint
		 * and use single-step to move to the next instruction.
		 * If we are stepping a suspended breakpoint there's nothing more to do:
		 * the single-step is complete.
		 */
		if (!try_step_suspended_breakpoints(regs))
			do_el1_softstep(esr, regs);
		debug_exception_exit(regs);
	}
	arm64_exit_el1_dbg(regs, state);
}

/* EL1 hardware watchpoint. */
static void noinstr el1_watchpt(struct pt_regs *regs, unsigned long esr)
{
	/* Watchpoints are the only debug exception to write FAR_EL1 */
	unsigned long far = read_sysreg(far_el1);
	irqentry_state_t state;

	state = arm64_enter_el1_dbg(regs);
	debug_exception_enter(regs);
	do_watchpoint(far, esr, regs);
	debug_exception_exit(regs);
	arm64_exit_el1_dbg(regs, state);
}

/* EL1 BRK instruction (software breakpoint). */
static void noinstr el1_brk64(struct pt_regs *regs, unsigned long esr)
{
	irqentry_state_t state;

	state = arm64_enter_el1_dbg(regs);
	debug_exception_enter(regs);
	do_el1_brk64(esr, regs);
	debug_exception_exit(regs);
	arm64_exit_el1_dbg(regs, state);
}
418 
/* EL1 pointer authentication failure (FPAC). */
static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
{
	irqentry_state_t state;

	state = arm64_enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_fpac(regs, esr);
	arm64_exit_to_kernel_mode(regs, state);
}
428 
/* Dispatch an EL1h synchronous exception on its ESR_EL1 exception class. */
asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_DABT_CUR:
	case ESR_ELx_EC_IABT_CUR:
		el1_abort(regs, esr);
		break;
	/*
	 * We don't handle ESR_ELx_EC_SP_ALIGN, since we will have hit a
	 * recursive exception when trying to push the initial pt_regs.
	 */
	case ESR_ELx_EC_PC_ALIGN:
		el1_pc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_UNKNOWN:
		el1_undef(regs, esr);
		break;
	case ESR_ELx_EC_BTI:
		el1_bti(regs, esr);
		break;
	case ESR_ELx_EC_GCS:
		el1_gcs(regs, esr);
		break;
	case ESR_ELx_EC_MOPS:
		el1_mops(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_CUR:
		el1_breakpt(regs, esr);
		break;
	case ESR_ELx_EC_SOFTSTP_CUR:
		el1_softstp(regs, esr);
		break;
	case ESR_ELx_EC_WATCHPT_CUR:
		el1_watchpt(regs, esr);
		break;
	case ESR_ELx_EC_BRK64:
		el1_brk64(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el1_fpac(regs, esr);
		break;
	default:
		/* Anything else is fatal at EL1. */
		__panic_unhandled(regs, "64-bit el1h sync", esr);
	}
}
477 
/* Pseudo-NMI path: NMI-like entry accounting around the handler. */
static __always_inline void __el1_pnmi(struct pt_regs *regs,
				       void (*handler)(struct pt_regs *))
{
	irqentry_state_t state;

	state = irqentry_nmi_enter(regs);
	do_interrupt_handler(regs, handler);
	irqentry_nmi_exit(regs, state);
}

/* Normal IRQ path: regular kernel-mode entry plus irq_enter/exit_rcu. */
static __always_inline void __el1_irq(struct pt_regs *regs,
				      void (*handler)(struct pt_regs *))
{
	irqentry_state_t state;

	state = arm64_enter_from_kernel_mode(regs);

	irq_enter_rcu();
	do_interrupt_handler(regs, handler);
	irq_exit_rcu();

	arm64_exit_to_kernel_mode(regs, state);
}
static void noinstr el1_interrupt(struct pt_regs *regs,
				  void (*handler)(struct pt_regs *))
{
	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);

	/*
	 * With pseudo-NMIs, an interrupt taken while regs show IRQs masked
	 * must be a pseudo-NMI; handle it on the NMI-like path.
	 */
	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && regs_irqs_disabled(regs))
		__el1_pnmi(regs, handler);
	else
		__el1_irq(regs, handler);
}
511 
asmlinkage void noinstr el1h_64_irq_handler(struct pt_regs *regs)
{
	el1_interrupt(regs, handle_arch_irq);
}

asmlinkage void noinstr el1h_64_fiq_handler(struct pt_regs *regs)
{
	el1_interrupt(regs, handle_arch_fiq);
}

/* EL1 SError: treated as NMI-like; DAIF set to the SError-context mask. */
asmlinkage void noinstr el1h_64_error_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);
	irqentry_state_t state;

	local_daif_restore(DAIF_ERRCTX);
	state = irqentry_nmi_enter(regs);
	do_serror(regs, esr);
	irqentry_nmi_exit(regs, state);
}
532 
/* EL0 data abort. */
static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
{
	/* Read FAR_EL1 before anything that could take another exception. */
	unsigned long far = read_sysreg(far_el1);

	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(far, esr, regs);
	arm64_exit_to_user_mode(regs);
}

/* EL0 instruction abort. */
static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	/*
	 * We've taken an instruction abort from userspace and not yet
	 * re-enabled IRQs. If the address is a kernel address, apply
	 * BP hardening prior to enabling IRQs and pre-emption.
	 */
	if (!is_ttbr0_addr(far))
		arm64_apply_bp_hardening();

	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(far, esr, regs);
	arm64_exit_to_user_mode(regs);
}
560 
/* EL0 FP/ASIMD access trap. */
static void noinstr el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_acc(esr, regs);
	arm64_exit_to_user_mode(regs);
}

/* EL0 SVE access trap. */
static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sve_acc(esr, regs);
	arm64_exit_to_user_mode(regs);
}

/* EL0 SME access trap. */
static void noinstr el0_sme_acc(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sme_acc(esr, regs);
	arm64_exit_to_user_mode(regs);
}

/* EL0 FP exception. */
static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_exc(esr, regs);
	arm64_exit_to_user_mode(regs);
}

/* EL0 trapped system instruction (also WFx; see the dispatcher). */
static void noinstr el0_sys(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_sys(esr, regs);
	arm64_exit_to_user_mode(regs);
}
600 
/* EL0 PC alignment fault. */
static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	/* A kernel-half PC from EL0 is suspicious: apply BP hardening first. */
	if (!is_ttbr0_addr(instruction_pointer(regs)))
		arm64_apply_bp_hardening();

	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(far, esr, regs);
	arm64_exit_to_user_mode(regs);
}

/* EL0 SP alignment fault. */
static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(regs->sp, esr, regs);
	arm64_exit_to_user_mode(regs);
}

/* EL0 undefined/unknown instruction. */
static void noinstr el0_undef(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_undef(regs, esr);
	arm64_exit_to_user_mode(regs);
}

/* EL0 Branch Target Identification fault. */
static void noinstr el0_bti(struct pt_regs *regs)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_bti(regs);
	arm64_exit_to_user_mode(regs);
}

/* EL0 memory-copy/memory-set (MOPS) exception. */
static void noinstr el0_mops(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_mops(regs, esr);
	arm64_exit_to_user_mode(regs);
}

/* EL0 Guarded Control Stack exception. */
static void noinstr el0_gcs(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_gcs(regs, esr);
	arm64_exit_to_user_mode(regs);
}

/* Fallback for EL0 sync exception classes we do not handle. */
static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	bad_el0_sync(regs, 0, esr);
	arm64_exit_to_user_mode(regs);
}
661 
/* EL0 hardware breakpoint. */
static void noinstr el0_breakpt(struct pt_regs *regs, unsigned long esr)
{
	/* A kernel-half PC from EL0 is suspicious: apply BP hardening first. */
	if (!is_ttbr0_addr(regs->pc))
		arm64_apply_bp_hardening();

	arm64_enter_from_user_mode(regs);
	debug_exception_enter(regs);
	do_breakpoint(esr, regs);
	debug_exception_exit(regs);
	local_daif_restore(DAIF_PROCCTX);
	arm64_exit_to_user_mode(regs);
}

/* EL0 software step exception. */
static void noinstr el0_softstp(struct pt_regs *regs, unsigned long esr)
{
	bool step_done;

	if (!is_ttbr0_addr(regs->pc))
		arm64_apply_bp_hardening();

	arm64_enter_from_user_mode(regs);
	/*
	 * After handling a breakpoint, we suspend the breakpoint
	 * and use single-step to move to the next instruction.
	 * If we are stepping a suspended breakpoint there's nothing more to do:
	 * the single-step is complete.
	 */
	step_done = try_step_suspended_breakpoints(regs);
	local_daif_restore(DAIF_PROCCTX);
	if (!step_done)
		do_el0_softstep(esr, regs);
	arm64_exit_to_user_mode(regs);
}

/* EL0 hardware watchpoint. */
static void noinstr el0_watchpt(struct pt_regs *regs, unsigned long esr)
{
	/* Watchpoints are the only debug exception to write FAR_EL1 */
	unsigned long far = read_sysreg(far_el1);

	arm64_enter_from_user_mode(regs);
	debug_exception_enter(regs);
	do_watchpoint(far, esr, regs);
	debug_exception_exit(regs);
	local_daif_restore(DAIF_PROCCTX);
	arm64_exit_to_user_mode(regs);
}
708 
/* EL0 BRK instruction (software breakpoint). */
static void noinstr el0_brk64(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_brk64(esr, regs);
	arm64_exit_to_user_mode(regs);
}

/* 64-bit syscall entry (SVC). */
static void noinstr el0_svc(struct pt_regs *regs)
{
	arm64_enter_from_user_mode(regs);
	/* Erratum workaround must run before IRQs are re-enabled. */
	cortex_a76_erratum_1463225_svc_handler();
	/* Per ABI, exit streaming mode and discard non-shared SVE state. */
	fpsimd_syscall_enter();
	local_daif_restore(DAIF_PROCCTX);
	do_el0_svc(regs);
	arm64_exit_to_user_mode(regs);
	fpsimd_syscall_exit();
}

/* EL0 pointer authentication failure (FPAC). */
static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_fpac(regs, esr);
	arm64_exit_to_user_mode(regs);
}
735 
/* Dispatch a 64-bit EL0 synchronous exception on its ESR_EL1 class. */
asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC64:
		el0_svc(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_SVE:
		el0_sve_acc(regs, esr);
		break;
	case ESR_ELx_EC_SME:
		el0_sme_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC64:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_WFx:
		el0_sys(regs, esr);
		break;
	case ESR_ELx_EC_SP_ALIGN:
		el0_sp(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
		el0_undef(regs, esr);
		break;
	case ESR_ELx_EC_BTI:
		el0_bti(regs);
		break;
	case ESR_ELx_EC_MOPS:
		el0_mops(regs, esr);
		break;
	case ESR_ELx_EC_GCS:
		el0_gcs(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
		el0_breakpt(regs, esr);
		break;
	case ESR_ELx_EC_SOFTSTP_LOW:
		el0_softstp(regs, esr);
		break;
	case ESR_ELx_EC_WATCHPT_LOW:
		el0_watchpt(regs, esr);
		break;
	case ESR_ELx_EC_BRK64:
		el0_brk64(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el0_fpac(regs, esr);
		break;
	default:
		/* Unlike EL1, unexpected EL0 exceptions are non-fatal. */
		el0_inv(regs, esr);
	}
}
803 
static void noinstr el0_interrupt(struct pt_regs *regs,
				  void (*handler)(struct pt_regs *))
{
	arm64_enter_from_user_mode(regs);

	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);

	/* A PC in the kernel half (bit 55 set) from EL0 is suspicious. */
	if (regs->pc & BIT(55))
		arm64_apply_bp_hardening();

	irq_enter_rcu();
	do_interrupt_handler(regs, handler);
	irq_exit_rcu();

	arm64_exit_to_user_mode(regs);
}

/* Shared by the 64-bit and 32-bit EL0 IRQ vectors. */
static void noinstr __el0_irq_handler_common(struct pt_regs *regs)
{
	el0_interrupt(regs, handle_arch_irq);
}

asmlinkage void noinstr el0t_64_irq_handler(struct pt_regs *regs)
{
	__el0_irq_handler_common(regs);
}

/* Shared by the 64-bit and 32-bit EL0 FIQ vectors. */
static void noinstr __el0_fiq_handler_common(struct pt_regs *regs)
{
	el0_interrupt(regs, handle_arch_fiq);
}

asmlinkage void noinstr el0t_64_fiq_handler(struct pt_regs *regs)
{
	__el0_fiq_handler_common(regs);
}
840 
/* EL0 SError: NMI-like handling, shared by 64-bit and 32-bit vectors. */
static void noinstr __el0_error_handler_common(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);
	irqentry_state_t state;

	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_ERRCTX);
	state = irqentry_nmi_enter(regs);
	do_serror(regs, esr);
	irqentry_nmi_exit(regs, state);
	local_daif_restore(DAIF_PROCCTX);
	arm64_exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_64_error_handler(struct pt_regs *regs)
{
	__el0_error_handler_common(regs);
}
859 
#ifdef CONFIG_COMPAT
/* AArch32 EL0 trapped CP15 access. */
static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_cp15(esr, regs);
	arm64_exit_to_user_mode(regs);
}

/* AArch32 syscall entry (SVC). */
static void noinstr el0_svc_compat(struct pt_regs *regs)
{
	arm64_enter_from_user_mode(regs);
	/* Erratum workaround must run before IRQs are re-enabled. */
	cortex_a76_erratum_1463225_svc_handler();
	local_daif_restore(DAIF_PROCCTX);
	do_el0_svc_compat(regs);
	arm64_exit_to_user_mode(regs);
}

/* AArch32 BKPT instruction (software breakpoint). */
static void noinstr el0_bkpt32(struct pt_regs *regs, unsigned long esr)
{
	arm64_enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_bkpt32(esr, regs);
	arm64_exit_to_user_mode(regs);
}
885 
/* Dispatch a 32-bit EL0 synchronous exception on its ESR_EL1 class. */
asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC32:
		el0_svc_compat(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC32:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_LS:
	case ESR_ELx_EC_CP14_64:
		el0_undef(regs, esr);
		break;
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		el0_cp15(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
		el0_breakpt(regs, esr);
		break;
	case ESR_ELx_EC_SOFTSTP_LOW:
		el0_softstp(regs, esr);
		break;
	case ESR_ELx_EC_WATCHPT_LOW:
		el0_watchpt(regs, esr);
		break;
	case ESR_ELx_EC_BKPT32:
		el0_bkpt32(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}
935 
/* 32-bit EL0 IRQ/FIQ/SError vectors share the 64-bit common paths. */
asmlinkage void noinstr el0t_32_irq_handler(struct pt_regs *regs)
{
	__el0_irq_handler_common(regs);
}

asmlinkage void noinstr el0t_32_fiq_handler(struct pt_regs *regs)
{
	__el0_fiq_handler_common(regs);
}

asmlinkage void noinstr el0t_32_error_handler(struct pt_regs *regs)
{
	__el0_error_handler_common(regs);
}
#else /* CONFIG_COMPAT */
/* Without compat support, 32-bit EL0 exceptions are unexpected and fatal. */
UNHANDLED(el0t, 32, sync)
UNHANDLED(el0t, 32, irq)
UNHANDLED(el0t, 32, fiq)
UNHANDLED(el0t, 32, error)
#endif /* CONFIG_COMPAT */
956 
/* Called from asm on kernel stack overflow; never returns. */
asmlinkage void noinstr __noreturn handle_bad_stack(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);
	unsigned long far = read_sysreg(far_el1);

	irqentry_nmi_enter(regs);
	panic_bad_stack(regs, esr, far);
}
965 
#ifdef CONFIG_ARM_SDE_INTERFACE
/*
 * SDEI event entry: no architectural exception was taken to get here, so
 * PSTATE must be fixed up manually and entry is accounted NMI-like.
 */
asmlinkage noinstr unsigned long
__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
{
	irqentry_state_t state;
	unsigned long ret;

	/*
	 * We didn't take an exception to get here, so the HW hasn't
	 * set/cleared bits in PSTATE that we may rely on.
	 *
	 * The original SDEI spec (ARM DEN 0054A) can be read ambiguously as to
	 * whether PSTATE bits are inherited unchanged or generated from
	 * scratch, and the TF-A implementation always clears PAN and always
	 * clears UAO. There are no other known implementations.
	 *
	 * Subsequent revisions (ARM DEN 0054B) follow the usual rules for how
	 * PSTATE is modified upon architectural exceptions, and so PAN is
	 * either inherited or set per SCTLR_ELx.SPAN, and UAO is always
	 * cleared.
	 *
	 * We must explicitly reset PAN to the expected state, including
	 * clearing it when the host isn't using it, in case a VM had it set.
	 */
	if (system_uses_hw_pan())
		set_pstate_pan(1);
	else if (cpu_has_pan())
		set_pstate_pan(0);

	state = irqentry_nmi_enter(regs);
	ret = do_sdei_event(regs, arg);
	irqentry_nmi_exit(regs, state);

	return ret;
}
#endif /* CONFIG_ARM_SDE_INTERFACE */
1002