xref: /linux/arch/powerpc/include/asm/interrupt.h (revision 812aa68ef7d4d71bed996468ead665092a3f8de9)
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 #ifndef _ASM_POWERPC_INTERRUPT_H
3 #define _ASM_POWERPC_INTERRUPT_H
4 
5 #include <linux/context_tracking.h>
6 #include <linux/hardirq.h>
7 #include <asm/cputime.h>
8 #include <asm/ftrace.h>
9 #include <asm/kprobes.h>
10 #include <asm/runlatch.h>
11 
/*
 * Per-interrupt state saved by interrupt_enter_prepare() and consumed by
 * interrupt_exit_prepare().  Only Book3E-64 needs to carry anything: the
 * context-tracking state returned by exception_enter(), which must be
 * handed back to exception_exit().  On other platforms this is empty.
 */
struct interrupt_state {
#ifdef CONFIG_PPC_BOOK3E_64
	enum ctx_state ctx_state;	/* from exception_enter() at entry */
#endif
};
17 
/*
 * Restore this CPU's debug control register from global_dbcr0[] when the
 * current task has the BookE internal debug mode bit (DBCR0_IDM) set in
 * its saved thread state.  All-ones is written to DBSR before DBCR0 is
 * reloaded.  No-op unless CONFIG_PPC_ADV_DEBUG_REGS and CONFIG_PPC32.
 */
static inline void booke_restore_dbcr0(void)
{
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long thread_dbcr0 = current->thread.debug.dbcr0;

	if (!IS_ENABLED(CONFIG_PPC32))
		return;
	if (likely(!(thread_dbcr0 & DBCR0_IDM)))
		return;

	mtspr(SPRN_DBSR, -1);
	mtspr(SPRN_DBCR0, global_dbcr0[smp_processor_id()]);
#endif
}
29 
/*
 * interrupt_enter_prepare() - common entry work for synchronous interrupts
 * @regs: interrupted register state from the asm entry code
 * @state: save area, handed back to interrupt_exit_prepare()
 *
 * Reconciles irq-trace/soft-mask state with the interrupted context,
 * switches context tracking out of user mode on user entry, and accounts
 * user/stolen time where the platform supports it.
 */
static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
#ifdef CONFIG_PPC32
	/* Keep lockdep/irq tracing in sync with the interrupted context */
	if (!arch_irq_disabled_regs(regs))
		trace_hardirqs_off();

	if (user_mode(regs)) {
		kuep_lock();
		account_cpu_user_entry();
	} else {
		kuap_save_and_lock(regs);
	}
#endif
	/*
	 * Book3E reconciles irq soft mask in asm
	 */
#ifdef CONFIG_PPC_BOOK3S_64
	/* Mask everything; tell irq tracing if irqs were previously on */
	if (irq_soft_mask_set_return(IRQS_ALL_DISABLED) == IRQS_ENABLED)
		trace_hardirqs_off();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	if (user_mode(regs)) {
		CT_WARN_ON(ct_state() != CONTEXT_USER);
		user_exit_irqoff();

		account_cpu_user_entry();
		account_stolen_time();
	} else {
		/*
		 * CT_WARN_ON comes here via program_check_exception,
		 * so avoid recursion.
		 */
		if (TRAP(regs) != 0x700)
			CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
	}
#endif

#ifdef CONFIG_PPC_BOOK3E_64
	/* Book3E-64 uses the generic context-tracking helpers instead */
	state->ctx_state = exception_enter();
	if (user_mode(regs))
		account_cpu_user_entry();
#endif

	booke_restore_dbcr0();
}
75 
/*
 * Care should be taken to note that interrupt_exit_prepare and
 * interrupt_async_exit_prepare do not necessarily return immediately to
 * regs context (e.g., if regs is usermode, we don't necessarily return to
 * user mode). Other interrupts might be taken between here and return,
 * context switch / preemption may occur in the exit path after this, or a
 * signal may be delivered, etc.
 *
 * The real interrupt exit code is platform specific, e.g.,
 * interrupt_exit_user_prepare / interrupt_exit_kernel_prepare for 64s.
 *
 * However interrupt_nmi_exit_prepare does return directly to regs, because
 * NMIs do not do "exit work" or replay soft-masked interrupts.
 */
/*
 * interrupt_exit_prepare() - mirror of interrupt_enter_prepare()
 * @regs: interrupted register state
 * @state: save area filled in by interrupt_enter_prepare()
 */
static inline void interrupt_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
#ifdef CONFIG_PPC_BOOK3E_64
	exception_exit(state->ctx_state);
#endif

	if (user_mode(regs))
		kuep_unlock();
	/*
	 * Book3S exits to user via interrupt_exit_user_prepare(), which does
	 * context tracking, which is a cleaner way to handle PREEMPT=y
	 * and avoid context entry/exit in e.g., preempt_schedule_irq(),
	 * which is likely to be where the core code wants to end up.
	 *
	 * The above comment explains why we can't do the
	 *
	 *     if (user_mode(regs))
	 *         user_exit_irqoff();
	 *
	 * sequence here.
	 */
}
112 
/*
 * interrupt_async_enter_prepare() - entry work for asynchronous interrupts
 * @regs: interrupted register state from the asm entry code
 * @state: save area, handed back to interrupt_async_exit_prepare()
 *
 * Same as interrupt_enter_prepare() plus irq_enter() for generic hardirq
 * bookkeeping; Book3S-64 additionally turns the CTRL run latch on first.
 */
static inline void interrupt_async_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
#ifdef CONFIG_PPC_BOOK3S_64
	/* Set the run latch if this thread does not already have it set */
	if (cpu_has_feature(CPU_FTR_CTRL) &&
	    !test_thread_local_flags(_TLF_RUNLATCH))
		__ppc64_runlatch_on();
#endif

	interrupt_enter_prepare(regs, state);
	irq_enter();
}
124 
/*
 * interrupt_async_exit_prepare() - exit work for asynchronous interrupts
 * @regs: interrupted register state
 * @state: save area filled in by interrupt_async_enter_prepare()
 *
 * Unwinds in the reverse order of interrupt_async_enter_prepare():
 * irq_exit() first, then the common interrupt_exit_prepare().
 */
static inline void interrupt_async_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
	irq_exit();
	interrupt_exit_prepare(regs, state);
}
130 
/*
 * State saved across an NMI by interrupt_nmi_enter_prepare() and restored
 * by interrupt_nmi_exit_prepare().
 */
struct interrupt_nmi_state {
#ifdef CONFIG_PPC64
#ifdef CONFIG_PPC_BOOK3S_64
	u8 irq_soft_mask;	/* saved local_paca->irq_soft_mask */
	u8 irq_happened;	/* saved local_paca->irq_happened */
#endif
	u8 ftrace_enabled;	/* saved per-CPU ftrace enable state */
#endif
};
140 
/*
 * interrupt_nmi_enter_prepare() - entry work for NMI-class interrupts
 * @regs: interrupted register state from the asm entry code
 * @state: save area, handed back to interrupt_nmi_exit_prepare()
 *
 * Saves and then directly overrides the paca irq state (no reconcile,
 * which would involve irq tracing), disables ftrace except for soft-NMI
 * trap vectors, and enters NMI context unless this is a real-mode NMI in
 * a pseries hash guest (see comment below).
 */
static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
{
#ifdef CONFIG_PPC64
#ifdef CONFIG_PPC_BOOK3S_64
	state->irq_soft_mask = local_paca->irq_soft_mask;
	state->irq_happened = local_paca->irq_happened;

	/*
	 * Set IRQS_ALL_DISABLED unconditionally so irqs_disabled() does
	 * the right thing, and set IRQ_HARD_DIS. We do not want to reconcile
	 * because that goes through irq tracing which we don't want in NMI.
	 */
	local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	/* Don't do any per-CPU operations until interrupt state is fixed */
#endif
	/* Allow DEC and PMI to be traced when they are soft-NMI */
	if (TRAP(regs) != 0x900 && TRAP(regs) != 0xf00 && TRAP(regs) != 0x260) {
		state->ftrace_enabled = this_cpu_get_ftrace_enabled();
		this_cpu_set_ftrace_enabled(0);
	}
#endif

	/*
	 * Do not use nmi_enter() for pseries hash guest taking a real-mode
	 * NMI because not everything it touches is within the RMA limit.
	 */
	if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
			!firmware_has_feature(FW_FEATURE_LPAR) ||
			radix_enabled() || (mfmsr() & MSR_DR))
		nmi_enter();
}
174 
/*
 * interrupt_nmi_exit_prepare() - mirror of interrupt_nmi_enter_prepare()
 * @regs: interrupted register state
 * @state: save area filled in by interrupt_nmi_enter_prepare()
 *
 * Unlike the other exit helpers, this returns directly to the regs
 * context (see the comment above interrupt_exit_prepare()).
 */
static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
{
	/* This condition must match the nmi_enter() condition at entry */
	if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
			!firmware_has_feature(FW_FEATURE_LPAR) ||
			radix_enabled() || (mfmsr() & MSR_DR))
		nmi_exit();

#ifdef CONFIG_PPC64
	/* Same trap filter as entry: ftrace was left enabled for soft-NMIs */
	if (TRAP(regs) != 0x900 && TRAP(regs) != 0xf00 && TRAP(regs) != 0x260)
		this_cpu_set_ftrace_enabled(state->ftrace_enabled);

#ifdef CONFIG_PPC_BOOK3S_64
	/* Check we didn't change the pending interrupt mask. */
	WARN_ON_ONCE((state->irq_happened | PACA_IRQ_HARD_DIS) != local_paca->irq_happened);
	local_paca->irq_happened = state->irq_happened;
	local_paca->irq_soft_mask = state->irq_soft_mask;
#endif
#endif
}
194 
/*
 * Don't use noinstr here like x86, but rather add NOKPROBE_SYMBOL to each
 * function definition. The reason for this is the noinstr section is placed
 * after the main text section, i.e., very far away from the interrupt entry
 * asm. That creates problems with fitting linker stubs when building large
 * kernels.
 *
 * The attribute set still keeps the wrappers out of instrumentation's way:
 * notrace disables ftrace, and __no_kcsan/__no_sanitize_address suppress
 * KCSAN and KASAN instrumentation.
 */
#define interrupt_handler __visible noinline notrace __no_kcsan __no_sanitize_address
203 
/**
 * DECLARE_INTERRUPT_HANDLER_RAW - Declare raw interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_RAW(func)				\
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_RAW - Define raw interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * This is a plain function which does no tracing, reconciling, etc.
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.  The body becomes the
 * always-inlined ____##func(); @func itself is the asm-visible wrapper.
 *
 * raw interrupt handlers must not enable or disable interrupts, or
 * schedule, tracing and instrumentation (ftrace, lockdep, etc) would
 * not be advisable either, although may be possible in a pinch, the
 * trace will look odd at least.
 *
 * A raw handler may call one of the other interrupt handler functions
 * to be converted into that interrupt context without these restrictions.
 *
 * On PPC64, _RAW handlers may return with fast_interrupt_return.
 *
 * Specific handlers may have additional restrictions.
 */
#define DEFINE_INTERRUPT_HANDLER_RAW(func)				\
static __always_inline long ____##func(struct pt_regs *regs);		\
									\
interrupt_handler long func(struct pt_regs *regs)			\
{									\
	long ret;							\
									\
	ret = ____##func (regs);					\
									\
	return ret;							\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline long ____##func(struct pt_regs *regs)
/**
 * DECLARE_INTERRUPT_HANDLER - Declare synchronous interrupt handler function
 * @func:	Function name of the entry point
 */
#define DECLARE_INTERRUPT_HANDLER(func)					\
	__visible void func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER - Define synchronous interrupt handler function
 * @func:	Function name of the entry point
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.  The body runs between
 * interrupt_enter_prepare() and interrupt_exit_prepare(), which take
 * care of irq reconciling, context tracking and accounting.
 */
#define DEFINE_INTERRUPT_HANDLER(func)					\
static __always_inline void ____##func(struct pt_regs *regs);		\
									\
interrupt_handler void func(struct pt_regs *regs)			\
{									\
	struct interrupt_state state;					\
									\
	interrupt_enter_prepare(regs, &state);				\
									\
	____##func (regs);						\
									\
	interrupt_exit_prepare(regs, &state);				\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline void ____##func(struct pt_regs *regs)
/**
 * DECLARE_INTERRUPT_HANDLER_RET - Declare synchronous interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_RET(func)				\
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_RET - Define synchronous interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.  The body runs between
 * interrupt_enter_prepare() and interrupt_exit_prepare(); its return
 * value is passed back to the asm caller after the exit work.
 */
#define DEFINE_INTERRUPT_HANDLER_RET(func)				\
static __always_inline long ____##func(struct pt_regs *regs);		\
									\
interrupt_handler long func(struct pt_regs *regs)			\
{									\
	struct interrupt_state state;					\
	long ret;							\
									\
	interrupt_enter_prepare(regs, &state);				\
									\
	ret = ____##func (regs);					\
									\
	interrupt_exit_prepare(regs, &state);				\
									\
	return ret;							\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline long ____##func(struct pt_regs *regs)
/**
 * DECLARE_INTERRUPT_HANDLER_ASYNC - Declare asynchronous interrupt handler function
 * @func:	Function name of the entry point
 */
#define DECLARE_INTERRUPT_HANDLER_ASYNC(func)				\
	__visible void func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_ASYNC - Define asynchronous interrupt handler function
 * @func:	Function name of the entry point
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.  The body runs between
 * interrupt_async_enter_prepare() (which also does irq_enter()) and
 * interrupt_async_exit_prepare() (irq_exit()).
 */
#define DEFINE_INTERRUPT_HANDLER_ASYNC(func)				\
static __always_inline void ____##func(struct pt_regs *regs);		\
									\
interrupt_handler void func(struct pt_regs *regs)			\
{									\
	struct interrupt_state state;					\
									\
	interrupt_async_enter_prepare(regs, &state);			\
									\
	____##func (regs);						\
									\
	interrupt_async_exit_prepare(regs, &state);			\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline void ____##func(struct pt_regs *regs)
/**
 * DECLARE_INTERRUPT_HANDLER_NMI - Declare NMI interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_NMI(func)				\
	__visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_NMI - Define NMI interrupt handler function
 * @func:	Function name of the entry point
 * @returns:	Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.  The body runs in NMI context,
 * between interrupt_nmi_enter_prepare() and
 * interrupt_nmi_exit_prepare().
 */
#define DEFINE_INTERRUPT_HANDLER_NMI(func)				\
static __always_inline long ____##func(struct pt_regs *regs);		\
									\
interrupt_handler long func(struct pt_regs *regs)			\
{									\
	struct interrupt_nmi_state state;				\
	long ret;							\
									\
	interrupt_nmi_enter_prepare(regs, &state);			\
									\
	ret = ____##func (regs);					\
									\
	interrupt_nmi_exit_prepare(regs, &state);			\
									\
	return ret;							\
}									\
NOKPROBE_SYMBOL(func);							\
									\
static __always_inline long ____##func(struct pt_regs *regs)
391 
392 
/* Interrupt handlers */
/* kernel/traps.c */
DECLARE_INTERRUPT_HANDLER_NMI(system_reset_exception);
/* Book3S-64 takes machine check as an async interrupt, others as NMI */
#ifdef CONFIG_PPC_BOOK3S_64
DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception);
#else
DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
#endif
DECLARE_INTERRUPT_HANDLER(SMIException);
DECLARE_INTERRUPT_HANDLER(handle_hmi_exception);
DECLARE_INTERRUPT_HANDLER(unknown_exception);
DECLARE_INTERRUPT_HANDLER_ASYNC(unknown_async_exception);
DECLARE_INTERRUPT_HANDLER(instruction_breakpoint_exception);
DECLARE_INTERRUPT_HANDLER(RunModeException);
DECLARE_INTERRUPT_HANDLER(single_step_exception);
DECLARE_INTERRUPT_HANDLER(program_check_exception);
DECLARE_INTERRUPT_HANDLER(emulation_assist_interrupt);
DECLARE_INTERRUPT_HANDLER(alignment_exception);
DECLARE_INTERRUPT_HANDLER(StackOverflow);
DECLARE_INTERRUPT_HANDLER(stack_overflow_exception);
DECLARE_INTERRUPT_HANDLER(kernel_fp_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(altivec_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(vsx_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(facility_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(fp_unavailable_tm);
DECLARE_INTERRUPT_HANDLER(altivec_unavailable_tm);
DECLARE_INTERRUPT_HANDLER(vsx_unavailable_tm);
/* Performance monitor has NMI, async and raw flavours */
DECLARE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi);
DECLARE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception_async);
DECLARE_INTERRUPT_HANDLER_RAW(performance_monitor_exception);
DECLARE_INTERRUPT_HANDLER(DebugException);
DECLARE_INTERRUPT_HANDLER(altivec_assist_exception);
DECLARE_INTERRUPT_HANDLER(CacheLockingException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointRoundException);
DECLARE_INTERRUPT_HANDLER(WatchdogException);
DECLARE_INTERRUPT_HANDLER(kernel_bad_stack);

/* slb.c */
DECLARE_INTERRUPT_HANDLER_RAW(do_slb_fault);
DECLARE_INTERRUPT_HANDLER(do_bad_slb_fault);

/* hash_utils.c */
DECLARE_INTERRUPT_HANDLER_RAW(do_hash_fault);

/* fault.c */
DECLARE_INTERRUPT_HANDLER_RET(do_page_fault);
DECLARE_INTERRUPT_HANDLER(do_bad_page_fault_segv);

/* process.c */
DECLARE_INTERRUPT_HANDLER(do_break);

/* time.c */
DECLARE_INTERRUPT_HANDLER_ASYNC(timer_interrupt);

/* mce.c */
DECLARE_INTERRUPT_HANDLER_NMI(machine_check_early);
DECLARE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode);

DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException);

/* __noreturn: never resumes the interrupted context */
void __noreturn unrecoverable_exception(struct pt_regs *regs);

void replay_system_reset(void);
void replay_soft_interrupts(void);
458 
/*
 * Re-enable local interrupts, but only if the interrupted context (regs)
 * did not have them disabled.
 */
static inline void interrupt_cond_local_irq_enable(struct pt_regs *regs)
{
	if (arch_irq_disabled_regs(regs))
		return;

	local_irq_enable();
}
464 
465 #endif /* _ASM_POWERPC_INTERRUPT_H */
466