/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_IRQFLAGS_H
#define __ASM_IRQFLAGS_H

#include <asm/barrier.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>

/*
 * AArch64 has flags for masking: Debug, Asynchronous (SError), Interrupts and
 * FIQ exceptions, in the 'daif' register. We mask and unmask them in 'daif'
 * order:
 * Masking debug exceptions causes all other exceptions to be masked too.
 * Masking SError masks IRQ/FIQ, but not debug exceptions. IRQ and FIQ are
 * always masked and unmasked together, and have no side effects for other
 * flags. Keeping to this order makes it easier for entry.S to know which
 * exceptions should be unmasked.
 */

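/*
 * Unmask IRQ and FIQ via PSTATE.DAIF: "msr daifclr, #3" clears the I and F
 * bits (the DAIFSet/DAIFClr immediate encodes D=8, A=4, I=2, F=1). The
 * barrier() calls are compiler barriers so memory accesses are not reordered
 * across the unmasking point.
 */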
static __always_inline void __daif_local_irq_enable(void)
{
	barrier();
	asm volatile("msr daifclr, #3");
	barrier();
}

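/*
 * Unmask IRQs when GIC priority masking (pseudo-NMI) is in use: writing
 * GIC_PRIO_IRQON to ICC_PMR_EL1 lets normal interrupt priorities through
 * again, and pmr_sync() makes sure the PMR update has taken effect before we
 * continue. With CONFIG_ARM64_DEBUG_PRIORITY_MASKING, warn if PMR holds an
 * unexpected value on entry.
 */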
static __always_inline void __pmr_local_irq_enable(void)
{
	if (IS_ENABLED(CONFIG_ARM64_DEBUG_PRIORITY_MASKING)) {
		u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);
		WARN_ON_ONCE(pmr != GIC_PRIO_IRQON && pmr != GIC_PRIO_IRQOFF);
	}

	barrier();
	write_sysreg_s(GIC_PRIO_IRQON, SYS_ICC_PMR_EL1);
	pmr_sync();
	barrier();
}

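/*
 * The arch_* helpers below dispatch at runtime: when the kernel uses GIC
 * priority masking (pseudo-NMI), the interrupt-enable state lives in
 * ICC_PMR_EL1; otherwise it lives in the PSTATE.DAIF bits.
 */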
static inline void arch_local_irq_enable(void)
{
	if (system_uses_irq_prio_masking()) {
		__pmr_local_irq_enable();
	} else {
		__daif_local_irq_enable();
	}
}

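/* Mask IRQ and FIQ by setting the I and F bits ("msr daifset, #3"). */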
static __always_inline void __daif_local_irq_disable(void)
{
	barrier();
	asm volatile("msr daifset, #3");
	barrier();
}

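/*
 * Mask IRQs by dropping the priority mask to GIC_PRIO_IRQOFF. Unlike the
 * enable path, no pmr_sync() is performed here.
 */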
static __always_inline void __pmr_local_irq_disable(void)
{
	if (IS_ENABLED(CONFIG_ARM64_DEBUG_PRIORITY_MASKING)) {
		u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);
		WARN_ON_ONCE(pmr != GIC_PRIO_IRQON && pmr != GIC_PRIO_IRQOFF);
	}

	barrier();
	write_sysreg_s(GIC_PRIO_IRQOFF, SYS_ICC_PMR_EL1);
	barrier();
}

static inline void arch_local_irq_disable(void)
{
	if (system_uses_irq_prio_masking()) {
		__pmr_local_irq_disable();
	} else {
		__daif_local_irq_disable();
	}
}

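/*
 * The saved "flags" value is either the raw DAIF bits or the current PMR
 * priority, depending on which masking scheme is in use. The two encodings
 * are not interchangeable, so flags must always be interpreted and restored
 * by the matching helper.
 */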
static __always_inline unsigned long __daif_local_save_flags(void)
{
	return read_sysreg(daif);
}

static __always_inline unsigned long __pmr_local_save_flags(void)
{
	return read_sysreg_s(SYS_ICC_PMR_EL1);
}

/*
 * Save the current interrupt enable state.
 */
static inline unsigned long arch_local_save_flags(void)
{
	if (system_uses_irq_prio_masking()) {
		return __pmr_local_save_flags();
	} else {
		return __daif_local_save_flags();
	}
}

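/*
 * For DAIF flags, IRQs count as disabled when the I bit (PSR_I_BIT) is set.
 * For PMR flags, any priority other than GIC_PRIO_IRQON means interrupts are
 * masked.
 */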
static __always_inline bool __daif_irqs_disabled_flags(unsigned long flags)
{
	return flags & PSR_I_BIT;
}

static __always_inline bool __pmr_irqs_disabled_flags(unsigned long flags)
{
	return flags != GIC_PRIO_IRQON;
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	if (system_uses_irq_prio_masking()) {
		return __pmr_irqs_disabled_flags(flags);
	} else {
		return __daif_irqs_disabled_flags(flags);
	}
}

static __always_inline bool __daif_irqs_disabled(void)
{
	return __daif_irqs_disabled_flags(__daif_local_save_flags());
}

static __always_inline bool __pmr_irqs_disabled(void)
{
	return __pmr_irqs_disabled_flags(__pmr_local_save_flags());
}

static inline bool arch_irqs_disabled(void)
{
	if (system_uses_irq_prio_masking()) {
		return __pmr_irqs_disabled();
	} else {
		return __daif_irqs_disabled();
	}
}

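/* Save the current DAIF bits, then mask IRQ/FIQ. */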
static __always_inline unsigned long __daif_local_irq_save(void)
{
	unsigned long flags = __daif_local_save_flags();

	__daif_local_irq_disable();

	return flags;
}

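/*
 * Save the current PMR priority and mask IRQs. The saved value is what
 * __pmr_local_irq_restore() later writes back.
 */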
static __always_inline unsigned long __pmr_local_irq_save(void)
{
	unsigned long flags = __pmr_local_save_flags();

	/*
	 * There are too many states with IRQs disabled; just keep the current
	 * state if interrupts are already disabled/masked.
	 */
	if (!__pmr_irqs_disabled_flags(flags))
		__pmr_local_irq_disable();

	return flags;
}

static inline unsigned long arch_local_irq_save(void)
{
	if (system_uses_irq_prio_masking()) {
		return __pmr_local_irq_save();
	} else {
		return __daif_local_irq_save();
	}
}

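/* Write the saved DAIF bits back, restoring the previous masking state. */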
static __always_inline void __daif_local_irq_restore(unsigned long flags)
{
	barrier();
	write_sysreg(flags, daif);
	barrier();
}

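/*
 * Write the saved priority back to ICC_PMR_EL1, with a pmr_sync() as in the
 * enable path since the restore may be unmasking interrupts.
 */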
static __always_inline void __pmr_local_irq_restore(unsigned long flags)
{
	barrier();
	write_sysreg_s(flags, SYS_ICC_PMR_EL1);
	pmr_sync();
	barrier();
}

/*
 * Restore the saved IRQ state.
 */
static inline void arch_local_irq_restore(unsigned long flags)
{
	if (system_uses_irq_prio_masking()) {
		__pmr_local_irq_restore(flags);
	} else {
		__daif_local_irq_restore(flags);
	}
}

#endif /* __ASM_IRQFLAGS_H */