xref: /linux/arch/x86/include/asm/irqflags.h (revision 6000fc4d6f3e55ad52cce8d76317187fe01af2aa)
1 #ifndef _X86_IRQFLAGS_H_
2 #define _X86_IRQFLAGS_H_
3 
4 #include <asm/processor-flags.h>
5 
6 #ifndef __ASSEMBLY__
7 /*
8  * Interrupt control:
9  */
10 
11 static inline unsigned long native_save_fl(void)
12 {
13 	unsigned long flags;
14 
15 	/*
16 	 * Note: this needs to be "=r" not "=rm", because we have the
17 	 * stack offset from what gcc expects at the time the "pop" is
18 	 * executed, and so a memory reference with respect to the stack
19 	 * would end up using the wrong address.
20 	 */
21 	asm volatile("# __raw_save_flags\n\t"
22 		     "pushf ; pop %0"
23 		     : "=r" (flags)
24 		     : /* no input */
25 		     : "memory");
26 
27 	return flags;
28 }
29 
/*
 * Write @flags back into EFLAGS via push/popf.  The "memory" and "cc"
 * clobbers stop the compiler from caching memory contents or condition
 * codes across the write, since popf may change the interrupt flag and
 * the arithmetic flags.
 */
static inline void native_restore_fl(unsigned long flags)
{
	asm volatile("push %0 ; popf"
		     : /* no output */
		     :"g" (flags)
		     :"memory", "cc");
}
37 
/*
 * Disable hardware interrupts on this CPU (CLI).  The "memory" clobber
 * makes the asm a compiler barrier so accesses are not moved across it.
 */
static inline void native_irq_disable(void)
{
	asm volatile("cli": : :"memory");
}
42 
/*
 * Enable hardware interrupts on this CPU (STI).  The "memory" clobber
 * makes the asm a compiler barrier so accesses are not moved across it.
 */
static inline void native_irq_enable(void)
{
	asm volatile("sti": : :"memory");
}
47 
/*
 * Enable interrupts and halt in one go.  STI only takes effect after
 * the following instruction completes, so no interrupt can be taken
 * between the "sti" and the "hlt" — the CPU halts with interrupts
 * guaranteed enabled (used by the idle loop, see raw_safe_halt()).
 */
static inline void native_safe_halt(void)
{
	asm volatile("sti; hlt": : :"memory");
}
52 
/*
 * Halt the CPU without touching the interrupt flag (bare HLT); if
 * interrupts are disabled this will not wake up on normal interrupts.
 */
static inline void native_halt(void)
{
	asm volatile("hlt": : :"memory");
}
57 
58 #endif
59 
60 #ifdef CONFIG_PARAVIRT
61 #include <asm/paravirt.h>
62 #else
63 #ifndef __ASSEMBLY__
64 
/* Non-paravirt build: flag read goes straight to the native PUSHF/POP. */
static inline unsigned long __raw_local_save_flags(void)
{
	return native_save_fl();
}
69 
/* Non-paravirt build: restore a previously saved EFLAGS value via POPF. */
static inline void raw_local_irq_restore(unsigned long flags)
{
	native_restore_fl(flags);
}
74 
/* Non-paravirt build: disable interrupts with a direct CLI. */
static inline void raw_local_irq_disable(void)
{
	native_irq_disable();
}
79 
/* Non-paravirt build: enable interrupts with a direct STI. */
static inline void raw_local_irq_enable(void)
{
	native_irq_enable();
}
84 
/*
 * Used in the idle loop; sti takes one instruction cycle
 * to complete, so the subsequent hlt executes before any
 * interrupt can be delivered:
 */
static inline void raw_safe_halt(void)
{
	native_safe_halt();
}
93 
/*
 * Used when interrupts are already enabled or to
 * shutdown the processor; does not touch the interrupt
 * flag itself:
 */
static inline void halt(void)
{
	native_halt();
}
102 
/*
 * For spinlocks, etc: atomically-usable save-and-disable.
 * The flags MUST be sampled before the cli so the caller can
 * later restore the pre-existing interrupt state.
 */
static inline unsigned long __raw_local_irq_save(void)
{
	unsigned long flags = __raw_local_save_flags();

	raw_local_irq_disable();

	return flags;
}
114 #else
115 
/*
 * Assembly-side, non-paravirt expansions: each hook becomes the bare
 * instruction.  The (x) argument is unused in the native case.
 */
#define ENABLE_INTERRUPTS(x)	sti
#define DISABLE_INTERRUPTS(x)	cli

#ifdef CONFIG_X86_64
#define SWAPGS	swapgs
/*
 * Currently paravirt can't handle swapgs nicely when we
 * don't have a stack we can rely on (such as a user space
 * stack).  So we either find a way around these or just fault
 * and emulate if a guest tries to call swapgs directly.
 *
 * Either way, this is a good way to document that we don't
 * have a reliable stack. x86_64 only.
 */
#define SWAPGS_UNSAFE_STACK	swapgs

/* Native exception frames need no adjustment; expands to nothing. */
#define PARAVIRT_ADJUST_EXCEPTION_FRAME	/*  */

#define INTERRUPT_RETURN	iretq
/* Return to 64-bit userspace: restore user GS, then sysretq. */
#define USERGS_SYSRET64				\
	swapgs;					\
	sysretq;
/* Return to 32-bit userspace via sysretl. */
#define USERGS_SYSRET32				\
	swapgs;					\
	sysretl
/* 32-bit sysexit return path: swap GS, re-enable interrupts, exit. */
#define ENABLE_INTERRUPTS_SYSEXIT32		\
	swapgs;					\
	sti;					\
	sysexit

#else
/* 32-bit native equivalents. */
#define INTERRUPT_RETURN		iret
#define ENABLE_INTERRUPTS_SYSEXIT	sti; sysexit
#define GET_CR0_INTO_EAX		movl %cr0, %eax
#endif
151 
152 
153 #endif /* __ASSEMBLY__ */
154 #endif /* CONFIG_PARAVIRT */
155 
156 #ifndef __ASSEMBLY__
/*
 * Statement-style wrappers: assign the current (or save-and-disable)
 * EFLAGS into the caller-supplied lvalue.  do { } while (0) keeps each
 * usable as a single statement.
 */
#define raw_local_save_flags(flags)				\
	do { (flags) = __raw_local_save_flags(); } while (0)

#define raw_local_irq_save(flags)				\
	do { (flags) = __raw_local_irq_save(); } while (0)
162 
163 static inline int raw_irqs_disabled_flags(unsigned long flags)
164 {
165 	return !(flags & X86_EFLAGS_IF);
166 }
167 
/*
 * Are interrupts currently disabled on this CPU?  Reads the live
 * EFLAGS and tests its IF bit.
 */
static inline int raw_irqs_disabled(void)
{
	return raw_irqs_disabled_flags(__raw_local_save_flags());
}
174 
175 #else

#ifdef CONFIG_X86_64
/* 64-bit: the thunk saves/restores registers itself. */
#define ARCH_LOCKDEP_SYS_EXIT		call lockdep_sys_exit_thunk
/*
 * Called with interrupts off: re-enable them (with tracing) around the
 * lockdep hook, saving/restoring the pt_regs remainder, then disable
 * them again before returning to the caller's context.
 */
#define ARCH_LOCKDEP_SYS_EXIT_IRQ	\
	TRACE_IRQS_ON; \
	sti; \
	SAVE_REST; \
	LOCKDEP_SYS_EXIT; \
	RESTORE_REST; \
	cli; \
	TRACE_IRQS_OFF;

#else
/*
 * 32-bit: preserve the caller-clobbered registers manually around the
 * C call to lockdep_sys_exit.
 */
#define ARCH_LOCKDEP_SYS_EXIT			\
	pushl %eax;				\
	pushl %ecx;				\
	pushl %edx;				\
	call lockdep_sys_exit;			\
	popl %edx;				\
	popl %ecx;				\
	popl %eax;

/* No IRQ variant needed on 32-bit. */
#define ARCH_LOCKDEP_SYS_EXIT_IRQ
#endif
200 
/* IRQ-flag tracing hooks: real thunk calls when tracing is built in, empty otherwise. */
#ifdef CONFIG_TRACE_IRQFLAGS
#  define TRACE_IRQS_ON		call trace_hardirqs_on_thunk;
#  define TRACE_IRQS_OFF	call trace_hardirqs_off_thunk;
#else
#  define TRACE_IRQS_ON
#  define TRACE_IRQS_OFF
#endif
/* Lockdep syscall-exit hooks: wired to the arch versions only under lock debugging. */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#  define LOCKDEP_SYS_EXIT	ARCH_LOCKDEP_SYS_EXIT
#  define LOCKDEP_SYS_EXIT_IRQ	ARCH_LOCKDEP_SYS_EXIT_IRQ
# else
#  define LOCKDEP_SYS_EXIT
#  define LOCKDEP_SYS_EXIT_IRQ
# endif
215 
216 #endif /* __ASSEMBLY__ */
217 #endif
218