xref: /linux/arch/x86/include/asm/irqflags.h (revision 0b8061c340b643e01da431dd60c75a41bb1d31ec)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _X86_IRQFLAGS_H_
3 #define _X86_IRQFLAGS_H_
4 
5 #include <asm/processor-flags.h>
6 
7 #ifndef __ASSEMBLY__
8 
9 #include <asm/nospec-branch.h>
10 
/*
 * Provide __cpuidle locally; we can't safely include <linux/cpu.h>
 * from this low-level header, so replicate its section attribute here.
 * Functions marked __cpuidle are placed in the .cpuidle.text section.
 */
#define __cpuidle __section(".cpuidle.text")
13 
14 /*
15  * Interrupt control:
16  */
17 
18 /* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */
/* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */
extern inline unsigned long native_save_fl(void);
/*
 * Read the current EFLAGS register via pushf/pop.
 *
 * Note the unusual "extern inline" + __always_inline combination: the
 * function must be force-inlined into its callers, yet an out-of-line
 * definition with external linkage must also exist (paravirt code takes
 * its address), which is what extern inline provides under gnu_inline
 * semantics.
 *
 * Returns: the raw EFLAGS value; callers typically test X86_EFLAGS_IF.
 */
extern __always_inline unsigned long native_save_fl(void)
{
	unsigned long flags;

	/*
	 * "=rm" is safe here, because "pop" adjusts the stack before
	 * it evaluates its effective address -- this is part of the
	 * documented behavior of the "pop" instruction.
	 */
	asm volatile("# __raw_save_flags\n\t"
		     "pushf ; pop %0"
		     : "=rm" (flags)
		     : /* no input */
		     : "memory");

	return flags;
}
37 
38 static __always_inline void native_irq_disable(void)
39 {
40 	asm volatile("cli": : :"memory");
41 }
42 
43 static __always_inline void native_irq_enable(void)
44 {
45 	asm volatile("sti": : :"memory");
46 }
47 
/*
 * Atomically enable interrupts and halt.
 *
 * "sti; hlt" must stay a single asm statement: sti only takes effect
 * after the following instruction, so an interrupt arriving between the
 * two cannot be lost -- it wakes the hlt instead of slipping in before
 * it (see the "sti takes one instruction cycle" note below).
 *
 * mds_idle_clear_cpu_buffers() presumably flushes CPU buffers as an MDS
 * mitigation before idling -- defined in <asm/nospec-branch.h>.
 */
static inline __cpuidle void native_safe_halt(void)
{
	mds_idle_clear_cpu_buffers();
	asm volatile("sti; hlt": : :"memory");
}
53 
/*
 * Halt without touching the interrupt flag.  If interrupts are disabled
 * on entry, only an NMI/SMI (or reset) will resume execution -- used
 * for final shutdown paths as well as idling with IRQs already on.
 *
 * mds_idle_clear_cpu_buffers() presumably flushes CPU buffers as an MDS
 * mitigation before idling -- defined in <asm/nospec-branch.h>.
 */
static inline __cpuidle void native_halt(void)
{
	mds_idle_clear_cpu_buffers();
	asm volatile("hlt": : :"memory");
}
59 
60 #endif
61 
62 #ifdef CONFIG_PARAVIRT_XXL
63 #include <asm/paravirt.h>
64 #else
65 #ifndef __ASSEMBLY__
66 #include <linux/types.h>
67 
68 static __always_inline unsigned long arch_local_save_flags(void)
69 {
70 	return native_save_fl();
71 }
72 
/* Non-paravirt hook: disable local interrupts (cli). */
static __always_inline void arch_local_irq_disable(void)
{
	native_irq_disable();
}
77 
/* Non-paravirt hook: enable local interrupts (sti). */
static __always_inline void arch_local_irq_enable(void)
{
	native_irq_enable();
}
82 
/*
 * Used in the idle loop; sti takes one instruction cycle
 * to complete:
 */
/* Non-paravirt hook: atomically enable interrupts and halt. */
static inline __cpuidle void arch_safe_halt(void)
{
	native_safe_halt();
}
91 
/*
 * Used when interrupts are already enabled or to
 * shutdown the processor:
 */
/* Non-paravirt hook: halt without changing the interrupt flag. */
static inline __cpuidle void halt(void)
{
	native_halt();
}
100 
101 /*
102  * For spinlocks, etc:
103  */
104 static __always_inline unsigned long arch_local_irq_save(void)
105 {
106 	unsigned long flags = arch_local_save_flags();
107 	arch_local_irq_disable();
108 	return flags;
109 }
110 #else
111 
/* Assembly-side fallbacks when CONFIG_PARAVIRT_XXL is off: plain insns. */
#define ENABLE_INTERRUPTS(x)	sti
#define DISABLE_INTERRUPTS(x)	cli

#ifdef CONFIG_X86_64
#ifdef CONFIG_DEBUG_ENTRY
/* Capture EFLAGS into %rax for entry-code debug checks. */
#define SAVE_FLAGS(x)		pushfq; popq %rax
#endif

#define INTERRUPT_RETURN	jmp native_iret

#else
/* 32-bit: a bare iret suffices. */
#define INTERRUPT_RETURN		iret
#endif
125 
126 #endif /* __ASSEMBLY__ */
127 #endif /* CONFIG_PARAVIRT_XXL */
128 
129 #ifndef __ASSEMBLY__
130 static __always_inline int arch_irqs_disabled_flags(unsigned long flags)
131 {
132 	return !(flags & X86_EFLAGS_IF);
133 }
134 
135 static __always_inline int arch_irqs_disabled(void)
136 {
137 	unsigned long flags = arch_local_save_flags();
138 
139 	return arch_irqs_disabled_flags(flags);
140 }
141 
142 static __always_inline void arch_local_irq_restore(unsigned long flags)
143 {
144 	if (!arch_irqs_disabled_flags(flags))
145 		arch_local_irq_enable();
146 }
147 #else
#ifdef CONFIG_X86_64
#ifdef CONFIG_XEN_PV
/*
 * Under Xen PV the swapgs is patched out at runtime via ALTERNATIVE
 * when X86_FEATURE_XENPV is set -- presumably because the hypervisor
 * manages the GS base for PV guests; confirm against Xen entry code.
 */
#define SWAPGS	ALTERNATIVE "swapgs", "", X86_FEATURE_XENPV
#else
#define SWAPGS	swapgs
#endif
#endif
155 #endif /* !__ASSEMBLY__ */
156 
157 #endif
158