/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MWAIT_H
#define _ASM_X86_MWAIT_H

#include <linux/sched.h>
#include <linux/sched/idle.h>

#include <asm/cpufeature.h>
#include <asm/nospec-branch.h>

#define MWAIT_SUBSTATE_MASK		0xf
#define MWAIT_CSTATE_MASK		0xf
#define MWAIT_SUBSTATE_SIZE		4
#define MWAIT_HINT2CSTATE(hint)		(((hint) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK)
#define MWAIT_HINT2SUBSTATE(hint)	((hint) & MWAIT_CSTATE_MASK)
#define MWAIT_C1_SUBSTATE_MASK		0xf0

#define CPUID5_ECX_EXTENSIONS_SUPPORTED	0x1
#define CPUID5_ECX_INTERRUPT_BREAK	0x2

#define MWAIT_ECX_INTERRUPT_BREAK	0x1
#define MWAITX_ECX_TIMER_ENABLE		BIT(1)
#define MWAITX_MAX_WAIT_CYCLES		UINT_MAX
#define MWAITX_DISABLE_CSTATES		0xf0
#define TPAUSE_C01_STATE		1
#define TPAUSE_C02_STATE		0

static __always_inline void __monitor(const void *eax, unsigned long ecx,
				      unsigned long edx)
{
	/* "monitor %eax, %ecx, %edx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc8;"
		     :: "a" (eax), "c" (ecx), "d"(edx));
}

static __always_inline void __monitorx(const void *eax, unsigned long ecx,
				       unsigned long edx)
{
	/* "monitorx %eax, %ecx, %edx;" */
	asm volatile(".byte 0x0f, 0x01, 0xfa;"
		     :: "a" (eax), "c" (ecx), "d"(edx));
}

static __always_inline void __mwait(unsigned long eax, unsigned long ecx)
{
	mds_idle_clear_cpu_buffers();

	/* "mwait %eax, %ecx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}

/*
 * MWAITX allows for a timer expiration to get the core out of a wait state
 * in addition to the default MWAIT exit condition of a store appearing at a
 * monitored virtual address.
 *
 * Registers:
 *
 * MWAITX ECX[1]: enable timer if set
 * MWAITX EBX[31:0]: max wait time expressed in SW P0 clocks. The software P0
 * frequency is the same as the TSC frequency.
 *
 * Below is a comparison between MWAIT and MWAITX on AMD processors:
 *
 *                 MWAIT                           MWAITX
 * opcode          0f 01 c9           |            0f 01 fb
 * ECX[0]                  value of RFLAGS.IF seen by instruction
 * ECX[1]          unused/#GP if set  |            enable timer if set
 * ECX[31:2]                     unused/#GP if set
 * EAX                           unused (reserved for hint)
 * EBX[31:0]       unused             |            max wait time (P0 clocks)
 *
 *                 MONITOR                         MONITORX
 * opcode          0f 01 c8           |            0f 01 fa
 * EAX                     (logical) address to monitor
 * ECX                     #GP if not zero
 */
static __always_inline void __mwaitx(unsigned long eax, unsigned long ebx,
				     unsigned long ecx)
{
	/* No MDS buffer clear as this is AMD/HYGON only */

	/* "mwaitx %eax, %ebx, %ecx;" */
	asm volatile(".byte 0x0f, 0x01, 0xfb;"
		     :: "a" (eax), "b" (ebx), "c" (ecx));
}
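
/*
 * Illustrative sketch (not part of the upstream header, hence #if 0): a
 * one-shot bounded wait using the MWAITX timer described above. Loosely
 * modeled on delay_mwaitx() in arch/x86/lib/delay.c; the function name and
 * the use of an on-stack dummy as the monitor target are assumptions made
 * for this example (the real delay code monitors a quiet per-CPU cache
 * line instead).
 */
#if 0
static void example_mwaitx_wait(u32 cycles)
{
	u8 dummy;

	/* Arm the monitor on a harmless local address. */
	__monitorx(&dummy, 0, 0);

	/*
	 * EAX = MWAITX_DISABLE_CSTATES requests no deep C-state (minimal
	 * wakeup latency), EBX carries the timeout in TSC-rate (P0) clocks
	 * and ECX[1] enables the timer, so the wait ends even if nothing
	 * ever stores to &dummy.
	 */
	__mwaitx(MWAITX_DISABLE_CSTATES, cycles, MWAITX_ECX_TIMER_ENABLE);
}
#endif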

/*
 * Re-enable interrupts right upon calling mwait in such a way that
 * no interrupt can fire _before_ the execution of mwait, i.e., no
 * instruction may be placed between "sti" and "mwait".
 *
 * This is necessary because if an interrupt queues a timer before
 * executing mwait, it would otherwise go unnoticed and the next tick
 * would not be reprogrammed accordingly before mwait ever wakes up.
 */
static __always_inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
	mds_idle_clear_cpu_buffers();
	/* "mwait %eax, %ecx;" */
	asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}

/*
 * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate the IPI to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter an optimized wait
 * state through MWAIT. Whenever someone changes need_resched, we are
 * woken up from MWAIT (without an IPI).
 *
 * New with Core Duo processors, MWAIT can take some hints based on CPU
 * capability.
 */
static __always_inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
{
	if (static_cpu_has_bug(X86_BUG_MONITOR) || !current_set_polling_and_test()) {
		if (static_cpu_has_bug(X86_BUG_CLFLUSH_MONITOR)) {
			mb();
			clflush((void *)&current_thread_info()->flags);
			mb();
		}

		__monitor((void *)&current_thread_info()->flags, 0, 0);

		if (!need_resched()) {
			if (ecx & 1) {
				__mwait(eax, ecx);
			} else {
				__sti_mwait(eax, ecx);
				raw_local_irq_disable();
			}
		}
	}
	current_clr_polling();
}

/*
 * Caller can specify whether to enter C0.1 (low latency, less
 * power saving) or C0.2 state (saves more power, but longer wakeup
 * latency). This may be overridden by the IA32_UMWAIT_CONTROL MSR
 * which can force requests for C0.2 to be downgraded to C0.1.
 */
static inline void __tpause(u32 ecx, u32 edx, u32 eax)
{
	/* "tpause %ecx, %edx, %eax;" */
	#ifdef CONFIG_AS_TPAUSE
	asm volatile("tpause %%ecx\n"
		     :
		     : "c"(ecx), "d"(edx), "a"(eax));
	#else
	asm volatile(".byte 0x66, 0x0f, 0xae, 0xf1\t\n"
		     :
		     : "c"(ecx), "d"(edx), "a"(eax));
	#endif
}

#endif /* _ASM_X86_MWAIT_H */
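
/*
 * Illustrative sketch (not part of the upstream header, hence #if 0):
 * programming a TPAUSE deadline. EDX:EAX carry the absolute TSC value at
 * which the wait ends and ECX selects C0.1 or C0.2 as described above.
 * Loosely modeled on delay_halt_tpause() in arch/x86/lib/delay.c; the
 * function name and the rdtsc_ordered() deadline math are assumptions made
 * for this example (rdtsc_ordered() and the 32-bit split helpers live in
 * headers not included here).
 */
#if 0
static void example_tpause_delay(u64 cycles)
{
	u64 until = rdtsc_ordered() + cycles;

	/*
	 * Request the deeper C0.2 state; the IA32_UMWAIT_CONTROL MSR may
	 * still downgrade this to C0.1.
	 */
	__tpause(TPAUSE_C02_STATE, upper_32_bits(until), lower_32_bits(until));
}
#endif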