/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_SPECIAL_INSNS_H
#define _ASM_X86_SPECIAL_INSNS_H

#ifdef __KERNEL__
#include <asm/nops.h>
#include <asm/processor-flags.h>

#include <linux/errno.h>
#include <linux/irqflags.h>
#include <linux/jump_label.h>

void native_write_cr0(unsigned long val);

static inline unsigned long native_read_cr0(void)
{
	unsigned long val;
	asm volatile("mov %%cr0,%0" : "=r" (val));
	return val;
}

static __always_inline unsigned long native_read_cr2(void)
{
	unsigned long val;
	asm volatile("mov %%cr2,%0" : "=r" (val));
	return val;
}

static __always_inline void native_write_cr2(unsigned long val)
{
	asm volatile("mov %0,%%cr2": : "r" (val) : "memory");
}

static __always_inline unsigned long __native_read_cr3(void)
{
	unsigned long val;
	asm volatile("mov %%cr3,%0" : "=r" (val));
	return val;
}

static __always_inline void native_write_cr3(unsigned long val)
{
	asm volatile("mov %0,%%cr3": : "r" (val) : "memory");
}

static inline unsigned long native_read_cr4(void)
{
	unsigned long val;
#ifdef CONFIG_X86_32
	/*
	 * This could fault if CR4 does not exist.  Non-existent CR4 is
	 * functionally equivalent to CR4 == 0.  Keep it simple and pretend
	 * that CR4 == 0 on CPUs that don't have CR4.
	 */
	asm volatile("1: mov %%cr4, %0\n"
		     "2:\n"
		     _ASM_EXTABLE(1b, 2b)
		     : "=r" (val) : "0" (0));
#else
	/* CR4 always exists on x86_64. */
	asm volatile("mov %%cr4,%0" : "=r" (val));
#endif
	return val;
}

void native_write_cr4(unsigned long val);

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
static inline u32 rdpkru(void)
{
	u32 ecx = 0;
	u32 edx, pkru;

	/*
	 * "rdpkru" instruction.  Places PKRU contents into EAX,
	 * clears EDX and requires that ECX = 0.
	 */
	asm volatile(".byte 0x0f,0x01,0xee\n\t"
		     : "=a" (pkru), "=d" (edx)
		     : "c" (ecx));
	return pkru;
}

static inline void wrpkru(u32 pkru)
{
	u32 ecx = 0, edx = 0;

	/*
	 * "wrpkru" instruction.  Loads the contents of EAX into PKRU,
	 * requires that ECX = EDX = 0.
	 */
	asm volatile(".byte 0x0f,0x01,0xef\n\t"
		     : : "a" (pkru), "c"(ecx), "d"(edx));
}

#else
static inline u32 rdpkru(void)
{
	return 0;
}

static inline void wrpkru(u32 pkru)
{
}
#endif
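
/*
 * Illustrative sketch only (not part of this header's API): deny all access
 * through a given protection key by setting both of its PKRU bits.  Each key
 * owns a two-bit field in PKRU: bit 2*pkey is access-disable (AD) and bit
 * 2*pkey + 1 is write-disable (WD).  The example_ name and the caller-supplied
 * pkey are assumptions made for this example.
 */
static inline void example_pkey_deny_access(int pkey)
{
	u32 pkru = rdpkru();

	/* Set AD (0x1) and WD (0x2) in this key's two-bit field. */
	pkru |= 0x3u << (2 * pkey);
	wrpkru(pkru);
}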

/*
 * Write back all modified lines in all levels of cache associated with this
 * logical processor to main memory, and then invalidate all caches.  Depending
 * on the micro-architecture, WBINVD (and WBNOINVD below) may or may not affect
 * lower level caches associated with another logical processor that shares any
 * level of this processor's cache hierarchy.
 */
static __always_inline void wbinvd(void)
{
	asm volatile("wbinvd" : : : "memory");
}

/* Instruction encoding provided for binutils backwards compatibility. */
#define ASM_WBNOINVD _ASM_BYTES(0xf3,0x0f,0x09)

/*
 * Write back all modified lines in all levels of cache associated with this
 * logical processor to main memory, but do NOT explicitly invalidate caches,
 * i.e. leave all/most cache lines in the hierarchy in non-modified state.
 */
static __always_inline void wbnoinvd(void)
{
	/*
	 * Explicitly encode WBINVD if X86_FEATURE_WBNOINVD is unavailable even
	 * though WBNOINVD is backwards compatible (it's simply WBINVD with an
	 * ignored REP prefix), to guarantee that WBNOINVD isn't used if it
	 * needs to be avoided for any reason.  For all supported usage in the
	 * kernel, WBINVD is functionally a superset of WBNOINVD.
	 */
	alternative("wbinvd", ASM_WBNOINVD, X86_FEATURE_WBNOINVD);
}

static inline unsigned long __read_cr4(void)
{
	return native_read_cr4();
}

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else

static inline unsigned long read_cr0(void)
{
	return native_read_cr0();
}

static inline void write_cr0(unsigned long x)
{
	native_write_cr0(x);
}

static __always_inline unsigned long read_cr2(void)
{
	return native_read_cr2();
}

static __always_inline void write_cr2(unsigned long x)
{
	native_write_cr2(x);
}

/*
 * Careful!  CR3 contains more than just an address.  You probably want
 * read_cr3_pa() instead.
 */
static inline unsigned long __read_cr3(void)
{
	return __native_read_cr3();
}

static inline void write_cr3(unsigned long x)
{
	native_write_cr3(x);
}

static inline void __write_cr4(unsigned long x)
{
	native_write_cr4(x);
}
#endif /* CONFIG_PARAVIRT_XXL */

static __always_inline void clflush(volatile void *__p)
{
	asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
}

static inline void clflushopt(volatile void *__p)
{
	alternative_io("ds clflush %0",
		       "clflushopt %0", X86_FEATURE_CLFLUSHOPT,
		       "+m" (*(volatile char __force *)__p));
}

static inline void clwb(volatile void *__p)
{
	volatile struct { char x[64]; } *p = __p;

	asm_inline volatile(ALTERNATIVE_2(
		"ds clflush %0",
		"clflushopt %0", X86_FEATURE_CLFLUSHOPT,
		"clwb %0", X86_FEATURE_CLWB)
		: "+m" (*p));
}

#ifdef CONFIG_X86_USER_SHADOW_STACK
static inline int write_user_shstk_64(u64 __user *addr, u64 val)
{
	asm goto("1: wrussq %[val], %[addr]\n"
		 _ASM_EXTABLE(1b, %l[fail])
		 :: [addr] "m" (*addr), [val] "r" (val)
		 :: fail);
	return 0;
fail:
	return -EFAULT;
}
#endif /* CONFIG_X86_USER_SHADOW_STACK */

#define nop() asm volatile ("nop")

static __always_inline void serialize(void)
{
	/* Instruction opcode for SERIALIZE; supported in binutils >= 2.35. */
	asm volatile(".byte 0xf, 0x1, 0xe8" ::: "memory");
}

/* The dst parameter must be 64-byte aligned. */
static inline void movdir64b(void *dst, const void *src)
{
	const struct { char _[64]; } *__src = src;
	struct { char _[64]; } *__dst = dst;

	/*
	 * MOVDIR64B %(rdx), rax.
	 *
	 * Both __src and __dst must be memory constraints in order to tell the
	 * compiler that no other memory accesses should be reordered around
	 * this one.
	 *
	 * Also, both must be supplied as lvalues because this tells the
	 * compiler what the object is (its size) the instruction accesses.
	 * I.e., not the pointers but what they point to, thus the deref'ing '*'.
	 */
	asm volatile(".byte 0x66, 0x0f, 0x38, 0xf8, 0x02"
		     : "+m" (*__dst)
		     : "m" (*__src), "a" (__dst), "d" (__src));
}

static inline void movdir64b_io(void __iomem *dst, const void *src)
{
	movdir64b((void __force *)dst, src);
}
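
/*
 * Illustrative sketch only (not part of this header's API): post one 64-byte
 * work descriptor to a device portal with a single, non-torn MOVDIR64B store.
 * The example_ name is an assumption made for this example; "portal" must be
 * a 64-byte aligned MMIO address that accepts 64-byte direct stores, and
 * "desc" must point at 64 bytes of prepared descriptor data.
 */
static inline void example_post_descriptor(void __iomem *portal,
					   const void *desc)
{
	movdir64b_io(portal, desc);
}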

/**
 * enqcmds - Enqueue a command in supervisor (CPL0) mode
 * @dst: destination, in MMIO space (must be 512-bit aligned)
 * @src: 512-bit memory operand
 *
 * The ENQCMDS instruction allows software to write a 512-bit command to
 * a 512-bit-aligned special MMIO region that supports the instruction.
 * A return status is loaded into the ZF flag in the RFLAGS register.
 * ZF = 0 equates to success, and ZF = 1 indicates retry or error.
 *
 * This function issues the ENQCMDS instruction to submit data from
 * kernel space to MMIO space, in a unit of 512 bits.  Order of data access
 * is not guaranteed, nor is a memory barrier performed afterwards.  It
 * returns 0 on success and -EAGAIN on failure.
 *
 * Warning: Do not use this helper unless your driver has checked that the
 * ENQCMDS instruction is supported on the platform and the device accepts
 * ENQCMDS.
 */
static inline int enqcmds(void __iomem *dst, const void *src)
{
	const struct { char _[64]; } *__src = src;
	struct { char _[64]; } __iomem *__dst = dst;
	bool zf;

	/*
	 * ENQCMDS %(rdx), rax
	 *
	 * See movdir64b()'s comment on operand specification.
	 */
	asm volatile(".byte 0xf3, 0x0f, 0x38, 0xf8, 0x02, 0x66, 0x90"
		     CC_SET(z)
		     : CC_OUT(z) (zf), "+m" (*__dst)
		     : "m" (*__src), "a" (__dst), "d" (__src));

	/* Submission failure is indicated via EFLAGS.ZF=1 */
	if (zf)
		return -EAGAIN;

	return 0;
}

static __always_inline void tile_release(void)
{
	/*
	 * Instruction opcode for TILERELEASE; supported in binutils
	 * version >= 2.36.
	 */
	asm volatile(".byte 0xc4, 0xe2, 0x78, 0x49, 0xc0");
}

#endif /* __KERNEL__ */

#endif /* _ASM_X86_SPECIAL_INSNS_H */