/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_SPECIAL_INSNS_H
#define _ASM_X86_SPECIAL_INSNS_H

#ifdef __KERNEL__
#include <asm/nops.h>
#include <asm/processor-flags.h>

#include <linux/errno.h>
#include <linux/irqflags.h>
#include <linux/jump_label.h>

void native_write_cr0(unsigned long val);

static inline unsigned long native_read_cr0(void)
{
	unsigned long val;
	asm volatile("mov %%cr0,%0" : "=r" (val));
	return val;
}

static __always_inline unsigned long native_read_cr2(void)
{
	unsigned long val;
	asm volatile("mov %%cr2,%0" : "=r" (val));
	return val;
}

static __always_inline void native_write_cr2(unsigned long val)
{
	asm volatile("mov %0,%%cr2": : "r" (val) : "memory");
}

static __always_inline unsigned long __native_read_cr3(void)
{
	unsigned long val;
	asm volatile("mov %%cr3,%0" : "=r" (val));
	return val;
}

static __always_inline void native_write_cr3(unsigned long val)
{
	asm volatile("mov %0,%%cr3": : "r" (val) : "memory");
}

static inline unsigned long native_read_cr4(void)
{
	unsigned long val;
#ifdef CONFIG_X86_32
	/*
	 * This could fault if CR4 does not exist. Non-existent CR4
	 * is functionally equivalent to CR4 == 0. Keep it simple and pretend
	 * that CR4 == 0 on CPUs that don't have CR4.
	 */
	asm volatile("1: mov %%cr4, %0\n"
		     "2:\n"
		     _ASM_EXTABLE(1b, 2b)
		     : "=r" (val) : "0" (0));
#else
	/* CR4 always exists on x86_64. */
	asm volatile("mov %%cr4,%0" : "=r" (val));
#endif
	return val;
}

void native_write_cr4(unsigned long val);

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
static inline u32 rdpkru(void)
{
	u32 ecx = 0;
	u32 edx, pkru;

	/*
	 * "rdpkru" instruction. Places PKRU contents into EAX,
	 * clears EDX and requires that ecx = 0.
	 */
	asm volatile(".byte 0x0f,0x01,0xee\n\t"
		     : "=a" (pkru), "=d" (edx)
		     : "c" (ecx));
	return pkru;
}

static inline void wrpkru(u32 pkru)
{
	u32 ecx = 0, edx = 0;

	/*
	 * "wrpkru" instruction. Loads the contents of EAX into PKRU
	 * and requires that ecx = edx = 0.
	 */
	asm volatile(".byte 0x0f,0x01,0xef\n\t"
		     : : "a" (pkru), "c"(ecx), "d"(edx));
}

#else
static inline u32 rdpkru(void)
{
	return 0;
}

static inline void wrpkru(u32 pkru)
{
}
#endif
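
/*
 * Usage sketch: PKRU holds two bits per protection key, an access-disable
 * bit at bit position 2*pkey and a write-disable bit at 2*pkey + 1. A
 * caller revoking access rights for a key therefore does a
 * read-modify-write, roughly:
 *
 *	u32 pkru = rdpkru();
 *
 *	pkru |= 0x1 << (2 * pkey);	// set the access-disable bit
 *	wrpkru(pkru);
 *
 * The real bit definitions and helpers live in asm/pkru.h; the snippet
 * above only illustrates how these two wrappers are meant to be paired.
 */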

static __always_inline void wbinvd(void)
{
	asm volatile("wbinvd": : :"memory");
}

static inline unsigned long __read_cr4(void)
{
	return native_read_cr4();
}

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else

static inline unsigned long read_cr0(void)
{
	return native_read_cr0();
}

static inline void write_cr0(unsigned long x)
{
	native_write_cr0(x);
}

static __always_inline unsigned long read_cr2(void)
{
	return native_read_cr2();
}

static __always_inline void write_cr2(unsigned long x)
{
	native_write_cr2(x);
}

/*
 * Careful! CR3 contains more than just an address. You probably want
 * read_cr3_pa() instead.
 */
static inline unsigned long __read_cr3(void)
{
	return __native_read_cr3();
}

static inline void write_cr3(unsigned long x)
{
	native_write_cr3(x);
}

static inline void __write_cr4(unsigned long x)
{
	native_write_cr4(x);
}
#endif /* CONFIG_PARAVIRT_XXL */

static __always_inline void clflush(volatile void *__p)
{
	asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
}

static inline void clflushopt(volatile void *__p)
{
	alternative_io("ds clflush %0",
		       "clflushopt %0", X86_FEATURE_CLFLUSHOPT,
		       "+m" (*(volatile char __force *)__p));
}

static inline void clwb(volatile void *__p)
{
	volatile struct { char x[64]; } *p = __p;

	asm_inline volatile(ALTERNATIVE_2(
		"ds clflush %0",
		"clflushopt %0", X86_FEATURE_CLFLUSHOPT,
		"clwb %0", X86_FEATURE_CLWB)
		: "+m" (*p));
}

#ifdef CONFIG_X86_USER_SHADOW_STACK
static inline int write_user_shstk_64(u64 __user *addr, u64 val)
{
	asm goto("1: wrussq %[val], %[addr]\n"
		 _ASM_EXTABLE(1b, %l[fail])
		 :: [addr] "m" (*addr), [val] "r" (val)
		 :: fail);
	return 0;
fail:
	return -EFAULT;
}
#endif /* CONFIG_X86_USER_SHADOW_STACK */

#define nop() asm volatile ("nop")

static __always_inline void serialize(void)
{
	/* Instruction opcode for SERIALIZE; supported in binutils >= 2.35. */
	asm volatile(".byte 0xf, 0x1, 0xe8" ::: "memory");
}

/* The dst parameter must be 64-byte aligned. */
static inline void movdir64b(void *dst, const void *src)
{
	const struct { char _[64]; } *__src = src;
	struct { char _[64]; } *__dst = dst;

	/*
	 * MOVDIR64B %(rdx), rax.
	 *
	 * Both __src and __dst must be memory constraints in order to tell the
	 * compiler that no other memory accesses should be reordered around
	 * this one.
	 *
	 * Also, both must be supplied as lvalues because this tells the
	 * compiler what object (and what size) the instruction accesses,
	 * i.e. not the pointers but what they point to, thus the deref'ing '*'.
	 */
	asm volatile(".byte 0x66, 0x0f, 0x38, 0xf8, 0x02"
		     : "+m" (*__dst)
		     : "m" (*__src), "a" (__dst), "d" (__src));
}

static inline void movdir64b_io(void __iomem *dst, const void *src)
{
	movdir64b((void __force *)dst, src);
}

/**
 * enqcmds - Enqueue a command in supervisor (CPL0) mode
 * @dst: destination, in MMIO space (must be 512-bit aligned)
 * @src: 512-bit memory operand
 *
 * The ENQCMDS instruction allows software to write a 512-bit command to
 * a 512-bit-aligned special MMIO region that supports the instruction.
 * A return status is loaded into the ZF flag in the RFLAGS register.
 * ZF = 0 equates to success, and ZF = 1 indicates retry or error.
 *
 * This function issues the ENQCMDS instruction to submit data from
 * kernel space to MMIO space, in a unit of 512 bits. Order of data access
 * is not guaranteed, nor is a memory barrier performed afterwards. It
 * returns 0 on success and -EAGAIN on failure.
 *
 * Warning: Do not use this helper unless your driver has checked that the
 * ENQCMDS instruction is supported on the platform and the device accepts
 * ENQCMDS.
 */
static inline int enqcmds(void __iomem *dst, const void *src)
{
	const struct { char _[64]; } *__src = src;
	struct { char _[64]; } __iomem *__dst = dst;
	bool zf;

	/*
	 * ENQCMDS %(rdx), rax
	 *
	 * See movdir64b()'s comment on operand specification.
	 */
	asm volatile(".byte 0xf3, 0x0f, 0x38, 0xf8, 0x02, 0x66, 0x90"
		     CC_SET(z)
		     : CC_OUT(z) (zf), "+m" (*__dst)
		     : "m" (*__src), "a" (__dst), "d" (__src));

	/* Submission failure is indicated via EFLAGS.ZF=1 */
	if (zf)
		return -EAGAIN;

	return 0;
}
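
/*
 * Usage sketch: a driver that has verified ENQCMDS support would submit a
 * 64-byte descriptor to a device portal roughly like this:
 *
 *	int ret;
 *
 *	ret = enqcmds(portal, desc);
 *	if (ret)
 *		return ret;	// -EAGAIN: not accepted, retry later
 *
 * where 'portal' is an ioremap()ed, 64-byte-aligned MMIO register and
 * 'desc' is a 64-byte command in regular kernel memory ('portal' and
 * 'desc' are illustrative names, not part of this interface).
 * movdir64b()/movdir64b_io() above are used the same way when the device
 * does not report a submission status.
 */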

static __always_inline void tile_release(void)
{
	/*
	 * Instruction opcode for TILERELEASE; supported in binutils
	 * version >= 2.36.
	 */
	asm volatile(".byte 0xc4, 0xe2, 0x78, 0x49, 0xc0");
}

#endif /* __KERNEL__ */

#endif /* _ASM_X86_SPECIAL_INSNS_H */