/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MSR_H
#define _ASM_X86_MSR_H

#include "msr-index.h"

#ifndef __ASSEMBLER__

#include <asm/asm.h>
#include <asm/errno.h>
#include <asm/cpumask.h>
#include <uapi/asm/msr.h>
#include <asm/shared/msr.h>

#include <linux/types.h>
#include <linux/percpu.h>

struct msr_info {
	u32			msr_no;
	struct msr		reg;
	struct msr __percpu	*msrs;
	int			err;
};

struct msr_regs_info {
	u32 *regs;
	int err;
};

struct saved_msr {
	bool valid;
	struct msr_info info;
};

struct saved_msrs {
	unsigned int num;
	struct saved_msr *array;
};

/*
 * Be very careful with includes. This header is prone to include loops.
 */
#include <asm/atomic.h>
#include <linux/tracepoint-defs.h>

#ifdef CONFIG_TRACEPOINTS
DECLARE_TRACEPOINT(read_msr);
DECLARE_TRACEPOINT(write_msr);
DECLARE_TRACEPOINT(rdpmc);
extern void do_trace_write_msr(u32 msr, u64 val, int failed);
extern void do_trace_read_msr(u32 msr, u64 val, int failed);
extern void do_trace_rdpmc(u32 msr, u64 val, int failed);
#else
static inline void do_trace_write_msr(u32 msr, u64 val, int failed) {}
static inline void do_trace_read_msr(u32 msr, u64 val, int failed) {}
static inline void do_trace_rdpmc(u32 msr, u64 val, int failed) {}
#endif

/*
 * __rdmsr() and __wrmsrq() are the two primitives which are the bare minimum MSR
 * accessors and should not have any tracing or other functionality piggybacking
 * on them - those are *purely* for accessing MSRs and nothing more. So don't even
 * think of extending them - you will be slapped with a stinking trout or a frozen
 * shark will reach you, wherever you are! You've been warned.
 */
static __always_inline u64 __rdmsr(u32 msr)
{
	EAX_EDX_DECLARE_ARGS(val, low, high);

	asm volatile("1: rdmsr\n"
		     "2:\n"
		     _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_RDMSR)
		     : EAX_EDX_RET(val, low, high) : "c" (msr));

	return EAX_EDX_VAL(val, low, high);
}

static __always_inline void __wrmsrq(u32 msr, u64 val)
{
	asm volatile("1: wrmsr\n"
		     "2:\n"
		     _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_WRMSR)
		     : : "c" (msr), "a" ((u32)val), "d" ((u32)(val >> 32)) : "memory");
}
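
/*
 * Illustrative sketch (editor's addition, not part of the upstream header),
 * to make the warning above concrete: the bare primitives skip the MSR
 * tracepoints entirely, while native_read_msr()/native_write_msr() below
 * (and the rdmsrq()/wrmsrq() wrappers built on them) also fire the
 * read_msr/write_msr tracepoints.  MSR_IA32_SPEC_CTRL is only an example
 * MSR here:
 *
 *	u64 ctrl = __rdmsr(MSR_IA32_SPEC_CTRL);       untraced read
 *	__wrmsrq(MSR_IA32_SPEC_CTRL, ctrl);           untraced write
 *
 *	ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);   traced read
 *	native_write_msr(MSR_IA32_SPEC_CTRL, ctrl);   traced write
 */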

#define native_rdmsr(msr, val1, val2)			\
do {							\
	u64 __val = __rdmsr((msr));			\
	(void)((val1) = (u32)__val);			\
	(void)((val2) = (u32)(__val >> 32));		\
} while (0)

static __always_inline u64 native_rdmsrq(u32 msr)
{
	return __rdmsr(msr);
}

#define native_wrmsr(msr, low, high)			\
	__wrmsrq((msr), (u64)(high) << 32 | (low))

#define native_wrmsrq(msr, val)				\
	__wrmsrq((msr), (val))

static inline u64 native_read_msr(u32 msr)
{
	u64 val;

	val = __rdmsr(msr);

	if (tracepoint_enabled(read_msr))
		do_trace_read_msr(msr, val, 0);

	return val;
}

static inline int native_read_msr_safe(u32 msr, u64 *p)
{
	int err;
	EAX_EDX_DECLARE_ARGS(val, low, high);

	asm volatile("1: rdmsr ; xor %[err],%[err]\n"
		     "2:\n\t"
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_RDMSR_SAFE, %[err])
		     : [err] "=r" (err), EAX_EDX_RET(val, low, high)
		     : "c" (msr));
	if (tracepoint_enabled(read_msr))
		do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), err);

	*p = EAX_EDX_VAL(val, low, high);

	return err;
}

/* Can be uninlined because referenced by paravirt */
static inline void notrace native_write_msr(u32 msr, u64 val)
{
	native_wrmsrq(msr, val);

	if (tracepoint_enabled(write_msr))
		do_trace_write_msr(msr, val, 0);
}

/* Can be uninlined because referenced by paravirt */
static inline int notrace native_write_msr_safe(u32 msr, u64 val)
{
	int err;

	asm volatile("1: wrmsr ; xor %[err],%[err]\n"
		     "2:\n\t"
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_WRMSR_SAFE, %[err])
		     : [err] "=a" (err)
		     : "c" (msr), "0" ((u32)val), "d" ((u32)(val >> 32))
		     : "memory");
	if (tracepoint_enabled(write_msr))
		do_trace_write_msr(msr, val, err);
	return err;
}

extern int rdmsr_safe_regs(u32 regs[8]);
extern int wrmsr_safe_regs(u32 regs[8]);

static inline u64 native_read_pmc(int counter)
{
	EAX_EDX_DECLARE_ARGS(val, low, high);

	asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter));
	if (tracepoint_enabled(rdpmc))
		do_trace_rdpmc(counter, EAX_EDX_VAL(val, low, high), 0);
	return EAX_EDX_VAL(val, low, high);
}

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
#include <linux/errno.h>
/*
 * Access to machine-specific registers (available on 586 and better only)
 * Note: the rd* operations modify the parameters directly (without using
 * pointer indirection), this allows gcc to optimize better
 */

#define rdmsr(msr, low, high)					\
do {								\
	u64 __val = native_read_msr((msr));			\
	(void)((low) = (u32)__val);				\
	(void)((high) = (u32)(__val >> 32));			\
} while (0)

static inline void wrmsr(u32 msr, u32 low, u32 high)
{
	native_write_msr(msr, (u64)high << 32 | low);
}

#define rdmsrq(msr, val)			\
	((val) = native_read_msr((msr)))

static inline void wrmsrq(u32 msr, u64 val)
{
	native_write_msr(msr, val);
}

/* wrmsr with exception handling */
static inline int wrmsrq_safe(u32 msr, u64 val)
{
	return native_write_msr_safe(msr, val);
}

/* rdmsr with exception handling */
#define rdmsr_safe(msr, low, high)				\
({								\
	u64 __val;						\
	int __err = native_read_msr_safe((msr), &__val);	\
	(*low) = (u32)__val;					\
	(*high) = (u32)(__val >> 32);				\
	__err;							\
})

static inline int rdmsrq_safe(u32 msr, u64 *p)
{
	return native_read_msr_safe(msr, p);
}

static __always_inline u64 rdpmc(int counter)
{
	return native_read_pmc(counter);
}

#endif	/* !CONFIG_PARAVIRT_XXL */
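
/*
 * Illustrative sketch (editor's addition, not part of the upstream header)
 * of how the accessors above are used.  rdmsr()/wrmsr() work on the two
 * 32-bit halves as plain lvalues (no pointer indirection, per the note
 * above), the 'q' variants take a single u64, and the _safe variants
 * return 0 on success or a negative error if the MSR access faults.
 * MSR_EFER is only an example MSR here:
 *
 *	u32 lo, hi;
 *	u64 efer;
 *	int err;
 *
 *	rdmsr(MSR_EFER, lo, hi);                assigns lo and hi directly
 *	rdmsrq(MSR_EFER, efer);                 full 64-bit read
 *	wrmsrq(MSR_EFER, efer);                 full 64-bit write
 *
 *	err = rdmsrq_safe(MSR_EFER, &efer);     0 or negative error
 *	err = wrmsrq_safe(MSR_EFER, efer);      ditto, for the write side
 */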

/* Instruction opcode for WRMSRNS supported in binutils >= 2.40 */
#define ASM_WRMSRNS		_ASM_BYTES(0x0f,0x01,0xc6)

/* Non-serializing WRMSR, when available. Falls back to a serializing WRMSR. */
static __always_inline void wrmsrns(u32 msr, u64 val)
{
	/*
	 * WRMSR is 2 bytes. WRMSRNS is 3 bytes. Pad WRMSR with a redundant
	 * DS prefix to avoid a trailing NOP.
	 */
	asm volatile("1: " ALTERNATIVE("ds wrmsr", ASM_WRMSRNS, X86_FEATURE_WRMSRNS)
		     "2: " _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_WRMSR)
		     : : "c" (msr), "a" ((u32)val), "d" ((u32)(val >> 32)));
}

/*
 * Dual u32 version of wrmsrq_safe():
 */
static inline int wrmsr_safe(u32 msr, u32 low, u32 high)
{
	return wrmsrq_safe(msr, (u64)high << 32 | low);
}

struct msr __percpu *msrs_alloc(void);
void msrs_free(struct msr __percpu *msrs);
int msr_set_bit(u32 msr, u8 bit);
int msr_clear_bit(u32 msr, u8 bit);

#ifdef CONFIG_SMP
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs);
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrq_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrq_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
#else /* CONFIG_SMP */
static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	rdmsr(msr_no, *l, *h);
	return 0;
}
static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	wrmsr(msr_no, l, h);
	return 0;
}
static inline int rdmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	rdmsrq(msr_no, *q);
	return 0;
}
static inline int wrmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	wrmsrq(msr_no, q);
	return 0;
}
static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
				 struct msr __percpu *msrs)
{
	rdmsr_on_cpu(0, msr_no, raw_cpu_ptr(&msrs->l), raw_cpu_ptr(&msrs->h));
}
static inline void wrmsr_on_cpus(const struct cpumask *m, u32 msr_no,
				 struct msr __percpu *msrs)
{
	wrmsr_on_cpu(0, msr_no, raw_cpu_read(msrs->l), raw_cpu_read(msrs->h));
}
static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
				    u32 *l, u32 *h)
{
	return rdmsr_safe(msr_no, l, h);
}
static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	return wrmsr_safe(msr_no, l, h);
}
static inline int rdmsrq_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	return rdmsrq_safe(msr_no, q);
}
static inline int wrmsrq_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	return wrmsrq_safe(msr_no, q);
}
static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	return rdmsr_safe_regs(regs);
}
static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	return wrmsr_safe_regs(regs);
}
#endif /* CONFIG_SMP */
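
/*
 * Illustrative sketch (editor's addition, not part of the upstream header)
 * of the cross-CPU helpers declared above: on SMP kernels the access is
 * carried out on the requested CPU, and the _safe variants return 0 on
 * success or a negative error.  MSR_IA32_THERM_STATUS is only an example
 * of a per-CPU MSR:
 *
 *	u64 therm;
 *	int cpu, err;
 *
 *	for_each_online_cpu(cpu) {
 *		err = rdmsrq_safe_on_cpu(cpu, MSR_IA32_THERM_STATUS, &therm);
 *		if (!err)
 *			pr_info("CPU%d: 0x%llx\n", cpu, therm);
 *	}
 */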

/* Compatibility wrappers: */
#define rdmsrl(msr, val)			rdmsrq(msr, val)
#define wrmsrl(msr, val)			wrmsrq(msr, val)
#define rdmsrl_on_cpu(cpu, msr, q)		rdmsrq_on_cpu(cpu, msr, q)

#endif /* __ASSEMBLER__ */
#endif /* _ASM_X86_MSR_H */