/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MSR_H
#define _ASM_X86_MSR_H

#include "msr-index.h"

#ifndef __ASSEMBLY__

#include <asm/asm.h>
#include <asm/errno.h>
#include <asm/cpumask.h>
#include <uapi/asm/msr.h>
#include <asm/shared/msr.h>

#include <linux/percpu.h>

struct msr_info {
	u32			msr_no;
	struct msr		reg;
	struct msr __percpu	*msrs;
	int			err;
};

struct msr_regs_info {
	u32 *regs;
	int err;
};

struct saved_msr {
	bool valid;
	struct msr_info info;
};

struct saved_msrs {
	unsigned int num;
	struct saved_msr *array;
};

/*
 * Both i386 and x86_64 return a 64-bit value in edx:eax, but gcc's "A"
 * constraint has different meanings on the two: on i386, "A" means
 * exactly edx:eax, while on x86_64 it does not mean rdx:rax or edx:eax.
 * Instead, it means rax *or* rdx.
 */
#ifdef CONFIG_X86_64
/* Using 64-bit values saves one instruction clearing the high half of low */
#define DECLARE_ARGS(val, low, high)	unsigned long low, high
#define EAX_EDX_VAL(val, low, high)	((low) | (high) << 32)
#define EAX_EDX_RET(val, low, high)	"=a" (low), "=d" (high)
#else
#define DECLARE_ARGS(val, low, high)	unsigned long long val
#define EAX_EDX_VAL(val, low, high)	(val)
#define EAX_EDX_RET(val, low, high)	"=A" (val)
#endif
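
/*
 * Illustrative expansion (not part of the API): on x86_64 a read such as
 *
 *	DECLARE_ARGS(val, low, high);
 *	asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));
 *	return EAX_EDX_VAL(val, low, high);
 *
 * declares two longs and combines them as low | (high << 32), while on
 * i386 the same sequence declares a single u64 and lets the "=A"
 * constraint fill edx:eax directly.
 */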

/*
 * Be very careful with includes. This header is prone to include loops.
 */
#include <asm/atomic.h>
#include <linux/tracepoint-defs.h>

#ifdef CONFIG_TRACEPOINTS
DECLARE_TRACEPOINT(read_msr);
DECLARE_TRACEPOINT(write_msr);
DECLARE_TRACEPOINT(rdpmc);
extern void do_trace_write_msr(unsigned int msr, u64 val, int failed);
extern void do_trace_read_msr(unsigned int msr, u64 val, int failed);
extern void do_trace_rdpmc(unsigned int msr, u64 val, int failed);
#else
static inline void do_trace_write_msr(unsigned int msr, u64 val, int failed) {}
static inline void do_trace_read_msr(unsigned int msr, u64 val, int failed) {}
static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {}
#endif

/*
 * __rdmsr() and __wrmsr() are the two primitives which are the bare minimum MSR
 * accessors and should not have any tracing or other functionality piggybacking
 * on them - those are *purely* for accessing MSRs and nothing more. So don't even
 * think of extending them - you will be slapped with a stinking trout or a frozen
 * shark will reach you, wherever you are! You've been warned.
 */
static __always_inline unsigned long long __rdmsr(unsigned int msr)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("1: rdmsr\n"
		     "2:\n"
		     _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_RDMSR)
		     : EAX_EDX_RET(val, low, high) : "c" (msr));

	return EAX_EDX_VAL(val, low, high);
}

static __always_inline void __wrmsr(unsigned int msr, u32 low, u32 high)
{
	asm volatile("1: wrmsr\n"
		     "2:\n"
		     _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_WRMSR)
		     : : "c" (msr), "a"(low), "d" (high) : "memory");
}

#define native_rdmsr(msr, val1, val2)			\
do {							\
	u64 __val = __rdmsr((msr));			\
	(void)((val1) = (u32)__val);			\
	(void)((val2) = (u32)(__val >> 32));		\
} while (0)

#define native_wrmsr(msr, low, high)			\
	__wrmsr(msr, low, high)

#define native_wrmsrl(msr, val)				\
	__wrmsr((msr), (u32)((u64)(val)),		\
		       (u32)((u64)(val) >> 32))
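
/*
 * Illustrative use of the native_*() helpers (MSR_IA32_TSC_DEADLINE is
 * just a stand-in for any MSR from msr-index.h):
 *
 *	u32 lo, hi;
 *
 *	native_rdmsr(MSR_IA32_TSC_DEADLINE, lo, hi);
 *	native_wrmsrl(MSR_IA32_TSC_DEADLINE, ((u64)hi << 32) | lo);
 */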

static inline unsigned long long native_read_msr(unsigned int msr)
{
	unsigned long long val;

	val = __rdmsr(msr);

	if (tracepoint_enabled(read_msr))
		do_trace_read_msr(msr, val, 0);

	return val;
}

static inline unsigned long long native_read_msr_safe(unsigned int msr,
						      int *err)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("1: rdmsr ; xor %[err],%[err]\n"
		     "2:\n\t"
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_RDMSR_SAFE, %[err])
		     : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
		     : "c" (msr));
	if (tracepoint_enabled(read_msr))
		do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), *err);
	return EAX_EDX_VAL(val, low, high);
}
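
/*
 * Illustrative error handling (hypothetical snippet): a #GP raised by
 * RDMSR is fixed up through the exception table and reported via @err:
 *
 *	int err;
 *	u64 val = native_read_msr_safe(MSR_AMD64_SYSCFG, &err);
 *
 *	if (err)
 *		... the MSR does not exist on this CPU ...
 */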

/* Can be uninlined because referenced by paravirt */
static inline void notrace
native_write_msr(unsigned int msr, u32 low, u32 high)
{
	__wrmsr(msr, low, high);

	if (tracepoint_enabled(write_msr))
		do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
}

/* Can be uninlined because referenced by paravirt */
static inline int notrace
native_write_msr_safe(unsigned int msr, u32 low, u32 high)
{
	int err;

	asm volatile("1: wrmsr ; xor %[err],%[err]\n"
		     "2:\n\t"
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_WRMSR_SAFE, %[err])
		     : [err] "=a" (err)
		     : "c" (msr), "0" (low), "d" (high)
		     : "memory");
	if (tracepoint_enabled(write_msr))
		do_trace_write_msr(msr, ((u64)high << 32 | low), err);
	return err;
}

extern int rdmsr_safe_regs(u32 regs[8]);
extern int wrmsr_safe_regs(u32 regs[8]);

/**
 * rdtsc() - returns the current TSC without ordering constraints
 *
 * rdtsc() returns the result of RDTSC as a 64-bit integer.  The
 * only ordering constraint it supplies is the ordering implied by
 * "asm volatile": it will put the RDTSC in the place you expect.  The
 * CPU can and will speculatively execute that RDTSC, though, so the
 * results can be non-monotonic if compared on different CPUs.
 */
static __always_inline unsigned long long rdtsc(void)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));

	return EAX_EDX_VAL(val, low, high);
}
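
/*
 * Illustrative (hypothetical snippet): rdtsc() is fine as a cheap,
 * CPU-local timestamp, but raw values taken on different CPUs must not
 * be compared directly:
 *
 *	u64 now = rdtsc();
 */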

/**
 * rdtsc_ordered() - read the current TSC in program order
 *
 * rdtsc_ordered() returns the result of RDTSC as a 64-bit integer.
 * It is ordered like a load to a global in-memory counter.  It should
 * be impossible to observe non-monotonic rdtsc_ordered() behavior
 * across multiple CPUs as long as the TSC is synced.
 */
static __always_inline unsigned long long rdtsc_ordered(void)
{
	DECLARE_ARGS(val, low, high);

	/*
	 * The RDTSC instruction is not ordered relative to memory
	 * access.  The Intel SDM and the AMD APM are both vague on this
	 * point, but empirically an RDTSC instruction can be
	 * speculatively executed before prior loads.  An RDTSC
	 * immediately after an appropriate barrier appears to be
	 * ordered as a normal load, that is, it provides the same
	 * ordering guarantees as reading from a global memory location
	 * that some other imaginary CPU is updating continuously with a
	 * time stamp.
	 *
	 * Thus, use the preferred barrier on the respective CPU, aiming for
	 * RDTSCP as the default.
	 */
	asm volatile(ALTERNATIVE_2("rdtsc",
				   "lfence; rdtsc", X86_FEATURE_LFENCE_RDTSC,
				   "rdtscp", X86_FEATURE_RDTSCP)
			: EAX_EDX_RET(val, low, high)
			/* RDTSCP clobbers ECX with MSR_TSC_AUX. */
			:: "ecx");

	return EAX_EDX_VAL(val, low, high);
}
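
/*
 * Illustrative timing measurement (hypothetical snippet); the barrier in
 * rdtsc_ordered() keeps the reads from being speculated ahead of earlier
 * loads in the measured section:
 *
 *	u64 t0 = rdtsc_ordered();
 *	... section under test ...
 *	u64 cycles = rdtsc_ordered() - t0;
 */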

static inline unsigned long long native_read_pmc(int counter)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter));
	if (tracepoint_enabled(rdpmc))
		do_trace_rdpmc(counter, EAX_EDX_VAL(val, low, high), 0);
	return EAX_EDX_VAL(val, low, high);
}

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
#include <linux/errno.h>
/*
 * Access to model-specific registers (available on 586 and better only).
 * Note: the rd* operations modify the parameters directly (without using
 * pointer indirection); this allows gcc to optimize better.
 */

#define rdmsr(msr, low, high)					\
do {								\
	u64 __val = native_read_msr((msr));			\
	(void)((low) = (u32)__val);				\
	(void)((high) = (u32)(__val >> 32));			\
} while (0)

static inline void wrmsr(unsigned int msr, u32 low, u32 high)
{
	native_write_msr(msr, low, high);
}

#define rdmsrl(msr, val)			\
	((val) = native_read_msr((msr)))

static inline void wrmsrl(unsigned int msr, u64 val)
{
	native_write_msr(msr, (u32)(val & 0xffffffffULL), (u32)(val >> 32));
}
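
/*
 * Illustrative read-modify-write of the 64-bit view of an MSR
 * (MSR_IA32_MISC_ENABLE is just a stand-in for any MSR from
 * msr-index.h):
 *
 *	u64 val;
 *
 *	rdmsrl(MSR_IA32_MISC_ENABLE, val);
 *	val &= ~MSR_IA32_MISC_ENABLE_FAST_STRING;
 *	wrmsrl(MSR_IA32_MISC_ENABLE, val);
 */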

/* wrmsr with exception handling */
static inline int wrmsr_safe(unsigned int msr, u32 low, u32 high)
{
	return native_write_msr_safe(msr, low, high);
}

/* rdmsr with exception handling */
#define rdmsr_safe(msr, low, high)				\
({								\
	int __err;						\
	u64 __val = native_read_msr_safe((msr), &__err);	\
	(*low) = (u32)__val;					\
	(*high) = (u32)(__val >> 32);				\
	__err;							\
})
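
/*
 * Illustrative probe for an MSR that may not exist (hypothetical
 * snippet); a #GP becomes a non-zero return value instead of an oops:
 *
 *	u32 lo, hi;
 *
 *	if (rdmsr_safe(MSR_IA32_THERM_STATUS, &lo, &hi))
 *		... not available on this CPU ...
 */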

static inline int rdmsrl_safe(unsigned int msr, unsigned long long *p)
{
	int err;

	*p = native_read_msr_safe(msr, &err);
	return err;
}

#define rdpmc(counter, low, high)			\
do {							\
	u64 _l = native_read_pmc((counter));		\
	(low)  = (u32)_l;				\
	(high) = (u32)(_l >> 32);			\
} while (0)

#define rdpmcl(counter, val) ((val) = native_read_pmc(counter))

#endif	/* !CONFIG_PARAVIRT_XXL */

/* The WRMSRNS mnemonic is only supported by binutils >= 2.40, so emit raw opcode bytes */
#define WRMSRNS _ASM_BYTES(0x0f,0x01,0xc6)

/* Non-serializing WRMSR, when available.  Falls back to a serializing WRMSR. */
static __always_inline void wrmsrns(u32 msr, u64 val)
{
	/*
	 * WRMSR is 2 bytes.  WRMSRNS is 3 bytes.  Pad WRMSR with a redundant
	 * DS prefix to avoid a trailing NOP.
	 */
	asm volatile("1: " ALTERNATIVE("ds wrmsr", WRMSRNS, X86_FEATURE_WRMSRNS)
		     "2: " _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_WRMSR)
		     : : "c" (msr), "a" ((u32)val), "d" ((u32)(val >> 32)));
}
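
/*
 * Illustrative (hypothetical 'base' value): a non-serializing write
 * suits hot paths where ordering of the MSR write against subsequent
 * instructions does not matter:
 *
 *	wrmsrns(MSR_KERNEL_GS_BASE, base);
 */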

/*
 * 64-bit version of wrmsr_safe():
 */
static inline int wrmsrl_safe(u32 msr, u64 val)
{
	return wrmsr_safe(msr, (u32)val, (u32)(val >> 32));
}

struct msr __percpu *msrs_alloc(void);
void msrs_free(struct msr __percpu *msrs);
int msr_set_bit(u32 msr, u8 bit);
int msr_clear_bit(u32 msr, u8 bit);
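
/*
 * Illustrative use of the bit helpers (bit 0 of MSR_IA32_DEBUGCTLMSR is
 * just a stand-in): the return value is < 0 on error, 0 if the bit
 * already had the requested value and > 0 if it was changed:
 *
 *	if (msr_set_bit(MSR_IA32_DEBUGCTLMSR, 0) > 0)
 *		... the bit was newly set ...
 */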

#ifdef CONFIG_SMP
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs);
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
#else  /*  CONFIG_SMP  */
static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	rdmsr(msr_no, *l, *h);
	return 0;
}
static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	wrmsr(msr_no, l, h);
	return 0;
}
static inline int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	rdmsrl(msr_no, *q);
	return 0;
}
static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	wrmsrl(msr_no, q);
	return 0;
}
static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
				struct msr __percpu *msrs)
{
	rdmsr_on_cpu(0, msr_no, raw_cpu_ptr(&msrs->l), raw_cpu_ptr(&msrs->h));
}
static inline void wrmsr_on_cpus(const struct cpumask *m, u32 msr_no,
				struct msr __percpu *msrs)
{
	wrmsr_on_cpu(0, msr_no, raw_cpu_read(msrs->l), raw_cpu_read(msrs->h));
}
static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
				    u32 *l, u32 *h)
{
	return rdmsr_safe(msr_no, l, h);
}
static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	return wrmsr_safe(msr_no, l, h);
}
static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	return rdmsrl_safe(msr_no, q);
}
static inline int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	return wrmsrl_safe(msr_no, q);
}
static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	return rdmsr_safe_regs(regs);
}
static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	return wrmsr_safe_regs(regs);
}
#endif  /* CONFIG_SMP */
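
/*
 * Illustrative cross-CPU read (hypothetical snippet): the *_on_cpu()
 * variants run the access on @cpu via IPI and return 0 on success:
 *
 *	u64 aperf;
 *
 *	if (!rdmsrl_on_cpu(cpu, MSR_IA32_APERF, &aperf))
 *		... use aperf ...
 */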
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_MSR_H */