/* SPDX-License-Identifier: GPL-2.0 */
/*
 * x86 TSC related functions
 */
#ifndef _ASM_X86_TSC_H
#define _ASM_X86_TSC_H

#include <asm/asm.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/msr.h>

/**
 * rdtsc() - returns the current TSC without ordering constraints
 *
 * rdtsc() returns the result of RDTSC as a 64-bit integer.  The
 * only ordering constraint it supplies is the ordering implied by
 * "asm volatile": it will put the RDTSC in the place you expect.  The
 * CPU can and will speculatively execute that RDTSC, though, so the
 * results can be non-monotonic if compared on different CPUs.
 */
static __always_inline u64 rdtsc(void)
{
	EAX_EDX_DECLARE_ARGS(val, low, high);

	asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));

	return EAX_EDX_VAL(val, low, high);
}

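/*
 * Illustrative sketch (not part of the original header): a rough cycle
 * count around a code sequence using the unordered rdtsc().  Because
 * the CPU may execute RDTSC speculatively, the measured work can leak
 * outside the two reads; the do_work() helper is hypothetical.
 *
 *	u64 start, end;
 *
 *	start = rdtsc();
 *	do_work();
 *	end = rdtsc();
 *	pr_debug("approx %llu cycles\n", end - start);
 *
 * When the placement of the reads matters, use rdtsc_ordered() below.
 */
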
/**
 * rdtsc_ordered() - read the current TSC in program order
 *
 * rdtsc_ordered() returns the result of RDTSC as a 64-bit integer.
 * It is ordered like a load to a global in-memory counter.  It should
 * be impossible to observe non-monotonic behavior across multiple
 * CPUs when using rdtsc_ordered(), as long as the TSC is synced.
 */
static __always_inline u64 rdtsc_ordered(void)
{
	EAX_EDX_DECLARE_ARGS(val, low, high);

	/*
	 * The RDTSC instruction is not ordered relative to memory
	 * access.  The Intel SDM and the AMD APM are both vague on this
	 * point, but empirically an RDTSC instruction can be
	 * speculatively executed before prior loads.  An RDTSC
	 * immediately after an appropriate barrier appears to be
	 * ordered as a normal load, that is, it provides the same
	 * ordering guarantees as reading from a global memory location
	 * that some other imaginary CPU is updating continuously with a
	 * time stamp.
	 *
	 * Thus, use the preferred barrier on the respective CPU, aiming for
	 * RDTSCP as the default.
	 */
	asm volatile(ALTERNATIVE_2("rdtsc",
				   "lfence; rdtsc", X86_FEATURE_LFENCE_RDTSC,
				   "rdtscp", X86_FEATURE_RDTSCP)
			: EAX_EDX_RET(val, low, high)
			/* RDTSCP clobbers ECX with MSR_TSC_AUX. */
			:: "ecx");

	return EAX_EDX_VAL(val, low, high);
}

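/*
 * Illustrative sketch (not part of the original header): because
 * rdtsc_ordered() is ordered like a load, timestamps taken on
 * different CPUs can be compared directly when the TSCs are synced.
 * shared_ts is a hypothetical shared variable.
 *
 *	// CPU A: take a timestamp, then publish it.
 *	t_a = rdtsc_ordered();
 *	smp_store_release(&shared_ts, t_a);
 *
 *	// CPU B: once the timestamp is visible, read the TSC again.
 *	t_a = smp_load_acquire(&shared_ts);
 *	t_b = rdtsc_ordered();
 *	// With synced TSCs, t_b >= t_a is expected here.
 */
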
/*
 * Standard way to access the cycle counter.
 */
typedef unsigned long long cycles_t;

extern unsigned int cpu_khz;
extern unsigned int tsc_khz;

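/*
 * Illustrative sketch (not part of the original header): converting a
 * cycle delta to nanoseconds with tsc_khz.  Since tsc_khz is cycles
 * per millisecond, ns = cycles * 1000000 / tsc_khz.  The
 * multiplication below can overflow for very large deltas, and real
 * kernel code normally goes through sched_clock() or the clocksource
 * core instead.
 *
 *	u64 delta_ns = div_u64(delta_cycles * 1000000ULL, tsc_khz);
 */
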
extern void disable_TSC(void);

static inline cycles_t get_cycles(void)
{
	if (!IS_ENABLED(CONFIG_X86_TSC) &&
	    !cpu_feature_enabled(X86_FEATURE_TSC))
		return 0;
	return rdtsc();
}
#define get_cycles get_cycles

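/*
 * The self-referential define above follows the common kernel pattern
 * for flagging an architecture override at preprocessing time: generic
 * code (e.g. the random_get_entropy() fallback in linux/timex.h) can
 * test "#ifdef get_cycles" to decide whether a real cycle counter
 * exists.  Minimal sketch of a generic-side consumer; the macro name
 * arch_has_cycle_counter is hypothetical:
 *
 *	#ifdef get_cycles
 *	#define arch_has_cycle_counter	1
 *	#else
 *	#define arch_has_cycle_counter	0
 *	#endif
 */
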
extern void tsc_early_init(void);
extern void tsc_init(void);
extern void mark_tsc_unstable(char *reason);
extern int unsynchronized_tsc(void);
extern int check_tsc_unstable(void);
extern void mark_tsc_async_resets(char *reason);
extern unsigned long native_calibrate_cpu_early(void);
extern unsigned long native_calibrate_tsc(void);
extern unsigned long long native_sched_clock_from_tsc(u64 tsc);

extern int tsc_clocksource_reliable;
#ifdef CONFIG_X86_TSC
extern bool tsc_async_resets;
#else
# define tsc_async_resets	false
#endif

/*
 * Boot-time check whether the TSCs are synchronized across
 * all CPUs/cores:
 */
#ifdef CONFIG_X86_TSC
extern bool tsc_store_and_check_tsc_adjust(bool bootcpu);
extern void tsc_verify_tsc_adjust(bool resume);
extern void check_tsc_sync_target(void);
#else
static inline bool tsc_store_and_check_tsc_adjust(bool bootcpu) { return false; }
static inline void tsc_verify_tsc_adjust(bool resume) { }
static inline void check_tsc_sync_target(void) { }
#endif

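/*
 * Illustrative sketch of the expected call pattern for the interfaces
 * above (the real logic lives in arch/x86/kernel/tsc_sync.c; the exact
 * placement of the calls is an assumption for illustration):
 *
 *	tsc_store_and_check_tsc_adjust(true);	// on the boot CPU
 *	check_tsc_sync_target();		// on each secondary CPU
 *	tsc_verify_tsc_adjust(resume);		// re-verify later / on resume
 */
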
extern int notsc_setup(char *);
extern void tsc_save_sched_clock_state(void);
extern void tsc_restore_sched_clock_state(void);

unsigned long cpu_khz_from_msr(void);

#endif /* _ASM_X86_TSC_H */