xref: /linux/arch/x86/kernel/tsc.c (revision 7a5f1cd22d47f8ca4b760b6334378ae42c1bd24b)
1 // SPDX-License-Identifier: GPL-2.0-only
2 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
3 
4 #include <linux/kernel.h>
5 #include <linux/sched.h>
6 #include <linux/sched/clock.h>
7 #include <linux/init.h>
8 #include <linux/export.h>
9 #include <linux/timer.h>
10 #include <linux/acpi_pmtmr.h>
11 #include <linux/cpufreq.h>
12 #include <linux/delay.h>
13 #include <linux/clocksource.h>
14 #include <linux/kvm_types.h>
15 #include <linux/percpu.h>
16 #include <linux/timex.h>
17 #include <linux/static_key.h>
18 #include <linux/static_call.h>
19 
20 #include <asm/cpuid/api.h>
21 #include <asm/hpet.h>
22 #include <asm/timer.h>
23 #include <asm/vgtod.h>
24 #include <asm/time.h>
25 #include <asm/delay.h>
26 #include <asm/hypervisor.h>
27 #include <asm/nmi.h>
28 #include <asm/x86_init.h>
29 #include <asm/geode.h>
30 #include <asm/apic.h>
31 #include <asm/cpu_device_id.h>
32 #include <asm/i8259.h>
33 #include <asm/msr.h>
34 #include <asm/topology.h>
35 #include <asm/uv/uv.h>
36 #include <asm/sev.h>
37 
38 unsigned int __read_mostly cpu_khz;	/* TSC clocks / usec, not used here */
39 EXPORT_SYMBOL(cpu_khz);
40 
41 unsigned int __read_mostly tsc_khz;
42 EXPORT_SYMBOL(tsc_khz);
43 
44 #define KHZ	1000
45 
46 /*
47  * TSC can be unstable due to cpufreq or due to unsynced TSCs
48  */
49 static int __read_mostly tsc_unstable;
50 static unsigned int __initdata tsc_early_khz;
51 
52 static DEFINE_STATIC_KEY_FALSE_RO(__use_tsc);
53 
54 int tsc_clocksource_reliable;
55 
56 static int __read_mostly tsc_force_recalibrate;
57 
58 static struct clocksource_base art_base_clk = {
59 	.id    = CSID_X86_ART,
60 };
61 static bool have_art;
62 
63 struct cyc2ns {
64 	struct cyc2ns_data data[2];	/*  0 + 2*16 = 32 */
65 	seqcount_latch_t   seq;		/* 32 + 4    = 36 */
66 
67 }; /* fits one cacheline */
68 
69 static DEFINE_PER_CPU_ALIGNED(struct cyc2ns, cyc2ns);
70 
71 static int __init tsc_early_khz_setup(char *buf)
72 {
73 	return kstrtouint(buf, 0, &tsc_early_khz);
74 }
75 early_param("tsc_early_khz", tsc_early_khz_setup);
76 
77 __always_inline void __cyc2ns_read(struct cyc2ns_data *data)
78 {
79 	int seq, idx;
80 
81 	do {
82 		seq = this_cpu_read(cyc2ns.seq.seqcount.sequence);
83 		idx = seq & 1;
84 
85 		data->cyc2ns_offset = this_cpu_read(cyc2ns.data[idx].cyc2ns_offset);
86 		data->cyc2ns_mul    = this_cpu_read(cyc2ns.data[idx].cyc2ns_mul);
87 		data->cyc2ns_shift  = this_cpu_read(cyc2ns.data[idx].cyc2ns_shift);
88 
89 	} while (unlikely(seq != this_cpu_read(cyc2ns.seq.seqcount.sequence)));
90 }
91 
92 __always_inline void cyc2ns_read_begin(struct cyc2ns_data *data)
93 {
94 	preempt_disable_notrace();
95 	__cyc2ns_read(data);
96 }
97 
98 __always_inline void cyc2ns_read_end(void)
99 {
100 	preempt_enable_notrace();
101 }
102 
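/*
 * Illustrative usage sketch for the pair above; this mirrors what
 * __cycles_2_ns() below does:
 *
 *	struct cyc2ns_data data;
 *	u64 ns;
 *
 *	cyc2ns_read_begin(&data);
 *	ns = data.cyc2ns_offset +
 *	     mul_u64_u32_shr(rdtsc(), data.cyc2ns_mul, data.cyc2ns_shift);
 *	cyc2ns_read_end();
 */
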
103 /*
104  * Accelerators for sched_clock()
105  * convert from cycles(64bits) => nanoseconds (64bits)
106  *  basic equation:
107  *              ns = cycles / (freq / ns_per_sec)
108  *              ns = cycles * (ns_per_sec / freq)
109  *              ns = cycles * (10^9 / (cpu_khz * 10^3))
110  *              ns = cycles * (10^6 / cpu_khz)
111  *
112  *      Then we use scaling math (suggested by george@mvista.com) to get:
113  *              ns = cycles * (10^6 * SC / cpu_khz) / SC
114  *              ns = cycles * cyc2ns_scale / SC
115  *
116  *      And since SC is a constant power of two, we can convert the div
117  *  into a shift. The larger SC is, the more accurate the conversion, but
118  *  cyc2ns_scale needs to be a 32-bit value so that 32-bit multiplication
119  *  (64-bit result) can be used.
120  *
121  *  We can use khz divisor instead of mhz to keep a better precision.
122  *  (mathieu.desnoyers@polymtl.ca)
123  *
124  *                      -johnstul@us.ibm.com "math is hard, lets go shopping!"
125  */
126 
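/*
 * Worked example with illustrative numbers: for a 2.5 GHz TSC,
 * cpu_khz = 2500000 and ns = cycles * (10^6 / 2500000). Picking SC = 2^22:
 *
 *	cyc2ns_scale = 10^6 * 2^22 / 2500000 = 1677722 (rounded)
 *	ns           = (cycles * 1677722) >> 22
 *
 * so one second worth of cycles (2.5 * 10^9) converts to ~10^9 ns with a
 * rounding error well below one part per million.
 */
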
127 static __always_inline unsigned long long __cycles_2_ns(unsigned long long cyc)
128 {
129 	struct cyc2ns_data data;
130 	unsigned long long ns;
131 
132 	__cyc2ns_read(&data);
133 
134 	ns = data.cyc2ns_offset;
135 	ns += mul_u64_u32_shr(cyc, data.cyc2ns_mul, data.cyc2ns_shift);
136 
137 	return ns;
138 }
139 
140 static __always_inline unsigned long long cycles_2_ns(unsigned long long cyc)
141 {
142 	unsigned long long ns;
143 	preempt_disable_notrace();
144 	ns = __cycles_2_ns(cyc);
145 	preempt_enable_notrace();
146 	return ns;
147 }
148 
149 static void __set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
150 {
151 	unsigned long long ns_now;
152 	struct cyc2ns_data data;
153 	struct cyc2ns *c2n;
154 
155 	ns_now = cycles_2_ns(tsc_now);
156 
157 	/*
158 	 * Compute a new multiplier as per the above comment and ensure our
159 	 * time function is continuous; see the comment near struct
160 	 * cyc2ns_data.
161 	 */
162 	clocks_calc_mult_shift(&data.cyc2ns_mul, &data.cyc2ns_shift, khz,
163 			       NSEC_PER_MSEC, 0);
164 
165 	/*
166 	 * cyc2ns_shift is exported via arch_perf_update_userpage() where it is
167 	 * not expected to be greater than 31 due to the original published
168 	 * conversion algorithm shifting a 32-bit value (now specifies a 64-bit
169 	 * value) - see the perf_event_mmap_page documentation in perf_event.h.
170 	 */
171 	if (data.cyc2ns_shift == 32) {
172 		data.cyc2ns_shift = 31;
173 		data.cyc2ns_mul >>= 1;
174 	}
175 
176 	data.cyc2ns_offset = ns_now -
177 		mul_u64_u32_shr(tsc_now, data.cyc2ns_mul, data.cyc2ns_shift);
178 
179 	c2n = per_cpu_ptr(&cyc2ns, cpu);
180 
181 	write_seqcount_latch_begin(&c2n->seq);
182 	c2n->data[0] = data;
183 	write_seqcount_latch(&c2n->seq);
184 	c2n->data[1] = data;
185 	write_seqcount_latch_end(&c2n->seq);
186 }
187 
188 static void set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
189 {
190 	unsigned long flags;
191 
192 	local_irq_save(flags);
193 	sched_clock_idle_sleep_event();
194 
195 	if (khz)
196 		__set_cyc2ns_scale(khz, cpu, tsc_now);
197 
198 	sched_clock_idle_wakeup_event();
199 	local_irq_restore(flags);
200 }
201 
202 /*
203  * Initialize cyc2ns for boot cpu
204  */
205 static void __init cyc2ns_init_boot_cpu(void)
206 {
207 	struct cyc2ns *c2n = this_cpu_ptr(&cyc2ns);
208 
209 	seqcount_latch_init(&c2n->seq);
210 	__set_cyc2ns_scale(tsc_khz, smp_processor_id(), rdtsc());
211 }
212 
213 /*
214  * Secondary CPUs do not run through tsc_init(), so set up
215  * all the scale factors for all CPUs, assuming the same
216  * speed as the bootup CPU.
217  */
218 static void __init cyc2ns_init_secondary_cpus(void)
219 {
220 	unsigned int cpu, this_cpu = smp_processor_id();
221 	struct cyc2ns *c2n = this_cpu_ptr(&cyc2ns);
222 	struct cyc2ns_data *data = c2n->data;
223 
224 	for_each_possible_cpu(cpu) {
225 		if (cpu != this_cpu) {
226 			seqcount_latch_init(&c2n->seq);
227 			c2n = per_cpu_ptr(&cyc2ns, cpu);
228 			c2n->data[0] = data[0];
229 			c2n->data[1] = data[1];
230 		}
231 	}
232 }
233 
234 /*
235  * Scheduler clock - returns current time in nanosec units.
236  */
237 noinstr u64 native_sched_clock(void)
238 {
239 	if (static_branch_likely(&__use_tsc)) {
240 		u64 tsc_now = rdtsc();
241 
242 		/* return the value in ns */
243 		return __cycles_2_ns(tsc_now);
244 	}
245 
246 	/*
247 	 * Fall back to jiffies if there's no TSC available:
248 	 * ( But note that we still use it if the TSC is marked
249 	 *   unstable. We do this because unlike Time Of Day,
250 	 *   the scheduler clock tolerates small errors and it's
251 	 *   very important for it to be as fast as the platform
252 	 *   can achieve it. )
253 	 */
254 
255 	/* No locking but a rare wrong value is not a big deal: */
256 	return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
257 }
258 
259 /*
260  * Generate a sched_clock if you already have a TSC value.
261  */
262 u64 native_sched_clock_from_tsc(u64 tsc)
263 {
264 	return cycles_2_ns(tsc);
265 }
266 
267 /* We need to define a real function for sched_clock, to override the
268    weak default version */
269 #ifdef CONFIG_PARAVIRT
270 DEFINE_STATIC_CALL(pv_sched_clock, native_sched_clock);
271 
272 noinstr u64 sched_clock_noinstr(void)
273 {
274 	return static_call(pv_sched_clock)();
275 }
276 
277 bool using_native_sched_clock(void)
278 {
279 	return static_call_query(pv_sched_clock) == native_sched_clock;
280 }
281 
282 void paravirt_set_sched_clock(u64 (*func)(void))
283 {
284 	static_call_update(pv_sched_clock, func);
285 }
286 #else
287 u64 sched_clock_noinstr(void) __attribute__((alias("native_sched_clock")));
288 
289 bool using_native_sched_clock(void) { return true; }
290 void paravirt_set_sched_clock(u64 (*func)(void)) { }
291 #endif
292 
293 notrace u64 sched_clock(void)
294 {
295 	u64 now;
296 	preempt_disable_notrace();
297 	now = sched_clock_noinstr();
298 	preempt_enable_notrace();
299 	return now;
300 }
301 
302 int check_tsc_unstable(void)
303 {
304 	return tsc_unstable;
305 }
306 EXPORT_SYMBOL_GPL(check_tsc_unstable);
307 
308 #ifdef CONFIG_X86_TSC
309 int __init notsc_setup(char *str)
310 {
311 	mark_tsc_unstable("boot parameter notsc");
312 	return 1;
313 }
314 #else
315 /*
316  * Disable flag for the TSC. Takes effect by clearing the TSC CPU
317  * feature flag in cpu/common.c.
318  */
319 int __init notsc_setup(char *str)
320 {
321 	setup_clear_cpu_cap(X86_FEATURE_TSC);
322 	return 1;
323 }
324 #endif
325 __setup("notsc", notsc_setup);
326 
327 enum {
328 	TSC_WATCHDOG_AUTO,
329 	TSC_WATCHDOG_OFF,
330 	TSC_WATCHDOG_ON,
331 };
332 
333 static int no_sched_irq_time;
334 static int tsc_watchdog;
335 
336 static int __init tsc_setup(char *str)
337 {
338 	if (!strcmp(str, "reliable"))
339 		tsc_clocksource_reliable = 1;
340 	if (!strncmp(str, "noirqtime", 9))
341 		no_sched_irq_time = 1;
342 	if (!strcmp(str, "unstable"))
343 		mark_tsc_unstable("boot parameter");
344 	if (!strcmp(str, "nowatchdog"))
345 		tsc_watchdog = TSC_WATCHDOG_OFF;
346 	if (!strcmp(str, "recalibrate"))
347 		tsc_force_recalibrate = 1;
348 	if (!strcmp(str, "watchdog"))
349 		tsc_watchdog = TSC_WATCHDOG_ON;
350 	return 1;
351 }
352 __setup("tsc=", tsc_setup);
353 
354 #define MAX_RETRIES		5
355 #define TSC_DEFAULT_THRESHOLD	0x20000
356 
357 /*
358  * Read the TSC and the reference counter, taking care of any disturbances.
359  */
360 static u64 tsc_read_refs(u64 *p, int hpet)
361 {
362 	u64 t1, t2;
363 	u64 thresh = tsc_khz ? tsc_khz >> 5 : TSC_DEFAULT_THRESHOLD;
364 	int i;
365 
366 	for (i = 0; i < MAX_RETRIES; i++) {
367 		t1 = get_cycles();
368 		if (hpet)
369 			*p = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF;
370 		else
371 			*p = acpi_pm_read_early();
372 		t2 = get_cycles();
373 		if ((t2 - t1) < thresh)
374 			return t2;
375 	}
376 	return ULLONG_MAX;
377 }
378 
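/*
 * Worked numbers for the retry threshold above: once tsc_khz is known,
 * thresh = tsc_khz >> 5 cycles, i.e. 1/32 of the cycles per millisecond.
 * A reference read is thus accepted only when the two surrounding TSC
 * reads are within ~31 us of each other.
 */
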
379 /*
380  * Calculate the TSC frequency from HPET reference
381  */
382 static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
383 {
384 	u64 tmp;
385 
386 	if (hpet2 < hpet1)
387 		hpet2 += 0x100000000ULL;
388 	hpet2 -= hpet1;
389 	tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
390 	do_div(tmp, 1000000);
391 	deltatsc = div64_u64(deltatsc, tmp);
392 
393 	return (unsigned long) deltatsc;
394 }
395 
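/*
 * Unit check for calc_hpet_ref() (HPET_PERIOD is in femtoseconds per
 * tick, and the callers pass in deltatsc pre-multiplied by 10^6):
 *
 *	tmp    = hpet_ticks * fs_per_tick / 10^6	[ns elapsed]
 *	result = cycles * 10^6 / ns_elapsed		[kHz]
 *
 * since cycles/ns is GHz and GHz * 10^6 is kHz.
 */
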
396 /*
397  * Calculate the TSC frequency from PMTimer reference
398  */
399 static unsigned long calc_pmtimer_ref(u64 deltatsc, u64 pm1, u64 pm2)
400 {
401 	u64 tmp;
402 
403 	if (!pm1 && !pm2)
404 		return ULONG_MAX;
405 
406 	if (pm2 < pm1)
407 		pm2 += (u64)ACPI_PM_OVRRUN;
408 	pm2 -= pm1;
409 	tmp = pm2 * 1000000000LL;
410 	do_div(tmp, PMTMR_TICKS_PER_SEC);
411 	do_div(deltatsc, tmp);
412 
413 	return (unsigned long) deltatsc;
414 }
415 
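/*
 * Same unit check for calc_pmtimer_ref(): the ACPI PM timer ticks at
 * PMTMR_TICKS_PER_SEC (3579545 Hz), so
 *
 *	tmp    = pm_ticks * 10^9 / 3579545		[ns elapsed]
 *	result = cycles * 10^6 / ns_elapsed		[kHz]
 */
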
416 #define CAL_MS		10
417 #define CAL_LATCH	(PIT_TICK_RATE / (1000 / CAL_MS))
418 #define CAL_PIT_LOOPS	1000
419 
420 #define CAL2_MS		50
421 #define CAL2_LATCH	(PIT_TICK_RATE / (1000 / CAL2_MS))
422 #define CAL2_PIT_LOOPS	5000
423 
424 
425 /*
426  * Try to calibrate the TSC against the Programmable
427  * Interrupt Timer and return the frequency of the TSC
428  * in kHz.
429  *
430  * Return ULONG_MAX on failure to calibrate.
431  */
432 static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin)
433 {
434 	u64 tsc, t1, t2, delta;
435 	unsigned long tscmin, tscmax;
436 	int pitcnt;
437 
438 	if (!has_legacy_pic()) {
439 		/*
440 		 * Relies on tsc_early_delay_calibrate() to have given us a
441 		 * semi-usable udelay(); wait for the same 50ms we would have
442 		 * spent in the PIT loop below.
443 		 */
444 		udelay(10 * USEC_PER_MSEC);
445 		udelay(10 * USEC_PER_MSEC);
446 		udelay(10 * USEC_PER_MSEC);
447 		udelay(10 * USEC_PER_MSEC);
448 		udelay(10 * USEC_PER_MSEC);
449 		return ULONG_MAX;
450 	}
451 
452 	/* Set the Gate high, disable speaker */
453 	outb((inb(0x61) & ~0x02) | 0x01, 0x61);
454 
455 	/*
456 	 * Set up PIT channel 2 for mode 0 (interrupt on terminal
457 	 * count), binary count. Write the latch value
458 	 * (LSB then MSB) to begin the countdown.
459 	 */
460 	outb(0xb0, 0x43);
461 	outb(latch & 0xff, 0x42);
462 	outb(latch >> 8, 0x42);
463 
464 	tsc = t1 = t2 = get_cycles();
465 
466 	pitcnt = 0;
467 	tscmax = 0;
468 	tscmin = ULONG_MAX;
469 	while ((inb(0x61) & 0x20) == 0) {
470 		t2 = get_cycles();
471 		delta = t2 - tsc;
472 		tsc = t2;
473 		if ((unsigned long) delta < tscmin)
474 			tscmin = (unsigned int) delta;
475 		if ((unsigned long) delta > tscmax)
476 			tscmax = (unsigned int) delta;
477 		pitcnt++;
478 	}
479 
480 	/*
481 	 * Sanity checks:
482 	 *
483 	 * If we were not able to read the PIT at least loopmin
484 	 * times, then we have been hit by a massive SMI.
485 	 *
486 	 * If the maximum is 10 times larger than the minimum,
487 	 * then we got hit by an SMI as well.
488 	 */
489 	if (pitcnt < loopmin || tscmax > 10 * tscmin)
490 		return ULONG_MAX;
491 
492 	/* Calculate the TSC frequency: cycles per millisecond == kHz */
493 	delta = t2 - t1;
494 	do_div(delta, ms);
495 	return delta;
496 }
497 
498 /*
499  * This reads the current MSB of the PIT counter, and
500  * checks if we are running on sufficiently fast and
501  * non-virtualized hardware.
502  *
503  * Our expectations are:
504  *
505  *  - the PIT is running at roughly 1.19MHz
506  *
507  *  - each IO is going to take about 1us on real hardware,
508  *    but we allow it to be much faster (by a factor of 10) or
509  *    _slightly_ slower (i.e. we allow up to a 2us read+counter
510  *    update - anything else implies an unacceptably slow CPU
511  *    or PIT for the fast calibration to work).
512  *
513  *  - with 256 PIT ticks to read the value, we have 214us to
514  *    see the same MSB (and overhead like doing a single TSC
515  *    read per MSB value etc).
516  *
517  *  - We're doing 2 reads per loop (LSB, MSB), and we expect
518  *    them each to take about a microsecond on real hardware.
519  *    So we expect a count value of around 100. But we'll be
520  *    generous, and accept anything over 50.
521  *
522  *  - if the PIT is stuck, and we see *many* more reads, we
523  *    return early (and the next caller of pit_expect_msb()
524  *    will then consider it a failure when it doesn't see the
525  *    next expected value).
526  *
527  * These expectations mean that we know that we have seen the
528  * transition from one expected value to another with a fairly
529  * high accuracy, and we didn't miss any events. We can thus
530  * use the TSC value at the transitions to calculate a pretty
531  * good value for the TSC frequency.
532  */
533 static inline int pit_verify_msb(unsigned char val)
534 {
535 	/* Ignore LSB */
536 	inb(0x42);
537 	return inb(0x42) == val;
538 }
539 
540 static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *deltap)
541 {
542 	int count;
543 	u64 tsc = 0, prev_tsc = 0;
544 
545 	for (count = 0; count < 50000; count++) {
546 		if (!pit_verify_msb(val))
547 			break;
548 		prev_tsc = tsc;
549 		tsc = get_cycles();
550 	}
551 	*deltap = get_cycles() - prev_tsc;
552 	*tscp = tsc;
553 
554 	/*
555 	 * We require _some_ success, but the quality control
556 	 * will be based on the error terms on the TSC values.
557 	 */
558 	return count > 5;
559 }
560 
561 /*
562  * How many MSB values do we want to see? We aim for
563  * a maximum error rate of 500ppm (in practice the
564  * real error is much smaller), but refuse to spend
565  * more than 50ms on it.
566  */
567 #define MAX_QUICK_PIT_MS 50
568 #define MAX_QUICK_PIT_ITERATIONS (MAX_QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256)
569 
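/*
 * Worked numbers, assuming the standard 1.193182 MHz PIT clock: each MSB
 * step is 256 PIT ticks ~= 214.6 us, and MAX_QUICK_PIT_ITERATIONS works
 * out to 50 * 1193182 / 1000 / 256 = 233 steps, i.e. ~50 ms in total.
 */
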
570 static unsigned long quick_pit_calibrate(void)
571 {
572 	int i;
573 	u64 tsc, delta;
574 	unsigned long d1, d2;
575 
576 	if (!has_legacy_pic())
577 		return 0;
578 
579 	/* Set the Gate high, disable speaker */
580 	outb((inb(0x61) & ~0x02) | 0x01, 0x61);
581 
582 	/*
583 	 * Counter 2, mode 0 (one-shot), binary count
584 	 *
585 	 * NOTE! Mode 2 decrements by two (and then the
586 	 * output is flipped each time, giving the same
587 	 * final output frequency as a decrement-by-one),
588 	 * so mode 0 is much better when looking at the
589 	 * individual counts.
590 	 */
591 	outb(0xb0, 0x43);
592 
593 	/* Start at 0xffff */
594 	outb(0xff, 0x42);
595 	outb(0xff, 0x42);
596 
597 	/*
598 	 * The PIT starts counting at the next edge, so we
599 	 * need to delay for a microsecond. The easiest way
600 	 * to do that is to just read back the 16-bit counter
601 	 * once from the PIT.
602 	 */
603 	pit_verify_msb(0);
604 
605 	if (pit_expect_msb(0xff, &tsc, &d1)) {
606 		for (i = 1; i <= MAX_QUICK_PIT_ITERATIONS; i++) {
607 			if (!pit_expect_msb(0xff-i, &delta, &d2))
608 				break;
609 
610 			delta -= tsc;
611 
612 			/*
613 			 * Extrapolate the error and fail fast if the error will
614 			 * never be below 500 ppm.
615 			 */
616 			if (i == 1 &&
617 			    d1 + d2 >= (delta * MAX_QUICK_PIT_ITERATIONS) >> 11)
618 				return 0;
619 
620 			/*
621 			 * Iterate until the error is less than 500 ppm
622 			 */
623 			if (d1+d2 >= delta >> 11)
624 				continue;
625 
626 			/*
627 			 * Check the PIT one more time to verify that
628 			 * all TSC reads were stable wrt the PIT.
629 			 *
630 			 * This also guarantees serialization of the
631 			 * last cycle read ('d2') in pit_expect_msb.
632 			 */
633 			if (!pit_verify_msb(0xfe - i))
634 				break;
635 			goto success;
636 		}
637 	}
638 	pr_info("Fast TSC calibration failed\n");
639 	return 0;
640 
641 success:
642 	/*
643 	 * Ok, if we get here, then we've seen the
644 	 * MSB of the PIT decrement 'i' times, and the
645 	 * error has shrunk to less than 500 ppm.
646 	 *
647 	 * As a result, we can depend on there not being
648 	 * any odd delays anywhere, and the TSC reads are
649 	 * reliable (within the error).
650 	 *
651 	 * kHz = ticks / time-in-seconds / 1000;
652 	 * kHz = (t2 - t1) / (I * 256 / PIT_TICK_RATE) / 1000
653 	 * kHz = ((t2 - t1) * PIT_TICK_RATE) / (I * 256 * 1000)
654 	 */
655 	delta *= PIT_TICK_RATE;
656 	do_div(delta, i*256*1000);
657 	pr_info("Fast TSC calibration using PIT\n");
658 	return delta;
659 }
660 
661 /**
662  * native_calibrate_tsc - determine TSC frequency
663  * Determine TSC frequency via CPUID, else return 0.
664  */
665 unsigned long native_calibrate_tsc(void)
666 {
667 	unsigned int eax_denominator, ebx_numerator, ecx_hz, edx;
668 	unsigned int crystal_khz;
669 
670 	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
671 		return 0;
672 
673 	if (boot_cpu_data.cpuid_level < CPUID_LEAF_TSC)
674 		return 0;
675 
676 	eax_denominator = ebx_numerator = ecx_hz = edx = 0;
677 
678 	/* CPUID 15H TSC/Crystal ratio, plus optionally Crystal Hz */
679 	cpuid(CPUID_LEAF_TSC, &eax_denominator, &ebx_numerator, &ecx_hz, &edx);
680 
681 	if (ebx_numerator == 0 || eax_denominator == 0)
682 		return 0;
683 
684 	crystal_khz = ecx_hz / 1000;
685 
686 	/*
687 	 * Denverton SoCs don't report crystal clock, and also don't support
688 	 * CPUID_LEAF_FREQ for the calculation below, so hardcode the 25MHz
689 	 * crystal clock.
690 	 */
691 	if (crystal_khz == 0 &&
692 			boot_cpu_data.x86_vfm == INTEL_ATOM_GOLDMONT_D)
693 		crystal_khz = 25000;
694 
695 	/*
696 	 * TSC frequency reported directly by CPUID is a "hardware reported"
697 	 * frequency and is the most accurate one we have so far. This
698 	 * is considered a known frequency.
699 	 */
700 	if (crystal_khz != 0)
701 		setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
702 
703 	/*
704 	 * Some Intel SoCs like Skylake and Kabylake don't report the crystal
705 	 * clock, but we can easily calculate it to a high degree of accuracy
706 	 * by considering the crystal ratio and the CPU speed.
707 	 */
708 	if (crystal_khz == 0 && boot_cpu_data.cpuid_level >= CPUID_LEAF_FREQ) {
709 		unsigned int eax_base_mhz, ebx, ecx, edx;
710 
711 		cpuid(CPUID_LEAF_FREQ, &eax_base_mhz, &ebx, &ecx, &edx);
712 		crystal_khz = eax_base_mhz * 1000 *
713 			eax_denominator / ebx_numerator;
714 	}
715 
716 	if (crystal_khz == 0)
717 		return 0;
718 
719 	/*
720 	 * For Atom SoCs TSC is the only reliable clocksource.
721 	 * Mark TSC reliable so no watchdog on it.
722 	 */
723 	if (boot_cpu_data.x86_vfm == INTEL_ATOM_GOLDMONT)
724 		setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
725 
726 #ifdef CONFIG_X86_LOCAL_APIC
727 	/*
728 	 * The local APIC appears to be fed by the core crystal clock
729 	 * (which sounds entirely sensible). We can set the global
730 	 * lapic_timer_period here to avoid having to calibrate the APIC
731 	 * timer later.
732 	 */
733 	lapic_timer_period = crystal_khz * 1000 / HZ;
734 #endif
735 
736 	return crystal_khz * ebx_numerator / eax_denominator;
737 }
738 
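/*
 * Illustrative CPUID.15H example (made-up but plausible values): with
 * EAX (denominator) = 2, EBX (numerator) = 250 and ECX = 24000000 Hz,
 * the crystal is 24 MHz and the function returns
 *
 *	24000 kHz * 250 / 2 = 3000000 kHz (a 3.0 GHz TSC)
 */
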
739 static unsigned long cpu_khz_from_cpuid(void)
740 {
741 	unsigned int eax_base_mhz, ebx_max_mhz, ecx_bus_mhz, edx;
742 
743 	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
744 		return 0;
745 
746 	if (boot_cpu_data.cpuid_level < CPUID_LEAF_FREQ)
747 		return 0;
748 
749 	eax_base_mhz = ebx_max_mhz = ecx_bus_mhz = edx = 0;
750 
751 	cpuid(CPUID_LEAF_FREQ, &eax_base_mhz, &ebx_max_mhz, &ecx_bus_mhz, &edx);
752 
753 	return eax_base_mhz * 1000;
754 }
755 
756 /*
757  * Calibrate the CPU using the PIT, HPET and ACPI PM timer methods. They
758  * are available later in boot, after ACPI has been initialized.
759  */
760 static unsigned long pit_hpet_ptimer_calibrate_cpu(void)
761 {
762 	u64 tsc1, tsc2, delta, ref1, ref2;
763 	unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
764 	unsigned long flags, latch, ms;
765 	int hpet = is_hpet_enabled(), i, loopmin;
766 
767 	/*
768 	 * Run 3 calibration loops to get the lowest frequency value
769 	 * (the best estimate). We use two different calibration modes
770 	 * here:
771 	 *
772 	 * 1) PIT loop. We set the PIT Channel 2 to oneshot mode and
773 	 * load a timeout of 10ms (50ms on retry). We read the time right after we
774 	 * started the timer and wait until the PIT count down reaches
775 	 * zero. In each wait loop iteration we read the TSC and check
776 	 * the delta to the previous read. We keep track of the min
777 	 * and max values of that delta. The delta is mostly defined
778 	 * by the IO time of the PIT access, so we can detect when
779 	 * any disturbance happened between the two reads. If the
780 	 * maximum time is significantly larger than the minimum time,
781 	 * then we discard the result and have another try.
782 	 *
783 	 * 2) Reference counter. If available we use the HPET or the
784 	 * PMTIMER as a reference to check the sanity of that value.
785 	 * We use separate TSC readouts and check inside of the
786 	 * reference read for any possible disturbance. We discard
787 	 * disturbed values here as well. We do that around the PIT
788 	 * calibration delay loop as we have to wait for a certain
789 	 * amount of time anyway.
790 	 */
791 
792 	/* Preset PIT loop values */
793 	latch = CAL_LATCH;
794 	ms = CAL_MS;
795 	loopmin = CAL_PIT_LOOPS;
796 
797 	for (i = 0; i < 3; i++) {
798 		unsigned long tsc_pit_khz;
799 
800 		/*
801 		 * Read the start value and the reference count of
802 		 * hpet/pmtimer when available. Then do the PIT
803 		 * calibration, which will take at least 10ms, and
804 		 * read the end value.
805 		 */
806 		local_irq_save(flags);
807 		tsc1 = tsc_read_refs(&ref1, hpet);
808 		tsc_pit_khz = pit_calibrate_tsc(latch, ms, loopmin);
809 		tsc2 = tsc_read_refs(&ref2, hpet);
810 		local_irq_restore(flags);
811 
812 		/* Pick the lowest PIT TSC calibration so far */
813 		tsc_pit_min = min(tsc_pit_min, tsc_pit_khz);
814 
815 		/* hpet or pmtimer available ? */
816 		if (ref1 == ref2)
817 			continue;
818 
819 		/* Check whether the sampling was disturbed */
820 		if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX)
821 			continue;
822 
823 		tsc2 = (tsc2 - tsc1) * 1000000LL;
824 		if (hpet)
825 			tsc2 = calc_hpet_ref(tsc2, ref1, ref2);
826 		else
827 			tsc2 = calc_pmtimer_ref(tsc2, ref1, ref2);
828 
829 		tsc_ref_min = min(tsc_ref_min, (unsigned long) tsc2);
830 
831 		/* Check the reference deviation */
832 		delta = ((u64) tsc_pit_min) * 100;
833 		do_div(delta, tsc_ref_min);
834 
835 		/*
836 		 * If both calibration results are inside a 10% window
837 		 * then we can be sure that the calibration
838 		 * succeeded. We break out of the loop right away and
839 		 * use the reference value, as it is more precise.
840 		 */
841 		if (delta >= 90 && delta <= 110) {
842 			pr_info("PIT calibration matches %s. %d loops\n",
843 				hpet ? "HPET" : "PMTIMER", i + 1);
844 			return tsc_ref_min;
845 		}
846 
847 		/*
848 		 * Check whether PIT failed more than once. This
849 		 * happens in virtualized environments. We need to
850 		 * give the virtual PC a slightly longer timeframe for
851 		 * the HPET/PMTIMER to make the result precise.
852 		 */
853 		if (i == 1 && tsc_pit_min == ULONG_MAX) {
854 			latch = CAL2_LATCH;
855 			ms = CAL2_MS;
856 			loopmin = CAL2_PIT_LOOPS;
857 		}
858 	}
859 
860 	/*
861 	 * Now check the results.
862 	 */
863 	if (tsc_pit_min == ULONG_MAX) {
864 		/* PIT gave no useful value */
865 		pr_warn("Unable to calibrate against PIT\n");
866 
867 		/* We don't have an alternative source, disable TSC */
868 		if (!hpet && !ref1 && !ref2) {
869 			pr_notice("No reference (HPET/PMTIMER) available\n");
870 			return 0;
871 		}
872 
873 		/* The alternative source failed as well, disable TSC */
874 		if (tsc_ref_min == ULONG_MAX) {
875 			pr_warn("HPET/PMTIMER calibration failed\n");
876 			return 0;
877 		}
878 
879 		/* Use the alternative source */
880 		pr_info("using %s reference calibration\n",
881 			hpet ? "HPET" : "PMTIMER");
882 
883 		return tsc_ref_min;
884 	}
885 
886 	/* We don't have an alternative source, use the PIT calibration value */
887 	if (!hpet && !ref1 && !ref2) {
888 		pr_info("Using PIT calibration value\n");
889 		return tsc_pit_min;
890 	}
891 
892 	/* The alternative source failed, use the PIT calibration value */
893 	if (tsc_ref_min == ULONG_MAX) {
894 		pr_warn("HPET/PMTIMER calibration failed. Using PIT calibration.\n");
895 		return tsc_pit_min;
896 	}
897 
898 	/*
899 	 * The calibration values differ too much. When in doubt, we use
900 	 * the PIT value as we know that there are PMTIMERs around
901 	 * running at double speed. At least we let the user know:
902 	 */
903 	pr_warn("PIT calibration deviates from %s: %lu %lu\n",
904 		hpet ? "HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min);
905 	pr_info("Using PIT calibration value\n");
906 	return tsc_pit_min;
907 }
908 
909 /**
910  * native_calibrate_cpu_early - calibrate the CPU early in boot
911  */
912 unsigned long native_calibrate_cpu_early(void)
913 {
914 	unsigned long flags, fast_calibrate = cpu_khz_from_cpuid();
915 
916 	if (!fast_calibrate)
917 		fast_calibrate = cpu_khz_from_msr();
918 	if (!fast_calibrate) {
919 		local_irq_save(flags);
920 		fast_calibrate = quick_pit_calibrate();
921 		local_irq_restore(flags);
922 	}
923 	return fast_calibrate;
924 }
925 
926 
927 /**
928  * native_calibrate_cpu - calibrate the cpu
929  */
930 static unsigned long native_calibrate_cpu(void)
931 {
932 	unsigned long tsc_freq = native_calibrate_cpu_early();
933 
934 	if (!tsc_freq)
935 		tsc_freq = pit_hpet_ptimer_calibrate_cpu();
936 
937 	return tsc_freq;
938 }
939 
940 void recalibrate_cpu_khz(void)
941 {
942 #ifndef CONFIG_SMP
943 	unsigned long cpu_khz_old = cpu_khz;
944 
945 	if (!boot_cpu_has(X86_FEATURE_TSC))
946 		return;
947 
948 	cpu_khz = x86_platform.calibrate_cpu();
949 	tsc_khz = x86_platform.calibrate_tsc();
950 	if (tsc_khz == 0)
951 		tsc_khz = cpu_khz;
952 	else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz)
953 		cpu_khz = tsc_khz;
954 	cpu_data(0).loops_per_jiffy = cpufreq_scale(cpu_data(0).loops_per_jiffy,
955 						    cpu_khz_old, cpu_khz);
956 #endif
957 }
958 EXPORT_SYMBOL_GPL(recalibrate_cpu_khz);
959 
960 
961 static unsigned long long cyc2ns_suspend;
962 
963 void tsc_save_sched_clock_state(void)
964 {
965 	if (!static_branch_likely(&__use_tsc) && !sched_clock_stable())
966 		return;
967 
968 	cyc2ns_suspend = sched_clock();
969 }
970 
971 /*
972  * Even on processors with invariant TSC, the TSC gets reset in some of the
973  * ACPI system sleep states. And on some systems the BIOS seems to reinit the
974  * TSC to an arbitrary value (still sync'd across CPUs) during resume from
975  * such sleep states. To cope with this, recompute the cyc2ns_offset for each
976  * CPU so that sched_clock() continues from the point where it left off
977  * during suspend.
978  */
979 void tsc_restore_sched_clock_state(void)
980 {
981 	unsigned long long offset;
982 	unsigned long flags;
983 	int cpu;
984 
985 	if (!static_branch_likely(&__use_tsc) && !sched_clock_stable())
986 		return;
987 
988 	local_irq_save(flags);
989 
990 	/*
991 	 * We're coming out of suspend, there's no concurrency yet; don't
992 	 * bother being nice about the RCU stuff, just write to both
993 	 * data fields.
994 	 */
995 
996 	this_cpu_write(cyc2ns.data[0].cyc2ns_offset, 0);
997 	this_cpu_write(cyc2ns.data[1].cyc2ns_offset, 0);
998 
999 	offset = cyc2ns_suspend - sched_clock();
1000 
1001 	for_each_possible_cpu(cpu) {
1002 		per_cpu(cyc2ns.data[0].cyc2ns_offset, cpu) = offset;
1003 		per_cpu(cyc2ns.data[1].cyc2ns_offset, cpu) = offset;
1004 	}
1005 
1006 	local_irq_restore(flags);
1007 }
1008 
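/*
 * Why the above works, as a sketch: with both offsets zeroed,
 * sched_clock() returns the raw conversion conv(tsc_now). Setting
 *
 *	cyc2ns_offset = cyc2ns_suspend - conv(tsc_now)
 *
 * makes subsequent reads return conv(tsc) + offset, which equals
 * cyc2ns_suspend at the point of the update, so time resumes exactly
 * where it stopped before suspend.
 */
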
1009 #ifdef CONFIG_CPU_FREQ
1010 /*
1011  * Frequency scaling support. Adjust the TSC-based timer when the CPU frequency
1012  * changes.
1013  *
1014  * NOTE: On SMP the situation is not fixable in general, so simply mark the TSC
1015  * as unstable and give up in those cases.
1016  *
1017  * Should fix up last_tsc too. Currently gettimeofday in the
1018  * first tick after the change will be slightly wrong.
1019  */
1020 
1021 static unsigned int  ref_freq;
1022 static unsigned long loops_per_jiffy_ref;
1023 static unsigned long tsc_khz_ref;
1024 
1025 static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
1026 				void *data)
1027 {
1028 	struct cpufreq_freqs *freq = data;
1029 
1030 	if (num_online_cpus() > 1) {
1031 		mark_tsc_unstable("cpufreq changes on SMP");
1032 		return 0;
1033 	}
1034 
1035 	if (!ref_freq) {
1036 		ref_freq = freq->old;
1037 		loops_per_jiffy_ref = boot_cpu_data.loops_per_jiffy;
1038 		tsc_khz_ref = tsc_khz;
1039 	}
1040 
1041 	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
1042 	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
1043 		boot_cpu_data.loops_per_jiffy =
1044 			cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
1045 
1046 		tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
1047 		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
1048 			mark_tsc_unstable("cpufreq changes");
1049 
1050 		set_cyc2ns_scale(tsc_khz, freq->policy->cpu, rdtsc());
1051 	}
1052 
1053 	return 0;
1054 }
1055 
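/*
 * Worked example with illustrative numbers: cpufreq_scale(old, div, mult)
 * computes old * mult / div, so dropping a pre-constant-TSC CPU from
 * ref_freq = 2000000 kHz to freq->new = 1000000 kHz halves the TSC rate:
 *
 *	tsc_khz = cpufreq_scale(2000000, 2000000, 1000000) = 1000000
 */
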
1056 static struct notifier_block time_cpufreq_notifier_block = {
1057 	.notifier_call  = time_cpufreq_notifier
1058 };
1059 
1060 static int __init cpufreq_register_tsc_scaling(void)
1061 {
1062 	if (!boot_cpu_has(X86_FEATURE_TSC))
1063 		return 0;
1064 	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
1065 		return 0;
1066 	cpufreq_register_notifier(&time_cpufreq_notifier_block,
1067 				CPUFREQ_TRANSITION_NOTIFIER);
1068 	return 0;
1069 }
1070 
1071 core_initcall(cpufreq_register_tsc_scaling);
1072 
1073 #endif /* CONFIG_CPU_FREQ */
1074 
1075 #define ART_MIN_DENOMINATOR (1)
1076 
1077 /*
1078  * If ART is present, detect the numerator:denominator used to convert ART to TSC
1079  */
1080 static void __init detect_art(void)
1081 {
1082 	unsigned int unused;
1083 
1084 	if (boot_cpu_data.cpuid_level < CPUID_LEAF_TSC)
1085 		return;
1086 
1087 	/*
1088 	 * Don't enable ART in a VM; non-stop TSC and TSC_ADJUST are
1089 	 * required, and TSC counter resets must not occur asynchronously.
1090 	 */
1091 	if (boot_cpu_has(X86_FEATURE_HYPERVISOR) ||
1092 	    !boot_cpu_has(X86_FEATURE_NONSTOP_TSC) ||
1093 	    !boot_cpu_has(X86_FEATURE_TSC_ADJUST) ||
1094 	    tsc_async_resets)
1095 		return;
1096 
1097 	cpuid(CPUID_LEAF_TSC, &art_base_clk.denominator,
1098 	      &art_base_clk.numerator, &art_base_clk.freq_khz, &unused);
1099 
1100 	art_base_clk.freq_khz /= KHZ;
1101 	if (art_base_clk.denominator < ART_MIN_DENOMINATOR)
1102 		return;
1103 
1104 	rdmsrq(MSR_IA32_TSC_ADJUST, art_base_clk.offset);
1105 
1106 	/* Make this sticky over multiple CPU init calls */
1107 	setup_force_cpu_cap(X86_FEATURE_ART);
1108 }
1109 
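/*
 * With these parameters an ART domain timestamp can be converted to the
 * TSC domain as (sketch, following the SDM definition):
 *
 *	TSC = (ART * numerator) / denominator + IA32_TSC_ADJUST
 *
 * which is what the clocksource_base fields filled in above describe.
 */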
1110 
1111 /* clocksource code */
1112 
1113 static void tsc_resume(struct clocksource *cs)
1114 {
1115 	tsc_verify_tsc_adjust(true);
1116 }
1117 
1118 /*
1119  * We used to compare the TSC to the cycle_last value in the clocksource
1120  * structure to avoid a nasty time-warp. This can be observed in a
1121  * very small window right after one CPU updated cycle_last under
1122  * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
1123  * is smaller than the cycle_last reference value due to a TSC which
1124  * is slightly behind. This delta is nowhere else observable, but in
1125  * that case it results in a forward time jump in the range of hours
1126  * due to the unsigned delta calculation of the time keeping core
1127  * code, which is necessary to support wrapping clocksources like pm
1128  * timer.
1129  *
1130  * This sanity check is now done in the core timekeeping code,
1131  * checking the result of read_tsc() - cycle_last for being negative.
1132  * That works because CLOCKSOURCE_MASK(64) does not mask out any bit.
1133  */
1134 static u64 read_tsc(struct clocksource *cs)
1135 {
1136 	return (u64)rdtsc_ordered();
1137 }
1138 
1139 static void tsc_cs_mark_unstable(struct clocksource *cs)
1140 {
1141 	if (tsc_unstable)
1142 		return;
1143 
1144 	tsc_unstable = 1;
1145 	if (using_native_sched_clock())
1146 		clear_sched_clock_stable();
1147 	pr_info("Marking TSC unstable due to clocksource watchdog\n");
1148 }
1149 
1150 static void tsc_cs_tick_stable(struct clocksource *cs)
1151 {
1152 	if (tsc_unstable)
1153 		return;
1154 
1155 	if (using_native_sched_clock())
1156 		sched_clock_tick_stable();
1157 }
1158 
1159 static int tsc_cs_enable(struct clocksource *cs)
1160 {
1161 	vclocks_set_used(VDSO_CLOCKMODE_TSC);
1162 	return 0;
1163 }
1164 
1165 /*
1166  * .mask MUST be CLOCKSOURCE_MASK(64). See comment above read_tsc()
1167  */
1168 static struct clocksource clocksource_tsc_early = {
1169 	.name			= "tsc-early",
1170 	.rating			= 299,
1171 	.read			= read_tsc,
1172 	.mask			= CLOCKSOURCE_MASK(64),
1173 	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
1174 				  CLOCK_SOURCE_MUST_VERIFY,
1175 	.id			= CSID_X86_TSC_EARLY,
1176 	.vdso_clock_mode	= VDSO_CLOCKMODE_TSC,
1177 	.enable			= tsc_cs_enable,
1178 	.resume			= tsc_resume,
1179 	.mark_unstable		= tsc_cs_mark_unstable,
1180 	.tick_stable		= tsc_cs_tick_stable,
1181 	.list			= LIST_HEAD_INIT(clocksource_tsc_early.list),
1182 };
1183 
1184 /*
1185  * Must mark VALID_FOR_HRES early such that when we unregister tsc_early
1186  * this one will immediately take over. We will only register if TSC has
1187  * been found good.
1188  */
1189 static struct clocksource clocksource_tsc = {
1190 	.name			= "tsc",
1191 	.rating			= 300,
1192 	.read			= read_tsc,
1193 	.mask			= CLOCKSOURCE_MASK(64),
1194 	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
1195 				  CLOCK_SOURCE_CAN_INLINE_READ |
1196 				  CLOCK_SOURCE_MUST_VERIFY |
1197 				  CLOCK_SOURCE_HAS_COUPLED_CLOCK_EVENT,
1198 	.id			= CSID_X86_TSC,
1199 	.vdso_clock_mode	= VDSO_CLOCKMODE_TSC,
1200 	.enable			= tsc_cs_enable,
1201 	.resume			= tsc_resume,
1202 	.mark_unstable		= tsc_cs_mark_unstable,
1203 	.tick_stable		= tsc_cs_tick_stable,
1204 	.list			= LIST_HEAD_INIT(clocksource_tsc.list),
1205 };
1206 
1207 void mark_tsc_unstable(char *reason)
1208 {
1209 	if (tsc_unstable)
1210 		return;
1211 
1212 	tsc_unstable = 1;
1213 	if (using_native_sched_clock())
1214 		clear_sched_clock_stable();
1215 	pr_info("Marking TSC unstable due to %s\n", reason);
1216 
1217 	clocksource_mark_unstable(&clocksource_tsc_early);
1218 	clocksource_mark_unstable(&clocksource_tsc);
1219 }
1220 
1221 EXPORT_SYMBOL_GPL(mark_tsc_unstable);
1222 
1223 static void __init tsc_disable_clocksource_watchdog(void)
1224 {
1225 	if (tsc_watchdog == TSC_WATCHDOG_ON)
1226 		return;
1227 	clocksource_tsc_early.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
1228 	clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
1229 }
1230 
1231 static void __init check_system_tsc_reliable(void)
1232 {
1233 #if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
1234 	if (is_geode_lx()) {
1235 		/* RTSC counts during suspend */
1236 #define RTSC_SUSP 0x100
1237 		unsigned long res_low, res_high;
1238 
1239 		rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
1240 		/* Geode_LX - the OLPC CPU has a very reliable TSC */
1241 		if (res_low & RTSC_SUSP)
1242 			tsc_clocksource_reliable = 1;
1243 	}
1244 #endif
1245 	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
1246 		tsc_clocksource_reliable = 1;
1247 
1248 	/*
1249 	 * Disable the clocksource watchdog when the system has:
1250 	 *  - TSC running at constant frequency
1251 	 *  - TSC which does not stop in C-States
1252 	 *  - the TSC_ADJUST register, which allows detection of even minimal
1253 	 *    modifications
1254 	 *  - not more than four packages
1255 	 */
1256 	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) &&
1257 	    boot_cpu_has(X86_FEATURE_NONSTOP_TSC) &&
1258 	    boot_cpu_has(X86_FEATURE_TSC_ADJUST) &&
1259 	    topology_max_packages() <= 4)
1260 		tsc_disable_clocksource_watchdog();
1261 }
1262 
1263 /*
1264  * Make an educated guess whether the TSC is trustworthy and synchronized
1265  * over all CPUs.
1266  */
1267 int unsynchronized_tsc(void)
1268 {
1269 	if (!boot_cpu_has(X86_FEATURE_TSC) || tsc_unstable)
1270 		return 1;
1271 
1272 #ifdef CONFIG_SMP
1273 	if (apic_is_clustered_box())
1274 		return 1;
1275 #endif
1276 
1277 	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
1278 		return 0;
1279 
1280 	if (tsc_clocksource_reliable)
1281 		return 0;
1282 	/*
1283 	 * Intel systems are normally all synchronized.
1284 	 * Exceptions must mark TSC as unstable:
1285 	 */
1286 	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
1287 		/* assume multi socket systems are not synchronized: */
1288 		if (topology_max_packages() > 1)
1289 			return 1;
1290 	}
1291 
1292 	return 0;
1293 }
1294 
1295 static void tsc_refine_calibration_work(struct work_struct *work);
1296 static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work);
1297 /**
1298  * tsc_refine_calibration_work - Further refine tsc freq calibration
1299  * @work: ignored.
1300  *
1301  * This function uses delayed work over a period of a
1302  * second to further refine the TSC freq value. Since this is
1303  * timer-based, instead of loop-based, we don't block the boot
1304  * process while this longer calibration is done.
1305  *
1306  * If there are any calibration anomalies (too many SMIs, etc),
1307  * or the refined calibration is off by more than 1% from the fast early
1308  * calibration, we throw out the new calibration and use the
1309  * early calibration.
1310  */
1311 static void tsc_refine_calibration_work(struct work_struct *work)
1312 {
1313 	static u64 tsc_start = ULLONG_MAX, ref_start;
1314 	static int hpet;
1315 	u64 tsc_stop, ref_stop, delta;
1316 	unsigned long freq;
1317 	int cpu;
1318 
1319 	/* Don't bother refining TSC on unstable systems */
1320 	if (tsc_unstable)
1321 		goto unreg;
1322 
1323 	/*
1324 	 * Since the work is started early in boot, we may be
1325 	 * delayed the first time we expire. So schedule the work
1326 	 * again once we know timers are working.
1327 	 */
1328 	if (tsc_start == ULLONG_MAX) {
1329 restart:
1330 		/*
1331 		 * Only set hpet once, to avoid mixing hardware
1332 		 * if the hpet becomes enabled later.
1333 		 */
1334 		hpet = is_hpet_enabled();
1335 		tsc_start = tsc_read_refs(&ref_start, hpet);
1336 		schedule_delayed_work(&tsc_irqwork, HZ);
1337 		return;
1338 	}
1339 
1340 	tsc_stop = tsc_read_refs(&ref_stop, hpet);
1341 
1342 	/* hpet or pmtimer available ? */
1343 	if (ref_start == ref_stop)
1344 		goto out;
1345 
1346 	/* Check whether the sampling was disturbed */
1347 	if (tsc_stop == ULLONG_MAX)
1348 		goto restart;
1349 
1350 	delta = tsc_stop - tsc_start;
1351 	delta *= 1000000LL;
1352 	if (hpet)
1353 		freq = calc_hpet_ref(delta, ref_start, ref_stop);
1354 	else
1355 		freq = calc_pmtimer_ref(delta, ref_start, ref_stop);
1356 
1357 	/* Will hit this only if tsc_force_recalibrate has been set */
1358 	if (boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) {
1359 
1360 		/* Warn if the deviation exceeds 500 ppm */
1361 		if (abs(tsc_khz - freq) > (tsc_khz >> 11)) {
1362 			pr_warn("Warning: TSC freq calibrated by CPUID/MSR differs from what is calibrated by HW timer, please check with vendor!!\n");
1363 			pr_info("Previous calibrated TSC freq:\t %lu.%03lu MHz\n",
1364 				(unsigned long)tsc_khz / 1000,
1365 				(unsigned long)tsc_khz % 1000);
1366 		}
1367 
1368 		pr_info("TSC freq recalibrated by [%s]:\t %lu.%03lu MHz\n",
1369 			hpet ? "HPET" : "PM_TIMER",
1370 			(unsigned long)freq / 1000,
1371 			(unsigned long)freq % 1000);
1372 
1373 		return;
1374 	}
1375 
1376 	/* Make sure we're within 1% */
1377 	if (abs(tsc_khz - freq) > tsc_khz/100)
1378 		goto out;
1379 
1380 	tsc_khz = freq;
1381 	pr_info("Refined TSC clocksource calibration: %lu.%03lu MHz\n",
1382 		(unsigned long)tsc_khz / 1000,
1383 		(unsigned long)tsc_khz % 1000);
1384 
1385 	clocksource_tsc.flags |= CLOCK_SOURCE_CALIBRATED;
1386 
1387 	/* Inform the TSC deadline clockevent devices about the recalibration */
1388 	lapic_update_tsc_freq();
1389 
1390 	/* Update the sched_clock() rate to match the clocksource one */
1391 	for_each_possible_cpu(cpu)
1392 		set_cyc2ns_scale(tsc_khz, cpu, tsc_stop);
1393 
1394 out:
1395 	if (tsc_unstable)
1396 		goto unreg;
1397 
1398 	if (boot_cpu_has(X86_FEATURE_ART)) {
1399 		have_art = true;
1400 		clocksource_tsc.base = &art_base_clk;
1401 	}
1402 
1403 	/*
1404 	 * Transfer the valid for high resolution flag if it was set on the
1405 	 * early TSC already. That guarantees that there is no intermediate
1406 	 * clocksource selected once the early TSC is unregistered.
1407 	 */
1408 	if (clocksource_tsc_early.flags & CLOCK_SOURCE_VALID_FOR_HRES)
1409 		clocksource_tsc.flags |= CLOCK_SOURCE_VALID_FOR_HRES;
1410 
1411 	clocksource_register_khz(&clocksource_tsc, tsc_khz);
1412 unreg:
1413 	clocksource_unregister(&clocksource_tsc_early);
1414 }
1415 
1416 
1417 static int __init init_tsc_clocksource(void)
1418 {
1419 	if (!boot_cpu_has(X86_FEATURE_TSC) || !tsc_khz)
1420 		return 0;
1421 
1422 	if (tsc_unstable) {
1423 		clocksource_unregister(&clocksource_tsc_early);
1424 		return 0;
1425 	}
1426 
1427 	if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
1428 		clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
1429 
1430 	/*
1431 	 * When TSC frequency is known (retrieved via MSR or CPUID), we skip
1432 	 * the refined calibration and directly register it as a clocksource.
1433 	 */
1434 	if (boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) {
1435 		if (boot_cpu_has(X86_FEATURE_ART)) {
1436 			have_art = true;
1437 			clocksource_tsc.base = &art_base_clk;
1438 		}
1439 		clocksource_register_khz(&clocksource_tsc, tsc_khz);
1440 		clocksource_unregister(&clocksource_tsc_early);
1441 
1442 		if (!tsc_force_recalibrate)
1443 			return 0;
1444 	}
1445 
1446 	schedule_delayed_work(&tsc_irqwork, 0);
1447 	return 0;
1448 }
1449 /*
1450  * We use device_initcall here to ensure we run after the hpet
1451  * is fully initialized, which may occur at fs_initcall time.
1452  */
1453 device_initcall(init_tsc_clocksource);
1454 
1455 static bool __init determine_cpu_tsc_frequencies(bool early)
1456 {
1457 	/* Make sure that cpu and tsc are not already calibrated */
1458 	WARN_ON(cpu_khz || tsc_khz);
1459 
1460 	if (early) {
1461 		cpu_khz = x86_platform.calibrate_cpu();
1462 		if (tsc_early_khz)
1463 			tsc_khz = tsc_early_khz;
1464 		else
1465 			tsc_khz = x86_platform.calibrate_tsc();
1466 	} else {
1467 		/* We should not be here with non-native cpu calibration */
1468 		WARN_ON(x86_platform.calibrate_cpu != native_calibrate_cpu);
1469 		cpu_khz = pit_hpet_ptimer_calibrate_cpu();
1470 	}
1471 
1472 	/*
1473 	 * Trust non-zero tsc_khz as authoritative,
1474 	 * and use it to sanity check cpu_khz,
1475 	 * which will be off if the system timer is off.
1476 	 */
1477 	if (tsc_khz == 0)
1478 		tsc_khz = cpu_khz;
1479 	else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz)
1480 		cpu_khz = tsc_khz;
1481 
1482 	if (tsc_khz == 0)
1483 		return false;
1484 
1485 	pr_info("Detected %lu.%03lu MHz processor\n",
1486 		(unsigned long)cpu_khz / KHZ,
1487 		(unsigned long)cpu_khz % KHZ);
1488 
1489 	if (cpu_khz != tsc_khz) {
1490 		pr_info("Detected %lu.%03lu MHz TSC\n",
1491 			(unsigned long)tsc_khz / KHZ,
1492 			(unsigned long)tsc_khz % KHZ);
1493 	}
1494 	return true;
1495 }
1496 
1497 static unsigned long __init get_loops_per_jiffy(void)
1498 {
1499 	u64 lpj = (u64)tsc_khz * KHZ;
1500 
1501 	do_div(lpj, HZ);
1502 	return lpj;
1503 }
1504 
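/*
 * Worked example with illustrative numbers: for tsc_khz = 3000000 (3 GHz)
 * and HZ = 250, loops_per_jiffy = 3 * 10^9 / 250 = 12000000 TSC cycles
 * per tick.
 */
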
1505 static void __init tsc_enable_sched_clock(void)
1506 {
1507 	loops_per_jiffy = get_loops_per_jiffy();
1508 	use_tsc_delay();
1509 
1510 	/* Sanitize TSC ADJUST before cyc2ns gets initialized */
1511 	tsc_store_and_check_tsc_adjust(true);
1512 	cyc2ns_init_boot_cpu();
1513 	static_branch_enable(&__use_tsc);
1514 }
1515 
1516 void __init tsc_early_init(void)
1517 {
1518 	if (!boot_cpu_has(X86_FEATURE_TSC))
1519 		return;
1520 	/* Don't change UV TSC multi-chassis synchronization */
1521 	if (is_early_uv_system())
1522 		return;
1523 
1524 	snp_secure_tsc_init();
1525 
1526 	if (!determine_cpu_tsc_frequencies(true))
1527 		return;
1528 	tsc_enable_sched_clock();
1529 }
1530 
1531 void __init tsc_init(void)
1532 {
1533 	if (!cpu_feature_enabled(X86_FEATURE_TSC)) {
1534 		setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
1535 		return;
1536 	}
1537 
1538 	/*
1539 	 * native_calibrate_cpu_early can only calibrate using methods that are
1540 	 * available early in boot.
1541 	 */
1542 	if (x86_platform.calibrate_cpu == native_calibrate_cpu_early)
1543 		x86_platform.calibrate_cpu = native_calibrate_cpu;
1544 
1545 	if (!tsc_khz) {
1546 		/* We failed to determine frequencies earlier, try again */
1547 		if (!determine_cpu_tsc_frequencies(false)) {
1548 			mark_tsc_unstable("could not calculate TSC khz");
1549 			setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
1550 			return;
1551 		}
1552 		tsc_enable_sched_clock();
1553 	}
1554 
1555 	cyc2ns_init_secondary_cpus();
1556 
1557 	if (!no_sched_irq_time)
1558 		enable_sched_clock_irqtime();
1559 
1560 	lpj_fine = get_loops_per_jiffy();
1561 
1562 	check_system_tsc_reliable();
1563 
1564 	if (unsynchronized_tsc()) {
1565 		mark_tsc_unstable("TSCs unsynchronized");
1566 		return;
1567 	}
1568 
1569 	if (tsc_clocksource_reliable || tsc_watchdog == TSC_WATCHDOG_OFF)
1570 		tsc_disable_clocksource_watchdog();
1571 
1572 	clocksource_register_khz(&clocksource_tsc_early, tsc_khz);
1573 	detect_art();
1574 }
1575 
1576 #ifdef CONFIG_SMP
1577 /*
1578  * Check whether existing calibration data can be reused.
1579  */
1580 unsigned long calibrate_delay_is_known(void)
1581 {
1582 	int sibling, cpu = smp_processor_id();
1583 	int constant_tsc = cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC);
1584 	const struct cpumask *mask = topology_core_cpumask(cpu);
1585 
1586 	/*
1587 	 * If TSC has constant frequency and TSC is synchronized across
1588 	 * sockets, then reuse the CPU0 calibration.
1589 	 */
1590 	if (constant_tsc && !tsc_unstable)
1591 		return cpu_data(0).loops_per_jiffy;
1592 
1593 	/*
1594 	 * If TSC has constant frequency and TSC is not synchronized across
1595 	 * sockets and this is not the first CPU in the socket, then reuse
1596 	 * the calibration value of an already online CPU on that socket.
1597 	 *
1598 	 * This assumes that CONSTANT_TSC is consistent for all CPUs in a
1599 	 * socket.
1600 	 */
1601 	if (!constant_tsc || !mask)
1602 		return 0;
1603 
1604 	sibling = cpumask_any_but(mask, cpu);
1605 	if (sibling < nr_cpu_ids)
1606 		return cpu_data(sibling).loops_per_jiffy;
1607 	return 0;
1608 }
1609 #endif
1610