xref: /linux/arch/x86/kernel/tsc.c (revision 7ff836f064e2c814a7504c91a4464eea45d475bd)
// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/timer.h>
#include <linux/acpi_pmtmr.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/clocksource.h>
#include <linux/percpu.h>
#include <linux/timex.h>
#include <linux/static_key.h>

#include <asm/hpet.h>
#include <asm/timer.h>
#include <asm/vgtod.h>
#include <asm/time.h>
#include <asm/delay.h>
#include <asm/hypervisor.h>
#include <asm/nmi.h>
#include <asm/x86_init.h>
#include <asm/geode.h>
#include <asm/apic.h>
#include <asm/intel-family.h>
#include <asm/i8259.h>
#include <asm/uv/uv.h>

unsigned int __read_mostly cpu_khz;	/* TSC clocks / usec, not used here */
EXPORT_SYMBOL(cpu_khz);

unsigned int __read_mostly tsc_khz;
EXPORT_SYMBOL(tsc_khz);

#define KHZ	1000

/*
 * TSC can be unstable due to cpufreq or due to unsynced TSCs
 */
static int __read_mostly tsc_unstable;

static DEFINE_STATIC_KEY_FALSE(__use_tsc);

int tsc_clocksource_reliable;

static u32 art_to_tsc_numerator;
static u32 art_to_tsc_denominator;
static u64 art_to_tsc_offset;
struct clocksource *art_related_clocksource;

struct cyc2ns {
	struct cyc2ns_data data[2];	/*  0 + 2*16 = 32 */
	seqcount_t	   seq;		/* 32 + 4    = 36 */

}; /* fits one cacheline */

static DEFINE_PER_CPU_ALIGNED(struct cyc2ns, cyc2ns);

void __always_inline cyc2ns_read_begin(struct cyc2ns_data *data)
{
	int seq, idx;

	preempt_disable_notrace();

	do {
		seq = this_cpu_read(cyc2ns.seq.sequence);
		idx = seq & 1;

		data->cyc2ns_offset = this_cpu_read(cyc2ns.data[idx].cyc2ns_offset);
		data->cyc2ns_mul    = this_cpu_read(cyc2ns.data[idx].cyc2ns_mul);
		data->cyc2ns_shift  = this_cpu_read(cyc2ns.data[idx].cyc2ns_shift);

	} while (unlikely(seq != this_cpu_read(cyc2ns.seq.sequence)));
}

void __always_inline cyc2ns_read_end(void)
{
	preempt_enable_notrace();
}

/*
 * Accelerators for sched_clock()
 * convert from cycles(64bits) => nanoseconds (64bits)
 *  basic equation:
 *              ns = cycles / (freq / ns_per_sec)
 *              ns = cycles * (ns_per_sec / freq)
 *              ns = cycles * (10^9 / (cpu_khz * 10^3))
 *              ns = cycles * (10^6 / cpu_khz)
 *
 *      Then we use scaling math (suggested by george@mvista.com) to get:
 *              ns = cycles * (10^6 * SC / cpu_khz) / SC
 *              ns = cycles * cyc2ns_scale / SC
 *
 *      And since SC is a constant power of two, we can convert the div
 *  into a shift. The larger SC is, the more accurate the conversion, but
 *  cyc2ns_scale needs to be a 32-bit value so that 32-bit multiplication
 *  (64-bit result) can be used.
 *
 *  We can use khz divisor instead of mhz to keep a better precision.
 *  (mathieu.desnoyers@polymtl.ca)
 *
 *                      -johnstul@us.ibm.com "math is hard, lets go shopping!"
 */
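
/*
 * Worked example of the scaling above (illustrative numbers, not taken
 * from any particular machine): with tsc_khz = 2,500,000 (2.5 GHz) and
 * SC = 2^10:
 *
 *              cyc2ns_scale = 10^6 * 1024 / 2,500,000 = 409  (409.6 truncated)
 *              ns           = cycles * 409 >> 10
 *
 * so 2,500,000 cycles (exactly 1 ms at 2.5 GHz) convert to
 * 2,500,000 * 409 >> 10 = 998,535 ns, about 0.15% short purely from the
 * truncated multiplier. That is why clocks_calc_mult_shift() below picks
 * the largest shift that still keeps the math within 32x32->64 bits.
 */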

static __always_inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
	struct cyc2ns_data data;
	unsigned long long ns;

	cyc2ns_read_begin(&data);

	ns = data.cyc2ns_offset;
	ns += mul_u64_u32_shr(cyc, data.cyc2ns_mul, data.cyc2ns_shift);

	cyc2ns_read_end();

	return ns;
}

static void __set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
{
	unsigned long long ns_now;
	struct cyc2ns_data data;
	struct cyc2ns *c2n;

	ns_now = cycles_2_ns(tsc_now);

	/*
	 * Compute a new multiplier as per the above comment and ensure our
	 * time function is continuous; see the comment near struct
	 * cyc2ns_data.
	 */
	clocks_calc_mult_shift(&data.cyc2ns_mul, &data.cyc2ns_shift, khz,
			       NSEC_PER_MSEC, 0);

	/*
	 * cyc2ns_shift is exported via arch_perf_update_userpage() where it is
	 * not expected to be greater than 31 due to the original published
	 * conversion algorithm shifting a 32-bit value (now specifies a 64-bit
	 * value) - refer perf_event_mmap_page documentation in perf_event.h.
	 */
	if (data.cyc2ns_shift == 32) {
		data.cyc2ns_shift = 31;
		data.cyc2ns_mul >>= 1;
	}

	data.cyc2ns_offset = ns_now -
		mul_u64_u32_shr(tsc_now, data.cyc2ns_mul, data.cyc2ns_shift);

	c2n = per_cpu_ptr(&cyc2ns, cpu);

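	/*
	 * Publish the new parameters via the seqcount latch: each write
	 * below bumps the sequence count, and cyc2ns_read_begin() uses
	 * seq & 1 to pick the copy that is not currently being updated.
	 */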
	raw_write_seqcount_latch(&c2n->seq);
	c2n->data[0] = data;
	raw_write_seqcount_latch(&c2n->seq);
	c2n->data[1] = data;
}

static void set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
{
	unsigned long flags;

	local_irq_save(flags);
	sched_clock_idle_sleep_event();

	if (khz)
		__set_cyc2ns_scale(khz, cpu, tsc_now);

	sched_clock_idle_wakeup_event();
	local_irq_restore(flags);
}

/*
 * Initialize cyc2ns for boot cpu
 */
static void __init cyc2ns_init_boot_cpu(void)
{
	struct cyc2ns *c2n = this_cpu_ptr(&cyc2ns);

	seqcount_init(&c2n->seq);
	__set_cyc2ns_scale(tsc_khz, smp_processor_id(), rdtsc());
}

/*
 * Secondary CPUs do not run through tsc_init(), so set up
 * all the scale factors for all CPUs, assuming the same
 * speed as the bootup CPU.
 */
static void __init cyc2ns_init_secondary_cpus(void)
{
	unsigned int cpu, this_cpu = smp_processor_id();
	struct cyc2ns *c2n = this_cpu_ptr(&cyc2ns);
	struct cyc2ns_data *data = c2n->data;

	for_each_possible_cpu(cpu) {
		if (cpu != this_cpu) {
			seqcount_init(&c2n->seq);
			c2n = per_cpu_ptr(&cyc2ns, cpu);
			c2n->data[0] = data[0];
			c2n->data[1] = data[1];
		}
	}
}

/*
 * Scheduler clock - returns current time in nanosec units.
 */
u64 native_sched_clock(void)
{
	if (static_branch_likely(&__use_tsc)) {
		u64 tsc_now = rdtsc();

		/* return the value in ns */
		return cycles_2_ns(tsc_now);
	}

	/*
	 * Fall back to jiffies if there's no TSC available:
	 * ( But note that we still use it if the TSC is marked
	 *   unstable. We do this because unlike Time Of Day,
	 *   the scheduler clock tolerates small errors and it's
	 *   very important for it to be as fast as the platform
	 *   can achieve it. )
	 */

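	/*
	 * E.g. with HZ = 250 this advances in fixed 4,000,000 ns steps,
	 * once per tick - coarse, but monotonic enough for the scheduler.
	 */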
	/* No locking but a rare wrong value is not a big deal: */
	return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
}

/*
 * Generate a sched_clock if you already have a TSC value.
 */
u64 native_sched_clock_from_tsc(u64 tsc)
{
	return cycles_2_ns(tsc);
}

/*
 * We need to define a real function for sched_clock(), to override the
 * weak default version.
 */
#ifdef CONFIG_PARAVIRT
unsigned long long sched_clock(void)
{
	return paravirt_sched_clock();
}

bool using_native_sched_clock(void)
{
	return pv_ops.time.sched_clock == native_sched_clock;
}
#else
unsigned long long
sched_clock(void) __attribute__((alias("native_sched_clock")));

bool using_native_sched_clock(void) { return true; }
#endif

int check_tsc_unstable(void)
{
	return tsc_unstable;
}
EXPORT_SYMBOL_GPL(check_tsc_unstable);

#ifdef CONFIG_X86_TSC
int __init notsc_setup(char *str)
{
	mark_tsc_unstable("boot parameter notsc");
	return 1;
}
#else
/*
 * disable flag for tsc. Takes effect by clearing the TSC cpu flag
 * in cpu/common.c
 */
int __init notsc_setup(char *str)
{
	setup_clear_cpu_cap(X86_FEATURE_TSC);
	return 1;
}
#endif

__setup("notsc", notsc_setup);

static int no_sched_irq_time;
static int no_tsc_watchdog;

static int __init tsc_setup(char *str)
{
	if (!strcmp(str, "reliable"))
		tsc_clocksource_reliable = 1;
	if (!strncmp(str, "noirqtime", 9))
		no_sched_irq_time = 1;
	if (!strcmp(str, "unstable"))
		mark_tsc_unstable("boot parameter");
	if (!strcmp(str, "nowatchdog"))
		no_tsc_watchdog = 1;
	return 1;
}

__setup("tsc=", tsc_setup);

#define MAX_RETRIES		5
#define TSC_DEFAULT_THRESHOLD	0x20000

/*
 * Read TSC and the reference counters. Take care of any disturbances
 */
static u64 tsc_read_refs(u64 *p, int hpet)
{
	u64 t1, t2;
	u64 thresh = tsc_khz ? tsc_khz >> 5 : TSC_DEFAULT_THRESHOLD;
	int i;

	for (i = 0; i < MAX_RETRIES; i++) {
		t1 = get_cycles();
		if (hpet)
			*p = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF;
		else
			*p = acpi_pm_read_early();
		t2 = get_cycles();
		if ((t2 - t1) < thresh)
			return t2;
	}
	return ULLONG_MAX;
}

/*
 * Calculate the TSC frequency from HPET reference
 */
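/*
 * Unit check (deltatsc arrives pre-scaled by 10^6 from the caller):
 * HPET_PERIOD is in femtoseconds per tick, so
 *
 *	elapsed_ns = hpet_ticks * HPET_PERIOD / 10^6
 *	result     = tsc_ticks * 10^6 / elapsed_ns
 *
 * which is TSC ticks per millisecond, i.e. kHz.
 */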
static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
{
	u64 tmp;

	if (hpet2 < hpet1)
		hpet2 += 0x100000000ULL;
	hpet2 -= hpet1;
	tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
	do_div(tmp, 1000000);
	deltatsc = div64_u64(deltatsc, tmp);

	return (unsigned long) deltatsc;
}

/*
 * Calculate the TSC frequency from PMTimer reference
 */
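/*
 * Same unit check as for calc_hpet_ref(): the ACPI PM timer ticks at
 * PMTMR_TICKS_PER_SEC (3.579545 MHz), so
 *
 *	elapsed_ns = pm_ticks * 10^9 / PMTMR_TICKS_PER_SEC
 *
 * and the pre-scaled deltatsc divided by that is again kHz.
 */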
static unsigned long calc_pmtimer_ref(u64 deltatsc, u64 pm1, u64 pm2)
{
	u64 tmp;

	if (!pm1 && !pm2)
		return ULONG_MAX;

	if (pm2 < pm1)
		pm2 += (u64)ACPI_PM_OVRRUN;
	pm2 -= pm1;
	tmp = pm2 * 1000000000LL;
	do_div(tmp, PMTMR_TICKS_PER_SEC);
	do_div(deltatsc, tmp);

	return (unsigned long) deltatsc;
}

#define CAL_MS		10
#define CAL_LATCH	(PIT_TICK_RATE / (1000 / CAL_MS))
#define CAL_PIT_LOOPS	1000

#define CAL2_MS		50
#define CAL2_LATCH	(PIT_TICK_RATE / (1000 / CAL2_MS))
#define CAL2_PIT_LOOPS	5000


/*
 * Try to calibrate the TSC against the Programmable
 * Interrupt Timer and return the frequency of the TSC
 * in kHz.
 *
 * Return ULONG_MAX on failure to calibrate.
 */
static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin)
{
	u64 tsc, t1, t2, delta;
	unsigned long tscmin, tscmax;
	int pitcnt;

	if (!has_legacy_pic()) {
		/*
		 * Relies on tsc_early_delay_calibrate() to have given us a
		 * semi-usable udelay(); wait for the same 50ms we would have
		 * spent in the PIT loop below.
		 */
		udelay(10 * USEC_PER_MSEC);
		udelay(10 * USEC_PER_MSEC);
		udelay(10 * USEC_PER_MSEC);
		udelay(10 * USEC_PER_MSEC);
		udelay(10 * USEC_PER_MSEC);
		return ULONG_MAX;
	}

	/* Set the Gate high, disable speaker */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	/*
	 * Set up CTC channel 2 for mode 0 (interrupt on terminal
	 * count mode), binary count. Set the latch register (LSB
	 * then MSB) to begin the countdown.
	 */
	outb(0xb0, 0x43);
	outb(latch & 0xff, 0x42);
	outb(latch >> 8, 0x42);

	tsc = t1 = t2 = get_cycles();

	pitcnt = 0;
	tscmax = 0;
	tscmin = ULONG_MAX;
	while ((inb(0x61) & 0x20) == 0) {
		t2 = get_cycles();
		delta = t2 - tsc;
		tsc = t2;
		if ((unsigned long) delta < tscmin)
			tscmin = (unsigned int) delta;
		if ((unsigned long) delta > tscmax)
			tscmax = (unsigned int) delta;
		pitcnt++;
	}

	/*
	 * Sanity checks:
	 *
	 * If we were not able to read the PIT more than loopmin
	 * times, then we have been hit by a massive SMI
	 *
	 * If the maximum is 10 times larger than the minimum,
	 * then we got hit by an SMI as well.
	 */
	if (pitcnt < loopmin || tscmax > 10 * tscmin)
		return ULONG_MAX;

	/* Calculate the PIT value */
	delta = t2 - t1;
	do_div(delta, ms);
	return delta;
}

/*
 * This reads the current MSB of the PIT counter, and
 * checks if we are running on sufficiently fast and
 * non-virtualized hardware.
 *
 * Our expectations are:
 *
 *  - the PIT is running at roughly 1.19MHz
 *
 *  - each IO is going to take about 1us on real hardware,
 *    but we allow it to be much faster (by a factor of 10) or
 *    _slightly_ slower (ie we allow up to a 2us read+counter
 *    update - anything else implies an unacceptably slow CPU
 *    or PIT for the fast calibration to work).
 *
 *  - with 256 PIT ticks to read the value, we have 214us to
 *    see the same MSB (and overhead like doing a single TSC
 *    read per MSB value etc).
 *
 *  - We're doing 2 reads per loop (LSB, MSB), and we expect
 *    them each to take about a microsecond on real hardware.
 *    So we expect a count value of around 100. But we'll be
 *    generous, and accept anything over 50.
 *
 *  - if the PIT is stuck, and we see *many* more reads, we
 *    return early (and the next caller of pit_expect_msb()
 *    will then consider it a failure when they don't see the
 *    next expected value).
 *
 * These expectations mean that we know that we have seen the
 * transition from one expected value to another with a fairly
 * high accuracy, and we didn't miss any events. We can thus
 * use the TSC value at the transitions to calculate a pretty
 * good value for the TSC frequency.
 */
static inline int pit_verify_msb(unsigned char val)
{
	/* Ignore LSB */
	inb(0x42);
	return inb(0x42) == val;
}

static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *deltap)
{
	int count;
	u64 tsc = 0, prev_tsc = 0;

	for (count = 0; count < 50000; count++) {
		if (!pit_verify_msb(val))
			break;
		prev_tsc = tsc;
		tsc = get_cycles();
	}
	*deltap = get_cycles() - prev_tsc;
	*tscp = tsc;

	/*
	 * We require _some_ success, but the quality control
	 * will be based on the error terms on the TSC values.
	 */
	return count > 5;
}

/*
 * How many MSB values do we want to see? We aim for
 * a maximum error rate of 500ppm (in practice the
 * real error is much smaller), but refuse to spend
 * more than 50ms on it.
 */
#define MAX_QUICK_PIT_MS 50
#define MAX_QUICK_PIT_ITERATIONS (MAX_QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256)
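
/*
 * With PIT_TICK_RATE = 1193182 Hz that works out to at most
 * 50 * 1193182 / 1000 / 256 = 233 iterations, and the "delta >> 11"
 * test below accepts a measurement error of at most 1/2^11 ~= 488 ppm.
 */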

static unsigned long quick_pit_calibrate(void)
{
	int i;
	u64 tsc, delta;
	unsigned long d1, d2;

	if (!has_legacy_pic())
		return 0;

	/* Set the Gate high, disable speaker */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	/*
	 * Counter 2, mode 0 (one-shot), binary count
	 *
	 * NOTE! Mode 2 decrements by two (and then the
	 * output is flipped each time, giving the same
	 * final output frequency as a decrement-by-one),
	 * so mode 0 is much better when looking at the
	 * individual counts.
	 */
	outb(0xb0, 0x43);

	/* Start at 0xffff */
	outb(0xff, 0x42);
	outb(0xff, 0x42);

	/*
	 * The PIT starts counting at the next edge, so we
	 * need to delay for a microsecond. The easiest way
	 * to do that is to just read back the 16-bit counter
	 * once from the PIT.
	 */
	pit_verify_msb(0);

	if (pit_expect_msb(0xff, &tsc, &d1)) {
		for (i = 1; i <= MAX_QUICK_PIT_ITERATIONS; i++) {
			if (!pit_expect_msb(0xff-i, &delta, &d2))
				break;

			delta -= tsc;

			/*
			 * Extrapolate the error and fail fast if the error will
			 * never be below 500 ppm.
			 */
			if (i == 1 &&
			    d1 + d2 >= (delta * MAX_QUICK_PIT_ITERATIONS) >> 11)
				return 0;

			/*
			 * Iterate until the error is less than 500 ppm
			 */
			if (d1+d2 >= delta >> 11)
				continue;

			/*
			 * Check the PIT one more time to verify that
			 * all TSC reads were stable wrt the PIT.
			 *
			 * This also guarantees serialization of the
			 * last cycle read ('d2') in pit_expect_msb.
			 */
			if (!pit_verify_msb(0xfe - i))
				break;
			goto success;
		}
	}
	pr_info("Fast TSC calibration failed\n");
	return 0;

success:
	/*
	 * Ok, if we get here, then we've seen the
	 * MSB of the PIT decrement 'i' times, and the
	 * error has shrunk to less than 500 ppm.
	 *
	 * As a result, we can depend on there not being
	 * any odd delays anywhere, and the TSC reads are
	 * reliable (within the error).
	 *
	 * kHz = ticks / time-in-seconds / 1000;
	 * kHz = (t2 - t1) / (I * 256 / PIT_TICK_RATE) / 1000
	 * kHz = ((t2 - t1) * PIT_TICK_RATE) / (I * 256 * 1000)
	 */
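	/*
	 * E.g. i = 20 MSB steps is 20 * 256 / 1193182 s ~= 4.29 ms; a
	 * ~12.9M cycle TSC delta over that window works out to roughly
	 * 3.0 GHz (illustrative numbers only).
	 */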
	delta *= PIT_TICK_RATE;
	do_div(delta, i*256*1000);
	pr_info("Fast TSC calibration using PIT\n");
	return delta;
}

/**
 * native_calibrate_tsc - determine the TSC frequency via CPUID, else return 0
 */
unsigned long native_calibrate_tsc(void)
{
	unsigned int eax_denominator, ebx_numerator, ecx_hz, edx;
	unsigned int crystal_khz;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return 0;

	if (boot_cpu_data.cpuid_level < 0x15)
		return 0;

	eax_denominator = ebx_numerator = ecx_hz = edx = 0;

	/* CPUID 15H TSC/Crystal ratio, plus optionally Crystal Hz */
	cpuid(0x15, &eax_denominator, &ebx_numerator, &ecx_hz, &edx);

	if (ebx_numerator == 0 || eax_denominator == 0)
		return 0;

	crystal_khz = ecx_hz / 1000;

	if (crystal_khz == 0) {
		switch (boot_cpu_data.x86_model) {
		case INTEL_FAM6_SKYLAKE_MOBILE:
		case INTEL_FAM6_SKYLAKE_DESKTOP:
		case INTEL_FAM6_KABYLAKE_MOBILE:
		case INTEL_FAM6_KABYLAKE_DESKTOP:
			crystal_khz = 24000;	/* 24.0 MHz */
			break;
		case INTEL_FAM6_ATOM_GOLDMONT_X:
			crystal_khz = 25000;	/* 25.0 MHz */
			break;
		case INTEL_FAM6_ATOM_GOLDMONT:
			crystal_khz = 19200;	/* 19.2 MHz */
			break;
		}
	}

	if (crystal_khz == 0)
		return 0;
	/*
	 * TSC frequency determined by CPUID is a "hardware reported"
	 * frequency and is the most accurate one we have so far. This
	 * is considered a known frequency.
	 */
	setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);

	/*
	 * For Atom SoCs TSC is the only reliable clocksource.
	 * Mark TSC reliable so no watchdog on it.
	 */
	if (boot_cpu_data.x86_model == INTEL_FAM6_ATOM_GOLDMONT)
		setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);

	return crystal_khz * ebx_numerator / eax_denominator;
}
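
/*
 * Illustrative CPUID 15H numbers (made up, not from any specific part):
 * a 24000 kHz crystal with an EBX:EAX ratio of 292:2 yields
 * 24000 * 292 / 2 = 3504000 kHz, i.e. a 3.504 GHz TSC.
 */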

static unsigned long cpu_khz_from_cpuid(void)
{
	unsigned int eax_base_mhz, ebx_max_mhz, ecx_bus_mhz, edx;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return 0;

	if (boot_cpu_data.cpuid_level < 0x16)
		return 0;

	eax_base_mhz = ebx_max_mhz = ecx_bus_mhz = edx = 0;

	cpuid(0x16, &eax_base_mhz, &ebx_max_mhz, &ecx_bus_mhz, &edx);

	return eax_base_mhz * 1000;
}

/*
 * Calibrate the CPU using the PIT, HPET, and PM timer methods. They are
 * available later in boot, after ACPI is initialized.
 */
static unsigned long pit_hpet_ptimer_calibrate_cpu(void)
{
	u64 tsc1, tsc2, delta, ref1, ref2;
	unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
	unsigned long flags, latch, ms;
	int hpet = is_hpet_enabled(), i, loopmin;

	/*
	 * Run 3 calibration loops to get the lowest frequency value
	 * (the best estimate). We use two different calibration modes
	 * here:
	 *
	 * 1) PIT loop. We set the PIT Channel 2 to oneshot mode and
	 * load a timeout of 10ms (widened to 50ms if the PIT keeps
	 * failing, see below). We read the time right after we
	 * started the timer and wait until the PIT count down reaches
	 * zero. In each wait loop iteration we read the TSC and check
	 * the delta to the previous read. We keep track of the min
	 * and max values of that delta. The delta is mostly defined
	 * by the IO time of the PIT access, so we can detect when
	 * any disturbance happened between the two reads. If the
	 * maximum time is significantly larger than the minimum time,
	 * then we discard the result and have another try.
	 *
	 * 2) Reference counter. If available we use the HPET or the
	 * PMTIMER as a reference to check the sanity of that value.
	 * We use separate TSC readouts and check inside of the
	 * reference read for any possible disturbance. We discard
	 * disturbed values here as well. We do that around the PIT
	 * calibration delay loop as we have to wait for a certain
	 * amount of time anyway.
	 */

	/* Preset PIT loop values */
	latch = CAL_LATCH;
	ms = CAL_MS;
	loopmin = CAL_PIT_LOOPS;

	for (i = 0; i < 3; i++) {
		unsigned long tsc_pit_khz;

		/*
		 * Read the start value and the reference count of
		 * hpet/pmtimer when available. Then do the PIT
		 * calibration, which will take at least 50ms, and
		 * read the end value.
		 */
		local_irq_save(flags);
		tsc1 = tsc_read_refs(&ref1, hpet);
		tsc_pit_khz = pit_calibrate_tsc(latch, ms, loopmin);
		tsc2 = tsc_read_refs(&ref2, hpet);
		local_irq_restore(flags);

		/* Pick the lowest PIT TSC calibration so far */
		tsc_pit_min = min(tsc_pit_min, tsc_pit_khz);

		/* hpet or pmtimer available ? */
		if (ref1 == ref2)
			continue;

		/* Check whether the sampling was disturbed */
		if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX)
			continue;

		tsc2 = (tsc2 - tsc1) * 1000000LL;
		if (hpet)
			tsc2 = calc_hpet_ref(tsc2, ref1, ref2);
		else
			tsc2 = calc_pmtimer_ref(tsc2, ref1, ref2);

		tsc_ref_min = min(tsc_ref_min, (unsigned long) tsc2);

		/* Check the reference deviation */
		delta = ((u64) tsc_pit_min) * 100;
		do_div(delta, tsc_ref_min);

		/*
		 * If both calibration results are inside a 10% window
		 * then we can be sure that the calibration succeeded.
		 * We break out of the loop right away. We use the
		 * reference value, as it is more precise.
		 */
		if (delta >= 90 && delta <= 110) {
			pr_info("PIT calibration matches %s. %d loops\n",
				hpet ? "HPET" : "PMTIMER", i + 1);
			return tsc_ref_min;
		}

		/*
		 * Check whether PIT failed more than once. This
		 * happens in virtualized environments. We need to
		 * give the virtual PC a slightly longer timeframe for
		 * the HPET/PMTIMER to make the result precise.
		 */
		if (i == 1 && tsc_pit_min == ULONG_MAX) {
			latch = CAL2_LATCH;
			ms = CAL2_MS;
			loopmin = CAL2_PIT_LOOPS;
		}
	}

	/*
	 * Now check the results.
	 */
	if (tsc_pit_min == ULONG_MAX) {
		/* PIT gave no useful value */
		pr_warn("Unable to calibrate against PIT\n");

		/* We don't have an alternative source, disable TSC */
		if (!hpet && !ref1 && !ref2) {
			pr_notice("No reference (HPET/PMTIMER) available\n");
			return 0;
		}

		/* The alternative source failed as well, disable TSC */
		if (tsc_ref_min == ULONG_MAX) {
			pr_warn("HPET/PMTIMER calibration failed\n");
			return 0;
		}

		/* Use the alternative source */
		pr_info("using %s reference calibration\n",
			hpet ? "HPET" : "PMTIMER");

		return tsc_ref_min;
	}

	/* We don't have an alternative source, use the PIT calibration value */
	if (!hpet && !ref1 && !ref2) {
		pr_info("Using PIT calibration value\n");
		return tsc_pit_min;
	}

	/* The alternative source failed, use the PIT calibration value */
	if (tsc_ref_min == ULONG_MAX) {
		pr_warn("HPET/PMTIMER calibration failed. Using PIT calibration.\n");
		return tsc_pit_min;
	}

	/*
	 * The calibration values differ too much. When in doubt, we use
	 * the PIT value, as we know that there are PMTIMERs around
	 * running at double speed. At least we let the user know:
	 */
	pr_warn("PIT calibration deviates from %s: %lu %lu\n",
		hpet ? "HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min);
	pr_info("Using PIT calibration value\n");
	return tsc_pit_min;
}

/**
 * native_calibrate_cpu_early - can calibrate the cpu early in boot
 */
unsigned long native_calibrate_cpu_early(void)
{
	unsigned long flags, fast_calibrate = cpu_khz_from_cpuid();

	if (!fast_calibrate)
		fast_calibrate = cpu_khz_from_msr();
	if (!fast_calibrate) {
		local_irq_save(flags);
		fast_calibrate = quick_pit_calibrate();
		local_irq_restore(flags);
	}
	return fast_calibrate;
}


/**
 * native_calibrate_cpu - calibrate the cpu
 */
static unsigned long native_calibrate_cpu(void)
{
	unsigned long tsc_freq = native_calibrate_cpu_early();

	if (!tsc_freq)
		tsc_freq = pit_hpet_ptimer_calibrate_cpu();

	return tsc_freq;
}

void recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP
	unsigned long cpu_khz_old = cpu_khz;

	if (!boot_cpu_has(X86_FEATURE_TSC))
		return;

	cpu_khz = x86_platform.calibrate_cpu();
	tsc_khz = x86_platform.calibrate_tsc();
	if (tsc_khz == 0)
		tsc_khz = cpu_khz;
	else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz)
		cpu_khz = tsc_khz;
	cpu_data(0).loops_per_jiffy = cpufreq_scale(cpu_data(0).loops_per_jiffy,
						    cpu_khz_old, cpu_khz);
#endif
}

EXPORT_SYMBOL(recalibrate_cpu_khz);


static unsigned long long cyc2ns_suspend;

void tsc_save_sched_clock_state(void)
{
	if (!sched_clock_stable())
		return;

	cyc2ns_suspend = sched_clock();
}

/*
 * Even on processors with invariant TSC, the TSC gets reset in some of
 * the ACPI system sleep states. And on some systems the BIOS seems to
 * reinit the TSC to an arbitrary value (still sync'd across cpu's)
 * during resume from such sleep states. To cope with this, recompute
 * the cyc2ns_offset for each cpu so that sched_clock() continues from
 * the point where it was left off during suspend.
 */
void tsc_restore_sched_clock_state(void)
{
	unsigned long long offset;
	unsigned long flags;
	int cpu;

	if (!sched_clock_stable())
		return;

	local_irq_save(flags);

	/*
	 * We're coming out of suspend, there's no concurrency yet; don't
	 * bother being nice about the RCU stuff, just write to both
	 * data fields.
	 */

	this_cpu_write(cyc2ns.data[0].cyc2ns_offset, 0);
	this_cpu_write(cyc2ns.data[1].cyc2ns_offset, 0);

	offset = cyc2ns_suspend - sched_clock();

	for_each_possible_cpu(cpu) {
		per_cpu(cyc2ns.data[0].cyc2ns_offset, cpu) = offset;
		per_cpu(cyc2ns.data[1].cyc2ns_offset, cpu) = offset;
	}

	local_irq_restore(flags);
}

#ifdef CONFIG_CPU_FREQ
/*
 * Frequency scaling support. Adjust the TSC based timer when the CPU frequency
 * changes.
 *
 * NOTE: On SMP the situation is not fixable in general, so simply mark the TSC
 * as unstable and give up in those cases.
 *
 * Should fix up last_tsc too. Currently gettimeofday in the
 * first tick after the change will be slightly wrong.
 */
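
/*
 * cpufreq_scale() below is a simple linear rescale,
 *
 *	new = ref_value * freq_new / freq_ref,
 *
 * applied to both loops_per_jiffy and tsc_khz, using the values captured
 * the first time the notifier fires as the reference.
 */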

static unsigned int  ref_freq;
static unsigned long loops_per_jiffy_ref;
static unsigned long tsc_khz_ref;

static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				void *data)
{
	struct cpufreq_freqs *freq = data;

	if (num_online_cpus() > 1) {
		mark_tsc_unstable("cpufreq changes on SMP");
		return 0;
	}

	if (!ref_freq) {
		ref_freq = freq->old;
		loops_per_jiffy_ref = boot_cpu_data.loops_per_jiffy;
		tsc_khz_ref = tsc_khz;
	}

	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
		boot_cpu_data.loops_per_jiffy =
			cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);

		tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			mark_tsc_unstable("cpufreq changes");

		set_cyc2ns_scale(tsc_khz, freq->policy->cpu, rdtsc());
	}

	return 0;
}

static struct notifier_block time_cpufreq_notifier_block = {
	.notifier_call  = time_cpufreq_notifier
};

static int __init cpufreq_register_tsc_scaling(void)
{
	if (!boot_cpu_has(X86_FEATURE_TSC))
		return 0;
	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		return 0;
	cpufreq_register_notifier(&time_cpufreq_notifier_block,
				CPUFREQ_TRANSITION_NOTIFIER);
	return 0;
}

core_initcall(cpufreq_register_tsc_scaling);

#endif /* CONFIG_CPU_FREQ */

#define ART_CPUID_LEAF (0x15)
#define ART_MIN_DENOMINATOR (1)


/*
 * If ART is present, detect the numerator:denominator needed to convert
 * ART to TSC
 */
static void __init detect_art(void)
{
	unsigned int unused[2];

	if (boot_cpu_data.cpuid_level < ART_CPUID_LEAF)
		return;

	/*
	 * Don't enable ART in a VM, non-stop TSC and TSC_ADJUST required,
	 * and the TSC counter resets must not occur asynchronously.
	 */
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR) ||
	    !boot_cpu_has(X86_FEATURE_NONSTOP_TSC) ||
	    !boot_cpu_has(X86_FEATURE_TSC_ADJUST) ||
	    tsc_async_resets)
		return;

	cpuid(ART_CPUID_LEAF, &art_to_tsc_denominator,
	      &art_to_tsc_numerator, unused, unused+1);

	if (art_to_tsc_denominator < ART_MIN_DENOMINATOR)
		return;

	rdmsrl(MSR_IA32_TSC_ADJUST, art_to_tsc_offset);

	/* Make this sticky over multiple CPU init calls */
	setup_force_cpu_cap(X86_FEATURE_ART);
}


/* clocksource code */

static void tsc_resume(struct clocksource *cs)
{
	tsc_verify_tsc_adjust(true);
}

/*
 * We used to compare the TSC to the cycle_last value in the clocksource
 * structure to avoid a nasty time-warp. This can be observed in a
 * very small window right after one CPU updated cycle_last under
 * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
 * is smaller than the cycle_last reference value due to a TSC which
 * is slightly behind. This delta is nowhere else observable, but in
 * that case it results in a forward time jump in the range of hours
 * due to the unsigned delta calculation of the time keeping core
 * code, which is necessary to support wrapping clocksources like pm
 * timer.
 *
 * This sanity check is now done in the core timekeeping code,
 * checking the result of read_tsc() - cycle_last for being negative.
 * That works because CLOCKSOURCE_MASK(64) does not mask out any bit.
 */
static u64 read_tsc(struct clocksource *cs)
{
	return (u64)rdtsc_ordered();
}

static void tsc_cs_mark_unstable(struct clocksource *cs)
{
	if (tsc_unstable)
		return;

	tsc_unstable = 1;
	if (using_native_sched_clock())
		clear_sched_clock_stable();
	disable_sched_clock_irqtime();
	pr_info("Marking TSC unstable due to clocksource watchdog\n");
}

static void tsc_cs_tick_stable(struct clocksource *cs)
{
	if (tsc_unstable)
		return;

	if (using_native_sched_clock())
		sched_clock_tick_stable();
}

/*
 * .mask MUST be CLOCKSOURCE_MASK(64). See comment above read_tsc()
 */
static struct clocksource clocksource_tsc_early = {
	.name                   = "tsc-early",
	.rating                 = 299,
	.read                   = read_tsc,
	.mask                   = CLOCKSOURCE_MASK(64),
	.flags                  = CLOCK_SOURCE_IS_CONTINUOUS |
				  CLOCK_SOURCE_MUST_VERIFY,
	.archdata               = { .vclock_mode = VCLOCK_TSC },
	.resume			= tsc_resume,
	.mark_unstable		= tsc_cs_mark_unstable,
	.tick_stable		= tsc_cs_tick_stable,
	.list			= LIST_HEAD_INIT(clocksource_tsc_early.list),
};

/*
 * Must mark VALID_FOR_HRES early such that when we unregister tsc_early
 * this one will immediately take over. We will only register if TSC has
 * been found good.
 */
static struct clocksource clocksource_tsc = {
	.name                   = "tsc",
	.rating                 = 300,
	.read                   = read_tsc,
	.mask                   = CLOCKSOURCE_MASK(64),
	.flags                  = CLOCK_SOURCE_IS_CONTINUOUS |
				  CLOCK_SOURCE_VALID_FOR_HRES |
				  CLOCK_SOURCE_MUST_VERIFY,
	.archdata               = { .vclock_mode = VCLOCK_TSC },
	.resume			= tsc_resume,
	.mark_unstable		= tsc_cs_mark_unstable,
	.tick_stable		= tsc_cs_tick_stable,
	.list			= LIST_HEAD_INIT(clocksource_tsc.list),
};

void mark_tsc_unstable(char *reason)
{
	if (tsc_unstable)
		return;

	tsc_unstable = 1;
	if (using_native_sched_clock())
		clear_sched_clock_stable();
	disable_sched_clock_irqtime();
	pr_info("Marking TSC unstable due to %s\n", reason);

	clocksource_mark_unstable(&clocksource_tsc_early);
	clocksource_mark_unstable(&clocksource_tsc);
}

EXPORT_SYMBOL_GPL(mark_tsc_unstable);

static void __init check_system_tsc_reliable(void)
{
#if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
	if (is_geode_lx()) {
		/* RTSC counts during suspend */
#define RTSC_SUSP 0x100
		unsigned long res_low, res_high;

		rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
		/* Geode_LX - the OLPC CPU has a very reliable TSC */
		if (res_low & RTSC_SUSP)
			tsc_clocksource_reliable = 1;
	}
#endif
	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
		tsc_clocksource_reliable = 1;
}

/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
int unsynchronized_tsc(void)
{
	if (!boot_cpu_has(X86_FEATURE_TSC) || tsc_unstable)
		return 1;

#ifdef CONFIG_SMP
	if (apic_is_clustered_box())
		return 1;
#endif

	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		return 0;

	if (tsc_clocksource_reliable)
		return 0;
	/*
	 * Intel systems are normally all synchronized.
	 * Exceptions must mark TSC as unstable:
	 */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
		/* assume multi socket systems are not synchronized: */
		if (num_possible_cpus() > 1)
			return 1;
	}

	return 0;
}

/*
 * Convert ART to TSC given numerator/denominator found in detect_art()
 */
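/*
 * The conversion is TSC = ART * num / den + offset, evaluated piecewise
 * as (art / den) * num + (art % den) * num / den so the intermediate
 * products stay within 64 bits for any realistic ART value.
 */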
struct system_counterval_t convert_art_to_tsc(u64 art)
{
	u64 tmp, res, rem;

	rem = do_div(art, art_to_tsc_denominator);

	res = art * art_to_tsc_numerator;
	tmp = rem * art_to_tsc_numerator;

	do_div(tmp, art_to_tsc_denominator);
	res += tmp + art_to_tsc_offset;

	return (struct system_counterval_t) {.cs = art_related_clocksource,
			.cycles = res};
}
EXPORT_SYMBOL(convert_art_to_tsc);

/**
 * convert_art_ns_to_tsc() - Convert ART in nanoseconds to TSC.
 * @art_ns: ART (Always Running Timer) in unit of nanoseconds
 *
 * PTM requires all timestamps to be in units of nanoseconds. When user
 * software requests a cross-timestamp, this function converts system timestamp
 * to TSC.
 *
 * This is valid when CPU feature flag X86_FEATURE_TSC_KNOWN_FREQ is set
 * indicating the tsc_khz is derived from CPUID[15H]. Drivers should check
 * that this flag is set before conversion to TSC is attempted.
 *
 * Return:
 * struct system_counterval_t - system counter value with the pointer to the
 *	corresponding clocksource
 *	@cycles:	System counter value
 *	@cs:		Clocksource corresponding to system counter value. Used
 *			by timekeeping code to verify comparability of two cycle
 *			values.
 */
struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns)
{
	u64 tmp, res, rem;

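	/*
	 * TSC = art_ns * tsc_khz / 10^6, split around USEC_PER_SEC the
	 * same way as convert_art_to_tsc() to keep the products in 64 bits.
	 */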
	rem = do_div(art_ns, USEC_PER_SEC);

	res = art_ns * tsc_khz;
	tmp = rem * tsc_khz;

	do_div(tmp, USEC_PER_SEC);
	res += tmp;

	return (struct system_counterval_t) { .cs = art_related_clocksource,
					      .cycles = res};
}
EXPORT_SYMBOL(convert_art_ns_to_tsc);


static void tsc_refine_calibration_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work);
/**
 * tsc_refine_calibration_work - Further refine tsc freq calibration
 * @work: ignored.
 *
 * This function uses delayed work over a period of a
 * second to further refine the TSC freq value. Since this is
 * timer based, instead of loop based, we don't block the boot
 * process while this longer calibration is done.
 *
 * If there are any calibration anomalies (too many SMIs, etc),
 * or the refined calibration is off by more than 1% from the
 * fast early calibration, we throw out the new calibration and
 * use the early calibration.
 */
static void tsc_refine_calibration_work(struct work_struct *work)
{
	static u64 tsc_start = ULLONG_MAX, ref_start;
	static int hpet;
	u64 tsc_stop, ref_stop, delta;
	unsigned long freq;
	int cpu;

	/* Don't bother refining TSC on unstable systems */
	if (tsc_unstable)
		goto unreg;

	/*
	 * Since the work is started early in boot, we may be
	 * delayed the first time we expire. So set the workqueue
	 * again once we know timers are working.
	 */
	if (tsc_start == ULLONG_MAX) {
restart:
		/*
		 * Only set hpet once, to avoid mixing hardware
		 * if the hpet becomes enabled later.
		 */
		hpet = is_hpet_enabled();
		tsc_start = tsc_read_refs(&ref_start, hpet);
		schedule_delayed_work(&tsc_irqwork, HZ);
		return;
	}

	tsc_stop = tsc_read_refs(&ref_stop, hpet);

	/* hpet or pmtimer available ? */
	if (ref_start == ref_stop)
		goto out;

	/* Check whether the sampling was disturbed */
	if (tsc_stop == ULLONG_MAX)
		goto restart;

	delta = tsc_stop - tsc_start;
	delta *= 1000000LL;
	if (hpet)
		freq = calc_hpet_ref(delta, ref_start, ref_stop);
	else
		freq = calc_pmtimer_ref(delta, ref_start, ref_stop);

	/* Make sure we're within 1% */
	if (abs(tsc_khz - freq) > tsc_khz/100)
		goto out;

	tsc_khz = freq;
	pr_info("Refined TSC clocksource calibration: %lu.%03lu MHz\n",
		(unsigned long)tsc_khz / 1000,
		(unsigned long)tsc_khz % 1000);

	/* Inform the TSC deadline clockevent devices about the recalibration */
	lapic_update_tsc_freq();

	/* Update the sched_clock() rate to match the clocksource one */
	for_each_possible_cpu(cpu)
		set_cyc2ns_scale(tsc_khz, cpu, tsc_stop);

out:
	if (tsc_unstable)
		goto unreg;

	if (boot_cpu_has(X86_FEATURE_ART))
		art_related_clocksource = &clocksource_tsc;
	clocksource_register_khz(&clocksource_tsc, tsc_khz);
unreg:
	clocksource_unregister(&clocksource_tsc_early);
}


static int __init init_tsc_clocksource(void)
{
	if (!boot_cpu_has(X86_FEATURE_TSC) || !tsc_khz)
		return 0;

	if (tsc_unstable)
		goto unreg;

	if (tsc_clocksource_reliable || no_tsc_watchdog)
		clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;

	if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
		clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;

	/*
	 * When TSC frequency is known (retrieved via MSR or CPUID), we skip
	 * the refined calibration and directly register it as a clocksource.
	 */
	if (boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) {
		if (boot_cpu_has(X86_FEATURE_ART))
			art_related_clocksource = &clocksource_tsc;
		clocksource_register_khz(&clocksource_tsc, tsc_khz);
unreg:
		clocksource_unregister(&clocksource_tsc_early);
		return 0;
	}

	schedule_delayed_work(&tsc_irqwork, 0);
	return 0;
}
/*
 * We use device_initcall here, to ensure we run after the hpet
 * is fully initialized, which may occur at fs_initcall time.
 */
device_initcall(init_tsc_clocksource);

static bool __init determine_cpu_tsc_frequencies(bool early)
{
	/* Make sure that cpu and tsc are not already calibrated */
	WARN_ON(cpu_khz || tsc_khz);

	if (early) {
		cpu_khz = x86_platform.calibrate_cpu();
		tsc_khz = x86_platform.calibrate_tsc();
	} else {
		/* We should not be here with non-native cpu calibration */
		WARN_ON(x86_platform.calibrate_cpu != native_calibrate_cpu);
		cpu_khz = pit_hpet_ptimer_calibrate_cpu();
	}

	/*
	 * Trust non-zero tsc_khz as authoritative,
	 * and use it to sanity check cpu_khz,
	 * which will be off if system timer is off.
	 */
	if (tsc_khz == 0)
		tsc_khz = cpu_khz;
	else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz)
		cpu_khz = tsc_khz;

	if (tsc_khz == 0)
		return false;

	pr_info("Detected %lu.%03lu MHz processor\n",
		(unsigned long)cpu_khz / KHZ,
		(unsigned long)cpu_khz % KHZ);

	if (cpu_khz != tsc_khz) {
		pr_info("Detected %lu.%03lu MHz TSC\n",
			(unsigned long)tsc_khz / KHZ,
			(unsigned long)tsc_khz % KHZ);
	}
	return true;
}

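/*
 * loops_per_jiffy in TSC cycles: e.g. tsc_khz = 3000000 (a 3 GHz TSC)
 * at HZ = 250 gives 3000000 * 1000 / 250 = 12,000,000 cycles per jiffy.
 */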
static unsigned long __init get_loops_per_jiffy(void)
{
	u64 lpj = (u64)tsc_khz * KHZ;

	do_div(lpj, HZ);
	return lpj;
}

static void __init tsc_enable_sched_clock(void)
{
	/* Sanitize TSC ADJUST before cyc2ns gets initialized */
	tsc_store_and_check_tsc_adjust(true);
	cyc2ns_init_boot_cpu();
	static_branch_enable(&__use_tsc);
}

void __init tsc_early_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_TSC))
		return;
	/* Don't change UV TSC multi-chassis synchronization */
	if (is_early_uv_system())
		return;
	if (!determine_cpu_tsc_frequencies(true))
		return;
	loops_per_jiffy = get_loops_per_jiffy();

	tsc_enable_sched_clock();
}

void __init tsc_init(void)
{
	/*
	 * native_calibrate_cpu_early can only calibrate using methods that are
	 * available early in boot.
	 */
	if (x86_platform.calibrate_cpu == native_calibrate_cpu_early)
		x86_platform.calibrate_cpu = native_calibrate_cpu;

	if (!boot_cpu_has(X86_FEATURE_TSC)) {
		setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
		return;
	}

	if (!tsc_khz) {
		/* We failed to determine frequencies earlier, try again */
		if (!determine_cpu_tsc_frequencies(false)) {
			mark_tsc_unstable("could not calculate TSC khz");
			setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
			return;
		}
		tsc_enable_sched_clock();
	}

	cyc2ns_init_secondary_cpus();

	if (!no_sched_irq_time)
		enable_sched_clock_irqtime();

	lpj_fine = get_loops_per_jiffy();
	use_tsc_delay();

	check_system_tsc_reliable();

	if (unsynchronized_tsc()) {
		mark_tsc_unstable("TSCs unsynchronized");
		return;
	}

	clocksource_register_khz(&clocksource_tsc_early, tsc_khz);
	detect_art();
}

#ifdef CONFIG_SMP
/*
 * If we have a constant TSC and are using the TSC for the delay loop,
 * we can skip clock calibration if another cpu in the same socket has already
 * been calibrated. This assumes that CONSTANT_TSC applies to all
 * cpus in the socket - this should be a safe assumption.
 */
unsigned long calibrate_delay_is_known(void)
{
	int sibling, cpu = smp_processor_id();
	int constant_tsc = cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC);
	const struct cpumask *mask = topology_core_cpumask(cpu);

	if (!constant_tsc || !mask)
		return 0;

	sibling = cpumask_any_but(mask, cpu);
	if (sibling < nr_cpu_ids)
		return cpu_data(sibling).loops_per_jiffy;
	return 0;
}
#endif
1517