#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/timer.h>
#include <linux/acpi_pmtmr.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/clocksource.h>
#include <linux/percpu.h>
#include <linux/timex.h>
#include <linux/static_key.h>

#include <asm/hpet.h>
#include <asm/timer.h>
#include <asm/vgtod.h>
#include <asm/time.h>
#include <asm/delay.h>
#include <asm/hypervisor.h>
#include <asm/nmi.h>
#include <asm/x86_init.h>
#include <asm/geode.h>
#include <asm/apic.h>
#include <asm/intel-family.h>

unsigned int __read_mostly cpu_khz;	/* TSC clocks / usec, not used here */
EXPORT_SYMBOL(cpu_khz);

unsigned int __read_mostly tsc_khz;
EXPORT_SYMBOL(tsc_khz);

/*
 * TSC can be unstable due to cpufreq or due to unsynced TSCs
 */
static int __read_mostly tsc_unstable;

/*
 * native_sched_clock() is called before tsc_init(), so we must start with
 * the TSC soft disabled to prevent erroneous rdtsc usage on
 * !boot_cpu_has(X86_FEATURE_TSC) processors.
 */
static int __read_mostly tsc_disabled = -1;

static DEFINE_STATIC_KEY_FALSE(__use_tsc);

int tsc_clocksource_reliable;

static u32 art_to_tsc_numerator;
static u32 art_to_tsc_denominator;
static u64 art_to_tsc_offset;
struct clocksource *art_related_clocksource;

struct cyc2ns {
	struct cyc2ns_data data[2];	/*  0 + 2*16 = 32 */
	seqcount_t	   seq;		/* 32 + 4    = 36 */

}; /* fits one cacheline */

static DEFINE_PER_CPU_ALIGNED(struct cyc2ns, cyc2ns);

void cyc2ns_read_begin(struct cyc2ns_data *data)
{
	int seq, idx;

	preempt_disable_notrace();

	do {
		seq = this_cpu_read(cyc2ns.seq.sequence);
		idx = seq & 1;

		data->cyc2ns_offset = this_cpu_read(cyc2ns.data[idx].cyc2ns_offset);
		data->cyc2ns_mul    = this_cpu_read(cyc2ns.data[idx].cyc2ns_mul);
		data->cyc2ns_shift  = this_cpu_read(cyc2ns.data[idx].cyc2ns_shift);

	} while (unlikely(seq != this_cpu_read(cyc2ns.seq.sequence)));
}

void cyc2ns_read_end(void)
{
	preempt_enable_notrace();
}

/*
 * Accelerators for sched_clock()
 * convert from cycles(64bits) => nanoseconds (64bits)
 * basic equation:
 *              ns = cycles / (freq / ns_per_sec)
 *              ns = cycles * (ns_per_sec / freq)
 *              ns = cycles * (10^9 / (cpu_khz * 10^3))
 *              ns = cycles * (10^6 / cpu_khz)
 *
 *      Then we use scaling math (suggested by george@mvista.com) to get:
 *              ns = cycles * (10^6 * SC / cpu_khz) / SC
 *              ns = cycles * cyc2ns_scale / SC
 *
 *      And since SC is a constant power of two, we can convert the div
 *  into a shift. The larger SC is, the more accurate the conversion, but
 *  cyc2ns_scale needs to be a 32-bit value so that 32-bit multiplication
 *  (64-bit result) can be used.
 *
 *  We can use khz divisor instead of mhz to keep a better precision.
 *  (mathieu.desnoyers@polymtl.ca)
 *
 *                      -johnstul@us.ibm.com "math is hard, lets go shopping!"
 */
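
/*
 * Illustrative sketch only, not used by the code below: with a
 * hypothetical 2,000,000 kHz (2 GHz) TSC and SC = 2^10, the scale factor
 * works out to 10^6 * 1024 / 2,000,000 = 512, so every cycle maps to
 * exactly 0.5 ns: ns = (cycles * 512) >> 10. The real code derives the
 * multiplier/shift pair with clocks_calc_mult_shift() and performs the
 * multiplication with mul_u64_u32_shr() so the intermediate product
 * cannot overflow 64 bits.
 */
static inline u64 cyc2ns_example_2ghz(u64 cycles)
{
	/* hypothetical mult = 512 and shift = 10 for a 2 GHz TSC */
	return mul_u64_u32_shr(cycles, 512, 10);
}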

static void cyc2ns_data_init(struct cyc2ns_data *data)
{
	data->cyc2ns_mul = 0;
	data->cyc2ns_shift = 0;
	data->cyc2ns_offset = 0;
}

static void cyc2ns_init(int cpu)
{
	struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu);

	cyc2ns_data_init(&c2n->data[0]);
	cyc2ns_data_init(&c2n->data[1]);

	seqcount_init(&c2n->seq);
}

static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
	struct cyc2ns_data data;
	unsigned long long ns;

	cyc2ns_read_begin(&data);

	ns = data.cyc2ns_offset;
	ns += mul_u64_u32_shr(cyc, data.cyc2ns_mul, data.cyc2ns_shift);

	cyc2ns_read_end();

	return ns;
}

static void set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
{
	unsigned long long ns_now;
	struct cyc2ns_data data;
	struct cyc2ns *c2n;
	unsigned long flags;

	local_irq_save(flags);
	sched_clock_idle_sleep_event();

	if (!khz)
		goto done;

	ns_now = cycles_2_ns(tsc_now);

	/*
	 * Compute a new multiplier as per the above comment and ensure our
	 * time function is continuous; see the comment near struct
	 * cyc2ns_data.
	 */
	clocks_calc_mult_shift(&data.cyc2ns_mul, &data.cyc2ns_shift, khz,
			       NSEC_PER_MSEC, 0);

	/*
	 * cyc2ns_shift is exported via arch_perf_update_userpage() where it is
	 * not expected to be greater than 31 due to the original published
	 * conversion algorithm shifting a 32-bit value (now specifies a 64-bit
	 * value) - refer to the perf_event_mmap_page documentation in
	 * perf_event.h.
	 */
	if (data.cyc2ns_shift == 32) {
		data.cyc2ns_shift = 31;
		data.cyc2ns_mul >>= 1;
	}

	data.cyc2ns_offset = ns_now -
		mul_u64_u32_shr(tsc_now, data.cyc2ns_mul, data.cyc2ns_shift);

	c2n = per_cpu_ptr(&cyc2ns, cpu);

	raw_write_seqcount_latch(&c2n->seq);
	c2n->data[0] = data;
	raw_write_seqcount_latch(&c2n->seq);
	c2n->data[1] = data;

done:
	sched_clock_idle_wakeup_event();
	local_irq_restore(flags);
}

/*
 * Scheduler clock - returns current time in nanosec units.
 */
u64 native_sched_clock(void)
{
	if (static_branch_likely(&__use_tsc)) {
		u64 tsc_now = rdtsc();

		/* return the value in ns */
		return cycles_2_ns(tsc_now);
	}

	/*
	 * Fall back to jiffies if there's no TSC available:
	 * (But note that we still use it if the TSC is marked
	 *  unstable. We do this because unlike Time Of Day,
	 *  the scheduler clock tolerates small errors and it's
	 *  very important for it to be as fast as the platform
	 *  can achieve it.)
	 */

	/* No locking, but a rare wrong value is not a big deal: */
	return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
}

/*
 * Generate a sched_clock if you already have a TSC value.
 */
u64 native_sched_clock_from_tsc(u64 tsc)
{
	return cycles_2_ns(tsc);
}

/*
 * We need to define a real function for sched_clock() to override the
 * weak default version.
 */
#ifdef CONFIG_PARAVIRT
unsigned long long sched_clock(void)
{
	return paravirt_sched_clock();
}

bool using_native_sched_clock(void)
{
	return pv_time_ops.sched_clock == native_sched_clock;
}
#else
unsigned long long
sched_clock(void) __attribute__((alias("native_sched_clock")));

bool using_native_sched_clock(void) { return true; }
#endif

int check_tsc_unstable(void)
{
	return tsc_unstable;
}
EXPORT_SYMBOL_GPL(check_tsc_unstable);

#ifdef CONFIG_X86_TSC
int __init notsc_setup(char *str)
{
	pr_warn("Kernel compiled with CONFIG_X86_TSC, cannot disable TSC completely\n");
	tsc_disabled = 1;
	return 1;
}
#else
/*
 * disable flag for tsc. Takes effect by clearing the TSC cpu flag
 * in cpu/common.c
 */
int __init notsc_setup(char *str)
{
	setup_clear_cpu_cap(X86_FEATURE_TSC);
	return 1;
}
#endif

__setup("notsc", notsc_setup);

static int no_sched_irq_time;

static int __init tsc_setup(char *str)
{
	if (!strcmp(str, "reliable"))
		tsc_clocksource_reliable = 1;
	if (!strncmp(str, "noirqtime", 9))
		no_sched_irq_time = 1;
	if (!strcmp(str, "unstable"))
		mark_tsc_unstable("boot parameter");
	return 1;
}

__setup("tsc=", tsc_setup);

#define MAX_RETRIES	5
#define SMI_TRESHOLD	50000

/*
 * Read TSC and the reference counters. Take care of SMI disturbance.
 */
static u64 tsc_read_refs(u64 *p, int hpet)
{
	u64 t1, t2;
	int i;

	for (i = 0; i < MAX_RETRIES; i++) {
		t1 = get_cycles();
		if (hpet)
			*p = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF;
		else
			*p = acpi_pm_read_early();
		t2 = get_cycles();
		if ((t2 - t1) < SMI_TRESHOLD)
			return t2;
	}
	return ULLONG_MAX;
}

/*
 * Calculate the TSC frequency from HPET reference
 */
static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
{
	u64 tmp;

	if (hpet2 < hpet1)
		hpet2 += 0x100000000ULL;
	hpet2 -= hpet1;
	tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
	do_div(tmp, 1000000);
	do_div(deltatsc, tmp);

	return (unsigned long) deltatsc;
}

/*
 * Calculate the TSC frequency from PMTimer reference
 */
static unsigned long calc_pmtimer_ref(u64 deltatsc, u64 pm1, u64 pm2)
{
	u64 tmp;

	if (!pm1 && !pm2)
		return ULONG_MAX;

	if (pm2 < pm1)
		pm2 += (u64)ACPI_PM_OVRRUN;
	pm2 -= pm1;
	tmp = pm2 * 1000000000LL;
	do_div(tmp, PMTMR_TICKS_PER_SEC);
	do_div(deltatsc, tmp);

	return (unsigned long) deltatsc;
}

#define CAL_MS		10
#define CAL_LATCH	(PIT_TICK_RATE / (1000 / CAL_MS))
#define CAL_PIT_LOOPS	1000

#define CAL2_MS		50
#define CAL2_LATCH	(PIT_TICK_RATE / (1000 / CAL2_MS))
#define CAL2_PIT_LOOPS	5000
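
/*
 * Illustrative only: the latch values above are simply "PIT ticks per
 * calibration window". With PIT_TICK_RATE = 1193182 Hz, CAL_LATCH is
 * 1193182 / (1000 / 10) = 11931 ticks (~10 ms) and CAL2_LATCH is
 * 1193182 / (1000 / 50) = 59659 ticks (~50 ms). The hypothetical helper
 * below just redoes that arithmetic for an arbitrary window length.
 */
static inline unsigned long pit_latch_for_ms_example(unsigned long ms)
{
	/* only meaningful for small windows where ms divides 1000 */
	return PIT_TICK_RATE / (1000 / ms);
}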

/*
 * Try to calibrate the TSC against the Programmable
 * Interrupt Timer and return the frequency of the TSC
 * in kHz.
 *
 * Return ULONG_MAX on failure to calibrate.
 */
static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin)
{
	u64 tsc, t1, t2, delta;
	unsigned long tscmin, tscmax;
	int pitcnt;

	/* Set the Gate high, disable speaker */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	/*
	 * Setup CTC channel 2 for mode 0 (interrupt on terminal
	 * count mode), binary count. Set the latch register
	 * (LSB then MSB) to begin countdown.
	 */
	outb(0xb0, 0x43);
	outb(latch & 0xff, 0x42);
	outb(latch >> 8, 0x42);

	tsc = t1 = t2 = get_cycles();

	pitcnt = 0;
	tscmax = 0;
	tscmin = ULONG_MAX;
	while ((inb(0x61) & 0x20) == 0) {
		t2 = get_cycles();
		delta = t2 - tsc;
		tsc = t2;
		if ((unsigned long) delta < tscmin)
			tscmin = (unsigned int) delta;
		if ((unsigned long) delta > tscmax)
			tscmax = (unsigned int) delta;
		pitcnt++;
	}

	/*
	 * Sanity checks:
	 *
	 * If we were not able to read the PIT more than loopmin
	 * times, then we have been hit by a massive SMI.
	 *
	 * If the maximum is 10 times larger than the minimum,
	 * then we got hit by an SMI as well.
	 */
	if (pitcnt < loopmin || tscmax > 10 * tscmin)
		return ULONG_MAX;

	/* Calculate the TSC frequency in kHz: cycles per millisecond */
	delta = t2 - t1;
	do_div(delta, ms);
	return delta;
}

/*
 * This reads the current MSB of the PIT counter, and
 * checks if we are running on sufficiently fast and
 * non-virtualized hardware.
 *
 * Our expectations are:
 *
 *  - the PIT is running at roughly 1.19MHz
 *
 *  - each IO is going to take about 1us on real hardware,
 *    but we allow it to be much faster (by a factor of 10) or
 *    _slightly_ slower (ie we allow up to a 2us read+counter
 *    update - anything else implies an unacceptably slow CPU
 *    or PIT for the fast calibration to work.
 *
 *  - with 256 PIT ticks to read the value, we have 214us to
 *    see the same MSB (and overhead like doing a single TSC
 *    read per MSB value etc).
 *
 *  - We're doing 2 reads per loop (LSB, MSB), and we expect
 *    them each to take about a microsecond on real hardware.
 *    So we expect a count value of around 100. But we'll be
 *    generous, and accept anything over 50.
 *
 *  - if the PIT is stuck, and we see *many* more reads, we
 *    return early (and the next caller of pit_expect_msb()
 *    will then consider it a failure when they don't see the
 *    next expected value).
 *
 * These expectations mean that we know that we have seen the
 * transition from one expected value to another with a fairly
 * high accuracy, and we didn't miss any events. We can thus
 * use the TSC value at the transitions to calculate a pretty
 * good value for the TSC frequency.
 */
static inline int pit_verify_msb(unsigned char val)
{
	/* Ignore LSB */
	inb(0x42);
	return inb(0x42) == val;
}

static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *deltap)
{
	int count;
	u64 tsc = 0, prev_tsc = 0;

	for (count = 0; count < 50000; count++) {
		if (!pit_verify_msb(val))
			break;
		prev_tsc = tsc;
		tsc = get_cycles();
	}
	*deltap = get_cycles() - prev_tsc;
	*tscp = tsc;

	/*
	 * We require _some_ success, but the quality control
	 * will be based on the error terms on the TSC values.
	 */
	return count > 5;
}

/*
 * How many MSB values do we want to see?
 * We aim for a maximum error rate of 500ppm (in practice
 * the real error is much smaller), but refuse to spend more
 * than 50ms on it.
 */
#define MAX_QUICK_PIT_MS 50
#define MAX_QUICK_PIT_ITERATIONS (MAX_QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256)

static unsigned long quick_pit_calibrate(void)
{
	int i;
	u64 tsc, delta;
	unsigned long d1, d2;

	/* Set the Gate high, disable speaker */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	/*
	 * Counter 2, mode 0 (one-shot), binary count
	 *
	 * NOTE! Mode 2 decrements by two (and then the
	 * output is flipped each time, giving the same
	 * final output frequency as a decrement-by-one),
	 * so mode 0 is much better when looking at the
	 * individual counts.
	 */
	outb(0xb0, 0x43);

	/* Start at 0xffff */
	outb(0xff, 0x42);
	outb(0xff, 0x42);

	/*
	 * The PIT starts counting at the next edge, so we
	 * need to delay for a microsecond. The easiest way
	 * to do that is to just read back the 16-bit counter
	 * once from the PIT.
	 */
	pit_verify_msb(0);

	if (pit_expect_msb(0xff, &tsc, &d1)) {
		for (i = 1; i <= MAX_QUICK_PIT_ITERATIONS; i++) {
			if (!pit_expect_msb(0xff-i, &delta, &d2))
				break;

			delta -= tsc;

			/*
			 * Extrapolate the error and fail fast if the error will
			 * never be below 500 ppm.
			 */
			if (i == 1 &&
			    d1 + d2 >= (delta * MAX_QUICK_PIT_ITERATIONS) >> 11)
				return 0;

			/*
			 * Iterate until the error is less than 500 ppm
			 */
			if (d1+d2 >= delta >> 11)
				continue;

			/*
			 * Check the PIT one more time to verify that
			 * all TSC reads were stable wrt the PIT.
			 *
			 * This also guarantees serialization of the
			 * last cycle read ('d2') in pit_expect_msb.
			 */
			if (!pit_verify_msb(0xfe - i))
				break;
			goto success;
		}
	}
	pr_info("Fast TSC calibration failed\n");
	return 0;

success:
	/*
	 * Ok, if we get here, then we've seen the
	 * MSB of the PIT decrement 'i' times, and the
	 * error has shrunk to less than 500 ppm.
	 *
	 * As a result, we can depend on there not being
	 * any odd delays anywhere, and the TSC reads are
	 * reliable (within the error).
	 *
	 * kHz = ticks / time-in-seconds / 1000;
	 * kHz = (t2 - t1) / (I * 256 / PIT_TICK_RATE) / 1000
	 * kHz = ((t2 - t1) * PIT_TICK_RATE) / (I * 256 * 1000)
	 */
	delta *= PIT_TICK_RATE;
	do_div(delta, i*256*1000);
	pr_info("Fast TSC calibration using PIT\n");
	return delta;
}
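
/*
 * Illustrative only: the "error < 500 ppm" tests above compare the
 * combined read overhead (d1 + d2) against delta >> 11, i.e. against
 * delta / 2048, which is roughly 488 ppm of the elapsed cycles. With a
 * made-up TSC delta of 150,000,000 cycles over the full 50 ms window,
 * the uncertainty would have to stay below ~73242 cycles before the
 * result is accepted. The hypothetical helper below states that check
 * in isolation.
 */
static inline bool quick_pit_error_ok_example(u64 delta, unsigned long d1,
					      unsigned long d2)
{
	return (d1 + d2) < (delta >> 11);
}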

/**
 * native_calibrate_tsc
 * Determine TSC frequency via CPUID, else return 0.
 */
unsigned long native_calibrate_tsc(void)
{
	unsigned int eax_denominator, ebx_numerator, ecx_hz, edx;
	unsigned int crystal_khz;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return 0;

	if (boot_cpu_data.cpuid_level < 0x15)
		return 0;

	eax_denominator = ebx_numerator = ecx_hz = edx = 0;

	/* CPUID 15H TSC/Crystal ratio, plus optionally Crystal Hz */
	cpuid(0x15, &eax_denominator, &ebx_numerator, &ecx_hz, &edx);

	if (ebx_numerator == 0 || eax_denominator == 0)
		return 0;

	crystal_khz = ecx_hz / 1000;

	if (crystal_khz == 0) {
		switch (boot_cpu_data.x86_model) {
		case INTEL_FAM6_SKYLAKE_MOBILE:
		case INTEL_FAM6_SKYLAKE_DESKTOP:
		case INTEL_FAM6_KABYLAKE_MOBILE:
		case INTEL_FAM6_KABYLAKE_DESKTOP:
			crystal_khz = 24000;	/* 24.0 MHz */
			break;
		case INTEL_FAM6_SKYLAKE_X:
		case INTEL_FAM6_ATOM_DENVERTON:
			crystal_khz = 25000;	/* 25.0 MHz */
			break;
		case INTEL_FAM6_ATOM_GOLDMONT:
			crystal_khz = 19200;	/* 19.2 MHz */
			break;
		}
	}

	/*
	 * The TSC frequency determined by CPUID is a "hardware reported"
	 * frequency and is the most accurate one we have so far. This
	 * is considered a known frequency.
	 */
	setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);

	/*
	 * For Atom SoCs TSC is the only reliable clocksource.
	 * Mark TSC reliable so no watchdog on it.
	 */
	if (boot_cpu_data.x86_model == INTEL_FAM6_ATOM_GOLDMONT)
		setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);

	return crystal_khz * ebx_numerator / eax_denominator;
}

static unsigned long cpu_khz_from_cpuid(void)
{
	unsigned int eax_base_mhz, ebx_max_mhz, ecx_bus_mhz, edx;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return 0;

	if (boot_cpu_data.cpuid_level < 0x16)
		return 0;

	eax_base_mhz = ebx_max_mhz = ecx_bus_mhz = edx = 0;

	cpuid(0x16, &eax_base_mhz, &ebx_max_mhz, &ecx_bus_mhz, &edx);

	return eax_base_mhz * 1000;
}

/**
 * native_calibrate_cpu - calibrate the cpu on boot
 */
unsigned long native_calibrate_cpu(void)
{
	u64 tsc1, tsc2, delta, ref1, ref2;
	unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
	unsigned long flags, latch, ms, fast_calibrate;
	int hpet = is_hpet_enabled(), i, loopmin;

	fast_calibrate = cpu_khz_from_cpuid();
	if (fast_calibrate)
		return fast_calibrate;

	fast_calibrate = cpu_khz_from_msr();
	if (fast_calibrate)
		return fast_calibrate;

	local_irq_save(flags);
	fast_calibrate = quick_pit_calibrate();
	local_irq_restore(flags);
	if (fast_calibrate)
		return fast_calibrate;

	/*
	 * Run 3 calibration loops to get the lowest frequency value
	 * (the best estimate). We use two different calibration modes
	 * here:
	 *
	 * 1) PIT loop. We set the PIT Channel 2 to one-shot mode and
	 * load a timeout of 10ms (50ms for the retries). We read the
	 * time right after we started the timer and wait until the PIT
	 * count down reaches zero. In each wait loop iteration we read
	 * the TSC and check the delta to the previous read. We keep
	 * track of the min and max values of that delta. The delta is
	 * mostly defined by the IO time of the PIT access, so we can
	 * detect when an SMI/SMM disturbance happened between the two
	 * reads. If the maximum time is significantly larger than the
	 * minimum time, then we discard the result and have another try.
	 *
	 * 2) Reference counter.
	 * If available we use the HPET or the PMTIMER as a reference
	 * to check the sanity of that value. We use separate TSC
	 * readouts and check inside of the reference read for an
	 * SMI/SMM disturbance. We discard disturbed values here as
	 * well. We do that around the PIT calibration delay loop as
	 * we have to wait for a certain amount of time anyway.
	 */

	/* Preset PIT loop values */
	latch = CAL_LATCH;
	ms = CAL_MS;
	loopmin = CAL_PIT_LOOPS;

	for (i = 0; i < 3; i++) {
		unsigned long tsc_pit_khz;

		/*
		 * Read the start value and the reference count of
		 * hpet/pmtimer when available. Then do the PIT
		 * calibration, which will take at least 10ms, and
		 * read the end value.
		 */
		local_irq_save(flags);
		tsc1 = tsc_read_refs(&ref1, hpet);
		tsc_pit_khz = pit_calibrate_tsc(latch, ms, loopmin);
		tsc2 = tsc_read_refs(&ref2, hpet);
		local_irq_restore(flags);

		/* Pick the lowest PIT TSC calibration so far */
		tsc_pit_min = min(tsc_pit_min, tsc_pit_khz);

		/* hpet or pmtimer available ? */
		if (ref1 == ref2)
			continue;

		/* Check whether the sampling was disturbed by an SMI */
		if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX)
			continue;

		tsc2 = (tsc2 - tsc1) * 1000000LL;
		if (hpet)
			tsc2 = calc_hpet_ref(tsc2, ref1, ref2);
		else
			tsc2 = calc_pmtimer_ref(tsc2, ref1, ref2);

		tsc_ref_min = min(tsc_ref_min, (unsigned long) tsc2);

		/* Check the reference deviation */
		delta = ((u64) tsc_pit_min) * 100;
		do_div(delta, tsc_ref_min);

		/*
		 * If both calibration results are inside a 10% window
		 * then we can be sure that the calibration succeeded.
		 * We break out of the loop right away. We use the
		 * reference value, as it is more precise.
		 */
		if (delta >= 90 && delta <= 110) {
			pr_info("PIT calibration matches %s. %d loops\n",
				hpet ? "HPET" : "PMTIMER", i + 1);
			return tsc_ref_min;
		}

		/*
		 * Check whether PIT failed more than once. This
		 * happens in virtualized environments. We need to
		 * give the virtual PC a slightly longer timeframe for
		 * the HPET/PMTIMER to make the result precise.
		 */
		if (i == 1 && tsc_pit_min == ULONG_MAX) {
			latch = CAL2_LATCH;
			ms = CAL2_MS;
			loopmin = CAL2_PIT_LOOPS;
		}
	}

	/*
	 * Now check the results.
	 */
	if (tsc_pit_min == ULONG_MAX) {
		/* PIT gave no useful value */
		pr_warn("Unable to calibrate against PIT\n");

		/* We don't have an alternative source, disable TSC */
		if (!hpet && !ref1 && !ref2) {
			pr_notice("No reference (HPET/PMTIMER) available\n");
			return 0;
		}

		/* The alternative source failed as well, disable TSC */
		if (tsc_ref_min == ULONG_MAX) {
			pr_warn("HPET/PMTIMER calibration failed\n");
			return 0;
		}

		/* Use the alternative source */
		pr_info("using %s reference calibration\n",
			hpet ? "HPET" : "PMTIMER");

		return tsc_ref_min;
	}

	/* We don't have an alternative source, use the PIT calibration value */
	if (!hpet && !ref1 && !ref2) {
		pr_info("Using PIT calibration value\n");
		return tsc_pit_min;
	}

	/* The alternative source failed, use the PIT calibration value */
	if (tsc_ref_min == ULONG_MAX) {
		pr_warn("HPET/PMTIMER calibration failed. Using PIT calibration.\n");
		return tsc_pit_min;
	}

	/*
	 * The calibration values differ too much.
	 * In doubt, we use the PIT value as we know that there are
	 * PMTIMERs around running at double speed. At least we let
	 * the user know about it:
	 */
	pr_warn("PIT calibration deviates from %s: %lu %lu\n",
		hpet ? "HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min);
	pr_info("Using PIT calibration value\n");
	return tsc_pit_min;
}

int recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP
	unsigned long cpu_khz_old = cpu_khz;

	if (!boot_cpu_has(X86_FEATURE_TSC))
		return -ENODEV;

	cpu_khz = x86_platform.calibrate_cpu();
	tsc_khz = x86_platform.calibrate_tsc();
	if (tsc_khz == 0)
		tsc_khz = cpu_khz;
	else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz)
		cpu_khz = tsc_khz;
	cpu_data(0).loops_per_jiffy = cpufreq_scale(cpu_data(0).loops_per_jiffy,
						    cpu_khz_old, cpu_khz);

	return 0;
#else
	return -ENODEV;
#endif
}

EXPORT_SYMBOL(recalibrate_cpu_khz);


static unsigned long long cyc2ns_suspend;

void tsc_save_sched_clock_state(void)
{
	if (!sched_clock_stable())
		return;

	cyc2ns_suspend = sched_clock();
}

/*
 * Even on processors with invariant TSC, TSC gets reset in some of the
 * ACPI system sleep states. And in some systems the BIOS seems to reinit
 * the TSC to an arbitrary value (still sync'd across CPUs) during resume
 * from such sleep states. To cope with this, recompute the cyc2ns_offset
 * for each cpu so that sched_clock() continues from the point where it
 * was left off during suspend.
 */
void tsc_restore_sched_clock_state(void)
{
	unsigned long long offset;
	unsigned long flags;
	int cpu;

	if (!sched_clock_stable())
		return;

	local_irq_save(flags);

	/*
	 * We're coming out of suspend, there's no concurrency yet; don't
	 * bother being nice about the RCU stuff, just write to both
	 * data fields.
	 */

	this_cpu_write(cyc2ns.data[0].cyc2ns_offset, 0);
	this_cpu_write(cyc2ns.data[1].cyc2ns_offset, 0);

	offset = cyc2ns_suspend - sched_clock();

	for_each_possible_cpu(cpu) {
		per_cpu(cyc2ns.data[0].cyc2ns_offset, cpu) = offset;
		per_cpu(cyc2ns.data[1].cyc2ns_offset, cpu) = offset;
	}

	local_irq_restore(flags);
}

#ifdef CONFIG_CPU_FREQ
/*
 * Frequency scaling support. Adjust the TSC based timer when the CPU
 * frequency changes.
 *
 * RED-PEN: On SMP we assume all CPUs run with the same frequency. It's
 * not that important because current Opteron setups do not support
 * frequency scaling on SMP anyway.
 *
 * Should fix up last_tsc too. Currently gettimeofday in the
 * first tick after the change will be slightly wrong.
 */
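
/*
 * Illustrative only: the notifier below rescales tsc_khz and
 * loops_per_jiffy proportionally via cpufreq_scale(), i.e. roughly
 * reference_value * new_freq / ref_freq. With made-up numbers, a
 * tsc_khz_ref of 2,000,000 measured at ref_freq = 2,000,000 kHz becomes
 * 1,000,000 when the governor switches to 1,000,000 kHz. The hypothetical
 * helper below spells out that arithmetic.
 */
static inline unsigned long tsc_cpufreq_rescale_example(unsigned long ref_khz,
							unsigned int ref_freq_khz,
							unsigned int new_freq_khz)
{
	u64 tmp = (u64)ref_khz * new_freq_khz;

	do_div(tmp, ref_freq_khz);
	return (unsigned long)tmp;
}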

static unsigned int ref_freq;
static unsigned long loops_per_jiffy_ref;
static unsigned long tsc_khz_ref;

static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				 void *data)
{
	struct cpufreq_freqs *freq = data;
	unsigned long *lpj;

	lpj = &boot_cpu_data.loops_per_jiffy;
#ifdef CONFIG_SMP
	if (!(freq->flags & CPUFREQ_CONST_LOOPS))
		lpj = &cpu_data(freq->cpu).loops_per_jiffy;
#endif

	if (!ref_freq) {
		ref_freq = freq->old;
		loops_per_jiffy_ref = *lpj;
		tsc_khz_ref = tsc_khz;
	}
	if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
		*lpj = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);

		tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			mark_tsc_unstable("cpufreq changes");

		set_cyc2ns_scale(tsc_khz, freq->cpu, rdtsc());
	}

	return 0;
}

static struct notifier_block time_cpufreq_notifier_block = {
	.notifier_call = time_cpufreq_notifier
};

static int __init cpufreq_register_tsc_scaling(void)
{
	if (!boot_cpu_has(X86_FEATURE_TSC))
		return 0;
	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		return 0;
	cpufreq_register_notifier(&time_cpufreq_notifier_block,
				  CPUFREQ_TRANSITION_NOTIFIER);
	return 0;
}

core_initcall(cpufreq_register_tsc_scaling);

#endif /* CONFIG_CPU_FREQ */

#define ART_CPUID_LEAF (0x15)
#define ART_MIN_DENOMINATOR (1)


/*
 * If ART is present, detect the numerator:denominator ratio needed to
 * convert ART to TSC.
 */
static void detect_art(void)
{
	unsigned int unused[2];

	if (boot_cpu_data.cpuid_level < ART_CPUID_LEAF)
		return;

	/* Don't enable ART in a VM; non-stop TSC and TSC_ADJUST are required */
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR) ||
	    !boot_cpu_has(X86_FEATURE_NONSTOP_TSC) ||
	    !boot_cpu_has(X86_FEATURE_TSC_ADJUST))
		return;

	cpuid(ART_CPUID_LEAF, &art_to_tsc_denominator,
	      &art_to_tsc_numerator, unused, unused+1);

	if (art_to_tsc_denominator < ART_MIN_DENOMINATOR)
		return;

	rdmsrl(MSR_IA32_TSC_ADJUST, art_to_tsc_offset);

	/* Make this sticky over multiple CPU init calls */
	setup_force_cpu_cap(X86_FEATURE_ART);
}


/* clocksource code */

static struct clocksource clocksource_tsc;

static void tsc_resume(struct clocksource *cs)
{
	tsc_verify_tsc_adjust(true);
}

/*
 * We used to compare the TSC to the cycle_last value in the clocksource
 * structure to avoid a nasty time-warp. This can be observed in a
 * very small window right after one CPU updated cycle_last under
 * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
 * is smaller than the cycle_last reference value due to a TSC which
 * is slightly behind. This delta is nowhere else observable, but in
 * that case it results in a forward time jump in the range of hours
 * due to the unsigned delta calculation of the time keeping core
 * code, which is necessary to support wrapping clocksources like pm
 * timer.
 *
 * This sanity check is now done in the core timekeeping code,
 * checking the result of read_tsc() - cycle_last for being negative.
 * That works because CLOCKSOURCE_MASK(64) does not mask out any bit.
 */
static u64 read_tsc(struct clocksource *cs)
{
	return (u64)rdtsc_ordered();
}

static void tsc_cs_mark_unstable(struct clocksource *cs)
{
	if (tsc_unstable)
		return;

	tsc_unstable = 1;
	if (using_native_sched_clock())
		clear_sched_clock_stable();
	disable_sched_clock_irqtime();
	pr_info("Marking TSC unstable due to clocksource watchdog\n");
}

static void tsc_cs_tick_stable(struct clocksource *cs)
{
	if (tsc_unstable)
		return;

	if (using_native_sched_clock())
		sched_clock_tick_stable();
}

/*
 * .mask MUST be CLOCKSOURCE_MASK(64). See comment above read_tsc()
 */
static struct clocksource clocksource_tsc = {
	.name			= "tsc",
	.rating			= 300,
	.read			= read_tsc,
	.mask			= CLOCKSOURCE_MASK(64),
	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
				  CLOCK_SOURCE_MUST_VERIFY,
	.archdata		= { .vclock_mode = VCLOCK_TSC },
	.resume			= tsc_resume,
	.mark_unstable		= tsc_cs_mark_unstable,
	.tick_stable		= tsc_cs_tick_stable,
};

void mark_tsc_unstable(char *reason)
{
	if (tsc_unstable)
		return;

	tsc_unstable = 1;
	if (using_native_sched_clock())
		clear_sched_clock_stable();
	disable_sched_clock_irqtime();
	pr_info("Marking TSC unstable due to %s\n", reason);
	/* Change only the rating, when not registered */
	if (clocksource_tsc.mult) {
		clocksource_mark_unstable(&clocksource_tsc);
	} else {
		clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE;
		clocksource_tsc.rating = 0;
	}
}

EXPORT_SYMBOL_GPL(mark_tsc_unstable);

static void __init check_system_tsc_reliable(void)
{
#if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
	if (is_geode_lx()) {
		/* RTSC counts during suspend */
#define RTSC_SUSP 0x100
		unsigned long res_low, res_high;

		rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
		/* Geode_LX - the OLPC CPU has a very reliable TSC */
		if (res_low & RTSC_SUSP)
			tsc_clocksource_reliable = 1;
	}
#endif
	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
		tsc_clocksource_reliable = 1;
}

/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
int unsynchronized_tsc(void)
{
	if (!boot_cpu_has(X86_FEATURE_TSC) || tsc_unstable)
		return 1;

#ifdef CONFIG_SMP
	if (apic_is_clustered_box())
		return 1;
#endif

	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		return 0;

	if (tsc_clocksource_reliable)
		return 0;
	/*
	 * Intel systems are normally all synchronized.
	 * Exceptions must mark TSC as unstable:
	 */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
		/* assume multi socket systems are not synchronized: */
		if (num_possible_cpus() > 1)
			return 1;
	}

	return 0;
}

/*
 * Convert ART to TSC given numerator/denominator found in detect_art()
 */
struct system_counterval_t convert_art_to_tsc(u64 art)
{
	u64 tmp, res, rem;

	rem = do_div(art, art_to_tsc_denominator);

	res = art * art_to_tsc_numerator;
	tmp = rem * art_to_tsc_numerator;

	do_div(tmp, art_to_tsc_denominator);
	res += tmp + art_to_tsc_offset;

	return (struct system_counterval_t) {.cs = art_related_clocksource,
					     .cycles = res};
}
EXPORT_SYMBOL(convert_art_to_tsc);
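
/*
 * Illustrative only: convert_art_to_tsc() evaluates
 * art * numerator / denominator + offset, but splits it into
 * (art / den) * num + ((art % den) * num) / den so the intermediate
 * product cannot overflow 64 bits for large ART values. The hypothetical
 * helper below performs the same split arithmetic on caller-supplied
 * values; e.g. art = 1000 with a made-up 100:25 ratio and offset 0
 * yields 4000.
 */
static inline u64 art_scale_example(u64 art, u32 num, u32 den, u64 offset)
{
	u64 rem, res, tmp;

	rem = do_div(art, den);		/* art becomes art / den */
	res = art * num;
	tmp = rem * num;
	do_div(tmp, den);

	return res + tmp + offset;
}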

static void tsc_refine_calibration_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work);
/**
 * tsc_refine_calibration_work - Further refine tsc freq calibration
 * @work: ignored.
 *
 * This function uses delayed work over a period of a
 * second to further refine the TSC freq value. Since this is
 * timer based, instead of loop based, we don't block the boot
 * process while this longer calibration is done.
 *
 * If there are any calibration anomalies (too many SMIs, etc),
 * or the refined calibration is off by more than 1% from the
 * fast early calibration, we throw out the new calibration and
 * use the early calibration.
 */
static void tsc_refine_calibration_work(struct work_struct *work)
{
	static u64 tsc_start = -1, ref_start;
	static int hpet;
	u64 tsc_stop, ref_stop, delta;
	unsigned long freq;
	int cpu;

	/* Don't bother refining TSC on unstable systems */
	if (check_tsc_unstable())
		goto out;

	/*
	 * Since the work is started early in boot, we may be
	 * delayed the first time we expire. So set the workqueue
	 * again once we know timers are working.
	 */
	if (tsc_start == -1) {
		/*
		 * Only set hpet once, to avoid mixing hardware
		 * if the hpet becomes enabled later.
		 */
		hpet = is_hpet_enabled();
		schedule_delayed_work(&tsc_irqwork, HZ);
		tsc_start = tsc_read_refs(&ref_start, hpet);
		return;
	}

	tsc_stop = tsc_read_refs(&ref_stop, hpet);

	/* hpet or pmtimer available ? */
	if (ref_start == ref_stop)
		goto out;

	/* Check whether the sampling was disturbed by an SMI */
	if (tsc_start == ULLONG_MAX || tsc_stop == ULLONG_MAX)
		goto out;

	delta = tsc_stop - tsc_start;
	delta *= 1000000LL;
	if (hpet)
		freq = calc_hpet_ref(delta, ref_start, ref_stop);
	else
		freq = calc_pmtimer_ref(delta, ref_start, ref_stop);

	/* Make sure we're within 1% */
	if (abs(tsc_khz - freq) > tsc_khz/100)
		goto out;

	tsc_khz = freq;
	pr_info("Refined TSC clocksource calibration: %lu.%03lu MHz\n",
		(unsigned long)tsc_khz / 1000,
		(unsigned long)tsc_khz % 1000);

	/* Inform the TSC deadline clockevent devices about the recalibration */
	lapic_update_tsc_freq();

	/* Update the sched_clock() rate to match the clocksource one */
	for_each_possible_cpu(cpu)
		set_cyc2ns_scale(tsc_khz, cpu, tsc_stop);

out:
	if (boot_cpu_has(X86_FEATURE_ART))
		art_related_clocksource = &clocksource_tsc;
	clocksource_register_khz(&clocksource_tsc, tsc_khz);
}


static int __init init_tsc_clocksource(void)
{
	if (!boot_cpu_has(X86_FEATURE_TSC) || tsc_disabled > 0 || !tsc_khz)
		return 0;

	if (tsc_clocksource_reliable)
		clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
	/* lower the rating if we already know it's unstable: */
	if (check_tsc_unstable()) {
		clocksource_tsc.rating = 0;
		clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
	}

	if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
		clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;

	/*
	 * When the TSC frequency is known (retrieved via MSR or CPUID), we
	 * skip the refined calibration and directly register it as a
	 * clocksource.
	 */
	if (boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) {
		if (boot_cpu_has(X86_FEATURE_ART))
			art_related_clocksource = &clocksource_tsc;
		clocksource_register_khz(&clocksource_tsc, tsc_khz);
		return 0;
	}

	schedule_delayed_work(&tsc_irqwork, 0);
	return 0;
}
/*
 * We use device_initcall here, to ensure we run after the hpet
 * is fully initialized, which may occur at fs_initcall time.
 */
device_initcall(init_tsc_clocksource);

void __init tsc_init(void)
{
	u64 lpj, cyc;
	int cpu;

	if (!boot_cpu_has(X86_FEATURE_TSC)) {
		setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
		return;
	}

	cpu_khz = x86_platform.calibrate_cpu();
	tsc_khz = x86_platform.calibrate_tsc();

	/*
	 * Trust a non-zero tsc_khz as authoritative,
	 * and use it to sanity check cpu_khz,
	 * which will be off if the system timer is off.
	 */
	if (tsc_khz == 0)
		tsc_khz = cpu_khz;
	else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz)
		cpu_khz = tsc_khz;

	if (!tsc_khz) {
		mark_tsc_unstable("could not calculate TSC khz");
		setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
		return;
	}

	pr_info("Detected %lu.%03lu MHz processor\n",
		(unsigned long)cpu_khz / 1000,
		(unsigned long)cpu_khz % 1000);

	/* Sanitize TSC ADJUST before cyc2ns gets initialized */
	tsc_store_and_check_tsc_adjust(true);

	/*
	 * Secondary CPUs do not run through tsc_init(), so set up
	 * all the scale factors for all CPUs, assuming the same
	 * speed as the bootup CPU.
	 * (cpufreq notifiers will fix this up if their speed diverges)
	 */
	cyc = rdtsc();
	for_each_possible_cpu(cpu) {
		cyc2ns_init(cpu);
		set_cyc2ns_scale(tsc_khz, cpu, cyc);
	}

	if (tsc_disabled > 0)
		return;

	/* now allow native_sched_clock() to use rdtsc */

	tsc_disabled = 0;
	static_branch_enable(&__use_tsc);

	if (!no_sched_irq_time)
		enable_sched_clock_irqtime();

	lpj = ((u64)tsc_khz * 1000);
	do_div(lpj, HZ);
	lpj_fine = lpj;

	use_tsc_delay();

	check_system_tsc_reliable();

	if (unsynchronized_tsc())
		mark_tsc_unstable("TSCs unsynchronized");

	detect_art();
}

#ifdef CONFIG_SMP
/*
 * If we have a constant TSC and are using the TSC for the delay loop,
 * we can skip clock calibration if another cpu in the same socket has
 * already been calibrated. This assumes that CONSTANT_TSC applies to all
 * cpus in the socket - this should be a safe assumption.
 */
unsigned long calibrate_delay_is_known(void)
{
	int sibling, cpu = smp_processor_id();
	struct cpumask *mask = topology_core_cpumask(cpu);

	if (!tsc_disabled && !cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC))
		return 0;

	if (!mask)
		return 0;

	sibling = cpumask_any_but(mask, cpu);
	if (sibling < nr_cpu_ids)
		return cpu_data(sibling).loops_per_jiffy;
	return 0;
}
#endif