#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/acpi_pmtmr.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/clocksource.h>
#include <linux/percpu.h>
#include <linux/timex.h>
#include <linux/static_key.h>

#include <asm/hpet.h>
#include <asm/timer.h>
#include <asm/vgtod.h>
#include <asm/time.h>
#include <asm/delay.h>
#include <asm/hypervisor.h>
#include <asm/nmi.h>
#include <asm/x86_init.h>
#include <asm/geode.h>

unsigned int __read_mostly cpu_khz;	/* TSC clocks / usec, not used here */
EXPORT_SYMBOL(cpu_khz);

unsigned int __read_mostly tsc_khz;
EXPORT_SYMBOL(tsc_khz);

/*
 * TSC can be unstable due to cpufreq or due to unsynced TSCs
 */
static int __read_mostly tsc_unstable;

/*
 * native_sched_clock() is called before tsc_init(), so we must start with
 * the TSC soft disabled to prevent erroneous rdtsc usage on !cpu_has_tsc
 * processors.
 */
static int __read_mostly tsc_disabled = -1;

static DEFINE_STATIC_KEY_FALSE(__use_tsc);

int tsc_clocksource_reliable;

/*
 * Use a ring-buffer like data structure, where a writer advances the head by
 * writing a new data entry and a reader advances the tail when it observes a
 * new entry.
 *
 * Writers are made to wait on readers until there's space to write a new
 * entry.
 *
 * This means that we can always use an {offset, mul} pair to compute a ns
 * value that is 'roughly' in the right direction, even if we're writing a new
 * {offset, mul} pair during the clock read.
 *
 * The down-side is that we can no longer guarantee strict monotonicity
 * (assuming the TSC was monotonic to begin with), because while we compute
 * the intersection point of the two clock slopes and make sure the time is
 * continuous at the point of switching, we can no longer guarantee a reader
 * is strictly before or after the switch point.
 *
 * It does mean a reader no longer needs to disable IRQs in order to avoid
 * CPU-Freq updates messing with its times, and similarly an NMI reader will
 * no longer run the risk of hitting half-written state.
 */

struct cyc2ns {
	struct cyc2ns_data data[2];	/*  0 + 2*24 = 48 */
	struct cyc2ns_data *head;	/* 48 + 8    = 56 */
	struct cyc2ns_data *tail;	/* 56 + 8    = 64 */
}; /* exactly fits one cacheline */

static DEFINE_PER_CPU_ALIGNED(struct cyc2ns, cyc2ns);

struct cyc2ns_data *cyc2ns_read_begin(void)
{
	struct cyc2ns_data *head;

	preempt_disable();

	head = this_cpu_read(cyc2ns.head);
	/*
	 * Ensure we observe the entry when we observe the pointer to it.
	 * Matches the wmb from cyc2ns_write_end().
	 */
	smp_read_barrier_depends();
	head->__count++;
	barrier();

	return head;
}

void cyc2ns_read_end(struct cyc2ns_data *head)
{
	barrier();
	/*
	 * If we're the outermost nested read, update the tail pointer
	 * when we're done. This notifies possible pending writers
	 * that we've observed the head pointer and that the other
	 * entry is now free.
	 */
	if (!--head->__count) {
		/*
		 * x86-TSO does not reorder writes with older reads;
		 * therefore once this write becomes visible to another
		 * cpu, we must be finished reading the cyc2ns_data.
		 *
		 * Matches with cyc2ns_write_begin().
		 */
		this_cpu_write(cyc2ns.tail, head);
	}
	preempt_enable();
}
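
/*
 * Illustrative (hypothetical) use of the read side, e.g. from a caller that
 * wants a consistent {mul, shift, offset} snapshot outside of cycles_2_ns():
 *
 *	struct cyc2ns_data *data = cyc2ns_read_begin();
 *
 *	ns = data->cyc2ns_offset +
 *	     mul_u64_u32_shr(tsc, data->cyc2ns_mul, data->cyc2ns_shift);
 *
 *	cyc2ns_read_end(data);
 */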

/*
 * Begin writing a new @data entry for @cpu.
 *
 * Assumes some sort of write side lock; currently 'provided' by the assumption
 * that cpufreq will call its notifiers sequentially.
 */
static struct cyc2ns_data *cyc2ns_write_begin(int cpu)
{
	struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu);
	struct cyc2ns_data *data = c2n->data;

	if (data == c2n->head)
		data++;

	/* XXX send an IPI to @cpu in order to guarantee a read? */

	/*
	 * When we observe the tail write from cyc2ns_read_end(),
	 * the cpu must be done with that entry and it's safe
	 * to start writing to it.
	 */
	while (c2n->tail == data)
		cpu_relax();

	return data;
}

static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
{
	struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu);

	/*
	 * Ensure the @data writes are visible before we publish the
	 * entry. Matches the data dependency in cyc2ns_read_begin().
	 */
	smp_wmb();

	ACCESS_ONCE(c2n->head) = data;
}

/*
 * Accelerators for sched_clock()
 * convert from cycles (64 bits) => nanoseconds (64 bits)
 *
 * basic equation:
 *	ns = cycles / (freq / ns_per_sec)
 *	ns = cycles * (ns_per_sec / freq)
 *	ns = cycles * (10^9 / (cpu_khz * 10^3))
 *	ns = cycles * (10^6 / cpu_khz)
 *
 * Then we use scaling math (suggested by george@mvista.com) to get:
 *	ns = cycles * (10^6 * SC / cpu_khz) / SC
 *	ns = cycles * cyc2ns_scale / SC
 *
 * And since SC is a constant power of two, we can convert the div
 * into a shift. The larger SC is, the more accurate the conversion, but
 * cyc2ns_scale needs to be a 32-bit value so that 32-bit multiplication
 * (64-bit result) can be used.
 *
 * We can use a khz divisor instead of mhz to keep better precision.
 * (mathieu.desnoyers@polymtl.ca)
 *
 *			-johnstul@us.ibm.com "math is hard, lets go shopping!"
 */

static void cyc2ns_data_init(struct cyc2ns_data *data)
{
	data->cyc2ns_mul = 0;
	data->cyc2ns_shift = 0;
	data->cyc2ns_offset = 0;
	data->__count = 0;
}

static void cyc2ns_init(int cpu)
{
	struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu);

	cyc2ns_data_init(&c2n->data[0]);
	cyc2ns_data_init(&c2n->data[1]);

	c2n->head = c2n->data;
	c2n->tail = c2n->data;
}

static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
	struct cyc2ns_data *data, *tail;
	unsigned long long ns;

	/*
	 * See cyc2ns_read_*() for details; replicated in order to avoid
	 * an extra few instructions that came with the abstraction.
	 * Notably, it allows us to only do the __count and tail update
	 * dance when it's actually needed.
	 */
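	/*
	 * Illustrative numbers (an assumption for this comment, not taken
	 * from real hardware): with a 3 GHz TSC (tsc_khz == 3000000) and a
	 * shift of 32, set_cyc2ns_scale() would compute a multiplier of
	 * roughly 2^32 / 3 ~= 1431655765, so the mul_u64_u32_shr() below
	 * yields about one nanosecond per three cycles, as expected.
	 */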

	preempt_disable_notrace();
	data = this_cpu_read(cyc2ns.head);
	tail = this_cpu_read(cyc2ns.tail);

	if (likely(data == tail)) {
		ns = data->cyc2ns_offset;
		ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift);
	} else {
		data->__count++;

		barrier();

		ns = data->cyc2ns_offset;
		ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift);

		barrier();

		if (!--data->__count)
			this_cpu_write(cyc2ns.tail, data);
	}
	preempt_enable_notrace();

	return ns;
}

static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
{
	unsigned long long tsc_now, ns_now;
	struct cyc2ns_data *data;
	unsigned long flags;

	local_irq_save(flags);
	sched_clock_idle_sleep_event();

	if (!cpu_khz)
		goto done;

	data = cyc2ns_write_begin(cpu);

	tsc_now = rdtsc();
	ns_now = cycles_2_ns(tsc_now);

	/*
	 * Compute a new multiplier as per the above comment and ensure our
	 * time function is continuous; see the comment near struct
	 * cyc2ns_data.
	 */
	clocks_calc_mult_shift(&data->cyc2ns_mul, &data->cyc2ns_shift, cpu_khz,
			       NSEC_PER_MSEC, 0);

	data->cyc2ns_offset = ns_now -
		mul_u64_u32_shr(tsc_now, data->cyc2ns_mul, data->cyc2ns_shift);

	cyc2ns_write_end(cpu, data);

done:
	sched_clock_idle_wakeup_event(0);
	local_irq_restore(flags);
}

/*
 * Scheduler clock - returns current time in nanosec units.
 */
u64 native_sched_clock(void)
{
	if (static_branch_likely(&__use_tsc)) {
		u64 tsc_now = rdtsc();

		/* return the value in ns */
		return cycles_2_ns(tsc_now);
	}

	/*
	 * Fall back to jiffies if there's no TSC available:
	 * ( But note that we still use it if the TSC is marked
	 *   unstable. We do this because unlike Time Of Day,
	 *   the scheduler clock tolerates small errors and it's
	 *   very important for it to be as fast as the platform
	 *   can achieve it. )
	 */

	/* No locking but a rare wrong value is not a big deal: */
	return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
}

/*
 * Generate a sched_clock() value if you already have a TSC value.
 */
u64 native_sched_clock_from_tsc(u64 tsc)
{
	return cycles_2_ns(tsc);
}

/*
 * We need to define a real function for sched_clock(), to override the
 * weak default version.
 */
#ifdef CONFIG_PARAVIRT
unsigned long long sched_clock(void)
{
	return paravirt_sched_clock();
}
#else
unsigned long long
sched_clock(void) __attribute__((alias("native_sched_clock")));
#endif

int check_tsc_unstable(void)
{
	return tsc_unstable;
}
EXPORT_SYMBOL_GPL(check_tsc_unstable);

int check_tsc_disabled(void)
{
	return tsc_disabled;
}
EXPORT_SYMBOL_GPL(check_tsc_disabled);

#ifdef CONFIG_X86_TSC
int __init notsc_setup(char *str)
{
	pr_warn("Kernel compiled with CONFIG_X86_TSC, cannot disable TSC completely\n");
	tsc_disabled = 1;
	return 1;
}
#else
/*
 * disable flag for tsc. Takes effect by clearing the TSC cpu flag
 * in cpu/common.c
 */
int __init notsc_setup(char *str)
{
	setup_clear_cpu_cap(X86_FEATURE_TSC);
	return 1;
}
#endif

__setup("notsc", notsc_setup);
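
/*
 * Command line handling below, summarized from the code: "tsc=reliable"
 * marks the TSC clocksource reliable, so the clocksource watchdog
 * verification is skipped; "tsc=noirqtime" disables TSC based IRQ-time
 * accounting.
 */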
static int no_sched_irq_time;

static int __init tsc_setup(char *str)
{
	if (!strcmp(str, "reliable"))
		tsc_clocksource_reliable = 1;
	if (!strncmp(str, "noirqtime", 9))
		no_sched_irq_time = 1;
	return 1;
}

__setup("tsc=", tsc_setup);

#define MAX_RETRIES	5
#define SMI_TRESHOLD	50000

/*
 * Read TSC and the reference counters. Take care of SMI disturbance.
 */
static u64 tsc_read_refs(u64 *p, int hpet)
{
	u64 t1, t2;
	int i;

	for (i = 0; i < MAX_RETRIES; i++) {
		t1 = get_cycles();
		if (hpet)
			*p = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF;
		else
			*p = acpi_pm_read_early();
		t2 = get_cycles();
		if ((t2 - t1) < SMI_TRESHOLD)
			return t2;
	}
	return ULLONG_MAX;
}

/*
 * Calculate the TSC frequency from HPET reference
 */
static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
{
	u64 tmp;

	if (hpet2 < hpet1)
		hpet2 += 0x100000000ULL;
	hpet2 -= hpet1;
	tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
	do_div(tmp, 1000000);
	do_div(deltatsc, tmp);

	return (unsigned long) deltatsc;
}

/*
 * Calculate the TSC frequency from PMTimer reference
 */
static unsigned long calc_pmtimer_ref(u64 deltatsc, u64 pm1, u64 pm2)
{
	u64 tmp;

	if (!pm1 && !pm2)
		return ULONG_MAX;

	if (pm2 < pm1)
		pm2 += (u64)ACPI_PM_OVRRUN;
	pm2 -= pm1;
	tmp = pm2 * 1000000000LL;
	do_div(tmp, PMTMR_TICKS_PER_SEC);
	do_div(deltatsc, tmp);

	return (unsigned long) deltatsc;
}

#define CAL_MS		10
#define CAL_LATCH	(PIT_TICK_RATE / (1000 / CAL_MS))
#define CAL_PIT_LOOPS	1000

#define CAL2_MS		50
#define CAL2_LATCH	(PIT_TICK_RATE / (1000 / CAL2_MS))
#define CAL2_PIT_LOOPS	5000


/*
 * Try to calibrate the TSC against the Programmable
 * Interrupt Timer and return the frequency of the TSC
 * in kHz.
 *
 * Return ULONG_MAX on failure to calibrate.
 */
static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin)
{
	u64 tsc, t1, t2, delta;
	unsigned long tscmin, tscmax;
	int pitcnt;

	/* Set the Gate high, disable speaker */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	/*
	 * Setup CTC channel 2 for mode 0 (interrupt on terminal
	 * count), binary count. Set the latch register
	 * (LSB then MSB) to begin the countdown.
	 */
	outb(0xb0, 0x43);
	outb(latch & 0xff, 0x42);
	outb(latch >> 8, 0x42);

	tsc = t1 = t2 = get_cycles();

	pitcnt = 0;
	tscmax = 0;
	tscmin = ULONG_MAX;
	while ((inb(0x61) & 0x20) == 0) {
		t2 = get_cycles();
		delta = t2 - tsc;
		tsc = t2;
		if ((unsigned long) delta < tscmin)
			tscmin = (unsigned int) delta;
		if ((unsigned long) delta > tscmax)
			tscmax = (unsigned int) delta;
		pitcnt++;
	}

	/*
	 * Sanity checks:
	 *
	 * If we were not able to read the PIT more than loopmin
	 * times, then we have been hit by a massive SMI.
	 *
	 * If the maximum is 10 times larger than the minimum,
	 * then we got hit by an SMI as well.
	 */
	if (pitcnt < loopmin || tscmax > 10 * tscmin)
		return ULONG_MAX;

	/* Calculate the TSC rate in kHz */
	delta = t2 - t1;
	do_div(delta, ms);
	return delta;
}

/*
 * This reads the current MSB of the PIT counter, and
 * checks if we are running on sufficiently fast and
 * non-virtualized hardware.
 *
 * Our expectations are:
 *
 *  - the PIT is running at roughly 1.19MHz
 *
 *  - each IO is going to take about 1us on real hardware,
 *    but we allow it to be much faster (by a factor of 10) or
 *    _slightly_ slower (i.e. we allow up to a 2us read+counter
 *    update - anything else implies an unacceptably slow CPU
 *    or PIT for the fast calibration to work).
 *
 *  - with 256 PIT ticks to read the value, we have 214us to
 *    see the same MSB (and overhead like doing a single TSC
 *    read per MSB value etc).
 *
 *  - We're doing 2 reads per loop (LSB, MSB), and we expect
 *    them each to take about a microsecond on real hardware.
 *    So we expect a count value of around 100. But we'll be
 *    generous, and accept anything over 50.
 *
 *  - if the PIT is stuck, and we see *many* more reads, we
 *    return early (and the next caller of pit_expect_msb()
 *    will then consider it a failure when they don't see the
 *    next expected value).
 *
 * These expectations mean that we know that we have seen the
 * transition from one expected value to another with a fairly
 * high accuracy, and we didn't miss any events. We can thus
 * use the TSC value at the transitions to calculate a pretty
 * good value for the TSC frequency.
 */
static inline int pit_verify_msb(unsigned char val)
{
	/* Ignore LSB */
	inb(0x42);
	return inb(0x42) == val;
}

static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *deltap)
{
	int count;
	u64 tsc = 0, prev_tsc = 0;

	for (count = 0; count < 50000; count++) {
		if (!pit_verify_msb(val))
			break;
		prev_tsc = tsc;
		tsc = get_cycles();
	}
	*deltap = get_cycles() - prev_tsc;
	*tscp = tsc;

	/*
	 * We require _some_ success, but the quality control
	 * will be based on the error terms on the TSC values.
	 */
	return count > 5;
}

/*
 * How many MSB values do we want to see? We aim for
 * a maximum error rate of 500ppm (in practice the
 * real error is much smaller), but refuse to spend
 * more than 50ms on it.
 */
#define MAX_QUICK_PIT_MS 50
#define MAX_QUICK_PIT_ITERATIONS (MAX_QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256)

static unsigned long quick_pit_calibrate(void)
{
	int i;
	u64 tsc, delta;
	unsigned long d1, d2;

	/* Set the Gate high, disable speaker */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	/*
	 * Counter 2, mode 0 (one-shot), binary count
	 *
	 * NOTE! Mode 2 decrements by two (and then the
	 * output is flipped each time, giving the same
	 * final output frequency as a decrement-by-one),
	 * so mode 0 is much better when looking at the
	 * individual counts.
	 */
	outb(0xb0, 0x43);

	/* Start at 0xffff */
	outb(0xff, 0x42);
	outb(0xff, 0x42);

	/*
	 * The PIT starts counting at the next edge, so we
	 * need to delay for a microsecond. The easiest way
	 * to do that is to just read back the 16-bit counter
	 * once from the PIT.
	 */
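	/*
	 * pit_verify_msb() performs exactly that one dummy read: it reads
	 * (and discards) the LSB, then reads the MSB of counter 2.
	 */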
	pit_verify_msb(0);

	if (pit_expect_msb(0xff, &tsc, &d1)) {
		for (i = 1; i <= MAX_QUICK_PIT_ITERATIONS; i++) {
			if (!pit_expect_msb(0xff-i, &delta, &d2))
				break;

			delta -= tsc;

			/*
			 * Extrapolate the error and fail fast if the error will
			 * never be below 500 ppm.
			 */
			if (i == 1 &&
			    d1 + d2 >= (delta * MAX_QUICK_PIT_ITERATIONS) >> 11)
				return 0;

			/*
			 * Iterate until the error is less than 500 ppm
			 */
			if (d1+d2 >= delta >> 11)
				continue;

			/*
			 * Check the PIT one more time to verify that
			 * all TSC reads were stable wrt the PIT.
			 *
			 * This also guarantees serialization of the
			 * last cycle read ('d2') in pit_expect_msb.
			 */
			if (!pit_verify_msb(0xfe - i))
				break;
			goto success;
		}
	}
	pr_info("Fast TSC calibration failed\n");
	return 0;

success:
	/*
	 * Ok, if we get here, then we've seen the
	 * MSB of the PIT decrement 'i' times, and the
	 * error has shrunk to less than 500 ppm.
	 *
	 * As a result, we can depend on there not being
	 * any odd delays anywhere, and the TSC reads are
	 * reliable (within the error).
	 *
	 * kHz = ticks / time-in-seconds / 1000;
	 * kHz = (t2 - t1) / (I * 256 / PIT_TICK_RATE) / 1000
	 * kHz = ((t2 - t1) * PIT_TICK_RATE) / (I * 256 * 1000)
	 */
	delta *= PIT_TICK_RATE;
	do_div(delta, i*256*1000);
	pr_info("Fast TSC calibration using PIT\n");
	return delta;
}

/**
 * native_calibrate_tsc - calibrate the tsc on boot
 */
unsigned long native_calibrate_tsc(void)
{
	u64 tsc1, tsc2, delta, ref1, ref2;
	unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
	unsigned long flags, latch, ms, fast_calibrate;
	int hpet = is_hpet_enabled(), i, loopmin;

	/* Calibrate TSC using MSR for Intel Atom SoCs */
	local_irq_save(flags);
	fast_calibrate = try_msr_calibrate_tsc();
	local_irq_restore(flags);
	if (fast_calibrate)
		return fast_calibrate;

	local_irq_save(flags);
	fast_calibrate = quick_pit_calibrate();
	local_irq_restore(flags);
	if (fast_calibrate)
		return fast_calibrate;

	/*
	 * Run 3 calibration loops to get the lowest frequency value
	 * (the best estimate). We use two different calibration modes
	 * here:
	 *
	 * 1) PIT loop. We set the PIT Channel 2 to one-shot mode and
	 * load a timeout of 50ms. We read the time right after we
	 * started the timer and wait until the PIT count down reaches
	 * zero. In each wait loop iteration we read the TSC and check
	 * the delta to the previous read. We keep track of the min
	 * and max values of that delta. The delta is mostly defined
	 * by the IO time of the PIT access, so we can detect when an
	 * SMI/SMM disturbance happened between the two reads. If the
	 * maximum time is significantly larger than the minimum time,
	 * then we discard the result and have another try.
	 *
	 * 2) Reference counter. If available we use the HPET or the
	 * PMTIMER as a reference to check the sanity of that value.
	 * We use separate TSC readouts and check inside of the
	 * reference read for an SMI/SMM disturbance. We discard
	 * disturbed values here as well. We do that around the PIT
	 * calibration delay loop as we have to wait for a certain
	 * amount of time anyway.
	 */
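
	/*
	 * For scale (illustrative, assuming the usual PIT_TICK_RATE of
	 * 1193182 Hz): CAL_LATCH is ~11931 PIT ticks, i.e. a 10 ms timeout,
	 * and CAL2_LATCH is ~59659 ticks, i.e. 50 ms.
	 */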
	/* Preset PIT loop values */
	latch = CAL_LATCH;
	ms = CAL_MS;
	loopmin = CAL_PIT_LOOPS;

	for (i = 0; i < 3; i++) {
		unsigned long tsc_pit_khz;

		/*
		 * Read the start value and the reference count of
		 * hpet/pmtimer when available. Then do the PIT
		 * calibration, which will take at least 50ms, and
		 * read the end value.
		 */
		local_irq_save(flags);
		tsc1 = tsc_read_refs(&ref1, hpet);
		tsc_pit_khz = pit_calibrate_tsc(latch, ms, loopmin);
		tsc2 = tsc_read_refs(&ref2, hpet);
		local_irq_restore(flags);

		/* Pick the lowest PIT TSC calibration so far */
		tsc_pit_min = min(tsc_pit_min, tsc_pit_khz);

		/* hpet or pmtimer available ? */
		if (ref1 == ref2)
			continue;

		/* Check whether the sampling was disturbed by an SMI */
		if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX)
			continue;

		tsc2 = (tsc2 - tsc1) * 1000000LL;
		if (hpet)
			tsc2 = calc_hpet_ref(tsc2, ref1, ref2);
		else
			tsc2 = calc_pmtimer_ref(tsc2, ref1, ref2);

		tsc_ref_min = min(tsc_ref_min, (unsigned long) tsc2);

		/* Check the reference deviation */
		delta = ((u64) tsc_pit_min) * 100;
		do_div(delta, tsc_ref_min);

		/*
		 * If both calibration results are inside a 10% window
		 * then we can be sure that the calibration
		 * succeeded. We break out of the loop right away. We
		 * use the reference value, as it is more precise.
		 */
		if (delta >= 90 && delta <= 110) {
			pr_info("PIT calibration matches %s. %d loops\n",
				hpet ? "HPET" : "PMTIMER", i + 1);
			return tsc_ref_min;
		}

		/*
		 * Check whether PIT failed more than once. This
		 * happens in virtualized environments. We need to
		 * give the virtual PC a slightly longer timeframe for
		 * the HPET/PMTIMER to make the result precise.
		 */
		if (i == 1 && tsc_pit_min == ULONG_MAX) {
			latch = CAL2_LATCH;
			ms = CAL2_MS;
			loopmin = CAL2_PIT_LOOPS;
		}
	}

	/*
	 * Now check the results.
	 */
	if (tsc_pit_min == ULONG_MAX) {
		/* PIT gave no useful value */
		pr_warn("Unable to calibrate against PIT\n");

		/* We don't have an alternative source, disable TSC */
		if (!hpet && !ref1 && !ref2) {
			pr_notice("No reference (HPET/PMTIMER) available\n");
			return 0;
		}

		/* The alternative source failed as well, disable TSC */
		if (tsc_ref_min == ULONG_MAX) {
			pr_warn("HPET/PMTIMER calibration failed\n");
			return 0;
		}

		/* Use the alternative source */
		pr_info("using %s reference calibration\n",
			hpet ? "HPET" : "PMTIMER");

		return tsc_ref_min;
	}

	/* We don't have an alternative source, use the PIT calibration value */
	if (!hpet && !ref1 && !ref2) {
		pr_info("Using PIT calibration value\n");
		return tsc_pit_min;
	}

	/* The alternative source failed, use the PIT calibration value */
	if (tsc_ref_min == ULONG_MAX) {
		pr_warn("HPET/PMTIMER calibration failed. Using PIT calibration.\n");
		return tsc_pit_min;
	}

	/*
	 * The calibration values differ too much. In doubt, we use
	 * the PIT value as we know that there are PMTIMERs around
	 * running at double speed. At least we let the user know:
	 */
	pr_warn("PIT calibration deviates from %s: %lu %lu\n",
		hpet ? "HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min);
"HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min); 812 pr_info("Using PIT calibration value\n"); 813 return tsc_pit_min; 814 } 815 816 int recalibrate_cpu_khz(void) 817 { 818 #ifndef CONFIG_SMP 819 unsigned long cpu_khz_old = cpu_khz; 820 821 if (cpu_has_tsc) { 822 tsc_khz = x86_platform.calibrate_tsc(); 823 cpu_khz = tsc_khz; 824 cpu_data(0).loops_per_jiffy = 825 cpufreq_scale(cpu_data(0).loops_per_jiffy, 826 cpu_khz_old, cpu_khz); 827 return 0; 828 } else 829 return -ENODEV; 830 #else 831 return -ENODEV; 832 #endif 833 } 834 835 EXPORT_SYMBOL(recalibrate_cpu_khz); 836 837 838 static unsigned long long cyc2ns_suspend; 839 840 void tsc_save_sched_clock_state(void) 841 { 842 if (!sched_clock_stable()) 843 return; 844 845 cyc2ns_suspend = sched_clock(); 846 } 847 848 /* 849 * Even on processors with invariant TSC, TSC gets reset in some the 850 * ACPI system sleep states. And in some systems BIOS seem to reinit TSC to 851 * arbitrary value (still sync'd across cpu's) during resume from such sleep 852 * states. To cope up with this, recompute the cyc2ns_offset for each cpu so 853 * that sched_clock() continues from the point where it was left off during 854 * suspend. 855 */ 856 void tsc_restore_sched_clock_state(void) 857 { 858 unsigned long long offset; 859 unsigned long flags; 860 int cpu; 861 862 if (!sched_clock_stable()) 863 return; 864 865 local_irq_save(flags); 866 867 /* 868 * We're comming out of suspend, there's no concurrency yet; don't 869 * bother being nice about the RCU stuff, just write to both 870 * data fields. 871 */ 872 873 this_cpu_write(cyc2ns.data[0].cyc2ns_offset, 0); 874 this_cpu_write(cyc2ns.data[1].cyc2ns_offset, 0); 875 876 offset = cyc2ns_suspend - sched_clock(); 877 878 for_each_possible_cpu(cpu) { 879 per_cpu(cyc2ns.data[0].cyc2ns_offset, cpu) = offset; 880 per_cpu(cyc2ns.data[1].cyc2ns_offset, cpu) = offset; 881 } 882 883 local_irq_restore(flags); 884 } 885 886 #ifdef CONFIG_CPU_FREQ 887 888 /* Frequency scaling support. Adjust the TSC based timer when the cpu frequency 889 * changes. 890 * 891 * RED-PEN: On SMP we assume all CPUs run with the same frequency. It's 892 * not that important because current Opteron setups do not support 893 * scaling on SMP anyroads. 894 * 895 * Should fix up last_tsc too. Currently gettimeofday in the 896 * first tick after the change will be slightly wrong. 
static unsigned int ref_freq;
static unsigned long loops_per_jiffy_ref;
static unsigned long tsc_khz_ref;

static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				 void *data)
{
	struct cpufreq_freqs *freq = data;
	unsigned long *lpj;

	if (cpu_has(&cpu_data(freq->cpu), X86_FEATURE_CONSTANT_TSC))
		return 0;

	lpj = &boot_cpu_data.loops_per_jiffy;
#ifdef CONFIG_SMP
	if (!(freq->flags & CPUFREQ_CONST_LOOPS))
		lpj = &cpu_data(freq->cpu).loops_per_jiffy;
#endif

	if (!ref_freq) {
		ref_freq = freq->old;
		loops_per_jiffy_ref = *lpj;
		tsc_khz_ref = tsc_khz;
	}
	if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
		*lpj = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);

		tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			mark_tsc_unstable("cpufreq changes");

		set_cyc2ns_scale(tsc_khz, freq->cpu);
	}

	return 0;
}

static struct notifier_block time_cpufreq_notifier_block = {
	.notifier_call = time_cpufreq_notifier
};

static int __init cpufreq_tsc(void)
{
	if (!cpu_has_tsc)
		return 0;
	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		return 0;
	cpufreq_register_notifier(&time_cpufreq_notifier_block,
				  CPUFREQ_TRANSITION_NOTIFIER);
	return 0;
}

core_initcall(cpufreq_tsc);

#endif /* CONFIG_CPU_FREQ */

/* clocksource code */

static struct clocksource clocksource_tsc;

/*
 * We used to compare the TSC to the cycle_last value in the clocksource
 * structure to avoid a nasty time-warp. This can be observed in a
 * very small window right after one CPU updated cycle_last under
 * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
 * is smaller than the cycle_last reference value due to a TSC which
 * is slightly behind. This delta is nowhere else observable, but in
 * that case it results in a forward time jump in the range of hours
 * due to the unsigned delta calculation of the time keeping core
 * code, which is necessary to support wrapping clocksources like the
 * pm timer.
 *
 * This sanity check is now done in the core timekeeping code,
 * checking the result of read_tsc() - cycle_last for being negative.
 * That works because CLOCKSOURCE_MASK(64) does not mask out any bit.
 */
static cycle_t read_tsc(struct clocksource *cs)
{
	return (cycle_t)rdtsc_ordered();
}

/*
 * .mask MUST be CLOCKSOURCE_MASK(64). See comment above read_tsc().
 */
static struct clocksource clocksource_tsc = {
	.name			= "tsc",
	.rating			= 300,
	.read			= read_tsc,
	.mask			= CLOCKSOURCE_MASK(64),
	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
				  CLOCK_SOURCE_MUST_VERIFY,
	.archdata		= { .vclock_mode = VCLOCK_TSC },
};

void mark_tsc_unstable(char *reason)
{
	if (!tsc_unstable) {
		tsc_unstable = 1;
		clear_sched_clock_stable();
		disable_sched_clock_irqtime();
		pr_info("Marking TSC unstable due to %s\n", reason);
		/* Change only the rating, when not registered */
		if (clocksource_tsc.mult)
			clocksource_mark_unstable(&clocksource_tsc);
		else {
			clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE;
			clocksource_tsc.rating = 0;
		}
	}
}

EXPORT_SYMBOL_GPL(mark_tsc_unstable);

static void __init check_system_tsc_reliable(void)
{
#if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
	if (is_geode_lx()) {
		/* RTSC counts during suspend */
#define RTSC_SUSP 0x100
		unsigned long res_low, res_high;

		rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
		/* Geode_LX - the OLPC CPU has a very reliable TSC */
		if (res_low & RTSC_SUSP)
			tsc_clocksource_reliable = 1;
	}
#endif
	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
		tsc_clocksource_reliable = 1;
}

/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
int unsynchronized_tsc(void)
{
	if (!cpu_has_tsc || tsc_unstable)
		return 1;

#ifdef CONFIG_SMP
	if (apic_is_clustered_box())
		return 1;
#endif

	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		return 0;

	if (tsc_clocksource_reliable)
		return 0;
	/*
	 * Intel systems are normally all synchronized.
	 * Exceptions must mark TSC as unstable:
	 */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
		/* assume multi socket systems are not synchronized: */
		if (num_possible_cpus() > 1)
			return 1;
	}

	return 0;
}


static void tsc_refine_calibration_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work);
/**
 * tsc_refine_calibration_work - Further refine tsc freq calibration
 * @work: ignored.
 *
 * This function uses delayed work over a period of a
 * second to further refine the TSC freq value. Since this is
 * timer based, instead of loop based, we don't block the boot
 * process while this longer calibration is done.
 *
 * If there are any calibration anomalies (too many SMIs, etc),
 * or the refined calibration is off by 1% of the fast early
 * calibration, we throw out the new calibration and use the
 * early calibration.
 */
static void tsc_refine_calibration_work(struct work_struct *work)
{
	static u64 tsc_start = -1, ref_start;
	static int hpet;
	u64 tsc_stop, ref_stop, delta;
	unsigned long freq;

	/* Don't bother refining TSC on unstable systems */
	if (check_tsc_unstable())
		goto out;

	/*
	 * Since the work is started early in boot, we may be
	 * delayed the first time we expire. So set the workqueue
	 * again once we know timers are working.
	 */
	if (tsc_start == -1) {
		/*
		 * Only set hpet once, to avoid mixing hardware
		 * if the hpet becomes enabled later.
		 */
		hpet = is_hpet_enabled();
		schedule_delayed_work(&tsc_irqwork, HZ);
		tsc_start = tsc_read_refs(&ref_start, hpet);
		return;
	}

	tsc_stop = tsc_read_refs(&ref_stop, hpet);

	/* hpet or pmtimer available ? */
	if (ref_start == ref_stop)
		goto out;

	/* Check whether the sampling was disturbed by an SMI */
	if (tsc_start == ULLONG_MAX || tsc_stop == ULLONG_MAX)
		goto out;

	delta = tsc_stop - tsc_start;
	delta *= 1000000LL;
	if (hpet)
		freq = calc_hpet_ref(delta, ref_start, ref_stop);
	else
		freq = calc_pmtimer_ref(delta, ref_start, ref_stop);

	/* Make sure we're within 1% */
	if (abs(tsc_khz - freq) > tsc_khz/100)
		goto out;

	tsc_khz = freq;
	pr_info("Refined TSC clocksource calibration: %lu.%03lu MHz\n",
		(unsigned long)tsc_khz / 1000,
		(unsigned long)tsc_khz % 1000);

out:
	clocksource_register_khz(&clocksource_tsc, tsc_khz);
}


static int __init init_tsc_clocksource(void)
{
	if (!cpu_has_tsc || tsc_disabled > 0 || !tsc_khz)
		return 0;

	if (tsc_clocksource_reliable)
		clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
	/* lower the rating if we already know it's unstable: */
	if (check_tsc_unstable()) {
		clocksource_tsc.rating = 0;
		clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
	}

	if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
		clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;

	/*
	 * Trust the results of the earlier calibration on systems
	 * exporting a reliable TSC.
	 */
	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) {
		clocksource_register_khz(&clocksource_tsc, tsc_khz);
		return 0;
	}

	schedule_delayed_work(&tsc_irqwork, 0);
	return 0;
}
/*
 * We use device_initcall here, to ensure we run after the hpet
 * is fully initialized, which may occur at fs_initcall time.
 */
device_initcall(init_tsc_clocksource);

void __init tsc_init(void)
{
	u64 lpj;
	int cpu;

	x86_init.timers.tsc_pre_init();

	if (!cpu_has_tsc) {
		setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
		return;
	}

	tsc_khz = x86_platform.calibrate_tsc();
	cpu_khz = tsc_khz;

	if (!tsc_khz) {
		mark_tsc_unstable("could not calculate TSC khz");
		setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
		return;
	}

	pr_info("Detected %lu.%03lu MHz processor\n",
		(unsigned long)cpu_khz / 1000,
		(unsigned long)cpu_khz % 1000);

	/*
	 * Secondary CPUs do not run through tsc_init(), so set up
	 * all the scale factors for all CPUs, assuming the same
	 * speed as the bootup CPU. (cpufreq notifiers will fix this
	 * up if their speed diverges)
	 */
	for_each_possible_cpu(cpu) {
		cyc2ns_init(cpu);
		set_cyc2ns_scale(cpu_khz, cpu);
	}

	if (tsc_disabled > 0)
		return;

	/* now allow native_sched_clock() to use rdtsc */

	tsc_disabled = 0;
	static_branch_enable(&__use_tsc);

	if (!no_sched_irq_time)
		enable_sched_clock_irqtime();

	lpj = ((u64)tsc_khz * 1000);
	do_div(lpj, HZ);
	lpj_fine = lpj;

	use_tsc_delay();

	if (unsynchronized_tsc())
		mark_tsc_unstable("TSCs unsynchronized");

	check_system_tsc_reliable();
}

#ifdef CONFIG_SMP
/*
 * If we have a constant TSC and are using the TSC for the delay loop,
 * we can skip clock calibration if another cpu in the same socket has already
 * been calibrated. This assumes that CONSTANT_TSC applies to all
 * cpus in the socket - this should be a safe assumption.
 */
unsigned long calibrate_delay_is_known(void)
{
	int i, cpu = smp_processor_id();

	if (!tsc_disabled && !cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC))
		return 0;

	for_each_online_cpu(i)
		if (cpu_data(i).phys_proc_id == cpu_data(cpu).phys_proc_id)
			return cpu_data(i).loops_per_jiffy;
	return 0;
}
#endif