/*
 * Common time routines among all ppc machines.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
 * Paul Mackerras' version and mine for PReP and Pmac.
 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
 *
 * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
 * to make clock more stable (2.4.0-test5). The only thing
 * that this code assumes is that the timebases have been synchronized
 * by firmware on SMP and are never stopped (never do sleep
 * on SMP then, nap and doze are OK).
 *
 * Speeded up do_gettimeofday by getting rid of references to
 * xtime (which required locks for consistency). (mikejc@us.ibm.com)
 *
 * TODO (not necessarily in this file):
 * - improve precision and reproducibility of timebase frequency
 *   measurement at boot time. (For iSeries, we calibrate the timebase
 *   against the Titan chip's clock.)
 * - for astronomical applications: add a new function to get
 *   unambiguous timestamps even around leap seconds. This needs
 *   a new timestamp format and a good name.
 *
 * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/kernel_stat.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/cpu.h>
#include <linux/security.h>
#include <linux/percpu.h>
#include <linux/rtc.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/irq.h>
#include <linux/delay.h>

#include <asm/io.h>
#include <asm/processor.h>
#include <asm/nvram.h>
#include <asm/cache.h>
#include <asm/machdep.h>
#include <asm/uaccess.h>
#include <asm/time.h>
#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <asm/smp.h>
#include <asm/vdso_datapage.h>
#include <asm/firmware.h>
#include <asm/cputime.h>
#ifdef CONFIG_PPC_ISERIES
#include <asm/iseries/it_lp_queue.h>
#include <asm/iseries/hv_call_xm.h>
#endif

/* powerpc clocksource/clockevent code */

#include <linux/clockchips.h>
#include <linux/clocksource.h>

static cycle_t rtc_read(struct clocksource *);
static struct clocksource clocksource_rtc = {
        .name         = "rtc",
        .rating       = 400,
        .flags        = CLOCK_SOURCE_IS_CONTINUOUS,
        .mask         = CLOCKSOURCE_MASK(64),
        .shift        = 22,
        .mult         = 0,      /* To be filled in */
        .read         = rtc_read,
};

static cycle_t timebase_read(struct clocksource *);
static struct clocksource clocksource_timebase = {
        .name         = "timebase",
        .rating       = 400,
        .flags        = CLOCK_SOURCE_IS_CONTINUOUS,
        .mask         = CLOCKSOURCE_MASK(64),
        .shift        = 22,
        .mult         = 0,      /* To be filled in */
        .read         = timebase_read,
};

#define DECREMENTER_MAX 0x7fffffff

static int decrementer_set_next_event(unsigned long evt,
                                      struct clock_event_device *dev);
static void decrementer_set_mode(enum clock_event_mode mode,
                                 struct clock_event_device *dev);
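
/*
 * Illustrative note (not part of the original code): with .shift = 22,
 * the generic clocksource core converts cycles to nanoseconds as
 *
 *      ns = (cycles * mult) >> shift
 *
 * and clocksource_init() below fills in mult via clocksource_hz2mult(),
 * which computes roughly (NSEC_PER_SEC << shift) / hz.  As a worked
 * example with an assumed 512 MHz timebase (not a value from any
 * particular machine): mult ~= (1e9 << 22) / 512e6 ~= 8192000, so each
 * tick contributes 8192000 / 2^22 ~= 1.95 ns.
 */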

static struct clock_event_device decrementer_clockevent = {
        .name           = "decrementer",
        .rating         = 200,
        .shift          = 0,    /* To be filled in */
        .mult           = 0,    /* To be filled in */
        .irq            = 0,
        .set_next_event = decrementer_set_next_event,
        .set_mode       = decrementer_set_mode,
        .features       = CLOCK_EVT_FEAT_ONESHOT,
};

struct decrementer_clock {
        struct clock_event_device event;
        u64 next_tb;
};

static DEFINE_PER_CPU(struct decrementer_clock, decrementers);

#ifdef CONFIG_PPC_ISERIES
static unsigned long __initdata iSeries_recal_titan;
static signed long __initdata iSeries_recal_tb;

/* Forward declaration is only needed for iSeries compiles */
static void __init clocksource_init(void);
#endif

#define XSEC_PER_SEC (1024*1024)

#ifdef CONFIG_PPC64
#define SCALE_XSEC(xsec, max)   (((xsec) * max) / XSEC_PER_SEC)
#else
/* compute ((xsec << 12) * max) >> 32 */
#define SCALE_XSEC(xsec, max)   mulhwu((xsec) << 12, max)
#endif

unsigned long tb_ticks_per_jiffy;
unsigned long tb_ticks_per_usec = 100; /* sane default */
EXPORT_SYMBOL(tb_ticks_per_usec);
unsigned long tb_ticks_per_sec;
EXPORT_SYMBOL(tb_ticks_per_sec);        /* for cputime_t conversions */
u64 tb_to_xs;
unsigned tb_to_us;

#define TICKLEN_SCALE   NTP_SCALE_SHIFT
static u64 last_tick_len;       /* units are ns / 2^TICKLEN_SCALE */
static u64 ticklen_to_xs;       /* 0.64 fraction */

/*
 * If last_tick_len corresponds to about 1/HZ seconds, then
 * last_tick_len << TICKLEN_SHIFT will be about 2^63.
 */
#define TICKLEN_SHIFT   (63 - 30 - TICKLEN_SCALE + SHIFT_HZ)

DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);

static u64 tb_to_ns_scale __read_mostly;
static unsigned tb_to_ns_shift __read_mostly;
static unsigned long boot_tb __read_mostly;

extern struct timezone sys_tz;
static long timezone_offset;

unsigned long ppc_proc_freq;
EXPORT_SYMBOL(ppc_proc_freq);
unsigned long ppc_tb_freq;

static u64 tb_last_jiffy __cacheline_aligned_in_smp;
static DEFINE_PER_CPU(u64, last_jiffy);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/*
 * Factors for converting from cputime_t (timebase ticks) to
 * jiffies, milliseconds, seconds, and clock_t (1/USER_HZ seconds).
 * These are all stored as 0.64 fixed-point binary fractions.
 */
u64 __cputime_jiffies_factor;
EXPORT_SYMBOL(__cputime_jiffies_factor);
u64 __cputime_msec_factor;
EXPORT_SYMBOL(__cputime_msec_factor);
u64 __cputime_sec_factor;
EXPORT_SYMBOL(__cputime_sec_factor);
u64 __cputime_clockt_factor;
EXPORT_SYMBOL(__cputime_clockt_factor);
DEFINE_PER_CPU(unsigned long, cputime_last_delta);
DEFINE_PER_CPU(unsigned long, cputime_scaled_last_delta);

static void calc_cputime_factors(void)
{
        struct div_result res;

        div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
        __cputime_jiffies_factor = res.result_low;
        div128_by_32(1000, 0, tb_ticks_per_sec, &res);
        __cputime_msec_factor = res.result_low;
        div128_by_32(1, 0, tb_ticks_per_sec, &res);
        __cputime_sec_factor = res.result_low;
        div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
        __cputime_clockt_factor = res.result_low;
}
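
/*
 * Worked example (illustrative numbers only): div128_by_32(HZ, 0, ...)
 * divides the 128-bit value HZ << 64 by tb_ticks_per_sec, so result_low
 * is the 0.64 fixed-point fraction 2^64 * HZ / tb_ticks_per_sec.  With
 * HZ = 250 and an assumed 512 MHz timebase, __cputime_jiffies_factor is
 * about 2^64 * 250 / 512e6 = 2^64 / 2048000; multiplying a tick count
 * by it and keeping the high 64 bits (mulhdu, see sched_clock below)
 * therefore divides by 2048000, converting timebase ticks to jiffies.
 */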

/*
 * Read the PURR on systems that have it, otherwise the timebase.
 */
static u64 read_purr(void)
{
        if (cpu_has_feature(CPU_FTR_PURR))
                return mfspr(SPRN_PURR);
        return mftb();
}

/*
 * Read the SPURR on systems that have it, otherwise the PURR.
 */
static u64 read_spurr(u64 purr)
{
        /*
         * cpus without PURR won't have a SPURR;
         * we already know the former when we use this, so tell gcc
         */
        if (cpu_has_feature(CPU_FTR_PURR) && cpu_has_feature(CPU_FTR_SPURR))
                return mfspr(SPRN_SPURR);
        return purr;
}

/*
 * Account time for a transition between system, hard irq
 * or soft irq state.
 */
void account_system_vtime(struct task_struct *tsk)
{
        u64 now, nowscaled, delta, deltascaled, sys_time;
        unsigned long flags;

        local_irq_save(flags);
        now = read_purr();
        nowscaled = read_spurr(now);
        delta = now - get_paca()->startpurr;
        deltascaled = nowscaled - get_paca()->startspurr;
        get_paca()->startpurr = now;
        get_paca()->startspurr = nowscaled;
        if (!in_interrupt()) {
                /*
                 * deltascaled includes both user and system time.
                 * Hence scale it based on the purr ratio to estimate
                 * the system time.
                 */
                sys_time = get_paca()->system_time;
                if (get_paca()->user_time)
                        deltascaled = deltascaled * sys_time /
                                (sys_time + get_paca()->user_time);
                delta += sys_time;
                get_paca()->system_time = 0;
        }
        if (in_irq() || idle_task(smp_processor_id()) != tsk)
                account_system_time(tsk, 0, delta, deltascaled);
        else
                account_idle_time(delta);
        per_cpu(cputime_last_delta, smp_processor_id()) = delta;
        per_cpu(cputime_scaled_last_delta, smp_processor_id()) = deltascaled;
        local_irq_restore(flags);
}

/*
 * Transfer the user and system times accumulated in the paca
 * by the exception entry and exit code to the generic process
 * user and system time records.
 * Must be called with interrupts disabled.
 */
void account_process_tick(struct task_struct *tsk, int user_tick)
{
        cputime_t utime, utimescaled;

        utime = get_paca()->user_time;
        get_paca()->user_time = 0;
        utimescaled = cputime_to_scaled(utime);
        account_user_time(tsk, utime, utimescaled);
}

/*
 * Stuff for accounting stolen time.
 */
struct cpu_purr_data {
        int     initialized;    /* thread is running */
        u64     tb;             /* last TB value read */
        u64     purr;           /* last PURR value read */
        u64     spurr;          /* last SPURR value read */
};

/*
 * Each entry in the cpu_purr_data array is manipulated only by its
 * "owner" cpu -- usually in the timer interrupt but also occasionally
 * in process context for cpu online.  As long as cpus do not touch
 * each others' cpu_purr_data, disabling local interrupts is
 * sufficient to serialize accesses.
 */
static DEFINE_PER_CPU(struct cpu_purr_data, cpu_purr_data);

static void snapshot_tb_and_purr(void *data)
{
        unsigned long flags;
        struct cpu_purr_data *p = &__get_cpu_var(cpu_purr_data);

        local_irq_save(flags);
        p->tb = get_tb_or_rtc();
        p->purr = mfspr(SPRN_PURR);
        wmb();
        p->initialized = 1;
        local_irq_restore(flags);
}

/*
 * Called during boot when all cpus have come up.
 */
void snapshot_timebases(void)
{
        if (!cpu_has_feature(CPU_FTR_PURR))
                return;
        on_each_cpu(snapshot_tb_and_purr, NULL, 1);
}
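
/*
 * Worked example (illustrative numbers only): the timebase (TB) always
 * runs, while the PURR only advances while this thread is actually
 * dispatched.  If, between two snapshots, TB moved by 1000000 ticks but
 * the PURR moved by only 800000, then
 *
 *      stolen = (tb2 - tb1) - (purr2 - purr1) = 200000 ticks
 *
 * were spent running some other partition (or the other SMT thread),
 * and calculate_steal_time() below accounts them as steal or idle time.
 */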

/*
 * Must be called with interrupts disabled.
 */
void calculate_steal_time(void)
{
        u64 tb, purr;
        s64 stolen;
        struct cpu_purr_data *pme;

        pme = &__get_cpu_var(cpu_purr_data);
        if (!pme->initialized)
                return;         /* !CPU_FTR_PURR or early in boot */
        tb = mftb();
        purr = mfspr(SPRN_PURR);
        stolen = (tb - pme->tb) - (purr - pme->purr);
        if (stolen > 0) {
                if (idle_task(smp_processor_id()) != current)
                        account_steal_time(stolen);
                else
                        account_idle_time(stolen);
        }
        pme->tb = tb;
        pme->purr = purr;
}

#ifdef CONFIG_PPC_SPLPAR
/*
 * Must be called before the cpu is added to the online map when
 * a cpu is being brought up at runtime.
 */
static void snapshot_purr(void)
{
        struct cpu_purr_data *pme;
        unsigned long flags;

        if (!cpu_has_feature(CPU_FTR_PURR))
                return;
        local_irq_save(flags);
        pme = &__get_cpu_var(cpu_purr_data);
        pme->tb = mftb();
        pme->purr = mfspr(SPRN_PURR);
        pme->initialized = 1;
        local_irq_restore(flags);
}

#endif /* CONFIG_PPC_SPLPAR */

#else /* ! CONFIG_VIRT_CPU_ACCOUNTING */
#define calc_cputime_factors()
#define calculate_steal_time() do { } while (0)
#endif

#if !(defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR))
#define snapshot_purr()        do { } while (0)
#endif

/*
 * Called when a cpu comes up after the system has finished booting,
 * i.e. as a result of a hotplug cpu action.
 */
void snapshot_timebase(void)
{
        __get_cpu_var(last_jiffy) = get_tb_or_rtc();
        snapshot_purr();
}

void __delay(unsigned long loops)
{
        unsigned long start;
        int diff;

        if (__USE_RTC()) {
                start = get_rtcl();
                do {
                        /* the RTCL register wraps at 1000000000 */
                        diff = get_rtcl() - start;
                        if (diff < 0)
                                diff += 1000000000;
                } while (diff < loops);
        } else {
                start = get_tbl();
                while (get_tbl() - start < loops)
                        HMT_low();
                HMT_medium();
        }
}
EXPORT_SYMBOL(__delay);

void udelay(unsigned long usecs)
{
        __delay(tb_ticks_per_usec * usecs);
}
EXPORT_SYMBOL(udelay);

static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
                               u64 new_tb_to_xs)
{
        /*
         * tb_update_count is used to allow the userspace gettimeofday code
         * to assure itself that it sees a consistent view of the tb_to_xs and
         * stamp_xsec variables.  It reads the tb_update_count, then reads
         * tb_to_xs and stamp_xsec and then reads tb_update_count again.  If
         * the two values of tb_update_count match and are even then the
         * tb_to_xs and stamp_xsec values are consistent.  If not, then it
         * loops back and reads them again until this criterion is met.
         * We expect the caller to have done the first increment of
         * vdso_data->tb_update_count already.
         */
        vdso_data->tb_orig_stamp = new_tb_stamp;
        vdso_data->stamp_xsec = new_stamp_xsec;
        vdso_data->tb_to_xs = new_tb_to_xs;
        vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
        vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
        vdso_data->stamp_xtime = xtime;
        smp_wmb();
        ++(vdso_data->tb_update_count);
}
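
/*
 * For illustration only -- a minimal sketch of the consumer side of the
 * tb_update_count protocol described above, as the vDSO gettimeofday
 * code might structure it (the loop shape is a sketch, not the actual
 * vDSO implementation):
 *
 *      u32 seq;
 *      u64 stamp, t2x;
 *      do {
 *              seq = vdso_data->tb_update_count;
 *              smp_rmb();
 *              stamp = vdso_data->stamp_xsec;
 *              t2x = vdso_data->tb_to_xs;
 *              smp_rmb();
 *      } while ((seq & 1) || seq != vdso_data->tb_update_count);
 *      (stamp and t2x now form a consistent pair)
 */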

#ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs)
{
        unsigned long pc = instruction_pointer(regs);

        if (in_lock_functions(pc))
                return regs->link;

        return pc;
}
EXPORT_SYMBOL(profile_pc);
#endif

#ifdef CONFIG_PPC_ISERIES

/*
 * This function recalibrates the timebase based on the 49-bit time-of-day
 * value in the Titan chip.  The Titan is much more accurate than the value
 * returned by the service processor for the timebase frequency.
 */
static int __init iSeries_tb_recal(void)
{
        struct div_result divres;
        unsigned long titan, tb;

        /* Make sure we only run on iSeries */
        if (!firmware_has_feature(FW_FEATURE_ISERIES))
                return -ENODEV;

        tb = get_tb();
        titan = HvCallXm_loadTod();
        if (iSeries_recal_titan) {
                unsigned long tb_ticks = tb - iSeries_recal_tb;
                unsigned long titan_usec = (titan - iSeries_recal_titan) >> 12;
                unsigned long new_tb_ticks_per_sec =
                        (tb_ticks * USEC_PER_SEC) / titan_usec;
                unsigned long new_tb_ticks_per_jiffy =
                        (new_tb_ticks_per_sec + (HZ / 2)) / HZ;
                long tick_diff = new_tb_ticks_per_jiffy - tb_ticks_per_jiffy;
                char sign = '+';

                /* make sure tb_ticks_per_sec and tb_ticks_per_jiffy are consistent */
                new_tb_ticks_per_sec = new_tb_ticks_per_jiffy * HZ;

                if (tick_diff < 0) {
                        tick_diff = -tick_diff;
                        sign = '-';
                }
                if (tick_diff) {
                        if (tick_diff < tb_ticks_per_jiffy / 25) {
                                printk("Titan recalibrate: new tb_ticks_per_jiffy = %lu (%c%ld)\n",
                                       new_tb_ticks_per_jiffy, sign, tick_diff);
                                tb_ticks_per_jiffy = new_tb_ticks_per_jiffy;
                                tb_ticks_per_sec = new_tb_ticks_per_sec;
                                calc_cputime_factors();
                                div128_by_32(XSEC_PER_SEC, 0, tb_ticks_per_sec,
                                             &divres);
                                tb_to_xs = divres.result_low;
                                vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
                                vdso_data->tb_to_xs = tb_to_xs;
                        } else {
                                printk("Titan recalibrate: FAILED (difference > 4 percent)\n"
                                       "   new tb_ticks_per_jiffy = %lu\n"
                                       "   old tb_ticks_per_jiffy = %lu\n",
                                       new_tb_ticks_per_jiffy, tb_ticks_per_jiffy);
                        }
                }
        }
        iSeries_recal_titan = titan;
        iSeries_recal_tb = tb;

        /* Called here as now we know accurate values for the timebase */
        clocksource_init();
        return 0;
}
late_initcall(iSeries_tb_recal);

/* Called from platform early init */
void __init iSeries_time_init_early(void)
{
        iSeries_recal_tb = get_tb();
        iSeries_recal_titan = HvCallXm_loadTod();
}
#endif /* CONFIG_PPC_ISERIES */
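
/*
 * Worked example for the recalibration above (illustrative numbers;
 * the 4096-counts-per-microsecond rate is inferred from the >> 12 in
 * the code, not stated elsewhere): shifting the Titan delta right by
 * 12 yields elapsed microseconds.  If 10 seconds of Titan time
 * (titan_usec = 10000000) saw tb_ticks = 5120010000, then
 * new_tb_ticks_per_sec = 5120010000 * 1000000 / 10000000 = 512001000,
 * i.e. the timebase is measured at ~512.001 MHz, and the tick values
 * are adjusted only if the change is within the 4% sanity bound.
 */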

/*
 * For iSeries shared processors, we have to let the hypervisor
 * set the hardware decrementer.  We set a virtual decrementer
 * in the lppaca and call the hypervisor if the virtual
 * decrementer is less than the current value in the hardware
 * decrementer.  (Almost always the new decrementer value will
 * be greater than the current hardware decrementer, so the
 * hypervisor call will not be needed.)
 */

/*
 * timer_interrupt - gets called when the decrementer overflows,
 * with interrupts disabled.
 */
void timer_interrupt(struct pt_regs *regs)
{
        struct pt_regs *old_regs;
        struct decrementer_clock *decrementer = &__get_cpu_var(decrementers);
        struct clock_event_device *evt = &decrementer->event;
        u64 now;

        /*
         * Ensure a positive value is written to the decrementer, or else
         * some CPUs will continue to take decrementer exceptions.
         */
        set_dec(DECREMENTER_MAX);

#ifdef CONFIG_PPC32
        if (atomic_read(&ppc_n_lost_interrupts) != 0)
                do_IRQ(regs);
#endif

        now = get_tb_or_rtc();
        if (now < decrementer->next_tb) {
                /* not time for this event yet */
                now = decrementer->next_tb - now;
                if (now <= DECREMENTER_MAX)
                        set_dec((int)now);
                return;
        }
        old_regs = set_irq_regs(regs);
        irq_enter();

        calculate_steal_time();

#ifdef CONFIG_PPC_ISERIES
        if (firmware_has_feature(FW_FEATURE_ISERIES))
                get_lppaca()->int_dword.fields.decr_int = 0;
#endif

        if (evt->event_handler)
                evt->event_handler(evt);

#ifdef CONFIG_PPC_ISERIES
        if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending())
                process_hvlpevents();
#endif

#ifdef CONFIG_PPC64
        /* collect purr register values often, for accurate calculations */
        if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
                struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
                cu->current_tb = mfspr(SPRN_PURR);
        }
#endif

        irq_exit();
        set_irq_regs(old_regs);
}

void wakeup_decrementer(void)
{
        unsigned long ticks;

        /*
         * The timebase gets saved on sleep and restored on wakeup,
         * so all we need to do is to reset the decrementer.
         */
        ticks = tb_ticks_since(__get_cpu_var(last_jiffy));
        if (ticks < tb_ticks_per_jiffy)
                ticks = tb_ticks_per_jiffy - ticks;
        else
                ticks = 1;
        set_dec(ticks);
}

#ifdef CONFIG_SUSPEND
void generic_suspend_disable_irqs(void)
{
        preempt_disable();

        /*
         * Disable the decrementer, so that it doesn't interfere
         * with suspending.
         */
        set_dec(0x7fffffff);
        local_irq_disable();
        set_dec(0x7fffffff);
}

void generic_suspend_enable_irqs(void)
{
        wakeup_decrementer();

        local_irq_enable();
        preempt_enable();
}

/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_disable_irqs(void)
{
        if (ppc_md.suspend_disable_irqs)
                ppc_md.suspend_disable_irqs();
        generic_suspend_disable_irqs();
}

/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_enable_irqs(void)
{
        generic_suspend_enable_irqs();
        if (ppc_md.suspend_enable_irqs)
                ppc_md.suspend_enable_irqs();
}
#endif

#ifdef CONFIG_SMP
void __init smp_space_timers(unsigned int max_cpus)
{
        int i;
        u64 previous_tb = per_cpu(last_jiffy, boot_cpuid);

        /* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */
        previous_tb -= tb_ticks_per_jiffy;

        for_each_possible_cpu(i) {
                if (i == boot_cpuid)
                        continue;
                per_cpu(last_jiffy, i) = previous_tb;
        }
}
#endif
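
/*
 * Illustrative note (example frequency, not from the source): the
 * decrementer is a signed 32-bit counter, so DECREMENTER_MAX
 * (0x7fffffff = 2147483647 ticks) bounds how far ahead a single
 * interrupt can be programmed.  At an assumed 512 MHz timebase that is
 * about 2147483647 / 512000000 ~= 4.19 seconds.  For events further
 * out, the "not time for this event yet" path in timer_interrupt()
 * simply leaves DECREMENTER_MAX programmed and takes another
 * (harmless) interrupt later.
 */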

/*
 * Scheduler clock - returns current time in nanosec units.
 *
 * Note: mulhdu(a, b) (multiply high double unsigned) returns
 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
 * are 64-bit unsigned numbers.
 */
unsigned long long sched_clock(void)
{
        if (__USE_RTC())
                return get_rtc();
        return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
}

static int __init get_freq(char *name, int cells, unsigned long *val)
{
        struct device_node *cpu;
        const unsigned int *fp;
        int found = 0;

        /* The cpu node should have timebase and clock frequency properties */
        cpu = of_find_node_by_type(NULL, "cpu");

        if (cpu) {
                fp = of_get_property(cpu, name, NULL);
                if (fp) {
                        found = 1;
                        *val = of_read_ulong(fp, cells);
                }

                of_node_put(cpu);
        }

        return found;
}

void __init generic_calibrate_decr(void)
{
        ppc_tb_freq = DEFAULT_TB_FREQ;          /* hardcoded default */

        if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
            !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {

                printk(KERN_ERR "WARNING: Estimating decrementer frequency "
                       "(not found)\n");
        }

        ppc_proc_freq = DEFAULT_PROC_FREQ;      /* hardcoded default */

        if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
            !get_freq("clock-frequency", 1, &ppc_proc_freq)) {

                printk(KERN_ERR "WARNING: Estimating processor frequency "
                       "(not found)\n");
        }

#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
        /* Clear any pending timer interrupts */
        mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);

        /* Enable decrementer interrupt */
        mtspr(SPRN_TCR, TCR_DIE);
#endif
}

int update_persistent_clock(struct timespec now)
{
        struct rtc_time tm;

        if (!ppc_md.set_rtc_time)
                return 0;

        to_tm(now.tv_sec + 1 + timezone_offset, &tm);
        tm.tm_year -= 1900;
        tm.tm_mon -= 1;

        return ppc_md.set_rtc_time(&tm);
}

unsigned long read_persistent_clock(void)
{
        struct rtc_time tm;
        static int first = 1;

        /* XXX this is a little fragile but will work okay in the short term */
        if (first) {
                first = 0;
                if (ppc_md.time_init)
                        timezone_offset = ppc_md.time_init();

                /* get_boot_time() isn't guaranteed to be safe to call late */
                if (ppc_md.get_boot_time)
                        return ppc_md.get_boot_time() - timezone_offset;
        }
        if (!ppc_md.get_rtc_time)
                return 0;
        ppc_md.get_rtc_time(&tm);
        return mktime(tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
                      tm.tm_hour, tm.tm_min, tm.tm_sec);
}

/* clocksource code */
static cycle_t rtc_read(struct clocksource *cs)
{
        return (cycle_t)get_rtc();
}

static cycle_t timebase_read(struct clocksource *cs)
{
        return (cycle_t)get_tb();
}

void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
{
        u64 t2x, stamp_xsec;

        if (clock != &clocksource_timebase)
                return;

        /* Make userspace gettimeofday spin until we're done. */
        ++vdso_data->tb_update_count;
        smp_mb();

        /* XXX this assumes clock->shift == 22 */
        /* 4611686018 ~= 2^(20+64-22) / 1e9 */
        t2x = (u64) clock->mult * 4611686018ULL;
        stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC;
        do_div(stamp_xsec, 1000000000);
        stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC;
        update_gtod(clock->cycle_last, stamp_xsec, t2x);
}
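
/*
 * Sanity check of the magic constant above, shown for illustration:
 * tb_to_xs is a 0.64 fixed-point fraction giving xsec (2^-20 s) per
 * timebase tick, i.e. 2^(64+20) / tb_ticks_per_sec, while the
 * clocksource mult is (1e9 << 22) / tb_ticks_per_sec.  So
 *
 *      tb_to_xs = mult * 2^(20+64-22) / 1e9 = mult * 2^62 / 1e9
 *
 * and 2^62 / 1e9 = 4611686018427387904 / 1000000000 ~= 4611686018,
 * which is exactly the multiplier used to compute t2x.
 */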

void update_vsyscall_tz(void)
{
        /* Make userspace gettimeofday spin until we're done. */
        ++vdso_data->tb_update_count;
        smp_mb();
        vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
        vdso_data->tz_dsttime = sys_tz.tz_dsttime;
        smp_mb();
        ++vdso_data->tb_update_count;
}

static void __init clocksource_init(void)
{
        struct clocksource *clock;

        if (__USE_RTC())
                clock = &clocksource_rtc;
        else
                clock = &clocksource_timebase;

        clock->mult = clocksource_hz2mult(tb_ticks_per_sec, clock->shift);

        if (clocksource_register(clock)) {
                printk(KERN_ERR "clocksource: %s is already registered\n",
                       clock->name);
                return;
        }

        printk(KERN_INFO "clocksource: %s mult[%x] shift[%d] registered\n",
               clock->name, clock->mult, clock->shift);
}

static int decrementer_set_next_event(unsigned long evt,
                                      struct clock_event_device *dev)
{
        __get_cpu_var(decrementers).next_tb = get_tb_or_rtc() + evt;
        set_dec(evt);
        return 0;
}

static void decrementer_set_mode(enum clock_event_mode mode,
                                 struct clock_event_device *dev)
{
        if (mode != CLOCK_EVT_MODE_ONESHOT)
                decrementer_set_next_event(DECREMENTER_MAX, dev);
}

static void __init setup_clockevent_multiplier(unsigned long hz)
{
        u64 mult, shift = 32;

        while (1) {
                mult = div_sc(hz, NSEC_PER_SEC, shift);
                if (mult && (mult >> 32UL) == 0UL)
                        break;

                shift--;
        }

        decrementer_clockevent.shift = shift;
        decrementer_clockevent.mult = mult;
}

static void register_decrementer_clockevent(int cpu)
{
        struct clock_event_device *dec = &per_cpu(decrementers, cpu).event;

        *dec = decrementer_clockevent;
        dec->cpumask = cpumask_of(cpu);

        printk(KERN_DEBUG "clockevent: %s mult[%lx] shift[%d] cpu[%d]\n",
               dec->name, dec->mult, dec->shift, cpu);

        clockevents_register_device(dec);
}

static void __init init_decrementer_clockevent(void)
{
        int cpu = smp_processor_id();

        setup_clockevent_multiplier(ppc_tb_freq);
        decrementer_clockevent.max_delta_ns =
                clockevent_delta2ns(DECREMENTER_MAX, &decrementer_clockevent);
        decrementer_clockevent.min_delta_ns =
                clockevent_delta2ns(2, &decrementer_clockevent);

        register_decrementer_clockevent(cpu);
}

void secondary_cpu_time_init(void)
{
        /* FIXME: should make an unrelated change to move the
         * snapshot_timebase call here */
        register_decrementer_clockevent(smp_processor_id());
}
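
/*
 * Worked example for the clockevent scaling (illustrative numbers):
 * setup_clockevent_multiplier() searches for the largest shift such
 * that mult = hz * 2^shift / 1e9 still fits in 32 bits.  For an assumed
 * 512 MHz timebase and shift = 32, mult = 512e6 * 2^32 / 1e9
 * ~= 2199023255, which fits, so shift stays at 32.
 * clockevent_delta2ns() then inverts this:
 *      max_delta_ns ~= DECREMENTER_MAX * 2^shift / mult
 *                   ~= 2147483647 * 1e9 / 512e6 ~= 4.19e9 ns,
 * matching the ~4.19 s decrementer range noted earlier.
 */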

/* This function is only called on the boot processor */
void __init time_init(void)
{
        unsigned long flags;
        struct div_result res;
        u64 scale, x;
        unsigned shift;

        if (__USE_RTC()) {
                /* 601 processor: dec counts down by 128 every 128ns */
                ppc_tb_freq = 1000000000;
                tb_last_jiffy = get_rtcl();
        } else {
                /* Normal PowerPC with timebase register */
                ppc_md.calibrate_decr();
                printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
                       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
                printk(KERN_DEBUG "time_init: processor frequency   = %lu.%.6lu MHz\n",
                       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
                tb_last_jiffy = get_tb();
        }

        tb_ticks_per_jiffy = ppc_tb_freq / HZ;
        tb_ticks_per_sec = ppc_tb_freq;
        tb_ticks_per_usec = ppc_tb_freq / 1000000;
        tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
        calc_cputime_factors();

        /*
         * Calculate the length of each tick in ns.  It will not be
         * exactly 1e9/HZ unless ppc_tb_freq is divisible by HZ.
         * We compute 1e9 * tb_ticks_per_jiffy / ppc_tb_freq,
         * rounded up.
         */
        x = (u64) NSEC_PER_SEC * tb_ticks_per_jiffy + ppc_tb_freq - 1;
        do_div(x, ppc_tb_freq);
        tick_nsec = x;
        last_tick_len = x << TICKLEN_SCALE;

        /*
         * Compute ticklen_to_xs, which is a factor which gets multiplied
         * by (last_tick_len << TICKLEN_SHIFT) to get a tb_to_xs value.
         * It is computed as:
         *   ticklen_to_xs = 2^N / (tb_ticks_per_jiffy * 1e9)
         * where N = 64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT
         * which turns out to be N = 51 - SHIFT_HZ.
         * This gives the result as a 0.64 fixed-point fraction.
         * That value is reduced by an offset amounting to 1 xsec per
         * 2^31 timebase ticks to avoid problems with time going backwards
         * by 1 xsec when we do timer_recalc_offset due to losing the
         * fractional xsec.  That offset is equal to ppc_tb_freq/2^51
         * since there are 2^20 xsec in a second.
         */
        div128_by_32((1ULL << 51) - ppc_tb_freq, 0,
                     tb_ticks_per_jiffy << SHIFT_HZ, &res);
        div128_by_32(res.result_high, res.result_low, NSEC_PER_SEC, &res);
        ticklen_to_xs = res.result_low;

        /* Compute tb_to_xs from tick_nsec */
        tb_to_xs = mulhdu(last_tick_len << TICKLEN_SHIFT, ticklen_to_xs);

        /*
         * Compute scale factor for sched_clock.
         * The calibrate_decr() function has set tb_ticks_per_sec,
         * which is the timebase frequency.
         * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
         * the 128-bit result as a 64.64 fixed-point number.
         * We then shift that number right until it is less than 1.0,
         * giving us the scale factor and shift count to use in
         * sched_clock().
         */
        div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
        scale = res.result_low;
        for (shift = 0; res.result_high != 0; ++shift) {
                scale = (scale >> 1) | (res.result_high << 63);
                res.result_high >>= 1;
        }
        tb_to_ns_scale = scale;
        tb_to_ns_shift = shift;
        /* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
        boot_tb = get_tb_or_rtc();

        write_seqlock_irqsave(&xtime_lock, flags);

        /* If platform provided a timezone (pmac), we correct the time */
        if (timezone_offset) {
                sys_tz.tz_minuteswest = -timezone_offset / 60;
                sys_tz.tz_dsttime = 0;
        }

        vdso_data->tb_orig_stamp = tb_last_jiffy;
        vdso_data->tb_update_count = 0;
        vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
        vdso_data->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
        vdso_data->tb_to_xs = tb_to_xs;

        write_sequnlock_irqrestore(&xtime_lock, flags);

        /* Register the clocksource, if we're not running on iSeries */
        if (!firmware_has_feature(FW_FEATURE_ISERIES))
                clocksource_init();

        init_decrementer_clockevent();
}


#define FEBRUARY        2
#define STARTOFTIME     1970
#define SECDAY          86400L
#define SECYR           (SECDAY * 365)
#define leapyear(year)  ((year) % 4 == 0 && \
                         ((year) % 100 != 0 || (year) % 400 == 0))
#define days_in_year(a)         (leapyear(a) ? 366 : 365)
#define days_in_month(a)        (month_days[(a) - 1])

static int month_days[12] = {
        31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};
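
/*
 * Quick sanity examples for the macros above (standard Gregorian
 * rules, easy to verify by hand):
 *   leapyear(1996) == 1   (divisible by 4, not by 100)
 *   leapyear(1900) == 0   (divisible by 100 but not by 400)
 *   leapyear(2000) == 1   (divisible by 400)
 * so days_in_year(2000) == 366 while days_in_year(1900) == 365.
 */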
/*
 * This only works for the Gregorian calendar - i.e. after 1752 (in the UK)
 */
void GregorianDay(struct rtc_time *tm)
{
        int leapsToDate;
        int lastYear;
        int day;
        int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181,
                              212, 243, 273, 304, 334 };

        lastYear = tm->tm_year - 1;

        /*
         * Number of leap corrections to apply up to end of last year
         */
        leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400;

        /*
         * This year is a leap year if it is divisible by 4 except when it is
         * divisible by 100 unless it is divisible by 400
         *
         * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 was
         */
        day = tm->tm_mon > 2 && leapyear(tm->tm_year);

        day += lastYear * 365 + leapsToDate + MonthOffset[tm->tm_mon - 1] +
               tm->tm_mday;

        tm->tm_wday = day % 7;
}

void to_tm(int tim, struct rtc_time *tm)
{
        register int i;
        register long hms, day;

        day = tim / SECDAY;
        hms = tim % SECDAY;

        /* Hours, minutes, seconds are easy */
        tm->tm_hour = hms / 3600;
        tm->tm_min = (hms % 3600) / 60;
        tm->tm_sec = (hms % 3600) % 60;

        /* Number of years in days */
        for (i = STARTOFTIME; day >= days_in_year(i); i++)
                day -= days_in_year(i);
        tm->tm_year = i;

        /* Number of months in days left */
        if (leapyear(tm->tm_year))
                days_in_month(FEBRUARY) = 29;
        for (i = 1; day >= days_in_month(i); i++)
                day -= days_in_month(i);
        days_in_month(FEBRUARY) = 28;
        tm->tm_mon = i;

        /* Days are what is left over (+1) from all that. */
        tm->tm_mday = day + 1;

        /*
         * Determine the day of week
         */
        GregorianDay(tm);
}

/*
 * Auxiliary function to compute scaling factors.
 *
 * Actually the choice of a timebase running at 1/4 of the bus frequency,
 * giving a resolution of a few tens of nanoseconds, is quite nice.  It
 * makes this computation very precise (27-28 bits typically), which is
 * optimistic considering the stability of most processor clock
 * oscillators and the precision with which the timebase frequency is
 * measured, but does no harm.
 */
unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale)
{
        unsigned mlt = 0, tmp, err;

        /*
         * No concern for performance, it's done once: use a stupid
         * but safe and compact method to find the multiplier.
         */
        for (tmp = 1U << 31; tmp != 0; tmp >>= 1) {
                if (mulhwu(inscale, mlt | tmp) < outscale)
                        mlt |= tmp;
        }

        /*
         * We might still be off by 1 for the best approximation.
         * A side effect of this is that if outscale is too large
         * the returned value will be zero.
         * Many corner cases have been checked and seem to work,
         * some might have been forgotten in the test however.
         */
        err = inscale * (mlt + 1);
        if (err <= inscale / 2)
                mlt++;
        return mlt;
}
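
/*
 * Worked example (illustrative numbers only): mulhwu_scale_factor()
 * returns approximately 2^32 * outscale / inscale, so that
 * mulhwu(x, factor) = (x * factor) >> 32 ~= x * outscale / inscale.
 * For an assumed 512 MHz timebase, the tb_to_us value set in
 * time_init() is mulhwu_scale_factor(512000000, 1000000)
 * ~= 2^32 / 512 = 8388608, and mulhwu(ticks, 8388608) divides a tick
 * count by 512, converting it to microseconds.
 */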
1117 */ 1118 void div128_by_32(u64 dividend_high, u64 dividend_low, 1119 unsigned divisor, struct div_result *dr) 1120 { 1121 unsigned long a, b, c, d; 1122 unsigned long w, x, y, z; 1123 u64 ra, rb, rc; 1124 1125 a = dividend_high >> 32; 1126 b = dividend_high & 0xffffffff; 1127 c = dividend_low >> 32; 1128 d = dividend_low & 0xffffffff; 1129 1130 w = a / divisor; 1131 ra = ((u64)(a - (w * divisor)) << 32) + b; 1132 1133 rb = ((u64) do_div(ra, divisor) << 32) + c; 1134 x = ra; 1135 1136 rc = ((u64) do_div(rb, divisor) << 32) + d; 1137 y = rb; 1138 1139 do_div(rc, divisor); 1140 z = rc; 1141 1142 dr->result_high = ((u64)w << 32) + x; 1143 dr->result_low = ((u64)y << 32) + z; 1144 1145 } 1146 1147 /* We don't need to calibrate delay, we use the CPU timebase for that */ 1148 void calibrate_delay(void) 1149 { 1150 /* Some generic code (such as spinlock debug) use loops_per_jiffy 1151 * as the number of __delay(1) in a jiffy, so make it so 1152 */ 1153 loops_per_jiffy = tb_ticks_per_jiffy; 1154 } 1155 1156 static int __init rtc_init(void) 1157 { 1158 struct platform_device *pdev; 1159 1160 if (!ppc_md.get_rtc_time) 1161 return -ENODEV; 1162 1163 pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0); 1164 if (IS_ERR(pdev)) 1165 return PTR_ERR(pdev); 1166 1167 return 0; 1168 } 1169 1170 module_init(rtc_init); 1171