/*
 *  linux/kernel/time/timekeeping.c
 *
 *  Kernel timekeeping code and accessor functions
 *
 *  This code was moved from linux/kernel/timer.c.
 *  Please see that file for copyright and history logs.
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sysdev.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>
#include <linux/stop_machine.h>

/* Structure holding internal timekeeping values. */
struct timekeeper {
	/* Current clocksource used for timekeeping. */
	struct clocksource *clock;
	/* The shift value of the current clocksource. */
	int	shift;

	/* Number of clock cycles in one NTP interval. */
	cycle_t	cycle_interval;
	/* Number of clock-shifted nanoseconds in one NTP interval. */
	u64	xtime_interval;
	/* Raw nanoseconds accumulated per NTP interval. */
	u32	raw_interval;

	/* Clock-shifted nanosecond remainder not stored in xtime.tv_nsec. */
	u64	xtime_nsec;
	/*
	 * Difference between accumulated time and NTP time in ntp
	 * shifted nanoseconds.
	 */
	s64	ntp_error;
	/*
	 * Shift conversion between clock-shifted nanoseconds and
	 * ntp-shifted nanoseconds.
	 */
	int	ntp_error_shift;
	/* NTP adjusted clock multiplier */
	u32	mult;
};

struct timekeeper timekeeper;

/**
 * timekeeper_setup_internals - Set up internals to use clocksource clock.
 *
 * @clock:	Pointer to clocksource.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */
static void timekeeper_setup_internals(struct clocksource *clock)
{
	cycle_t interval;
	u64 tmp;

	timekeeper.clock = clock;
	clock->cycle_last = clock->read(clock);

	/* Do the ns -> cycle conversion first, using original mult */
	tmp = NTP_INTERVAL_LENGTH;
	tmp <<= clock->shift;
	tmp += clock->mult/2;
	do_div(tmp, clock->mult);
	if (tmp == 0)
		tmp = 1;

	interval = (cycle_t) tmp;
	timekeeper.cycle_interval = interval;

	/* Go back from cycles -> shifted ns */
	timekeeper.xtime_interval = (u64) interval * clock->mult;
	timekeeper.raw_interval =
		((u64) interval * clock->mult) >> clock->shift;

	timekeeper.xtime_nsec = 0;
	timekeeper.shift = clock->shift;

	timekeeper.ntp_error = 0;
	timekeeper.ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;

	/*
	 * The timekeeper keeps its own mult values for the currently
	 * active clocksource. These values will be adjusted via NTP
	 * to counteract clock drifting.
	 */
	timekeeper.mult = clock->mult;
}

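/*
 * Worked example of the interval math above (illustrative numbers only,
 * not tied to any real clocksource): assume HZ = 100, so
 * NTP_INTERVAL_LENGTH is 10,000,000 ns, and a hypothetical 10 MHz
 * clocksource with shift = 20 and mult = 100 << 20 (100 ns per cycle in
 * 20-bit fixed point).
 *
 *	tmp  = (10,000,000 << 20) + mult/2 = 10,485,812,428,800
 *	tmp /= mult                        -> cycle_interval = 100,000
 *
 * i.e. exactly 10 ms worth of 10 MHz cycles. From that:
 *
 *	xtime_interval = 100,000 * mult       = 10,000,000 << 20
 *	raw_interval   = xtime_interval >> 20 = 10,000,000 ns
 *
 * so one NTP interval accumulates 10 ms of shifted nanoseconds; any
 * rounding difference against the NTP tick length is later folded into
 * ntp_error by update_wall_time().
 */
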
/* Timekeeper helper functions. */
static inline s64 timekeeping_get_ns(void)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;

	/* read clocksource: */
	clock = timekeeper.clock;
	cycle_now = clock->read(clock);

	/* calculate the delta since the last update_wall_time: */
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

	/* return the delta converted to nanoseconds, using the ntp adjusted mult */
	return clocksource_cyc2ns(cycle_delta, timekeeper.mult,
				  timekeeper.shift);
}

static inline s64 timekeeping_get_ns_raw(void)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;

	/* read clocksource: */
	clock = timekeeper.clock;
	cycle_now = clock->read(clock);

	/* calculate the delta since the last update_wall_time: */
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

	/* return the delta converted to nanoseconds, using the raw mult/shift */
	return clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
}

/*
 * This seqlock protects us from races in SMP while
 * playing with xtime.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);


/*
 * The current time.
 * wall_to_monotonic is what we need to add to xtime (or xtime corrected
 * for sub-jiffie times) to get to monotonic time.  Monotonic is pegged
 * at zero at system boot time, so wall_to_monotonic will be negative;
 * however, we will ALWAYS keep the tv_nsec part positive so we can use
 * the usual normalization.
 *
 * wall_to_monotonic is moved after resume from suspend so that the
 * monotonic time does not jump. We need to add total_sleep_time to
 * wall_to_monotonic to get the real boot based time offset.
 *
 * - wall_to_monotonic is no longer the boot time, getboottime must be
 * used instead.
 */
static struct timespec xtime __attribute__ ((aligned (16)));
static struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
static struct timespec total_sleep_time;

/*
 * The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock.
 */
struct timespec raw_time;

/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;

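/*
 * Worked example of how these values relate (illustrative numbers only):
 * suppose the persistent clock said 1000 s at boot, and the system has
 * since run for 200 s and then spent 50 s suspended. Then, roughly:
 *
 *	xtime             ~  1250 s	(wall time)
 *	wall_to_monotonic ~ -1050 s	(-1000 at boot, -50 more at resume)
 *	total_sleep_time  ~    50 s
 *
 * so xtime + wall_to_monotonic ~ 200 s is the monotonic clock (suspend
 * time excluded), monotonic + total_sleep_time ~ 250 s is the boot-based
 * time, and getboottime() below recovers the 1000 s boot wall time.
 */
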
/* must hold xtime_lock */
void timekeeping_leap_insert(int leapsecond)
{
	xtime.tv_sec += leapsecond;
	wall_to_monotonic.tv_sec -= leapsecond;
	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
			timekeeper.mult);
}

/**
 * timekeeping_forward_now - update clock to the current time
 *
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
 */
static void timekeeping_forward_now(void)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;
	s64 nsec;

	clock = timekeeper.clock;
	cycle_now = clock->read(clock);
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
	clock->cycle_last = cycle_now;

	nsec = clocksource_cyc2ns(cycle_delta, timekeeper.mult,
				  timekeeper.shift);

	/* If arch requires, add in gettimeoffset() */
	nsec += arch_gettimeoffset();

	timespec_add_ns(&xtime, nsec);

	nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
	timespec_add_ns(&raw_time, nsec);
}

/**
 * getnstimeofday - Returns the time of day in a timespec
 * @ts:	pointer to the timespec to be set
 *
 * Returns the time of day in a timespec.
 */
void getnstimeofday(struct timespec *ts)
{
	unsigned long seq;
	s64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&xtime_lock);

		*ts = xtime;
		nsecs = timekeeping_get_ns();

		/* If arch requires, add in gettimeoffset() */
		nsecs += arch_gettimeoffset();

	} while (read_seqretry(&xtime_lock, seq));

	timespec_add_ns(ts, nsecs);
}

EXPORT_SYMBOL(getnstimeofday);

ktime_t ktime_get(void)
{
	unsigned int seq;
	s64 secs, nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&xtime_lock);
		secs = xtime.tv_sec + wall_to_monotonic.tv_sec;
		nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec;
		nsecs += timekeeping_get_ns();

	} while (read_seqretry(&xtime_lock, seq));
	/*
	 * Use ktime_set/ktime_add_ns to create a proper ktime on
	 * 32-bit architectures without CONFIG_KTIME_SCALAR.
	 */
	return ktime_add_ns(ktime_set(secs, 0), nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get);

/**
 * ktime_get_ts - get the monotonic clock in timespec format
 * @ts:	pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by @ts.
 */
void ktime_get_ts(struct timespec *ts)
{
	struct timespec tomono;
	unsigned int seq;
	s64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&xtime_lock);
		*ts = xtime;
		tomono = wall_to_monotonic;
		nsecs = timekeeping_get_ns();

	} while (read_seqretry(&xtime_lock, seq));

	set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
				ts->tv_nsec + tomono.tv_nsec + nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);

/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv:	pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using getnstimeofday()
 */
void do_gettimeofday(struct timeval *tv)
{
	struct timespec now;

	getnstimeofday(&now);
	tv->tv_sec = now.tv_sec;
	tv->tv_usec = now.tv_nsec/1000;
}

EXPORT_SYMBOL(do_gettimeofday);
/**
 * do_settimeofday - Sets the time of day
 * @tv:	pointer to the timespec variable containing the new time
 *
 * Sets the time of day to the new time, updates NTP and notifies hrtimers.
 */
int do_settimeofday(struct timespec *tv)
{
	struct timespec ts_delta;
	unsigned long flags;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&xtime_lock, flags);

	timekeeping_forward_now();

	ts_delta.tv_sec = tv->tv_sec - xtime.tv_sec;
	ts_delta.tv_nsec = tv->tv_nsec - xtime.tv_nsec;
	wall_to_monotonic = timespec_sub(wall_to_monotonic, ts_delta);

	xtime = *tv;

	timekeeper.ntp_error = 0;
	ntp_clear();

	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
			timekeeper.mult);

	write_sequnlock_irqrestore(&xtime_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return 0;
}

EXPORT_SYMBOL(do_settimeofday);

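/*
 * Note on the ts_delta handling above (illustrative example): stepping the
 * wall clock forward by, say, 30 s makes ts_delta 30 s, so wall_to_monotonic
 * is reduced by the same 30 s.  xtime + wall_to_monotonic is therefore
 * unchanged and CLOCK_MONOTONIC readers observe no jump; only the realtime
 * clock steps.
 */
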
/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 */
static int change_clocksource(void *data)
{
	struct clocksource *new, *old;

	new = (struct clocksource *) data;

	timekeeping_forward_now();
	if (!new->enable || new->enable(new) == 0) {
		old = timekeeper.clock;
		timekeeper_setup_internals(new);
		if (old->disable)
			old->disable(old);
	}
	return 0;
}

/**
 * timekeeping_notify - Install a new clock source
 * @clock:	pointer to the clock source
 *
 * This function is called from clocksource.c after a new, better clock
 * source has been registered. The caller holds the clocksource_mutex.
 */
void timekeeping_notify(struct clocksource *clock)
{
	if (timekeeper.clock == clock)
		return;
	stop_machine(change_clocksource, clock, NULL);
	tick_clock_notify();
}

/**
 * ktime_get_real - get the real (wall-) time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get_real(void)
{
	struct timespec now;

	getnstimeofday(&now);

	return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get_real);

/**
 * getrawmonotonic - Returns the raw monotonic time in a timespec
 * @ts:	pointer to the timespec to be set
 *
 * Returns the raw monotonic time (completely un-modified by ntp)
 */
void getrawmonotonic(struct timespec *ts)
{
	unsigned long seq;
	s64 nsecs;

	do {
		seq = read_seqbegin(&xtime_lock);
		nsecs = timekeeping_get_ns_raw();
		*ts = raw_time;

	} while (read_seqretry(&xtime_lock, seq));

	timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getrawmonotonic);


/**
 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
 */
int timekeeping_valid_for_hres(void)
{
	unsigned long seq;
	int ret;

	do {
		seq = read_seqbegin(&xtime_lock);

		ret = timekeeper.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;

	} while (read_seqretry(&xtime_lock, seq));

	return ret;
}

/**
 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
 *
 * Caller must observe xtime_lock via read_seqbegin/read_seqretry to
 * ensure that the clocksource does not change!
 */
u64 timekeeping_max_deferment(void)
{
	return timekeeper.clock->max_idle_ns;
}

/**
 * read_persistent_clock - Return time from the persistent clock.
 *
 * Weak dummy function for arches that do not yet support it.
 * Reads the time from the battery backed persistent clock.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
void __attribute__((weak)) read_persistent_clock(struct timespec *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}

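/*
 * Because the stub above is declared weak, an architecture with a usable
 * battery-backed clock simply provides its own strong definition and the
 * linker picks that one over the stub. A minimal sketch, where
 * my_arch_rtc_read_seconds() is a hypothetical helper and not a real API:
 *
 *	void read_persistent_clock(struct timespec *ts)
 *	{
 *		ts->tv_sec = my_arch_rtc_read_seconds();
 *		ts->tv_nsec = 0;
 *	}
 */
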
/**
 * read_boot_clock - Return time of the system start.
 *
 * Weak dummy function for arches that do not yet support it.
 * Function to read the exact time the system has been started.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
void __attribute__((weak)) read_boot_clock(struct timespec *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}

/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
	struct clocksource *clock;
	unsigned long flags;
	struct timespec now, boot;

	read_persistent_clock(&now);
	read_boot_clock(&boot);

	write_seqlock_irqsave(&xtime_lock, flags);

	ntp_init();

	clock = clocksource_default_clock();
	if (clock->enable)
		clock->enable(clock);
	timekeeper_setup_internals(clock);

	xtime.tv_sec = now.tv_sec;
	xtime.tv_nsec = now.tv_nsec;
	raw_time.tv_sec = 0;
	raw_time.tv_nsec = 0;
	if (boot.tv_sec == 0 && boot.tv_nsec == 0) {
		boot.tv_sec = xtime.tv_sec;
		boot.tv_nsec = xtime.tv_nsec;
	}
	set_normalized_timespec(&wall_to_monotonic,
				-boot.tv_sec, -boot.tv_nsec);
	total_sleep_time.tv_sec = 0;
	total_sleep_time.tv_nsec = 0;
	write_sequnlock_irqrestore(&xtime_lock, flags);
}

/* time in seconds when suspend began */
static struct timespec timekeeping_suspend_time;

/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 * @dev:	unused
 *
 * This is for the generic clocksource timekeeping.
 * xtime/wall_to_monotonic/jiffies/etc are
 * still managed by arch specific suspend/resume code.
 */
static int timekeeping_resume(struct sys_device *dev)
{
	unsigned long flags;
	struct timespec ts;

	read_persistent_clock(&ts);

	clocksource_resume();

	write_seqlock_irqsave(&xtime_lock, flags);

	if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
		ts = timespec_sub(ts, timekeeping_suspend_time);
		xtime = timespec_add(xtime, ts);
		wall_to_monotonic = timespec_sub(wall_to_monotonic, ts);
		total_sleep_time = timespec_add(total_sleep_time, ts);
	}
	/* re-base the last cycle value */
	timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
	timekeeper.ntp_error = 0;
	timekeeping_suspended = 0;
	write_sequnlock_irqrestore(&xtime_lock, flags);

	touch_softlockup_watchdog();

	clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);

	/* Resume hrtimers */
	hres_timers_resume();

	return 0;
}

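/*
 * To make the resume bookkeeping above concrete (numbers are illustrative
 * only): if the persistent clock advanced 120 s while the system was
 * suspended, xtime is stepped forward by 120 s, wall_to_monotonic is
 * reduced by 120 s so the monotonic clock does not include the sleep, and
 * total_sleep_time grows by 120 s so boot-based time still accounts for it.
 */
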
static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
{
	unsigned long flags;

	read_persistent_clock(&timekeeping_suspend_time);

	write_seqlock_irqsave(&xtime_lock, flags);
	timekeeping_forward_now();
	timekeeping_suspended = 1;
	write_sequnlock_irqrestore(&xtime_lock, flags);

	clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
	clocksource_suspend();

	return 0;
}

/* sysfs resume/suspend bits for timekeeping */
static struct sysdev_class timekeeping_sysclass = {
	.name		= "timekeeping",
	.resume		= timekeeping_resume,
	.suspend	= timekeeping_suspend,
};

static struct sys_device device_timer = {
	.id	= 0,
	.cls	= &timekeeping_sysclass,
};

static int __init timekeeping_init_device(void)
{
	int error = sysdev_class_register(&timekeeping_sysclass);
	if (!error)
		error = sysdev_register(&device_timer);
	return error;
}

device_initcall(timekeeping_init_device);

/*
 * If the error is already large, we look ahead even further
 * to compensate for late or lost adjustments.
 */
static __always_inline int timekeeping_bigadjust(s64 error, s64 *interval,
						 s64 *offset)
{
	s64 tick_error, i;
	u32 look_ahead, adj;
	s32 error2, mult;

	/*
	 * Use the current error value to determine how much to look ahead.
	 * The larger the error the slower we adjust for it to avoid problems
	 * with losing too many ticks, otherwise we would overadjust and
	 * produce an even larger error.  The smaller the adjustment the
	 * faster we try to adjust for it, as lost ticks can do less harm
	 * here.  This is tuned so that an error of about 1 msec is adjusted
	 * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
	 */
	error2 = timekeeper.ntp_error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
	error2 = abs(error2);
	for (look_ahead = 0; error2 > 0; look_ahead++)
		error2 >>= 2;

	/*
	 * Now calculate the error in (1 << look_ahead) ticks, but first
	 * remove the single look ahead already included in the error.
	 */
	tick_error = tick_length >> (timekeeper.ntp_error_shift + 1);
	tick_error -= timekeeper.xtime_interval >> 1;
	error = ((error - tick_error) >> look_ahead) + tick_error;

	/* Finally calculate the adjustment shift value. */
	i = *interval;
	mult = 1;
	if (error < 0) {
		error = -error;
		*interval = -*interval;
		*offset = -*offset;
		mult = -1;
	}
	for (adj = 0; error > i; adj++)
		error >>= 1;

	*interval <<= adj;
	*offset <<= adj;
	return mult << adj;
}

/*
 * Adjust the multiplier to reduce the error value;
 * this is optimized for the most common adjustments of -1, 0 and 1,
 * for other values we can do a bit more work.
 */
static void timekeeping_adjust(s64 offset)
{
	s64 error, interval = timekeeper.cycle_interval;
	int adj;

	error = timekeeper.ntp_error >> (timekeeper.ntp_error_shift - 1);
	if (error > interval) {
		error >>= 2;
		if (likely(error <= interval))
			adj = 1;
		else
			adj = timekeeping_bigadjust(error, &interval, &offset);
	} else if (error < -interval) {
		error >>= 2;
		if (likely(error >= -interval)) {
			adj = -1;
			interval = -interval;
			offset = -offset;
		} else
			adj = timekeeping_bigadjust(error, &interval, &offset);
	} else
		return;

	timekeeper.mult += adj;
	timekeeper.xtime_interval += interval;
	timekeeper.xtime_nsec -= offset;
	timekeeper.ntp_error -= (interval - offset) <<
				timekeeper.ntp_error_shift;
}

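/*
 * The accumulation below runs in doubling chunks rather than one tick at a
 * time.  For instance (illustrative only), if roughly 37 tick intervals'
 * worth of cycles are pending after a long NO_HZ idle, the loop in
 * update_wall_time() consumes them as one chunk of 32 intervals, then one
 * of 4, then one of 1, instead of iterating 37 times.
 */
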
/**
 * logarithmic_accumulation - shifted accumulation of cycles
 *
 * This function accumulates a shifted interval of cycles into
 * a shifted interval of nanoseconds, allowing for an O(log)
 * accumulation loop.
 *
 * Returns the unconsumed cycles.
 */
static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
{
	u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift;

	/* If the offset is smaller than a shifted interval, do nothing */
	if (offset < timekeeper.cycle_interval<<shift)
		return offset;

	/* Accumulate one shifted interval */
	offset -= timekeeper.cycle_interval << shift;
	timekeeper.clock->cycle_last += timekeeper.cycle_interval << shift;

	timekeeper.xtime_nsec += timekeeper.xtime_interval << shift;
	while (timekeeper.xtime_nsec >= nsecps) {
		timekeeper.xtime_nsec -= nsecps;
		xtime.tv_sec++;
		second_overflow();
	}

	/* Accumulate into raw time */
	raw_time.tv_nsec += timekeeper.raw_interval << shift;
	while (raw_time.tv_nsec >= NSEC_PER_SEC) {
		raw_time.tv_nsec -= NSEC_PER_SEC;
		raw_time.tv_sec++;
	}

	/* Accumulate error between NTP and clock interval */
	timekeeper.ntp_error += tick_length << shift;
	timekeeper.ntp_error -= timekeeper.xtime_interval <<
				(timekeeper.ntp_error_shift + shift);

	return offset;
}


/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 * Called from the timer interrupt, must hold a write on xtime_lock.
 */
void update_wall_time(void)
{
	struct clocksource *clock;
	cycle_t offset;
	int shift = 0, maxshift;

	/* Make sure we're fully resumed: */
	if (unlikely(timekeeping_suspended))
		return;

	clock = timekeeper.clock;

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
	offset = timekeeper.cycle_interval;
#else
	offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
#endif
	timekeeper.xtime_nsec = (s64)xtime.tv_nsec << timekeeper.shift;

	/*
	 * With NO_HZ we may have to accumulate many cycle_intervals
	 * (think "ticks") worth of time at once. To do this efficiently,
	 * we calculate the largest doubling multiple of cycle_intervals
	 * that is smaller than the offset.  We then accumulate that
	 * chunk in one go, and then try to consume the next smaller
	 * doubled multiple.
	 */
	shift = ilog2(offset) - ilog2(timekeeper.cycle_interval);
	shift = max(0, shift);
	/* Bound shift to one less than what overflows tick_length */
	maxshift = (8*sizeof(tick_length) - (ilog2(tick_length)+1)) - 1;
	shift = min(shift, maxshift);
	while (offset >= timekeeper.cycle_interval) {
		offset = logarithmic_accumulation(offset, shift);
		if (offset < timekeeper.cycle_interval<<shift)
			shift--;
	}

	/* correct the clock when NTP error is too big */
	timekeeping_adjust(offset);

	/*
	 * Since in the loop above we accumulate any amount of time
	 * in xtime_nsec over a second into xtime.tv_sec, it's possible for
	 * xtime_nsec to be fairly small after the loop. Further, if we're
	 * slightly speeding the clocksource up in timekeeping_adjust(),
	 * it's possible the required corrective factor to xtime_nsec could
	 * cause it to underflow.
	 *
	 * Now, we cannot simply roll the accumulated second back, since
	 * the NTP subsystem has been notified via second_overflow. So
	 * instead we push xtime_nsec forward by the amount we underflowed,
	 * and add that amount into the error.
	 *
	 * We'll correct this error next time through this function, when
	 * xtime_nsec is not as small.
	 */
	if (unlikely((s64)timekeeper.xtime_nsec < 0)) {
		s64 neg = -(s64)timekeeper.xtime_nsec;
		timekeeper.xtime_nsec = 0;
		timekeeper.ntp_error += neg << timekeeper.ntp_error_shift;
	}


	/*
	 * Store full nanoseconds into xtime after rounding it up and
	 * add the remainder to the error difference.
	 */
	xtime.tv_nsec = ((s64) timekeeper.xtime_nsec >> timekeeper.shift) + 1;
	timekeeper.xtime_nsec -= (s64) xtime.tv_nsec << timekeeper.shift;
	timekeeper.ntp_error += timekeeper.xtime_nsec <<
				timekeeper.ntp_error_shift;

	/*
	 * Finally, make sure that after the rounding
	 * xtime.tv_nsec isn't larger than NSEC_PER_SEC
	 */
	if (unlikely(xtime.tv_nsec >= NSEC_PER_SEC)) {
		xtime.tv_nsec -= NSEC_PER_SEC;
		xtime.tv_sec++;
		second_overflow();
	}

	/* update the vsyscall interface with the new time */
	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
			timekeeper.mult);
}

/**
 * getboottime - Return the real time of system boot.
 * @ts:	pointer to the timespec to be set
 *
 * Returns the wall time the system was booted, in a timespec.
 *
 * This is based on the wall_to_monotonic offset and the total suspend
 * time. Calls to settimeofday will affect the value returned (which
 * basically means that however wrong your real time clock is at boot time,
 * you get the right time here).
 */
void getboottime(struct timespec *ts)
{
	struct timespec boottime = {
		.tv_sec = wall_to_monotonic.tv_sec + total_sleep_time.tv_sec,
		.tv_nsec = wall_to_monotonic.tv_nsec + total_sleep_time.tv_nsec
	};

	set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
}
EXPORT_SYMBOL_GPL(getboottime);

/**
 * monotonic_to_bootbased - Convert the monotonic time to boot based.
 * @ts:	pointer to the timespec to be converted
 */
void monotonic_to_bootbased(struct timespec *ts)
{
	*ts = timespec_add(*ts, total_sleep_time);
}
EXPORT_SYMBOL_GPL(monotonic_to_bootbased);

unsigned long get_seconds(void)
{
	return xtime.tv_sec;
}
EXPORT_SYMBOL(get_seconds);

struct timespec __current_kernel_time(void)
{
	return xtime;
}

struct timespec __get_wall_to_monotonic(void)
{
	return wall_to_monotonic;
}

struct timespec current_kernel_time(void)
{
	struct timespec now;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);

		now = xtime;
	} while (read_seqretry(&xtime_lock, seq));

	return now;
}
EXPORT_SYMBOL(current_kernel_time);

struct timespec get_monotonic_coarse(void)
{
	struct timespec now, mono;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);

		now = xtime;
		mono = wall_to_monotonic;
	} while (read_seqretry(&xtime_lock, seq));

	set_normalized_timespec(&now, now.tv_sec + mono.tv_sec,
				now.tv_nsec + mono.tv_nsec);
	return now;
}