/*
 *  linux/kernel/time/timekeeping.c
 *
 *  Kernel timekeeping code and accessor functions
 *
 *  This code was moved from linux/kernel/timer.c.
 *  Please see that file for copyright and history logs.
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>
#include <linux/stop_machine.h>

/* Structure holding internal timekeeping values. */
struct timekeeper {
	/* Current clocksource used for timekeeping. */
	struct clocksource *clock;
	/* The shift value of the current clocksource. */
	int	shift;

	/* Number of clock cycles in one NTP interval. */
	cycle_t cycle_interval;
	/* Number of clock shifted nano seconds in one NTP interval. */
	u64	xtime_interval;
	/* shifted nano seconds left over when rounding cycle_interval */
	s64	xtime_remainder;
	/* Raw nano seconds accumulated per NTP interval. */
	u32	raw_interval;

	/* Clock shifted nano seconds remainder not stored in xtime.tv_nsec. */
	u64	xtime_nsec;
	/* Difference between accumulated time and NTP time in ntp
	 * shifted nano seconds. */
	s64	ntp_error;
	/* Shift conversion between clock shifted nano seconds and
	 * ntp shifted nano seconds. */
	int	ntp_error_shift;
	/* NTP adjusted clock multiplier */
	u32	mult;
};

static struct timekeeper timekeeper;

/**
 * timekeeper_setup_internals - Set up internals to use clocksource clock.
 *
 * @clock:		Pointer to clocksource.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */
static void timekeeper_setup_internals(struct clocksource *clock)
{
	cycle_t interval;
	u64 tmp, ntpinterval;

	timekeeper.clock = clock;
	clock->cycle_last = clock->read(clock);

	/* Do the ns -> cycle conversion first, using original mult */
	tmp = NTP_INTERVAL_LENGTH;
	tmp <<= clock->shift;
	ntpinterval = tmp;
	tmp += clock->mult/2;
	do_div(tmp, clock->mult);
	if (tmp == 0)
		tmp = 1;

	interval = (cycle_t) tmp;
	timekeeper.cycle_interval = interval;

	/* Go back from cycles -> shifted ns */
	timekeeper.xtime_interval = (u64) interval * clock->mult;
	timekeeper.xtime_remainder = ntpinterval - timekeeper.xtime_interval;
	timekeeper.raw_interval =
		((u64) interval * clock->mult) >> clock->shift;

	timekeeper.xtime_nsec = 0;
	timekeeper.shift = clock->shift;

	timekeeper.ntp_error = 0;
	timekeeper.ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;

	/*
	 * The timekeeper keeps its own mult values for the currently
	 * active clocksource. These values will be adjusted via NTP
	 * to counteract clock drifting.
	 */
	timekeeper.mult = clock->mult;
}

/* Timekeeper helper functions. */
static inline s64 timekeeping_get_ns(void)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;

	/* read clocksource: */
	clock = timekeeper.clock;
	cycle_now = clock->read(clock);

	/* calculate the delta since the last update_wall_time: */
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

	/* return delta converted to nanoseconds using ntp adjusted mult. */
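	/*
	 * clocksource_cyc2ns() is the fixed point conversion
	 * (cycle_delta * mult) >> shift, here using the NTP adjusted
	 * multiplier kept in the timekeeper rather than the raw
	 * clocksource mult.
	 */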
	return clocksource_cyc2ns(cycle_delta, timekeeper.mult,
				  timekeeper.shift);
}

static inline s64 timekeeping_get_ns_raw(void)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;

	/* read clocksource: */
	clock = timekeeper.clock;
	cycle_now = clock->read(clock);

	/* calculate the delta since the last update_wall_time: */
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

	/* return delta converted to nanoseconds using the raw clock mult. */
	return clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
}

/*
 * This seqlock protects us from races in SMP while
 * playing with xtime.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);


/*
 * The current time
 * wall_to_monotonic is what we need to add to xtime (or xtime corrected
 * for sub jiffie times) to get to monotonic time.  Monotonic is pegged
 * at zero at system boot time, so wall_to_monotonic will be negative,
 * however, we will ALWAYS keep the tv_nsec part positive so we can use
 * the usual normalization.
 *
 * wall_to_monotonic is moved after resume from suspend for the monotonic
 * time not to jump. We need to add total_sleep_time to wall_to_monotonic
 * to get the real boot based time offset.
 *
 * - wall_to_monotonic is no longer the boot time, getboottime must be
 * used instead.
 */
static struct timespec xtime __attribute__ ((aligned (16)));
static struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
static struct timespec total_sleep_time;

/*
 * The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock.
 */
static struct timespec raw_time;

/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;

/* must hold xtime_lock */
void timekeeping_leap_insert(int leapsecond)
{
	xtime.tv_sec += leapsecond;
	wall_to_monotonic.tv_sec -= leapsecond;
	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
			timekeeper.mult);
}

/**
 * timekeeping_forward_now - update clock to the current time
 *
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
 */
static void timekeeping_forward_now(void)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;
	s64 nsec;

	clock = timekeeper.clock;
	cycle_now = clock->read(clock);
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
	clock->cycle_last = cycle_now;

	nsec = clocksource_cyc2ns(cycle_delta, timekeeper.mult,
				  timekeeper.shift);

	/* If arch requires, add in gettimeoffset() */
	nsec += arch_gettimeoffset();

	timespec_add_ns(&xtime, nsec);

	nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
	timespec_add_ns(&raw_time, nsec);
}

/**
 * getnstimeofday - Returns the time of day in a timespec
 * @ts:		pointer to the timespec to be set
 *
 * Returns the time of day in a timespec.
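 *
 * The time is sampled under the xtime_lock seqlock, so the read is
 * retried if a writer updated the time while we were looking at it.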
 */
void getnstimeofday(struct timespec *ts)
{
	unsigned long seq;
	s64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&xtime_lock);

		*ts = xtime;
		nsecs = timekeeping_get_ns();

		/* If arch requires, add in gettimeoffset() */
		nsecs += arch_gettimeoffset();

	} while (read_seqretry(&xtime_lock, seq));

	timespec_add_ns(ts, nsecs);
}

EXPORT_SYMBOL(getnstimeofday);

ktime_t ktime_get(void)
{
	unsigned int seq;
	s64 secs, nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&xtime_lock);
		secs = xtime.tv_sec + wall_to_monotonic.tv_sec;
		nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec;
		nsecs += timekeeping_get_ns();

	} while (read_seqretry(&xtime_lock, seq));
	/*
	 * Use ktime_set/ktime_add_ns to create a proper ktime on
	 * 32-bit architectures without CONFIG_KTIME_SCALAR.
	 */
	return ktime_add_ns(ktime_set(secs, 0), nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get);

/**
 * ktime_get_ts - get the monotonic clock in timespec format
 * @ts:		pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by @ts.
 */
void ktime_get_ts(struct timespec *ts)
{
	struct timespec tomono;
	unsigned int seq;
	s64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&xtime_lock);
		*ts = xtime;
		tomono = wall_to_monotonic;
		nsecs = timekeeping_get_ns();

	} while (read_seqretry(&xtime_lock, seq));

	set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
				ts->tv_nsec + tomono.tv_nsec + nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);

#ifdef CONFIG_NTP_PPS

/**
 * getnstime_raw_and_real - get day and raw monotonic time in timespec format
 * @ts_raw:	pointer to the timespec to be set to raw monotonic time
 * @ts_real:	pointer to the timespec to be set to the time of day
 *
 * This function reads both the time of day and raw monotonic time at the
 * same time atomically and stores the resulting timestamps in timespec
 * format.
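 *
 * Both values are read inside a single xtime_lock read section, so on a
 * seqlock retry the pair is re-read together and stays consistent.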
 */
void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
{
	unsigned long seq;
	s64 nsecs_raw, nsecs_real;

	WARN_ON_ONCE(timekeeping_suspended);

	do {
		u32 arch_offset;

		seq = read_seqbegin(&xtime_lock);

		*ts_raw = raw_time;
		*ts_real = xtime;

		nsecs_raw = timekeeping_get_ns_raw();
		nsecs_real = timekeeping_get_ns();

		/* If arch requires, add in gettimeoffset() */
		arch_offset = arch_gettimeoffset();
		nsecs_raw += arch_offset;
		nsecs_real += arch_offset;

	} while (read_seqretry(&xtime_lock, seq));

	timespec_add_ns(ts_raw, nsecs_raw);
	timespec_add_ns(ts_real, nsecs_real);
}
EXPORT_SYMBOL(getnstime_raw_and_real);

#endif /* CONFIG_NTP_PPS */

/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv:		pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using getnstimeofday()
 */
void do_gettimeofday(struct timeval *tv)
{
	struct timespec now;

	getnstimeofday(&now);
	tv->tv_sec = now.tv_sec;
	tv->tv_usec = now.tv_nsec/1000;
}

EXPORT_SYMBOL(do_gettimeofday);
/**
 * do_settimeofday - Sets the time of day
 * @tv:		pointer to the timespec variable containing the new time
 *
 * Sets the time of day to the new time and updates NTP and notifies hrtimers
 */
int do_settimeofday(const struct timespec *tv)
{
	struct timespec ts_delta;
	unsigned long flags;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&xtime_lock, flags);

	timekeeping_forward_now();

	ts_delta.tv_sec = tv->tv_sec - xtime.tv_sec;
	ts_delta.tv_nsec = tv->tv_nsec - xtime.tv_nsec;
	wall_to_monotonic = timespec_sub(wall_to_monotonic, ts_delta);

	xtime = *tv;

	timekeeper.ntp_error = 0;
	ntp_clear();

	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
			timekeeper.mult);

	write_sequnlock_irqrestore(&xtime_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return 0;
}

EXPORT_SYMBOL(do_settimeofday);


/**
 * timekeeping_inject_offset - Adds or subtracts from the current time.
 * @ts:		pointer to the timespec variable containing the offset
 *
 * Adds or subtracts an offset value from the current time.
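 *
 * Because wall_to_monotonic is moved by the opposite amount below,
 * CLOCK_MONOTONIC is unaffected; only the wall clock shifts by @ts.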
 */
int timekeeping_inject_offset(struct timespec *ts)
{
	unsigned long flags;

	if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&xtime_lock, flags);

	timekeeping_forward_now();

	xtime = timespec_add(xtime, *ts);
	wall_to_monotonic = timespec_sub(wall_to_monotonic, *ts);

	timekeeper.ntp_error = 0;
	ntp_clear();

	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
			timekeeper.mult);

	write_sequnlock_irqrestore(&xtime_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return 0;
}
EXPORT_SYMBOL(timekeeping_inject_offset);

/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 */
static int change_clocksource(void *data)
{
	struct clocksource *new, *old;

	new = (struct clocksource *) data;

	timekeeping_forward_now();
	if (!new->enable || new->enable(new) == 0) {
		old = timekeeper.clock;
		timekeeper_setup_internals(new);
		if (old->disable)
			old->disable(old);
	}
	return 0;
}

/**
 * timekeeping_notify - Install a new clock source
 * @clock:		pointer to the clock source
 *
 * This function is called from clocksource.c after a new, better clock
 * source has been registered. The caller holds the clocksource_mutex.
 */
void timekeeping_notify(struct clocksource *clock)
{
	if (timekeeper.clock == clock)
		return;
	stop_machine(change_clocksource, clock, NULL);
	tick_clock_notify();
}

/**
 * ktime_get_real - get the real (wall-) time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get_real(void)
{
	struct timespec now;

	getnstimeofday(&now);

	return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get_real);

/**
 * getrawmonotonic - Returns the raw monotonic time in a timespec
 * @ts:		pointer to the timespec to be set
 *
 * Returns the raw monotonic time (completely un-modified by ntp)
 */
void getrawmonotonic(struct timespec *ts)
{
	unsigned long seq;
	s64 nsecs;

	do {
		seq = read_seqbegin(&xtime_lock);
		nsecs = timekeeping_get_ns_raw();
		*ts = raw_time;

	} while (read_seqretry(&xtime_lock, seq));

	timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getrawmonotonic);


/**
 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
 */
int timekeeping_valid_for_hres(void)
{
	unsigned long seq;
	int ret;

	do {
		seq = read_seqbegin(&xtime_lock);

		ret = timekeeper.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;

	} while (read_seqretry(&xtime_lock, seq));

	return ret;
}

/**
 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
 *
 * Caller must observe xtime_lock via read_seqbegin/read_seqretry to
 * ensure that the clocksource does not change!
 */
u64 timekeeping_max_deferment(void)
{
	return timekeeper.clock->max_idle_ns;
}

/**
 * read_persistent_clock -  Return time from the persistent clock.
 *
 * Weak dummy function for arches that do not yet support it.
 * Reads the time from the battery backed persistent clock.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
void __attribute__((weak)) read_persistent_clock(struct timespec *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}

/**
 * read_boot_clock -  Return time of the system start.
 *
 * Weak dummy function for arches that do not yet support it.
 * Function to read the exact time the system has been started.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
void __attribute__((weak)) read_boot_clock(struct timespec *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}

/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
	struct clocksource *clock;
	unsigned long flags;
	struct timespec now, boot;

	read_persistent_clock(&now);
	read_boot_clock(&boot);

	write_seqlock_irqsave(&xtime_lock, flags);

	ntp_init();

	clock = clocksource_default_clock();
	if (clock->enable)
		clock->enable(clock);
	timekeeper_setup_internals(clock);

	xtime.tv_sec = now.tv_sec;
	xtime.tv_nsec = now.tv_nsec;
	raw_time.tv_sec = 0;
	raw_time.tv_nsec = 0;
	if (boot.tv_sec == 0 && boot.tv_nsec == 0) {
		boot.tv_sec = xtime.tv_sec;
		boot.tv_nsec = xtime.tv_nsec;
	}
	set_normalized_timespec(&wall_to_monotonic,
				-boot.tv_sec, -boot.tv_nsec);
	total_sleep_time.tv_sec = 0;
	total_sleep_time.tv_nsec = 0;
	write_sequnlock_irqrestore(&xtime_lock, flags);
}

/* time in seconds when suspend began */
static struct timespec timekeeping_suspend_time;

/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 *
 * This is for the generic clocksource timekeeping.
 * xtime/wall_to_monotonic/jiffies/etc are
 * still managed by arch specific suspend/resume code.
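 *
 * The interval spent suspended is measured via the persistent clock; it
 * is added to xtime and total_sleep_time while wall_to_monotonic is moved
 * back by the same amount, so the monotonic clock does not jump forward.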
 */
static void timekeeping_resume(void)
{
	unsigned long flags;
	struct timespec ts;

	read_persistent_clock(&ts);

	clocksource_resume();

	write_seqlock_irqsave(&xtime_lock, flags);

	if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
		ts = timespec_sub(ts, timekeeping_suspend_time);
		xtime = timespec_add(xtime, ts);
		wall_to_monotonic = timespec_sub(wall_to_monotonic, ts);
		total_sleep_time = timespec_add(total_sleep_time, ts);
	}
	/* re-base the last cycle value */
	timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
	timekeeper.ntp_error = 0;
	timekeeping_suspended = 0;
	write_sequnlock_irqrestore(&xtime_lock, flags);

	touch_softlockup_watchdog();

	clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);

	/* Resume hrtimers */
	hres_timers_resume();
}

static int timekeeping_suspend(void)
{
	unsigned long flags;

	read_persistent_clock(&timekeeping_suspend_time);

	write_seqlock_irqsave(&xtime_lock, flags);
	timekeeping_forward_now();
	timekeeping_suspended = 1;
	write_sequnlock_irqrestore(&xtime_lock, flags);

	clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
	clocksource_suspend();

	return 0;
}

/* syscore resume/suspend bits for timekeeping */
static struct syscore_ops timekeeping_syscore_ops = {
	.resume		= timekeeping_resume,
	.suspend	= timekeeping_suspend,
};

static int __init timekeeping_init_ops(void)
{
	register_syscore_ops(&timekeeping_syscore_ops);
	return 0;
}

device_initcall(timekeeping_init_ops);

/*
 * If the error is already larger, we look ahead even further
 * to compensate for late or lost adjustments.
 */
static __always_inline int timekeeping_bigadjust(s64 error, s64 *interval,
						 s64 *offset)
{
	s64 tick_error, i;
	u32 look_ahead, adj;
	s32 error2, mult;

	/*
	 * Use the current error value to determine how much to look ahead.
	 * The larger the error the slower we adjust for it to avoid problems
	 * with losing too many ticks, otherwise we would overadjust and
	 * produce an even larger error.  The smaller the adjustment the
	 * faster we try to adjust for it, as lost ticks can do less harm
	 * here.  This is tuned so that an error of about 1 msec is adjusted
	 * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
	 */
	error2 = timekeeper.ntp_error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
	error2 = abs(error2);
	for (look_ahead = 0; error2 > 0; look_ahead++)
		error2 >>= 2;

	/*
	 * Now calculate the error in (1 << look_ahead) ticks, but first
	 * remove the single look ahead already included in the error.
	 */
	tick_error = tick_length >> (timekeeper.ntp_error_shift + 1);
	tick_error -= timekeeper.xtime_interval >> 1;
	error = ((error - tick_error) >> look_ahead) + tick_error;

	/* Finally calculate the adjustment shift value. */
	i = *interval;
	mult = 1;
	if (error < 0) {
		error = -error;
		*interval = -*interval;
		*offset = -*offset;
		mult = -1;
	}
	for (adj = 0; error > i; adj++)
		error >>= 1;

	*interval <<= adj;
	*offset <<= adj;
	return mult << adj;
}

/*
 * Adjust the multiplier to reduce the error value,
 * this is optimized for the most common adjustments of -1,0,1,
 * for other values we can do a bit more work.
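 *
 * Bumping mult by adj makes every clock cycle account for adj more
 * shifted nanoseconds, so xtime_interval (cycle_interval * mult) has
 * to grow by the matching amount, and the already-elapsed part of the
 * current interval is compensated via xtime_nsec and ntp_error.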
 */
static void timekeeping_adjust(s64 offset)
{
	s64 error, interval = timekeeper.cycle_interval;
	int adj;

	error = timekeeper.ntp_error >> (timekeeper.ntp_error_shift - 1);
	if (error > interval) {
		error >>= 2;
		if (likely(error <= interval))
			adj = 1;
		else
			adj = timekeeping_bigadjust(error, &interval, &offset);
	} else if (error < -interval) {
		error >>= 2;
		if (likely(error >= -interval)) {
			adj = -1;
			interval = -interval;
			offset = -offset;
		} else
			adj = timekeeping_bigadjust(error, &interval, &offset);
	} else
		return;

	timekeeper.mult += adj;
	timekeeper.xtime_interval += interval;
	timekeeper.xtime_nsec -= offset;
	timekeeper.ntp_error -= (interval - offset) <<
				timekeeper.ntp_error_shift;
}


/**
 * logarithmic_accumulation - shifted accumulation of cycles
 *
 * This function accumulates a shifted interval of cycles into
 * a shifted interval of nanoseconds. Allows for O(log) accumulation
 * loop.
 *
 * Returns the unconsumed cycles.
 */
static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
{
	u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift;
	u64 raw_nsecs;

	/* If the offset is smaller than a shifted interval, do nothing */
	if (offset < timekeeper.cycle_interval<<shift)
		return offset;

	/* Accumulate one shifted interval */
	offset -= timekeeper.cycle_interval << shift;
	timekeeper.clock->cycle_last += timekeeper.cycle_interval << shift;

	timekeeper.xtime_nsec += timekeeper.xtime_interval << shift;
	while (timekeeper.xtime_nsec >= nsecps) {
		timekeeper.xtime_nsec -= nsecps;
		xtime.tv_sec++;
		second_overflow();
	}

	/* Accumulate raw time */
	raw_nsecs = timekeeper.raw_interval << shift;
	raw_nsecs += raw_time.tv_nsec;
	if (raw_nsecs >= NSEC_PER_SEC) {
		u64 raw_secs = raw_nsecs;
		raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
		raw_time.tv_sec += raw_secs;
	}
	raw_time.tv_nsec = raw_nsecs;

	/* Accumulate error between NTP and clock interval */
	timekeeper.ntp_error += tick_length << shift;
	timekeeper.ntp_error -=
		(timekeeper.xtime_interval + timekeeper.xtime_remainder) <<
				(timekeeper.ntp_error_shift + shift);

	return offset;
}


/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 * Called from the timer interrupt, must hold a write on xtime_lock.
 */
static void update_wall_time(void)
{
	struct clocksource *clock;
	cycle_t offset;
	int shift = 0, maxshift;

	/* Make sure we're fully resumed: */
	if (unlikely(timekeeping_suspended))
		return;

	clock = timekeeper.clock;

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
	offset = timekeeper.cycle_interval;
#else
	offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
#endif
	timekeeper.xtime_nsec = (s64)xtime.tv_nsec << timekeeper.shift;

	/*
	 * With NO_HZ we may have to accumulate many cycle_intervals
	 * (think "ticks") worth of time at once. To do this efficiently,
	 * we calculate the largest doubling multiple of cycle_intervals
	 * that is smaller than the offset.  We then accumulate that
	 * chunk in one go, and then try to consume the next smaller
	 * doubled multiple.
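	 *
	 * For example, with roughly 37 tick intervals pending we first
	 * accumulate a 32-interval chunk, then a 4-interval chunk, and
	 * finally a single interval.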
	 */
	shift = ilog2(offset) - ilog2(timekeeper.cycle_interval);
	shift = max(0, shift);
	/* Bound shift to one less than what overflows tick_length */
	maxshift = (8*sizeof(tick_length) - (ilog2(tick_length)+1)) - 1;
	shift = min(shift, maxshift);
	while (offset >= timekeeper.cycle_interval) {
		offset = logarithmic_accumulation(offset, shift);
		if (offset < timekeeper.cycle_interval<<shift)
			shift--;
	}

	/* correct the clock when NTP error is too big */
	timekeeping_adjust(offset);

	/*
	 * Since in the loop above, we accumulate any amount of time
	 * in xtime_nsec over a second into xtime.tv_sec, it's possible for
	 * xtime_nsec to be fairly small after the loop. Further, if we're
	 * slightly speeding the clocksource up in timekeeping_adjust(),
	 * it's possible the required corrective factor to xtime_nsec could
	 * cause it to underflow.
	 *
	 * Now, we cannot simply roll the accumulated second back, since
	 * the NTP subsystem has been notified via second_overflow. So
	 * instead we push xtime_nsec forward by the amount we underflowed,
	 * and add that amount into the error.
	 *
	 * We'll correct this error next time through this function, when
	 * xtime_nsec is not as small.
	 */
	if (unlikely((s64)timekeeper.xtime_nsec < 0)) {
		s64 neg = -(s64)timekeeper.xtime_nsec;
		timekeeper.xtime_nsec = 0;
		timekeeper.ntp_error += neg << timekeeper.ntp_error_shift;
	}


	/*
	 * Store full nanoseconds into xtime after rounding it up and
	 * add the remainder to the error difference.
	 */
	xtime.tv_nsec = ((s64) timekeeper.xtime_nsec >> timekeeper.shift) + 1;
	timekeeper.xtime_nsec -= (s64) xtime.tv_nsec << timekeeper.shift;
	timekeeper.ntp_error += timekeeper.xtime_nsec <<
				timekeeper.ntp_error_shift;

	/*
	 * Finally, make sure that after the rounding
	 * xtime.tv_nsec isn't larger than NSEC_PER_SEC
	 */
	if (unlikely(xtime.tv_nsec >= NSEC_PER_SEC)) {
		xtime.tv_nsec -= NSEC_PER_SEC;
		xtime.tv_sec++;
		second_overflow();
	}

	/* Update the vsyscall copy of the timekeeping data */
	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
			timekeeper.mult);
}

/**
 * getboottime - Return the real time of system boot.
 * @ts:		pointer to the timespec to be set
 *
 * Returns the wall-time of boot in a timespec.
 *
 * This is based on the wall_to_monotonic offset and the total suspend
 * time. Calls to settimeofday will affect the value returned (which
 * basically means that however wrong your real time clock is at boot time,
 * you get the right time here).
 */
void getboottime(struct timespec *ts)
{
	struct timespec boottime = {
		.tv_sec = wall_to_monotonic.tv_sec + total_sleep_time.tv_sec,
		.tv_nsec = wall_to_monotonic.tv_nsec + total_sleep_time.tv_nsec
	};

	set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
}
EXPORT_SYMBOL_GPL(getboottime);


/**
 * get_monotonic_boottime - Returns monotonic time since boot
 * @ts:		pointer to the timespec to be set
 *
 * Returns the monotonic time since boot in a timespec.
 *
 * This is similar to CLOCK_MONOTONIC/ktime_get_ts, but also
 * includes the time spent in suspend.
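 *
 * In other words it returns xtime + wall_to_monotonic + total_sleep_time,
 * normalized into a timespec.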
 */
void get_monotonic_boottime(struct timespec *ts)
{
	struct timespec tomono, sleep;
	unsigned int seq;
	s64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&xtime_lock);
		*ts = xtime;
		tomono = wall_to_monotonic;
		sleep = total_sleep_time;
		nsecs = timekeeping_get_ns();

	} while (read_seqretry(&xtime_lock, seq));

	set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec + sleep.tv_sec,
			ts->tv_nsec + tomono.tv_nsec + sleep.tv_nsec + nsecs);
}
EXPORT_SYMBOL_GPL(get_monotonic_boottime);

/**
 * ktime_get_boottime - Returns monotonic time since boot in a ktime
 *
 * Returns the monotonic time since boot in a ktime
 *
 * This is similar to CLOCK_MONOTONIC/ktime_get, but also
 * includes the time spent in suspend.
 */
ktime_t ktime_get_boottime(void)
{
	struct timespec ts;

	get_monotonic_boottime(&ts);
	return timespec_to_ktime(ts);
}
EXPORT_SYMBOL_GPL(ktime_get_boottime);

/**
 * monotonic_to_bootbased - Convert the monotonic time to boot based.
 * @ts:		pointer to the timespec to be converted
 */
void monotonic_to_bootbased(struct timespec *ts)
{
	*ts = timespec_add(*ts, total_sleep_time);
}
EXPORT_SYMBOL_GPL(monotonic_to_bootbased);

unsigned long get_seconds(void)
{
	return xtime.tv_sec;
}
EXPORT_SYMBOL(get_seconds);

struct timespec __current_kernel_time(void)
{
	return xtime;
}

struct timespec current_kernel_time(void)
{
	struct timespec now;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);

		now = xtime;
	} while (read_seqretry(&xtime_lock, seq));

	return now;
}
EXPORT_SYMBOL(current_kernel_time);

struct timespec get_monotonic_coarse(void)
{
	struct timespec now, mono;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);

		now = xtime;
		mono = wall_to_monotonic;
	} while (read_seqretry(&xtime_lock, seq));

	set_normalized_timespec(&now, now.tv_sec + mono.tv_sec,
				now.tv_nsec + mono.tv_nsec);
	return now;
}

/*
 * The 64-bit jiffies value is not atomic - you MUST NOT read it
 * without sampling the sequence number in xtime_lock.
 * jiffies is defined in the linker script...
 */
void do_timer(unsigned long ticks)
{
	jiffies_64 += ticks;
	update_wall_time();
	calc_global_load(ticks);
}

/**
 * get_xtime_and_monotonic_and_sleep_offset() - get xtime, wall_to_monotonic,
 *    and sleep offsets.
 * @xtim:	pointer to timespec to be set with xtime
 * @wtom:	pointer to timespec to be set with wall_to_monotonic
 * @sleep:	pointer to timespec to be set with time in suspend
 */
void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
				struct timespec *wtom, struct timespec *sleep)
{
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);
		*xtim = xtime;
		*wtom = wall_to_monotonic;
		*sleep = total_sleep_time;
	} while (read_seqretry(&xtime_lock, seq));
}

/**
 * xtime_update() - advances the timekeeping infrastructure
 * @ticks:	number of ticks that have elapsed since the last call.
 *
 * Must be called with interrupts disabled.
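 *
 * Takes xtime_lock for writing around do_timer(), which bumps jiffies_64
 * and accumulates the elapsed time into the wall clock.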
 */
void xtime_update(unsigned long ticks)
{
	write_seqlock(&xtime_lock);
	do_timer(ticks);
	write_sequnlock(&xtime_lock);
}