Lines Matching +full:cs +full:enable +full:shift
1 // SPDX-License-Identifier: GPL-2.0
31 #include "tick-internal.h"
67 return ktime_get_aux_ts64(CLOCK_AUX + tkid - TIMEKEEPER_AUX_FIRST, ts); in tk_get_aux_ts64()
72 return tk->id >= TIMEKEEPER_AUX_FIRST && tk->id <= TIMEKEEPER_AUX_LAST; in tk_is_aux()
90 * struct tk_fast - NMI safe timekeeper
103 /* Suspend-time cycles value for halted fast timekeeper. */
106 static u64 dummy_clock_read(struct clocksource *cs) in dummy_clock_read() argument
121 * and shift=0. When the first proper clocksource is installed then
129 .shift = 0, \
168 * Multigrain timestamps require tracking the latest fine-grained timestamp
169 * that has been issued, and never returning a coarse-grained timestamp that is
172 * mg_floor represents the latest fine-grained time that has been handed out as
174 * converted to a realtime clock value on an as-needed basis.
186 while (tk->tkr_mono.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_mono.shift)) { in tk_normalize_xtime()
187 tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift; in tk_normalize_xtime()
188 tk->xtime_sec++; in tk_normalize_xtime()
190 while (tk->tkr_raw.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_raw.shift)) { in tk_normalize_xtime()
191 tk->tkr_raw.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_raw.shift; in tk_normalize_xtime()
192 tk->raw_sec++; in tk_normalize_xtime()
200 ts.tv_sec = tk->xtime_sec; in tk_xtime()
201 ts.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift); in tk_xtime()
209 ts.tv_sec = tk->xtime_sec; in tk_xtime_coarse()
210 ts.tv_nsec = tk->coarse_nsec; in tk_xtime_coarse()
225 tk->coarse_nsec = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift; in tk_update_coarse_nsecs()
230 tk->xtime_sec = ts->tv_sec; in tk_set_xtime()
231 tk->tkr_mono.xtime_nsec = (u64)ts->tv_nsec << tk->tkr_mono.shift; in tk_set_xtime()
237 tk->xtime_sec += ts->tv_sec; in tk_xtime_add()
238 tk->tkr_mono.xtime_nsec += (u64)ts->tv_nsec << tk->tkr_mono.shift; in tk_xtime_add()
248 * Verify consistency of: offset_real = -wall_to_monotonic in tk_set_wall_to_mono()
251 set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec, in tk_set_wall_to_mono()
252 -tk->wall_to_monotonic.tv_nsec); in tk_set_wall_to_mono()
253 WARN_ON_ONCE(tk->offs_real != timespec64_to_ktime(tmp)); in tk_set_wall_to_mono()
254 tk->wall_to_monotonic = wtm; in tk_set_wall_to_mono()
255 set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec); in tk_set_wall_to_mono()
257 WRITE_ONCE(tk->offs_real, timespec64_to_ktime(tmp)); in tk_set_wall_to_mono()
258 WRITE_ONCE(tk->offs_tai, ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0))); in tk_set_wall_to_mono()
264 WRITE_ONCE(tk->offs_boot, ktime_add(tk->offs_boot, delta)); in tk_update_sleep_time()
269 tk->monotonic_to_boot = ktime_to_timespec64(tk->offs_boot); in tk_update_sleep_time()
273 * tk_clock_read - atomic clocksource read() helper
282 * a read of the fast-timekeeper tkrs (which is protected by its own locking
287 struct clocksource *clock = READ_ONCE(tkr->clock); in tk_clock_read()
289 return clock->read(clock); in tk_clock_read()
293 * tk_setup_internals - Set up internals to use clocksource clock.
309 ++tk->cs_was_changed_seq; in tk_setup_internals()
310 old_clock = tk->tkr_mono.clock; in tk_setup_internals()
311 tk->tkr_mono.clock = clock; in tk_setup_internals()
312 tk->tkr_mono.mask = clock->mask; in tk_setup_internals()
313 tk->tkr_mono.cycle_last = tk_clock_read(&tk->tkr_mono); in tk_setup_internals()
315 tk->tkr_raw.clock = clock; in tk_setup_internals()
316 tk->tkr_raw.mask = clock->mask; in tk_setup_internals()
317 tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last; in tk_setup_internals()
319 /* Do the ns -> cycle conversion first, using original mult */ in tk_setup_internals()
321 tmp <<= clock->shift; in tk_setup_internals()
323 tmp += clock->mult/2; in tk_setup_internals()
324 do_div(tmp, clock->mult); in tk_setup_internals()
329 tk->cycle_interval = interval; in tk_setup_internals()
331 /* Go back from cycles -> shifted ns */ in tk_setup_internals()
332 tk->xtime_interval = interval * clock->mult; in tk_setup_internals()
333 tk->xtime_remainder = ntpinterval - tk->xtime_interval; in tk_setup_internals()
334 tk->raw_interval = interval * clock->mult; in tk_setup_internals()
336 /* if changing clocks, convert xtime_nsec shift units */ in tk_setup_internals()
338 int shift_change = clock->shift - old_clock->shift; in tk_setup_internals()
340 tk->tkr_mono.xtime_nsec >>= -shift_change; in tk_setup_internals()
341 tk->tkr_raw.xtime_nsec >>= -shift_change; in tk_setup_internals()
343 tk->tkr_mono.xtime_nsec <<= shift_change; in tk_setup_internals()
344 tk->tkr_raw.xtime_nsec <<= shift_change; in tk_setup_internals()
348 tk->tkr_mono.shift = clock->shift; in tk_setup_internals()
349 tk->tkr_raw.shift = clock->shift; in tk_setup_internals()
351 tk->ntp_error = 0; in tk_setup_internals()
352 tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift; in tk_setup_internals()
353 tk->ntp_tick = ntpinterval << tk->ntp_error_shift; in tk_setup_internals()
360 tk->tkr_mono.mult = clock->mult; in tk_setup_internals()
361 tk->tkr_raw.mult = clock->mult; in tk_setup_internals()
362 tk->ntp_err_mult = 0; in tk_setup_internals()
363 tk->skip_second_overflow = 0; in tk_setup_internals()
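
The ns -> cycle conversion above rounds to the nearest cycle (tmp += clock->mult/2 before the divide). A standalone worked sketch of that arithmetic, using an illustrative 24 MHz clocksource; the mult/shift values here are examples, not taken from any real driver:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Illustrative 24 MHz clocksource: ns = (cycles * mult) >> shift,
         * so mult ~= (10^9 << shift) / 24e6. */
        uint32_t shift = 8;
        uint32_t mult = 10667;
        uint64_t ntp_len_ns = 10000000;  /* 10 ms accumulation interval */

        /* ns -> cycles with round-to-nearest, as in tk_setup_internals() */
        uint64_t interval = ((ntp_len_ns << shift) + mult / 2) / mult;

        /* ~240000 cycles = 24 MHz * 10 ms; mult rounding costs a few */
        printf("cycles per interval: %llu\n", (unsigned long long)interval);
        return 0;
}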
369 return mul_u64_u32_add_u64_shr(delta, tkr->mult, tkr->xtime_nsec, tkr->shift); in delta_to_ns_safe()
375 u64 mask = tkr->mask, delta = (cycles - tkr->cycle_last) & mask; in timekeeping_cycles_to_ns()
379 * overflows the multiplication with tkr->mult. in timekeeping_cycles_to_ns()
381 if (unlikely(delta > tkr->clock->max_cycles)) { in timekeeping_cycles_to_ns()
388 return tkr->xtime_nsec >> tkr->shift; in timekeeping_cycles_to_ns()
393 return ((delta * tkr->mult) + tkr->xtime_nsec) >> tkr->shift; in timekeeping_cycles_to_ns()
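
That fast path is the inverse transform: xtime_nsec is kept shifted left by tkr->shift, so the sub-nanosecond remainder of previous accumulations is carried along for free. A minimal sketch of the same formula (illustrative names, not kernel API):

#include <stdint.h>

/* cycles -> nanoseconds, with 'frac' carrying the shifted fractional
 * nanoseconds (the kernel's tkr->xtime_nsec). The overflow guard is
 * omitted; the kernel falls back to a 128-bit multiply for deltas
 * larger than clock->max_cycles. */
static inline uint64_t cycles_to_ns(uint64_t delta, uint32_t mult,
                                    uint32_t shift, uint64_t frac)
{
        return ((delta * mult) + frac) >> shift;
}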
402 * update_fast_timekeeper - Update the fast and NMI safe monotonic timekeeper.
419 struct tk_read_base *base = tkf->base; in update_fast_timekeeper()
422 write_seqcount_latch_begin(&tkf->seq); in update_fast_timekeeper()
428 write_seqcount_latch(&tkf->seq); in update_fast_timekeeper()
433 write_seqcount_latch_end(&tkf->seq); in update_fast_timekeeper()
443 seq = read_seqcount_latch(&tkf->seq); in __ktime_get_fast_ns()
444 tkr = tkf->base + (seq & 0x01); in __ktime_get_fast_ns()
445 now = ktime_to_ns(tkr->base); in __ktime_get_fast_ns()
447 } while (read_seqcount_latch_retry(&tkf->seq, seq)); in __ktime_get_fast_ns()
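
The loop above is the read side of a seqcount latch: two copies of the timekeeper, with (seq & 0x01) selecting the copy the writer is not currently touching, so NMIs can always read a consistent snapshot without spinning on the writer. A simplified userspace sketch of the pattern; memory ordering is reduced to C11 acquire/release fences and is illustrative only (the kernel uses its own barriers and data_race() annotations):

#include <stdatomic.h>
#include <stdint.h>

struct latch {
        _Atomic unsigned int seq;
        uint64_t bank[2];       /* two copies of the protected data */
};

/* Writer: flip readers to the other bank, update, flip back, update.
 * acq_rel on the increments keeps each bank write on its own side. */
static void latch_write(struct latch *l, uint64_t val)
{
        atomic_fetch_add_explicit(&l->seq, 1, memory_order_acq_rel);
        l->bank[0] = val;
        atomic_fetch_add_explicit(&l->seq, 1, memory_order_acq_rel);
        l->bank[1] = val;
}

/* Reader: pick the stable bank; retry only if the writer moved on,
 * which matters when the data spans multiple fields. */
static uint64_t latch_read(struct latch *l)
{
        unsigned int seq;
        uint64_t val;

        do {
                seq = atomic_load_explicit(&l->seq, memory_order_acquire);
                val = l->bank[seq & 1];
                atomic_thread_fence(memory_order_acquire); /* ~smp_rmb() */
        } while (seq != atomic_load_explicit(&l->seq, memory_order_relaxed));

        return val;
}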
453 * ktime_get_mono_fast_ns - Fast NMI safe access to clock monotonic
470 * |12345678---> reader order
491 * ktime_get_raw_fast_ns - Fast NMI safe access to clock monotonic raw
503 * ktime_get_boot_fast_ns - NMI safe and fast access to boot clock.
519 * (2) On 32-bit systems, the 64-bit boot offset (tk->offs_boot) may be
520 * partially updated. Since the tk->offs_boot update is a rare event, this
530 return (ktime_get_mono_fast_ns() + ktime_to_ns(data_race(tk->offs_boot))); in ktime_get_boot_fast_ns()
535 * ktime_get_tai_fast_ns - NMI safe and fast access to tai clock.
547 return (ktime_get_mono_fast_ns() + ktime_to_ns(data_race(tk->offs_tai))); in ktime_get_tai_fast_ns()
552 * ktime_get_real_fast_ns - NMI safe and fast access to clock realtime.
564 seq = raw_read_seqcount_latch(&tkf->seq); in ktime_get_real_fast_ns()
565 tkr = tkf->base + (seq & 0x01); in ktime_get_real_fast_ns()
566 baser = ktime_to_ns(tkr->base_real); in ktime_get_real_fast_ns()
568 } while (raw_read_seqcount_latch_retry(&tkf->seq, seq)); in ktime_get_real_fast_ns()
575 * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
587 const struct tk_read_base *tkr = &tk->tkr_mono; in halt_fast_timekeeper()
592 tkr_dummy.base_real = tkr->base + tk->offs_real; in halt_fast_timekeeper()
595 tkr = &tk->tkr_raw; in halt_fast_timekeeper()
609 * pvclock_gtod_register_notifier - register a pvclock timedata update listener
626 * pvclock_gtod_unregister_notifier - unregister a pvclock
638 * tk_update_leap_state - helper to update the next_leap_ktime
642 tk->next_leap_ktime = ntp_get_next_leap(tk->id); in tk_update_leap_state()
643 if (tk->next_leap_ktime != KTIME_MAX) in tk_update_leap_state()
645 tk->next_leap_ktime = ktime_sub(tk->next_leap_ktime, tk->offs_real); in tk_update_leap_state()
654 write_seqcount_begin(&tkd->seq); in tk_update_leap_state_all()
655 tk_update_leap_state(&tkd->shadow_timekeeper); in tk_update_leap_state_all()
656 tkd->timekeeper.next_leap_ktime = tkd->shadow_timekeeper.next_leap_ktime; in tk_update_leap_state_all()
657 write_seqcount_end(&tkd->seq); in tk_update_leap_state_all()
675 seconds = (u64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec); in tk_update_ktime_data()
676 nsec = (u32) tk->wall_to_monotonic.tv_nsec; in tk_update_ktime_data()
677 tk->tkr_mono.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec); in tk_update_ktime_data()
682 * this into account before updating tk->ktime_sec. in tk_update_ktime_data()
684 nsec += (u32)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift); in tk_update_ktime_data()
687 tk->ktime_sec = seconds; in tk_update_ktime_data()
690 tk->tkr_raw.base = ns_to_ktime(tk->raw_sec * NSEC_PER_SEC); in tk_update_ktime_data()
698 lockdep_assert_held(&tkd->lock); in timekeeping_restore_shadow()
699 memcpy(&tkd->shadow_timekeeper, &tkd->timekeeper, sizeof(tkd->timekeeper)); in timekeeping_restore_shadow()
704 struct timekeeper *tk = &tkd->shadow_timekeeper; in timekeeping_update_from_shadow()
706 lockdep_assert_held(&tkd->lock); in timekeeping_update_from_shadow()
715 write_seqcount_begin(&tkd->seq); in timekeeping_update_from_shadow()
718 tk->ntp_error = 0; in timekeeping_update_from_shadow()
719 ntp_clear(tk->id); in timekeeping_update_from_shadow()
724 tk->tkr_mono.base_real = tk->tkr_mono.base + tk->offs_real; in timekeeping_update_from_shadow()
726 if (tk->id == TIMEKEEPER_CORE) { in timekeeping_update_from_shadow()
730 update_fast_timekeeper(&tk->tkr_mono, &tk_fast_mono); in timekeeping_update_from_shadow()
731 update_fast_timekeeper(&tk->tkr_raw, &tk_fast_raw); in timekeeping_update_from_shadow()
737 tk->clock_was_set_seq++; in timekeeping_update_from_shadow()
747 memcpy(&tkd->timekeeper, tk, sizeof(*tk)); in timekeeping_update_from_shadow()
748 write_seqcount_end(&tkd->seq); in timekeeping_update_from_shadow()
752 * timekeeping_forward_now - update clock to the current time
763 cycle_now = tk_clock_read(&tk->tkr_mono); in timekeeping_forward_now()
764 delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask, in timekeeping_forward_now()
765 tk->tkr_mono.clock->max_raw_delta); in timekeeping_forward_now()
766 tk->tkr_mono.cycle_last = cycle_now; in timekeeping_forward_now()
767 tk->tkr_raw.cycle_last = cycle_now; in timekeeping_forward_now()
770 u64 max = tk->tkr_mono.clock->max_cycles; in timekeeping_forward_now()
773 tk->tkr_mono.xtime_nsec += incr * tk->tkr_mono.mult; in timekeeping_forward_now()
774 tk->tkr_raw.xtime_nsec += incr * tk->tkr_raw.mult; in timekeeping_forward_now()
776 delta -= incr; in timekeeping_forward_now()
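
The chunking above exists because delta * mult must not overflow 64 bits; max_cycles is the clocksource's precomputed bound on a safe chunk. A standalone sketch of the pattern, folding whole seconds out of the shifted-nanosecond accumulator the way tk_normalize_xtime() does (all values illustrative):

#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

static void forward(uint64_t delta, uint32_t mult, uint32_t shift,
                    uint64_t *xtime_nsec, uint64_t *secs)
{
        /* Largest chunk whose multiplication cannot overflow; the
         * kernel's max_cycles is derived similarly, with extra margin. */
        uint64_t max = UINT64_MAX / mult / 2;

        while (delta) {
                uint64_t incr = delta < max ? delta : max;

                *xtime_nsec += incr * mult;
                delta -= incr;

                /* tk_normalize_xtime(): carry whole seconds out of the
                 * shifted nanosecond accumulator */
                while (*xtime_nsec >= (NSEC_PER_SEC << shift)) {
                        *xtime_nsec -= NSEC_PER_SEC << shift;
                        (*secs)++;
                }
        }
}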
782 * ktime_get_real_ts64 - Returns the time of day in a timespec64.
798 ts->tv_sec = tk->xtime_sec; in ktime_get_real_ts64()
799 nsecs = timekeeping_get_ns(&tk->tkr_mono); in ktime_get_real_ts64()
803 ts->tv_nsec = 0; in ktime_get_real_ts64()
819 base = tk->tkr_mono.base; in ktime_get()
820 nsecs = timekeeping_get_ns(&tk->tkr_mono); in ktime_get()
838 nsecs = tk->tkr_mono.mult >> tk->tkr_mono.shift; in ktime_get_resolution_ns()
862 base = ktime_add(tk->tkr_mono.base, *offset); in ktime_get_with_offset()
863 nsecs = timekeeping_get_ns(&tk->tkr_mono); in ktime_get_with_offset()
883 base = ktime_add(tk->tkr_mono.base, *offset); in ktime_get_coarse_with_offset()
884 nsecs = tk->coarse_nsec; in ktime_get_coarse_with_offset()
893 * ktime_mono_to_any() - convert monotonic time to any other time
921 * ktime_get_raw - Returns the raw monotonic time in ktime_t format
932 base = tk->tkr_raw.base; in ktime_get_raw()
933 nsecs = timekeeping_get_ns(&tk->tkr_raw); in ktime_get_raw()
942 * ktime_get_ts64 - get the monotonic clock in timespec64 format
960 ts->tv_sec = tk->xtime_sec; in ktime_get_ts64()
961 nsec = timekeeping_get_ns(&tk->tkr_mono); in ktime_get_ts64()
962 tomono = tk->wall_to_monotonic; in ktime_get_ts64()
966 ts->tv_sec += tomono.tv_sec; in ktime_get_ts64()
967 ts->tv_nsec = 0; in ktime_get_ts64()
973 * ktime_get_seconds - Get the seconds portion of CLOCK_MONOTONIC
976 * serialized read. tk->ktime_sec is of type 'unsigned long' so this
986 return tk->ktime_sec; in ktime_get_seconds()
991 * ktime_get_real_seconds - Get the seconds portion of CLOCK_REALTIME
995 * For 64bit systems the fast access to tk->xtime_sec is preserved. On
997 * counter to provide "atomic" access to the 64bit tk->xtime_sec
1007 return tk->xtime_sec; in ktime_get_real_seconds()
1011 seconds = tk->xtime_sec; in ktime_get_real_seconds()
1020 * __ktime_get_real_seconds - Unprotected access to CLOCK_REALTIME seconds
1024 * handler and in KGDB. It's unprotected on 32-bit vs. concurrent half
1033 return tk->xtime_sec; in __ktime_get_real_seconds()
1037 * ktime_get_snapshot - snapshots the realtime/monotonic raw clocks with counter
1055 now = tk_clock_read(&tk->tkr_mono); in ktime_get_snapshot()
1056 systime_snapshot->cs_id = tk->tkr_mono.clock->id; in ktime_get_snapshot()
1057 systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq; in ktime_get_snapshot()
1058 systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq; in ktime_get_snapshot()
1059 base_real = ktime_add(tk->tkr_mono.base, in ktime_get_snapshot()
1061 base_boot = ktime_add(tk->tkr_mono.base, in ktime_get_snapshot()
1063 base_raw = tk->tkr_raw.base; in ktime_get_snapshot()
1064 nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, now); in ktime_get_snapshot()
1065 nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw, now); in ktime_get_snapshot()
1068 systime_snapshot->cycles = now; in ktime_get_snapshot()
1069 systime_snapshot->real = ktime_add_ns(base_real, nsec_real); in ktime_get_snapshot()
1070 systime_snapshot->boot = ktime_add_ns(base_boot, nsec_real); in ktime_get_snapshot()
1071 systime_snapshot->raw = ktime_add_ns(base_raw, nsec_raw); in ktime_get_snapshot()
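
ktime_get_snapshot() is the building block for cross-timestamping (PTP, PPS, virtualization): one counter read, three correlated clock values. A minimal kernel-side usage sketch, assuming driver context (the function name and message are illustrative):

#include <linux/printk.h>
#include <linux/timekeeping.h>

static void sample_system_clocks(void)
{
        struct system_time_snapshot snap;

        ktime_get_snapshot(&snap);
        pr_info("real=%lld boot=%lld raw=%lld cycles=%llu\n",
                ktime_to_ns(snap.real), ktime_to_ns(snap.boot),
                ktime_to_ns(snap.raw), (unsigned long long)snap.cycles);
}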
1082 if (((int)sizeof(u64)*8 - fls64(mult) < fls64(tmp)) || in scale64_check_overflow()
1083 ((int)sizeof(u64)*8 - fls64(mult) < fls64(rem))) in scale64_check_overflow()
1084 return -EOVERFLOW; in scale64_check_overflow()
1093 * adjust_historical_crosststamp - adjust crosstimestamp prior to the current interval
1126 total_history_cycles - partial_history_cycles : in adjust_historical_crosststamp()
1134 ktime_sub(ts->sys_monoraw, history->raw)); in adjust_historical_crosststamp()
1149 (corr_raw, tk->tkr_mono.mult, tk->tkr_raw.mult); in adjust_historical_crosststamp()
1152 ktime_sub(ts->sys_realtime, history->real)); in adjust_historical_crosststamp()
1161 ts->sys_monoraw = ktime_add_ns(history->raw, corr_raw); in adjust_historical_crosststamp()
1162 ts->sys_realtime = ktime_add_ns(history->real, corr_real); in adjust_historical_crosststamp()
1164 ts->sys_monoraw = ktime_sub_ns(ts->sys_monoraw, corr_raw); in adjust_historical_crosststamp()
1165 ts->sys_realtime = ktime_sub_ns(ts->sys_realtime, corr_real); in adjust_historical_crosststamp()
1172 * timestamp_in_interval - true if ts is chronologically in [start, end]
1199 struct clocksource *cs = tk_core.timekeeper.tkr_mono.clock; in convert_base_to_cs() local
1204 if (cs->id == scv->cs_id) in convert_base_to_cs()
1209 * re-evaluating @base as the clocksource might change concurrently. in convert_base_to_cs()
1211 base = READ_ONCE(cs->base); in convert_base_to_cs()
1212 if (!base || base->id != scv->cs_id) in convert_base_to_cs()
1215 num = scv->use_nsecs ? cs->freq_khz : base->numerator; in convert_base_to_cs()
1216 den = scv->use_nsecs ? USEC_PER_SEC : base->denominator; in convert_base_to_cs()
1218 if (!convert_clock(&scv->cycles, num, den)) in convert_base_to_cs()
1221 scv->cycles += base->offset; in convert_base_to_cs()
1227 struct clocksource *cs = tk_core.timekeeper.tkr_mono.clock; in convert_cs_to_base() local
1232 * re-evaluating @base as the clocksource might change concurrently. in convert_cs_to_base()
1234 base = READ_ONCE(cs->base); in convert_cs_to_base()
1235 if (!base || base->id != base_id) in convert_cs_to_base()
1238 *cycles -= base->offset; in convert_cs_to_base()
1239 if (!convert_clock(cycles, base->denominator, base->numerator)) in convert_cs_to_base()
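
As read from these two helpers, the base-clock conversion is a rational rescale around a fixed offset: clocksource cycles = base cycles * num / den + offset, and the reverse subtracts the offset and scales by den / num. A sketch of the transform pair (the overflow handling inside convert_clock() is omitted; names are illustrative):

#include <stdint.h>

static uint64_t base_to_cs(uint64_t base, uint64_t num, uint64_t den,
                           uint64_t offset)
{
        return base * num / den + offset;
}

static uint64_t cs_to_base(uint64_t cs, uint64_t num, uint64_t den,
                           uint64_t offset)
{
        return (cs - offset) * den / num;
}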
1248 if (BITS_TO_BYTES(fls64(*delta) + tkr->shift) >= sizeof(*delta)) in convert_ns_to_cs()
1251 *delta = div_u64((*delta << tkr->shift) - tkr->xtime_nsec, tkr->mult); in convert_ns_to_cs()
1256 * ktime_real_to_base_clock() - Convert CLOCK_REALTIME timestamp to a base clock timestamp
1273 if ((u64)treal < tk->tkr_mono.base_real) in ktime_real_to_base_clock()
1275 delta = (u64)treal - tk->tkr_mono.base_real; in ktime_real_to_base_clock()
1278 *cycles = tk->tkr_mono.cycle_last + delta; in ktime_real_to_base_clock()
1288 * get_device_system_crosststamp - Synchronously capture system/device timestamp
1323 ret = get_time_fn(&xtstamp->device, &system_counterval, ctx); in get_device_system_crosststamp()
1334 return -ENODEV; in get_device_system_crosststamp()
1341 now = tk_clock_read(&tk->tkr_mono); in get_device_system_crosststamp()
1342 interval_start = tk->tkr_mono.cycle_last; in get_device_system_crosststamp()
1344 clock_was_set_seq = tk->clock_was_set_seq; in get_device_system_crosststamp()
1345 cs_was_changed_seq = tk->cs_was_changed_seq; in get_device_system_crosststamp()
1352 base_real = ktime_add(tk->tkr_mono.base, in get_device_system_crosststamp()
1354 base_raw = tk->tkr_raw.base; in get_device_system_crosststamp()
1356 nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, cycles); in get_device_system_crosststamp()
1357 nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw, cycles); in get_device_system_crosststamp()
1360 xtstamp->sys_realtime = ktime_add_ns(base_real, nsec_real); in get_device_system_crosststamp()
1361 xtstamp->sys_monoraw = ktime_add_ns(base_raw, nsec_raw); in get_device_system_crosststamp()
1377 !timestamp_in_interval(history_begin->cycles, in get_device_system_crosststamp()
1379 history_begin->cs_was_changed_seq != cs_was_changed_seq) in get_device_system_crosststamp()
1380 return -EINVAL; in get_device_system_crosststamp()
1381 partial_history_cycles = cycles - system_counterval.cycles; in get_device_system_crosststamp()
1382 total_history_cycles = cycles - history_begin->cycles; in get_device_system_crosststamp()
1384 history_begin->clock_was_set_seq != clock_was_set_seq; in get_device_system_crosststamp()
1399 * timekeeping_clocksource_has_base - Check whether the current clocksource
1413 * count. Just prevent the compiler from re-evaluating @base as the in timekeeping_clocksource_has_base()
1416 struct clocksource_base *base = READ_ONCE(tk_core.timekeeper.tkr_mono.clock->base); in timekeeping_clocksource_has_base()
1418 return base ? base->id == id : false; in timekeeping_clocksource_has_base()
1423 * do_settimeofday64 - Sets the time of day.
1433 return -EINVAL; in do_settimeofday64()
1443 if (timespec64_compare(&tks->wall_to_monotonic, &ts_delta) > 0) { in do_settimeofday64()
1445 return -EINVAL; in do_settimeofday64()
1448 tk_set_wall_to_mono(tks, timespec64_sub(tks->wall_to_monotonic, ts_delta)); in do_settimeofday64()
1464 return !IS_ENABLED(CONFIG_POSIX_AUX_CLOCKS) || tk->id == TIMEKEEPER_CORE; in timekeeper_is_core_tk()
1468 * __timekeeping_inject_offset - Adds or subtracts from the current time.
1476 struct timekeeper *tks = &tkd->shadow_timekeeper; in __timekeeping_inject_offset()
1479 if (ts->tv_nsec < 0 || ts->tv_nsec >= NSEC_PER_SEC) in __timekeeping_inject_offset()
1480 return -EINVAL; in __timekeeping_inject_offset()
1487 if (timespec64_compare(&tks->wall_to_monotonic, ts) > 0 || in __timekeeping_inject_offset()
1490 return -EINVAL; in __timekeeping_inject_offset()
1494 tk_set_wall_to_mono(tks, timespec64_sub(tks->wall_to_monotonic, *ts)); in __timekeeping_inject_offset()
1496 struct tk_read_base *tkr_mono = &tks->tkr_mono; in __timekeeping_inject_offset()
1500 now = ktime_add_ns(tkr_mono->base, timekeeping_get_ns(tkr_mono)); in __timekeeping_inject_offset()
1502 offs = ktime_add(tks->offs_aux, timespec64_to_ktime(*ts)); in __timekeeping_inject_offset()
1507 return -EINVAL; in __timekeeping_inject_offset()
1509 tks->offs_aux = offs; in __timekeeping_inject_offset()
1545 * - TYT, 1992-01-01
1564 * __timekeeping_set_tai_offset - Sets the TAI offset from UTC and monotonic
1568 tk->tai_offset = tai_offset; in __timekeeping_set_tai_offset()
1569 tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0)); in __timekeeping_set_tai_offset()
1573 * change_clocksource - Swaps clocksources if a new one is available
1583 * Succeeds for built-in code (owner == NULL) as well. Abort if the in change_clocksource()
1586 if (!try_module_get(new->owner)) in change_clocksource()
1590 if (new->enable && new->enable(new) != 0) { in change_clocksource()
1591 module_put(new->owner); in change_clocksource()
1599 old = tks->tkr_mono.clock; in change_clocksource()
1607 if (old->disable) in change_clocksource()
1608 old->disable(old); in change_clocksource()
1609 module_put(old->owner); in change_clocksource()
1616 * timekeeping_notify - Install a new clock source
1626 if (tk->tkr_mono.clock == clock) in timekeeping_notify()
1630 return tk->tkr_mono.clock == clock ? 0 : -1; in timekeeping_notify()
1634 * ktime_get_raw_ts64 - Returns the raw monotonic time in a timespec
1637 * Returns the raw monotonic time (completely unmodified by NTP)
1647 ts->tv_sec = tk->raw_sec; in ktime_get_raw_ts64()
1648 nsecs = timekeeping_get_ns(&tk->tkr_raw); in ktime_get_raw_ts64()
1652 ts->tv_nsec = 0; in ktime_get_raw_ts64()
1658 * ktime_get_clock_ts64 - Returns time of a clock in a timespec
1662 * The timestamp is invalidated (@ts->tv_sec is set to -1) if the
1668 ts->tv_sec = -1; in ktime_get_clock_ts64()
1669 ts->tv_nsec = 0; in ktime_get_clock_ts64()
1692 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
1703 ret = tk->tkr_mono.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES; in timekeeping_valid_for_hres()
1711 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
1722 ret = tk->tkr_mono.clock->max_idle_ns; in timekeeping_max_deferment()
1730 * read_persistent_clock64 - Return time from the persistent clock.
1737 * XXX - Do be sure to remove it once all arches implement it.
1741 ts->tv_sec = 0; in read_persistent_clock64()
1742 ts->tv_nsec = 0; in read_persistent_clock64()
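
Being __weak, this stub is meant to be replaced by architectures that can read an RTC this early in boot. A sketch of what an arch-side override looks like; my_rtc_read_seconds() is a hypothetical helper, not an existing kernel function:

#include <linux/timekeeping.h>

extern time64_t my_rtc_read_seconds(void);      /* hypothetical */

void read_persistent_clock64(struct timespec64 *ts)
{
        /* early-boot RTC read, seconds granularity only */
        ts->tv_sec = my_rtc_read_seconds();
        ts->tv_nsec = 0;
}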
1746 * read_persistent_wall_and_boot_offset - Read persistent clock, and also offset
1749 * @boot_offset: offset that is defined as wall_time - boot_time
1768 raw_spin_lock_init(&tkd->lock); in tkd_basic_setup()
1769 seqcount_raw_spinlock_init(&tkd->seq, &tkd->lock); in tkd_basic_setup()
1770 tkd->timekeeper.id = tkd->shadow_timekeeper.id = tk_id; in tkd_basic_setup()
1771 tkd->timekeeper.clock_valid = tkd->shadow_timekeeper.clock_valid = valid; in tkd_basic_setup()
1793 * timekeeping_init - Initializes the clocksource and common timekeeping values
1827 if (clock->enable) in timekeeping_init()
1828 clock->enable(clock); in timekeeping_init()
1832 tks->raw_sec = 0; in timekeeping_init()
1843 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
1860 tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *delta)); in __timekeeping_inject_sleeptime()
1869 * 1) non-stop clocksource
1902 * timekeeping_inject_sleeptime64 - Adds suspend interval to timekeeping values
1929 * timekeeping_resume - Resumes the generic timekeeping subsystem.
1934 struct clocksource *clock = tks->tkr_mono.clock; in timekeeping_resume()
1955 * suspend-nonstop clocksource -> persistent clock -> rtc in timekeeping_resume()
1959 cycle_now = tk_clock_read(&tks->tkr_mono); in timekeeping_resume()
1974 /* Re-base the last cycle value */ in timekeeping_resume()
1975 tks->tkr_mono.cycle_last = cycle_now; in timekeeping_resume()
1976 tks->tkr_raw.cycle_last = cycle_now; in timekeeping_resume()
1978 tks->ntp_error = 0; in timekeeping_resume()
2021 curr_clock = tks->tkr_mono.clock; in timekeeping_suspend()
2022 cycle_now = tks->tkr_mono.cycle_last; in timekeeping_suspend()
2078 s64 interval = tk->cycle_interval; in timekeeping_apply_adjustment()
2082 } else if (mult_adj == -1) { in timekeeping_apply_adjustment()
2083 interval = -interval; in timekeeping_apply_adjustment()
2084 offset = -offset; in timekeeping_apply_adjustment()
2109 * So offset stores the non-accumulated cycles. Thus the current in timekeeping_apply_adjustment()
2133 * xtime_nsec_2 = xtime_nsec_1 - offset in timekeeping_apply_adjustment()
2135 * xtime_nsec -= offset in timekeeping_apply_adjustment()
2137 if ((mult_adj > 0) && (tk->tkr_mono.mult + mult_adj < mult_adj)) { in timekeeping_apply_adjustment()
2143 tk->tkr_mono.mult += mult_adj; in timekeeping_apply_adjustment()
2144 tk->xtime_interval += interval; in timekeeping_apply_adjustment()
2145 tk->tkr_mono.xtime_nsec -= offset; in timekeeping_apply_adjustment()
2154 u64 ntp_tl = ntp_tick_length(tk->id); in timekeeping_adjust()
2161 if (likely(tk->ntp_tick == ntp_tl)) { in timekeeping_adjust()
2162 mult = tk->tkr_mono.mult - tk->ntp_err_mult; in timekeeping_adjust()
2164 tk->ntp_tick = ntp_tl; in timekeeping_adjust()
2165 mult = div64_u64((tk->ntp_tick >> tk->ntp_error_shift) - in timekeeping_adjust()
2166 tk->xtime_remainder, tk->cycle_interval); in timekeeping_adjust()
2173 * ahead until the tick length changes to a non-divisible value. in timekeeping_adjust()
2175 tk->ntp_err_mult = tk->ntp_error > 0 ? 1 : 0; in timekeeping_adjust()
2176 mult += tk->ntp_err_mult; in timekeeping_adjust()
2178 timekeeping_apply_adjustment(tk, offset, mult - tk->tkr_mono.mult); in timekeeping_adjust()
2180 if (unlikely(tk->tkr_mono.clock->maxadj && in timekeeping_adjust()
2181 (abs(tk->tkr_mono.mult - tk->tkr_mono.clock->mult) in timekeeping_adjust()
2182 > tk->tkr_mono.clock->maxadj))) { in timekeeping_adjust()
2185 tk->tkr_mono.clock->name, (long)tk->tkr_mono.mult, in timekeeping_adjust()
2186 (long)tk->tkr_mono.clock->mult + tk->tkr_mono.clock->maxadj); in timekeeping_adjust()
2199 if (unlikely((s64)tk->tkr_mono.xtime_nsec < 0)) { in timekeeping_adjust()
2200 tk->tkr_mono.xtime_nsec += (u64)NSEC_PER_SEC << in timekeeping_adjust()
2201 tk->tkr_mono.shift; in timekeeping_adjust()
2202 tk->xtime_sec--; in timekeeping_adjust()
2203 tk->skip_second_overflow = 1; in timekeeping_adjust()
2208 * accumulate_nsecs_to_secs - Accumulates nsecs into secs
2216 u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr_mono.shift; in accumulate_nsecs_to_secs()
2219 while (tk->tkr_mono.xtime_nsec >= nsecps) { in accumulate_nsecs_to_secs()
2222 tk->tkr_mono.xtime_nsec -= nsecps; in accumulate_nsecs_to_secs()
2223 tk->xtime_sec++; in accumulate_nsecs_to_secs()
2229 if (unlikely(tk->skip_second_overflow)) { in accumulate_nsecs_to_secs()
2230 tk->skip_second_overflow = 0; in accumulate_nsecs_to_secs()
2235 leap = second_overflow(tk->id, tk->xtime_sec); in accumulate_nsecs_to_secs()
2239 tk->xtime_sec += leap; in accumulate_nsecs_to_secs()
2244 timespec64_sub(tk->wall_to_monotonic, ts)); in accumulate_nsecs_to_secs()
2246 __timekeeping_set_tai_offset(tk, tk->tai_offset - leap); in accumulate_nsecs_to_secs()
2255 * logarithmic_accumulation - shifted accumulation of cycles
2264 u32 shift, unsigned int *clock_set) in logarithmic_accumulation() argument
2266 u64 interval = tk->cycle_interval << shift; in logarithmic_accumulation()
2274 offset -= interval; in logarithmic_accumulation()
2275 tk->tkr_mono.cycle_last += interval; in logarithmic_accumulation()
2276 tk->tkr_raw.cycle_last += interval; in logarithmic_accumulation()
2278 tk->tkr_mono.xtime_nsec += tk->xtime_interval << shift; in logarithmic_accumulation()
2282 tk->tkr_raw.xtime_nsec += tk->raw_interval << shift; in logarithmic_accumulation()
2283 snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift; in logarithmic_accumulation()
2284 while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) { in logarithmic_accumulation()
2285 tk->tkr_raw.xtime_nsec -= snsec_per_sec; in logarithmic_accumulation()
2286 tk->raw_sec++; in logarithmic_accumulation()
2290 tk->ntp_error += tk->ntp_tick << shift; in logarithmic_accumulation()
2291 tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) << in logarithmic_accumulation()
2292 (tk->ntp_error_shift + shift); in logarithmic_accumulation()
2298 * timekeeping_advance - Updates the timekeeper to the current time and
2303 struct timekeeper *tk = &tkd->shadow_timekeeper; in __timekeeping_advance()
2304 struct timekeeper *real_tk = &tkd->timekeeper; in __timekeeping_advance()
2306 int shift = 0, maxshift; in __timekeeping_advance() local
2313 offset = clocksource_delta(tk_clock_read(&tk->tkr_mono), in __timekeeping_advance()
2314 tk->tkr_mono.cycle_last, tk->tkr_mono.mask, in __timekeeping_advance()
2315 tk->tkr_mono.clock->max_raw_delta); in __timekeeping_advance()
2318 if (offset < real_tk->cycle_interval && mode == TK_ADV_TICK) in __timekeeping_advance()
2329 shift = ilog2(offset) - ilog2(tk->cycle_interval); in __timekeeping_advance()
2330 shift = max(0, shift); in __timekeeping_advance()
2331 /* Bound shift to one less than what overflows tick_length */ in __timekeeping_advance()
2332 maxshift = (64 - (ilog2(ntp_tick_length(tk->id)) + 1)) - 1; in __timekeeping_advance()
2333 shift = min(shift, maxshift); in __timekeeping_advance()
2334 while (offset >= tk->cycle_interval) { in __timekeeping_advance()
2335 offset = logarithmic_accumulation(tk, offset, shift, &clock_set); in __timekeeping_advance()
2336 if (offset < tk->cycle_interval<<shift) in __timekeeping_advance()
2337 shift--; in __timekeeping_advance()
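
The shift dance above bounds the loop to O(log(offset / cycle_interval)) iterations: start near the largest power-of-two multiple of the interval that fits, then halve the chunk as the remainder shrinks. A standalone sketch of the strategy (the maxshift clamp against tick_length overflow and the NTP error tracking are omitted):

#include <stdint.h>

static unsigned int accumulate(uint64_t offset, uint64_t interval)
{
        unsigned int iters = 0;
        int shift = 0;

        /* shift = ilog2(offset) - ilog2(interval), clamped at zero;
         * the upper bound just keeps the sketch's left shift safe */
        while (offset >= (interval << (shift + 1)) && shift < 30)
                shift++;

        while (offset >= interval) {
                uint64_t chunk = interval << shift;

                if (offset >= chunk) {  /* logarithmic_accumulation() */
                        offset -= chunk;
                        iters++;
                }
                if (offset < chunk && shift > 0)
                        shift--;
        }
        return iters;
}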
2369 * update_wall_time - Uses the current clocksource to increment the wall time
2381 * getboottime64 - Return the real time of system boot.
2384 * Returns the wall-time of boot in a timespec64.
2394 ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot); in getboottime64()
2414 * ktime_get_coarse_real_ts64_mg - return the later of coarse grained time or floor
2418 * to the current coarse-grained time. Fill @ts with whichever is
2419 * latest. Note that this is a filesystem-specific interface and should be
2442 * ktime_get_real_ts64_mg - attempt to update floor value and return result
2445 * Get a monotonic fine-grained time value and attempt to swap it into
2452 * and determining that the resulting coarse-grained timestamp did not effect
2471 ts->tv_sec = tk->xtime_sec; in ktime_get_real_ts64_mg()
2472 mono = tk->tkr_mono.base; in ktime_get_real_ts64_mg()
2473 nsecs = timekeeping_get_ns(&tk->tkr_mono); in ktime_get_real_ts64_mg()
2487 ts->tv_nsec = 0; in ktime_get_real_ts64_mg()
2511 mono = tk->wall_to_monotonic; in ktime_get_coarse_ts64()
2529 * ktime_get_update_offsets_now - hrtimer helper
2531 * @offs_real: pointer to storage for monotonic -> realtime offset
2532 * @offs_boot: pointer to storage for monotonic -> boottime offset
2533 * @offs_tai: pointer to storage for monotonic -> clock tai offset
2552 base = tk->tkr_mono.base; in ktime_get_update_offsets_now()
2553 nsecs = timekeeping_get_ns(&tk->tkr_mono); in ktime_get_update_offsets_now()
2556 if (*cwsseq != tk->clock_was_set_seq) { in ktime_get_update_offsets_now()
2557 *cwsseq = tk->clock_was_set_seq; in ktime_get_update_offsets_now()
2558 *offs_real = tk->offs_real; in ktime_get_update_offsets_now()
2559 *offs_boot = tk->offs_boot; in ktime_get_update_offsets_now()
2560 *offs_tai = tk->offs_tai; in ktime_get_update_offsets_now()
2564 if (unlikely(base >= tk->next_leap_ktime)) in ktime_get_update_offsets_now()
2565 *offs_real = ktime_sub(tk->offs_real, ktime_set(1, 0)); in ktime_get_update_offsets_now()
2573 * timekeeping_validate_timex - Ensures the timex is ok for use in do_adjtimex
2577 if (txc->modes & ADJ_ADJTIME) { in timekeeping_validate_timex()
2579 if (!(txc->modes & ADJ_OFFSET_SINGLESHOT)) in timekeeping_validate_timex()
2580 return -EINVAL; in timekeeping_validate_timex()
2581 if (!(txc->modes & ADJ_OFFSET_READONLY) && in timekeeping_validate_timex()
2583 return -EPERM; in timekeeping_validate_timex()
2585 /* In order to modify anything, you gotta be super-user! */ in timekeeping_validate_timex()
2586 if (txc->modes && !capable(CAP_SYS_TIME)) in timekeeping_validate_timex()
2587 return -EPERM; in timekeeping_validate_timex()
2592 if (txc->modes & ADJ_TICK && in timekeeping_validate_timex()
2593 (txc->tick < 900000/USER_HZ || in timekeeping_validate_timex()
2594 txc->tick > 1100000/USER_HZ)) in timekeeping_validate_timex()
2595 return -EINVAL; in timekeeping_validate_timex()
2598 if (txc->modes & ADJ_SETOFFSET) { in timekeeping_validate_timex()
2599 /* In order to inject time, you gotta be super-user! */ in timekeeping_validate_timex()
2601 return -EPERM; in timekeeping_validate_timex()
2608 * The field tv_usec/tv_nsec must always be non-negative and in timekeeping_validate_timex()
2611 if (txc->time.tv_usec < 0) in timekeeping_validate_timex()
2612 return -EINVAL; in timekeeping_validate_timex()
2614 if (txc->modes & ADJ_NANO) { in timekeeping_validate_timex()
2615 if (txc->time.tv_usec >= NSEC_PER_SEC) in timekeeping_validate_timex()
2616 return -EINVAL; in timekeeping_validate_timex()
2618 if (txc->time.tv_usec >= USEC_PER_SEC) in timekeeping_validate_timex()
2619 return -EINVAL; in timekeeping_validate_timex()
2625 * only happen on 64-bit systems: in timekeeping_validate_timex()
2627 if ((txc->modes & ADJ_FREQUENCY) && (BITS_PER_LONG == 64)) { in timekeeping_validate_timex()
2628 if (LLONG_MIN / PPM_SCALE > txc->freq) in timekeeping_validate_timex()
2629 return -EINVAL; in timekeeping_validate_timex()
2630 if (LLONG_MAX / PPM_SCALE < txc->freq) in timekeeping_validate_timex()
2631 return -EINVAL; in timekeeping_validate_timex()
2636 if (txc->status & (STA_INS | STA_DEL)) in timekeeping_validate_timex()
2637 return -EINVAL; in timekeeping_validate_timex()
2640 if (txc->modes & ADJ_TAI) in timekeeping_validate_timex()
2641 return -EINVAL; in timekeeping_validate_timex()
2644 if (txc->status & (STA_PPSFREQ | STA_PPSTIME)) in timekeeping_validate_timex()
2645 return -EINVAL; in timekeeping_validate_timex()
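
These are the rules a caller of adjtimex(2)/clock_adjtime(2) must satisfy. A minimal userspace sketch that injects a +0.5 s step via ADJ_SETOFFSET (requires CAP_SYS_TIME; the offset is in microseconds here since ADJ_NANO is not set):

#include <stdio.h>
#include <sys/timex.h>

int main(void)
{
        struct timex tx = { 0 };

        tx.modes = ADJ_SETOFFSET;
        tx.time.tv_sec = 0;
        tx.time.tv_usec = 500000;   /* must stay in [0, USEC_PER_SEC) */

        if (adjtimex(&tx) < 0)
                perror("adjtimex"); /* EPERM without CAP_SYS_TIME */
        return 0;
}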
2652 * random_get_entropy_fallback - Returns the raw clock source value,
2658 struct clocksource *clock = READ_ONCE(tkr->clock); in random_get_entropy_fallback()
2662 return clock->read(clock); in random_get_entropy_fallback()
2675 struct timekeeper *tks = &tkd->shadow_timekeeper; in __do_adjtimex()
2690 tk_get_aux_ts64(tkd->timekeeper.id, &ts); in __do_adjtimex()
2694 guard(raw_spinlock_irqsave)(&tkd->lock); in __do_adjtimex()
2696 if (!tks->clock_valid) in __do_adjtimex()
2697 return -ENODEV; in __do_adjtimex()
2699 if (txc->modes & ADJ_SETOFFSET) { in __do_adjtimex()
2700 result->delta.tv_sec = txc->time.tv_sec; in __do_adjtimex()
2701 result->delta.tv_nsec = txc->time.tv_usec; in __do_adjtimex()
2702 if (!(txc->modes & ADJ_NANO)) in __do_adjtimex()
2703 result->delta.tv_nsec *= 1000; in __do_adjtimex()
2704 ret = __timekeeping_inject_offset(tkd, &result->delta); in __do_adjtimex()
2707 result->clock_set = true; in __do_adjtimex()
2710 orig_tai = tai = tks->tai_offset; in __do_adjtimex()
2711 ret = ntp_adjtimex(tks->id, txc, &ts, &tai, &result->ad); in __do_adjtimex()
2716 result->clock_set = true; in __do_adjtimex()
2722 if (txc->modes & (ADJ_FREQUENCY | ADJ_TICK)) in __do_adjtimex()
2723 result->clock_set |= __timekeeping_advance(tkd, TK_ADV_FREQ); in __do_adjtimex()
2729 * do_adjtimex() - Accessor function to NTP __do_adjtimex function
2741 if (txc->modes & ADJ_SETOFFSET) in do_adjtimex()
2765 * hardpps() - Accessor function to NTP __hardpps function
2778 #include "posix-timers.h"
2783 * the state of the corresponding timekeeper has to be re-checked under
2790 return TIMEKEEPER_AUX_FIRST + id - CLOCK_AUX; in clockid_to_tkid()
2808 struct timekeeper *tks = &tkd->shadow_timekeeper; in tk_aux_update_clocksource()
2810 guard(raw_spinlock_irqsave)(&tkd->lock); in tk_aux_update_clocksource()
2811 if (!tks->clock_valid) in tk_aux_update_clocksource()
2829 guard(raw_spinlock)(&aux_tkd->lock); in tk_aux_advance()
2830 if (aux_tkd->shadow_timekeeper.clock_valid) in tk_aux_advance()
2836 * ktime_get_aux - Get time for an AUX clock
2855 aux_tk = &aux_tkd->timekeeper; in ktime_get_aux()
2857 seq = read_seqcount_begin(&aux_tkd->seq); in ktime_get_aux()
2858 if (!aux_tk->clock_valid) in ktime_get_aux()
2861 base = ktime_add(aux_tk->tkr_mono.base, aux_tk->offs_aux); in ktime_get_aux()
2862 nsecs = timekeeping_get_ns(&aux_tk->tkr_mono); in ktime_get_aux()
2863 } while (read_seqcount_retry(&aux_tkd->seq, seq)); in ktime_get_aux()
2871 * ktime_get_aux_ts64 - Get time for an AUX clock
2891 return -ENODEV; in aux_get_res()
2893 tp->tv_sec = aux_clock_resolution_ns() / NSEC_PER_SEC; in aux_get_res()
2894 tp->tv_nsec = aux_clock_resolution_ns() % NSEC_PER_SEC; in aux_get_res()
2900 return ktime_get_aux_ts64(id, tp) ? 0 : -ENODEV; in aux_get_timespec()
2910 return -EINVAL; in aux_clock_set()
2912 return -ENODEV; in aux_clock_set()
2914 aux_tks = &aux_tkd->shadow_timekeeper; in aux_clock_set()
2916 guard(raw_spinlock_irq)(&aux_tkd->lock); in aux_clock_set()
2917 if (!aux_tks->clock_valid) in aux_clock_set()
2918 return -ENODEV; in aux_clock_set()
2931 nsecs = timekeeping_cycles_to_ns(&aux_tks->tkr_mono, aux_tks->tkr_mono.cycle_last); in aux_clock_set()
2932 tnow = ktime_add(aux_tks->tkr_mono.base, nsecs); in aux_clock_set()
2940 aux_tks->offs_aux = ktime_sub(timespec64_to_ktime(*tnew), tnow); in aux_clock_set()
2952 return -ENODEV; in aux_clock_adj()
2972 struct timekeeper *aux_tks = &aux_tkd->shadow_timekeeper; in aux_clock_enable()
2982 guard(raw_spinlock_nested)(&aux_tkd->lock); in aux_clock_enable()
2987 aux_tks->id = aux_tkd->timekeeper.id; in aux_clock_enable()
2989 tk_setup_internals(aux_tks, tkr_raw->clock); in aux_clock_enable()
2992 aux_tks->clock_valid = true; in aux_clock_enable()
3000 guard(raw_spinlock_irq)(&aux_tkd->lock); in aux_clock_disable()
3001 aux_tkd->shadow_timekeeper.clock_valid = false; in aux_clock_disable()
3011 int id = kobj->name[0] & 0x7; in aux_clock_enable_store()
3012 bool enable; in aux_clock_enable_store() local
3015 return -EPERM; in aux_clock_enable_store()
3017 if (kstrtobool(buf, &enable) < 0) in aux_clock_enable_store()
3018 return -EINVAL; in aux_clock_enable_store()
3021 if (enable == test_bit(id, &aux_timekeepers)) in aux_clock_enable_store()
3024 if (enable) { in aux_clock_enable_store()
3038 int id = kobj->name[0] & 0x7; in aux_clock_enable_show()
3059 return -ENOMEM; in tk_aux_sysfs_init()
3064 return -ENOMEM; in tk_aux_sysfs_init()
3072 return -ENOMEM; in tk_aux_sysfs_init()
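
Once an auxiliary timekeeper is enabled (via the sysfs attribute wired up above) it becomes readable through the regular posix-clock interface. A hedged userspace sketch: the CLOCK_AUX clockid and its numeric value come from the uapi headers of a kernel built with CONFIG_POSIX_AUX_CLOCKS, and the fallback define below is a placeholder assumption, not a documented constant:

#include <stdio.h>
#include <time.h>

#ifndef CLOCK_AUX
#define CLOCK_AUX 32    /* ASSUMPTION: check include/uapi/linux/time.h */
#endif

int main(void)
{
        struct timespec ts;

        /* Reads aux clock 0; fails until the clock is enabled. */
        if (clock_gettime(CLOCK_AUX, &ts) == 0)
                printf("aux0: %lld.%09ld\n",
                       (long long)ts.tv_sec, ts.tv_nsec);
        else
                perror("clock_gettime(CLOCK_AUX)");
        return 0;
}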