Lines Matching +full:cs +full:- +full:enable +full:- +full:shift

1 // SPDX-License-Identifier: GPL-2.0+
20 #include "tick-internal.h"
23 static void clocksource_enqueue(struct clocksource *cs);
25 static noinline u64 cycles_to_nsec_safe(struct clocksource *cs, u64 start, u64 end)
27 u64 delta = clocksource_delta(end, start, cs->mask, cs->max_raw_delta);
29 if (likely(delta < cs->max_cycles))
30 return clocksource_cyc2ns(delta, cs->mult, cs->shift);
32 return mul_u64_u32_shr(delta, cs->mult, cs->shift);
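The fast path above multiplies delta by cs->mult entirely in 64 bits, which is only safe while delta stays below cs->max_cycles; beyond that, mul_u64_u32_shr() performs the multiply at wider precision. A minimal userspace sketch of the same guard, assuming a compiler with __int128 support (cyc2ns_safe and its parameter names are illustrative, not kernel API):

#include <stdint.h>

static uint64_t cyc2ns_safe(uint64_t delta, uint32_t mult, uint32_t shift,
			    uint64_t max_cycles)
{
	/* Fast path: delta * mult is known not to overflow 64 bits. */
	if (delta < max_cycles)
		return (delta * mult) >> shift;

	/* Slow path: widen the multiply, as mul_u64_u32_shr() does. */
	return (uint64_t)(((unsigned __int128)delta * mult) >> shift);
}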
36 * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks
38 * @shift: pointer to shift variable
43 * The function evaluates the shift/mult pair for the scaled math
52 * calculated mult and shift factors. This guarantees that no 64bit
55 * reduce the conversion accuracy by choosing smaller mult and shift
59 clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec)
65 * Calculate the shift factor which is limiting the conversion
71 sftacc--;
75 * Find the conversion shift/mult pair which has the best
78 for (sft = 32; sft > 0; sft--) {
86 *shift = sft;
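To see what this search produces, here is a self-contained userspace rendering with a worked example; calc_mult_shift is a rename for illustration, and plain division stands in for the kernel's do_div(). For a 24 MHz counter converted to nanoseconds with maxsec = 600, it settles on shift = 24 and mult = 699050667 (about 41.667 * 2^24), so 1000 cycles convert to roughly 41666 ns:

#include <stdint.h>
#include <stdio.h>

static void calc_mult_shift(uint32_t *mult, uint32_t *shift,
			    uint32_t from, uint32_t to, uint32_t maxsec)
{
	uint64_t tmp;
	uint32_t sft, sftacc = 32;

	/* Limit the range: maxsec seconds of input must not overflow. */
	tmp = ((uint64_t)maxsec * from) >> 32;
	while (tmp) {
		tmp >>= 1;
		sftacc--;
	}

	/* The largest shift whose rounded mult still fits wins. */
	for (sft = 32; sft > 0; sft--) {
		tmp = (((uint64_t)to << sft) + from / 2) / from;
		if ((tmp >> sftacc) == 0)
			break;
	}
	*mult = tmp;
	*shift = sft;
}

int main(void)
{
	uint32_t mult, shift;

	calc_mult_shift(&mult, &shift, 24000000, 1000000000, 600);
	printf("mult=%u shift=%u, 1000 cycles = %llu ns\n", mult, shift,
	       (unsigned long long)(((uint64_t)1000 * mult) >> shift));
	return 0;
}

Larger maxsec values shrink sftacc, forcing a smaller shift and hence a coarser mult, which is exactly the accuracy trade-off the comment above describes.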
90 /*[Clocksource internal variables]---------
100 * Name of the user-specified clocksource.
125 * a lower bound for cs->uncertainty_margin values when registering clocks.
131 * precise (for example, with a sub-nanosecond period), the maximum
144 * Default for maximum permissible skew when cs->uncertainty_margin is
145 * not specified, and the lower bound even when cs->uncertainty_margin
147 * clocks with unspecified cs->uncertainty_margin, so this macro is used
195 static void clocksource_change_rating(struct clocksource *cs, int rating)
197 list_del(&cs->list);
198 cs->rating = rating;
199 clocksource_enqueue(cs);
202 static void __clocksource_unstable(struct clocksource *cs)
204 cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
205 cs->flags |= CLOCK_SOURCE_UNSTABLE;
209 * re-rate and re-select.
211 if (list_empty(&cs->list)) {
212 cs->rating = 0;
216 if (cs->mark_unstable)
217 cs->mark_unstable(cs);
225 * clocksource_mark_unstable - mark clocksource unstable via watchdog
226 * @cs: clocksource to be marked unstable
229 * it defers demotion and re-selection to a kthread.
231 void clocksource_mark_unstable(struct clocksource *cs)
236 if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
237 if (!list_empty(&cs->list) && list_empty(&cs->wd_list))
238 list_add(&cs->wd_list, &watchdog_list);
239 __clocksource_unstable(cs);
253 static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow, u64 *wdnow)
255 int64_t md = 2 * watchdog->uncertainty_margin;
263 *wdnow = watchdog->read(watchdog);
264 *csnow = cs->read(cs);
265 wd_end = watchdog->read(watchdog);
266 wd_end2 = watchdog->read(watchdog);
270 if (wd_delay <= md + cs->uncertainty_margin) {
273 smp_processor_id(), watchdog->name, nretries);
283 * If consecutive WD read-back delay > md, report
292 pr_warn("timekeeping watchdog on CPU%d: wd-%s-wd excessive read-back delay of %lldns vs. limit of %ldns, wd-wd read-back delay only %lldns, attempt %d, marking %s unstable\n",
293 smp_processor_id(), cs->name, wd_delay, WATCHDOG_MAX_SKEW, wd_seq_delay, nretries, cs->name);
297 pr_info("timekeeping watchdog on CPU%d: %s wd-wd read-back delay of %lldns\n",
298 smp_processor_id(), watchdog->name, wd_seq_delay);
299 pr_info("wd-%s-wd read-back delay of %lldns, clock-skew test skipped!\n",
300 cs->name, wd_delay);
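The pattern above brackets every clocksource read between two watchdog reads: if the watchdog-to-watchdog delay is small, nothing disturbed the sample; if it stays large across retries, the sample is skipped rather than trusted. A rough userspace analogue using POSIX clocks instead of the kernel's clocksource API (bracketed_read and its retry limit are invented for illustration):

#include <stdint.h>
#include <stdbool.h>
#include <time.h>

static uint64_t now_ns(clockid_t id)
{
	struct timespec ts;

	clock_gettime(id, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static bool bracketed_read(uint64_t *ref, uint64_t *sample, int64_t max_delay_ns)
{
	for (int retries = 0; retries < 3; retries++) {
		uint64_t wd_start = now_ns(CLOCK_MONOTONIC);
		*sample = now_ns(CLOCK_MONOTONIC_RAW);
		uint64_t wd_end = now_ns(CLOCK_MONOTONIC);

		/* Two close reference reads mean nothing (interrupt,
		 * preemption) delayed the sample in between. */
		if ((int64_t)(wd_end - wd_start) <= max_delay_ns) {
			*ref = wd_start;
			return true;
		}
	}
	return false;	/* persistently delayed: result is suspect */
}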
354 struct clocksource *cs = (struct clocksource *)csin;
356 csnow_mid = cs->read(cs);
359 void clocksource_verify_percpu(struct clocksource *cs)
376 pr_warn("Not enough CPUs to check clocksource '%s'.\n", cs->name);
381 cs->name, testcpu, cpumask_pr_args(&cpus_chosen));
386 csnow_begin = cs->read(cs);
387 smp_call_function_single(cpu, clocksource_verify_one_cpu, cs, 1);
388 csnow_end = cs->read(cs);
389 delta = (s64)((csnow_mid - csnow_begin) & cs->mask);
392 delta = (csnow_end - csnow_mid) & cs->mask;
395 cs_nsec = cycles_to_nsec_safe(cs, csnow_begin, csnow_end);
406 cpumask_pr_args(&cpus_ahead), testcpu, cs->name);
409 cpumask_pr_args(&cpus_behind), testcpu, cs->name);
411 pr_warn(" CPU %d check durations %lldns - %lldns for clocksource %s.\n",
412 testcpu, cs_nsec_min, cs_nsec_max, cs->name);
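The delta computation above relies on the mask-and-cast trick: with a full 64-bit mask (as on the TSC, the main user of per-CPU verification), a reader that is slightly behind produces a wrapped difference with the top bit set, which the cast to s64 exposes as a negative skew. A tiny demonstration with made-up counter values:

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	const uint64_t mask = ~0ull;	/* 64-bit counter, e.g. TSC */
	uint64_t begin = 1000100;	/* read on the measuring CPU */
	uint64_t mid = 1000070;		/* read on a CPU 30 cycles behind */
	int64_t delta = (int64_t)((mid - begin) & mask);

	if (delta < 0)
		printf("cpu is behind by %" PRId64 " cycles\n", -delta);
	return 0;
}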
418 struct clocksource *cs;
420 list_for_each_entry(cs, &watchdog_list, wd_list)
421 cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
430 struct clocksource *cs;
441 list_for_each_entry(cs, &watchdog_list, wd_list) {
444 if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
450 read_ret = cs_watchdog_read(cs, &csnow, &wdnow);
454 __clocksource_unstable(cs);
468 * cs->last could remain unchanged for 5 minutes, reset
477 if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) ||
479 cs->flags |= CLOCK_SOURCE_WATCHDOG;
480 cs->wd_last = wdnow;
481 cs->cs_last = csnow;
485 wd_nsec = cycles_to_nsec_safe(watchdog, cs->wd_last, wdnow);
486 cs_nsec = cycles_to_nsec_safe(cs, cs->cs_last, csnow);
487 wdlast = cs->wd_last; /* save these in case we print them */
488 cslast = cs->cs_last;
489 cs->cs_last = csnow;
490 cs->wd_last = wdnow;
517 md = cs->uncertainty_margin + watchdog->uncertainty_margin;
518 if (abs(cs_nsec - wd_nsec) > md) {
524 smp_processor_id(), cs->name);
526 watchdog->name, wd_nsec, wdnow, wdlast, watchdog->mask);
528 cs->name, cs_nsec, csnow, cslast, cs->mask);
529 cs_wd_msec = div_s64_rem(cs_nsec - wd_nsec, 1000 * 1000, &wd_rem);
532 cs->name, cs_nsec - wd_nsec, cs_wd_msec, watchdog->name, wd_nsec, wd_msec);
533 if (curr_clocksource == cs)
534 pr_warn(" '%s' is current clocksource.\n", cs->name);
536 pr_warn(" '%s' (not '%s') is current clocksource.\n", curr_clocksource->name, cs->name);
539 __clocksource_unstable(cs);
543 if (cs == curr_clocksource && cs->tick_stable)
544 cs->tick_stable(cs);
546 if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
547 (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
548 (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) {
549 /* Mark it valid for high-res. */
550 cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
567 if (cs != curr_clocksource) {
568 cs->flags |= CLOCK_SOURCE_RESELECT;
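The heart of the watchdog loop above is a simple interval comparison: both clocks measured the same wall-clock interval, so their nanosecond readings must agree to within the two uncertainty margins combined. A sketch with hypothetical interval and margin values:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int64_t cs_nsec = 500300000;	/* interval per the watched clock */
	int64_t wd_nsec = 500000000;	/* same interval per the watchdog */
	int64_t md = 100000 + 100000;	/* sum of both uncertainty margins */

	if (llabs(cs_nsec - wd_nsec) > md)
		printf("unstable: skew is %lld ns\n",
		       (long long)(cs_nsec - wd_nsec));
	else
		printf("within margin\n");
	return 0;
}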
624 static void clocksource_enqueue_watchdog(struct clocksource *cs)
626 INIT_LIST_HEAD(&cs->wd_list);
628 if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
629 /* cs is a clocksource to be watched. */
630 list_add(&cs->wd_list, &watchdog_list);
631 cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
633 /* cs is a watchdog. */
634 if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
635 cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
641 struct clocksource *cs, *old_wd;
650 list_for_each_entry(cs, &clocksource_list, list) {
651 /* cs is a clocksource to be watched. */
652 if (cs->flags & CLOCK_SOURCE_MUST_VERIFY)
656 if (fallback && cs == old_wd)
660 if (!watchdog || cs->rating > watchdog->rating)
661 watchdog = cs;
676 static void clocksource_dequeue_watchdog(struct clocksource *cs)
678 if (cs != watchdog) {
679 if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
680 /* cs is a watched clocksource. */
681 list_del_init(&cs->wd_list);
690 struct clocksource *cs, *tmp;
694 /* Do any required per-CPU skew verification. */
696 curr_clocksource->flags & CLOCK_SOURCE_UNSTABLE &&
697 curr_clocksource->flags & CLOCK_SOURCE_VERIFY_PERCPU)
701 list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
702 if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
703 list_del_init(&cs->wd_list);
704 clocksource_change_rating(cs, 0);
707 if (cs->flags & CLOCK_SOURCE_RESELECT) {
708 cs->flags &= ~CLOCK_SOURCE_RESELECT;
728 static bool clocksource_is_watchdog(struct clocksource *cs)
730 return cs == watchdog;
735 static void clocksource_enqueue_watchdog(struct clocksource *cs)
737 if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
738 cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
742 static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
745 static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
746 void clocksource_mark_unstable(struct clocksource *cs) { }
753 static bool clocksource_is_suspend(struct clocksource *cs)
755 return cs == suspend_clocksource;
758 static void __clocksource_suspend_select(struct clocksource *cs)
763 if (!(cs->flags & CLOCK_SOURCE_SUSPEND_NONSTOP))
771 if (cs->suspend || cs->resume) {
773 cs->name);
777 if (!suspend_clocksource || cs->rating > suspend_clocksource->rating)
778 suspend_clocksource = cs;
782 * clocksource_suspend_select - Select the best clocksource for suspend timing
787 struct clocksource *cs, *old_suspend;
793 list_for_each_entry(cs, &clocksource_list, list) {
795 if (fallback && cs == old_suspend)
798 __clocksource_suspend_select(cs);
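Selection is a single pass: among the registered clocksources, keep the highest-rated one whose counter keeps running across suspend (a nonstop clock that also supplies suspend/resume hooks is still eligible, it just draws the warning above). A sketch of the rule over a hypothetical table of clocks:

#include <stdio.h>

struct clk { const char *name; int rating; int nonstop; };

int main(void)
{
	/* Made-up entries; ratings and flags vary by platform. */
	struct clk clks[] = {
		{ "tsc", 300, 1 }, { "hpet", 250, 0 }, { "acpi_pm", 200, 1 },
	};
	const struct clk *best = NULL;

	for (int i = 0; i < 3; i++) {
		if (!clks[i].nonstop)
			continue;	/* stops in suspend: useless here */
		if (!best || clks[i].rating > best->rating)
			best = &clks[i];
	}
	printf("suspend clocksource: %s\n", best ? best->name : "none");
	return 0;
}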
803 * clocksource_start_suspend_timing - Start measuring the suspend timing
804 * @cs: current clocksource from timekeeping
811 * which means processes are frozen, non-boot CPUs and interrupts are disabled
815 void clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles)
825 if (clocksource_is_suspend(cs)) {
830 if (suspend_clocksource->enable &&
831 suspend_clocksource->enable(suspend_clocksource)) {
832 pr_warn_once("Failed to enable the non-suspendable clocksource.\n");
836 suspend_start = suspend_clocksource->read(suspend_clocksource);
840 * clocksource_stop_suspend_timing - Stop measuring the suspend timing
841 * @cs: current clocksource from timekeeping
853 u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 cycle_now)
865 if (clocksource_is_suspend(cs))
868 now = suspend_clocksource->read(suspend_clocksource);
877 if (!clocksource_is_suspend(cs) && suspend_clocksource->disable)
878 suspend_clocksource->disable(suspend_clocksource);
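In the kernel, the suspend length is read off the nonstop suspend clocksource selected earlier. From userspace, the closest analogue (an illustration, not this code path) is the drift between CLOCK_BOOTTIME, which keeps counting across suspend, and CLOCK_MONOTONIC, which normally does not:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static int64_t boottime_minus_monotonic(void)
{
	struct timespec bt, mono;

	clock_gettime(CLOCK_BOOTTIME, &bt);
	clock_gettime(CLOCK_MONOTONIC, &mono);
	return ((int64_t)bt.tv_sec - mono.tv_sec) * 1000000000ll +
	       (bt.tv_nsec - mono.tv_nsec);
}

int main(void)
{
	int64_t before = boottime_minus_monotonic();
	/* ... system suspends and resumes here ... */
	int64_t after = boottime_minus_monotonic();

	printf("slept ~%lld ns\n", (long long)(after - before));
	return 0;
}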
884 * clocksource_suspend - suspend the clocksource(s)
888 struct clocksource *cs;
890 list_for_each_entry_reverse(cs, &clocksource_list, list)
891 if (cs->suspend)
892 cs->suspend(cs);
896 * clocksource_resume - resume the clocksource(s)
900 struct clocksource *cs;
902 list_for_each_entry(cs, &clocksource_list, list)
903 if (cs->resume)
904 cs->resume(cs);
910 * clocksource_touch_watchdog - Update watchdog
922 * clocksource_max_adjustment - Returns max adjustment amount
923 * @cs: Pointer to clocksource
926 static u32 clocksource_max_adjustment(struct clocksource *cs)
932 ret = (u64)cs->mult * 11;
938 * clocks_calc_max_nsecs - Returns maximum nanoseconds that can be converted
940 * @shift: cycle to nanosecond divisor (power of two)
952 u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cyc)
958 * cyc2ns() function without overflowing a 64-bit result.
970 max_nsecs = clocksource_cyc2ns(max_cycles, mult - maxadj, shift);
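Concretely, max_cycles is the largest delta whose 64-bit multiply cannot overflow even at the maximum upward-adjusted rate, and max_nsecs is then computed at the minimum rate so the bound stays conservative. A sketch using the hypothetical 24 MHz mult/shift pair from the earlier example, with maxadj at 11% of mult (roughly 880 seconds of safe deferment):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mult = 699050667, shift = 24;	/* hypothetical 24 MHz pair */
	uint32_t maxadj = 76895573;		/* 11% of mult */
	uint64_t mask = ~0ull;

	/* Largest delta that cannot overflow at the fastest rate. */
	uint64_t max_cycles = UINT64_MAX / (mult + maxadj);
	if (max_cycles > mask)
		max_cycles = mask;

	/* Convert at the slowest rate to keep the bound conservative. */
	uint64_t max_nsecs = (max_cycles * (mult - maxadj)) >> shift;

	printf("max_cycles=%llu max_nsecs=%llu\n",
	       (unsigned long long)max_cycles, (unsigned long long)max_nsecs);
	return 0;
}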
983 * clocksource_update_max_deferment - Updates the clocksource max_idle_ns & max_cycles
984 * @cs: Pointer to clocksource to be updated
987 static inline void clocksource_update_max_deferment(struct clocksource *cs)
989 cs->max_idle_ns = clocks_calc_max_nsecs(cs->mult, cs->shift,
990 cs->maxadj, cs->mask,
991 &cs->max_cycles);
1000 cs->max_raw_delta = (cs->mask >> 1) + (cs->mask >> 2) + (cs->mask >> 3);
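The three shifts implement a cheap 87.5% cap (1/2 + 1/4 + 1/8 of the mask). For an 8-bit mask of 0xff, for instance, this yields 0x7f + 0x3f + 0x1f = 0xdd (221), slightly under 0.875 * 255 because each shift truncates.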
1005 struct clocksource *cs;
1015 list_for_each_entry(cs, &clocksource_list, list) {
1016 if (skipcur && cs == curr_clocksource)
1018 if (oneshot && !(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES))
1020 return cs;
1028 struct clocksource *best, *cs;
1039 list_for_each_entry(cs, &clocksource_list, list) {
1040 if (skipcur && cs == curr_clocksource)
1042 if (strcmp(cs->name, override_name) != 0)
1045 * Check to make sure we don't switch to a non-highres
1049 if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && oneshot) {
1051 if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
1052 pr_warn("Override clocksource %s is unstable and not HRT compatible - cannot switch while in HRT/NOHZ mode\n",
1053 cs->name);
1060 pr_info("Override clocksource %s is not currently HRT compatible - deferring\n",
1061 cs->name);
1065 best = cs;
1071 pr_info("Switched to clocksource %s\n", best->name);
1077 * clocksource_select - Select the best clocksource available
1095 * clocksource_done_booting - Called near the end of core bootup
1119 static void clocksource_enqueue(struct clocksource *cs)
1126 if (tmp->rating < cs->rating)
1128 entry = &tmp->list;
1130 list_add(&cs->list, entry);
1134 * __clocksource_update_freq_scale - Used to update clocksource with new freq
1135 * @cs: clocksource to be registered
1139 * This should only be called from the clocksource->enable() method.
1145 void __clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq)
1150 * Default clocksources are *special* and self-define their mult/shift.
1156 * wrapping around. For clocksources which have a mask > 32-bit
1159 * amount. That results in a shift value of 24 for a
1160 * clocksource with mask >= 40-bit and f >= 4GHz. That maps to
1163 sec = cs->mask;
1168 else if (sec > 600 && cs->mask > UINT_MAX)
1171 clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
1177 * both scale and freq are non-zero, calculate the clock period, but
1180 * and take the tens-of-milliseconds WATCHDOG_THRESHOLD value
1190 if (scale && freq && !cs->uncertainty_margin) {
1191 cs->uncertainty_margin = NSEC_PER_SEC / (scale * freq);
1192 if (cs->uncertainty_margin < 2 * WATCHDOG_MAX_SKEW)
1193 cs->uncertainty_margin = 2 * WATCHDOG_MAX_SKEW;
1194 } else if (!cs->uncertainty_margin) {
1195 cs->uncertainty_margin = WATCHDOG_THRESHOLD;
1197 WARN_ON_ONCE(cs->uncertainty_margin < 2 * WATCHDOG_MAX_SKEW);
1203 cs->maxadj = clocksource_max_adjustment(cs);
1204 while (freq && ((cs->mult + cs->maxadj < cs->mult)
1205 || (cs->mult - cs->maxadj > cs->mult))) {
1206 cs->mult >>= 1;
1207 cs->shift--;
1208 cs->maxadj = clocksource_max_adjustment(cs);
1212 * Only warn for *special* clocksources that self-define
1213 * their mult/shift values and don't specify a freq.
1215 WARN_ONCE(cs->mult + cs->maxadj < cs->mult,
1217 cs->name);
1219 clocksource_update_max_deferment(cs);
1222 cs->name, cs->mask, cs->max_cycles, cs->max_idle_ns);
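Putting the registration math together with made-up numbers: a 24 MHz clock registered with scale = 1 gets an uncertainty margin of one clock period, NSEC_PER_SEC / 24000000, which is about 41 ns and is therefore raised to the 2 * WATCHDOG_MAX_SKEW floor; mult/shift come from clocks_calc_mult_shift(); and the loop above then halves mult (dropping shift with it) until an 11% adjustment can no longer wrap a u32. A sketch of that last rule with a deliberately oversized starting mult:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mult = 4000000000u;	/* hypothetical, near the u32 limit */
	uint32_t shift = 31;
	uint32_t maxadj = (uint64_t)mult * 11 / 100;

	/* Unsigned wraparound makes mult + maxadj compare below mult. */
	while (mult + maxadj < mult) {
		mult >>= 1;
		shift--;
		maxadj = (uint64_t)mult * 11 / 100;
	}
	printf("mult=%u shift=%u maxadj=%u\n", mult, shift, maxadj);
	return 0;
}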
1227 * __clocksource_register_scale - Used to install new clocksources
1228 * @cs: clocksource to be registered
1232 * Returns -EBUSY if registration fails, zero otherwise.
1237 int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
1241 clocksource_arch_init(cs);
1243 if (WARN_ON_ONCE((unsigned int)cs->id >= CSID_MAX))
1244 cs->id = CSID_GENERIC;
1245 if (cs->vdso_clock_mode < 0 ||
1246 cs->vdso_clock_mode >= VDSO_CLOCKMODE_MAX) {
1248 cs->name, cs->vdso_clock_mode);
1249 cs->vdso_clock_mode = VDSO_CLOCKMODE_NONE;
1252 /* Initialize mult/shift and max_idle_ns */
1253 __clocksource_update_freq_scale(cs, scale, freq);
1259 clocksource_enqueue(cs);
1260 clocksource_enqueue_watchdog(cs);
1265 __clocksource_suspend_select(cs);
1272 * Unbind clocksource @cs. Called with clocksource_mutex held
1274 static int clocksource_unbind(struct clocksource *cs)
1278 if (clocksource_is_watchdog(cs)) {
1281 if (clocksource_is_watchdog(cs))
1282 return -EBUSY;
1285 if (cs == curr_clocksource) {
1288 if (curr_clocksource == cs)
1289 return -EBUSY;
1292 if (clocksource_is_suspend(cs)) {
1302 clocksource_dequeue_watchdog(cs);
1303 list_del_init(&cs->list);
1310 * clocksource_unregister - remove a registered clocksource
1311 * @cs: clocksource to be unregistered
1313 int clocksource_unregister(struct clocksource *cs)
1318 if (!list_empty(&cs->list))
1319 ret = clocksource_unbind(cs);
1327 * current_clocksource_show - sysfs interface for current clocksource
1341 count = sysfs_emit(buf, "%s\n", curr_clocksource->name);
1353 return -EINVAL;
1356 if (buf[cnt-1] == '\n')
1357 cnt--;
1365 * current_clocksource_store - interface for manually overriding clocksource
1393 * unbind_clocksource_store - interface for manually unbinding clocksource
1405 struct clocksource *cs;
1413 ret = -ENODEV;
1415 list_for_each_entry(cs, &clocksource_list, list) {
1416 if (strcmp(cs->name, name))
1418 ret = clocksource_unbind(cs);
1428 * available_clocksource_show - sysfs interface for listing clocksource
1445 * Don't show non-HRES clocksource if the tick code is
1449 (src->flags & CLOCK_SOURCE_VALID_FOR_HRES))
1451 max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
1452 "%s ", src->name);
1457 max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");
1496 * boot_override_clocksource - boot clock override
1514 * boot_override_clock - Compatibility layer for deprecated boot option
1523 pr_warn("clock=pmtmr is deprecated - use clocksource=acpi_pm\n");
1526 pr_warn("clock= boot option is deprecated - use clocksource=xyz\n");