Lines matching "cs" in kernel/time/clocksource.c (excerpts; elided code is marked "...")
// SPDX-License-Identifier: GPL-2.0+

#include "tick-internal.h"

static void clocksource_enqueue(struct clocksource *cs);
static noinline u64 cycles_to_nsec_safe(struct clocksource *cs, u64 start, u64 end)
{
	u64 delta = clocksource_delta(end, start, cs->mask, cs->max_raw_delta);

	if (likely(delta < cs->max_cycles))
		return clocksource_cyc2ns(delta, cs->mult, cs->shift);

	return mul_u64_u32_shr(delta, cs->mult, cs->shift);
}
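The fast path is the standard scaled conversion, ns = (cycles * mult) >> shift; the slow path uses a full 64x32-bit multiply that cannot overflow for large deltas. A minimal userspace sketch of the same arithmetic (the 24 MHz rate and the mult/shift pair are invented for illustration, not taken from clocksource.c):

#include <stdint.h>
#include <stdio.h>

/* Same math as the fast path above: ns = (cycles * mult) >> shift. */
static uint64_t cyc2ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
	return (cycles * mult) >> shift;
}

int main(void)
{
	/* Hypothetical 24 MHz counter: mult = round(1e9 * 2^24 / 24e6). */
	uint32_t mult = 699050667, shift = 24;

	/* One second's worth of cycles converts to ~1e9 ns. */
	printf("%llu ns\n", (unsigned long long)cyc2ns(24000000, mult, shift));
	return 0;
}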
/**
 * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks
 ...
 */
void clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec)
{
	...
		sftacc--;
	...
	for (sft = 32; sft > 0; sft--) {
		...
		if ((tmp >> sftacc) == 0)
			break;
	}
	...
}
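The loop searches downward from shift = 32 for the largest shift whose multiplier still fits the conversion range bounded by maxsec. A self-contained userspace sketch of the same search (simplified rounding; this mirrors, but is not, the kernel's exact code):

#include <stdint.h>
#include <stdio.h>

static void calc_mult_shift(uint32_t *mult, uint32_t *shift,
			    uint32_t from, uint32_t to, uint32_t maxsec)
{
	uint64_t tmp;
	uint32_t sft, sftacc = 32;

	/* Reduce the accuracy shift until maxsec * from fits in 32 bits. */
	for (tmp = ((uint64_t)maxsec * from) >> 32; tmp; tmp >>= 1)
		sftacc--;

	/* Largest shift whose mult still fits the range. */
	for (sft = 32; sft > 0; sft--) {
		tmp = ((uint64_t)to << sft) + from / 2;
		tmp /= from;
		if ((tmp >> sftacc) == 0)
			break;
	}
	*mult = tmp;
	*shift = sft;
}

int main(void)
{
	uint32_t mult, shift;

	/* 24 MHz cycles to nanoseconds, valid for up to 600 seconds. */
	calc_mult_shift(&mult, &shift, 24000000, 1000000000, 600);
	printf("mult=%u shift=%u\n", mult, shift);	/* mult=699050667 shift=24 */
	return 0;
}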
/*[Clocksource internal variables]---------
 ...
 * override_name:
 *	Name of the user-specified clocksource.
 ...
 */

/*
 ...
 * a lower bound for cs->uncertainty_margin values when registering clocks.
 ...
 * precise (for example, with a sub-nanosecond period), the maximum
 ...
 */

/*
 * Default for maximum permissible skew when cs->uncertainty_margin is
 * not specified, and the lower bound even when cs->uncertainty_margin
 ...
 * clocks with unspecified cs->uncertainty_margin, so this macro is used
 ...
 */
#define WATCHDOG_MAX_SKEW (MAX_SKEW_USEC * NSEC_PER_USEC)
static void clocksource_change_rating(struct clocksource *cs, int rating)
{
	list_del(&cs->list);
	cs->rating = rating;
	clocksource_enqueue(cs);
}
static void __clocksource_unstable(struct clocksource *cs)
{
	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
	cs->flags |= CLOCK_SOURCE_UNSTABLE;

	/*
	 ...
	 * re-rate and re-select.
	 */
	if (list_empty(&cs->list)) {
		cs->rating = 0;
		return;
	}

	if (cs->mark_unstable)
		cs->mark_unstable(cs);
	...
}
/**
 * clocksource_mark_unstable - mark clocksource unstable via watchdog
 * @cs:	clocksource to be marked unstable
 *
 ...
 * it defers demotion and re-selection to a kthread.
 */
void clocksource_mark_unstable(struct clocksource *cs)
{
	...
	if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
		if (!list_empty(&cs->list) && list_empty(&cs->wd_list))
			list_add(&cs->wd_list, &watchdog_list);
		__clocksource_unstable(cs);
	}
	...
}
static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow, u64 *wdnow)
{
	int64_t md = 2 * watchdog->uncertainty_margin;
	...
	for (nretries = 0; nretries <= max_retries; nretries++) {
		...
		*wdnow = watchdog->read(watchdog);
		*csnow = cs->read(cs);
		wd_end = watchdog->read(watchdog);
		wd_end2 = watchdog->read(watchdog);
		...
		if (wd_delay <= md + cs->uncertainty_margin) {
			...
				smp_processor_id(), watchdog->name, nretries);
			...
			return WD_READ_SUCCESS;
		}

		/*
		 ...
		 * If consecutive WD read-back delay > md, report
		 ...
		 */
		...
	}

	pr_warn("timekeeping watchdog on CPU%d: wd-%s-wd excessive read-back delay of %lldns vs. limit of %ldns, wd-wd read-back delay only %lldns, attempt %d, marking %s unstable\n",
		smp_processor_id(), cs->name, wd_delay, WATCHDOG_MAX_SKEW, wd_seq_delay, nretries, cs->name);
	return WD_READ_UNSTABLE;

skip_test:
	pr_info("timekeeping watchdog on CPU%d: %s wd-wd read-back delay of %lldns\n",
		smp_processor_id(), watchdog->name, wd_seq_delay);
	pr_info("wd-%s-wd read-back delay of %lldns, clock-skew test skipped!\n",
		cs->name, wd_delay);
	return WD_READ_SKIP;
}
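The clocksource read is trusted only when the two watchdog reads bracketing it land close together; if the bracket itself took too long (SMI, NMI, vCPU preemption), the sample is retried or the comparison skipped. A hedged userspace analogue using clock_gettime() (the 100 us bound and retry count are invented for illustration):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static int64_t now_ns(clockid_t clk)
{
	struct timespec ts;

	clock_gettime(clk, &ts);
	return (int64_t)ts.tv_sec * 1000000000 + ts.tv_nsec;
}

int main(void)
{
	const int64_t limit = 100 * 1000;	/* 100 us, illustrative */
	int64_t t0, t1, mid;
	int tries;

	for (tries = 0; tries < 8; tries++) {
		/* Bracket one clock's read between two reads of another. */
		t0 = now_ns(CLOCK_MONOTONIC);
		mid = now_ns(CLOCK_MONOTONIC_RAW);
		t1 = now_ns(CLOCK_MONOTONIC);
		if (t1 - t0 <= limit) {
			printf("good sample: raw=%lld (bracket %lld ns, try %d)\n",
			       (long long)mid, (long long)(t1 - t0), tries);
			return 0;
		}
	}
	printf("all samples delayed; skipping comparison\n");
	return 1;
}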
static void clocksource_verify_choose_cpus(void)
{
	...
	if (n < 0) {
		/* Check all of the CPUs. */
		...
	}
	...
	if (n == 0 || num_online_cpus() <= 1)
		return;
	...
	cpu = cpumask_next(cpu - 1, cpu_online_mask);
	...
}
static void clocksource_verify_one_cpu(void *csin)
{
	struct clocksource *cs = (struct clocksource *)csin;

	csnow_mid = cs->read(cs);
}
void clocksource_verify_percpu(struct clocksource *cs)
{
	int64_t cs_nsec, cs_nsec_max = 0, cs_nsec_min = LLONG_MAX;
	...
	if (verify_n_cpus == 0)
		return;
	...
		pr_warn("Not enough CPUs to check clocksource '%s'.\n", cs->name);
	...
		cs->name, testcpu, cpumask_pr_args(&cpus_chosen));
	...
		csnow_begin = cs->read(cs);
		smp_call_function_single(cpu, clocksource_verify_one_cpu, cs, 1);
		csnow_end = cs->read(cs);
		delta = (s64)((csnow_mid - csnow_begin) & cs->mask);
		if (delta < 0)
			cpumask_set_cpu(cpu, &cpus_behind);
		delta = (csnow_end - csnow_mid) & cs->mask;
		if (delta < 0)
			cpumask_set_cpu(cpu, &cpus_ahead);
		cs_nsec = cycles_to_nsec_safe(cs, csnow_begin, csnow_end);
	...
	if (!cpumask_empty(&cpus_ahead))
		pr_warn("        CPUs %*pbl ahead of CPU %d for clocksource %s.\n",
			cpumask_pr_args(&cpus_ahead), testcpu, cs->name);
	if (!cpumask_empty(&cpus_behind))
		pr_warn("        CPUs %*pbl behind CPU %d for clocksource %s.\n",
			cpumask_pr_args(&cpus_behind), testcpu, cs->name);
	if (!cpumask_empty(&cpus_ahead) || !cpumask_empty(&cpus_behind))
		pr_warn("        CPU %d check durations %lldns - %lldns for clocksource %s.\n",
			testcpu, cs_nsec_min, cs_nsec_max, cs->name);
}
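The (x - y) & mask subtraction is what makes the "ahead"/"behind" test work: with the full 64-bit mask used by counters like the TSC, a backwards step wraps to a huge unsigned distance with the top bit set, which the signed cast exposes as negative. A small self-contained check (the sample values are arbitrary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t mask = ~0ULL;		/* CLOCKSOURCE_MASK(64) */
	uint64_t begin = 1000, mid_ahead = 1500, mid_behind = 900;
	int64_t d;

	d = (int64_t)((mid_ahead - begin) & mask);
	printf("forward step:  delta = %lld\n", (long long)d);	/* 500 */

	d = (int64_t)((mid_behind - begin) & mask);
	printf("backward step: delta = %lld\n", (long long)d);	/* -100 */
	return 0;
}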
static void clocksource_reset_watchdog(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &watchdog_list, wd_list)
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
}
static void clocksource_watchdog(struct timer_list *unused)
{
	...
	struct clocksource *cs;
	enum wd_read_status read_ret;
	unsigned long extra_wait = 0;
	u32 md;
	...
	list_for_each_entry(cs, &watchdog_list, wd_list) {

		/* Clocksource already marked unstable? */
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			...
			continue;
		}

		read_ret = cs_watchdog_read(cs, &csnow, &wdnow);
		if (read_ret == WD_READ_UNSTABLE) {
			/* Clock readout unreliable, skip. */
			__clocksource_unstable(cs);
			continue;
		}

		if (read_ret == WD_READ_SKIP) {
			/*
			 * As the watchdog timer will be suspended, and
			 * cs->last could keep unchanged for 5 minutes, reset
			 * the counters.
			 */
			clocksource_reset_watchdog();
			extra_wait = HZ * 300;
			break;
		}

		/* Clocksource initialized ? */
		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) ||
		    atomic_read(&watchdog_reset_pending)) {
			cs->flags |= CLOCK_SOURCE_WATCHDOG;
			cs->wd_last = wdnow;
			cs->cs_last = csnow;
			continue;
		}

		wd_nsec = cycles_to_nsec_safe(watchdog, cs->wd_last, wdnow);
		cs_nsec = cycles_to_nsec_safe(cs, cs->cs_last, csnow);
		wdlast = cs->wd_last; /* save these in case we print them */
		cslast = cs->cs_last;
		cs->cs_last = csnow;
		cs->wd_last = wdnow;
		...
		/* Check the deviation from the watchdog clocksource. */
		md = cs->uncertainty_margin + watchdog->uncertainty_margin;
		if (abs(cs_nsec - wd_nsec) > md) {
			...
				smp_processor_id(), cs->name);
			pr_warn("                      '%s' wd_nsec: %lld wd_now: %llx wd_last: %llx mask: %llx\n",
				watchdog->name, wd_nsec, wdnow, wdlast, watchdog->mask);
			pr_warn("                      '%s' cs_nsec: %lld cs_now: %llx cs_last: %llx mask: %llx\n",
				cs->name, cs_nsec, csnow, cslast, cs->mask);
			cs_wd_msec = div_s64_rem(cs_nsec - wd_nsec, 1000 * 1000, &wd_rem);
			wd_msec = div_s64_rem(wd_nsec, 1000 * 1000, &wd_rem);
			pr_warn("                      Clocksource '%s' skewed %lld ns (%lld ms) over watchdog '%s' interval of %lld ns (%lld ms)\n",
				cs->name, cs_nsec - wd_nsec, cs_wd_msec, watchdog->name, wd_nsec, wd_msec);
			if (curr_clocksource == cs)
				pr_warn("                      '%s' is current clocksource.\n", cs->name);
			else if (curr_clocksource)
				pr_warn("                      '%s' (not '%s') is current clocksource.\n", curr_clocksource->name, cs->name);
			...
			__clocksource_unstable(cs);
			continue;
		}

		if (cs == curr_clocksource && cs->tick_stable)
			cs->tick_stable(cs);

		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
		    (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
		    (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) {
			/* Mark it valid for high-res. */
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
			...
			if (cs != curr_clocksource) {
				cs->flags |= CLOCK_SOURCE_RESELECT;
				...
			}
		}
	}
	...
}
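The skew test is symmetric: the same elapsed interval is measured with both clocks, and the clocksource is declared unstable when the two nanosecond readings differ by more than the sum of the two uncertainty margins. In sketch form (the 100 us margins are invented for illustration; the kernel derives them per clocksource):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define CS_MARGIN_NS	100000		/* illustrative */
#define WD_MARGIN_NS	100000

/* Returns nonzero if cs_nsec deviates too far from wd_nsec. */
static int skew_exceeded(int64_t cs_nsec, int64_t wd_nsec)
{
	int64_t md = CS_MARGIN_NS + WD_MARGIN_NS;

	return llabs(cs_nsec - wd_nsec) > md;
}

int main(void)
{
	/* Both clocks measured the same ~0.5 s interval. */
	printf("%d\n", skew_exceeded(500000000, 500150000));	/* 0: within margin */
	printf("%d\n", skew_exceeded(500000000, 500350000));	/* 1: unstable */
	return 0;
}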
static inline void clocksource_start_watchdog(void)
{
	...
	timer_setup(&watchdog_timer, clocksource_watchdog, 0);
	...
}

static inline void clocksource_stop_watchdog(void)
{
	...
	watchdog_running = 0;
}
static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	INIT_LIST_HEAD(&cs->wd_list);

	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
		/* cs is a clocksource to be watched. */
		list_add(&cs->wd_list, &watchdog_list);
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
	} else {
		/* cs is a watchdog. */
		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
	}
}
static void clocksource_select_watchdog(bool fallback)
{
	struct clocksource *cs, *old_wd;
	...
	list_for_each_entry(cs, &clocksource_list, list) {
		/* cs is a clocksource to be watched. */
		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY)
			continue;

		/* Skip current if we were requested for a fallback. */
		if (fallback && cs == old_wd)
			continue;

		/* Pick the best watchdog. */
		if (!watchdog || cs->rating > watchdog->rating)
			watchdog = cs;
	}
	...
}
static void clocksource_dequeue_watchdog(struct clocksource *cs)
{
	if (cs != watchdog) {
		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
			/* cs is a watched clocksource. */
			list_del_init(&cs->wd_list);
		}
	}
	...
}
static int __clocksource_watchdog_kthread(void)
{
	struct clocksource *cs, *tmp;
	...
	int select = 0;

	/* Do any required per-CPU skew verification. */
	if (curr_clocksource &&
	    curr_clocksource->flags & CLOCK_SOURCE_UNSTABLE &&
	    curr_clocksource->flags & CLOCK_SOURCE_VERIFY_PERCPU)
		clocksource_verify_percpu(curr_clocksource);

	...
	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			list_del_init(&cs->wd_list);
			clocksource_change_rating(cs, 0);
			select = 1;
		}
		if (cs->flags & CLOCK_SOURCE_RESELECT) {
			cs->flags &= ~CLOCK_SOURCE_RESELECT;
			select = 1;
		}
	}
	...
	return select;
}

static int clocksource_watchdog_kthread(void *data)
{
	...
	return 0;
}
static bool clocksource_is_watchdog(struct clocksource *cs)
{
	return cs == watchdog;
}
/* Stubs used when CONFIG_CLOCKSOURCE_WATCHDOG is not enabled: */

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
}

...
static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
...
static inline int __clocksource_watchdog_kthread(void) { return 0; }
static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
void clocksource_mark_unstable(struct clocksource *cs) { }
static bool clocksource_is_suspend(struct clocksource *cs)
{
	return cs == suspend_clocksource;
}
static void __clocksource_suspend_select(struct clocksource *cs)
{
	/* Skip the clocksource which will be stopped in suspend state. */
	if (!(cs->flags & CLOCK_SOURCE_SUSPEND_NONSTOP))
		return;
	...
	if (cs->suspend || cs->resume) {
		pr_warn("Nonstop clocksource %s should not supply suspend/resume interfaces\n",
			cs->name);
	}

	/* Pick the best rating. */
	if (!suspend_clocksource || cs->rating > suspend_clocksource->rating)
		suspend_clocksource = cs;
}
/**
 * clocksource_suspend_select - Select the best clocksource for suspend timing
 ...
 */
static void clocksource_suspend_select(bool fallback)
{
	struct clocksource *cs, *old_suspend;
	...
	list_for_each_entry(cs, &clocksource_list, list) {
		/* Skip current if we were requested for a fallback. */
		if (fallback && cs == old_suspend)
			continue;

		__clocksource_suspend_select(cs);
	}
}
/**
 * clocksource_start_suspend_timing - Start measuring the suspend timing
 * @cs:			current clocksource from timekeeping
 ...
 * This function is called late in the suspend process from timekeeping_suspend(),
 * which means processes are frozen, and non-boot CPUs and interrupts are disabled
 ...
 */
void clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles)
{
	...
	if (clocksource_is_suspend(cs)) {
		suspend_start = start_cycles;
		return;
	}

	if (suspend_clocksource->enable &&
	    suspend_clocksource->enable(suspend_clocksource)) {
		pr_warn_once("Failed to enable the non-suspend-able clocksource.\n");
		return;
	}

	suspend_start = suspend_clocksource->read(suspend_clocksource);
}
/**
 * clocksource_stop_suspend_timing - Stop measuring the suspend timing
 * @cs:		current clocksource from timekeeping
 ...
 * Returns nanoseconds since suspend started, or 0 if no usable suspend
 * clocksource is available.
 ...
 */
u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 cycle_now)
{
	u64 now, nsec = 0;

	if (!suspend_clocksource)
		return 0;
	...
	if (clocksource_is_suspend(cs))
		now = cycle_now;
	else
		now = suspend_clocksource->read(suspend_clocksource);
	...
	/* Disable the suspend timer to save power if it is not in use. */
	if (!clocksource_is_suspend(cs) && suspend_clocksource->disable)
		suspend_clocksource->disable(suspend_clocksource);

	return nsec;
}
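The suspend time is simply the nonstop counter's delta across the sleep, converted with the same mult/shift math used elsewhere. A worked sketch (the 32768 Hz always-on timer and its mult/shift pair are assumptions for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* mult/shift for 32768 Hz -> ns: 5e8 / 2^14 = 30517.578125 ns/cycle. */
	uint32_t mult = 500000000, shift = 14;
	uint64_t suspend_start = 100000;
	uint64_t now = 100000 + 32768;		/* exactly one second later */
	uint64_t nsec = ((now - suspend_start) * mult) >> shift;

	printf("slept %llu ns\n", (unsigned long long)nsec);	/* 1000000000 */
	return 0;
}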
/**
 * clocksource_suspend - suspend the clocksource(s)
 */
void clocksource_suspend(void)
{
	struct clocksource *cs;

	list_for_each_entry_reverse(cs, &clocksource_list, list)
		if (cs->suspend)
			cs->suspend(cs);
}
/**
 * clocksource_resume - resume the clocksource(s)
 */
void clocksource_resume(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &clocksource_list, list)
		if (cs->resume)
			cs->resume(cs);
	...
}
/**
 * clocksource_touch_watchdog - Update watchdog
 ...
 */
/**
 * clocksource_max_adjustment - Returns max adjustment amount
 * @cs:	Pointer to clocksource
 */
static u32 clocksource_max_adjustment(struct clocksource *cs)
{
	u64 ret;

	/* We won't try to correct for more than 11% adjustments (110,000 ppm). */
	ret = (u64)cs->mult * 11;
	do_div(ret, 100);
	return (u32)ret;
}
/**
 * clocks_calc_max_nsecs - Returns maximum nanoseconds that can be converted
 ...
 */
u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cyc)
{
	...
	/*
	 * Calculate the maximum number of cycles that we can pass to the
	 * cyc2ns() function without overflowing a 64-bit result.
	 */
	...
	max_nsecs = clocksource_cyc2ns(max_cycles, mult - maxadj, shift);
	...
}
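Overflow bounds the conversion: cycles * (mult + maxadj) must fit in 64 bits, so the cycle budget is ULLONG_MAX / (mult + maxadj), clamped to the counter mask; the conversion then uses the reduced multiplier mult - maxadj to stay conservative, and the final figure is halved as a safety margin. Worked numerically, reusing the 24 MHz example values from earlier (this is a loose model of the computation, not the kernel's exact code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mult = 699050667, shift = 24;
	uint32_t maxadj = (uint64_t)mult * 11 / 100;	/* ~11% adjustment */
	uint64_t mask = ~0ULL;				/* 64-bit counter */

	uint64_t max_cycles = ~0ULL / (mult + maxadj);
	if (max_cycles > mask)
		max_cycles = mask;

	/* Convert with the reduced mult, then halve for the safety margin. */
	uint64_t max_nsecs = ((max_cycles * (mult - maxadj)) >> shift) / 2;

	printf("max_cycles=%llu max_idle_ns=%llu\n",
	       (unsigned long long)max_cycles, (unsigned long long)max_nsecs);
	return 0;
}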
/**
 * clocksource_update_max_deferment - Updates the clocksource max_idle_ns & max_cycles
 * @cs:	Pointer to clocksource to be updated
 */
static inline void clocksource_update_max_deferment(struct clocksource *cs)
{
	cs->max_idle_ns = clocks_calc_max_nsecs(cs->mult, cs->shift,
						cs->maxadj, cs->mask,
						&cs->max_cycles);

	/*
	 * Threshold for detecting negative motion in clocksource_delta():
	 * allow deltas up to 0.875 of the counter width (mask/2 + mask/4 + mask/8).
	 */
	cs->max_raw_delta = (cs->mask >> 1) + (cs->mask >> 2) + (cs->mask >> 3);
}
static struct clocksource *clocksource_find_best(bool oneshot, bool skipcur)
{
	struct clocksource *cs;
	...
	list_for_each_entry(cs, &clocksource_list, list) {
		if (skipcur && cs == curr_clocksource)
			continue;
		if (oneshot && !(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES))
			continue;
		return cs;
	}
	return NULL;
}
static void __clocksource_select(bool skipcur)
{
	struct clocksource *best, *cs;
	...
	/* Check for the override clocksource. */
	list_for_each_entry(cs, &clocksource_list, list) {
		if (skipcur && cs == curr_clocksource)
			continue;
		if (strcmp(cs->name, override_name) != 0)
			continue;
		/*
		 * Check to make sure we don't switch to a non-highres
		 * capable clocksource if the tick code is in oneshot
		 * mode (highres or nohz).
		 */
		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && oneshot) {
			/* Override clocksource cannot be used. */
			if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
				pr_warn("Override clocksource %s is unstable and not HRT compatible - cannot switch while in HRT/NOHZ mode\n",
					cs->name);
				override_name[0] = 0;
			} else {
				...
				pr_info("Override clocksource %s is not currently HRT compatible - deferring\n",
					cs->name);
			}
		} else
			/* Override clocksource can be used. */
			best = cs;
		break;
	}

	if (curr_clocksource != best && !timekeeping_notify(best)) {
		pr_info("Switched to clocksource %s\n", best->name);
		curr_clocksource = best;
	}
}
/**
 * clocksource_select - Select the best clocksource available
 ...
 */
/*
 * clocksource_done_booting - Called near the end of core bootup
 ...
 */
static int __init clocksource_done_booting(void)
{
	...
	return 0;
}
static void clocksource_enqueue(struct clocksource *cs)
{
	struct list_head *entry = &clocksource_list;
	struct clocksource *tmp;

	/* Keep the list sorted by descending rating. */
	list_for_each_entry(tmp, &clocksource_list, list) {
		if (tmp->rating < cs->rating)
			break;
		entry = &tmp->list;
	}
	list_add(&cs->list, entry);
}
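Registration keeps clocksource_list sorted by descending rating, so selection can simply take the first acceptable entry; clocksource.h's documentation describes the rating bands (1-99 unfit for real use, up through 400-499 "perfect"). A toy model of the same rating-ordered insertion (the array-based list and these particular entries are for illustration, though tsc/hpet/jiffies do carry roughly these ratings in practice):

#include <stdio.h>

struct cs { const char *name; int rating; };

/* Insert new_cs so that higher-rated entries stay first. */
static void enqueue(struct cs *list, int *n, struct cs new_cs)
{
	int i = 0, j;

	while (i < *n && list[i].rating >= new_cs.rating)
		i++;			/* find first lower-rated entry */
	for (j = *n; j > i; j--)
		list[j] = list[j - 1];	/* shift the tail down */
	list[i] = new_cs;
	(*n)++;
}

int main(void)
{
	struct cs list[4];
	int n = 0;

	enqueue(list, &n, (struct cs){ "jiffies", 1 });
	enqueue(list, &n, (struct cs){ "hpet", 250 });
	enqueue(list, &n, (struct cs){ "tsc", 300 });

	for (int i = 0; i < n; i++)
		printf("%s(%d) ", list[i].name, list[i].rating);
	printf("\n");	/* tsc(300) hpet(250) jiffies(1) */
	return 0;
}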
/**
 * __clocksource_update_freq_scale - Used to update clocksource with new freq
 * @cs:		clocksource to be registered
 ...
 * This should only be called from the clocksource->enable() method.
 ...
 */
void __clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq)
{
	u64 sec;

	/*
	 * Default clocksources are *special* and self-define their mult/shift.
	 ...
	 */
	if (freq) {
		/*
		 * Calc the maximum number of seconds which we can run before
		 * wrapping around. For clocksources which have a mask > 32-bit
		 * we need to limit the max sleep time to have a good
		 * conversion precision. 10 minutes is still a reasonable
		 * amount. That results in a shift value of 24 for a
		 * clocksource with mask >= 40-bit and f >= 4GHz. That maps to
		 * ~0.06ppm granularity for NTP.
		 */
		sec = cs->mask;
		do_div(sec, freq);
		do_div(sec, scale);
		if (!sec)
			sec = 1;
		else if (sec > 600 && cs->mask > UINT_MAX)
			sec = 600;

		clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
				       NSEC_PER_SEC / scale, sec * scale);
	}

	/*
	 * If the uncertainty margin is not specified, calculate it. If
	 * both scale and freq are non-zero, calculate the clock period, but
	 * bound the result below at 2*WATCHDOG_MAX_SKEW. However, if either
	 * of scale or freq is zero, be very conservative
	 * and take the tens-of-milliseconds WATCHDOG_THRESHOLD value
	 * for the uncertainty margin.
	 ...
	 */
	if (scale && freq && !cs->uncertainty_margin) {
		cs->uncertainty_margin = NSEC_PER_SEC / (scale * freq);
		if (cs->uncertainty_margin < 2 * WATCHDOG_MAX_SKEW)
			cs->uncertainty_margin = 2 * WATCHDOG_MAX_SKEW;
	} else if (!cs->uncertainty_margin) {
		cs->uncertainty_margin = WATCHDOG_THRESHOLD;
	}
	WARN_ON_ONCE(cs->uncertainty_margin < 2 * WATCHDOG_MAX_SKEW);

	/*
	 * Ensure clocksources that have large 'mult' values don't overflow
	 * when adjusted.
	 */
	cs->maxadj = clocksource_max_adjustment(cs);
	while (freq && ((cs->mult + cs->maxadj < cs->mult)
		|| (cs->mult - cs->maxadj > cs->mult))) {
		cs->mult >>= 1;
		cs->shift--;
		cs->maxadj = clocksource_max_adjustment(cs);
	}

	/*
	 * Only warn for *special* clocksources that self-define
	 * their mult/shift values and don't specify a freq.
	 */
	WARN_ONCE(cs->mult + cs->maxadj < cs->mult,
		"timekeeping: Clocksource %s might overflow on 11%% adjustment\n",
		cs->name);

	clocksource_update_max_deferment(cs);

	pr_info("%s: mask: 0x%llx max_cycles: 0x%llx, max_idle_ns: %lld ns\n",
		cs->name, cs->mask, cs->max_cycles, cs->max_idle_ns);
}
/**
 * __clocksource_register_scale - Used to install new clocksources
 * @cs:		clocksource to be registered
 ...
 * Returns -EBUSY if registration fails, zero otherwise.
 ...
 */
int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
{
	...
	clocksource_arch_init(cs);

	if (WARN_ON_ONCE((unsigned int)cs->id >= CSID_MAX))
		cs->id = CSID_GENERIC;
	if (cs->vdso_clock_mode < 0 ||
	    cs->vdso_clock_mode >= VDSO_CLOCKMODE_MAX) {
		pr_warn("clocksource %s registered with invalid VDSO mode %d. Disabling VDSO support.\n",
			cs->name, cs->vdso_clock_mode);
		cs->vdso_clock_mode = VDSO_CLOCKMODE_NONE;
	}

	/* Initialize mult/shift and max_idle_ns */
	__clocksource_update_freq_scale(cs, scale, freq);
	...
	clocksource_enqueue(cs);
	clocksource_enqueue_watchdog(cs);
	...
	__clocksource_suspend_select(cs);
	...
	return 0;
}
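Drivers normally reach this through the clocksource_register_hz()/clocksource_register_khz() wrappers. A minimal hedged sketch of a memory-mapped counter driver (the foo_* names, the 24 MHz rate, and the 32-bit width are invented; mapping foo_base and wiring up the init call are omitted):

#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/io.h>

static void __iomem *foo_base;	/* mapped elsewhere; illustrative */

static u64 foo_read(struct clocksource *cs)
{
	return readl_relaxed(foo_base);	/* free-running 32-bit counter */
}

static struct clocksource foo_clocksource = {
	.name	= "foo-timer",
	.rating	= 250,
	.read	= foo_read,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init foo_timer_init(void)
{
	/* 24 MHz input clock; mult/shift are derived by the core above. */
	return clocksource_register_hz(&foo_clocksource, 24000000);
}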
/*
 * Unbind clocksource @cs. Called with clocksource_mutex held.
 */
static int clocksource_unbind(struct clocksource *cs)
{
	...
	if (clocksource_is_watchdog(cs)) {
		/* Select and try to install a replacement watchdog. */
		clocksource_select_watchdog(true);
		if (clocksource_is_watchdog(cs))
			return -EBUSY;
	}

	if (cs == curr_clocksource) {
		/* Select and try to install a replacement clock source. */
		clocksource_select_fallback();
		if (curr_clocksource == cs)
			return -EBUSY;
	}

	if (clocksource_is_suspend(cs)) {
		...
		clocksource_suspend_select(true);
	}
	...
	clocksource_dequeue_watchdog(cs);
	list_del_init(&cs->list);
	...
	return 0;
}
/**
 * clocksource_unregister - remove a registered clocksource
 * @cs:	clocksource to be unregistered
 */
int clocksource_unregister(struct clocksource *cs)
{
	int ret = 0;

	mutex_lock(&clocksource_mutex);
	if (!list_empty(&cs->list))
		ret = clocksource_unbind(cs);
	mutex_unlock(&clocksource_mutex);
	return ret;
}
/**
 * current_clocksource_show - sysfs interface for current clocksource
 ...
 */
static ssize_t current_clocksource_show(struct device *dev,
					struct device_attribute *attr, char *buf)
{
	ssize_t count = 0;
	...
	count = sysfs_emit(buf, "%s\n", curr_clocksource->name);
	...
	return count;
}
ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt)
{
	size_t ret = cnt;

	/* strings from sysfs write are not 0 terminated! */
	if (!cnt || cnt >= CS_NAME_LEN)
		return -EINVAL;

	/* strip off the trailing newline, if any */
	if (buf[cnt-1] == '\n')
		cnt--;
	if (cnt > 0)
		memcpy(dst, buf, cnt);
	dst[cnt] = 0;
	return ret;
}
/**
 * current_clocksource_store - interface for manually overriding clocksource
 ...
 */
static ssize_t current_clocksource_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	...
	if (ret >= 0)
		clocksource_select();
	...
}
/**
 * unbind_clocksource_store - interface for manually unbinding clocksource
 ...
 */
static ssize_t unbind_clocksource_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct clocksource *cs;
	char name[CS_NAME_LEN];
	ssize_t ret;

	ret = sysfs_get_uname(buf, name, count);
	if (ret < 0)
		return ret;

	ret = -ENODEV;
	...
	list_for_each_entry(cs, &clocksource_list, list) {
		if (strcmp(cs->name, name))
			continue;
		ret = clocksource_unbind(cs);
		break;
	}
	...
}
/**
 * available_clocksource_show - sysfs interface for listing clocksources
 ...
 */
static ssize_t available_clocksource_show(struct device *dev,
					  struct device_attribute *attr, char *buf)
{
	struct clocksource *src;
	ssize_t count = 0;
	...
	list_for_each_entry(src, &clocksource_list, list) {
		/*
		 * Don't show non-HRES clocksource if the tick code is
		 * in one-shot mode (highres=on or nohz=on).
		 */
		if (!tick_oneshot_mode_active() ||
		    (src->flags & CLOCK_SOURCE_VALID_FOR_HRES))
			count += snprintf(buf + count,
					  max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
					  "%s ", src->name);
	}
	...
	count += snprintf(buf + count,
			  max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");

	return count;
}
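From userspace these attributes appear under /sys/devices/system/clocksource/clocksource0/. A small hedged C sketch that lists the available clocksources and switches the current one (requires root; "tsc" is illustrative and error handling is kept minimal):

#include <stdio.h>

#define SYSFS_DIR "/sys/devices/system/clocksource/clocksource0/"

int main(void)
{
	char buf[256];
	FILE *f;

	f = fopen(SYSFS_DIR "available_clocksource", "r");
	if (f && fgets(buf, sizeof(buf), f))
		printf("available: %s", buf);
	if (f)
		fclose(f);

	/* Writing a name here invokes current_clocksource_store() above. */
	f = fopen(SYSFS_DIR "current_clocksource", "w");
	if (f) {
		fputs("tsc\n", f);
		fclose(f);
	}
	return 0;
}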
static struct device device_clocksource = {
	.id	= 0,
	.bus	= &clocksource_subsys,
};
/**
 * boot_override_clocksource - boot clock override
 ...
 */

/**
 * boot_override_clock - Compatibility layer for deprecated boot option
 ...
 */
static int __init boot_override_clock(char* str)
{
	if (!strcmp(str, "pmtmr")) {
		pr_warn("clock=pmtmr is deprecated - use clocksource=acpi_pm\n");
		return boot_override_clocksource("acpi_pm");
	}
	pr_warn("clock= boot option is deprecated - use clocksource=xyz\n");
	return boot_override_clocksource(str);
}