Lines Matching +full:int +full:- +full:clock +full:- +full:stable +full:- +full:broken
1 // SPDX-License-Identifier: GPL-2.0-only
18 * clock with bounded drift between CPUs. The value of cpu_clock(i)
29 * cpu_clock(i) -- can be used from any context, including NMI.
30 * local_clock() -- is cpu_clock() on the current CPU.
41 * Otherwise it tries to create a semi stable clock from a mixture of other
44 * - GTOD (clock monotonic)
45 * - sched_clock()
46 * - explicit idle events
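The guarantees spelled out above make local_clock() / cpu_clock(i) a cheap nanosecond timestamp source. A minimal usage sketch, assuming an out-of-tree test module (the module name, the msleep() and the message are invented, not from this file):

        /*
         * Hedged sketch, not part of clock.c: a throwaway module that
         * timestamps a 10 ms sleep with local_clock().
         */
        #include <linux/module.h>
        #include <linux/kernel.h>
        #include <linux/delay.h>
        #include <linux/sched/clock.h>

        static int __init clock_demo_init(void)
        {
                u64 t0, t1;

                t0 = local_clock();     /* nanoseconds on the current CPU */
                msleep(10);
                t1 = local_clock();

                pr_info("clock_demo: ~10 ms sleep measured as %llu ns\n", t1 - t0);
                return 0;
        }

        static void __exit clock_demo_exit(void)
        {
        }

        module_init(clock_demo_init);
        module_exit(clock_demo_exit);
        MODULE_LICENSE("GPL");

Because the task may migrate while it sleeps, the two reads can come from different CPUs; the bounded-drift guarantee quoted above is what keeps such a delta meaningful.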
57 #include <linux/sched/clock.h>
61 * Scheduler clock - returns current time in nanosec units.
63 * Architectures and sub-architectures can override this.
67 return (unsigned long long)(jiffies - INITIAL_JIFFIES) in sched_clock()
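The jiffies-based fallback above can only advance in whole-jiffy steps; assuming the usual jiffies-to-nanoseconds conversion (NSEC_PER_SEC / HZ) and an invented HZ value, the resolution works out as in this standalone sketch:

        /* Illustrative arithmetic only; HZ = 250 is an assumed config value. */
        #include <stdio.h>
        #include <stdint.h>

        #define NSEC_PER_SEC 1000000000ULL
        #define HZ 250

        int main(void)
        {
                uint64_t jiffies_delta = 3;     /* jiffies elapsed since INITIAL_JIFFIES */
                uint64_t ns = jiffies_delta * (NSEC_PER_SEC / HZ);

                printf("%llu jiffies = %llu ns (%llu ns per jiffy)\n",
                       (unsigned long long)jiffies_delta,
                       (unsigned long long)ns,
                       (unsigned long long)(NSEC_PER_SEC / HZ));
                return 0;
        }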
76 * We must start with !__sched_clock_stable because the unstable -> stable
77 * transition is accurate, while the stable -> unstable transition is not.
80 * will become stable, such that there's only a single 1 -> 0 transition.
83 static int __sched_clock_stable_early = 1;
94 u64 clock; member
104 notrace static inline struct sched_clock_data *cpu_sdc(int cpu) in cpu_sdc()
109 notrace int sched_clock_stable(void) in sched_clock_stable()
116 scd->tick_gtod = ktime_get_ns(); in __scd_stamp()
117 scd->tick_raw = sched_clock(); in __scd_stamp()
126 * to disable IRQs in order to get a consistent scd->tick* reading. in __set_sched_clock_stable()
131 * Attempt to make the (initial) unstable->stable transition continuous. in __set_sched_clock_stable()
133 __sched_clock_offset = (scd->tick_gtod + __gtod_offset) - (scd->tick_raw); in __set_sched_clock_stable()
136 printk(KERN_INFO "sched_clock: Marking stable (%lld, %lld)->(%lld, %lld)\n", in __set_sched_clock_stable()
137 scd->tick_gtod, __gtod_offset, in __set_sched_clock_stable()
138 scd->tick_raw, __sched_clock_offset); in __set_sched_clock_stable()
145 * If we ever get here, we're screwed, because we found out -- typically after
146 * the fact -- that TSC wasn't good. This means all our clocksources (including
152 * The only way to fully avoid random clock jumps is to boot with:
158 int cpu; in __sched_clock_work()
164 scd->clock = scd->tick_gtod + __gtod_offset; in __sched_clock_work()
171 …printk(KERN_WARNING "TSC found unstable after boot, most likely due to broken BIOS. Use 'tsc=unsta… in __sched_clock_work()
172 printk(KERN_INFO "sched_clock: Marking unstable (%lld, %lld)<-(%lld, %lld)\n", in __sched_clock_work()
173 scd->tick_gtod, __gtod_offset, in __sched_clock_work()
174 scd->tick_raw, __sched_clock_offset); in __sched_clock_work()
205 __gtod_offset = (scd->tick_raw + __sched_clock_offset) - scd->tick_gtod; in __sched_clock_gtod_offset()
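The __sched_clock_offset and __gtod_offset assignments quoted above are mirror images: each picks the new offset so that the clock value is identical on both sides of the stable/unstable switch. A standalone sketch with invented sample values, checking that continuity property:

        /*
         * Hedged sketch: the numbers are made up; only the two assignments
         * mirror the lines quoted above.  u64 wrap-around arithmetic makes
         * both identities hold regardless of which clock is ahead.
         */
        #include <stdio.h>
        #include <stdint.h>
        #include <assert.h>

        int main(void)
        {
                uint64_t tick_raw  = 5000000000ULL;     /* sched_clock() sample  */
                uint64_t tick_gtod = 4800000000ULL;     /* ktime_get_ns() sample */
                uint64_t gtod_offset = 0, sched_clock_offset;

                /* unstable -> stable: raw + offset must equal gtod + __gtod_offset now */
                sched_clock_offset = (tick_gtod + gtod_offset) - tick_raw;
                assert(tick_raw + sched_clock_offset == tick_gtod + gtod_offset);

                /* later, stable -> unstable: gtod + __gtod_offset must equal raw + offset now */
                tick_raw  = 9100000000ULL;
                tick_gtod = 9000000000ULL;
                gtod_offset = (tick_raw + sched_clock_offset) - tick_gtod;
                assert(tick_gtod + gtod_offset == tick_raw + sched_clock_offset);

                printf("both transitions are continuous at the switch point\n");
                return 0;
        }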
224 * We run this as late_initcall() such that it runs after all built-in drivers,
227 static int __init sched_clock_init_late(void) in sched_clock_init_late()
252 return (s64)(x - y) < 0 ? x : y; in wrap_min()
257 return (s64)(x - y) > 0 ? x : y; in wrap_max()
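wrap_min() and wrap_max() compare via a signed difference rather than a plain "<", so they still pick the right value when one u64 operand has wrapped. A self-contained sketch of the same idiom, with invented values near the wrap point:

        /*
         * Hedged sketch: standalone copies of the two helpers above, plus a
         * check that the signed-difference trick treats a value just past
         * 2^64 as "later" than one just before it.
         */
        #include <stdio.h>
        #include <stdint.h>
        #include <assert.h>

        typedef uint64_t u64;
        typedef int64_t s64;

        static u64 wrap_min(u64 x, u64 y) { return (s64)(x - y) < 0 ? x : y; }
        static u64 wrap_max(u64 x, u64 y) { return (s64)(x - y) > 0 ? x : y; }

        int main(void)
        {
                u64 a = (u64)-100;      /* just before the wrap */
                u64 b = 50;             /* just after the wrap  */

                /* a plain "a < b" would call b the smaller value; the signed
                 * difference sees b as 150 units after a */
                assert(wrap_min(a, b) == a);
                assert(wrap_max(a, b) == b);
                printf("wrap-safe min/max behave correctly across the 2^64 boundary\n");
                return 0;
        }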
263 * - filter out backward motion
264 * - use the GTOD tick value to create a window to filter crazy TSC values
268 u64 now, clock, old_clock, min_clock, max_clock, gtod; in sched_clock_local() local
273 delta = now - scd->tick_raw; in sched_clock_local()
277 old_clock = scd->clock; in sched_clock_local()
280 * scd->clock = clamp(scd->tick_gtod + delta, in sched_clock_local()
281 * max(scd->tick_gtod, scd->clock), in sched_clock_local()
282 * scd->tick_gtod + TICK_NSEC); in sched_clock_local()
285 gtod = scd->tick_gtod + __gtod_offset; in sched_clock_local()
286 clock = gtod + delta; in sched_clock_local()
290 clock = wrap_max(clock, min_clock); in sched_clock_local()
291 clock = wrap_min(clock, max_clock); in sched_clock_local()
293 if (!raw_try_cmpxchg64(&scd->clock, &old_clock, clock)) in sched_clock_local()
296 return clock; in sched_clock_local()
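Putting the sched_clock_local() fragments together: the clamp described in the comment above keeps the returned value from moving backwards and from running more than one tick ahead of the GTOD base. A standalone sketch with invented inputs and an assumed 1 ms TICK_NSEC, reusing the wrap helpers quoted earlier:

        /*
         * Hedged sketch of the windowing in sched_clock_local(): the raw delta
         * since the last tick is added to the GTOD base, then clamped per the
         * comment above.  All numbers are invented.
         */
        #include <stdio.h>
        #include <stdint.h>

        typedef uint64_t u64;
        typedef int64_t s64;

        #define TICK_NSEC 1000000ULL    /* assumed 1 ms tick for the example */

        static u64 wrap_min(u64 x, u64 y) { return (s64)(x - y) < 0 ? x : y; }
        static u64 wrap_max(u64 x, u64 y) { return (s64)(x - y) > 0 ? x : y; }

        int main(void)
        {
                u64 tick_raw  = 1000000000ULL;  /* sched_clock() at the last tick        */
                u64 tick_gtod = 2000000000ULL;  /* GTOD (+ __gtod_offset) at that tick   */
                u64 old_clock = 2000300000ULL;  /* previously returned value             */
                u64 now       = 1005000000ULL;  /* a "crazy" 5 ms raw jump               */

                u64 delta = now - tick_raw;
                u64 clock = tick_gtod + delta;                  /* candidate            */
                u64 min_clock = wrap_max(tick_gtod, old_clock); /* no backward motion   */
                u64 max_clock = tick_gtod + TICK_NSEC;          /* at most one tick out */

                clock = wrap_max(clock, min_clock);
                clock = wrap_min(clock, max_clock);

                printf("candidate %llu ns clamped to %llu ns\n",
                       (unsigned long long)(tick_gtod + delta),
                       (unsigned long long)clock);
                return 0;
        }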
301 u64 clock; in local_clock_noinstr() local
309 clock = sched_clock_local(this_scd()); in local_clock_noinstr()
311 return clock; in local_clock_noinstr()
333 * Careful here: The local and the remote clock values need to in sched_clock_remote()
339 * 32-bit kernels as an NMI could use sched_clock_local() via the in sched_clock_remote()
341 * the low 32-bit and the high 32-bit portion. in sched_clock_remote()
345 * We must enforce atomic readout on 32-bit, otherwise the in sched_clock_remote()
347 * the low 32-bit and the high 32-bit portion. in sched_clock_remote()
349 remote_clock = cmpxchg64(&scd->clock, 0, 0); in sched_clock_remote()
352 * On 64-bit kernels the read of [my]scd->clock is atomic versus the in sched_clock_remote()
353 * update, so we can avoid the above 32-bit dance. in sched_clock_remote()
357 this_clock = my_scd->clock; in sched_clock_remote()
358 remote_clock = scd->clock; in sched_clock_remote()
367 if (likely((s64)(remote_clock - this_clock) < 0)) { in sched_clock_remote()
368 ptr = &scd->clock; in sched_clock_remote()
375 ptr = &my_scd->clock; in sched_clock_remote()
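For the remote case, the fragments above select which per-CPU clock to update: whichever of the two readings is behind gets pulled up to the larger value, so reads keep moving forward on both CPUs. A single-threaded sketch of that coupling step (the cmpxchg stand-in and all numbers are invented, and the real retry loop is omitted):

        /*
         * Hedged sketch of the coupling in sched_clock_remote(): the smaller
         * of the two clocks is advanced to the larger one via a
         * compare-and-swap stand-in.
         */
        #include <stdio.h>
        #include <stdint.h>
        #include <stdbool.h>

        typedef uint64_t u64;
        typedef int64_t s64;

        /* stand-in for try_cmpxchg64(); single-threaded, so it always wins */
        static bool try_cmpxchg64_sim(u64 *ptr, u64 *old, u64 new)
        {
                if (*ptr != *old) {
                        *old = *ptr;
                        return false;
                }
                *ptr = new;
                return true;
        }

        int main(void)
        {
                u64 this_clock = 1000000500ULL;         /* local CPU's clock  */
                u64 remote_clock = 1000000200ULL;       /* remote CPU's clock */
                u64 *ptr, old_val, val;

                if ((s64)(remote_clock - this_clock) < 0) {
                        /* remote is behind: pull it forward to the local value */
                        ptr = &remote_clock;
                        old_val = remote_clock;
                        val = this_clock;
                } else {
                        /* local is behind (rare): adopt the remote value */
                        ptr = &this_clock;
                        old_val = this_clock;
                        val = remote_clock;
                }

                if (try_cmpxchg64_sim(ptr, &old_val, val))
                        printf("reported clock: %llu ns\n", (unsigned long long)val);
                return 0;
        }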
391 notrace u64 sched_clock_cpu(int cpu) in sched_clock_cpu()
394 u64 clock; in sched_clock_cpu() local
406 clock = sched_clock_remote(scd); in sched_clock_cpu()
408 clock = sched_clock_local(scd); in sched_clock_cpu()
411 return clock; in sched_clock_cpu()
440 * The watchdog just found this TSC to (still) be stable, so now is a in sched_clock_tick_stable()
450 * We are going deep-idle (IRQs are disabled):
487 notrace u64 sched_clock_cpu(int cpu) in sched_clock_cpu()
498 * Running clock - returns the time that has elapsed while a guest has been
503 * Architectures and sub-architectures can override this.