/*
 * sched_clock() for unstable CPU clocks
 *
 * Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra
 *
 * Updates and enhancements:
 *   Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
 *
 * Based on code by:
 *   Ingo Molnar <mingo@redhat.com>
 *   Guillaume Chazarain <guichaz@gmail.com>
 *
 *
 * What:
 *
 * cpu_clock(i) provides a fast (execution time) high resolution
 * clock with bounded drift between CPUs. The value of cpu_clock(i)
 * is monotonic for constant i. The timestamp returned is in nanoseconds.
 *
 * ######################### BIG FAT WARNING ##########################
 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 * # go backwards !!                                                  #
 * ####################################################################
 *
 * There is no strict promise about the base, although it tends to start
 * at 0 on boot (but people really shouldn't rely on that).
 *
 * cpu_clock(i)       -- can be used from any context, including NMI.
 * local_clock()      -- is cpu_clock() on the current CPU.
 *
 * sched_clock_cpu(i) -- the underlying per-CPU clock the above are built on.
 *
 * How:
 *
 * When !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK, the implementation uses
 * sched_clock() directly; in that case sched_clock() itself is assumed to
 * provide these properties (mostly it means the architecture provides a
 * globally synchronized highres time source).
 *
 * Otherwise it tries to create a semi stable clock from a mixture of other
 * clocks, including:
 *
 *  - GTOD (clock monotonic)
 *  - sched_clock()
 *  - explicit idle events
 *
 * We use GTOD as base and use sched_clock() deltas to improve resolution. The
 * deltas are filtered to provide monotonicity and to keep them within an
 * expected window.
 *
 * Furthermore, explicit sleep and wakeup hooks allow us to account for time
 * that is otherwise invisible (TSC gets stopped).
 *
 */
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/nmi.h>
#include <linux/sched/clock.h>
#include <linux/static_key.h>
#include <linux/workqueue.h>
#include <linux/compiler.h>
#include <linux/tick.h>

/*
 * Scheduler clock - returns current time in nanosec units.
 * This is the default implementation.
 * Architectures and sub-architectures can override this.
 */
unsigned long long __weak sched_clock(void)
{
	return (unsigned long long)(jiffies - INITIAL_JIFFIES)
					* (NSEC_PER_SEC / HZ);
}
EXPORT_SYMBOL_GPL(sched_clock);

__read_mostly int sched_clock_running;

void sched_clock_init(void)
{
	sched_clock_running = 1;
}
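
/*
 * Illustrative sketch (not part of this file): the weak jiffies-based
 * fallback above only advances once per jiffy, so its resolution is
 * NSEC_PER_SEC / HZ nanoseconds -- 4 ms with HZ=250. Two back-to-back
 * reads may therefore return the same value:
 *
 *	u64 a = sched_clock();
 *	u64 b = sched_clock();	// may equal 'a' on the jiffies fallback
 *
 * Architectures normally override sched_clock() with a cycle-counter
 * (e.g. TSC) based implementation to get true nanosecond resolution.
 */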
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
/*
 * We must start with !__sched_clock_stable because the unstable -> stable
 * transition is accurate, while the stable -> unstable transition is not.
 *
 * Similarly we start with __sched_clock_stable_early, thereby assuming we
 * will become stable, such that there's only a single 1 -> 0 transition.
 */
static DEFINE_STATIC_KEY_FALSE(__sched_clock_stable);
static int __sched_clock_stable_early = 1;

/*
 * We want: ktime_get_ns() + __gtod_offset == sched_clock() + __sched_clock_offset
 */
__read_mostly u64 __sched_clock_offset;
static __read_mostly u64 __gtod_offset;

struct sched_clock_data {
	u64			tick_raw;
	u64			tick_gtod;
	u64			clock;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);

static inline struct sched_clock_data *this_scd(void)
{
	return this_cpu_ptr(&sched_clock_data);
}

static inline struct sched_clock_data *cpu_sdc(int cpu)
{
	return &per_cpu(sched_clock_data, cpu);
}

int sched_clock_stable(void)
{
	return static_branch_likely(&__sched_clock_stable);
}

static void __set_sched_clock_stable(void)
{
	struct sched_clock_data *scd = this_scd();

	/*
	 * Attempt to make the (initial) unstable->stable transition continuous.
	 */
	__sched_clock_offset = (scd->tick_gtod + __gtod_offset) - (scd->tick_raw);

	printk(KERN_INFO "sched_clock: Marking stable (%lld, %lld)->(%lld, %lld)\n",
			scd->tick_gtod, __gtod_offset,
			scd->tick_raw,  __sched_clock_offset);

	static_branch_enable(&__sched_clock_stable);
	tick_dep_clear(TICK_DEP_BIT_CLOCK_UNSTABLE);
}

static void __sched_clock_work(struct work_struct *work)
{
	static_branch_disable(&__sched_clock_stable);
}

static DECLARE_WORK(sched_clock_work, __sched_clock_work);

static void __clear_sched_clock_stable(void)
{
	struct sched_clock_data *scd = this_scd();

	/*
	 * Attempt to make the stable->unstable transition continuous.
	 *
	 * Trouble is, this is typically called from the TSC watchdog
	 * timer, which is late by definition. This means the tick
	 * values can already be screwy.
	 *
	 * Still do what we can.
	 */
	__gtod_offset = (scd->tick_raw + __sched_clock_offset) - (scd->tick_gtod);

	printk(KERN_INFO "sched_clock: Marking unstable (%lld, %lld)<-(%lld, %lld)\n",
			scd->tick_gtod, __gtod_offset,
			scd->tick_raw,  __sched_clock_offset);

	tick_dep_set(TICK_DEP_BIT_CLOCK_UNSTABLE);

	if (sched_clock_stable())
		schedule_work(&sched_clock_work);
}

void clear_sched_clock_stable(void)
{
	__sched_clock_stable_early = 0;

	smp_mb(); /* matches sched_clock_init_late() */

	if (sched_clock_running == 2)
		__clear_sched_clock_stable();
}

void sched_clock_init_late(void)
{
	sched_clock_running = 2;
	/*
	 * Ensure that it is impossible to not do a static_key update.
	 *
	 * Either {set,clear}_sched_clock_stable() must see sched_clock_running
	 * and do the update, or we must see their __sched_clock_stable_early
	 * and do the update, or both.
	 */
	smp_mb(); /* matches {set,clear}_sched_clock_stable() */

	if (__sched_clock_stable_early)
		__set_sched_clock_stable();
}

/*
 * min, max except they take wrapping into account
 */

static inline u64 wrap_min(u64 x, u64 y)
{
	return (s64)(x - y) < 0 ? x : y;
}

static inline u64 wrap_max(u64 x, u64 y)
{
	return (s64)(x - y) > 0 ? x : y;
}
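
/*
 * A hedged example of why the signed-difference trick above is wrap-safe
 * (values invented for illustration): take x = 10 and y = ULLONG_MAX - 5,
 * i.e. y is 16 ns "behind" x across the u64 wrap. A plain 'x > y'
 * compare would pick y as the maximum, but:
 *
 *	(s64)(x - y) == 16 > 0	=> wrap_max(x, y) == x
 *
 * so x is correctly treated as the later timestamp. This only holds
 * while the two values are within 2^63 ns of each other, which is
 * plenty (~292 years).
 */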
/*
 * update the percpu scd from the raw @now value
 *
 * - filter out backward motion
 * - use the GTOD tick value to create a window to filter crazy TSC values
 */
static u64 sched_clock_local(struct sched_clock_data *scd)
{
	u64 now, clock, old_clock, min_clock, max_clock, gtod;
	s64 delta;

again:
	now = sched_clock();
	delta = now - scd->tick_raw;
	if (unlikely(delta < 0))
		delta = 0;

	old_clock = scd->clock;

	/*
	 * scd->clock = clamp(scd->tick_gtod + delta,
	 *		      max(scd->tick_gtod, scd->clock),
	 *		      scd->tick_gtod + TICK_NSEC);
	 */

	gtod = scd->tick_gtod + __gtod_offset;
	clock = gtod + delta;
	min_clock = wrap_max(gtod, old_clock);
	max_clock = wrap_max(old_clock, gtod + TICK_NSEC);

	clock = wrap_max(clock, min_clock);
	clock = wrap_min(clock, max_clock);

	if (cmpxchg64(&scd->clock, old_clock, clock) != old_clock)
		goto again;

	return clock;
}
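
/*
 * Worked example of the clamp above, with made-up numbers: assume
 * gtod == 1000000, old_clock == 1003000, TICK_NSEC == 1000000 and a
 * raw sched_clock() delta of 100. Then:
 *
 *	clock     = 1000000 + 100 = 1000100
 *	min_clock = max(1000000, 1003000) = 1003000
 *	max_clock = max(1003000, 2000000) = 2000000
 *
 * and clock is lifted to 1003000: the value never goes backwards. A
 * runaway TSC delta would likewise have been capped at one tick past
 * the GTOD base (max_clock).
 */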
321 */ 322 u64 sched_clock_cpu(int cpu) 323 { 324 struct sched_clock_data *scd; 325 u64 clock; 326 327 if (sched_clock_stable()) 328 return sched_clock() + __sched_clock_offset; 329 330 if (unlikely(!sched_clock_running)) 331 return 0ull; 332 333 preempt_disable_notrace(); 334 scd = cpu_sdc(cpu); 335 336 if (cpu != smp_processor_id()) 337 clock = sched_clock_remote(scd); 338 else 339 clock = sched_clock_local(scd); 340 preempt_enable_notrace(); 341 342 return clock; 343 } 344 EXPORT_SYMBOL_GPL(sched_clock_cpu); 345 346 void sched_clock_tick(void) 347 { 348 struct sched_clock_data *scd; 349 350 WARN_ON_ONCE(!irqs_disabled()); 351 352 /* 353 * Update these values even if sched_clock_stable(), because it can 354 * become unstable at any point in time at which point we need some 355 * values to fall back on. 356 * 357 * XXX arguably we can skip this if we expose tsc_clocksource_reliable 358 */ 359 scd = this_scd(); 360 scd->tick_raw = sched_clock(); 361 scd->tick_gtod = ktime_get_ns(); 362 363 if (!sched_clock_stable() && likely(sched_clock_running)) 364 sched_clock_local(scd); 365 } 366 367 /* 368 * We are going deep-idle (irqs are disabled): 369 */ 370 void sched_clock_idle_sleep_event(void) 371 { 372 sched_clock_cpu(smp_processor_id()); 373 } 374 EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event); 375 376 /* 377 * We just idled delta nanoseconds (called with irqs disabled): 378 */ 379 void sched_clock_idle_wakeup_event(u64 delta_ns) 380 { 381 if (timekeeping_suspended) 382 return; 383 384 sched_clock_tick(); 385 touch_softlockup_watchdog_sched(); 386 } 387 EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event); 388 389 #else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */ 390 391 u64 sched_clock_cpu(int cpu) 392 { 393 if (unlikely(!sched_clock_running)) 394 return 0; 395 396 return sched_clock(); 397 } 398 399 #endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */ 400 401 /* 402 * Running clock - returns the time that has elapsed while a guest has been 403 * running. 404 * On a guest this value should be local_clock minus the time the guest was 405 * suspended by the hypervisor (for any reason). 406 * On bare metal this function should return the same as local_clock. 407 * Architectures and sub-architectures can override this. 408 */ 409 u64 __weak running_clock(void) 410 { 411 return local_clock(); 412 } 413