xref: /titanic_41/usr/src/uts/i86pc/os/timestamp.c (revision 92d53ff13bfcc9a9e4f4b6e672ed3105597039fa)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/disp.h>
#include <sys/var.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/x86_archext.h>
#include <sys/archsystm.h>
#include <sys/cpuvar.h>
#include <sys/psm_defs.h>
#include <sys/clock.h>
#include <sys/atomic.h>
#include <sys/lockstat.h>
#include <sys/smp_impldefs.h>
#include <sys/dtrace.h>
#include <sys/time.h>
#include <sys/panic.h>
#include <sys/cpu.h>

/*
 * Using the Pentium's TSC register for gethrtime()
 * ------------------------------------------------
 *
 * The Pentium family, like many chip architectures, has a high-resolution
 * timestamp counter ("TSC") which increments once per CPU cycle.  The contents
 * of the timestamp counter are read with the RDTSC instruction.
 *
 * As with its UltraSPARC equivalent (the %tick register), TSC's cycle count
 * must be translated into nanoseconds in order to implement gethrtime().
 * We avoid inducing floating point operations in this conversion by
 * implementing the same nsec_scale algorithm as that found in the sun4u
 * platform code.  The sun4u NATIVE_TIME_TO_NSEC_SCALE block comment contains
 * a detailed description of the algorithm; the comment is not reproduced
 * here.  This implementation differs only in its value for NSEC_SHIFT:
 * we implement an NSEC_SHIFT of 5 (instead of sun4u's 4) to allow for
 * 60 MHz Pentiums.
 *
 * While TSC and %tick are both cycle counting registers, TSC's functionality
 * falls short in several critical ways:
 *
 *  (a)	TSCs on different CPUs are not guaranteed to be in sync.  While in
 *	practice they often _are_ in sync, this isn't guaranteed by the
 *	architecture.
 *
 *  (b)	The TSC cannot be reliably set to an arbitrary value.  The architecture
 *	only supports writing the low 32-bits of TSC, making it impractical
 *	to rewrite.
 *
 *  (c)	The architecture doesn't have the capacity to interrupt based on
 *	arbitrary values of TSC; there is no TICK_CMPR equivalent.
 *
 * Together, (a) and (b) imply that software must track the skew between
 * TSCs and account for it (it is assumed that while there may exist skew,
 * there does not exist drift).  To determine the skew between CPUs, we
 * have newly onlined CPUs call tsc_sync_slave(), while the CPU performing
 * the online operation calls tsc_sync_master().
 *
 * In the absence of time-of-day clock adjustments, gethrtime() must stay in
 * sync with gettimeofday().  This is problematic; given (c), the software
 * cannot drive its time-of-day source from TSC, and yet they must somehow be
 * kept in sync.  We implement this by having a routine, tsc_tick(), which
 * is called once per second from the interrupt which drives time-of-day.
 *
 * Note that the hrtime base for gethrtime, tsc_hrtime_base, is modified
 * atomically with nsec_scale under CLOCK_LOCK.  This assures that time
 * monotonically increases.
 */
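/*
 * Editorial aside (a condensed restatement of the sun4u algorithm referenced
 * above, not new behavior): with NSEC_SHIFT == 5 the conversion factors are
 *
 *	nsec_scale = NANOSEC * 2^(32 - NSEC_SHIFT) / cpu_freq_hz
 *	nsec	   = (tsc * nsec_scale) >> (32 - NSEC_SHIFT)
 *
 * For a hypothetical 3 GHz CPU, nsec_scale is about 44739242, and one second
 * of cycles (3 * 10^9 ticks) converts back to roughly 10^9 ns.
 */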

#define	NSEC_SHIFT 5

static uint_t nsec_scale;
static uint_t nsec_unscale;

/*
 * These two variables used to be grouped together inside of a structure that
 * lived on a single cache line. A regression (bug ID 4623398) caused the
 * compiler to emit code that "optimized" away the while-loops below. The
 * result was that no synchronization between the onlining and onlined CPUs
 * took place.
 */
static volatile int tsc_ready;
static volatile int tsc_sync_go;

/*
 * Used as indices into the tsc_sync_snaps[] array (historical; the snapshot
 * array has since been replaced by the tsc_sync_t structure below).
 */
#define	TSC_MASTER		0
#define	TSC_SLAVE		1

/*
 * Used in the tsc_sync_master()/tsc_sync_slave() rendezvous.
 */
#define	TSC_SYNC_STOP		1
#define	TSC_SYNC_GO		2
#define	TSC_SYNC_DONE		3
#define	SYNC_ITERATIONS		10

#define	TSC_CONVERT_AND_ADD(tsc, hrt, scale) {		\
	unsigned int *_l = (unsigned int *)&(tsc);	\
	(hrt) += mul32(_l[1], scale) << NSEC_SHIFT;	\
	(hrt) += mul32(_l[0], scale) >> (32 - NSEC_SHIFT); \
}

#define	TSC_CONVERT(tsc, hrt, scale) {			\
	unsigned int *_l = (unsigned int *)&(tsc);	\
	(hrt) = mul32(_l[1], scale) << NSEC_SHIFT;	\
	(hrt) += mul32(_l[0], scale) >> (32 - NSEC_SHIFT); \
}
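
/*
 * Illustrative sketch only (not part of the original code, and never
 * compiled -- the TSC_CONVERT_EXAMPLE guard is hypothetical and nowhere
 * defined): the same conversion as TSC_CONVERT(), written with plain 64-bit
 * arithmetic.  The macros above split the 64-bit TSC value into 32-bit
 * halves so that each mul32() is a 32x32->64 multiply and no 128-bit
 * intermediate is needed.
 */
#ifdef TSC_CONVERT_EXAMPLE
static hrtime_t
tsc_convert_example(uint64_t tsc, uint_t scale)
{
	uint32_t lo = (uint32_t)tsc;		/* low 32 bits of the TSC */
	uint32_t hi = (uint32_t)(tsc >> 32);	/* high 32 bits of the TSC */

	/*
	 * tsc * scale / 2^(32 - NSEC_SHIFT)
	 *   == (hi * scale) << NSEC_SHIFT  +  (lo * scale) >> (32 - NSEC_SHIFT)
	 * with the same range assumptions as TSC_CONVERT().
	 */
	return ((hrtime_t)(((uint64_t)hi * scale) << NSEC_SHIFT) +
	    (hrtime_t)(((uint64_t)lo * scale) >> (32 - NSEC_SHIFT)));
}
#endif	/* TSC_CONVERT_EXAMPLE */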

int tsc_master_slave_sync_needed = 1;

static int	tsc_max_delta;
static hrtime_t tsc_sync_tick_delta[NCPU];
typedef struct tsc_sync {
	volatile hrtime_t master_tsc, slave_tsc;
} tsc_sync_t;
static tsc_sync_t *tscp;
static hrtime_t largest_tsc_delta = 0;
static ulong_t shortest_write_time = ~0UL;

static hrtime_t	tsc_last = 0;
static hrtime_t	tsc_last_jumped = 0;
static hrtime_t	tsc_hrtime_base = 0;
static int	tsc_jumped = 0;

static hrtime_t	shadow_tsc_hrtime_base;
static hrtime_t	shadow_tsc_last;
static uint_t	shadow_nsec_scale;
static uint32_t	shadow_hres_lock;
int get_tsc_ready();

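/*
 * Editorial note on the retry loops below: hres_lock combines a lock byte
 * with a version count.  A reader samples hres_lock, performs its reads,
 * and retries unless the masked sample ((old_hres_lock & ~1), i.e. with
 * the "locked" low bit cleared) still equals the live value -- meaning the
 * time base was neither locked nor updated while it was being read.
 */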
hrtime_t
tsc_gethrtime(void)
{
	uint32_t old_hres_lock;
	hrtime_t tsc, hrt;

	do {
		old_hres_lock = hres_lock;

		if ((tsc = tsc_read()) >= tsc_last) {
			/*
			 * It would seem to be obvious that this is true
			 * (that is, the past is less than the present),
			 * but it isn't true in the presence of suspend/resume
			 * cycles.  If we manage to call gethrtime()
			 * after a resume, but before the first call to
			 * tsc_tick(), we will see the jump.  In this case,
			 * we will simply use the value in TSC as the delta.
			 */
			tsc -= tsc_last;
		} else if (tsc >= tsc_last - 2 * tsc_max_delta) {
			/*
			 * There is a chance that tsc_tick() has just run on
			 * another CPU, and we have drifted just enough so that
			 * we appear behind tsc_last.  In this case, force the
			 * delta to be zero.
			 */
			tsc = 0;
		}

		hrt = tsc_hrtime_base;

		TSC_CONVERT_AND_ADD(tsc, hrt, nsec_scale);
	} while ((old_hres_lock & ~1) != hres_lock);

	return (hrt);
}

hrtime_t
tsc_gethrtime_delta(void)
{
	uint32_t old_hres_lock;
	hrtime_t tsc, hrt;
	ulong_t flags;

	do {
		old_hres_lock = hres_lock;

		/*
		 * We need to disable interrupts here to assure that we
		 * don't migrate between the call to tsc_read() and
		 * adding the CPU's TSC tick delta. Note that disabling
		 * and reenabling preemption is forbidden here because
		 * we may be in the middle of a fast trap. In the amd64
		 * kernel we cannot tolerate preemption during a fast
		 * trap. See _update_sregs().
		 */

		flags = clear_int_flag();
		tsc = tsc_read() + tsc_sync_tick_delta[CPU->cpu_id];
		restore_int_flag(flags);

		/* See comments in tsc_gethrtime() above */

		if (tsc >= tsc_last) {
			tsc -= tsc_last;
		} else if (tsc >= tsc_last - 2 * tsc_max_delta) {
			tsc = 0;
		}

		hrt = tsc_hrtime_base;

		TSC_CONVERT_AND_ADD(tsc, hrt, nsec_scale);
	} while ((old_hres_lock & ~1) != hres_lock);

	return (hrt);
}

/*
 * This is similar to the above, but it cannot actually spin on hres_lock.
 * As a result, it caches all of the variables it needs; if the variables
 * don't change, it's done.
 */
hrtime_t
dtrace_gethrtime(void)
{
	uint32_t old_hres_lock;
	hrtime_t tsc, hrt;
	ulong_t flags;

	do {
		old_hres_lock = hres_lock;

		/*
		 * Interrupts are disabled to ensure that the thread isn't
		 * migrated between the tsc_read() and adding the CPU's
		 * TSC tick delta.
		 */
		flags = clear_int_flag();

		tsc = tsc_read();

		if (gethrtimef == tsc_gethrtime_delta)
			tsc += tsc_sync_tick_delta[CPU->cpu_id];

		restore_int_flag(flags);

		/*
		 * See the comments in tsc_gethrtime(), above.
		 */
		if (tsc >= tsc_last)
			tsc -= tsc_last;
		else if (tsc >= tsc_last - 2 * tsc_max_delta)
			tsc = 0;

		hrt = tsc_hrtime_base;

		TSC_CONVERT_AND_ADD(tsc, hrt, nsec_scale);

		if ((old_hres_lock & ~1) == hres_lock)
			break;

		/*
		 * If we're here, the clock lock is locked -- or it has been
		 * unlocked and locked since we looked.  This may be due to
		 * tsc_tick() running on another CPU -- or it may be because
		 * some code path has ended up in dtrace_probe() with
		 * CLOCK_LOCK held.  We'll try to determine that we're in
		 * the former case by taking another lap if the lock has
		 * changed since when we first looked at it.
		 */
		if (old_hres_lock != hres_lock)
			continue;

		/*
		 * So the lock was and is locked.  We'll use the old data
		 * instead.
		 */
		old_hres_lock = shadow_hres_lock;

		/*
		 * Again, disable interrupts to ensure that the thread
		 * isn't migrated between the tsc_read() and adding
		 * the CPU's TSC tick delta.
		 */
		flags = clear_int_flag();

		tsc = tsc_read();

		if (gethrtimef == tsc_gethrtime_delta)
			tsc += tsc_sync_tick_delta[CPU->cpu_id];

		restore_int_flag(flags);

		/*
		 * See the comments in tsc_gethrtime(), above.
		 */
		if (tsc >= shadow_tsc_last)
			tsc -= shadow_tsc_last;
		else if (tsc >= shadow_tsc_last - 2 * tsc_max_delta)
			tsc = 0;

		hrt = shadow_tsc_hrtime_base;

		TSC_CONVERT_AND_ADD(tsc, hrt, shadow_nsec_scale);
	} while ((old_hres_lock & ~1) != shadow_hres_lock);

	return (hrt);
}

hrtime_t
tsc_gethrtimeunscaled(void)
{
	uint32_t old_hres_lock;
	hrtime_t tsc;

	do {
		old_hres_lock = hres_lock;

		/* See tsc_tick(). */
		tsc = tsc_read() + tsc_last_jumped;
	} while ((old_hres_lock & ~1) != hres_lock);

	return (tsc);
}

/*
 * Convert a nanosecond-based timestamp to TSC ticks.
 */
uint64_t
tsc_unscalehrtime(hrtime_t nsec)
{
	hrtime_t tsc;

	if (tsc_gethrtime_enable) {
		TSC_CONVERT(nsec, tsc, nsec_unscale);
		return (tsc);
	}
	return ((uint64_t)nsec);
}

/* Convert a TSC timestamp to nanoseconds. */
void
tsc_scalehrtime(hrtime_t *tsc)
{
	hrtime_t hrt;
	hrtime_t mytsc;

	if (tsc == NULL)
		return;
	mytsc = *tsc;

	TSC_CONVERT(mytsc, hrt, nsec_scale);
	*tsc = hrt;
}

hrtime_t
tsc_gethrtimeunscaled_delta(void)
{
	hrtime_t hrt;
	ulong_t flags;

	/*
	 * As in tsc_gethrtime_delta(), interrupts are disabled to prevent
	 * migration between the call to tsc_gethrtimeunscaled() and adding
	 * the CPU's hrtime delta.  Note that disabling and reenabling
	 * preemption is forbidden here because we may be in the middle of
	 * a fast trap.  In the amd64 kernel we cannot tolerate preemption
	 * during a fast trap.  See _update_sregs().
	 */

	flags = clear_int_flag();
	hrt = tsc_gethrtimeunscaled() + tsc_sync_tick_delta[CPU->cpu_id];
	restore_int_flag(flags);

	return (hrt);
}

/*
 * Called by the master in the TSC sync operation (usually the boot CPU).
 * If the slave is discovered to have a skew, gethrtimef will be changed to
 * point to tsc_gethrtime_delta(). Calculating skews is precise only when
 * the master and slave TSCs are read simultaneously; however, there is no
 * algorithm that can read both CPUs in perfect simultaneity. The proposed
 * algorithm is an approximate method based on the behaviour of cache
 * management. The slave CPU continuously reads TSC and then reads a global
 * variable which the master CPU updates. The moment the master's update
 * becomes visible to the slave (the write being forced out by an mfence
 * operation), we use the TSC reading taken on the slave. A corresponding
 * TSC read is taken on the master as soon as possible after finishing the
 * mfence operation. But the delay between causing the slave to notice the
 * invalid cache line and the completion of the mfence is not repeatable.
 * This error is heuristically assumed to be 1/4th of the total write time,
 * as measured by the two TSC reads on the master sandwiching the mfence.
 * Furthermore, due to the nature of bus arbitration, contention on the
 * memory bus, etc., the time taken for the write to become globally
 * visible can vary considerably. So instead of taking a single reading, a
 * set of readings is taken, and the one with the shortest write time is
 * chosen to calculate the final skew.
 *
 * TSC sync is disabled in the context of virtualization because the CPUs
 * assigned to the guest are virtual CPUs, which means the real CPUs on
 * which the guest runs can change during the lifetime of the guest OS. We
 * would otherwise end up calculating TSC skews for one set of physical
 * CPUs at boot, only to have the guest migrate to a different set of
 * physical CPUs at a later point in time.
 */
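/*
 * A condensed sketch of one rendezvous iteration (editorial, derived from
 * the code below; master on the left, slave on the right):
 *
 *	wait for TSC_SYNC_GO		<- sets tsc_sync_go = TSC_SYNC_GO
 *	master_tsc = tsc_read()
 *	mfence (membar_enter)		-> spins until master_tsc != 0
 *	mtsc_after = tsc_read()		   slave_tsc = tsc_read(); mfence
 *	wait for TSC_SYNC_DONE		<- sets tsc_sync_go = TSC_SYNC_DONE
 *	write_time = mtsc_after - master_tsc
 *	tdelta ~= slave_tsc - (mtsc_after - write_time / 4)
 *
 * The iteration with the smallest write_time is the one trusted to compute
 * the per-CPU delta.
 */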
void
tsc_sync_master(processorid_t slave)
{
	ulong_t flags, source, min_write_time = ~0UL;
	hrtime_t write_time, x, mtsc_after, tdelta;
	tsc_sync_t *tsc = tscp;
	int cnt;
	int hwtype;

	hwtype = get_hwenv();
	if (!tsc_master_slave_sync_needed || (hwtype & HW_VIRTUAL) != 0)
		return;

	flags = clear_int_flag();
	source = CPU->cpu_id;

	for (cnt = 0; cnt < SYNC_ITERATIONS; cnt++) {
		while (tsc_sync_go != TSC_SYNC_GO)
			SMT_PAUSE();

		tsc->master_tsc = tsc_read();
		membar_enter();
		mtsc_after = tsc_read();
		while (tsc_sync_go != TSC_SYNC_DONE)
			SMT_PAUSE();
		write_time = mtsc_after - tsc->master_tsc;
		if (write_time <= min_write_time) {
			min_write_time = write_time;
			/*
			 * Apply heuristic adjustment only if the calculated
			 * delta is > 1/4th of the write time.
			 */
			x = tsc->slave_tsc - mtsc_after;
			if (x < 0)
				x = -x;
			if (x > (min_write_time/4))
				/*
				 * Subtract 1/4th of the measured write time
				 * from the master's TSC value, as an estimate
				 * of how late the mfence completion came
				 * after the slave noticed the cache line
				 * change.
				 */
				tdelta = tsc->slave_tsc -
				    (mtsc_after - (min_write_time/4));
			else
				tdelta = tsc->slave_tsc - mtsc_after;
			tsc_sync_tick_delta[slave] =
			    tsc_sync_tick_delta[source] - tdelta;
		}

		tsc->master_tsc = tsc->slave_tsc = write_time = 0;
		membar_enter();
		tsc_sync_go = TSC_SYNC_STOP;
	}
	if (tdelta < 0)
		tdelta = -tdelta;
	if (tdelta > largest_tsc_delta)
		largest_tsc_delta = tdelta;
	if (min_write_time < shortest_write_time)
		shortest_write_time = min_write_time;
	/*
	 * Enable the delta variants of the tsc functions if the largest of
	 * all chosen deltas is greater than the shortest write time.
	 */
	if (largest_tsc_delta > shortest_write_time) {
		gethrtimef = tsc_gethrtime_delta;
		gethrtimeunscaledf = tsc_gethrtimeunscaled_delta;
	}
	restore_int_flag(flags);
}

/*
 * Called by a CPU which has just been onlined.  It is expected that the CPU
 * performing the online operation will call tsc_sync_master().
 *
 * TSC sync is disabled in the context of virtualization. See comments
 * above tsc_sync_master.
 */
void
tsc_sync_slave(void)
{
	ulong_t flags;
	hrtime_t s1;
	tsc_sync_t *tsc = tscp;
	int cnt;
	int hwtype;

	hwtype = get_hwenv();
	if (!tsc_master_slave_sync_needed || (hwtype & HW_VIRTUAL) != 0)
		return;

	flags = clear_int_flag();

	for (cnt = 0; cnt < SYNC_ITERATIONS; cnt++) {
		/* Re-fill the cache line */
		s1 = tsc->master_tsc;
		membar_enter();
		tsc_sync_go = TSC_SYNC_GO;
		do {
			/*
			 * Do not put an SMT_PAUSE here. For instance,
			 * if the master and slave are really the same
			 * hyper-threaded CPU, then you want the master
			 * to yield to the slave as quickly as possible here,
			 * but not the other way.
			 */
			s1 = tsc_read();
		} while (tsc->master_tsc == 0);
		tsc->slave_tsc = s1;
		membar_enter();
		tsc_sync_go = TSC_SYNC_DONE;

		while (tsc_sync_go != TSC_SYNC_STOP)
			SMT_PAUSE();
	}

	restore_int_flag(flags);
}

/*
 * Called once per second on a CPU from the cyclic subsystem's
 * CY_HIGH_LEVEL interrupt.  (This is no longer restricted to cpu0.)
 */
void
tsc_tick(void)
{
	hrtime_t now, delta;
	ushort_t spl;

	/*
	 * Before we set the new variables, we set the shadow values.  This
	 * allows for lock-free operation in dtrace_gethrtime().
	 */
	lock_set_spl((lock_t *)&shadow_hres_lock + HRES_LOCK_OFFSET,
	    ipltospl(CBE_HIGH_PIL), &spl);

	shadow_tsc_hrtime_base = tsc_hrtime_base;
	shadow_tsc_last = tsc_last;
	shadow_nsec_scale = nsec_scale;

	shadow_hres_lock++;
	splx(spl);

	CLOCK_LOCK(&spl);

	now = tsc_read();

	if (gethrtimef == tsc_gethrtime_delta)
		now += tsc_sync_tick_delta[CPU->cpu_id];

	if (now < tsc_last) {
		/*
		 * The TSC has just jumped into the past.  We assume that
		 * this is due to a suspend/resume cycle, and we're going
		 * to use the _current_ value of TSC as the delta.  This
		 * will keep tsc_hrtime_base correct.  We're also going to
		 * assume that the rate of the TSC does not change after a
		 * suspend/resume cycle (i.e., nsec_scale remains the same).
		 */
		delta = now;
		tsc_last_jumped += tsc_last;
		tsc_jumped = 1;
	} else {
		/*
		 * Determine the number of TSC ticks since the last clock
		 * tick, and add that to the hrtime base.
		 */
		delta = now - tsc_last;
	}

	TSC_CONVERT_AND_ADD(delta, tsc_hrtime_base, nsec_scale);
	tsc_last = now;

	CLOCK_UNLOCK(spl);
}

void
tsc_hrtimeinit(uint64_t cpu_freq_hz)
{
	extern int gethrtime_hires;
	longlong_t tsc;
	ulong_t flags;

	/*
	 * cpu_freq_hz is the measured CPU frequency in hertz.
	 */

	/*
	 * We can't accommodate CPUs slower than 31.25 MHz.
	 */
	ASSERT(cpu_freq_hz > NANOSEC / (1 << NSEC_SHIFT));
	nsec_scale =
	    (uint_t)(((uint64_t)NANOSEC << (32 - NSEC_SHIFT)) / cpu_freq_hz);
	nsec_unscale =
	    (uint_t)(((uint64_t)cpu_freq_hz << (32 - NSEC_SHIFT)) / NANOSEC);

	flags = clear_int_flag();
	tsc = tsc_read();
	(void) tsc_gethrtime();
	tsc_max_delta = tsc_read() - tsc;
	restore_int_flag(flags);
	gethrtimef = tsc_gethrtime;
	gethrtimeunscaledf = tsc_gethrtimeunscaled;
	scalehrtimef = tsc_scalehrtime;
	unscalehrtimef = tsc_unscalehrtime;
	hrtime_tick = tsc_tick;
	gethrtime_hires = 1;
	/*
	 * Allocate memory for the structure used in the tsc sync logic.
	 * This structure should be aligned on a multiple of the cache
	 * line size.
	 */
	tscp = kmem_zalloc(PAGESIZE, KM_SLEEP);
}

int
get_tsc_ready()
{
	return (tsc_ready);
}

/*
 * Adjust all the deltas by adding the passed value to the array.
 * Then use the "delta" versions of the gethrtime functions.
 * Note that 'tdelta' _could_ be a negative number, which should
 * reduce the values in the array (used, for example, if the Solaris
 * instance was moved by a virtual manager to a machine with a higher
 * value of TSC).
 */
void
tsc_adjust_delta(hrtime_t tdelta)
{
	int		i;

	for (i = 0; i < NCPU; i++) {
		tsc_sync_tick_delta[i] += tdelta;
	}

	gethrtimef = tsc_gethrtime_delta;
	gethrtimeunscaledf = tsc_gethrtimeunscaled_delta;
}

/*
 * Functions to manage TSC and high-res time on suspend and resume.
 */

/*
 * declarations needed for time adjustment
 */
extern void	rtcsync(void);
extern tod_ops_t *tod_ops;
/* There must be a better way than exposing nsec_scale! */
extern uint_t	nsec_scale;
static uint64_t tsc_saved_tsc = 0; /* 1 in 2^64 chance this'll screw up! */
static timestruc_t tsc_saved_ts;
static int	tsc_needs_resume = 0;	/* We only want to do this once. */
int		tsc_delta_onsuspend = 0;
int		tsc_adjust_seconds = 1;
int		tsc_suspend_count = 0;
int		tsc_resume_in_cyclic = 0;

/*
 * Let timestamp.c know that we are suspending.  It needs to take
 * snapshots of the current time, and do any pre-suspend work.
 */
void
tsc_suspend(void)
{
	/*
	 * What we need to do here is to get the time at which we suspended,
	 * so that we know how much to add back on resume.
	 * This routine is called by each CPU, so we need to handle reentry.
	 */
	if (tsc_gethrtime_enable) {
		/*
		 * We put the tsc_read() inside the lock as it
		 * has no locking constraints, and it puts the
		 * acquired value closer to the time stamp (in
		 * case we delay getting the lock).
		 */
		mutex_enter(&tod_lock);
		tsc_saved_tsc = tsc_read();
		tsc_saved_ts = TODOP_GET(tod_ops);
		mutex_exit(&tod_lock);
		/* We only want to do this once. */
		if (tsc_needs_resume == 0) {
			if (tsc_delta_onsuspend) {
				tsc_adjust_delta(tsc_saved_tsc);
			} else {
				tsc_adjust_delta(nsec_scale);
			}
			tsc_suspend_count++;
		}
	}

	invalidate_cache();
	tsc_needs_resume = 1;
}

/*
 * Restore all timestamp state based on the snapshots taken at
 * suspend time.
 */
void
tsc_resume(void)
{
	/*
	 * We only need to (and want to) do this once.  So let the first
	 * caller handle this (we are locked by the cpu lock), as it is
	 * preferable that we get the earliest sync.
	 */
	if (tsc_needs_resume) {
		/*
		 * If using the TSC, adjust the delta based on how long
		 * we were sleeping (or away).  We also adjust for
		 * migration and a grown TSC.
		 */
		if (tsc_saved_tsc != 0) {
			timestruc_t	ts;
			hrtime_t	now, sleep_tsc = 0;
			int		sleep_sec;
			extern void	tsc_tick(void);
			extern uint64_t cpu_freq_hz;

			/* tsc_read() MUST be before TODOP_GET() */
			mutex_enter(&tod_lock);
			now = tsc_read();
			ts = TODOP_GET(tod_ops);
			mutex_exit(&tod_lock);

			/* Compute seconds of sleep time */
			sleep_sec = ts.tv_sec - tsc_saved_ts.tv_sec;

			/*
			 * If the saved time is not earlier than the
			 * current TOD reading, then there is likely a
			 * problem with the clock.  Assume at least one
			 * second has passed, so that time goes forward.
			 */
			if (sleep_sec <= 0) {
				sleep_sec = 1;
			}

			/* How many TSC ticks should have occurred while sleeping */
			if (tsc_adjust_seconds)
				sleep_tsc = sleep_sec * cpu_freq_hz;

			/*
			 * We also want to subtract from the "sleep_tsc"
			 * the current value of tsc_read(), so that our
			 * adjustment accounts for the amount of time we
			 * have been resumed _or_ an adjustment based on
			 * the fact that we didn't actually power off the
			 * CPU (migration is another issue, but _should_
			 * also comply with this calculation).  If the CPU
			 * never powered off, then:
			 *    'now == sleep_tsc + saved_tsc'
			 * and the delta will effectively be "0".
			 */
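			/*
			 * Editorial note: in the default case (the
			 * tsc_adjust_seconds path, without
			 * tsc_delta_onsuspend), the adjustment below makes
			 * the effective counter (tsc_read() plus the
			 * per-CPU delta) read approximately tsc_saved_tsc +
			 * sleep_sec * cpu_freq_hz at the moment of resume,
			 * ignoring any pre-existing per-CPU skew -- as if
			 * the TSC had kept counting through the suspend.
			 */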
			sleep_tsc -= now;
			if (tsc_delta_onsuspend) {
				tsc_adjust_delta(sleep_tsc);
			} else {
				tsc_adjust_delta(tsc_saved_tsc + sleep_tsc);
			}
			tsc_saved_tsc = 0;

			tsc_tick();
		}
		tsc_needs_resume = 0;
	}
}