xref: /freebsd/sys/kern/kern_tc.c (revision 5f0216bd883edee71bf81051e3c20505e4820903)
1 /*-
2  * ----------------------------------------------------------------------------
3  * "THE BEER-WARE LICENSE" (Revision 42):
4  * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
5  * can do whatever you want with this stuff. If we meet some day, and you think
6  * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
7  * ----------------------------------------------------------------------------
8  *
9  * Copyright (c) 2011 The FreeBSD Foundation
10  * All rights reserved.
11  *
12  * Portions of this software were developed by Julien Ridoux at the University
13  * of Melbourne under sponsorship from the FreeBSD Foundation.
14  */
15 
16 #include <sys/cdefs.h>
17 __FBSDID("$FreeBSD$");
18 
19 #include "opt_compat.h"
20 #include "opt_ntp.h"
21 #include "opt_ffclock.h"
22 
23 #include <sys/param.h>
24 #include <sys/kernel.h>
25 #include <sys/limits.h>
26 #include <sys/lock.h>
27 #include <sys/mutex.h>
28 #include <sys/sbuf.h>
29 #include <sys/sysctl.h>
30 #include <sys/syslog.h>
31 #include <sys/systm.h>
32 #include <sys/timeffc.h>
33 #include <sys/timepps.h>
34 #include <sys/timetc.h>
35 #include <sys/timex.h>
36 #include <sys/vdso.h>
37 #include <machine/atomic.h>
38 
39 /*
40  * A large step happens on boot.  This constant detects such steps.
41  * It is relatively small so that ntp_update_second gets called enough
42  * in the typical 'missed a couple of seconds' case, but doesn't loop
43  * forever when the time step is large.
44  */
45 #define LARGE_STEP	200
46 
47 /*
48  * Implement a dummy timecounter which we can use until we get a real one
49  * in the air.  This allows the console and other early stuff to use
50  * time services.
51  */
52 
53 static u_int
54 dummy_get_timecount(struct timecounter *tc)
55 {
56 	static u_int now;
57 
58 	return (++now);
59 }
60 
61 static struct timecounter dummy_timecounter = {
62 	dummy_get_timecount, 0, ~0u, 1000000, "dummy", -1000000
63 };
64 
65 struct timehands {
66 	/* These fields must be initialized by the driver. */
67 	struct timecounter	*th_counter;
68 	int64_t			th_adjustment;
69 	uint64_t		th_scale;
70 	u_int	 		th_offset_count;
71 	struct bintime		th_offset;
72 	struct timeval		th_microtime;
73 	struct timespec		th_nanotime;
74 	/* Fields not to be copied in tc_windup start with th_generation. */
75 	u_int			th_generation;
76 	struct timehands	*th_next;
77 };
78 
79 static struct timehands th0;
80 static struct timehands th9 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th0};
81 static struct timehands th8 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th9};
82 static struct timehands th7 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th8};
83 static struct timehands th6 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th7};
84 static struct timehands th5 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th6};
85 static struct timehands th4 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th5};
86 static struct timehands th3 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th4};
87 static struct timehands th2 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th3};
88 static struct timehands th1 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th2};
89 static struct timehands th0 = {
90 	&dummy_timecounter,
91 	0,
92 	(uint64_t)-1 / 1000000,
93 	0,
94 	{1, 0},
95 	{0, 0},
96 	{0, 0},
97 	1,
98 	&th1
99 };
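
/*
 * th0's scale of (uint64_t)-1 / 1000000 matches the dummy counter's nominal
 * 1 MHz frequency: each dummy tick adds about 2^64 / 10^6 fractional seconds,
 * i.e. one microsecond, to th_offset.
 */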
100 
101 static struct timehands *volatile timehands = &th0;
102 struct timecounter *timecounter = &dummy_timecounter;
103 static struct timecounter *timecounters = &dummy_timecounter;
104 
105 int tc_min_ticktock_freq = 1;
106 
107 volatile time_t time_second = 1;
108 volatile time_t time_uptime = 1;
109 
110 struct bintime boottimebin;
111 struct timeval boottime;
112 static int sysctl_kern_boottime(SYSCTL_HANDLER_ARGS);
113 SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime, CTLTYPE_STRUCT|CTLFLAG_RD,
114     NULL, 0, sysctl_kern_boottime, "S,timeval", "System boottime");
115 
116 SYSCTL_NODE(_kern, OID_AUTO, timecounter, CTLFLAG_RW, 0, "");
117 static SYSCTL_NODE(_kern_timecounter, OID_AUTO, tc, CTLFLAG_RW, 0, "");
118 
119 static int timestepwarnings;
120 SYSCTL_INT(_kern_timecounter, OID_AUTO, stepwarnings, CTLFLAG_RW,
121     &timestepwarnings, 0, "Log time steps");
122 
123 struct bintime bt_timethreshold;
124 struct bintime bt_tickthreshold;
125 sbintime_t sbt_timethreshold;
126 sbintime_t sbt_tickthreshold;
127 struct bintime tc_tick_bt;
128 sbintime_t tc_tick_sbt;
129 int tc_precexp;
130 int tc_timepercentage = TC_DEFAULTPERC;
131 static int sysctl_kern_timecounter_adjprecision(SYSCTL_HANDLER_ARGS);
132 SYSCTL_PROC(_kern_timecounter, OID_AUTO, alloweddeviation,
133     CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, 0, 0,
134     sysctl_kern_timecounter_adjprecision, "I",
135     "Allowed time interval deviation in percents");
136 
137 static void tc_windup(void);
138 static void cpu_tick_calibrate(int);
139 
140 void dtrace_getnanotime(struct timespec *tsp);
141 
142 static int
143 sysctl_kern_boottime(SYSCTL_HANDLER_ARGS)
144 {
145 #ifndef __mips__
146 #ifdef SCTL_MASK32
147 	int tv[2];
148 
149 	if (req->flags & SCTL_MASK32) {
150 		tv[0] = boottime.tv_sec;
151 		tv[1] = boottime.tv_usec;
152 		return SYSCTL_OUT(req, tv, sizeof(tv));
153 	} else
154 #endif
155 #endif
156 		return SYSCTL_OUT(req, &boottime, sizeof(boottime));
157 }
158 
159 static int
160 sysctl_kern_timecounter_get(SYSCTL_HANDLER_ARGS)
161 {
162 	u_int ncount;
163 	struct timecounter *tc = arg1;
164 
165 	ncount = tc->tc_get_timecount(tc);
166 	return sysctl_handle_int(oidp, &ncount, 0, req);
167 }
168 
169 static int
170 sysctl_kern_timecounter_freq(SYSCTL_HANDLER_ARGS)
171 {
172 	uint64_t freq;
173 	struct timecounter *tc = arg1;
174 
175 	freq = tc->tc_frequency;
176 	return sysctl_handle_64(oidp, &freq, 0, req);
177 }
178 
179 /*
180  * Return the difference between the timehands' counter value now and what
181  * was when we copied it to the timehands' offset_count.
182  */
183 static __inline u_int
184 tc_delta(struct timehands *th)
185 {
186 	struct timecounter *tc;
187 
188 	tc = th->th_counter;
189 	return ((tc->tc_get_timecount(tc) - th->th_offset_count) &
190 	    tc->tc_counter_mask);
191 }
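
/*
 * Example (illustrative): with a 24-bit counter (tc_counter_mask == 0xffffff),
 * a read of 0x000010 taken after an offset count of 0xfffff0 yields
 * (0x000010 - 0xfffff0) & 0xffffff == 0x20, so the unsigned subtraction plus
 * mask absorbs hardware wraparound, provided tc_windup() runs before the
 * counter laps th_offset_count.
 */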
192 
193 static u_int
194 tc_getgen(struct timehands *th)
195 {
196 
197 #ifdef SMP
198 	return (atomic_load_acq_int(&th->th_generation));
199 #else
200 	u_int gen;
201 
202 	gen = th->th_generation;
203 	__compiler_membar();
204 	return (gen);
205 #endif
206 }
207 
208 static void
209 tc_setgen(struct timehands *th, u_int newgen)
210 {
211 
212 #ifdef SMP
213 	atomic_store_rel_int(&th->th_generation, newgen);
214 #else
215 	__compiler_membar();
216 	th->th_generation = newgen;
217 #endif
218 }
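
/*
 * Together, tc_setgen() and tc_getgen() implement a lockless generation
 * scheme (compare seqlocks).  A minimal sketch of the pairing as used below:
 *
 *	writer (tc_windup):
 *		tc_setgen(th, 0);		// mark inconsistent
 *		... update th fields ...
 *		tc_setgen(th, ++ogen);		// publish, never zero
 *
 *	reader (e.g. binuptime):
 *		do {
 *			gen = tc_getgen(th);
 *			... copy the fields needed ...
 *		} while (gen == 0 || gen != tc_getgen(th));
 *
 * The release store orders the field updates before the new generation is
 * visible; the acquire load orders the reader's copies after the generation
 * check.
 */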
219 
220 /*
221  * Functions for reading the time.  We have to loop until we are sure that
222  * the timehands that we operated on was not updated under our feet.  See
223  * the comment in <sys/time.h> for a description of these 12 functions.
224  */
225 
226 #ifdef FFCLOCK
227 void
228 fbclock_binuptime(struct bintime *bt)
229 {
230 	struct timehands *th;
231 	unsigned int gen;
232 
233 	do {
234 		th = timehands;
235 		gen = tc_getgen(th);
236 		*bt = th->th_offset;
237 		bintime_addx(bt, th->th_scale * tc_delta(th));
238 	} while (gen == 0 || gen != tc_getgen(th));
239 }
240 
241 void
242 fbclock_nanouptime(struct timespec *tsp)
243 {
244 	struct bintime bt;
245 
246 	fbclock_binuptime(&bt);
247 	bintime2timespec(&bt, tsp);
248 }
249 
250 void
251 fbclock_microuptime(struct timeval *tvp)
252 {
253 	struct bintime bt;
254 
255 	fbclock_binuptime(&bt);
256 	bintime2timeval(&bt, tvp);
257 }
258 
259 void
260 fbclock_bintime(struct bintime *bt)
261 {
262 
263 	fbclock_binuptime(bt);
264 	bintime_add(bt, &boottimebin);
265 }
266 
267 void
268 fbclock_nanotime(struct timespec *tsp)
269 {
270 	struct bintime bt;
271 
272 	fbclock_bintime(&bt);
273 	bintime2timespec(&bt, tsp);
274 }
275 
276 void
277 fbclock_microtime(struct timeval *tvp)
278 {
279 	struct bintime bt;
280 
281 	fbclock_bintime(&bt);
282 	bintime2timeval(&bt, tvp);
283 }
284 
285 void
286 fbclock_getbinuptime(struct bintime *bt)
287 {
288 	struct timehands *th;
289 	unsigned int gen;
290 
291 	do {
292 		th = timehands;
293 		gen = tc_getgen(th);
294 		*bt = th->th_offset;
295 	} while (gen == 0 || gen != tc_getgen(th));
296 }
297 
298 void
299 fbclock_getnanouptime(struct timespec *tsp)
300 {
301 	struct timehands *th;
302 	unsigned int gen;
303 
304 	do {
305 		th = timehands;
306 		gen = tc_getgen(th);
307 		bintime2timespec(&th->th_offset, tsp);
308 	} while (gen == 0 || gen != tc_getgen(th));
309 }
310 
311 void
312 fbclock_getmicrouptime(struct timeval *tvp)
313 {
314 	struct timehands *th;
315 	unsigned int gen;
316 
317 	do {
318 		th = timehands;
319 		gen = tc_getgen(th);
320 		bintime2timeval(&th->th_offset, tvp);
321 	} while (gen == 0 || gen != tc_getgen(th));
322 }
323 
324 void
325 fbclock_getbintime(struct bintime *bt)
326 {
327 	struct timehands *th;
328 	unsigned int gen;
329 
330 	do {
331 		th = timehands;
332 		gen = tc_getgen(th);
333 		*bt = th->th_offset;
334 	} while (gen == 0 || gen != tc_getgen(th));
335 	bintime_add(bt, &boottimebin);
336 }
337 
338 void
339 fbclock_getnanotime(struct timespec *tsp)
340 {
341 	struct timehands *th;
342 	unsigned int gen;
343 
344 	do {
345 		th = timehands;
346 		gen = tc_getgen(th);
347 		*tsp = th->th_nanotime;
348 	} while (gen == 0 || gen != tc_getgen(th));
349 }
350 
351 void
352 fbclock_getmicrotime(struct timeval *tvp)
353 {
354 	struct timehands *th;
355 	unsigned int gen;
356 
357 	do {
358 		th = timehands;
359 		gen = tc_getgen(th);
360 		*tvp = th->th_microtime;
361 	} while (gen == 0 || gen != tc_getgen(th));
362 }
363 #else /* !FFCLOCK */
364 void
365 binuptime(struct bintime *bt)
366 {
367 	struct timehands *th;
368 	u_int gen;
369 
370 	do {
371 		th = timehands;
372 		gen = tc_getgen(th);
373 		*bt = th->th_offset;
374 		bintime_addx(bt, th->th_scale * tc_delta(th));
375 	} while (gen == 0 || gen != tc_getgen(th));
376 }
377 
378 void
379 nanouptime(struct timespec *tsp)
380 {
381 	struct bintime bt;
382 
383 	binuptime(&bt);
384 	bintime2timespec(&bt, tsp);
385 }
386 
387 void
388 microuptime(struct timeval *tvp)
389 {
390 	struct bintime bt;
391 
392 	binuptime(&bt);
393 	bintime2timeval(&bt, tvp);
394 }
395 
396 void
397 bintime(struct bintime *bt)
398 {
399 
400 	binuptime(bt);
401 	bintime_add(bt, &boottimebin);
402 }
403 
404 void
405 nanotime(struct timespec *tsp)
406 {
407 	struct bintime bt;
408 
409 	bintime(&bt);
410 	bintime2timespec(&bt, tsp);
411 }
412 
413 void
414 microtime(struct timeval *tvp)
415 {
416 	struct bintime bt;
417 
418 	bintime(&bt);
419 	bintime2timeval(&bt, tvp);
420 }
421 
422 void
423 getbinuptime(struct bintime *bt)
424 {
425 	struct timehands *th;
426 	u_int gen;
427 
428 	do {
429 		th = timehands;
430 		gen = tc_getgen(th);
431 		*bt = th->th_offset;
432 	} while (gen == 0 || gen != tc_getgen(th));
433 }
434 
435 void
436 getnanouptime(struct timespec *tsp)
437 {
438 	struct timehands *th;
439 	u_int gen;
440 
441 	do {
442 		th = timehands;
443 		gen = tc_getgen(th);
444 		bintime2timespec(&th->th_offset, tsp);
445 	} while (gen == 0 || gen != tc_getgen(th));
446 }
447 
448 void
449 getmicrouptime(struct timeval *tvp)
450 {
451 	struct timehands *th;
452 	u_int gen;
453 
454 	do {
455 		th = timehands;
456 		gen = tc_getgen(th);
457 		bintime2timeval(&th->th_offset, tvp);
458 	} while (gen == 0 || gen != tc_getgen(th));
459 }
460 
461 void
462 getbintime(struct bintime *bt)
463 {
464 	struct timehands *th;
465 	u_int gen;
466 
467 	do {
468 		th = timehands;
469 		gen = tc_getgen(th);
470 		*bt = th->th_offset;
471 	} while (gen == 0 || gen != tc_getgen(th));
472 	bintime_add(bt, &boottimebin);
473 }
474 
475 void
476 getnanotime(struct timespec *tsp)
477 {
478 	struct timehands *th;
479 	u_int gen;
480 
481 	do {
482 		th = timehands;
483 		gen = tc_getgen(th);
484 		*tsp = th->th_nanotime;
485 	} while (gen == 0 || gen != tc_getgen(th));
486 }
487 
488 void
489 getmicrotime(struct timeval *tvp)
490 {
491 	struct timehands *th;
492 	u_int gen;
493 
494 	do {
495 		th = timehands;
496 		gen = tc_getgen(th);
497 		*tvp = th->th_microtime;
498 	} while (gen == 0 || gen != tc_getgen(th));
499 }
500 #endif /* FFCLOCK */
501 
502 #ifdef FFCLOCK
503 /*
504  * Support for feed-forward synchronization algorithms. This is heavily inspired
505  * by the timehands mechanism but kept independent from it. The *_windup()
506  * functions are chained so that the timecounter hardware is not read more
507  * often than necessary.
508  */
509 
510 /* Feed-forward clock estimates kept updated by the synchronization daemon. */
511 struct ffclock_estimate ffclock_estimate;
512 struct bintime ffclock_boottime;	/* Feed-forward boot time estimate. */
513 uint32_t ffclock_status;		/* Feed-forward clock status. */
514 int8_t ffclock_updated;			/* New estimates are available. */
515 struct mtx ffclock_mtx;			/* Mutex on ffclock_estimate. */
516 
517 struct fftimehands {
518 	struct ffclock_estimate	cest;
519 	struct bintime		tick_time;
520 	struct bintime		tick_time_lerp;
521 	ffcounter		tick_ffcount;
522 	uint64_t		period_lerp;
523 	volatile uint8_t	gen;
524 	struct fftimehands	*next;
525 };
526 
527 #define	NUM_ELEMENTS(x) (sizeof(x) / sizeof(*x))
528 
529 static struct fftimehands ffth[10];
530 static struct fftimehands *volatile fftimehands = ffth;
531 
532 static void
533 ffclock_init(void)
534 {
535 	struct fftimehands *cur;
536 	struct fftimehands *last;
537 
538 	memset(ffth, 0, sizeof(ffth));
539 
540 	last = ffth + NUM_ELEMENTS(ffth) - 1;
541 	for (cur = ffth; cur < last; cur++)
542 		cur->next = cur + 1;
543 	last->next = ffth;
544 
545 	ffclock_updated = 0;
546 	ffclock_status = FFCLOCK_STA_UNSYNC;
547 	mtx_init(&ffclock_mtx, "ffclock lock", NULL, MTX_DEF);
548 }
549 
550 /*
551  * Reset the feed-forward clock estimates. Called from inittodr() to get things
552  * kick-started and uses the timecounter's nominal frequency as a first period
553  * estimate. Note: this function may be called several times just after boot.
554  * Note: this is the only function that sets the value of boot time for the
555  * monotonic (i.e. uptime) version of the feed-forward clock.
556  */
557 void
558 ffclock_reset_clock(struct timespec *ts)
559 {
560 	struct timecounter *tc;
561 	struct ffclock_estimate cest;
562 
563 	tc = timehands->th_counter;
564 	memset(&cest, 0, sizeof(struct ffclock_estimate));
565 
566 	timespec2bintime(ts, &ffclock_boottime);
567 	timespec2bintime(ts, &(cest.update_time));
568 	ffclock_read_counter(&cest.update_ffcount);
569 	cest.leapsec_next = 0;
570 	cest.period = ((1ULL << 63) / tc->tc_frequency) << 1;
571 	cest.errb_abs = 0;
572 	cest.errb_rate = 0;
573 	cest.status = FFCLOCK_STA_UNSYNC;
574 	cest.leapsec_total = 0;
575 	cest.leapsec = 0;
576 
577 	mtx_lock(&ffclock_mtx);
578 	bcopy(&cest, &ffclock_estimate, sizeof(struct ffclock_estimate));
579 	ffclock_updated = INT8_MAX;
580 	mtx_unlock(&ffclock_mtx);
581 
582 	printf("ffclock reset: %s (%llu Hz), time = %ld.%09lu\n", tc->tc_name,
583 	    (unsigned long long)tc->tc_frequency, (long)ts->tv_sec,
584 	    (unsigned long)ts->tv_nsec);
585 }
586 
587 /*
588  * Sub-routine to convert a time interval measured in RAW counter units to time
589  * in seconds stored in bintime format.
590  * NOTE: bintime_mul requires u_int, but the value of the ffcounter may be
591  * larger than the max value of u_int (on a 32-bit architecture). Loop to consume
592  * extra cycles.
593  */
594 static void
595 ffclock_convert_delta(ffcounter ffdelta, uint64_t period, struct bintime *bt)
596 {
597 	struct bintime bt2;
598 	ffcounter delta, delta_max;
599 
600 	delta_max = (1ULL << (8 * sizeof(unsigned int))) - 1;
601 	bintime_clear(bt);
602 	do {
603 		if (ffdelta > delta_max)
604 			delta = delta_max;
605 		else
606 			delta = ffdelta;
607 		bt2.sec = 0;
608 		bt2.frac = period;
609 		bintime_mul(&bt2, (unsigned int)delta);
610 		bintime_add(bt, &bt2);
611 		ffdelta -= delta;
612 	} while (ffdelta > 0);
613 }
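
/*
 * The loop in effect computes bt = ffdelta * period * 2^-64 seconds without
 * needing a 128-bit multiply.  Illustrative example: for a 1 GHz counter,
 * period ~= 2^64 / 10^9, so an ffdelta of 3 * 10^9 cycles converts to about
 * 3 seconds, accumulated in chunks of at most UINT_MAX cycles per
 * bintime_mul() call.
 */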
614 
615 /*
616  * Update the fftimehands.
617  * Push the tick ffcount and time(s) forward based on current clock estimate.
618  * The conversion from ffcounter to bintime relies on the difference clock
619  * principle, whose accuracy relies on computing small time intervals. If a new
620  * clock estimate has been passed by the synchronisation daemon, make it
621  * current, and compute the linear interpolation for monotonic time if needed.
622  */
623 static void
624 ffclock_windup(unsigned int delta)
625 {
626 	struct ffclock_estimate *cest;
627 	struct fftimehands *ffth;
628 	struct bintime bt, gap_lerp;
629 	ffcounter ffdelta;
630 	uint64_t frac;
631 	unsigned int polling;
632 	uint8_t forward_jump, ogen;
633 
634 	/*
635 	 * Pick the next timehand, copy current ffclock estimates and move tick
636 	 * times and counter forward.
637 	 */
638 	forward_jump = 0;
639 	ffth = fftimehands->next;
640 	ogen = ffth->gen;
641 	ffth->gen = 0;
642 	cest = &ffth->cest;
643 	bcopy(&fftimehands->cest, cest, sizeof(struct ffclock_estimate));
644 	ffdelta = (ffcounter)delta;
645 	ffth->period_lerp = fftimehands->period_lerp;
646 
647 	ffth->tick_time = fftimehands->tick_time;
648 	ffclock_convert_delta(ffdelta, cest->period, &bt);
649 	bintime_add(&ffth->tick_time, &bt);
650 
651 	ffth->tick_time_lerp = fftimehands->tick_time_lerp;
652 	ffclock_convert_delta(ffdelta, ffth->period_lerp, &bt);
653 	bintime_add(&ffth->tick_time_lerp, &bt);
654 
655 	ffth->tick_ffcount = fftimehands->tick_ffcount + ffdelta;
656 
657 	/*
658 	 * Assess the status of the clock; if the last update is too old, it is
659 	 * likely that the synchronisation daemon is dead and the clock is free
660 	 * running.
661 	 */
662 	if (ffclock_updated == 0) {
663 		ffdelta = ffth->tick_ffcount - cest->update_ffcount;
664 		ffclock_convert_delta(ffdelta, cest->period, &bt);
665 		if (bt.sec > 2 * FFCLOCK_SKM_SCALE)
666 			ffclock_status |= FFCLOCK_STA_UNSYNC;
667 	}
668 
669 	/*
670 	 * If available, grab updated clock estimates and make them current.
671 	 * Recompute time at this tick using the updated estimates. The clock
672 	 * estimates passed in by the feed-forward synchronisation daemon may
673 	 * result in a time conversion that is not monotonically increasing (just
674 	 * after the update). time_lerp is a linear interpolation over the
675 	 * synchronisation algorithm's polling period that ensures monotonicity
676 	 * for the clock ids requesting it.
677 	 */
678 	if (ffclock_updated > 0) {
679 		bcopy(&ffclock_estimate, cest, sizeof(struct ffclock_estimate));
680 		ffdelta = ffth->tick_ffcount - cest->update_ffcount;
681 		ffth->tick_time = cest->update_time;
682 		ffclock_convert_delta(ffdelta, cest->period, &bt);
683 		bintime_add(&ffth->tick_time, &bt);
684 
685 		/* ffclock_reset sets ffclock_updated to INT8_MAX */
686 		if (ffclock_updated == INT8_MAX)
687 			ffth->tick_time_lerp = ffth->tick_time;
688 
689 		if (bintime_cmp(&ffth->tick_time, &ffth->tick_time_lerp, >))
690 			forward_jump = 1;
691 		else
692 			forward_jump = 0;
693 
694 		bintime_clear(&gap_lerp);
695 		if (forward_jump) {
696 			gap_lerp = ffth->tick_time;
697 			bintime_sub(&gap_lerp, &ffth->tick_time_lerp);
698 		} else {
699 			gap_lerp = ffth->tick_time_lerp;
700 			bintime_sub(&gap_lerp, &ffth->tick_time);
701 		}
702 
703 		/*
704 		 * The reset from the RTC clock may be far from accurate, and
705 		 * reducing the gap between real time and interpolated time
706 		 * could take a very long time if the interpolated clock insists
707 		 * on strict monotonicity. The clock is reset under very strict
708 	 * conditions (kernel time is known to be wrong and the
709 	 * synchronization daemon has been restarted recently).
710 		 * ffclock_boottime absorbs the jump to ensure boot time is
711 		 * correct and uptime functions stay consistent.
712 		 */
713 		if (((ffclock_status & FFCLOCK_STA_UNSYNC) == FFCLOCK_STA_UNSYNC) &&
714 		    ((cest->status & FFCLOCK_STA_UNSYNC) == 0) &&
715 		    ((cest->status & FFCLOCK_STA_WARMUP) == FFCLOCK_STA_WARMUP)) {
716 			if (forward_jump)
717 				bintime_add(&ffclock_boottime, &gap_lerp);
718 			else
719 				bintime_sub(&ffclock_boottime, &gap_lerp);
720 			ffth->tick_time_lerp = ffth->tick_time;
721 			bintime_clear(&gap_lerp);
722 		}
723 
724 		ffclock_status = cest->status;
725 		ffth->period_lerp = cest->period;
726 
727 		/*
728 		 * Compute corrected period used for the linear interpolation of
729 		 * time. The rate of linear interpolation is capped to 5000PPM
730 		 * (5ms/s).
731 		 */
732 		if (bintime_isset(&gap_lerp)) {
733 			ffdelta = cest->update_ffcount;
734 			ffdelta -= fftimehands->cest.update_ffcount;
735 			ffclock_convert_delta(ffdelta, cest->period, &bt);
736 			polling = bt.sec;
737 			bt.sec = 0;
738 			bt.frac = 5000000 * (uint64_t)18446744073LL;
739 			bintime_mul(&bt, polling);
740 			if (bintime_cmp(&gap_lerp, &bt, >))
741 				gap_lerp = bt;
742 
743 			/* Approximate 1 sec by 1-(1/2^64) to ease arithmetic */
744 			frac = 0;
745 			if (gap_lerp.sec > 0) {
746 				frac -= 1;
747 				frac /= ffdelta / gap_lerp.sec;
748 			}
749 			frac += gap_lerp.frac / ffdelta;
750 
751 			if (forward_jump)
752 				ffth->period_lerp += frac;
753 			else
754 				ffth->period_lerp -= frac;
755 		}
756 
757 		ffclock_updated = 0;
758 	}
759 	if (++ogen == 0)
760 		ogen = 1;
761 	ffth->gen = ogen;
762 	fftimehands = ffth;
763 }
764 
765 /*
766  * Adjust the fftimehands when the timecounter is changed. Stating the obvious,
767  * the old and new hardware counter cannot be read simultaneously. tc_windup()
768  * does read the two counters 'back to back', but a few cycles are effectively
769  * lost, and not accumulated in tick_ffcount. This is a fairly radical
770  * operation for a feed-forward synchronization daemon, and it is its job to not
771  * pushing irrelevant data to the kernel. Because there is no locking here,
772  * simply force to ignore pending or next update to give daemon a chance to
773  * realize the counter has changed.
774  */
775 static void
776 ffclock_change_tc(struct timehands *th)
777 {
778 	struct fftimehands *ffth;
779 	struct ffclock_estimate *cest;
780 	struct timecounter *tc;
781 	uint8_t ogen;
782 
783 	tc = th->th_counter;
784 	ffth = fftimehands->next;
785 	ogen = ffth->gen;
786 	ffth->gen = 0;
787 
788 	cest = &ffth->cest;
789 	bcopy(&(fftimehands->cest), cest, sizeof(struct ffclock_estimate));
790 	cest->period = ((1ULL << 63) / tc->tc_frequency) << 1;
791 	cest->errb_abs = 0;
792 	cest->errb_rate = 0;
793 	cest->status |= FFCLOCK_STA_UNSYNC;
794 
795 	ffth->tick_ffcount = fftimehands->tick_ffcount;
796 	ffth->tick_time_lerp = fftimehands->tick_time_lerp;
797 	ffth->tick_time = fftimehands->tick_time;
798 	ffth->period_lerp = cest->period;
799 
800 	/* Do not lock but ignore next update from synchronization daemon. */
801 	ffclock_updated--;
802 
803 	if (++ogen == 0)
804 		ogen = 1;
805 	ffth->gen = ogen;
806 	fftimehands = ffth;
807 }
808 
809 /*
810  * Retrieve feed-forward counter and time of last kernel tick.
811  */
812 void
813 ffclock_last_tick(ffcounter *ffcount, struct bintime *bt, uint32_t flags)
814 {
815 	struct fftimehands *ffth;
816 	uint8_t gen;
817 
818 	/*
819 	 * No locking, but check that the generation has not changed while the
820 	 * last tick's state was being copied.
821 	 */
822 	do {
823 		ffth = fftimehands;
824 		gen = ffth->gen;
825 		if ((flags & FFCLOCK_LERP) == FFCLOCK_LERP)
826 			*bt = ffth->tick_time_lerp;
827 		else
828 			*bt = ffth->tick_time;
829 		*ffcount = ffth->tick_ffcount;
830 	} while (gen == 0 || gen != ffth->gen);
831 }
832 
833 /*
834  * Absolute clock conversion. Low-level function to convert an ffcounter to
835  * bintime. The ffcounter is converted using the current ffclock period estimate
836  * or the "interpolated period" to ensure monotonicity.
837  * NOTE: this conversion may have been deferred, and the clock updated since the
838  * hardware counter has been read.
839  */
840 void
841 ffclock_convert_abs(ffcounter ffcount, struct bintime *bt, uint32_t flags)
842 {
843 	struct fftimehands *ffth;
844 	struct bintime bt2;
845 	ffcounter ffdelta;
846 	uint8_t gen;
847 
848 	/*
849 	 * No locking but check generation has not changed. Also need to make
850 	 * sure ffdelta is positive, i.e. ffcount > tick_ffcount.
851 	 */
852 	do {
853 		ffth = fftimehands;
854 		gen = ffth->gen;
855 		if (ffcount > ffth->tick_ffcount)
856 			ffdelta = ffcount - ffth->tick_ffcount;
857 		else
858 			ffdelta = ffth->tick_ffcount - ffcount;
859 
860 		if ((flags & FFCLOCK_LERP) == FFCLOCK_LERP) {
861 			*bt = ffth->tick_time_lerp;
862 			ffclock_convert_delta(ffdelta, ffth->period_lerp, &bt2);
863 		} else {
864 			*bt = ffth->tick_time;
865 			ffclock_convert_delta(ffdelta, ffth->cest.period, &bt2);
866 		}
867 
868 		if (ffcount > ffth->tick_ffcount)
869 			bintime_add(bt, &bt2);
870 		else
871 			bintime_sub(bt, &bt2);
872 	} while (gen == 0 || gen != ffth->gen);
873 }
874 
875 /*
876  * Difference clock conversion.
877  * Low-level function to convert a time interval measured in RAW counter units
878  * into bintime. The difference clock allows measuring small intervals much more
879  * reliably than the absolute clock.
880  */
881 void
882 ffclock_convert_diff(ffcounter ffdelta, struct bintime *bt)
883 {
884 	struct fftimehands *ffth;
885 	uint8_t gen;
886 
887 	/* No locking but check generation has not changed. */
888 	do {
889 		ffth = fftimehands;
890 		gen = ffth->gen;
891 		ffclock_convert_delta(ffdelta, ffth->cest.period, bt);
892 	} while (gen == 0 || gen != ffth->gen);
893 }
894 
895 /*
896  * Access to current ffcounter value.
897  */
898 void
899 ffclock_read_counter(ffcounter *ffcount)
900 {
901 	struct timehands *th;
902 	struct fftimehands *ffth;
903 	unsigned int gen, delta;
904 
905 	/*
906 	 * ffclock_windup() is called from tc_windup(), so it is safe to rely
907 	 * on th->th_generation alone for a consistent delta and ffcounter.
908 	 */
909 	do {
910 		th = timehands;
911 		gen = tc_getgen(th);
912 		ffth = fftimehands;
913 		delta = tc_delta(th);
914 		*ffcount = ffth->tick_ffcount;
915 	} while (gen == 0 || gen != tc_getgen(th));
916 
917 	*ffcount += delta;
918 }
919 
920 void
921 binuptime(struct bintime *bt)
922 {
923 
924 	binuptime_fromclock(bt, sysclock_active);
925 }
926 
927 void
928 nanouptime(struct timespec *tsp)
929 {
930 
931 	nanouptime_fromclock(tsp, sysclock_active);
932 }
933 
934 void
935 microuptime(struct timeval *tvp)
936 {
937 
938 	microuptime_fromclock(tvp, sysclock_active);
939 }
940 
941 void
942 bintime(struct bintime *bt)
943 {
944 
945 	bintime_fromclock(bt, sysclock_active);
946 }
947 
948 void
949 nanotime(struct timespec *tsp)
950 {
951 
952 	nanotime_fromclock(tsp, sysclock_active);
953 }
954 
955 void
956 microtime(struct timeval *tvp)
957 {
958 
959 	microtime_fromclock(tvp, sysclock_active);
960 }
961 
962 void
963 getbinuptime(struct bintime *bt)
964 {
965 
966 	getbinuptime_fromclock(bt, sysclock_active);
967 }
968 
969 void
970 getnanouptime(struct timespec *tsp)
971 {
972 
973 	getnanouptime_fromclock(tsp, sysclock_active);
974 }
975 
976 void
977 getmicrouptime(struct timeval *tvp)
978 {
979 
980 	getmicrouptime_fromclock(tvp, sysclock_active);
981 }
982 
983 void
984 getbintime(struct bintime *bt)
985 {
986 
987 	getbintime_fromclock(bt, sysclock_active);
988 }
989 
990 void
991 getnanotime(struct timespec *tsp)
992 {
993 
994 	getnanotime_fromclock(tsp, sysclock_active);
995 }
996 
997 void
998 getmicrotime(struct timeval *tvp)
999 {
1000 
1001 	getmicrotime_fromclock(tvp, sysclock_active);
1002 }
1003 
1004 #endif /* FFCLOCK */
1005 
1006 /*
1007  * This is a clone of getnanotime and used for walltimestamps.
1008  * The dtrace_ prefix prevents fbt from creating probes for
1009  * it so walltimestamp can be safely used in all fbt probes.
1010  */
1011 void
1012 dtrace_getnanotime(struct timespec *tsp)
1013 {
1014 	struct timehands *th;
1015 	u_int gen;
1016 
1017 	do {
1018 		th = timehands;
1019 		gen = tc_getgen(th);
1020 		*tsp = th->th_nanotime;
1021 	} while (gen == 0 || gen != tc_getgen(th));
1022 }
1023 
1024 /*
1025  * System clock currently providing time to the system. Modifiable via sysctl
1026  * when the FFCLOCK option is defined.
1027  */
1028 int sysclock_active = SYSCLOCK_FBCK;
1029 
1030 /* Internal NTP status and error estimates. */
1031 extern int time_status;
1032 extern long time_esterror;
1033 
1034 /*
1035  * Take a snapshot of sysclock data which can be used to compare system clocks
1036  * and generate timestamps after the fact.
1037  */
1038 void
1039 sysclock_getsnapshot(struct sysclock_snap *clock_snap, int fast)
1040 {
1041 	struct fbclock_info *fbi;
1042 	struct timehands *th;
1043 	struct bintime bt;
1044 	unsigned int delta, gen;
1045 #ifdef FFCLOCK
1046 	ffcounter ffcount;
1047 	struct fftimehands *ffth;
1048 	struct ffclock_info *ffi;
1049 	struct ffclock_estimate cest;
1050 
1051 	ffi = &clock_snap->ff_info;
1052 #endif
1053 
1054 	fbi = &clock_snap->fb_info;
1055 	delta = 0;
1056 
1057 	do {
1058 		th = timehands;
1059 		gen = tc_getgen(th);
1060 		fbi->th_scale = th->th_scale;
1061 		fbi->tick_time = th->th_offset;
1062 #ifdef FFCLOCK
1063 		ffth = fftimehands;
1064 		ffi->tick_time = ffth->tick_time;
1065 		ffi->tick_time_lerp = ffth->tick_time_lerp;
1066 		ffi->period = ffth->cest.period;
1067 		ffi->period_lerp = ffth->period_lerp;
1068 		clock_snap->ffcount = ffth->tick_ffcount;
1069 		cest = ffth->cest;
1070 #endif
1071 		if (!fast)
1072 			delta = tc_delta(th);
1073 	} while (gen == 0 || gen != tc_getgen(th));
1074 
1075 	clock_snap->delta = delta;
1076 	clock_snap->sysclock_active = sysclock_active;
1077 
1078 	/* Record feedback clock status and error. */
1079 	clock_snap->fb_info.status = time_status;
1080 	/* XXX: Very crude estimate of feedback clock error. */
1081 	bt.sec = time_esterror / 1000000;
1082 	bt.frac = (time_esterror - bt.sec * 1000000) *
1083 	    (uint64_t)18446744073709ULL;
1084 	clock_snap->fb_info.error = bt;
1085 
1086 #ifdef FFCLOCK
1087 	if (!fast)
1088 		clock_snap->ffcount += delta;
1089 
1090 	/* Record feed-forward clock leap second adjustment. */
1091 	ffi->leapsec_adjustment = cest.leapsec_total;
1092 	if (clock_snap->ffcount > cest.leapsec_next)
1093 		ffi->leapsec_adjustment -= cest.leapsec;
1094 
1095 	/* Record feed-forward clock status and error. */
1096 	clock_snap->ff_info.status = cest.status;
1097 	ffcount = clock_snap->ffcount - cest.update_ffcount;
1098 	ffclock_convert_delta(ffcount, cest.period, &bt);
1099 	/* 18446744 = int(2^64 / 1e12), since err_bound_rate is in [ps/s]. */
1100 	bintime_mul(&bt, cest.errb_rate * (uint64_t)18446744ULL);
1101 	/* 18446744073 = int(2^64 / 1e9), since err_abs in [ns]. */
1102 	bintime_addx(&bt, cest.errb_abs * (uint64_t)18446744073ULL);
1103 	clock_snap->ff_info.error = bt;
1104 #endif
1105 }
1106 
1107 /*
1108  * Convert a sysclock snapshot into a struct bintime based on the specified
1109  * clock source and flags.
1110  */
1111 int
1112 sysclock_snap2bintime(struct sysclock_snap *cs, struct bintime *bt,
1113     int whichclock, uint32_t flags)
1114 {
1115 #ifdef FFCLOCK
1116 	struct bintime bt2;
1117 	uint64_t period;
1118 #endif
1119 
1120 	switch (whichclock) {
1121 	case SYSCLOCK_FBCK:
1122 		*bt = cs->fb_info.tick_time;
1123 
1124 		/* If snapshot was created with !fast, delta will be >0. */
1125 		if (cs->delta > 0)
1126 			bintime_addx(bt, cs->fb_info.th_scale * cs->delta);
1127 
1128 		if ((flags & FBCLOCK_UPTIME) == 0)
1129 			bintime_add(bt, &boottimebin);
1130 		break;
1131 #ifdef FFCLOCK
1132 	case SYSCLOCK_FFWD:
1133 		if (flags & FFCLOCK_LERP) {
1134 			*bt = cs->ff_info.tick_time_lerp;
1135 			period = cs->ff_info.period_lerp;
1136 		} else {
1137 			*bt = cs->ff_info.tick_time;
1138 			period = cs->ff_info.period;
1139 		}
1140 
1141 		/* If snapshot was created with !fast, delta will be >0. */
1142 		if (cs->delta > 0) {
1143 			ffclock_convert_delta(cs->delta, period, &bt2);
1144 			bintime_add(bt, &bt2);
1145 		}
1146 
1147 		/* Leap second adjustment. */
1148 		if (flags & FFCLOCK_LEAPSEC)
1149 			bt->sec -= cs->ff_info.leapsec_adjustment;
1150 
1151 		/* Boot time adjustment, for uptime/monotonic clocks. */
1152 		if (flags & FFCLOCK_UPTIME)
1153 			bintime_sub(bt, &ffclock_boottime);
1154 		break;
1155 #endif
1156 	default:
1157 		return (EINVAL);
1158 		break;
1159 	}
1160 
1161 	return (0);
1162 }
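
/*
 * Typical usage (a sketch; error handling omitted): take one snapshot and
 * generate timestamps for both system clocks after the fact, e.g. to compare
 * them for the same instant:
 *
 *	struct sysclock_snap snap;
 *	struct bintime bt_fb, bt_ff;
 *
 *	sysclock_getsnapshot(&snap, 0);
 *	sysclock_snap2bintime(&snap, &bt_fb, SYSCLOCK_FBCK, 0);
 *	sysclock_snap2bintime(&snap, &bt_ff, SYSCLOCK_FFWD,
 *	    FFCLOCK_LERP | FFCLOCK_LEAPSEC);
 *
 * Both conversions reference the single counter read captured in the
 * snapshot, so the two bintimes describe the same instant.
 */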
1163 
1164 /*
1165  * Initialize a new timecounter and possibly use it.
1166  */
1167 void
1168 tc_init(struct timecounter *tc)
1169 {
1170 	u_int u;
1171 	struct sysctl_oid *tc_root;
1172 
1173 	u = tc->tc_frequency / tc->tc_counter_mask;
1174 	/* XXX: We need some margin here, 10% is a guess */
1175 	u *= 11;
1176 	u /= 10;
1177 	if (u > hz && tc->tc_quality >= 0) {
1178 		tc->tc_quality = -2000;
1179 		if (bootverbose) {
1180 			printf("Timecounter \"%s\" frequency %ju Hz",
1181 			    tc->tc_name, (uintmax_t)tc->tc_frequency);
1182 			printf(" -- Insufficient hz, needs at least %u\n", u);
1183 		}
1184 	} else if (tc->tc_quality >= 0 || bootverbose) {
1185 		printf("Timecounter \"%s\" frequency %ju Hz quality %d\n",
1186 		    tc->tc_name, (uintmax_t)tc->tc_frequency,
1187 		    tc->tc_quality);
1188 	}
1189 
1190 	tc->tc_next = timecounters;
1191 	timecounters = tc;
1192 	/*
1193 	 * Set up sysctl tree for this counter.
1194 	 */
1195 	tc_root = SYSCTL_ADD_NODE(NULL,
1196 	    SYSCTL_STATIC_CHILDREN(_kern_timecounter_tc), OID_AUTO, tc->tc_name,
1197 	    CTLFLAG_RW, 0, "timecounter description");
1198 	SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
1199 	    "mask", CTLFLAG_RD, &(tc->tc_counter_mask), 0,
1200 	    "mask for implemented bits");
1201 	SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
1202 	    "counter", CTLTYPE_UINT | CTLFLAG_RD, tc, sizeof(*tc),
1203 	    sysctl_kern_timecounter_get, "IU", "current timecounter value");
1204 	SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
1205 	    "frequency", CTLTYPE_U64 | CTLFLAG_RD, tc, sizeof(*tc),
1206 	     sysctl_kern_timecounter_freq, "QU", "timecounter frequency");
1207 	SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
1208 	    "quality", CTLFLAG_RD, &(tc->tc_quality), 0,
1209 	    "goodness of time counter");
1210 	/*
1211 	 * Never automatically use a timecounter with negative quality.
1212 	 * Even though we run on the dummy counter, switching here may be
1213 	 * worse since this timecounter may not be monotonic.
1214 	 */
1215 	if (tc->tc_quality < 0)
1216 		return;
1217 	if (tc->tc_quality < timecounter->tc_quality)
1218 		return;
1219 	if (tc->tc_quality == timecounter->tc_quality &&
1220 	    tc->tc_frequency < timecounter->tc_frequency)
1221 		return;
1222 	(void)tc->tc_get_timecount(tc);
1223 	(void)tc->tc_get_timecount(tc);
1224 	timecounter = tc;
1225 }
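
/*
 * A timecounter driver registers itself by filling in a struct timecounter
 * and calling tc_init().  A minimal sketch, with hypothetical "mydev" names:
 *
 *	static u_int
 *	mydev_get_timecount(struct timecounter *tc)
 *	{
 *		return (MYDEV_READ_COUNTER());
 *	}
 *
 *	static struct timecounter mydev_timecounter = {
 *		mydev_get_timecount,	// tc_get_timecount
 *		0,			// no tc_poll_pps method
 *		0xffffffffu,		// tc_counter_mask: 32 implemented bits
 *		100000000,		// tc_frequency: 100 MHz
 *		"mydev",		// tc_name
 *		900,			// tc_quality
 *	};
 *
 *	tc_init(&mydev_timecounter);
 *
 * tc_init() adds the counter to the list and the sysctl tree; whether it
 * becomes the active timecounter depends on the quality comparison above.
 */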
1226 
1227 /* Report the frequency of the current timecounter. */
1228 uint64_t
1229 tc_getfrequency(void)
1230 {
1231 
1232 	return (timehands->th_counter->tc_frequency);
1233 }
1234 
1235 /*
1236  * Step our concept of UTC.  This is done by modifying our estimate of
1237  * when we booted.
1238  * XXX: not locked.
1239  */
1240 void
1241 tc_setclock(struct timespec *ts)
1242 {
1243 	struct timespec tbef, taft;
1244 	struct bintime bt, bt2;
1245 
1246 	cpu_tick_calibrate(1);
1247 	nanotime(&tbef);
1248 	timespec2bintime(ts, &bt);
1249 	binuptime(&bt2);
1250 	bintime_sub(&bt, &bt2);
1251 	bintime_add(&bt2, &boottimebin);
1252 	boottimebin = bt;
1253 	bintime2timeval(&bt, &boottime);
1254 
1255 	/* XXX fiddle all the little crinkly bits around the fiords... */
1256 	tc_windup();
1257 	nanotime(&taft);
1258 	if (timestepwarnings) {
1259 		log(LOG_INFO,
1260 		    "Time stepped from %jd.%09ld to %jd.%09ld (%jd.%09ld)\n",
1261 		    (intmax_t)tbef.tv_sec, tbef.tv_nsec,
1262 		    (intmax_t)taft.tv_sec, taft.tv_nsec,
1263 		    (intmax_t)ts->tv_sec, ts->tv_nsec);
1264 	}
1265 	cpu_tick_calibrate(1);
1266 }
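
/*
 * In bintime terms the step above computes boottimebin = ts - uptime, so
 * that UTC == boottimebin + uptime continues to hold after the step.  E.g.
 * stepping to ts == 1000000000 s with an uptime of 3600 s sets boottimebin
 * to 999996400 s.
 */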
1267 
1268 /*
1269  * Initialize the next struct timehands in the ring and make
1270  * it the active timehands.  Along the way we might switch to a different
1271  * timecounter and/or do seconds processing in NTP.  Slightly magic.
1272  */
1273 static void
1274 tc_windup(void)
1275 {
1276 	struct bintime bt;
1277 	struct timehands *th, *tho;
1278 	uint64_t scale;
1279 	u_int delta, ncount, ogen;
1280 	int i;
1281 	time_t t;
1282 
1283 	/*
1284 	 * Make the next timehands a copy of the current one, but do not
1285 	 * overwrite the generation or next pointer.  While we update
1286 	 * the contents, the generation must be zero.
1287 	 */
1288 	tho = timehands;
1289 	th = tho->th_next;
1290 	ogen = th->th_generation;
1291 	tc_setgen(th, 0);
1292 	bcopy(tho, th, offsetof(struct timehands, th_generation));
1293 
1294 	/*
1295 	 * Capture a timecounter delta on the current timecounter and if
1296 	 * changing timecounters, a counter value from the new timecounter.
1297 	 * Update the offset fields accordingly.
1298 	 */
1299 	delta = tc_delta(th);
1300 	if (th->th_counter != timecounter)
1301 		ncount = timecounter->tc_get_timecount(timecounter);
1302 	else
1303 		ncount = 0;
1304 #ifdef FFCLOCK
1305 	ffclock_windup(delta);
1306 #endif
1307 	th->th_offset_count += delta;
1308 	th->th_offset_count &= th->th_counter->tc_counter_mask;
1309 	while (delta > th->th_counter->tc_frequency) {
1310 		/* Eat complete unadjusted seconds. */
1311 		delta -= th->th_counter->tc_frequency;
1312 		th->th_offset.sec++;
1313 	}
1314 	if ((delta > th->th_counter->tc_frequency / 2) &&
1315 	    (th->th_scale * delta < ((uint64_t)1 << 63))) {
1316 		/* The product th_scale * delta just barely overflows. */
1317 		th->th_offset.sec++;
1318 	}
1319 	bintime_addx(&th->th_offset, th->th_scale * delta);
1320 
1321 	/*
1322 	 * Hardware latching timecounters may not generate interrupts on
1323 	 * PPS events, so instead we poll them.  There is a finite risk that
1324 	 * the hardware might capture a count which is later than the one we
1325 	 * got above, and therefore possibly in the next NTP second which might
1326 	 * have a different rate than the current NTP second.  It doesn't
1327 	 * matter in practice.
1328 	 */
1329 	if (tho->th_counter->tc_poll_pps)
1330 		tho->th_counter->tc_poll_pps(tho->th_counter);
1331 
1332 	/*
1333 	 * Deal with NTP second processing.  The for loop normally
1334 	 * iterates at most once, but in extreme situations it might
1335 	 * keep NTP sane if timeouts are not run for several seconds.
1336 	 * At boot, the time step can be large when the TOD hardware
1337 	 * has been read, so on really large steps, we call
1338 	 * ntp_update_second only twice.  We need to call it twice in
1339 	 * case we missed a leap second.
1340 	 */
1341 	bt = th->th_offset;
1342 	bintime_add(&bt, &boottimebin);
1343 	i = bt.sec - tho->th_microtime.tv_sec;
1344 	if (i > LARGE_STEP)
1345 		i = 2;
1346 	for (; i > 0; i--) {
1347 		t = bt.sec;
1348 		ntp_update_second(&th->th_adjustment, &bt.sec);
1349 		if (bt.sec != t)
1350 			boottimebin.sec += bt.sec - t;
1351 	}
1352 	/* Update the UTC timestamps used by the get*() functions. */
1353 	/* XXX shouldn't do this here.  Should force non-`get' versions. */
1354 	bintime2timeval(&bt, &th->th_microtime);
1355 	bintime2timespec(&bt, &th->th_nanotime);
1356 
1357 	/* Now is a good time to change timecounters. */
1358 	if (th->th_counter != timecounter) {
1359 #ifndef __arm__
1360 		if ((timecounter->tc_flags & TC_FLAGS_C2STOP) != 0)
1361 			cpu_disable_c2_sleep++;
1362 		if ((th->th_counter->tc_flags & TC_FLAGS_C2STOP) != 0)
1363 			cpu_disable_c2_sleep--;
1364 #endif
1365 		th->th_counter = timecounter;
1366 		th->th_offset_count = ncount;
1367 		tc_min_ticktock_freq = max(1, timecounter->tc_frequency /
1368 		    (((uint64_t)timecounter->tc_counter_mask + 1) / 3));
1369 #ifdef FFCLOCK
1370 		ffclock_change_tc(th);
1371 #endif
1372 	}
1373 
1374 	/*-
1375 	 * Recalculate the scaling factor.  We want the number of 1/2^64
1376 	 * fractions of a second per period of the hardware counter, taking
1377 	 * into account the th_adjustment factor which the NTP PLL/adjtime(2)
1378 	 * processing provides us with.
1379 	 *
1380 	 * The th_adjustment is nanoseconds per second with 32 bit binary
1381 	 * fraction and we want 64 bit binary fraction of second:
1382 	 *
1383 	 *	 x = a * 2^32 / 10^9 = a * 4.294967296
1384 	 *
1385 	 * The range of th_adjustment is +/- 5000PPM so inside a 64bit int
1386 	 * we can only multiply by about 850 without overflowing, that
1387 	 * leaves no suitably precise fractions for multiply before divide.
1388 	 *
1389 	 * Divide before multiply with a fraction of 2199/512 results in a
1390 	 * systematic undercompensation of 10PPM of th_adjustment.  On a
1391 	 * 5000PPM adjustment this is a 0.05PPM error.  This is acceptable.
1392 	 *
1393 	 * We happily sacrifice the lowest of the 64 bits of our result
1394 	 * to the goddess of code clarity.
1395 	 *
1396 	 */
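	/*
	 * Worked example (illustrative): with th_adjustment == 0 and a 1 MHz
	 * counter, scale == 2 * (2^63 / 10^6) == 2^64 / 10^6, so each counter
	 * tick adds 2^64/10^6 * 2^-64 s == 1 microsecond to th_offset.
	 */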
1397 	scale = (uint64_t)1 << 63;
1398 	scale += (th->th_adjustment / 1024) * 2199;
1399 	scale /= th->th_counter->tc_frequency;
1400 	th->th_scale = scale * 2;
1401 
1402 	/*
1403 	 * Now that the struct timehands is again consistent, set the new
1404 	 * generation number, making sure to not make it zero.
1405 	 */
1406 	if (++ogen == 0)
1407 		ogen = 1;
1408 	tc_setgen(th, ogen);
1409 
1410 	/* Go live with the new struct timehands. */
1411 #ifdef FFCLOCK
1412 	switch (sysclock_active) {
1413 	case SYSCLOCK_FBCK:
1414 #endif
1415 		time_second = th->th_microtime.tv_sec;
1416 		time_uptime = th->th_offset.sec;
1417 #ifdef FFCLOCK
1418 		break;
1419 	case SYSCLOCK_FFWD:
1420 		time_second = fftimehands->tick_time_lerp.sec;
1421 		time_uptime = fftimehands->tick_time_lerp.sec - ffclock_boottime.sec;
1422 		break;
1423 	}
1424 #endif
1425 
1426 	timehands = th;
1427 	timekeep_push_vdso();
1428 }
1429 
1430 /* Report or change the active timecounter hardware. */
1431 static int
1432 sysctl_kern_timecounter_hardware(SYSCTL_HANDLER_ARGS)
1433 {
1434 	char newname[32];
1435 	struct timecounter *newtc, *tc;
1436 	int error;
1437 
1438 	tc = timecounter;
1439 	strlcpy(newname, tc->tc_name, sizeof(newname));
1440 
1441 	error = sysctl_handle_string(oidp, &newname[0], sizeof(newname), req);
1442 	if (error != 0 || req->newptr == NULL ||
1443 	    strcmp(newname, tc->tc_name) == 0)
1444 		return (error);
1445 	for (newtc = timecounters; newtc != NULL; newtc = newtc->tc_next) {
1446 		if (strcmp(newname, newtc->tc_name) != 0)
1447 			continue;
1448 
1449 		/* Warm up new timecounter. */
1450 		(void)newtc->tc_get_timecount(newtc);
1451 		(void)newtc->tc_get_timecount(newtc);
1452 
1453 		timecounter = newtc;
1454 
1455 		/*
1456 		 * The vdso timehands update is deferred until the next
1457 		 * 'tc_windup()'.
1458 		 *
1459 		 * This is prudent given that 'timekeep_push_vdso()' does not
1460 		 * use any locking and that it can be called in hard interrupt
1461 		 * context via 'tc_windup()'.
1462 		 */
1463 		return (0);
1464 	}
1465 	return (EINVAL);
1466 }
1467 
1468 SYSCTL_PROC(_kern_timecounter, OID_AUTO, hardware, CTLTYPE_STRING | CTLFLAG_RW,
1469     0, 0, sysctl_kern_timecounter_hardware, "A",
1470     "Timecounter hardware selected");
1471 
1472 
1473 /* Report or change the active timecounter hardware. */
1474 static int
1475 sysctl_kern_timecounter_choice(SYSCTL_HANDLER_ARGS)
1476 {
1477 	struct sbuf sb;
1478 	struct timecounter *tc;
1479 	int error;
1480 
1481 	sbuf_new_for_sysctl(&sb, NULL, 0, req);
1482 	for (tc = timecounters; tc != NULL; tc = tc->tc_next) {
1483 		if (tc != timecounters)
1484 			sbuf_putc(&sb, ' ');
1485 		sbuf_printf(&sb, "%s(%d)", tc->tc_name, tc->tc_quality);
1486 	}
1487 	error = sbuf_finish(&sb);
1488 	sbuf_delete(&sb);
1489 	return (error);
1490 }
1491 
1492 SYSCTL_PROC(_kern_timecounter, OID_AUTO, choice, CTLTYPE_STRING | CTLFLAG_RD,
1493     0, 0, sysctl_kern_timecounter_choice, "A", "Timecounter hardware detected");
1494 
1495 /*
1496  * RFC 2783 PPS-API implementation.
1497  */
1498 
1499 /*
1500  *  Return true if the driver is aware of the abi version extensions in the
1501  *  pps_state structure, and it supports at least the given abi version number.
1502  */
1503 static inline int
1504 abi_aware(struct pps_state *pps, int vers)
1505 {
1506 
1507 	return ((pps->kcmode & KCMODE_ABIFLAG) && pps->driver_abi >= vers);
1508 }
1509 
1510 static int
1511 pps_fetch(struct pps_fetch_args *fapi, struct pps_state *pps)
1512 {
1513 	int err, timo;
1514 	pps_seq_t aseq, cseq;
1515 	struct timeval tv;
1516 
1517 	if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC)
1518 		return (EINVAL);
1519 
1520 	/*
1521 	 * If no timeout is requested, immediately return whatever values were
1522 	 * most recently captured.  If timeout seconds is -1, that's a request
1523 	 * to block without a timeout.  WITNESS won't let us sleep forever
1524 	 * without a lock (we really don't need a lock), so just repeatedly
1525 	 * sleep a long time.
1526 	 */
1527 	if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec) {
1528 		if (fapi->timeout.tv_sec == -1)
1529 			timo = 0x7fffffff;
1530 		else {
1531 			tv.tv_sec = fapi->timeout.tv_sec;
1532 			tv.tv_usec = fapi->timeout.tv_nsec / 1000;
1533 			timo = tvtohz(&tv);
1534 		}
1535 		aseq = pps->ppsinfo.assert_sequence;
1536 		cseq = pps->ppsinfo.clear_sequence;
1537 		while (aseq == pps->ppsinfo.assert_sequence &&
1538 		    cseq == pps->ppsinfo.clear_sequence) {
1539 			if (abi_aware(pps, 1) && pps->driver_mtx != NULL) {
1540 				if (pps->flags & PPSFLAG_MTX_SPIN) {
1541 					err = msleep_spin(pps, pps->driver_mtx,
1542 					    "ppsfch", timo);
1543 				} else {
1544 					err = msleep(pps, pps->driver_mtx, PCATCH,
1545 					    "ppsfch", timo);
1546 				}
1547 			} else {
1548 				err = tsleep(pps, PCATCH, "ppsfch", timo);
1549 			}
1550 			if (err == EWOULDBLOCK && fapi->timeout.tv_sec == -1) {
1551 				continue;
1552 			} else if (err != 0) {
1553 				return (err);
1554 			}
1555 		}
1556 	}
1557 
1558 	pps->ppsinfo.current_mode = pps->ppsparam.mode;
1559 	fapi->pps_info_buf = pps->ppsinfo;
1560 
1561 	return (0);
1562 }
1563 
1564 int
1565 pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps)
1566 {
1567 	pps_params_t *app;
1568 	struct pps_fetch_args *fapi;
1569 #ifdef FFCLOCK
1570 	struct pps_fetch_ffc_args *fapi_ffc;
1571 #endif
1572 #ifdef PPS_SYNC
1573 	struct pps_kcbind_args *kapi;
1574 #endif
1575 
1576 	KASSERT(pps != NULL, ("NULL pps pointer in pps_ioctl"));
1577 	switch (cmd) {
1578 	case PPS_IOC_CREATE:
1579 		return (0);
1580 	case PPS_IOC_DESTROY:
1581 		return (0);
1582 	case PPS_IOC_SETPARAMS:
1583 		app = (pps_params_t *)data;
1584 		if (app->mode & ~pps->ppscap)
1585 			return (EINVAL);
1586 #ifdef FFCLOCK
1587 		/* Ensure only a single clock is selected for ffc timestamp. */
1588 		if ((app->mode & PPS_TSCLK_MASK) == PPS_TSCLK_MASK)
1589 			return (EINVAL);
1590 #endif
1591 		pps->ppsparam = *app;
1592 		return (0);
1593 	case PPS_IOC_GETPARAMS:
1594 		app = (pps_params_t *)data;
1595 		*app = pps->ppsparam;
1596 		app->api_version = PPS_API_VERS_1;
1597 		return (0);
1598 	case PPS_IOC_GETCAP:
1599 		*(int*)data = pps->ppscap;
1600 		return (0);
1601 	case PPS_IOC_FETCH:
1602 		fapi = (struct pps_fetch_args *)data;
1603 		return (pps_fetch(fapi, pps));
1604 #ifdef FFCLOCK
1605 	case PPS_IOC_FETCH_FFCOUNTER:
1606 		fapi_ffc = (struct pps_fetch_ffc_args *)data;
1607 		if (fapi_ffc->tsformat && fapi_ffc->tsformat !=
1608 		    PPS_TSFMT_TSPEC)
1609 			return (EINVAL);
1610 		if (fapi_ffc->timeout.tv_sec || fapi_ffc->timeout.tv_nsec)
1611 			return (EOPNOTSUPP);
1612 		pps->ppsinfo_ffc.current_mode = pps->ppsparam.mode;
1613 		fapi_ffc->pps_info_buf_ffc = pps->ppsinfo_ffc;
1614 		/* Overwrite timestamps if feedback clock selected. */
1615 		switch (pps->ppsparam.mode & PPS_TSCLK_MASK) {
1616 		case PPS_TSCLK_FBCK:
1617 			fapi_ffc->pps_info_buf_ffc.assert_timestamp =
1618 			    pps->ppsinfo.assert_timestamp;
1619 			fapi_ffc->pps_info_buf_ffc.clear_timestamp =
1620 			    pps->ppsinfo.clear_timestamp;
1621 			break;
1622 		case PPS_TSCLK_FFWD:
1623 			break;
1624 		default:
1625 			break;
1626 		}
1627 		return (0);
1628 #endif /* FFCLOCK */
1629 	case PPS_IOC_KCBIND:
1630 #ifdef PPS_SYNC
1631 		kapi = (struct pps_kcbind_args *)data;
1632 		/* XXX Only root should be able to do this */
1633 		if (kapi->tsformat && kapi->tsformat != PPS_TSFMT_TSPEC)
1634 			return (EINVAL);
1635 		if (kapi->kernel_consumer != PPS_KC_HARDPPS)
1636 			return (EINVAL);
1637 		if (kapi->edge & ~pps->ppscap)
1638 			return (EINVAL);
1639 		pps->kcmode = (kapi->edge & KCMODE_EDGEMASK) |
1640 		    (pps->kcmode & KCMODE_ABIFLAG);
1641 		return (0);
1642 #else
1643 		return (EOPNOTSUPP);
1644 #endif
1645 	default:
1646 		return (ENOIOCTL);
1647 	}
1648 }
1649 
1650 void
1651 pps_init(struct pps_state *pps)
1652 {
1653 	pps->ppscap |= PPS_TSFMT_TSPEC | PPS_CANWAIT;
1654 	if (pps->ppscap & PPS_CAPTUREASSERT)
1655 		pps->ppscap |= PPS_OFFSETASSERT;
1656 	if (pps->ppscap & PPS_CAPTURECLEAR)
1657 		pps->ppscap |= PPS_OFFSETCLEAR;
1658 #ifdef FFCLOCK
1659 	pps->ppscap |= PPS_TSCLK_MASK;
1660 #endif
1661 	pps->kcmode &= ~KCMODE_ABIFLAG;
1662 }
1663 
1664 void
1665 pps_init_abi(struct pps_state *pps)
1666 {
1667 
1668 	pps_init(pps);
1669 	if (pps->driver_abi > 0) {
1670 		pps->kcmode |= KCMODE_ABIFLAG;
1671 		pps->kernel_abi = PPS_ABI_VERSION;
1672 	}
1673 }
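
/*
 * The expected driver usage of the capture/event pair (a sketch; the
 * interrupt plumbing is hypothetical): pps_capture() is cheap and safe to
 * call from the interrupt handler that detects the PPS edge, while
 * pps_event() does the heavier conversion and may be deferred:
 *
 *	mydev_intr(void *arg)
 *	{
 *		struct pps_state *pps = arg;
 *
 *		pps_capture(pps);		// latch the counter ASAP
 *		...
 *		pps_event(pps, PPS_CAPTUREASSERT);
 *	}
 */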
1674 
1675 void
1676 pps_capture(struct pps_state *pps)
1677 {
1678 	struct timehands *th;
1679 
1680 	KASSERT(pps != NULL, ("NULL pps pointer in pps_capture"));
1681 	th = timehands;
1682 	pps->capgen = tc_getgen(th);
1683 	pps->capth = th;
1684 #ifdef FFCLOCK
1685 	pps->capffth = fftimehands;
1686 #endif
1687 	pps->capcount = th->th_counter->tc_get_timecount(th->th_counter);
1688 	if (pps->capgen != tc_getgen(th))
1689 		pps->capgen = 0;
1690 }
1691 
1692 void
1693 pps_event(struct pps_state *pps, int event)
1694 {
1695 	struct bintime bt;
1696 	struct timespec ts, *tsp, *osp;
1697 	u_int tcount, *pcount;
1698 	int foff, fhard;
1699 	pps_seq_t *pseq;
1700 #ifdef FFCLOCK
1701 	struct timespec *tsp_ffc;
1702 	pps_seq_t *pseq_ffc;
1703 	ffcounter *ffcount;
1704 #endif
1705 
1706 	KASSERT(pps != NULL, ("NULL pps pointer in pps_event"));
1707 	/* If the timecounter was wound up underneath us, bail out. */
1708 	if (pps->capgen == 0 || pps->capgen != tc_getgen(pps->capth))
1709 		return;
1710 
1711 	/* Things would be easier with arrays. */
1712 	if (event == PPS_CAPTUREASSERT) {
1713 		tsp = &pps->ppsinfo.assert_timestamp;
1714 		osp = &pps->ppsparam.assert_offset;
1715 		foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
1716 		fhard = pps->kcmode & PPS_CAPTUREASSERT;
1717 		pcount = &pps->ppscount[0];
1718 		pseq = &pps->ppsinfo.assert_sequence;
1719 #ifdef FFCLOCK
1720 		ffcount = &pps->ppsinfo_ffc.assert_ffcount;
1721 		tsp_ffc = &pps->ppsinfo_ffc.assert_timestamp;
1722 		pseq_ffc = &pps->ppsinfo_ffc.assert_sequence;
1723 #endif
1724 	} else {
1725 		tsp = &pps->ppsinfo.clear_timestamp;
1726 		osp = &pps->ppsparam.clear_offset;
1727 		foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
1728 		fhard = pps->kcmode & PPS_CAPTURECLEAR;
1729 		pcount = &pps->ppscount[1];
1730 		pseq = &pps->ppsinfo.clear_sequence;
1731 #ifdef FFCLOCK
1732 		ffcount = &pps->ppsinfo_ffc.clear_ffcount;
1733 		tsp_ffc = &pps->ppsinfo_ffc.clear_timestamp;
1734 		pseq_ffc = &pps->ppsinfo_ffc.clear_sequence;
1735 #endif
1736 	}
1737 
1738 	/*
1739 	 * If the timecounter changed, we cannot compare the count values, so
1740 	 * we have to drop the rest of the PPS-stuff until the next event.
1741 	 */
1742 	if (pps->ppstc != pps->capth->th_counter) {
1743 		pps->ppstc = pps->capth->th_counter;
1744 		*pcount = pps->capcount;
1745 		pps->ppscount[2] = pps->capcount;
1746 		return;
1747 	}
1748 
1749 	/* Convert the count to a timespec. */
1750 	tcount = pps->capcount - pps->capth->th_offset_count;
1751 	tcount &= pps->capth->th_counter->tc_counter_mask;
1752 	bt = pps->capth->th_offset;
1753 	bintime_addx(&bt, pps->capth->th_scale * tcount);
1754 	bintime_add(&bt, &boottimebin);
1755 	bintime2timespec(&bt, &ts);
1756 
1757 	/* If the timecounter was wound up underneath us, bail out. */
1758 	if (pps->capgen != tc_getgen(pps->capth))
1759 		return;
1760 
1761 	*pcount = pps->capcount;
1762 	(*pseq)++;
1763 	*tsp = ts;
1764 
1765 	if (foff) {
1766 		timespecadd(tsp, osp);
1767 		if (tsp->tv_nsec < 0) {
1768 			tsp->tv_nsec += 1000000000;
1769 			tsp->tv_sec -= 1;
1770 		}
1771 	}
1772 
1773 #ifdef FFCLOCK
1774 	*ffcount = pps->capffth->tick_ffcount + tcount;
1775 	bt = pps->capffth->tick_time;
1776 	ffclock_convert_delta(tcount, pps->capffth->cest.period, &bt);
1777 	bintime_add(&bt, &pps->capffth->tick_time);
1778 	bintime2timespec(&bt, &ts);
1779 	(*pseq_ffc)++;
1780 	*tsp_ffc = ts;
1781 #endif
1782 
1783 #ifdef PPS_SYNC
1784 	if (fhard) {
1785 		uint64_t scale;
1786 
1787 		/*
1788 		 * Feed the NTP PLL/FLL.
1789 		 * The FLL wants to know how many (hardware) nanoseconds
1790 		 * elapsed since the previous event.
1791 		 */
1792 		tcount = pps->capcount - pps->ppscount[2];
1793 		pps->ppscount[2] = pps->capcount;
1794 		tcount &= pps->capth->th_counter->tc_counter_mask;
1795 		scale = (uint64_t)1 << 63;
1796 		scale /= pps->capth->th_counter->tc_frequency;
1797 		scale *= 2;
1798 		bt.sec = 0;
1799 		bt.frac = 0;
1800 		bintime_addx(&bt, scale * tcount);
1801 		bintime2timespec(&bt, &ts);
1802 		hardpps(tsp, ts.tv_nsec + 1000000000 * ts.tv_sec);
1803 	}
1804 #endif
1805 
1806 	/* Wake up anyone sleeping in pps_fetch(). */
1807 	wakeup(pps);
1808 }
1809 
1810 /*
1811  * Timecounters need to be updated every so often to prevent the hardware
1812  * counter from overflowing.  Updating also recalculates the cached values
1813  * used by the get*() family of functions, so their precision depends on
1814  * the update frequency.
1815  */
1816 
1817 static int tc_tick;
1818 SYSCTL_INT(_kern_timecounter, OID_AUTO, tick, CTLFLAG_RD, &tc_tick, 0,
1819     "Approximate number of hardclock ticks in a millisecond");
1820 
1821 void
1822 tc_ticktock(int cnt)
1823 {
1824 	static int count;
1825 
1826 	count += cnt;
1827 	if (count < tc_tick)
1828 		return;
1829 	count = 0;
1830 	tc_windup();
1831 }
1832 
1833 static void __inline
1834 tc_adjprecision(void)
1835 {
1836 	int t;
1837 
1838 	if (tc_timepercentage > 0) {
1839 		t = (99 + tc_timepercentage) / tc_timepercentage;
1840 		tc_precexp = fls(t + (t >> 1)) - 1;
1841 		FREQ2BT(hz / tc_tick, &bt_timethreshold);
1842 		FREQ2BT(hz, &bt_tickthreshold);
1843 		bintime_shift(&bt_timethreshold, tc_precexp);
1844 		bintime_shift(&bt_tickthreshold, tc_precexp);
1845 	} else {
1846 		tc_precexp = 31;
1847 		bt_timethreshold.sec = INT_MAX;
1848 		bt_timethreshold.frac = ~(uint64_t)0;
1849 		bt_tickthreshold = bt_timethreshold;
1850 	}
1851 	sbt_timethreshold = bttosbt(bt_timethreshold);
1852 	sbt_tickthreshold = bttosbt(bt_tickthreshold);
1853 }
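
/*
 * Worked example (illustrative): with tc_timepercentage == 5, t == (99 + 5) /
 * 5 == 20 and tc_precexp == fls(20 + 10) - 1 == 4, so the thresholds are the
 * tc_tick and hardclock periods scaled up by 2^4.  Events further away than
 * that can be timed with the cheap get*() clocks while staying within the
 * allowed 5% deviation.
 */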
1854 
1855 static int
1856 sysctl_kern_timecounter_adjprecision(SYSCTL_HANDLER_ARGS)
1857 {
1858 	int error, val;
1859 
1860 	val = tc_timepercentage;
1861 	error = sysctl_handle_int(oidp, &val, 0, req);
1862 	if (error != 0 || req->newptr == NULL)
1863 		return (error);
1864 	tc_timepercentage = val;
1865 	if (cold)
1866 		goto done;
1867 	tc_adjprecision();
1868 done:
1869 	return (0);
1870 }
1871 
1872 static void
1873 inittimecounter(void *dummy)
1874 {
1875 	u_int p;
1876 	int tick_rate;
1877 
1878 	/*
1879 	 * Set the initial timeout to
1880 	 * max(1, <approx. number of hardclock ticks in a millisecond>).
1881 	 * People should probably not use the sysctl to set the timeout
1882 	 * to smaller than its inital value, since that value is the
1883 	 * smallest reasonable one.  If they want better timestamps they
1884 	 * should use the non-"get"* functions.
1885 	 */
1886 	if (hz > 1000)
1887 		tc_tick = (hz + 500) / 1000;
1888 	else
1889 		tc_tick = 1;
1890 	tc_adjprecision();
1891 	FREQ2BT(hz, &tick_bt);
1892 	tick_sbt = bttosbt(tick_bt);
1893 	tick_rate = hz / tc_tick;
1894 	FREQ2BT(tick_rate, &tc_tick_bt);
1895 	tc_tick_sbt = bttosbt(tc_tick_bt);
1896 	p = (tc_tick * 1000000) / hz;
1897 	printf("Timecounters tick every %d.%03u msec\n", p / 1000, p % 1000);
1898 
1899 #ifdef FFCLOCK
1900 	ffclock_init();
1901 #endif
1902 	/* warm up new timecounter (again) and get rolling. */
1903 	(void)timecounter->tc_get_timecount(timecounter);
1904 	(void)timecounter->tc_get_timecount(timecounter);
1905 	tc_windup();
1906 }
1907 
1908 SYSINIT(timecounter, SI_SUB_CLOCKS, SI_ORDER_SECOND, inittimecounter, NULL);
1909 
1910 /* Cpu tick handling -------------------------------------------------*/
1911 
1912 static int cpu_tick_variable;
1913 static uint64_t	cpu_tick_frequency;
1914 
1915 static uint64_t
1916 tc_cpu_ticks(void)
1917 {
1918 	static uint64_t base;
1919 	static unsigned last;
1920 	unsigned u;
1921 	struct timecounter *tc;
1922 
1923 	tc = timehands->th_counter;
1924 	u = tc->tc_get_timecount(tc) & tc->tc_counter_mask;
1925 	if (u < last)
1926 		base += (uint64_t)tc->tc_counter_mask + 1;
1927 	last = u;
1928 	return (u + base);
1929 }
1930 
1931 void
1932 cpu_tick_calibration(void)
1933 {
1934 	static time_t last_calib;
1935 
1936 	if (time_uptime != last_calib && !(time_uptime & 0xf)) {
1937 		cpu_tick_calibrate(0);
1938 		last_calib = time_uptime;
1939 	}
1940 }
1941 
1942 /*
1943  * This function gets called every 16 seconds on only one designated
1944  * CPU in the system from hardclock() via cpu_tick_calibration().
1945  *
1946  * Whenever the real time clock is stepped we get called with reset=1
1947  * to make sure we handle suspend/resume and similar events correctly.
1948  */
1949 
1950 static void
1951 cpu_tick_calibrate(int reset)
1952 {
1953 	static uint64_t c_last;
1954 	uint64_t c_this, c_delta;
1955 	static struct bintime  t_last;
1956 	struct bintime t_this, t_delta;
1957 	uint32_t divi;
1958 
1959 	if (reset) {
1960 		/* The clock was stepped, abort & reset */
1961 		t_last.sec = 0;
1962 		return;
1963 	}
1964 
1965 	/* we don't calibrate fixed rate cputicks */
1966 	if (!cpu_tick_variable)
1967 		return;
1968 
1969 	getbinuptime(&t_this);
1970 	c_this = cpu_ticks();
1971 	if (t_last.sec != 0) {
1972 		c_delta = c_this - c_last;
1973 		t_delta = t_this;
1974 		bintime_sub(&t_delta, &t_last);
1975 		/*
1976 		 * Headroom:
1977 		 * 	2^(64-20) / 16[s] =
1978 		 * 	2^(44) / 16[s] =
1979 		 * 	17.592.186.044.416 / 16 =
1980 		 * 	1.099.511.627.776 [Hz]
1981 		 */
1982 		divi = t_delta.sec << 20;
1983 		divi |= t_delta.frac >> (64 - 20);
1984 		c_delta <<= 20;
1985 		c_delta /= divi;
1986 		if (c_delta > cpu_tick_frequency) {
1987 			if (0 && bootverbose)
1988 				printf("cpu_tick increased to %ju Hz\n",
1989 				    c_delta);
1990 			cpu_tick_frequency = c_delta;
1991 		}
1992 	}
1993 	c_last = c_this;
1994 	t_last = t_this;
1995 }
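
/*
 * Worked example (illustrative): over a nominal 16 s window, divi is the
 * elapsed time in 1/2^20 s units (16 << 20 == 16777216), and
 * (c_delta << 20) / divi is then ticks per second.  For a 2.4 GHz ticker,
 * c_delta is about 38.4e9, giving cpu_tick_frequency ~= 2.4e9, well within
 * the 2^44 / 16 ~= 1.1e12 Hz headroom computed above.
 */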
1996 
1997 void
1998 set_cputicker(cpu_tick_f *func, uint64_t freq, unsigned var)
1999 {
2000 
2001 	if (func == NULL) {
2002 		cpu_ticks = tc_cpu_ticks;
2003 	} else {
2004 		cpu_tick_frequency = freq;
2005 		cpu_tick_variable = var;
2006 		cpu_ticks = func;
2007 	}
2008 }
2009 
2010 uint64_t
2011 cpu_tickrate(void)
2012 {
2013 
2014 	if (cpu_ticks == tc_cpu_ticks)
2015 		return (tc_getfrequency());
2016 	return (cpu_tick_frequency);
2017 }
2018 
2019 /*
2020  * We need to be slightly careful converting cputicks to microseconds.
2021  * There is plenty of margin in 64 bits of microseconds (half a million
2022  * years) and in 64 bits at 4 GHz (146 years), but if we do a multiply
2023  * before divide conversion (to retain precision) we find that the
2024  * margin shrinks to 1.5 hours (one millionth of 146y).
2025  * With a three-pronged approach we never lose significant bits, no
2026  * matter what the cputick rate and the length of the time interval are.
2027  */
2028 
2029 uint64_t
2030 cputick2usec(uint64_t tick)
2031 {
2032 
2033 	if (tick > 18446744073709551LL)		/* floor(2^64 / 1000) */
2034 		return (tick / (cpu_tickrate() / 1000000LL));
2035 	else if (tick > 18446744073709LL)	/* floor(2^64 / 1000000) */
2036 		return ((tick * 1000LL) / (cpu_tickrate() / 1000LL));
2037 	else
2038 		return ((tick * 1000000LL) / cpu_tickrate());
2039 }
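
/*
 * Example (illustrative): at a 4 GHz tick rate, one day is about 3.46e14
 * ticks.  tick * 1000000 would overflow 64 bits, but tick is below
 * floor(2^64 / 1000), so the middle branch applies: multiply by 1000 and
 * divide by (cpu_tickrate() / 1000), keeping both steps inside 64 bits.
 */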
2040 
2041 cpu_tick_f	*cpu_ticks = tc_cpu_ticks;
2042 
2043 static int vdso_th_enable = 1;
2044 static int
2045 sysctl_fast_gettime(SYSCTL_HANDLER_ARGS)
2046 {
2047 	int old_vdso_th_enable, error;
2048 
2049 	old_vdso_th_enable = vdso_th_enable;
2050 	error = sysctl_handle_int(oidp, &old_vdso_th_enable, 0, req);
2051 	if (error != 0)
2052 		return (error);
2053 	vdso_th_enable = old_vdso_th_enable;
2054 	return (0);
2055 }
2056 SYSCTL_PROC(_kern_timecounter, OID_AUTO, fast_gettime,
2057     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
2058     NULL, 0, sysctl_fast_gettime, "I", "Enable fast time of day");
2059 
2060 uint32_t
2061 tc_fill_vdso_timehands(struct vdso_timehands *vdso_th)
2062 {
2063 	struct timehands *th;
2064 	uint32_t enabled;
2065 
2066 	th = timehands;
2067 	vdso_th->th_algo = VDSO_TH_ALGO_1;
2068 	vdso_th->th_scale = th->th_scale;
2069 	vdso_th->th_offset_count = th->th_offset_count;
2070 	vdso_th->th_counter_mask = th->th_counter->tc_counter_mask;
2071 	vdso_th->th_offset = th->th_offset;
2072 	vdso_th->th_boottime = boottimebin;
2073 	enabled = cpu_fill_vdso_timehands(vdso_th, th->th_counter);
2074 	if (!vdso_th_enable)
2075 		enabled = 0;
2076 	return (enabled);
2077 }
2078 
2079 #ifdef COMPAT_FREEBSD32
2080 uint32_t
2081 tc_fill_vdso_timehands32(struct vdso_timehands32 *vdso_th32)
2082 {
2083 	struct timehands *th;
2084 	uint32_t enabled;
2085 
2086 	th = timehands;
2087 	vdso_th32->th_algo = VDSO_TH_ALGO_1;
2088 	*(uint64_t *)&vdso_th32->th_scale[0] = th->th_scale;
2089 	vdso_th32->th_offset_count = th->th_offset_count;
2090 	vdso_th32->th_counter_mask = th->th_counter->tc_counter_mask;
2091 	vdso_th32->th_offset.sec = th->th_offset.sec;
2092 	*(uint64_t *)&vdso_th32->th_offset.frac[0] = th->th_offset.frac;
2093 	vdso_th32->th_boottime.sec = boottimebin.sec;
2094 	*(uint64_t *)&vdso_th32->th_boottime.frac[0] = boottimebin.frac;
2095 	enabled = cpu_fill_vdso_timehands32(vdso_th32, th->th_counter);
2096 	if (!vdso_th_enable)
2097 		enabled = 0;
2098 	return (enabled);
2099 }
2100 #endif
2101