/*-
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * $FreeBSD$
 */

#include "opt_ntp.h"

#include <sys/param.h>
#include <sys/stdint.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/timepps.h>
#include <sys/timetc.h>
#include <sys/timex.h>

/*
 * Implement a dummy timecounter which we can use until we get a real one
 * in the air.  This allows the console and other early stuff to use
 * time services.
 */

static u_int
dummy_get_timecount(struct timecounter *tc)
{
	static u_int now;

	return (++now);
}

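/*
 * The positional initializer below fills in tc_get_timecount, tc_poll_pps
 * (none), tc_counter_mask, tc_frequency (a nominal 1 MHz) and tc_name,
 * in that order.
 */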
static struct timecounter dummy_timecounter = {
	dummy_get_timecount, 0, ~0u, 1000000, "dummy",
};

struct timehands {
	/* These fields must be initialized by the driver. */
	struct timecounter	*th_counter;
	int64_t			th_adjustment;
	u_int64_t		th_scale;
	u_int			th_offset_count;
	struct bintime		th_offset;
	struct timeval		th_microtime;
	struct timespec		th_nanotime;
	/* Fields not to be copied in tc_windup start with th_generation. */
	volatile u_int		th_generation;
	struct timehands	*th_next;
};

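/*
 * Ten statically allocated timehands form a ring: th0 -> th1 -> ... ->
 * th9 -> th0.  tc_windup() always prepares the element following the
 * currently active one and then switches to it; a reader that raced
 * against the update detects this through th_generation and retries.
 * th0 starts out live, using the dummy timecounter with a scale that
 * matches its 1 MHz frequency.
 */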
extern struct timehands th0;
static struct timehands th9 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th0};
static struct timehands th8 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th9};
static struct timehands th7 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th8};
static struct timehands th6 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th7};
static struct timehands th5 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th6};
static struct timehands th4 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th5};
static struct timehands th3 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th4};
static struct timehands th2 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th3};
static struct timehands th1 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th2};
static struct timehands th0 = {
	&dummy_timecounter,
	0,
	(uint64_t)-1 / 1000000,
	0,
	{1, 0},
	{0, 0},
	{0, 0},
	1,
	&th1
};

static struct timehands *volatile timehands = &th0;
struct timecounter *timecounter = &dummy_timecounter;
static struct timecounter *timecounters = &dummy_timecounter;

time_t time_second = 1;
time_t time_uptime = 0;

static struct bintime boottimebin;
struct timeval boottime;
SYSCTL_STRUCT(_kern, KERN_BOOTTIME, boottime, CTLFLAG_RD,
    &boottime, timeval, "System boottime");

SYSCTL_NODE(_kern, OID_AUTO, timecounter, CTLFLAG_RW, 0, "");

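/*
 * Per-interface event counters, exported read-only under kern.timecounter.
 * The trailing "struct __hack" makes each TC_STATS() invocation require a
 * terminating semicolon.
 */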
#define TC_STATS(foo) \
	static u_int foo; \
	SYSCTL_UINT(_kern_timecounter, OID_AUTO, foo, CTLFLAG_RD, &foo, 0, "");\
	struct __hack

TC_STATS(nbinuptime);    TC_STATS(nnanouptime);    TC_STATS(nmicrouptime);
TC_STATS(nbintime);      TC_STATS(nnanotime);      TC_STATS(nmicrotime);
TC_STATS(ngetbinuptime); TC_STATS(ngetnanouptime); TC_STATS(ngetmicrouptime);
TC_STATS(ngetbintime);   TC_STATS(ngetnanotime);   TC_STATS(ngetmicrotime);
TC_STATS(nsetclock);

#undef TC_STATS

static void tc_windup(void);

/*
 * Return the difference between the timehands' counter value now and what
 * was when we copied it to the timehands' offset_count.
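 * The subtraction and mask give the correct result across a counter wrap,
 * provided less than one full counter period has elapsed since the last
 * windup.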
 */
static __inline u_int
tc_delta(struct timehands *th)
{
	struct timecounter *tc;

	tc = th->th_counter;
	return ((tc->tc_get_timecount(tc) - th->th_offset_count) &
	    tc->tc_counter_mask);
}

/*
 * Functions for reading the time.  We have to loop until we are sure that
 * the timehands that we operated on was not updated under our feet.  See
 * the comment in <sys/time.h> for a description of these 12 functions.
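 *
 * A th_generation of zero means that tc_windup() is in the middle of an
 * update; any other value must be unchanged between reading it and
 * finishing the copy for the snapshot to be accepted.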
 */

void
binuptime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	nbinuptime++;
	do {
		th = timehands;
		gen = th->th_generation;
		*bt = th->th_offset;
		bintime_addx(bt, th->th_scale * tc_delta(th));
	} while (gen == 0 || gen != th->th_generation);
}

void
nanouptime(struct timespec *tsp)
{
	struct bintime bt;

	nnanouptime++;
	binuptime(&bt);
	bintime2timespec(&bt, tsp);
}

void
microuptime(struct timeval *tvp)
{
	struct bintime bt;

	nmicrouptime++;
	binuptime(&bt);
	bintime2timeval(&bt, tvp);
}

void
bintime(struct bintime *bt)
{

	nbintime++;
	binuptime(bt);
	bintime_add(bt, &boottimebin);
}

void
nanotime(struct timespec *tsp)
{
	struct bintime bt;

	nnanotime++;
	bintime(&bt);
	bintime2timespec(&bt, tsp);
}

void
microtime(struct timeval *tvp)
{
	struct bintime bt;

	nmicrotime++;
	bintime(&bt);
	bintime2timeval(&bt, tvp);
}

void
getbinuptime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	ngetbinuptime++;
	do {
		th = timehands;
		gen = th->th_generation;
		*bt = th->th_offset;
	} while (gen == 0 || gen != th->th_generation);
}

void
getnanouptime(struct timespec *tsp)
{
	struct timehands *th;
	u_int gen;

	ngetnanouptime++;
	do {
		th = timehands;
		gen = th->th_generation;
		bintime2timespec(&th->th_offset, tsp);
	} while (gen == 0 || gen != th->th_generation);
}

void
getmicrouptime(struct timeval *tvp)
{
	struct timehands *th;
	u_int gen;

	ngetmicrouptime++;
	do {
		th = timehands;
		gen = th->th_generation;
		bintime2timeval(&th->th_offset, tvp);
	} while (gen == 0 || gen != th->th_generation);
}

void
getbintime(struct bintime *bt)
{
	struct timehands *th;
	u_int gen;

	ngetbintime++;
	do {
		th = timehands;
		gen = th->th_generation;
		*bt = th->th_offset;
	} while (gen == 0 || gen != th->th_generation);
	bintime_add(bt, &boottimebin);
}

void
getnanotime(struct timespec *tsp)
{
	struct timehands *th;
	u_int gen;

	ngetnanotime++;
	do {
		th = timehands;
		gen = th->th_generation;
		*tsp = th->th_nanotime;
	} while (gen == 0 || gen != th->th_generation);
}

void
getmicrotime(struct timeval *tvp)
{
	struct timehands *th;
	u_int gen;

	ngetmicrotime++;
	do {
		th = timehands;
		gen = th->th_generation;
		*tvp = th->th_microtime;
	} while (gen == 0 || gen != th->th_generation);
}

/*
 * Initialize a new timecounter.
 * We should really try to rank the timecounters and intelligently determine
 * if the new timecounter is better than the current one.  This is subject
 * to further study.  For now always use the new timecounter.
 */
void
tc_init(struct timecounter *tc)
{
	unsigned u;

	printf("Timecounter \"%s\" frequency %ju Hz",
	    tc->tc_name, (uintmax_t)tc->tc_frequency);

	u = tc->tc_frequency / tc->tc_counter_mask;
	if (u > hz) {
		printf(" -- Insufficient hz, needs at least %u\n", u);
		return;
	}
	tc->tc_next = timecounters;
	timecounters = tc;
	printf("\n");
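	/* Read the counter twice to make sure it is warmed up before use. */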
	(void)tc->tc_get_timecount(tc);
	(void)tc->tc_get_timecount(tc);
	timecounter = tc;
}
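
/*
 * A hardware driver registers its counter by filling in a struct
 * timecounter and calling tc_init() once the counter is ticking.  A
 * minimal sketch; the "acme" names and the 33 MHz frequency are purely
 * illustrative:
 *
 *	static u_int
 *	acme_get_timecount(struct timecounter *tc)
 *	{
 *
 *		return (acme_read_counter_register());
 *	}
 *
 *	static struct timecounter acme_timecounter = {
 *		acme_get_timecount, 0, 0xffffffffu, 33000000, "acme",
 *	};
 *
 *	tc_init(&acme_timecounter);
 */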

/* Report the frequency of the current timecounter. */
u_int64_t
tc_getfrequency(void)
{

	return (timehands->th_counter->tc_frequency);
}

/*
 * Step our concept of GMT.  This is done by modifying our estimate of
 * when we booted.  XXX: needs further work.
 */
void
tc_setclock(struct timespec *ts)
{
	struct timespec ts2;

	nsetclock++;
	nanouptime(&ts2);
	boottime.tv_sec = ts->tv_sec - ts2.tv_sec;
	/* XXX boottime should probably be a timespec. */
	boottime.tv_usec = (ts->tv_nsec - ts2.tv_nsec) / 1000;
	if (boottime.tv_usec < 0) {
		boottime.tv_usec += 1000000;
		boottime.tv_sec--;
	}
	timeval2bintime(&boottime, &boottimebin);

	/* XXX fiddle all the little crinkly bits around the fiords... */
	tc_windup();
}

/*
 * Initialize the next struct timehands in the ring and make
 * it the active timehands.  Along the way we might switch to a different
 * timecounter and/or do seconds processing in NTP.  Slightly magic.
 */
static void
tc_windup(void)
{
	struct bintime bt;
	struct timehands *th, *tho;
	u_int64_t scale;
	u_int delta, ncount, ogen;
	int i;

	/*
	 * Make the next timehands a copy of the current one, but do not
	 * overwrite the generation or next pointer.  While we update
	 * the contents, the generation must be zero.
	 */
	tho = timehands;
	th = tho->th_next;
	ogen = th->th_generation;
	th->th_generation = 0;
	bcopy(tho, th, offsetof(struct timehands, th_generation));

	/*
	 * Capture a timecounter delta on the current timecounter and if
	 * changing timecounters, a counter value from the new timecounter.
	 * Update the offset fields accordingly.
	 */
	delta = tc_delta(th);
	if (th->th_counter != timecounter)
		ncount = timecounter->tc_get_timecount(timecounter);
	else
		ncount = 0;
	th->th_offset_count += delta;
	th->th_offset_count &= th->th_counter->tc_counter_mask;
	bintime_addx(&th->th_offset, th->th_scale * delta);

	/*
	 * Hardware latching timecounters may not generate interrupts on
	 * PPS events, so instead we poll them.  There is a finite risk that
	 * the hardware might capture a count which is later than the one we
	 * got above, and therefore possibly in the next NTP second which might
	 * have a different rate than the current NTP second.  It doesn't
	 * matter in practice.
	 */
	if (tho->th_counter->tc_poll_pps)
		tho->th_counter->tc_poll_pps(tho->th_counter);

	/*
	 * Deal with NTP second processing.  The for loop normally only
	 * iterates once, but in extreme situations it might keep NTP sane
	 * if timeouts are not run for several seconds.
	 */
	for (i = th->th_offset.sec - tho->th_offset.sec; i > 0; i--)
		ntp_update_second(&th->th_adjustment, &th->th_offset.sec);

	/* Now is a good time to change timecounters. */
	if (th->th_counter != timecounter) {
		th->th_counter = timecounter;
		th->th_offset_count = ncount;
	}

	/*-
	 * Recalculate the scaling factor.  We want the number of 1/2^64
	 * fractions of a second per period of the hardware counter, taking
	 * into account the th_adjustment factor which the NTP PLL/adjtime(2)
	 * processing provides us with.
	 *
	 * The th_adjustment is nanoseconds per second with 32 bit binary
	 * fraction and we want 64 bit binary fraction of second:
	 *
	 *	 x = a * 2^32 / 10^9 = a * 4.294967296
	 *
	 * The range of th_adjustment is +/- 5000PPM so inside a 64bit int
	 * we can only multiply by about 850 without overflowing, but that
	 * leaves suitably precise fractions for multiply before divide.
	 *
	 * Divide before multiply with a fraction of 2199/512 results in a
	 * systematic undercompensation of 10PPM of th_adjustment.  On a
	 * 5000PPM adjustment this is a 0.05PPM error.  This is acceptable.
	 *
	 * We happily sacrifice the lowest of the 64 bits of our result
	 * to the goddess of code clarity.
	 */
	scale = (u_int64_t)1 << 63;
	scale += (th->th_adjustment / 1024) * 2199;
	scale /= th->th_counter->tc_frequency;
	th->th_scale = scale * 2;
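
	/*
	 * For example, with a 1 MHz counter and zero adjustment this works
	 * out to (2^63 / 1000000) * 2 ~= 1.8e13, i.e. one microsecond (one
	 * counter period) expressed in units of 2^-64 seconds.  The
	 * (x / 1024) * 2199 term, doubled by the final multiplication, is
	 * the divide-before-multiply form of the 2199/512 fraction noted
	 * above.
	 */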

	/* Update the GMT timestamps used for the get*() functions. */
	bt = th->th_offset;
	bintime_add(&bt, &boottimebin);
	bintime2timeval(&bt, &th->th_microtime);
	bintime2timespec(&bt, &th->th_nanotime);

	/*
	 * Now that the struct timehands is again consistent, set the new
	 * generation number, making sure to not make it zero.
	 */
	if (++ogen == 0)
		ogen = 1;
	th->th_generation = ogen;

	/* Go live with the new struct timehands. */
	time_second = th->th_microtime.tv_sec;
	time_uptime = th->th_offset.sec;
	timehands = th;
}

/* Report or change the active timecounter hardware. */
static int
sysctl_kern_timecounter_hardware(SYSCTL_HANDLER_ARGS)
{
	char newname[32];
	struct timecounter *newtc, *tc;
	int error;

	tc = timecounter;
	strlcpy(newname, tc->tc_name, sizeof(newname));

	error = sysctl_handle_string(oidp, &newname[0], sizeof(newname), req);
	if (error != 0 || req->newptr == NULL ||
	    strcmp(newname, tc->tc_name) == 0)
		return (error);
	for (newtc = timecounters; newtc != NULL; newtc = newtc->tc_next) {
		if (strcmp(newname, newtc->tc_name) != 0)
			continue;

		/* Warm up new timecounter. */
		(void)newtc->tc_get_timecount(newtc);
		(void)newtc->tc_get_timecount(newtc);

		timecounter = newtc;
		return (0);
	}
	return (EINVAL);
}

SYSCTL_PROC(_kern_timecounter, OID_AUTO, hardware, CTLTYPE_STRING | CTLFLAG_RW,
    0, 0, sysctl_kern_timecounter_hardware, "A", "");
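
/*
 * The active hardware can be inspected or changed at run time, e.g.:
 *
 *	sysctl kern.timecounter.hardware
 *	sysctl kern.timecounter.hardware=TSC
 *
 * where the new name must match one of the registered timecounters
 * ("TSC" here is just an example).
 */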

/*
 * RFC 2783 PPS-API implementation.
 */
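
/*
 * A PPS-capable driver typically embeds a struct pps_state, calls
 * pps_init() at attach time, forwards the time-pps ioctls to pps_ioctl(),
 * and on every PPS signal edge calls pps_capture() as early as possible
 * (usually from its interrupt handler) followed by pps_event() once the
 * edge has been classified as assert or clear.
 */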

int
pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps)
{
	pps_params_t *app;
	struct pps_fetch_args *fapi;
#ifdef PPS_SYNC
	struct pps_kcbind_args *kapi;
#endif

	switch (cmd) {
	case PPS_IOC_CREATE:
		return (0);
	case PPS_IOC_DESTROY:
		return (0);
	case PPS_IOC_SETPARAMS:
		app = (pps_params_t *)data;
		if (app->mode & ~pps->ppscap)
			return (EINVAL);
		pps->ppsparam = *app;
		return (0);
	case PPS_IOC_GETPARAMS:
		app = (pps_params_t *)data;
		*app = pps->ppsparam;
		app->api_version = PPS_API_VERS_1;
		return (0);
	case PPS_IOC_GETCAP:
		*(int*)data = pps->ppscap;
		return (0);
	case PPS_IOC_FETCH:
		fapi = (struct pps_fetch_args *)data;
		if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec)
			return (EOPNOTSUPP);
		pps->ppsinfo.current_mode = pps->ppsparam.mode;
		fapi->pps_info_buf = pps->ppsinfo;
		return (0);
	case PPS_IOC_KCBIND:
#ifdef PPS_SYNC
		kapi = (struct pps_kcbind_args *)data;
		/* XXX Only root should be able to do this */
		if (kapi->tsformat && kapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (kapi->kernel_consumer != PPS_KC_HARDPPS)
			return (EINVAL);
		if (kapi->edge & ~pps->ppscap)
			return (EINVAL);
		pps->kcmode = kapi->edge;
		return (0);
#else
		return (EOPNOTSUPP);
#endif
	default:
		return (ENOTTY);
	}
}

void
pps_init(struct pps_state *pps)
{
	pps->ppscap |= PPS_TSFMT_TSPEC;
	if (pps->ppscap & PPS_CAPTUREASSERT)
		pps->ppscap |= PPS_OFFSETASSERT;
	if (pps->ppscap & PPS_CAPTURECLEAR)
		pps->ppscap |= PPS_OFFSETCLEAR;
}

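/*
 * Grab a snapshot of the current timehands and the raw counter value as
 * close to the PPS edge as possible; pps_event() later turns the capture
 * into a timestamp.  A capgen of zero marks the capture as invalid.
 */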
void
pps_capture(struct pps_state *pps)
{
	struct timehands *th;

	th = timehands;
	pps->capgen = th->th_generation;
	pps->capth = th;
	pps->capcount = th->th_counter->tc_get_timecount(th->th_counter);
	if (pps->capgen != th->th_generation)
		pps->capgen = 0;
}

void
pps_event(struct pps_state *pps, int event)
{
	struct bintime bt;
	struct timespec ts, *tsp, *osp;
	u_int tcount, *pcount;
	int foff, fhard;
	pps_seq_t *pseq;

	/* If the timecounter was wound up underneath us, bail out. */
	if (pps->capgen == 0 || pps->capgen != pps->capth->th_generation)
		return;

	/* Things would be easier with arrays. */
	if (event == PPS_CAPTUREASSERT) {
		tsp = &pps->ppsinfo.assert_timestamp;
		osp = &pps->ppsparam.assert_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
		fhard = pps->kcmode & PPS_CAPTUREASSERT;
		pcount = &pps->ppscount[0];
		pseq = &pps->ppsinfo.assert_sequence;
	} else {
		tsp = &pps->ppsinfo.clear_timestamp;
		osp = &pps->ppsparam.clear_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
		fhard = pps->kcmode & PPS_CAPTURECLEAR;
		pcount = &pps->ppscount[1];
		pseq = &pps->ppsinfo.clear_sequence;
	}

	/*
	 * If the timecounter changed, we cannot compare the count values, so
	 * we have to drop the rest of the PPS-stuff until the next event.
	 */
	if (pps->ppstc != pps->capth->th_counter) {
		pps->ppstc = pps->capth->th_counter;
		*pcount = pps->capcount;
		pps->ppscount[2] = pps->capcount;
		return;
	}

	/* Return if nothing really happened. */
	if (*pcount == pps->capcount)
		return;

	/* Convert the count to a timespec. */
	tcount = pps->capcount - pps->capth->th_offset_count;
	tcount &= pps->capth->th_counter->tc_counter_mask;
	bt = pps->capth->th_offset;
	bintime_addx(&bt, pps->capth->th_scale * tcount);
	bintime_add(&bt, &boottimebin);
	bintime2timespec(&bt, &ts);

	/* If the timecounter was wound up underneath us, bail out. */
	if (pps->capgen != pps->capth->th_generation)
		return;

	*pcount = pps->capcount;
	(*pseq)++;
	*tsp = ts;

	if (foff) {
		timespecadd(tsp, osp);
		if (tsp->tv_nsec < 0) {
			tsp->tv_nsec += 1000000000;
			tsp->tv_sec -= 1;
		}
	}
#ifdef PPS_SYNC
	if (fhard) {
		u_int64_t scale;

		/*
		 * Feed the NTP PLL/FLL.
		 * The FLL wants to know how many (hardware) nanoseconds
		 * elapsed since the previous event.
		 */
		tcount = pps->capcount - pps->ppscount[2];
		pps->ppscount[2] = pps->capcount;
		tcount &= pps->capth->th_counter->tc_counter_mask;
		scale = (u_int64_t)1 << 63;
		scale /= pps->capth->th_counter->tc_frequency;
		scale *= 2;
		bt.sec = 0;
		bt.frac = 0;
		bintime_addx(&bt, scale * tcount);
		bintime2timespec(&bt, &ts);
		hardpps(tsp, ts.tv_nsec + 1000000000 * ts.tv_sec);
	}
#endif
}

/*
 * Timecounters need to be updated every so often to prevent the hardware
 * counter from overflowing.  Updating also recalculates the cached values
 * used by the get*() family of functions, so their precision depends on
 * the update frequency.
 */

static int tc_tick;
SYSCTL_INT(_kern_timecounter, OID_AUTO, tick, CTLFLAG_RD, &tc_tick, 0, "");

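/*
 * Called from the periodic clock interrupt path; every tc_tick'th call
 * winds the timecounter machinery forward via tc_windup().
 */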
void
tc_ticktock(void)
{
	static int count;

	if (++count < tc_tick)
		return;
	count = 0;
	tc_windup();
}

static void
inittimecounter(void *dummy)
{
	u_int p;

	/*
	 * Set the initial timeout to
	 * max(1, <approx. number of hardclock ticks in a millisecond>).
	 * People should probably not use the sysctl to set the timeout
	 * to smaller than its initial value, since that value is the
	 * smallest reasonable one.  If they want better timestamps they
	 * should use the non-"get"* functions.
	 */
	if (hz > 1000)
		tc_tick = (hz + 500) / 1000;
	else
		tc_tick = 1;
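
	/*
	 * For example, hz = 100 gives tc_tick = 1 and a 10.000 msec period,
	 * while hz = 4000 gives tc_tick = 4 and a 1.000 msec period.
	 */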
	p = (tc_tick * 1000000) / hz;
	printf("Timecounters tick every %u.%03u msec\n", p / 1000, p % 1000);

	/* warm up new timecounter (again) and get rolling. */
	(void)timecounter->tc_get_timecount(timecounter);
	(void)timecounter->tc_get_timecount(timecounter);
}

SYSINIT(timecounter, SI_SUB_CLOCKS, SI_ORDER_SECOND, inittimecounter, NULL)