xref: /freebsd/sys/kern/kern_clocksource.c (revision 0677dfd1c4dadb62482e2c72fa4c6720902128a4)
/*-
 * Copyright (c) 2010-2013 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Common routines to manage event timer hardware.
 */

#include "opt_device_polling.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/timeet.h>
#include <sys/timetc.h>

#include <machine/atomic.h>
#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/smp.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>
cyclic_clock_func_t	cyclic_clock_func = NULL;
#endif

int			cpu_can_deep_sleep = 0;	/* C3 state is available. */
int			cpu_disable_deep_sleep = 0; /* Timer dies in C3. */

static void		setuptimer(void);
static void		loadtimer(sbintime_t now, int first);
static int		doconfigtimer(void);
static void		configtimer(int start);
static int		round_freq(struct eventtimer *et, int freq);

static sbintime_t	getnextcpuevent(int idle);
static sbintime_t	getnextevent(void);
static int		handleevents(sbintime_t now, int fake);

static struct mtx	et_hw_mtx;

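/*
 * The hardware timer state is protected by a spin mutex: the per-CPU
 * et_hw_mtx in struct pcpu_state when the active timer is per-CPU, or
 * the single global et_hw_mtx otherwise.  The macros below pick the
 * right one based on the active timer's flags.
 */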
#define	ET_HW_LOCK(state)						\
	{								\
		if (timer->et_flags & ET_FLAGS_PERCPU)			\
			mtx_lock_spin(&(state)->et_hw_mtx);		\
		else							\
			mtx_lock_spin(&et_hw_mtx);			\
	}

#define	ET_HW_UNLOCK(state)						\
	{								\
		if (timer->et_flags & ET_FLAGS_PERCPU)			\
			mtx_unlock_spin(&(state)->et_hw_mtx);		\
		else							\
			mtx_unlock_spin(&et_hw_mtx);			\
	}

static struct eventtimer *timer = NULL;
static sbintime_t	timerperiod;	/* Timer period for periodic mode. */
static sbintime_t	statperiod;	/* statclock() events period. */
static sbintime_t	profperiod;	/* profclock() events period. */
static sbintime_t	nexttick;	/* Next global timer tick time. */
static u_int		busy = 1;	/* Reconfiguration is in progress. */
static int		profiling;	/* Profiling events enabled. */

static char		timername[32];	/* Wanted timer. */
TUNABLE_STR("kern.eventtimer.timer", timername, sizeof(timername));

static int		singlemul;	/* Multiplier for periodic mode. */
SYSCTL_INT(_kern_eventtimer, OID_AUTO, singlemul, CTLFLAG_RWTUN, &singlemul,
    0, "Multiplier for periodic mode");

static u_int		idletick;	/* Run periodic events when idle. */
SYSCTL_UINT(_kern_eventtimer, OID_AUTO, idletick, CTLFLAG_RWTUN, &idletick,
    0, "Run periodic events when idle");

static int		periodic;	/* Periodic or one-shot mode. */
static int		want_periodic;	/* What mode to prefer. */
TUNABLE_INT("kern.eventtimer.periodic", &want_periodic);

struct pcpu_state {
	struct mtx	et_hw_mtx;	/* Per-CPU timer mutex. */
	u_int		action;		/* Reconfiguration requests. */
	u_int		handle;		/* Immediate handle requests. */
	sbintime_t	now;		/* Last tick time. */
	sbintime_t	nextevent;	/* Next scheduled event on this CPU. */
	sbintime_t	nexttick;	/* Next timer tick time. */
	sbintime_t	nexthard;	/* Next hardclock() event. */
	sbintime_t	nextstat;	/* Next statclock() event. */
	sbintime_t	nextprof;	/* Next profclock() event. */
	sbintime_t	nextcall;	/* Next callout event. */
	sbintime_t	nextcallopt;	/* Next optional callout event. */
#ifdef KDTRACE_HOOKS
	sbintime_t	nextcyc;	/* Next OpenSolaris cyclics event. */
#endif
	int		ipi;		/* This CPU needs IPI. */
	int		idle;		/* This CPU is in idle mode. */
};

static DPCPU_DEFINE(struct pcpu_state, timerstate);
DPCPU_DEFINE(sbintime_t, hardclocktime);

/*
 * Timer broadcast IPI handler.
 */
int
hardclockintr(void)
{
	sbintime_t now;
	struct pcpu_state *state;
	int done;

	if (doconfigtimer() || busy)
		return (FILTER_HANDLED);
	state = DPCPU_PTR(timerstate);
	now = state->now;
	CTR3(KTR_SPARE2, "ipi  at %d:    now  %d.%08x",
	    curcpu, (int)(now >> 32), (u_int)(now & 0xffffffff));
	done = handleevents(now, 0);
	return (done ? FILTER_HANDLED : FILTER_STRAY);
}

/*
 * Handle all events for the specified time on this CPU.
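 *
 * The 'fake' argument encodes the calling context: 0 -- a real timer
 * interrupt; 1 -- catching up from cpu_activeclock(), with no usable
 * trapframe; 2 -- AP startup from cpu_initclocks_ap(), which reloads
 * the timer but skips hardclock()/statclock()/profclock() processing.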
 */
static int
handleevents(sbintime_t now, int fake)
{
	sbintime_t t, *hct;
	struct trapframe *frame;
	struct pcpu_state *state;
	int usermode;
	int done, runs;

	CTR3(KTR_SPARE2, "handle at %d:  now  %d.%08x",
	    curcpu, (int)(now >> 32), (u_int)(now & 0xffffffff));
	done = 0;
	if (fake) {
		frame = NULL;
		usermode = 0;
	} else {
		frame = curthread->td_intr_frame;
		usermode = TRAPF_USERMODE(frame);
	}

	state = DPCPU_PTR(timerstate);

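	/*
	 * Count the hardclock() ticks passed since the last run and
	 * handle them all in a single batched call.
	 */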
	runs = 0;
	while (now >= state->nexthard) {
		state->nexthard += tick_sbt;
		runs++;
	}
	if (runs) {
		hct = DPCPU_PTR(hardclocktime);
		*hct = state->nexthard - tick_sbt;
		if (fake < 2) {
			hardclock_cnt(runs, usermode);
			done = 1;
		}
	}
	runs = 0;
	while (now >= state->nextstat) {
		state->nextstat += statperiod;
		runs++;
	}
	if (runs && fake < 2) {
		statclock_cnt(runs, usermode);
		done = 1;
	}
	if (profiling) {
		runs = 0;
		while (now >= state->nextprof) {
			state->nextprof += profperiod;
			runs++;
		}
		if (runs && !fake) {
			profclock_cnt(runs, usermode, TRAPF_PC(frame));
			done = 1;
		}
	} else
		state->nextprof = state->nextstat;
	if (now >= state->nextcallopt) {
		state->nextcall = state->nextcallopt = SBT_MAX;
		callout_process(now);
	}

#ifdef KDTRACE_HOOKS
	if (fake == 0 && now >= state->nextcyc && cyclic_clock_func != NULL) {
		state->nextcyc = SBT_MAX;
		(*cyclic_clock_func)(frame);
	}
#endif

	t = getnextcpuevent(0);
	ET_HW_LOCK(state);
	if (!busy) {
		state->idle = 0;
		state->nextevent = t;
		loadtimer(now, (fake == 2) &&
		    (timer->et_flags & ET_FLAGS_PERCPU));
	}
	ET_HW_UNLOCK(state);
	return (done);
}

/*
 * Compute the binuptime of the next event on the current CPU.
 */
static sbintime_t
getnextcpuevent(int idle)
{
	sbintime_t event;
	struct pcpu_state *state;
	u_int hardfreq;

	state = DPCPU_PTR(timerstate);
	/* Handle hardclock() events, skipping some if CPU is idle. */
	event = state->nexthard;
	if (idle) {
		hardfreq = (u_int)hz / 2;
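		/*
		 * At least one CPU must keep calling hardclock() often
		 * enough for the timecounter not to wrap; other idle CPUs
		 * may skip up to half a second worth of ticks.
		 */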
		if (tc_min_ticktock_freq > 2
#ifdef SMP
		    && curcpu == CPU_FIRST()
#endif
		    )
			hardfreq = hz / tc_min_ticktock_freq;
		if (hardfreq > 1)
			event += tick_sbt * (hardfreq - 1);
	}
	/* Handle callout events. */
	if (event > state->nextcall)
		event = state->nextcall;
	if (!idle) { /* If CPU is active - handle other types of events. */
		if (event > state->nextstat)
			event = state->nextstat;
		if (profiling && event > state->nextprof)
			event = state->nextprof;
	}
#ifdef KDTRACE_HOOKS
	if (event > state->nextcyc)
		event = state->nextcyc;
#endif
	return (event);
}

/*
 * Compute the binuptime of the next event across all CPUs.
 */
static sbintime_t
getnextevent(void)
{
	struct pcpu_state *state;
	sbintime_t event;
#ifdef SMP
	int	cpu;
#endif
	int	c;

	state = DPCPU_PTR(timerstate);
	event = state->nextevent;
	c = -1;
#ifdef SMP
	if ((timer->et_flags & ET_FLAGS_PERCPU) == 0) {
		CPU_FOREACH(cpu) {
			state = DPCPU_ID_PTR(cpu, timerstate);
			if (event > state->nextevent) {
				event = state->nextevent;
				c = cpu;
			}
		}
	}
#endif
	CTR4(KTR_SPARE2, "next at %d:    next %d.%08x by %d",
	    curcpu, (int)(event >> 32), (u_int)(event & 0xffffffff), c);
	return (event);
}

/* Hardware timer callback function. */
static void
timercb(struct eventtimer *et, void *arg)
{
	sbintime_t now;
	sbintime_t *next;
	struct pcpu_state *state;
#ifdef SMP
	int cpu, bcast;
#endif

	/* Do not touch anything if somebody is reconfiguring timers. */
	if (busy)
		return;
	/* Update present and next tick times. */
	state = DPCPU_PTR(timerstate);
	if (et->et_flags & ET_FLAGS_PERCPU) {
		next = &state->nexttick;
	} else
		next = &nexttick;
	now = sbinuptime();
	if (periodic)
		*next = now + timerperiod;
	else
		*next = -1;	/* Next tick is not scheduled yet. */
	state->now = now;
	CTR3(KTR_SPARE2, "intr at %d:    now  %d.%08x",
	    curcpu, (int)(now >> 32), (u_int)(now & 0xffffffff));

#ifdef SMP
	/* Prepare broadcasting to other CPUs for non-per-CPU timers. */
	bcast = 0;
	if ((et->et_flags & ET_FLAGS_PERCPU) == 0 && smp_started) {
		CPU_FOREACH(cpu) {
			state = DPCPU_ID_PTR(cpu, timerstate);
			ET_HW_LOCK(state);
			state->now = now;
			if (now >= state->nextevent) {
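				/*
				 * Push nextevent a second ahead so this CPU
				 * is not selected again before it handles
				 * the current event.
				 */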
				state->nextevent += SBT_1S;
				if (curcpu != cpu) {
					state->ipi = 1;
					bcast = 1;
				}
			}
			ET_HW_UNLOCK(state);
		}
	}
#endif

	/* Handle events for this time on this CPU. */
	handleevents(now, 0);

#ifdef SMP
	/* Broadcast interrupt to other CPUs for non-per-CPU timers. */
	if (bcast) {
		CPU_FOREACH(cpu) {
			if (curcpu == cpu)
				continue;
			state = DPCPU_ID_PTR(cpu, timerstate);
			if (state->ipi) {
				state->ipi = 0;
				ipi_cpu(cpu, IPI_HARDCLOCK);
			}
		}
	}
#endif
}

/*
 * Load new value into hardware timer.
 */
static void
loadtimer(sbintime_t now, int start)
{
	struct pcpu_state *state;
	sbintime_t new;
	sbintime_t *next;
	uint64_t tmp;
	int eq;

	if (timer->et_flags & ET_FLAGS_PERCPU) {
		state = DPCPU_PTR(timerstate);
		next = &state->nexttick;
	} else
		next = &nexttick;
	if (periodic) {
		if (start) {
			/*
			 * Try to start all periodic timers aligned to the
			 * period to make events synchronous.
			 */
			tmp = now % timerperiod;
			new = timerperiod - tmp;
			if (new < tmp)		/* Less time left than has passed. */
				new += timerperiod;
			CTR5(KTR_SPARE2, "load p at %d:   now %d.%08x first in %d.%08x",
			    curcpu, (int)(now >> 32), (u_int)(now & 0xffffffff),
			    (int)(new >> 32), (u_int)(new & 0xffffffff));
			*next = new + now;
			et_start(timer, new, timerperiod);
		}
	} else {
		new = getnextevent();
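		/* Reprogram the hardware only if the target time changed. */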
		eq = (new == *next);
		CTR4(KTR_SPARE2, "load at %d:    next %d.%08x eq %d",
		    curcpu, (int)(new >> 32), (u_int)(new & 0xffffffff), eq);
		if (!eq) {
			*next = new;
			et_start(timer, new - now, 0);
		}
	}
}

/*
 * Prepare event timer parameters after configuration changes.
 */
static void
setuptimer(void)
{
	int freq;

	if (periodic && (timer->et_flags & ET_FLAGS_PERIODIC) == 0)
		periodic = 0;
	else if (!periodic && (timer->et_flags & ET_FLAGS_ONESHOT) == 0)
		periodic = 1;
	singlemul = MIN(MAX(singlemul, 1), 20);
	freq = hz * singlemul;
	while (freq < (profiling ? profhz : stathz))
		freq += hz;
	freq = round_freq(timer, freq);
	timerperiod = SBT_1S / freq;
}

/*
 * Reconfigure the specified per-CPU timer on another CPU.  Called from
 * the IPI handler.
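 *
 * An 'action' of 1 requests the timer be (re)loaded, an 'action' of 2
 * requests it be stopped; a pending 'handle' flag requests immediate
 * event handling.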
 */
static int
doconfigtimer(void)
{
	sbintime_t now;
	struct pcpu_state *state;

	state = DPCPU_PTR(timerstate);
	switch (atomic_load_acq_int(&state->action)) {
	case 1:
		now = sbinuptime();
		ET_HW_LOCK(state);
		loadtimer(now, 1);
		ET_HW_UNLOCK(state);
		state->handle = 0;
		atomic_store_rel_int(&state->action, 0);
		return (1);
	case 2:
		ET_HW_LOCK(state);
		et_stop(timer);
		ET_HW_UNLOCK(state);
		state->handle = 0;
		atomic_store_rel_int(&state->action, 0);
		return (1);
	}
	if (atomic_readandclear_int(&state->handle) && !busy) {
		now = sbinuptime();
		handleevents(now, 0);
		return (1);
	}
	return (0);
}

/*
 * Reconfigure the specified timer.
 * For per-CPU timers, use an IPI to make the other CPUs reconfigure.
 */
static void
configtimer(int start)
{
	sbintime_t now, next;
	struct pcpu_state *state;
	int cpu;

	if (start) {
		setuptimer();
		now = sbinuptime();
	} else
		now = 0;
	critical_enter();
	ET_HW_LOCK(DPCPU_PTR(timerstate));
	if (start) {
		/* Initialize time machine parameters. */
		next = now + timerperiod;
		if (periodic)
			nexttick = next;
		else
			nexttick = -1;
		CPU_FOREACH(cpu) {
			state = DPCPU_ID_PTR(cpu, timerstate);
			state->now = now;
			if (!smp_started && cpu != CPU_FIRST())
				state->nextevent = SBT_MAX;
			else
				state->nextevent = next;
			if (periodic)
				state->nexttick = next;
			else
				state->nexttick = -1;
			state->nexthard = next;
			state->nextstat = next;
			state->nextprof = next;
			state->nextcall = next;
			state->nextcallopt = next;
			hardclock_sync(cpu);
		}
		busy = 0;
		/* Start global timer or per-CPU timer of this CPU. */
		loadtimer(now, 1);
	} else {
		busy = 1;
		/* Stop global timer or per-CPU timer of this CPU. */
		et_stop(timer);
	}
	ET_HW_UNLOCK(DPCPU_PTR(timerstate));
#ifdef SMP
	/* If the timer is global or there are no other CPUs yet, we are done. */
	if ((timer->et_flags & ET_FLAGS_PERCPU) == 0 || !smp_started) {
		critical_exit();
		return;
	}
	/* Set reconfigure flags for other CPUs. */
	CPU_FOREACH(cpu) {
		state = DPCPU_ID_PTR(cpu, timerstate);
		atomic_store_rel_int(&state->action,
		    (cpu == curcpu) ? 0 : (start ? 1 : 2));
	}
	/* Broadcast reconfigure IPI. */
	ipi_all_but_self(IPI_HARDCLOCK);
	/* Wait for the reconfiguration to complete. */
restart:
	cpu_spinwait();
	CPU_FOREACH(cpu) {
		if (cpu == curcpu)
			continue;
		state = DPCPU_ID_PTR(cpu, timerstate);
		if (atomic_load_acq_int(&state->action))
			goto restart;
	}
#endif
	critical_exit();
}

/*
 * Calculate the nearest frequency supported by the hardware timer.
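 *
 * For example, an RTC-like timer (et_frequency 32768, ET_FLAGS_POW2DIV)
 * asked for 1000 Hz gets div = 32 and is rounded up to 1024 Hz.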
 */
static int
round_freq(struct eventtimer *et, int freq)
{
	uint64_t div;

	if (et->et_frequency != 0) {
		div = lmax((et->et_frequency + freq / 2) / freq, 1);
		if (et->et_flags & ET_FLAGS_POW2DIV)
			div = 1 << (flsl(div + div / 2) - 1);
		freq = (et->et_frequency + div / 2) / div;
	}
	if (et->et_min_period > SBT_1S)
		panic("Event timer \"%s\" doesn't support sub-second periods!",
		    et->et_name);
	else if (et->et_min_period != 0)
		freq = min(freq, SBT2FREQ(et->et_min_period));
	if (et->et_max_period < SBT_1S && et->et_max_period != 0)
		freq = max(freq, SBT2FREQ(et->et_max_period));
	return (freq);
}

/*
 * Configure and start event timers (BSP part).
 */
void
cpu_initclocks_bsp(void)
{
	struct pcpu_state *state;
	int base, div, cpu;

	mtx_init(&et_hw_mtx, "et_hw_mtx", NULL, MTX_SPIN);
	CPU_FOREACH(cpu) {
		state = DPCPU_ID_PTR(cpu, timerstate);
		mtx_init(&state->et_hw_mtx, "et_hw_mtx", NULL, MTX_SPIN);
#ifdef KDTRACE_HOOKS
		state->nextcyc = SBT_MAX;
#endif
		state->nextcall = SBT_MAX;
		state->nextcallopt = SBT_MAX;
	}
	periodic = want_periodic;
	/*
	 * Grab the requested timer or the best one present.
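	 * Selection order: the timer named by the kern.eventtimer.timer
	 * tunable; then a periodic-capable timer if periodic mode is
	 * wanted; then any one-shot-capable timer; then a periodic-capable
	 * one as a last resort.
	 */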
	if (timername[0])
		timer = et_find(timername, 0, 0);
	if (timer == NULL && periodic) {
		timer = et_find(NULL,
		    ET_FLAGS_PERIODIC, ET_FLAGS_PERIODIC);
	}
	if (timer == NULL) {
		timer = et_find(NULL,
		    ET_FLAGS_ONESHOT, ET_FLAGS_ONESHOT);
	}
	if (timer == NULL && !periodic) {
		timer = et_find(NULL,
		    ET_FLAGS_PERIODIC, ET_FLAGS_PERIODIC);
	}
	if (timer == NULL)
		panic("No usable event timer found!");
	et_init(timer, timercb, NULL, NULL);

	/* Adapt to timer capabilities. */
	if (periodic && (timer->et_flags & ET_FLAGS_PERIODIC) == 0)
		periodic = 0;
	else if (!periodic && (timer->et_flags & ET_FLAGS_ONESHOT) == 0)
		periodic = 1;
	if (timer->et_flags & ET_FLAGS_C3STOP)
		cpu_disable_deep_sleep++;

	/*
	 * We honor the requested 'hz' value.
	 * We want to run stathz in the neighborhood of 128hz.
	 * We would like profhz to run as often as possible.
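	 * For example, hz = 1000 (neither >= 1500 nor a multiple of 128)
	 * gets singlemul = 2 from the defaults below.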
	 */
	if (singlemul <= 0 || singlemul > 20) {
		if (hz >= 1500 || (hz % 128) == 0)
			singlemul = 1;
		else if (hz >= 750)
			singlemul = 2;
		else
			singlemul = 4;
	}
	if (periodic) {
		base = round_freq(timer, hz * singlemul);
		singlemul = max((base + hz / 2) / hz, 1);
		hz = (base + singlemul / 2) / singlemul;
		if (base <= 128)
			stathz = base;
		else {
			div = base / 128;
			if (div >= singlemul && (div % singlemul) == 0)
				div++;
			stathz = base / div;
		}
		profhz = stathz;
		while ((profhz + stathz) <= 128 * 64)
			profhz += stathz;
		profhz = round_freq(timer, profhz);
	} else {
		hz = round_freq(timer, hz);
		stathz = round_freq(timer, 127);
		profhz = round_freq(timer, stathz * 64);
	}
	tick = 1000000 / hz;
	tick_sbt = SBT_1S / hz;
	tick_bt = sbttobt(tick_sbt);
	statperiod = SBT_1S / stathz;
	profperiod = SBT_1S / profhz;
	ET_LOCK();
	configtimer(1);
	ET_UNLOCK();
}

/*
 * Start per-CPU event timers on APs.
 */
void
cpu_initclocks_ap(void)
{
	sbintime_t now;
	struct pcpu_state *state;
	struct thread *td;

	state = DPCPU_PTR(timerstate);
	now = sbinuptime();
	ET_HW_LOCK(state);
	state->now = now;
	hardclock_sync(curcpu);
	spinlock_enter();
	ET_HW_UNLOCK(state);
	td = curthread;
	td->td_intr_nesting_level++;
	handleevents(state->now, 2);
	td->td_intr_nesting_level--;
	spinlock_exit();
}

/*
 * Switch to profiling clock rates.
 */
void
cpu_startprofclock(void)
{

	ET_LOCK();
	if (profiling == 0) {
		if (periodic) {
			configtimer(0);
			profiling = 1;
			configtimer(1);
		} else
			profiling = 1;
	} else
		profiling++;
	ET_UNLOCK();
}

/*
 * Switch to regular clock rates.
 */
void
cpu_stopprofclock(void)
{

	ET_LOCK();
	if (profiling == 1) {
		if (periodic) {
			configtimer(0);
			profiling = 0;
			configtimer(1);
		} else
			profiling = 0;
	} else
		profiling--;
	ET_UNLOCK();
}

/*
 * Switch to idle mode (all ticks handled).
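 *
 * Returns the time until the next scheduled event, or -1 if the event
 * timer must keep running (e.g. a per-CPU timer in periodic mode).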
 */
sbintime_t
cpu_idleclock(void)
{
	sbintime_t now, t;
	struct pcpu_state *state;

	if (idletick || busy ||
	    (periodic && (timer->et_flags & ET_FLAGS_PERCPU))
#ifdef DEVICE_POLLING
	    || curcpu == CPU_FIRST()
#endif
	    )
		return (-1);
	state = DPCPU_PTR(timerstate);
	if (periodic)
		now = state->now;
	else
		now = sbinuptime();
	CTR3(KTR_SPARE2, "idle at %d:    now  %d.%08x",
	    curcpu, (int)(now >> 32), (u_int)(now & 0xffffffff));
	t = getnextcpuevent(1);
	ET_HW_LOCK(state);
	state->idle = 1;
	state->nextevent = t;
	if (!periodic)
		loadtimer(now, 0);
	ET_HW_UNLOCK(state);
	return (MAX(t - now, 0));
}

/*
 * Switch to active mode (skip empty ticks).
 */
void
cpu_activeclock(void)
{
	sbintime_t now;
	struct pcpu_state *state;
	struct thread *td;

	state = DPCPU_PTR(timerstate);
	if (state->idle == 0 || busy)
		return;
	if (periodic)
		now = state->now;
	else
		now = sbinuptime();
	CTR3(KTR_SPARE2, "active at %d:  now  %d.%08x",
	    curcpu, (int)(now >> 32), (u_int)(now & 0xffffffff));
	spinlock_enter();
	td = curthread;
	td->td_intr_nesting_level++;
	handleevents(now, 1);
	td->td_intr_nesting_level--;
	spinlock_exit();
}

/*
 * Change the frequency of the given timer.  This changes et->et_frequency and
 * if et is the active timer it reconfigures the timer on all CPUs.  This is
 * intended to be a private interface for the use of et_change_frequency() only.
 */
void
cpu_et_frequency(struct eventtimer *et, uint64_t newfreq)
{

	ET_LOCK();
	if (et == timer) {
		configtimer(0);
		et->et_frequency = newfreq;
		configtimer(1);
	} else
		et->et_frequency = newfreq;
	ET_UNLOCK();
}

#ifdef KDTRACE_HOOKS
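/*
 * Set the time of the next OpenSolaris cyclics event on this CPU and
 * reprogram the event timer if it becomes the earliest pending event.
 */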
void
clocksource_cyc_set(const struct bintime *bt)
{
	sbintime_t now, t;
	struct pcpu_state *state;

	/* Do not touch anything if somebody is reconfiguring timers. */
	if (busy)
		return;
	t = bttosbt(*bt);
	state = DPCPU_PTR(timerstate);
	if (periodic)
		now = state->now;
	else
		now = sbinuptime();

	CTR5(KTR_SPARE2, "set_cyc at %d:  now  %d.%08x  t  %d.%08x",
	    curcpu, (int)(now >> 32), (u_int)(now & 0xffffffff),
	    (int)(t >> 32), (u_int)(t & 0xffffffff));

	ET_HW_LOCK(state);
	if (t == state->nextcyc)
		goto done;
	state->nextcyc = t;
	if (t >= state->nextevent)
		goto done;
	state->nextevent = t;
	if (!periodic)
		loadtimer(now, 0);
done:
	ET_HW_UNLOCK(state);
}
#endif

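/*
 * Note a new earliest callout time on the given CPU and reprogram the
 * event timer if that time is earlier than anything already scheduled.
 */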
void
cpu_new_callout(int cpu, sbintime_t bt, sbintime_t bt_opt)
{
	struct pcpu_state *state;

	/* Do not touch anything if somebody is reconfiguring timers. */
	if (busy)
		return;
	CTR6(KTR_SPARE2, "new co at %d:    on %d at %d.%08x - %d.%08x",
	    curcpu, cpu, (int)(bt_opt >> 32), (u_int)(bt_opt & 0xffffffff),
	    (int)(bt >> 32), (u_int)(bt & 0xffffffff));
	state = DPCPU_ID_PTR(cpu, timerstate);
	ET_HW_LOCK(state);

	/*
	 * If a callout time is already set earlier, do nothing.  This check
	 * may appear redundant because we already check in callout_process(),
	 * but this double check guarantees we are safe with respect to races
	 * between interrupt execution and scheduling.
	 */
	state->nextcallopt = bt_opt;
	if (bt >= state->nextcall)
		goto done;
	state->nextcall = bt;
	/* If there is some other event set earlier -- do nothing. */
	if (bt >= state->nextevent)
		goto done;
	state->nextevent = bt;
	/* If timer is periodic -- there is nothing to reprogram. */
	if (periodic)
		goto done;
	/* If timer is global or of the current CPU -- reprogram it. */
	if ((timer->et_flags & ET_FLAGS_PERCPU) == 0 || cpu == curcpu) {
		loadtimer(sbinuptime(), 0);
done:
		ET_HW_UNLOCK(state);
		return;
	}
	/* Otherwise make the other CPU reprogram it. */
	state->handle = 1;
	ET_HW_UNLOCK(state);
#ifdef SMP
	ipi_cpu(cpu, IPI_HARDCLOCK);
#endif
}

/*
 * Report or change the active event timer hardware.
 */
static int
sysctl_kern_eventtimer_timer(SYSCTL_HANDLER_ARGS)
{
	char buf[32];
	struct eventtimer *et;
	int error;

	ET_LOCK();
	et = timer;
	snprintf(buf, sizeof(buf), "%s", et->et_name);
	ET_UNLOCK();
	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	ET_LOCK();
	et = timer;
	if (error != 0 || req->newptr == NULL ||
	    strcasecmp(buf, et->et_name) == 0) {
		ET_UNLOCK();
		return (error);
	}
	et = et_find(buf, 0, 0);
	if (et == NULL) {
		ET_UNLOCK();
		return (ENOENT);
	}
	configtimer(0);
	et_free(timer);
	if (et->et_flags & ET_FLAGS_C3STOP)
		cpu_disable_deep_sleep++;
	if (timer->et_flags & ET_FLAGS_C3STOP)
		cpu_disable_deep_sleep--;
	periodic = want_periodic;
	timer = et;
	et_init(timer, timercb, NULL, NULL);
	configtimer(1);
	ET_UNLOCK();
	return (error);
}
SYSCTL_PROC(_kern_eventtimer, OID_AUTO, timer,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_eventtimer_timer, "A", "Chosen event timer");

/*
 * Report or change the active event timer periodicity.
 */
static int
sysctl_kern_eventtimer_periodic(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = periodic;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	ET_LOCK();
	configtimer(0);
	periodic = want_periodic = val;
	configtimer(1);
	ET_UNLOCK();
	return (error);
}
SYSCTL_PROC(_kern_eventtimer, OID_AUTO, periodic,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_eventtimer_periodic, "I", "Enable event timer periodic mode");
967