xref: /freebsd/sys/kern/kern_clock.c (revision 6472ac3d8a86336899b6cfb789a4cd9897e3fab5)
1df8bae1dSRodney W. Grimes /*-
2df8bae1dSRodney W. Grimes  * Copyright (c) 1982, 1986, 1991, 1993
3df8bae1dSRodney W. Grimes  *	The Regents of the University of California.  All rights reserved.
4df8bae1dSRodney W. Grimes  * (c) UNIX System Laboratories, Inc.
5df8bae1dSRodney W. Grimes  * All or some portions of this file are derived from material licensed
6df8bae1dSRodney W. Grimes  * to the University of California by American Telephone and Telegraph
7df8bae1dSRodney W. Grimes  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8df8bae1dSRodney W. Grimes  * the permission of UNIX System Laboratories, Inc.
9df8bae1dSRodney W. Grimes  *
10df8bae1dSRodney W. Grimes  * Redistribution and use in source and binary forms, with or without
11df8bae1dSRodney W. Grimes  * modification, are permitted provided that the following conditions
12df8bae1dSRodney W. Grimes  * are met:
13df8bae1dSRodney W. Grimes  * 1. Redistributions of source code must retain the above copyright
14df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer.
15df8bae1dSRodney W. Grimes  * 2. Redistributions in binary form must reproduce the above copyright
16df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer in the
17df8bae1dSRodney W. Grimes  *    documentation and/or other materials provided with the distribution.
18df8bae1dSRodney W. Grimes  * 4. Neither the name of the University nor the names of its contributors
19df8bae1dSRodney W. Grimes  *    may be used to endorse or promote products derived from this software
20df8bae1dSRodney W. Grimes  *    without specific prior written permission.
21df8bae1dSRodney W. Grimes  *
22df8bae1dSRodney W. Grimes  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23df8bae1dSRodney W. Grimes  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24df8bae1dSRodney W. Grimes  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25df8bae1dSRodney W. Grimes  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26df8bae1dSRodney W. Grimes  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27df8bae1dSRodney W. Grimes  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28df8bae1dSRodney W. Grimes  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29df8bae1dSRodney W. Grimes  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30df8bae1dSRodney W. Grimes  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31df8bae1dSRodney W. Grimes  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32df8bae1dSRodney W. Grimes  * SUCH DAMAGE.
33df8bae1dSRodney W. Grimes  *
34df8bae1dSRodney W. Grimes  *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
35df8bae1dSRodney W. Grimes  */
36df8bae1dSRodney W. Grimes 
37677b542eSDavid E. O'Brien #include <sys/cdefs.h>
38677b542eSDavid E. O'Brien __FBSDID("$FreeBSD$");
39677b542eSDavid E. O'Brien 
40911d16b8SEd Maste #include "opt_kdb.h"
41f0796cd2SGleb Smirnoff #include "opt_device_polling.h"
424da0d332SPeter Wemm #include "opt_hwpmc_hooks.h"
4332c20357SPoul-Henning Kamp #include "opt_ntp.h"
44370c3cb5SSean Kelly #include "opt_watchdog.h"
4532c20357SPoul-Henning Kamp 
46df8bae1dSRodney W. Grimes #include <sys/param.h>
47df8bae1dSRodney W. Grimes #include <sys/systm.h>
48df8bae1dSRodney W. Grimes #include <sys/callout.h>
492d50560aSMarcel Moolenaar #include <sys/kdb.h>
50df8bae1dSRodney W. Grimes #include <sys/kernel.h>
51f7829d0dSAttilio Rao #include <sys/kthread.h>
5261d80e90SJohn Baldwin #include <sys/ktr.h>
53f7829d0dSAttilio Rao #include <sys/lock.h>
5435e0e5b3SJohn Baldwin #include <sys/mutex.h>
55df8bae1dSRodney W. Grimes #include <sys/proc.h>
56e4625663SJeff Roberson #include <sys/resource.h>
57df8bae1dSRodney W. Grimes #include <sys/resourcevar.h>
58b43179fbSJeff Roberson #include <sys/sched.h>
59797f2d22SPoul-Henning Kamp #include <sys/signalvar.h>
60f7829d0dSAttilio Rao #include <sys/sleepqueue.h>
616caa8a15SJohn Baldwin #include <sys/smp.h>
628a129caeSDavid Greenman #include <vm/vm.h>
63efeaf95aSDavid Greenman #include <vm/pmap.h>
64efeaf95aSDavid Greenman #include <vm/vm_map.h>
65797f2d22SPoul-Henning Kamp #include <sys/sysctl.h>
668088699fSJohn Baldwin #include <sys/bus.h>
678088699fSJohn Baldwin #include <sys/interrupt.h>
68104a9b7eSAlexander Kabaev #include <sys/limits.h>
69e7fa55afSPoul-Henning Kamp #include <sys/timetc.h>
70df8bae1dSRodney W. Grimes 
71df8bae1dSRodney W. Grimes #ifdef GPROF
72df8bae1dSRodney W. Grimes #include <sys/gmon.h>
73df8bae1dSRodney W. Grimes #endif
74df8bae1dSRodney W. Grimes 
7536c0fd9dSJoseph Koshy #ifdef HWPMC_HOOKS
7636c0fd9dSJoseph Koshy #include <sys/pmckern.h>
7736c0fd9dSJoseph Koshy #endif
7836c0fd9dSJoseph Koshy 
79e4fc250cSLuigi Rizzo #ifdef DEVICE_POLLING
80e4fc250cSLuigi Rizzo extern void hardclock_device_poll(void);
81e4fc250cSLuigi Rizzo #endif /* DEVICE_POLLING */
82eae8fc2cSSteve Passe 
834d77a549SAlfred Perlstein static void initclocks(void *dummy);
84237fdd78SRobert Watson SYSINIT(clocks, SI_SUB_CLOCKS, SI_ORDER_FIRST, initclocks, NULL);
852b14f991SJulian Elischer 
868b98fec9SJeff Roberson /* Spin-lock protecting profiling statistics. */
8786a49deaSAttilio Rao static struct mtx time_lock;
888b98fec9SJeff Roberson 
8962919d78SPeter Wemm static int
9062919d78SPeter Wemm sysctl_kern_cp_time(SYSCTL_HANDLER_ARGS)
9162919d78SPeter Wemm {
9262919d78SPeter Wemm 	int error;
937628402bSPeter Wemm 	long cp_time[CPUSTATES];
94cff2e749SPaul Saab #ifdef SCTL_MASK32
9562919d78SPeter Wemm 	int i;
9662919d78SPeter Wemm 	unsigned int cp_time32[CPUSTATES];
977628402bSPeter Wemm #endif
9862919d78SPeter Wemm 
997628402bSPeter Wemm 	read_cpu_time(cp_time);
1007628402bSPeter Wemm #ifdef SCTL_MASK32
101cff2e749SPaul Saab 	if (req->flags & SCTL_MASK32) {
10262919d78SPeter Wemm 		if (!req->oldptr)
10362919d78SPeter Wemm 			return SYSCTL_OUT(req, 0, sizeof(cp_time32));
10462919d78SPeter Wemm 		for (i = 0; i < CPUSTATES; i++)
10562919d78SPeter Wemm 			cp_time32[i] = (unsigned int)cp_time[i];
10662919d78SPeter Wemm 		error = SYSCTL_OUT(req, cp_time32, sizeof(cp_time32));
10762919d78SPeter Wemm 	} else
10862919d78SPeter Wemm #endif
10962919d78SPeter Wemm 	{
11062919d78SPeter Wemm 		if (!req->oldptr)
11162919d78SPeter Wemm 			return SYSCTL_OUT(req, 0, sizeof(cp_time));
11262919d78SPeter Wemm 		error = SYSCTL_OUT(req, cp_time, sizeof(cp_time));
11362919d78SPeter Wemm 	}
11462919d78SPeter Wemm 	return error;
11562919d78SPeter Wemm }
11662919d78SPeter Wemm 
117c383c221SEd Schouten SYSCTL_PROC(_kern, OID_AUTO, cp_time, CTLTYPE_LONG|CTLFLAG_RD|CTLFLAG_MPSAFE,
11862919d78SPeter Wemm     0,0, sysctl_kern_cp_time, "LU", "CPU time statistics");
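/*
 * A minimal sketch of how a userland consumer might read the aggregate
 * counters exported above (error handling trimmed; CPUSTATES comes from
 * <sys/resource.h>):
 *
 *	long cp_time[CPUSTATES];
 *	size_t len = sizeof(cp_time);
 *	if (sysctlbyname("kern.cp_time", cp_time, &len, NULL, 0) == -1)
 *		err(1, "kern.cp_time");
 */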
1197f112b04SRobert Watson 
1207628402bSPeter Wemm static long empty[CPUSTATES];
1217628402bSPeter Wemm 
1227628402bSPeter Wemm static int
1237628402bSPeter Wemm sysctl_kern_cp_times(SYSCTL_HANDLER_ARGS)
1247628402bSPeter Wemm {
1257628402bSPeter Wemm 	struct pcpu *pcpu;
1267628402bSPeter Wemm 	int error;
127ef54068bSRobert Watson 	int c;
1287628402bSPeter Wemm 	long *cp_time;
1297628402bSPeter Wemm #ifdef SCTL_MASK32
1307628402bSPeter Wemm 	unsigned int cp_time32[CPUSTATES];
131ef54068bSRobert Watson 	int i;
1327628402bSPeter Wemm #endif
1337628402bSPeter Wemm 
1347628402bSPeter Wemm 	if (!req->oldptr) {
1357628402bSPeter Wemm #ifdef SCTL_MASK32
1367628402bSPeter Wemm 		if (req->flags & SCTL_MASK32)
1377628402bSPeter Wemm 			return SYSCTL_OUT(req, 0, sizeof(cp_time32) * (mp_maxid + 1));
1387628402bSPeter Wemm 		else
1397628402bSPeter Wemm #endif
1407628402bSPeter Wemm 			return SYSCTL_OUT(req, 0, sizeof(long) * CPUSTATES * (mp_maxid + 1));
1417628402bSPeter Wemm 	}
1427628402bSPeter Wemm 	for (error = 0, c = 0; error == 0 && c <= mp_maxid; c++) {
1437628402bSPeter Wemm 		if (!CPU_ABSENT(c)) {
1447628402bSPeter Wemm 			pcpu = pcpu_find(c);
1457628402bSPeter Wemm 			cp_time = pcpu->pc_cp_time;
1467628402bSPeter Wemm 		} else {
1477628402bSPeter Wemm 			cp_time = empty;
1487628402bSPeter Wemm 		}
1497628402bSPeter Wemm #ifdef SCTL_MASK32
1507628402bSPeter Wemm 		if (req->flags & SCTL_MASK32) {
1517628402bSPeter Wemm 			for (i = 0; i < CPUSTATES; i++)
1527628402bSPeter Wemm 				cp_time32[i] = (unsigned int)cp_time[i];
1537628402bSPeter Wemm 			error = SYSCTL_OUT(req, cp_time32, sizeof(cp_time32));
1547628402bSPeter Wemm 		} else
1557628402bSPeter Wemm #endif
1567628402bSPeter Wemm 			error = SYSCTL_OUT(req, cp_time, sizeof(long) * CPUSTATES);
1577628402bSPeter Wemm 	}
1587628402bSPeter Wemm 	return error;
1597628402bSPeter Wemm }
1607628402bSPeter Wemm 
161c383c221SEd Schouten SYSCTL_PROC(_kern, OID_AUTO, cp_times, CTLTYPE_LONG|CTLFLAG_RD|CTLFLAG_MPSAFE,
1627628402bSPeter Wemm     0,0, sysctl_kern_cp_times, "LU", "per-CPU time statistics");
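/*
 * Reading the per-CPU array from userland is similar, but the buffer must be
 * sized for mp_maxid + 1 CPUs; a sketch assuming the kern.smp.maxid sysctl:
 *
 *	int maxid;
 *	size_t len = sizeof(maxid);
 *	sysctlbyname("kern.smp.maxid", &maxid, &len, NULL, 0);
 *	len = sizeof(long) * CPUSTATES * (maxid + 1);
 *	long *times = malloc(len);
 *	sysctlbyname("kern.cp_times", times, &len, NULL, 0);
 */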
1637628402bSPeter Wemm 
164f7829d0dSAttilio Rao #ifdef DEADLKRES
16536e51f65SAttilio Rao static const char *blessed[] = {
16695335fd8SAttilio Rao 	"getblk",
16736e51f65SAttilio Rao 	"so_snd_sx",
16836e51f65SAttilio Rao 	"so_rcv_sx",
16936e51f65SAttilio Rao 	NULL
17036e51f65SAttilio Rao };
171f7829d0dSAttilio Rao static int slptime_threshold = 1800;
172f7829d0dSAttilio Rao static int blktime_threshold = 900;
173f7829d0dSAttilio Rao static int sleepfreq = 3;
174f7829d0dSAttilio Rao 
175f7829d0dSAttilio Rao static void
176f7829d0dSAttilio Rao deadlkres(void)
177f7829d0dSAttilio Rao {
178f7829d0dSAttilio Rao 	struct proc *p;
179f7829d0dSAttilio Rao 	struct thread *td;
180f7829d0dSAttilio Rao 	void *wchan;
18136e51f65SAttilio Rao 	int blkticks, i, slpticks, slptype, tryl, tticks;
182f7829d0dSAttilio Rao 
183f7829d0dSAttilio Rao 	tryl = 0;
184f7829d0dSAttilio Rao 	for (;;) {
185f7829d0dSAttilio Rao 		blkticks = blktime_threshold * hz;
186f7829d0dSAttilio Rao 		slpticks = slptime_threshold * hz;
187f7829d0dSAttilio Rao 
188f7829d0dSAttilio Rao 		/*
189f7829d0dSAttilio Rao 		 * Avoid sleeping on the sx_lock in order to avoid a possible
190f7829d0dSAttilio Rao 		 * priority inversion problem leading to starvation.
191f7829d0dSAttilio Rao 		 * If the lock cannot be acquired after 100 tries, panic.
192f7829d0dSAttilio Rao 		 */
193f7829d0dSAttilio Rao 		if (!sx_try_slock(&allproc_lock)) {
194f7829d0dSAttilio Rao 			if (tryl > 100)
195f7829d0dSAttilio Rao 		panic("%s: possible deadlock detected on allproc_lock\n",
196f7829d0dSAttilio Rao 				    __func__);
197f7829d0dSAttilio Rao 			tryl++;
198b5850804SJohn Baldwin 			pause("allproc", sleepfreq * hz);
199f7829d0dSAttilio Rao 			continue;
200f7829d0dSAttilio Rao 		}
201f7829d0dSAttilio Rao 		tryl = 0;
202f7829d0dSAttilio Rao 		FOREACH_PROC_IN_SYSTEM(p) {
203f7829d0dSAttilio Rao 			PROC_LOCK(p);
204e806d352SJohn Baldwin 			if (p->p_state == PRS_NEW) {
205e806d352SJohn Baldwin 				PROC_UNLOCK(p);
206e806d352SJohn Baldwin 				continue;
207e806d352SJohn Baldwin 			}
208f7829d0dSAttilio Rao 			FOREACH_THREAD_IN_PROC(p, td) {
209631cb86fSAttilio Rao 
210631cb86fSAttilio Rao 				/*
211631cb86fSAttilio Rao 				 * Once a thread is found in an "interesting"
212631cb86fSAttilio Rao 				 * state, a possible ticks wrap-around needs
213631cb86fSAttilio Rao 				 * to be checked.
214631cb86fSAttilio Rao 				 */
215f7829d0dSAttilio Rao 				thread_lock(td);
216631cb86fSAttilio Rao 				if (TD_ON_LOCK(td) && ticks < td->td_blktick) {
217f7829d0dSAttilio Rao 
218f7829d0dSAttilio Rao 					/*
219f7829d0dSAttilio Rao 					 * The thread should be blocked on a
220f7829d0dSAttilio Rao 					 * turnstile, simply check if the
221f7829d0dSAttilio Rao 					 * turnstile channel is in good state.
222f7829d0dSAttilio Rao 					 */
223f7829d0dSAttilio Rao 					MPASS(td->td_blocked != NULL);
22436e51f65SAttilio Rao 
225f7829d0dSAttilio Rao 					tticks = ticks - td->td_blktick;
226f7829d0dSAttilio Rao 					thread_unlock(td);
227f7829d0dSAttilio Rao 					if (tticks > blkticks) {
228f7829d0dSAttilio Rao 
229f7829d0dSAttilio Rao 						/*
230f7829d0dSAttilio Rao 						 * According to the provided
231f7829d0dSAttilio Rao 						 * thresholds, this thread has
232f7829d0dSAttilio Rao 						 * been stuck for too long on
233f7829d0dSAttilio Rao 						 * a turnstile.
234f7829d0dSAttilio Rao 						 */
235f7829d0dSAttilio Rao 						PROC_UNLOCK(p);
236f7829d0dSAttilio Rao 						sx_sunlock(&allproc_lock);
237f7829d0dSAttilio Rao 	panic("%s: possible deadlock detected for %p, blocked for %d ticks\n",
238f7829d0dSAttilio Rao 						    __func__, td, tticks);
239f7829d0dSAttilio Rao 					}
240631cb86fSAttilio Rao 				} else if (TD_IS_SLEEPING(td) &&
241631cb86fSAttilio Rao 				    TD_ON_SLEEPQ(td) &&
242631cb86fSAttilio Rao 				    ticks < td->td_blktick) {
24336e51f65SAttilio Rao 
244f7829d0dSAttilio Rao 					/*
245f7829d0dSAttilio Rao 					 * Check whether the thread is sleeping
246f7829d0dSAttilio Rao 					 * on a lock; otherwise skip the check.
247f7829d0dSAttilio Rao 					 * Drop the thread lock in order to
248f7829d0dSAttilio Rao 					 * avoid a LOR with the sleepqueue
249f7829d0dSAttilio Rao 					 * spinlock.
250f7829d0dSAttilio Rao 					 */
251f7829d0dSAttilio Rao 					wchan = td->td_wchan;
252f7829d0dSAttilio Rao 					tticks = ticks - td->td_slptick;
253f7829d0dSAttilio Rao 					thread_unlock(td);
254f7829d0dSAttilio Rao 					slptype = sleepq_type(wchan);
255f7829d0dSAttilio Rao 					if ((slptype == SLEEPQ_SX ||
256f7829d0dSAttilio Rao 					    slptype == SLEEPQ_LK) &&
257f7829d0dSAttilio Rao 					    tticks > slpticks) {
258f7829d0dSAttilio Rao 
259f7829d0dSAttilio Rao 						/*
260f7829d0dSAttilio Rao 						 * According to the provided
261f7829d0dSAttilio Rao 						 * thresholds, this thread has
262f7829d0dSAttilio Rao 						 * been stuck for too long on
263f7829d0dSAttilio Rao 						 * a sleepqueue.
26436e51f65SAttilio Rao 						 * However, being on a
26536e51f65SAttilio Rao 						 * sleepqueue, it may still be
26636e51f65SAttilio Rao 						 * on the blessed list, so
26736e51f65SAttilio Rao 						 * check that first.
268f7829d0dSAttilio Rao 						 */
26936e51f65SAttilio Rao 						tryl = 0;
27036e51f65SAttilio Rao 						for (i = 0; blessed[i] != NULL;
27136e51f65SAttilio Rao 						    i++) {
27236e51f65SAttilio Rao 							if (!strcmp(blessed[i],
27336e51f65SAttilio Rao 							    td->td_wmesg)) {
27436e51f65SAttilio Rao 								tryl = 1;
27536e51f65SAttilio Rao 								break;
27636e51f65SAttilio Rao 							}
27736e51f65SAttilio Rao 						}
27836e51f65SAttilio Rao 						if (tryl != 0) {
27936e51f65SAttilio Rao 							tryl = 0;
28036e51f65SAttilio Rao 							continue;
28136e51f65SAttilio Rao 						}
282f7829d0dSAttilio Rao 						PROC_UNLOCK(p);
283f7829d0dSAttilio Rao 						sx_sunlock(&allproc_lock);
284f7829d0dSAttilio Rao 	panic("%s: possible deadlock detected for %p, blocked for %d ticks\n",
285f7829d0dSAttilio Rao 						    __func__, td, tticks);
286f7829d0dSAttilio Rao 					}
287f7829d0dSAttilio Rao 				} else
288f7829d0dSAttilio Rao 					thread_unlock(td);
289f7829d0dSAttilio Rao 			}
290f7829d0dSAttilio Rao 			PROC_UNLOCK(p);
291f7829d0dSAttilio Rao 		}
292f7829d0dSAttilio Rao 		sx_sunlock(&allproc_lock);
293f7829d0dSAttilio Rao 
294f7829d0dSAttilio Rao 		/* Sleep for sleepfreq seconds. */
295b5850804SJohn Baldwin 		pause("-", sleepfreq * hz);
296f7829d0dSAttilio Rao 	}
297f7829d0dSAttilio Rao }
298f7829d0dSAttilio Rao 
299f7829d0dSAttilio Rao static struct kthread_desc deadlkres_kd = {
300f7829d0dSAttilio Rao 	"deadlkres",
301f7829d0dSAttilio Rao 	deadlkres,
302f7829d0dSAttilio Rao 	(struct thread **)NULL
303f7829d0dSAttilio Rao };
304f7829d0dSAttilio Rao 
305f7829d0dSAttilio Rao SYSINIT(deadlkres, SI_SUB_CLOCKS, SI_ORDER_ANY, kthread_start, &deadlkres_kd);
306f7829d0dSAttilio Rao 
307*6472ac3dSEd Schouten static SYSCTL_NODE(_debug, OID_AUTO, deadlkres, CTLFLAG_RW, 0,
308*6472ac3dSEd Schouten     "Deadlock resolver");
309f7829d0dSAttilio Rao SYSCTL_INT(_debug_deadlkres, OID_AUTO, slptime_threshold, CTLFLAG_RW,
310f7829d0dSAttilio Rao     &slptime_threshold, 0,
311f7829d0dSAttilio Rao     "Number of seconds within is valid to sleep on a sleepqueue");
312f7829d0dSAttilio Rao SYSCTL_INT(_debug_deadlkres, OID_AUTO, blktime_threshold, CTLFLAG_RW,
313f7829d0dSAttilio Rao     &blktime_threshold, 0,
314f7829d0dSAttilio Rao     "Number of seconds within is valid to block on a turnstile");
315f7829d0dSAttilio Rao SYSCTL_INT(_debug_deadlkres, OID_AUTO, sleepfreq, CTLFLAG_RW, &sleepfreq, 0,
316f7829d0dSAttilio Rao     "Number of seconds between any deadlock resolver thread run");
317f7829d0dSAttilio Rao #endif	/* DEADLKRES */
318f7829d0dSAttilio Rao 
3197628402bSPeter Wemm void
3207628402bSPeter Wemm read_cpu_time(long *cp_time)
3217628402bSPeter Wemm {
3227628402bSPeter Wemm 	struct pcpu *pc;
3237628402bSPeter Wemm 	int i, j;
3247628402bSPeter Wemm 
3257628402bSPeter Wemm 	/* Sum up global cp_time[]. */
3267628402bSPeter Wemm 	bzero(cp_time, sizeof(long) * CPUSTATES);
3273aa6d94eSJohn Baldwin 	CPU_FOREACH(i) {
3287628402bSPeter Wemm 		pc = pcpu_find(i);
3297628402bSPeter Wemm 		for (j = 0; j < CPUSTATES; j++)
3307628402bSPeter Wemm 			cp_time[j] += pc->pc_cp_time[j];
3317628402bSPeter Wemm 	}
3327628402bSPeter Wemm }
3337628402bSPeter Wemm 
3344103b765SPoul-Henning Kamp #ifdef SW_WATCHDOG
3354103b765SPoul-Henning Kamp #include <sys/watchdog.h>
336370c3cb5SSean Kelly 
3374103b765SPoul-Henning Kamp static int watchdog_ticks;
338370c3cb5SSean Kelly static int watchdog_enabled;
3394103b765SPoul-Henning Kamp static void watchdog_fire(void);
3404103b765SPoul-Henning Kamp static void watchdog_config(void *, u_int, int *);
3414103b765SPoul-Henning Kamp #endif /* SW_WATCHDOG */
342370c3cb5SSean Kelly 
3433bac064fSPoul-Henning Kamp /*
344df8bae1dSRodney W. Grimes  * Clock handling routines.
345df8bae1dSRodney W. Grimes  *
346b05dcf3cSPoul-Henning Kamp  * This code is written to operate with two timers that run independently of
347b05dcf3cSPoul-Henning Kamp  * each other.
3487ec73f64SPoul-Henning Kamp  *
349b05dcf3cSPoul-Henning Kamp  * The main timer, running hz times per second, is used to trigger interval
350b05dcf3cSPoul-Henning Kamp  * timers, timeouts and rescheduling as needed.
3517ec73f64SPoul-Henning Kamp  *
352b05dcf3cSPoul-Henning Kamp  * The second timer handles kernel and user profiling,
353b05dcf3cSPoul-Henning Kamp  * and does resource use estimation.  If the second timer is programmable,
354b05dcf3cSPoul-Henning Kamp  * it is randomized to avoid aliasing between the two clocks.  For example,
355b05dcf3cSPoul-Henning Kamp  * the randomization prevents an adversary from always giving up the cpu
356df8bae1dSRodney W. Grimes  * just before its quantum expires.  Otherwise, it would never accumulate
357df8bae1dSRodney W. Grimes  * cpu ticks.  The mean frequency of the second timer is stathz.
358b05dcf3cSPoul-Henning Kamp  *
359b05dcf3cSPoul-Henning Kamp  * If no second timer exists, stathz will be zero; in this case we drive
360b05dcf3cSPoul-Henning Kamp  * profiling and statistics off the main clock.  This WILL NOT be accurate;
361b05dcf3cSPoul-Henning Kamp  * do not do it unless absolutely necessary.
362b05dcf3cSPoul-Henning Kamp  *
363df8bae1dSRodney W. Grimes  * The statistics clock may (or may not) be run at a higher rate while
364b05dcf3cSPoul-Henning Kamp  * profiling.  This profile clock runs at profhz.  We require that profhz
365b05dcf3cSPoul-Henning Kamp  * be an integral multiple of stathz.
366b05dcf3cSPoul-Henning Kamp  *
367b05dcf3cSPoul-Henning Kamp  * If the statistics clock is running fast, it must be divided by the ratio
368b05dcf3cSPoul-Henning Kamp  * profhz/stathz for statistics.  (For profiling, every tick counts.)
369df8bae1dSRodney W. Grimes  *
3707ec73f64SPoul-Henning Kamp  * Time-of-day is maintained using a "timecounter", which may or may
3717ec73f64SPoul-Henning Kamp  * not be related to the hardware generating the above mentioned
3727ec73f64SPoul-Henning Kamp  * interrupts.
373df8bae1dSRodney W. Grimes  */
374df8bae1dSRodney W. Grimes 
375df8bae1dSRodney W. Grimes int	stathz;
376df8bae1dSRodney W. Grimes int	profhz;
377238dd320SJake Burkholder int	profprocs;
378df8bae1dSRodney W. Grimes int	ticks;
379238dd320SJake Burkholder int	psratio;
380df8bae1dSRodney W. Grimes 
3813e288e62SDimitry Andric static DPCPU_DEFINE(int, pcputicks);	/* Per-CPU version of ticks. */
3824763a8b8SAlexander Motin static int global_hardclock_run = 0;
383dbd55f3fSAlexander Motin 
384df8bae1dSRodney W. Grimes /*
385df8bae1dSRodney W. Grimes  * Initialize clock frequencies and start both clocks running.
386df8bae1dSRodney W. Grimes  */
3872b14f991SJulian Elischer /* ARGSUSED*/
3882b14f991SJulian Elischer static void
389d841aaa7SBruce Evans initclocks(dummy)
390d841aaa7SBruce Evans 	void *dummy;
391df8bae1dSRodney W. Grimes {
392df8bae1dSRodney W. Grimes 	register int i;
393df8bae1dSRodney W. Grimes 
394df8bae1dSRodney W. Grimes 	/*
395df8bae1dSRodney W. Grimes 	 * Set divisors to 1 (normal case) and let the machine-specific
396df8bae1dSRodney W. Grimes 	 * code do its bit.
397df8bae1dSRodney W. Grimes 	 */
398875b8844SAlexander Motin 	mtx_init(&time_lock, "time lock", NULL, MTX_DEF);
39963d69d25SRobert Watson 	cpu_initclocks();
400df8bae1dSRodney W. Grimes 
401df8bae1dSRodney W. Grimes 	/*
402df8bae1dSRodney W. Grimes 	 * Compute profhz/stathz, and fix profhz if needed.
403df8bae1dSRodney W. Grimes 	 */
404df8bae1dSRodney W. Grimes 	i = stathz ? stathz : hz;
405df8bae1dSRodney W. Grimes 	if (profhz == 0)
406df8bae1dSRodney W. Grimes 		profhz = i;
407df8bae1dSRodney W. Grimes 	psratio = profhz / i;
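	/* For example, stathz = 128 with profhz = 1024 yields psratio = 8. */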
4084103b765SPoul-Henning Kamp #ifdef SW_WATCHDOG
4094103b765SPoul-Henning Kamp 	EVENTHANDLER_REGISTER(watchdog_list, watchdog_config, NULL, 0);
4104103b765SPoul-Henning Kamp #endif
411df8bae1dSRodney W. Grimes }
412df8bae1dSRodney W. Grimes 
413df8bae1dSRodney W. Grimes /*
414238dd320SJake Burkholder  * Each time the real-time timer fires, this function is called on all CPUs.
415b439e431SJohn Baldwin  * Note that hardclock() calls hardclock_cpu() for the boot CPU, so only
416238dd320SJake Burkholder  * the other CPUs in the system need to call this function.
4176caa8a15SJohn Baldwin  */
4186caa8a15SJohn Baldwin void
419b439e431SJohn Baldwin hardclock_cpu(int usermode)
4206caa8a15SJohn Baldwin {
4216caa8a15SJohn Baldwin 	struct pstats *pstats;
422238dd320SJake Burkholder 	struct thread *td = curthread;
423b40ce416SJulian Elischer 	struct proc *p = td->td_proc;
424b61ce5b0SJeff Roberson 	int flags;
4256caa8a15SJohn Baldwin 
4266caa8a15SJohn Baldwin 	/*
4276caa8a15SJohn Baldwin 	 * Run current process's virtual and profile time, as needed.
4286caa8a15SJohn Baldwin 	 */
429ad1e7d28SJulian Elischer 	pstats = p->p_stats;
430b61ce5b0SJeff Roberson 	flags = 0;
431ad1e7d28SJulian Elischer 	if (usermode &&
43240acdeabSJeff Roberson 	    timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value)) {
43340acdeabSJeff Roberson 		PROC_SLOCK(p);
434b61ce5b0SJeff Roberson 		if (itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0)
435b61ce5b0SJeff Roberson 			flags |= TDF_ALRMPEND | TDF_ASTPENDING;
43640acdeabSJeff Roberson 		PROC_SUNLOCK(p);
43740acdeabSJeff Roberson 	}
43840acdeabSJeff Roberson 	if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value)) {
43940acdeabSJeff Roberson 		PROC_SLOCK(p);
440b61ce5b0SJeff Roberson 		if (itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0)
441b61ce5b0SJeff Roberson 			flags |= TDF_PROFPEND | TDF_ASTPENDING;
44240acdeabSJeff Roberson 		PROC_SUNLOCK(p);
44340acdeabSJeff Roberson 	}
44440acdeabSJeff Roberson 	thread_lock(td);
445a157e425SAlexander Motin 	sched_tick(1);
446b61ce5b0SJeff Roberson 	td->td_flags |= flags;
44740acdeabSJeff Roberson 	thread_unlock(td);
44836c0fd9dSJoseph Koshy 
44936c0fd9dSJoseph Koshy #ifdef	HWPMC_HOOKS
45036c0fd9dSJoseph Koshy 	if (PMC_CPU_HAS_SAMPLES(PCPU_GET(cpuid)))
45136c0fd9dSJoseph Koshy 		PMC_CALL_HOOK_UNLOCKED(curthread, PMC_FN_DO_SAMPLES, NULL);
45236c0fd9dSJoseph Koshy #endif
4538d809d50SJeff Roberson 	callout_tick();
4546caa8a15SJohn Baldwin }
4556caa8a15SJohn Baldwin 
4566caa8a15SJohn Baldwin /*
457df8bae1dSRodney W. Grimes  * The real-time timer, interrupting hz times per second.
458df8bae1dSRodney W. Grimes  */
459df8bae1dSRodney W. Grimes void
460b439e431SJohn Baldwin hardclock(int usermode, uintfptr_t pc)
461df8bae1dSRodney W. Grimes {
462df8bae1dSRodney W. Grimes 
4638d809d50SJeff Roberson 	atomic_add_int((volatile int *)&ticks, 1);
464b439e431SJohn Baldwin 	hardclock_cpu(usermode);
4650e189873SAlexander Motin 	tc_ticktock(1);
466a157e425SAlexander Motin 	cpu_tick_calibration();
467df8bae1dSRodney W. Grimes 	/*
468df8bae1dSRodney W. Grimes 	 * If no separate statistics clock is available, run it from here.
4696caa8a15SJohn Baldwin 	 *
4706caa8a15SJohn Baldwin 	 * XXX: this only works for UP
471df8bae1dSRodney W. Grimes 	 */
472238dd320SJake Burkholder 	if (stathz == 0) {
473b439e431SJohn Baldwin 		profclock(usermode, pc);
474b439e431SJohn Baldwin 		statclock(usermode);
475238dd320SJake Burkholder 	}
476e4fc250cSLuigi Rizzo #ifdef DEVICE_POLLING
477daccb638SLuigi Rizzo 	hardclock_device_poll();	/* this is very short and quick */
478e4fc250cSLuigi Rizzo #endif /* DEVICE_POLLING */
4794103b765SPoul-Henning Kamp #ifdef SW_WATCHDOG
4804103b765SPoul-Henning Kamp 	if (watchdog_enabled > 0 && --watchdog_ticks <= 0)
481370c3cb5SSean Kelly 		watchdog_fire();
4824103b765SPoul-Henning Kamp #endif /* SW_WATCHDOG */
483ab36c067SJustin T. Gibbs }
484ab36c067SJustin T. Gibbs 
485a157e425SAlexander Motin void
486a157e425SAlexander Motin hardclock_anycpu(int cnt, int usermode)
487a157e425SAlexander Motin {
488a157e425SAlexander Motin 	struct pstats *pstats;
489a157e425SAlexander Motin 	struct thread *td = curthread;
490a157e425SAlexander Motin 	struct proc *p = td->td_proc;
491a157e425SAlexander Motin 	int *t = DPCPU_PTR(pcputicks);
4924763a8b8SAlexander Motin 	int flags, global, newticks;
4934763a8b8SAlexander Motin #ifdef SW_WATCHDOG
4944763a8b8SAlexander Motin 	int i;
4954763a8b8SAlexander Motin #endif /* SW_WATCHDOG */
496a157e425SAlexander Motin 
497a157e425SAlexander Motin 	/*
498a157e425SAlexander Motin 	 * Update per-CPU and possibly global ticks values.
499a157e425SAlexander Motin 	 */
500a157e425SAlexander Motin 	*t += cnt;
501a157e425SAlexander Motin 	do {
502a157e425SAlexander Motin 		global = ticks;
503a157e425SAlexander Motin 		newticks = *t - global;
504a157e425SAlexander Motin 		if (newticks <= 0) {
505a157e425SAlexander Motin 			if (newticks < -1)
506a157e425SAlexander Motin 				*t = global - 1;
507a157e425SAlexander Motin 			newticks = 0;
508a157e425SAlexander Motin 			break;
509a157e425SAlexander Motin 		}
510a157e425SAlexander Motin 	} while (!atomic_cmpset_int(&ticks, global, *t));
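	/*
	 * At this point newticks is the number of ticks by which this CPU just
	 * advanced the global ticks counter, or zero if another CPU already
	 * accounted for them; it is used below to decide who performs the
	 * global once-per-tick work.
	 */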
511a157e425SAlexander Motin 
512a157e425SAlexander Motin 	/*
513a157e425SAlexander Motin 	 * Run current process's virtual and profile time, as needed.
514a157e425SAlexander Motin 	 */
515a157e425SAlexander Motin 	pstats = p->p_stats;
516a157e425SAlexander Motin 	flags = 0;
517a157e425SAlexander Motin 	if (usermode &&
518a157e425SAlexander Motin 	    timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value)) {
519a157e425SAlexander Motin 		PROC_SLOCK(p);
520a157e425SAlexander Motin 		if (itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL],
521a157e425SAlexander Motin 		    tick * cnt) == 0)
522a157e425SAlexander Motin 			flags |= TDF_ALRMPEND | TDF_ASTPENDING;
523a157e425SAlexander Motin 		PROC_SUNLOCK(p);
524a157e425SAlexander Motin 	}
525a157e425SAlexander Motin 	if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value)) {
526a157e425SAlexander Motin 		PROC_SLOCK(p);
527a157e425SAlexander Motin 		if (itimerdecr(&pstats->p_timer[ITIMER_PROF],
528a157e425SAlexander Motin 		    tick * cnt) == 0)
529a157e425SAlexander Motin 			flags |= TDF_PROFPEND | TDF_ASTPENDING;
530a157e425SAlexander Motin 		PROC_SUNLOCK(p);
531a157e425SAlexander Motin 	}
532a157e425SAlexander Motin 	thread_lock(td);
533a157e425SAlexander Motin 	sched_tick(cnt);
534a157e425SAlexander Motin 	td->td_flags |= flags;
535a157e425SAlexander Motin 	thread_unlock(td);
536a157e425SAlexander Motin 
537a157e425SAlexander Motin #ifdef	HWPMC_HOOKS
538a157e425SAlexander Motin 	if (PMC_CPU_HAS_SAMPLES(PCPU_GET(cpuid)))
539a157e425SAlexander Motin 		PMC_CALL_HOOK_UNLOCKED(curthread, PMC_FN_DO_SAMPLES, NULL);
540a157e425SAlexander Motin #endif
541a157e425SAlexander Motin 	callout_tick();
542a157e425SAlexander Motin 	/* We are in charge of handling this tick's global duties. */
543a157e425SAlexander Motin 	if (newticks > 0) {
5444763a8b8SAlexander Motin 		/* It is dangerous and unnecessary to call these things concurrently. */
5454763a8b8SAlexander Motin 		if (atomic_cmpset_acq_int(&global_hardclock_run, 0, 1)) {
5460e189873SAlexander Motin 			tc_ticktock(newticks);
547a157e425SAlexander Motin #ifdef DEVICE_POLLING
5484763a8b8SAlexander Motin 			/* This is very short and quick. */
5494763a8b8SAlexander Motin 			hardclock_device_poll();
550a157e425SAlexander Motin #endif /* DEVICE_POLLING */
5514763a8b8SAlexander Motin 			atomic_store_rel_int(&global_hardclock_run, 0);
5524763a8b8SAlexander Motin 		}
553a157e425SAlexander Motin #ifdef SW_WATCHDOG
554a157e425SAlexander Motin 		if (watchdog_enabled > 0) {
5554763a8b8SAlexander Motin 			i = atomic_fetchadd_int(&watchdog_ticks, -newticks);
5564763a8b8SAlexander Motin 			if (i > 0 && i <= newticks)
557a157e425SAlexander Motin 				watchdog_fire();
558a157e425SAlexander Motin 		}
559a157e425SAlexander Motin #endif /* SW_WATCHDOG */
560a157e425SAlexander Motin 	}
561a157e425SAlexander Motin 	if (curcpu == CPU_FIRST())
562a157e425SAlexander Motin 		cpu_tick_calibration();
563a157e425SAlexander Motin }
564a157e425SAlexander Motin 
565a157e425SAlexander Motin void
566a157e425SAlexander Motin hardclock_sync(int cpu)
567a157e425SAlexander Motin {
568a157e425SAlexander Motin 	int	*t = DPCPU_ID_PTR(cpu, pcputicks);
569a157e425SAlexander Motin 
570a157e425SAlexander Motin 	*t = ticks;
571a157e425SAlexander Motin }
572a157e425SAlexander Motin 
573df8bae1dSRodney W. Grimes /*
574227ee8a1SPoul-Henning Kamp  * Compute number of ticks in the specified amount of time.
575df8bae1dSRodney W. Grimes  */
576df8bae1dSRodney W. Grimes int
577227ee8a1SPoul-Henning Kamp tvtohz(tv)
578df8bae1dSRodney W. Grimes 	struct timeval *tv;
579df8bae1dSRodney W. Grimes {
5806976af69SBruce Evans 	register unsigned long ticks;
5816976af69SBruce Evans 	register long sec, usec;
582df8bae1dSRodney W. Grimes 
583df8bae1dSRodney W. Grimes 	/*
5846976af69SBruce Evans 	 * If the number of usecs in the whole seconds part of the time
5856976af69SBruce Evans 	 * difference fits in a long, then the total number of usecs will
5866976af69SBruce Evans 	 * fit in an unsigned long.  Compute the total and convert it to
5876976af69SBruce Evans 	 * ticks, rounding up and adding 1 to allow for the current tick
5886976af69SBruce Evans 	 * to expire.  Rounding also depends on unsigned long arithmetic
5896976af69SBruce Evans 	 * to avoid overflow.
590df8bae1dSRodney W. Grimes 	 *
5916976af69SBruce Evans 	 * Otherwise, if the number of ticks in the whole seconds part of
5926976af69SBruce Evans 	 * the time difference fits in a long, then convert the parts to
5936976af69SBruce Evans 	 * ticks separately and add, using similar rounding methods and
5946976af69SBruce Evans 	 * overflow avoidance.  This method would work in the previous
5956976af69SBruce Evans 	 * case but it is slightly slower and assumes that hz is integral.
5966976af69SBruce Evans 	 *
5976976af69SBruce Evans 	 * Otherwise, round the time difference down to the maximum
5986976af69SBruce Evans 	 * representable value.
5996976af69SBruce Evans 	 *
6006976af69SBruce Evans 	 * If ints have 32 bits, then the maximum value for any timeout in
6016976af69SBruce Evans 	 * 10ms ticks is 248 days.
602df8bae1dSRodney W. Grimes 	 */
603227ee8a1SPoul-Henning Kamp 	sec = tv->tv_sec;
604227ee8a1SPoul-Henning Kamp 	usec = tv->tv_usec;
6056976af69SBruce Evans 	if (usec < 0) {
6066976af69SBruce Evans 		sec--;
6076976af69SBruce Evans 		usec += 1000000;
6086976af69SBruce Evans 	}
6096976af69SBruce Evans 	if (sec < 0) {
6106976af69SBruce Evans #ifdef DIAGNOSTIC
611b05dcf3cSPoul-Henning Kamp 		if (usec > 0) {
6127ec73f64SPoul-Henning Kamp 			sec++;
6137ec73f64SPoul-Henning Kamp 			usec -= 1000000;
6147ec73f64SPoul-Henning Kamp 		}
615227ee8a1SPoul-Henning Kamp 		printf("tvotohz: negative time difference %ld sec %ld usec\n",
6166976af69SBruce Evans 		       sec, usec);
6176976af69SBruce Evans #endif
6186976af69SBruce Evans 		ticks = 1;
6196976af69SBruce Evans 	} else if (sec <= LONG_MAX / 1000000)
6206976af69SBruce Evans 		ticks = (sec * 1000000 + (unsigned long)usec + (tick - 1))
6216976af69SBruce Evans 			/ tick + 1;
6226976af69SBruce Evans 	else if (sec <= LONG_MAX / hz)
6236976af69SBruce Evans 		ticks = sec * hz
6246976af69SBruce Evans 			+ ((unsigned long)usec + (tick - 1)) / tick + 1;
6256976af69SBruce Evans 	else
6266976af69SBruce Evans 		ticks = LONG_MAX;
6276976af69SBruce Evans 	if (ticks > INT_MAX)
6286976af69SBruce Evans 		ticks = INT_MAX;
629d6116663SAlexander Langer 	return ((int)ticks);
630df8bae1dSRodney W. Grimes }
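/*
 * As a worked example of the first branch above: with hz = 1000 (so tick is
 * 1000 us), a timeval of 2.5 seconds gives
 *
 *	ticks = (2 * 1000000 + 500000 + 999) / 1000 + 1 = 2501
 *
 * i.e. the conversion rounds up and adds one tick to cover the partially
 * elapsed current tick.
 */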
631df8bae1dSRodney W. Grimes 
632df8bae1dSRodney W. Grimes /*
633df8bae1dSRodney W. Grimes  * Start profiling on a process.
634df8bae1dSRodney W. Grimes  *
635df8bae1dSRodney W. Grimes  * Kernel profiling passes proc0 which never exits and hence
636df8bae1dSRodney W. Grimes  * keeps the profile clock running constantly.
637df8bae1dSRodney W. Grimes  */
638df8bae1dSRodney W. Grimes void
639df8bae1dSRodney W. Grimes startprofclock(p)
640df8bae1dSRodney W. Grimes 	register struct proc *p;
641df8bae1dSRodney W. Grimes {
642df8bae1dSRodney W. Grimes 
6439752f794SJohn Baldwin 	PROC_LOCK_ASSERT(p, MA_OWNED);
6449752f794SJohn Baldwin 	if (p->p_flag & P_STOPPROF)
645a282253aSJulian Elischer 		return;
6469752f794SJohn Baldwin 	if ((p->p_flag & P_PROFIL) == 0) {
6479752f794SJohn Baldwin 		p->p_flag |= P_PROFIL;
648875b8844SAlexander Motin 		mtx_lock(&time_lock);
649238dd320SJake Burkholder 		if (++profprocs == 1)
650238dd320SJake Burkholder 			cpu_startprofclock();
651875b8844SAlexander Motin 		mtx_unlock(&time_lock);
652df8bae1dSRodney W. Grimes 	}
6539752f794SJohn Baldwin }
654df8bae1dSRodney W. Grimes 
655df8bae1dSRodney W. Grimes /*
656df8bae1dSRodney W. Grimes  * Stop profiling on a process.
657df8bae1dSRodney W. Grimes  */
658df8bae1dSRodney W. Grimes void
659df8bae1dSRodney W. Grimes stopprofclock(p)
660df8bae1dSRodney W. Grimes 	register struct proc *p;
661df8bae1dSRodney W. Grimes {
662df8bae1dSRodney W. Grimes 
663a282253aSJulian Elischer 	PROC_LOCK_ASSERT(p, MA_OWNED);
6649752f794SJohn Baldwin 	if (p->p_flag & P_PROFIL) {
6659752f794SJohn Baldwin 		if (p->p_profthreads != 0) {
6669752f794SJohn Baldwin 			p->p_flag |= P_STOPPROF;
6679752f794SJohn Baldwin 			while (p->p_profthreads != 0)
668a282253aSJulian Elischer 				msleep(&p->p_profthreads, &p->p_mtx, PPAUSE,
669a89ec05eSPeter Wemm 				    "stopprof", 0);
6709752f794SJohn Baldwin 			p->p_flag &= ~P_STOPPROF;
671a282253aSJulian Elischer 		}
672b62b2304SColin Percival 		if ((p->p_flag & P_PROFIL) == 0)
673b62b2304SColin Percival 			return;
6749752f794SJohn Baldwin 		p->p_flag &= ~P_PROFIL;
675875b8844SAlexander Motin 		mtx_lock(&time_lock);
676238dd320SJake Burkholder 		if (--profprocs == 0)
677238dd320SJake Burkholder 			cpu_stopprofclock();
678875b8844SAlexander Motin 		mtx_unlock(&time_lock);
679df8bae1dSRodney W. Grimes 	}
6809752f794SJohn Baldwin }
681df8bae1dSRodney W. Grimes 
682df8bae1dSRodney W. Grimes /*
6831c4bcd05SJeff Roberson  * Statistics clock.  Updates rusage information and calls the scheduler
6841c4bcd05SJeff Roberson  * to adjust priorities of the active thread.
6851c4bcd05SJeff Roberson  *
686238dd320SJake Burkholder  * This should be called by all active processors.
687df8bae1dSRodney W. Grimes  */
688df8bae1dSRodney W. Grimes void
689b439e431SJohn Baldwin statclock(int usermode)
690df8bae1dSRodney W. Grimes {
6918a129caeSDavid Greenman 	struct rusage *ru;
6928a129caeSDavid Greenman 	struct vmspace *vm;
693238dd320SJake Burkholder 	struct thread *td;
694238dd320SJake Burkholder 	struct proc *p;
695238dd320SJake Burkholder 	long rss;
6967628402bSPeter Wemm 	long *cp_time;
6978a129caeSDavid Greenman 
698238dd320SJake Burkholder 	td = curthread;
699238dd320SJake Burkholder 	p = td->td_proc;
700238dd320SJake Burkholder 
7017628402bSPeter Wemm 	cp_time = (long *)PCPU_PTR(cp_time);
702b439e431SJohn Baldwin 	if (usermode) {
703df8bae1dSRodney W. Grimes 		/*
70471a62f8aSBruce Evans 		 * Charge the time as appropriate.
705df8bae1dSRodney W. Grimes 		 */
706e8444a7eSPoul-Henning Kamp 		td->td_uticks++;
707fa885116SJulian Elischer 		if (p->p_nice > NZERO)
7087628402bSPeter Wemm 			cp_time[CP_NICE]++;
709df8bae1dSRodney W. Grimes 		else
7107628402bSPeter Wemm 			cp_time[CP_USER]++;
711df8bae1dSRodney W. Grimes 	} else {
712df8bae1dSRodney W. Grimes 		/*
713df8bae1dSRodney W. Grimes 		 * Came from kernel mode, so we were:
714df8bae1dSRodney W. Grimes 		 * - handling an interrupt,
715df8bae1dSRodney W. Grimes 		 * - doing syscall or trap work on behalf of the current
716df8bae1dSRodney W. Grimes 		 *   user process, or
717df8bae1dSRodney W. Grimes 		 * - spinning in the idle loop.
718df8bae1dSRodney W. Grimes 		 * Whichever it is, charge the time as appropriate.
719df8bae1dSRodney W. Grimes 		 * Note that we charge interrupts to the current process,
720df8bae1dSRodney W. Grimes 		 * regardless of whether they are ``for'' that process,
721df8bae1dSRodney W. Grimes 		 * so that we know how much of its real time was spent
722df8bae1dSRodney W. Grimes 		 * in ``non-process'' (i.e., interrupt) work.
723df8bae1dSRodney W. Grimes 		 */
724e0f66ef8SJohn Baldwin 		if ((td->td_pflags & TDP_ITHREAD) ||
725e0f66ef8SJohn Baldwin 		    td->td_intr_nesting_level >= 2) {
726e8444a7eSPoul-Henning Kamp 			td->td_iticks++;
7277628402bSPeter Wemm 			cp_time[CP_INTR]++;
7280384fff8SJason Evans 		} else {
729eb2da9a5SPoul-Henning Kamp 			td->td_pticks++;
730e8444a7eSPoul-Henning Kamp 			td->td_sticks++;
731486a9414SJulian Elischer 			if (!TD_IS_IDLETHREAD(td))
7327628402bSPeter Wemm 				cp_time[CP_SYS]++;
7330384fff8SJason Evans 			else
7347628402bSPeter Wemm 				cp_time[CP_IDLE]++;
735df8bae1dSRodney W. Grimes 		}
7360384fff8SJason Evans 	}
737f5e9e8ecSBruce Evans 
738f5e9e8ecSBruce Evans 	/* Update resource usage integrals and maximums. */
73916f9f205SJohn Baldwin 	MPASS(p->p_vmspace != NULL);
74016f9f205SJohn Baldwin 	vm = p->p_vmspace;
7411c4bcd05SJeff Roberson 	ru = &td->td_ru;
7421c6d46f9SLuoqi Chen 	ru->ru_ixrss += pgtok(vm->vm_tsize);
7431c6d46f9SLuoqi Chen 	ru->ru_idrss += pgtok(vm->vm_dsize);
7441c6d46f9SLuoqi Chen 	ru->ru_isrss += pgtok(vm->vm_ssize);
7451c6d46f9SLuoqi Chen 	rss = pgtok(vmspace_resident_count(vm));
746f5e9e8ecSBruce Evans 	if (ru->ru_maxrss < rss)
747f5e9e8ecSBruce Evans 		ru->ru_maxrss = rss;
7488f51ad55SJeff Roberson 	KTR_POINT2(KTR_SCHED, "thread", sched_tdname(td), "statclock",
7498f51ad55SJeff Roberson 	    "prio:%d", td->td_priority, "stathz:%d", (stathz)?stathz:hz);
7507628402bSPeter Wemm 	thread_lock_flags(td, MTX_QUIET);
75140acdeabSJeff Roberson 	sched_clock(td);
75240acdeabSJeff Roberson 	thread_unlock(td);
7536caa8a15SJohn Baldwin }
7546c567274SJohn Baldwin 
7556caa8a15SJohn Baldwin void
756b439e431SJohn Baldwin profclock(int usermode, uintfptr_t pc)
7576caa8a15SJohn Baldwin {
758238dd320SJake Burkholder 	struct thread *td;
759238dd320SJake Burkholder #ifdef GPROF
760238dd320SJake Burkholder 	struct gmonparam *g;
7615c8b4441SJohn Baldwin 	uintfptr_t i;
762238dd320SJake Burkholder #endif
7636caa8a15SJohn Baldwin 
7644a338afdSJulian Elischer 	td = curthread;
765b439e431SJohn Baldwin 	if (usermode) {
766238dd320SJake Burkholder 		/*
767238dd320SJake Burkholder 		 * Came from user mode; CPU was in user state.
768238dd320SJake Burkholder 		 * If this process is being profiled, record the tick.
769a282253aSJulian Elischer 		 * If there is no related user location yet, don't
770a282253aSJulian Elischer 		 * bother trying to count it.
771238dd320SJake Burkholder 		 */
7729752f794SJohn Baldwin 		if (td->td_proc->p_flag & P_PROFIL)
773b439e431SJohn Baldwin 			addupc_intr(td, pc, 1);
774238dd320SJake Burkholder 	}
775238dd320SJake Burkholder #ifdef GPROF
776238dd320SJake Burkholder 	else {
777238dd320SJake Burkholder 		/*
778238dd320SJake Burkholder 		 * Kernel statistics are just like addupc_intr, only easier.
779238dd320SJake Burkholder 		 */
780238dd320SJake Burkholder 		g = &_gmonparam;
781b439e431SJohn Baldwin 		if (g->state == GMON_PROF_ON && pc >= g->lowpc) {
782b439e431SJohn Baldwin 			i = PC_TO_I(g, pc);
783238dd320SJake Burkholder 			if (i < g->textsize) {
784b439e431SJohn Baldwin 				KCOUNT(g, i)++;
785238dd320SJake Burkholder 			}
786238dd320SJake Burkholder 		}
787238dd320SJake Burkholder 	}
788238dd320SJake Burkholder #endif
789df8bae1dSRodney W. Grimes }
790df8bae1dSRodney W. Grimes 
791df8bae1dSRodney W. Grimes /*
792df8bae1dSRodney W. Grimes  * Return information about system clocks.
793df8bae1dSRodney W. Grimes  */
794787d58f2SPoul-Henning Kamp static int
79582d9ae4eSPoul-Henning Kamp sysctl_kern_clockrate(SYSCTL_HANDLER_ARGS)
796df8bae1dSRodney W. Grimes {
797df8bae1dSRodney W. Grimes 	struct clockinfo clkinfo;
798df8bae1dSRodney W. Grimes 	/*
799df8bae1dSRodney W. Grimes 	 * Construct clockinfo structure.
800df8bae1dSRodney W. Grimes 	 */
801a9a0f15aSBruce Evans 	bzero(&clkinfo, sizeof(clkinfo));
802df8bae1dSRodney W. Grimes 	clkinfo.hz = hz;
803df8bae1dSRodney W. Grimes 	clkinfo.tick = tick;
804df8bae1dSRodney W. Grimes 	clkinfo.profhz = profhz;
805df8bae1dSRodney W. Grimes 	clkinfo.stathz = stathz ? stathz : hz;
806ae0eb976SPoul-Henning Kamp 	return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req));
807df8bae1dSRodney W. Grimes }
8083f31c649SGarrett Wollman 
809c383c221SEd Schouten SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate,
810c383c221SEd Schouten 	CTLTYPE_STRUCT|CTLFLAG_RD|CTLFLAG_MPSAFE,
811af1408e3SLuigi Rizzo 	0, 0, sysctl_kern_clockrate, "S,clockinfo",
812af1408e3SLuigi Rizzo 	"Rate and period of various kernel clocks");
813370c3cb5SSean Kelly 
8144103b765SPoul-Henning Kamp #ifdef SW_WATCHDOG
8154103b765SPoul-Henning Kamp 
8164103b765SPoul-Henning Kamp static void
8179079fff5SNick Hibma watchdog_config(void *unused __unused, u_int cmd, int *error)
818370c3cb5SSean Kelly {
8194103b765SPoul-Henning Kamp 	u_int u;
820370c3cb5SSean Kelly 
8214103b765SPoul-Henning Kamp 	u = cmd & WD_INTERVAL;
8229079fff5SNick Hibma 	if (u >= WD_TO_1SEC) {
8234103b765SPoul-Henning Kamp 		watchdog_ticks = (1 << (u - WD_TO_1SEC)) * hz;
8244103b765SPoul-Henning Kamp 		watchdog_enabled = 1;
8259079fff5SNick Hibma 		*error = 0;
8264103b765SPoul-Henning Kamp 	} else {
8274103b765SPoul-Henning Kamp 		watchdog_enabled = 0;
828370c3cb5SSean Kelly 	}
8294103b765SPoul-Henning Kamp }
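/*
 * WD_INTERVAL encodes the timeout as a power-of-two exponent, so each step
 * above WD_TO_1SEC doubles the period; e.g. a command with
 * u == WD_TO_1SEC + 4 (a nominal 16 second timeout) arms the timer with
 * watchdog_ticks = (1 << 4) * hz = 16 * hz.
 */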
830370c3cb5SSean Kelly 
831370c3cb5SSean Kelly /*
832370c3cb5SSean Kelly  * Handle a watchdog timeout by dumping interrupt information and
833911d16b8SEd Maste  * then either dropping to DDB or panicking.
834370c3cb5SSean Kelly  */
835370c3cb5SSean Kelly static void
836370c3cb5SSean Kelly watchdog_fire(void)
837370c3cb5SSean Kelly {
838370c3cb5SSean Kelly 	int nintr;
83960ae52f7SEd Schouten 	uint64_t inttotal;
840370c3cb5SSean Kelly 	u_long *curintr;
841370c3cb5SSean Kelly 	char *curname;
842370c3cb5SSean Kelly 
843370c3cb5SSean Kelly 	curintr = intrcnt;
844370c3cb5SSean Kelly 	curname = intrnames;
845370c3cb5SSean Kelly 	inttotal = 0;
846556a5850SAlexander Motin 	nintr = sintrcnt / sizeof(u_long);
847370c3cb5SSean Kelly 
848370c3cb5SSean Kelly 	printf("interrupt                   total\n");
849370c3cb5SSean Kelly 	while (--nintr >= 0) {
850370c3cb5SSean Kelly 		if (*curintr)
851370c3cb5SSean Kelly 			printf("%-12s %20lu\n", curname, *curintr);
852370c3cb5SSean Kelly 		curname += strlen(curname) + 1;
853370c3cb5SSean Kelly 		inttotal += *curintr++;
854370c3cb5SSean Kelly 	}
8556cda4155SSean Kelly 	printf("Total        %20ju\n", (uintmax_t)inttotal);
856911d16b8SEd Maste 
857911d16b8SEd Maste #if defined(KDB) && !defined(KDB_UNATTENDED)
858911d16b8SEd Maste 	kdb_backtrace();
8593de213ccSRobert Watson 	kdb_enter(KDB_WHY_WATCHDOG, "watchdog timeout");
860911d16b8SEd Maste #else
861370c3cb5SSean Kelly 	panic("watchdog timeout");
862911d16b8SEd Maste #endif
863370c3cb5SSean Kelly }
864370c3cb5SSean Kelly 
8654103b765SPoul-Henning Kamp #endif /* SW_WATCHDOG */
866