xref: /freebsd/sys/kern/kern_clock.c (revision b3e9e682cf2d37fd8310fa054e56c959fe8b664e)
1df8bae1dSRodney W. Grimes /*-
2df8bae1dSRodney W. Grimes  * Copyright (c) 1982, 1986, 1991, 1993
3df8bae1dSRodney W. Grimes  *	The Regents of the University of California.  All rights reserved.
4df8bae1dSRodney W. Grimes  * (c) UNIX System Laboratories, Inc.
5df8bae1dSRodney W. Grimes  * All or some portions of this file are derived from material licensed
6df8bae1dSRodney W. Grimes  * to the University of California by American Telephone and Telegraph
7df8bae1dSRodney W. Grimes  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8df8bae1dSRodney W. Grimes  * the permission of UNIX System Laboratories, Inc.
9df8bae1dSRodney W. Grimes  *
10df8bae1dSRodney W. Grimes  * Redistribution and use in source and binary forms, with or without
11df8bae1dSRodney W. Grimes  * modification, are permitted provided that the following conditions
12df8bae1dSRodney W. Grimes  * are met:
13df8bae1dSRodney W. Grimes  * 1. Redistributions of source code must retain the above copyright
14df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer.
15df8bae1dSRodney W. Grimes  * 2. Redistributions in binary form must reproduce the above copyright
16df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer in the
17df8bae1dSRodney W. Grimes  *    documentation and/or other materials provided with the distribution.
18df8bae1dSRodney W. Grimes  * 4. Neither the name of the University nor the names of its contributors
19df8bae1dSRodney W. Grimes  *    may be used to endorse or promote products derived from this software
20df8bae1dSRodney W. Grimes  *    without specific prior written permission.
21df8bae1dSRodney W. Grimes  *
22df8bae1dSRodney W. Grimes  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23df8bae1dSRodney W. Grimes  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24df8bae1dSRodney W. Grimes  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25df8bae1dSRodney W. Grimes  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26df8bae1dSRodney W. Grimes  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27df8bae1dSRodney W. Grimes  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28df8bae1dSRodney W. Grimes  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29df8bae1dSRodney W. Grimes  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30df8bae1dSRodney W. Grimes  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31df8bae1dSRodney W. Grimes  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32df8bae1dSRodney W. Grimes  * SUCH DAMAGE.
33df8bae1dSRodney W. Grimes  *
34df8bae1dSRodney W. Grimes  *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
35df8bae1dSRodney W. Grimes  */
36df8bae1dSRodney W. Grimes 
37677b542eSDavid E. O'Brien #include <sys/cdefs.h>
38677b542eSDavid E. O'Brien __FBSDID("$FreeBSD$");
39677b542eSDavid E. O'Brien 
40911d16b8SEd Maste #include "opt_kdb.h"
41f0796cd2SGleb Smirnoff #include "opt_device_polling.h"
424da0d332SPeter Wemm #include "opt_hwpmc_hooks.h"
43*b3e9e682SRyan Stone #include "opt_kdtrace.h"
4432c20357SPoul-Henning Kamp #include "opt_ntp.h"
45370c3cb5SSean Kelly #include "opt_watchdog.h"
4632c20357SPoul-Henning Kamp 
47df8bae1dSRodney W. Grimes #include <sys/param.h>
48df8bae1dSRodney W. Grimes #include <sys/systm.h>
49df8bae1dSRodney W. Grimes #include <sys/callout.h>
502d50560aSMarcel Moolenaar #include <sys/kdb.h>
51df8bae1dSRodney W. Grimes #include <sys/kernel.h>
52f7829d0dSAttilio Rao #include <sys/kthread.h>
5361d80e90SJohn Baldwin #include <sys/ktr.h>
54f7829d0dSAttilio Rao #include <sys/lock.h>
5535e0e5b3SJohn Baldwin #include <sys/mutex.h>
56df8bae1dSRodney W. Grimes #include <sys/proc.h>
57e4625663SJeff Roberson #include <sys/resource.h>
58df8bae1dSRodney W. Grimes #include <sys/resourcevar.h>
59b43179fbSJeff Roberson #include <sys/sched.h>
60*b3e9e682SRyan Stone #include <sys/sdt.h>
61797f2d22SPoul-Henning Kamp #include <sys/signalvar.h>
62f7829d0dSAttilio Rao #include <sys/sleepqueue.h>
636caa8a15SJohn Baldwin #include <sys/smp.h>
648a129caeSDavid Greenman #include <vm/vm.h>
65efeaf95aSDavid Greenman #include <vm/pmap.h>
66efeaf95aSDavid Greenman #include <vm/vm_map.h>
67797f2d22SPoul-Henning Kamp #include <sys/sysctl.h>
688088699fSJohn Baldwin #include <sys/bus.h>
698088699fSJohn Baldwin #include <sys/interrupt.h>
70104a9b7eSAlexander Kabaev #include <sys/limits.h>
71e7fa55afSPoul-Henning Kamp #include <sys/timetc.h>
72df8bae1dSRodney W. Grimes 
73df8bae1dSRodney W. Grimes #ifdef GPROF
74df8bae1dSRodney W. Grimes #include <sys/gmon.h>
75df8bae1dSRodney W. Grimes #endif
76df8bae1dSRodney W. Grimes 
7736c0fd9dSJoseph Koshy #ifdef HWPMC_HOOKS
7836c0fd9dSJoseph Koshy #include <sys/pmckern.h>
79f5f9340bSFabien Thomas PMC_SOFT_DEFINE( , , clock, hard);
80f5f9340bSFabien Thomas PMC_SOFT_DEFINE( , , clock, stat);
8136c0fd9dSJoseph Koshy #endif
8236c0fd9dSJoseph Koshy 
83e4fc250cSLuigi Rizzo #ifdef DEVICE_POLLING
84e4fc250cSLuigi Rizzo extern void hardclock_device_poll(void);
85e4fc250cSLuigi Rizzo #endif /* DEVICE_POLLING */
86eae8fc2cSSteve Passe 
874d77a549SAlfred Perlstein static void initclocks(void *dummy);
88237fdd78SRobert Watson SYSINIT(clocks, SI_SUB_CLOCKS, SI_ORDER_FIRST, initclocks, NULL);
892b14f991SJulian Elischer 
908b98fec9SJeff Roberson /* Spin-lock protecting profiling statistics. */
9186a49deaSAttilio Rao static struct mtx time_lock;
928b98fec9SJeff Roberson 
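/*
 * DTrace sched provider probe, fired from statclock() for each tick
 * charged to a thread.
 */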
93*b3e9e682SRyan Stone SDT_PROVIDER_DECLARE(sched);
94*b3e9e682SRyan Stone SDT_PROBE_DEFINE2(sched, , , tick, tick, "struct thread *", "struct proc *");
95*b3e9e682SRyan Stone 
9662919d78SPeter Wemm static int
9762919d78SPeter Wemm sysctl_kern_cp_time(SYSCTL_HANDLER_ARGS)
9862919d78SPeter Wemm {
9962919d78SPeter Wemm 	int error;
1007628402bSPeter Wemm 	long cp_time[CPUSTATES];
101cff2e749SPaul Saab #ifdef SCTL_MASK32
10262919d78SPeter Wemm 	int i;
10362919d78SPeter Wemm 	unsigned int cp_time32[CPUSTATES];
1047628402bSPeter Wemm #endif
10562919d78SPeter Wemm 
1067628402bSPeter Wemm 	read_cpu_time(cp_time);
1077628402bSPeter Wemm #ifdef SCTL_MASK32
108cff2e749SPaul Saab 	if (req->flags & SCTL_MASK32) {
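		/*
		 * 32-bit compatibility: the caller expects an array of
		 * 32-bit counters, so narrow each long before copying out.
		 */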
10962919d78SPeter Wemm 		if (!req->oldptr)
11062919d78SPeter Wemm 			return SYSCTL_OUT(req, 0, sizeof(cp_time32));
11162919d78SPeter Wemm 		for (i = 0; i < CPUSTATES; i++)
11262919d78SPeter Wemm 			cp_time32[i] = (unsigned int)cp_time[i];
11362919d78SPeter Wemm 		error = SYSCTL_OUT(req, cp_time32, sizeof(cp_time32));
11462919d78SPeter Wemm 	} else
11562919d78SPeter Wemm #endif
11662919d78SPeter Wemm 	{
11762919d78SPeter Wemm 		if (!req->oldptr)
11862919d78SPeter Wemm 			return SYSCTL_OUT(req, 0, sizeof(cp_time));
11962919d78SPeter Wemm 		error = SYSCTL_OUT(req, cp_time, sizeof(cp_time));
12062919d78SPeter Wemm 	}
12162919d78SPeter Wemm 	return error;
12262919d78SPeter Wemm }
12362919d78SPeter Wemm 
124c383c221SEd Schouten SYSCTL_PROC(_kern, OID_AUTO, cp_time, CTLTYPE_LONG|CTLFLAG_RD|CTLFLAG_MPSAFE,
12562919d78SPeter Wemm     0,0, sysctl_kern_cp_time, "LU", "CPU time statistics");
1267f112b04SRobert Watson 
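#if 0
/*
 * Illustrative userland consumer, not part of this file: reading the
 * aggregate counters exported above as kern.cp_time.  The function name
 * is hypothetical; the values are statclock ticks per CPU state.
 */
#include <sys/types.h>
#include <sys/resource.h>	/* CPUSTATES, CP_USER, ... */
#include <sys/sysctl.h>

static int
example_read_cp_time(long out[CPUSTATES])
{
	size_t len = sizeof(long) * CPUSTATES;

	return (sysctlbyname("kern.cp_time", out, &len, NULL, 0));
}
#endif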
1277628402bSPeter Wemm static long empty[CPUSTATES];
1287628402bSPeter Wemm 
1297628402bSPeter Wemm static int
1307628402bSPeter Wemm sysctl_kern_cp_times(SYSCTL_HANDLER_ARGS)
1317628402bSPeter Wemm {
1327628402bSPeter Wemm 	struct pcpu *pcpu;
1337628402bSPeter Wemm 	int error;
134ef54068bSRobert Watson 	int c;
1357628402bSPeter Wemm 	long *cp_time;
1367628402bSPeter Wemm #ifdef SCTL_MASK32
1377628402bSPeter Wemm 	unsigned int cp_time32[CPUSTATES];
138ef54068bSRobert Watson 	int i;
1397628402bSPeter Wemm #endif
1407628402bSPeter Wemm 
1417628402bSPeter Wemm 	if (!req->oldptr) {
1427628402bSPeter Wemm #ifdef SCTL_MASK32
1437628402bSPeter Wemm 		if (req->flags & SCTL_MASK32)
1447628402bSPeter Wemm 			return SYSCTL_OUT(req, 0, sizeof(cp_time32) * (mp_maxid + 1));
1457628402bSPeter Wemm 		else
1467628402bSPeter Wemm #endif
1477628402bSPeter Wemm 			return SYSCTL_OUT(req, 0, sizeof(long) * CPUSTATES * (mp_maxid + 1));
1487628402bSPeter Wemm 	}
1497628402bSPeter Wemm 	for (error = 0, c = 0; error == 0 && c <= mp_maxid; c++) {
1507628402bSPeter Wemm 		if (!CPU_ABSENT(c)) {
1517628402bSPeter Wemm 			pcpu = pcpu_find(c);
1527628402bSPeter Wemm 			cp_time = pcpu->pc_cp_time;
1537628402bSPeter Wemm 		} else {
1547628402bSPeter Wemm 			cp_time = empty;
1557628402bSPeter Wemm 		}
1567628402bSPeter Wemm #ifdef SCTL_MASK32
1577628402bSPeter Wemm 		if (req->flags & SCTL_MASK32) {
1587628402bSPeter Wemm 			for (i = 0; i < CPUSTATES; i++)
1597628402bSPeter Wemm 				cp_time32[i] = (unsigned int)cp_time[i];
1607628402bSPeter Wemm 			error = SYSCTL_OUT(req, cp_time32, sizeof(cp_time32));
1617628402bSPeter Wemm 		} else
1627628402bSPeter Wemm #endif
1637628402bSPeter Wemm 			error = SYSCTL_OUT(req, cp_time, sizeof(long) * CPUSTATES);
1647628402bSPeter Wemm 	}
1657628402bSPeter Wemm 	return error;
1667628402bSPeter Wemm }
1677628402bSPeter Wemm 
168c383c221SEd Schouten SYSCTL_PROC(_kern, OID_AUTO, cp_times, CTLTYPE_LONG|CTLFLAG_RD|CTLFLAG_MPSAFE,
1697628402bSPeter Wemm     0,0, sysctl_kern_cp_times, "LU", "per-CPU time statistics");
1707628402bSPeter Wemm 
171f7829d0dSAttilio Rao #ifdef DEADLKRES
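/*
 * Wait messages exempt from the sleepqueue timeout check below; a thread
 * sleeping with one of these wmesg values may block indefinitely.
 */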
17236e51f65SAttilio Rao static const char *blessed[] = {
17395335fd8SAttilio Rao 	"getblk",
17436e51f65SAttilio Rao 	"so_snd_sx",
17536e51f65SAttilio Rao 	"so_rcv_sx",
17636e51f65SAttilio Rao 	NULL
17736e51f65SAttilio Rao };
178f7829d0dSAttilio Rao static int slptime_threshold = 1800;
179f7829d0dSAttilio Rao static int blktime_threshold = 900;
180f7829d0dSAttilio Rao static int sleepfreq = 3;
181f7829d0dSAttilio Rao 
182f7829d0dSAttilio Rao static void
183f7829d0dSAttilio Rao deadlkres(void)
184f7829d0dSAttilio Rao {
185f7829d0dSAttilio Rao 	struct proc *p;
186f7829d0dSAttilio Rao 	struct thread *td;
187f7829d0dSAttilio Rao 	void *wchan;
18836e51f65SAttilio Rao 	int blkticks, i, slpticks, slptype, tryl, tticks;
189f7829d0dSAttilio Rao 
190f7829d0dSAttilio Rao 	tryl = 0;
191f7829d0dSAttilio Rao 	for (;;) {
192f7829d0dSAttilio Rao 		blkticks = blktime_threshold * hz;
193f7829d0dSAttilio Rao 		slpticks = slptime_threshold * hz;
194f7829d0dSAttilio Rao 
195f7829d0dSAttilio Rao 		/*
196f7829d0dSAttilio Rao 		 * Avoid sleeping on the sx_lock in order to avoid a possible
197f7829d0dSAttilio Rao 		 * priority inversion problem leading to starvation.
198f7829d0dSAttilio Rao 		 * If the lock cannot be acquired after 100 tries, panic.
199f7829d0dSAttilio Rao 		 */
200f7829d0dSAttilio Rao 		if (!sx_try_slock(&allproc_lock)) {
201f7829d0dSAttilio Rao 			if (tryl > 100)
202f7829d0dSAttilio Rao 		panic("%s: possible deadlock detected on allproc_lock\n",
203f7829d0dSAttilio Rao 				    __func__);
204f7829d0dSAttilio Rao 			tryl++;
205b5850804SJohn Baldwin 			pause("allproc", sleepfreq * hz);
206f7829d0dSAttilio Rao 			continue;
207f7829d0dSAttilio Rao 		}
208f7829d0dSAttilio Rao 		tryl = 0;
209f7829d0dSAttilio Rao 		FOREACH_PROC_IN_SYSTEM(p) {
210f7829d0dSAttilio Rao 			PROC_LOCK(p);
211e806d352SJohn Baldwin 			if (p->p_state == PRS_NEW) {
212e806d352SJohn Baldwin 				PROC_UNLOCK(p);
213e806d352SJohn Baldwin 				continue;
214e806d352SJohn Baldwin 			}
215f7829d0dSAttilio Rao 			FOREACH_THREAD_IN_PROC(p, td) {
216631cb86fSAttilio Rao 
217631cb86fSAttilio Rao 				/*
218631cb86fSAttilio Rao 				 * Once a thread is found in an "interesting"
219631cb86fSAttilio Rao 				 * state, a possible ticks wrap-up needs to be
220631cb86fSAttilio Rao 				 * checked.
221631cb86fSAttilio Rao 				 */
222f7829d0dSAttilio Rao 				thread_lock(td);
223631cb86fSAttilio Rao 				if (TD_ON_LOCK(td) && ticks < td->td_blktick) {
224f7829d0dSAttilio Rao 
225f7829d0dSAttilio Rao 					/*
226f7829d0dSAttilio Rao 					 * The thread should be blocked on a
227f7829d0dSAttilio Rao 					 * The thread should be blocked on a
228f7829d0dSAttilio Rao 					 * turnstile; simply check whether the
229f7829d0dSAttilio Rao 					 * turnstile channel is in a good state.
230f7829d0dSAttilio Rao 					MPASS(td->td_blocked != NULL);
23136e51f65SAttilio Rao 
232f7829d0dSAttilio Rao 					tticks = ticks - td->td_blktick;
233f7829d0dSAttilio Rao 					thread_unlock(td);
234f7829d0dSAttilio Rao 					if (tticks > blkticks) {
235f7829d0dSAttilio Rao 
236f7829d0dSAttilio Rao 						/*
237f7829d0dSAttilio Rao 						 * Accordingly with provided
238f7829d0dSAttilio Rao 						 * According to the provided
239f7829d0dSAttilio Rao 						 * thresholds, this thread has
240f7829d0dSAttilio Rao 						 * been stuck for too long on
241f7829d0dSAttilio Rao 						 * a turnstile.
242f7829d0dSAttilio Rao 						PROC_UNLOCK(p);
243f7829d0dSAttilio Rao 						sx_sunlock(&allproc_lock);
244f7829d0dSAttilio Rao 	panic("%s: possible deadlock detected for %p, blocked for %d ticks\n",
245f7829d0dSAttilio Rao 						    __func__, td, tticks);
246f7829d0dSAttilio Rao 					}
247631cb86fSAttilio Rao 				} else if (TD_IS_SLEEPING(td) &&
248631cb86fSAttilio Rao 				    TD_ON_SLEEPQ(td) &&
249631cb86fSAttilio Rao 				    ticks < td->td_blktick) {
25036e51f65SAttilio Rao 
251f7829d0dSAttilio Rao 					/*
252f7829d0dSAttilio Rao 					 * Check whether the thread is sleeping
253f7829d0dSAttilio Rao 					 * on a lock; otherwise skip the check.
254f7829d0dSAttilio Rao 					 * Drop the thread lock in order to
255f7829d0dSAttilio Rao 					 * avoid a LOR with the sleepqueue
256f7829d0dSAttilio Rao 					 * spinlock.
257f7829d0dSAttilio Rao 					 */
258f7829d0dSAttilio Rao 					wchan = td->td_wchan;
259f7829d0dSAttilio Rao 					tticks = ticks - td->td_slptick;
260f7829d0dSAttilio Rao 					thread_unlock(td);
261f7829d0dSAttilio Rao 					slptype = sleepq_type(wchan);
262f7829d0dSAttilio Rao 					if ((slptype == SLEEPQ_SX ||
263f7829d0dSAttilio Rao 					    slptype == SLEEPQ_LK) &&
264f7829d0dSAttilio Rao 					    tticks > slpticks) {
265f7829d0dSAttilio Rao 
266f7829d0dSAttilio Rao 						/*
267f7829d0dSAttilio Rao 						 * According to the provided
268f7829d0dSAttilio Rao 						 * thresholds, this thread has
269f7829d0dSAttilio Rao 						 * been stuck for too long on
270f7829d0dSAttilio Rao 						 * a sleepqueue.
27136e51f65SAttilio Rao 						 * However, being on a
27236e51f65SAttilio Rao 						 * sleepqueue, we still need
27336e51f65SAttilio Rao 						 * to check against the
27436e51f65SAttilio Rao 						 * blessed list.
275f7829d0dSAttilio Rao 						 */
27636e51f65SAttilio Rao 						tryl = 0;
27736e51f65SAttilio Rao 						for (i = 0; blessed[i] != NULL;
27836e51f65SAttilio Rao 						    i++) {
27936e51f65SAttilio Rao 							if (!strcmp(blessed[i],
28036e51f65SAttilio Rao 							    td->td_wmesg)) {
28136e51f65SAttilio Rao 								tryl = 1;
28236e51f65SAttilio Rao 								break;
28336e51f65SAttilio Rao 							}
28436e51f65SAttilio Rao 						}
28536e51f65SAttilio Rao 						if (tryl != 0) {
28636e51f65SAttilio Rao 							tryl = 0;
28736e51f65SAttilio Rao 							continue;
28836e51f65SAttilio Rao 						}
289f7829d0dSAttilio Rao 						PROC_UNLOCK(p);
290f7829d0dSAttilio Rao 						sx_sunlock(&allproc_lock);
291f7829d0dSAttilio Rao 	panic("%s: possible deadlock detected for %p, blocked for %d ticks\n",
292f7829d0dSAttilio Rao 						    __func__, td, tticks);
293f7829d0dSAttilio Rao 					}
294f7829d0dSAttilio Rao 				} else
295f7829d0dSAttilio Rao 					thread_unlock(td);
296f7829d0dSAttilio Rao 			}
297f7829d0dSAttilio Rao 			PROC_UNLOCK(p);
298f7829d0dSAttilio Rao 		}
299f7829d0dSAttilio Rao 		sx_sunlock(&allproc_lock);
300f7829d0dSAttilio Rao 
301f7829d0dSAttilio Rao 		/* Sleep for sleepfreq seconds. */
302b5850804SJohn Baldwin 		pause("-", sleepfreq * hz);
303f7829d0dSAttilio Rao 	}
304f7829d0dSAttilio Rao }
305f7829d0dSAttilio Rao 
306f7829d0dSAttilio Rao static struct kthread_desc deadlkres_kd = {
307f7829d0dSAttilio Rao 	"deadlkres",
308f7829d0dSAttilio Rao 	deadlkres,
309f7829d0dSAttilio Rao 	(struct thread **)NULL
310f7829d0dSAttilio Rao };
311f7829d0dSAttilio Rao 
312f7829d0dSAttilio Rao SYSINIT(deadlkres, SI_SUB_CLOCKS, SI_ORDER_ANY, kthread_start, &deadlkres_kd);
313f7829d0dSAttilio Rao 
3146472ac3dSEd Schouten static SYSCTL_NODE(_debug, OID_AUTO, deadlkres, CTLFLAG_RW, 0,
3156472ac3dSEd Schouten     "Deadlock resolver");
316f7829d0dSAttilio Rao SYSCTL_INT(_debug_deadlkres, OID_AUTO, slptime_threshold, CTLFLAG_RW,
317f7829d0dSAttilio Rao     &slptime_threshold, 0,
318f7829d0dSAttilio Rao     "Number of seconds within which it is valid to sleep on a sleepqueue");
319f7829d0dSAttilio Rao SYSCTL_INT(_debug_deadlkres, OID_AUTO, blktime_threshold, CTLFLAG_RW,
320f7829d0dSAttilio Rao     &blktime_threshold, 0,
321f7829d0dSAttilio Rao     "Number of seconds within which it is valid to block on a turnstile");
322f7829d0dSAttilio Rao SYSCTL_INT(_debug_deadlkres, OID_AUTO, sleepfreq, CTLFLAG_RW, &sleepfreq, 0,
323f7829d0dSAttilio Rao     "Number of seconds between deadlock resolver thread runs");
324f7829d0dSAttilio Rao #endif	/* DEADLKRES */
325f7829d0dSAttilio Rao 
3267628402bSPeter Wemm void
3277628402bSPeter Wemm read_cpu_time(long *cp_time)
3287628402bSPeter Wemm {
3297628402bSPeter Wemm 	struct pcpu *pc;
3307628402bSPeter Wemm 	int i, j;
3317628402bSPeter Wemm 
3327628402bSPeter Wemm 	/* Sum up global cp_time[]. */
3337628402bSPeter Wemm 	bzero(cp_time, sizeof(long) * CPUSTATES);
3343aa6d94eSJohn Baldwin 	CPU_FOREACH(i) {
3357628402bSPeter Wemm 		pc = pcpu_find(i);
3367628402bSPeter Wemm 		for (j = 0; j < CPUSTATES; j++)
3377628402bSPeter Wemm 			cp_time[j] += pc->pc_cp_time[j];
3387628402bSPeter Wemm 	}
3397628402bSPeter Wemm }
3407628402bSPeter Wemm 
3414103b765SPoul-Henning Kamp #ifdef SW_WATCHDOG
3424103b765SPoul-Henning Kamp #include <sys/watchdog.h>
343370c3cb5SSean Kelly 
3444103b765SPoul-Henning Kamp static int watchdog_ticks;
345370c3cb5SSean Kelly static int watchdog_enabled;
3464103b765SPoul-Henning Kamp static void watchdog_fire(void);
3474103b765SPoul-Henning Kamp static void watchdog_config(void *, u_int, int *);
3484103b765SPoul-Henning Kamp #endif /* SW_WATCHDOG */
349370c3cb5SSean Kelly 
3503bac064fSPoul-Henning Kamp /*
351df8bae1dSRodney W. Grimes  * Clock handling routines.
352df8bae1dSRodney W. Grimes  *
353b05dcf3cSPoul-Henning Kamp  * This code is written to operate with two timers that run independently of
354b05dcf3cSPoul-Henning Kamp  * each other.
3557ec73f64SPoul-Henning Kamp  *
356b05dcf3cSPoul-Henning Kamp  * The main timer, running hz times per second, is used to trigger interval
357b05dcf3cSPoul-Henning Kamp  * timers, timeouts and rescheduling as needed.
3587ec73f64SPoul-Henning Kamp  *
359b05dcf3cSPoul-Henning Kamp  * The second timer handles kernel and user profiling,
360b05dcf3cSPoul-Henning Kamp  * and does resource use estimation.  If the second timer is programmable,
361b05dcf3cSPoul-Henning Kamp  * it is randomized to avoid aliasing between the two clocks.  For example,
362b05dcf3cSPoul-Henning Kamp  * the randomization prevents an adversary from always giving up the cpu
363df8bae1dSRodney W. Grimes  * just before its quantum expires.  Otherwise, it would never accumulate
364df8bae1dSRodney W. Grimes  * cpu ticks.  The mean frequency of the second timer is stathz.
365b05dcf3cSPoul-Henning Kamp  *
366b05dcf3cSPoul-Henning Kamp  * If no second timer exists, stathz will be zero; in this case we drive
367b05dcf3cSPoul-Henning Kamp  * profiling and statistics off the main clock.  This WILL NOT be accurate;
368b05dcf3cSPoul-Henning Kamp  * do not do it unless absolutely necessary.
369b05dcf3cSPoul-Henning Kamp  *
370df8bae1dSRodney W. Grimes  * The statistics clock may (or may not) be run at a higher rate while
371b05dcf3cSPoul-Henning Kamp  * profiling.  This profile clock runs at profhz.  We require that profhz
372b05dcf3cSPoul-Henning Kamp  * be an integral multiple of stathz.
373b05dcf3cSPoul-Henning Kamp  *
374b05dcf3cSPoul-Henning Kamp  * If the statistics clock is running fast, it must be divided by the ratio
375b05dcf3cSPoul-Henning Kamp  * profhz/stathz for statistics.  (For profiling, every tick counts.)
376df8bae1dSRodney W. Grimes  *
3777ec73f64SPoul-Henning Kamp  * Time-of-day is maintained using a "timecounter", which may or may
3787ec73f64SPoul-Henning Kamp  * not be related to the hardware generating the above mentioned
3797ec73f64SPoul-Henning Kamp  * interrupts.
380df8bae1dSRodney W. Grimes  */
381df8bae1dSRodney W. Grimes 
382df8bae1dSRodney W. Grimes int	stathz;
383df8bae1dSRodney W. Grimes int	profhz;
384238dd320SJake Burkholder int	profprocs;
385df8bae1dSRodney W. Grimes int	ticks;
386238dd320SJake Burkholder int	psratio;
387df8bae1dSRodney W. Grimes 
3883e288e62SDimitry Andric static DPCPU_DEFINE(int, pcputicks);	/* Per-CPU version of ticks. */
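/* Nonzero while one CPU is running the global (shared) hardclock duties. */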
3894763a8b8SAlexander Motin static int global_hardclock_run = 0;
390dbd55f3fSAlexander Motin 
391df8bae1dSRodney W. Grimes /*
392df8bae1dSRodney W. Grimes  * Initialize clock frequencies and start both clocks running.
393df8bae1dSRodney W. Grimes  */
3942b14f991SJulian Elischer /* ARGSUSED*/
3952b14f991SJulian Elischer static void
396d841aaa7SBruce Evans initclocks(dummy)
397d841aaa7SBruce Evans 	void *dummy;
398df8bae1dSRodney W. Grimes {
399df8bae1dSRodney W. Grimes 	register int i;
400df8bae1dSRodney W. Grimes 
401df8bae1dSRodney W. Grimes 	/*
402df8bae1dSRodney W. Grimes 	 * Set divisors to 1 (normal case) and let the machine-specific
403df8bae1dSRodney W. Grimes 	 * code do its bit.
404df8bae1dSRodney W. Grimes 	 */
405875b8844SAlexander Motin 	mtx_init(&time_lock, "time lock", NULL, MTX_DEF);
40663d69d25SRobert Watson 	cpu_initclocks();
407df8bae1dSRodney W. Grimes 
408df8bae1dSRodney W. Grimes 	/*
409df8bae1dSRodney W. Grimes 	 * Compute profhz/stathz, and fix profhz if needed.
410df8bae1dSRodney W. Grimes 	 */
411df8bae1dSRodney W. Grimes 	i = stathz ? stathz : hz;
412df8bae1dSRodney W. Grimes 	if (profhz == 0)
413df8bae1dSRodney W. Grimes 		profhz = i;
414df8bae1dSRodney W. Grimes 	psratio = profhz / i;
4154103b765SPoul-Henning Kamp #ifdef SW_WATCHDOG
4164103b765SPoul-Henning Kamp 	EVENTHANDLER_REGISTER(watchdog_list, watchdog_config, NULL, 0);
4174103b765SPoul-Henning Kamp #endif
418df8bae1dSRodney W. Grimes }
419df8bae1dSRodney W. Grimes 
420df8bae1dSRodney W. Grimes /*
421238dd320SJake Burkholder  * Each time the real-time timer fires, this function is called on all CPUs.
422b439e431SJohn Baldwin  * Note that hardclock() calls hardclock_cpu() for the boot CPU, so only
423238dd320SJake Burkholder  * the other CPUs in the system need to call this function.
4246caa8a15SJohn Baldwin  */
4256caa8a15SJohn Baldwin void
426b439e431SJohn Baldwin hardclock_cpu(int usermode)
4276caa8a15SJohn Baldwin {
4286caa8a15SJohn Baldwin 	struct pstats *pstats;
429238dd320SJake Burkholder 	struct thread *td = curthread;
430b40ce416SJulian Elischer 	struct proc *p = td->td_proc;
431b61ce5b0SJeff Roberson 	int flags;
4326caa8a15SJohn Baldwin 
4336caa8a15SJohn Baldwin 	/*
4346caa8a15SJohn Baldwin 	 * Run current process's virtual and profile time, as needed.
4356caa8a15SJohn Baldwin 	 */
436ad1e7d28SJulian Elischer 	pstats = p->p_stats;
437b61ce5b0SJeff Roberson 	flags = 0;
438ad1e7d28SJulian Elischer 	if (usermode &&
43940acdeabSJeff Roberson 	    timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value)) {
44040acdeabSJeff Roberson 		PROC_SLOCK(p);
441b61ce5b0SJeff Roberson 		if (itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0)
442b61ce5b0SJeff Roberson 			flags |= TDF_ALRMPEND | TDF_ASTPENDING;
44340acdeabSJeff Roberson 		PROC_SUNLOCK(p);
44440acdeabSJeff Roberson 	}
44540acdeabSJeff Roberson 	if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value)) {
44640acdeabSJeff Roberson 		PROC_SLOCK(p);
447b61ce5b0SJeff Roberson 		if (itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0)
448b61ce5b0SJeff Roberson 			flags |= TDF_PROFPEND | TDF_ASTPENDING;
44940acdeabSJeff Roberson 		PROC_SUNLOCK(p);
45040acdeabSJeff Roberson 	}
45140acdeabSJeff Roberson 	thread_lock(td);
452a157e425SAlexander Motin 	sched_tick(1);
453b61ce5b0SJeff Roberson 	td->td_flags |= flags;
45440acdeabSJeff Roberson 	thread_unlock(td);
45536c0fd9dSJoseph Koshy 
45636c0fd9dSJoseph Koshy #ifdef HWPMC_HOOKS
45736c0fd9dSJoseph Koshy 	if (PMC_CPU_HAS_SAMPLES(PCPU_GET(cpuid)))
45836c0fd9dSJoseph Koshy 		PMC_CALL_HOOK_UNLOCKED(curthread, PMC_FN_DO_SAMPLES, NULL);
459f5f9340bSFabien Thomas 	if (td->td_intr_frame != NULL)
460f5f9340bSFabien Thomas 		PMC_SOFT_CALL_TF( , , clock, hard, td->td_intr_frame);
46136c0fd9dSJoseph Koshy #endif
4628d809d50SJeff Roberson 	callout_tick();
4636caa8a15SJohn Baldwin }
4646caa8a15SJohn Baldwin 
4656caa8a15SJohn Baldwin /*
466df8bae1dSRodney W. Grimes  * The real-time timer, interrupting hz times per second.
467df8bae1dSRodney W. Grimes  */
468df8bae1dSRodney W. Grimes void
469b439e431SJohn Baldwin hardclock(int usermode, uintfptr_t pc)
470df8bae1dSRodney W. Grimes {
471df8bae1dSRodney W. Grimes 
4728d809d50SJeff Roberson 	atomic_add_int((volatile int *)&ticks, 1);
473b439e431SJohn Baldwin 	hardclock_cpu(usermode);
4740e189873SAlexander Motin 	tc_ticktock(1);
475a157e425SAlexander Motin 	cpu_tick_calibration();
476df8bae1dSRodney W. Grimes 	/*
477df8bae1dSRodney W. Grimes 	 * If no separate statistics clock is available, run it from here.
4786caa8a15SJohn Baldwin 	 *
4796caa8a15SJohn Baldwin 	 * XXX: this only works for UP
480df8bae1dSRodney W. Grimes 	 */
481238dd320SJake Burkholder 	if (stathz == 0) {
482b439e431SJohn Baldwin 		profclock(usermode, pc);
483b439e431SJohn Baldwin 		statclock(usermode);
484238dd320SJake Burkholder 	}
485e4fc250cSLuigi Rizzo #ifdef DEVICE_POLLING
486daccb638SLuigi Rizzo 	hardclock_device_poll();	/* this is very short and quick */
487e4fc250cSLuigi Rizzo #endif /* DEVICE_POLLING */
4884103b765SPoul-Henning Kamp #ifdef SW_WATCHDOG
4894103b765SPoul-Henning Kamp 	if (watchdog_enabled > 0 && --watchdog_ticks <= 0)
490370c3cb5SSean Kelly 		watchdog_fire();
4914103b765SPoul-Henning Kamp #endif /* SW_WATCHDOG */
492ab36c067SJustin T. Gibbs }
493ab36c067SJustin T. Gibbs 
494a157e425SAlexander Motin void
495bcfd016cSAlexander Motin hardclock_cnt(int cnt, int usermode)
496a157e425SAlexander Motin {
497a157e425SAlexander Motin 	struct pstats *pstats;
498a157e425SAlexander Motin 	struct thread *td = curthread;
499a157e425SAlexander Motin 	struct proc *p = td->td_proc;
500a157e425SAlexander Motin 	int *t = DPCPU_PTR(pcputicks);
5014763a8b8SAlexander Motin 	int flags, global, newticks;
5024763a8b8SAlexander Motin #ifdef SW_WATCHDOG
5034763a8b8SAlexander Motin 	int i;
5044763a8b8SAlexander Motin #endif /* SW_WATCHDOG */
505a157e425SAlexander Motin 
506a157e425SAlexander Motin 	/*
507a157e425SAlexander Motin 	 * Update per-CPU and possibly global ticks values.
508a157e425SAlexander Motin 	 */
509a157e425SAlexander Motin 	*t += cnt;
510a157e425SAlexander Motin 	do {
511a157e425SAlexander Motin 		global = ticks;
512a157e425SAlexander Motin 		newticks = *t - global;
513a157e425SAlexander Motin 		if (newticks <= 0) {
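			/*
			 * This CPU's count is not ahead of the global ticks
			 * value; resynchronize if it fell far behind and
			 * leave the global tick duties to another CPU.
			 */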
514a157e425SAlexander Motin 			if (newticks < -1)
515a157e425SAlexander Motin 				*t = global - 1;
516a157e425SAlexander Motin 			newticks = 0;
517a157e425SAlexander Motin 			break;
518a157e425SAlexander Motin 		}
519a157e425SAlexander Motin 	} while (!atomic_cmpset_int(&ticks, global, *t));
520a157e425SAlexander Motin 
521a157e425SAlexander Motin 	/*
522a157e425SAlexander Motin 	 * Run current process's virtual and profile time, as needed.
523a157e425SAlexander Motin 	 */
524a157e425SAlexander Motin 	pstats = p->p_stats;
525a157e425SAlexander Motin 	flags = 0;
526a157e425SAlexander Motin 	if (usermode &&
527a157e425SAlexander Motin 	    timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value)) {
528a157e425SAlexander Motin 		PROC_SLOCK(p);
529a157e425SAlexander Motin 		if (itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL],
530a157e425SAlexander Motin 		    tick * cnt) == 0)
531a157e425SAlexander Motin 			flags |= TDF_ALRMPEND | TDF_ASTPENDING;
532a157e425SAlexander Motin 		PROC_SUNLOCK(p);
533a157e425SAlexander Motin 	}
534a157e425SAlexander Motin 	if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value)) {
535a157e425SAlexander Motin 		PROC_SLOCK(p);
536a157e425SAlexander Motin 		if (itimerdecr(&pstats->p_timer[ITIMER_PROF],
537a157e425SAlexander Motin 		    tick * cnt) == 0)
538a157e425SAlexander Motin 			flags |= TDF_PROFPEND | TDF_ASTPENDING;
539a157e425SAlexander Motin 		PROC_SUNLOCK(p);
540a157e425SAlexander Motin 	}
541a157e425SAlexander Motin 	thread_lock(td);
542a157e425SAlexander Motin 	sched_tick(cnt);
543a157e425SAlexander Motin 	td->td_flags |= flags;
544a157e425SAlexander Motin 	thread_unlock(td);
545a157e425SAlexander Motin 
546a157e425SAlexander Motin #ifdef	HWPMC_HOOKS
547a157e425SAlexander Motin 	if (PMC_CPU_HAS_SAMPLES(PCPU_GET(cpuid)))
548a157e425SAlexander Motin 		PMC_CALL_HOOK_UNLOCKED(curthread, PMC_FN_DO_SAMPLES, NULL);
549f5f9340bSFabien Thomas 	if (td->td_intr_frame != NULL)
550f5f9340bSFabien Thomas 		PMC_SOFT_CALL_TF( , , clock, hard, td->td_intr_frame);
551a157e425SAlexander Motin #endif
552a157e425SAlexander Motin 	callout_tick();
553a157e425SAlexander Motin 	/* We are in charge of handling this tick duty. */
554a157e425SAlexander Motin 	if (newticks > 0) {
5554763a8b8SAlexander Motin 		/* Dangerous and unnecessary to call these things concurrently. */
5564763a8b8SAlexander Motin 		if (atomic_cmpset_acq_int(&global_hardclock_run, 0, 1)) {
5570e189873SAlexander Motin 			tc_ticktock(newticks);
558a157e425SAlexander Motin #ifdef DEVICE_POLLING
5594763a8b8SAlexander Motin 			/* This is very short and quick. */
5604763a8b8SAlexander Motin 			hardclock_device_poll();
561a157e425SAlexander Motin #endif /* DEVICE_POLLING */
5624763a8b8SAlexander Motin 			atomic_store_rel_int(&global_hardclock_run, 0);
5634763a8b8SAlexander Motin 		}
564a157e425SAlexander Motin #ifdef SW_WATCHDOG
565a157e425SAlexander Motin 		if (watchdog_enabled > 0) {
5664763a8b8SAlexander Motin 			i = atomic_fetchadd_int(&watchdog_ticks, -newticks);
5674763a8b8SAlexander Motin 			if (i > 0 && i <= newticks)
568a157e425SAlexander Motin 				watchdog_fire();
569a157e425SAlexander Motin 		}
570a157e425SAlexander Motin #endif /* SW_WATCHDOG */
571a157e425SAlexander Motin 	}
572a157e425SAlexander Motin 	if (curcpu == CPU_FIRST())
573a157e425SAlexander Motin 		cpu_tick_calibration();
574a157e425SAlexander Motin }
575a157e425SAlexander Motin 
576a157e425SAlexander Motin void
577a157e425SAlexander Motin hardclock_sync(int cpu)
578a157e425SAlexander Motin {
579a157e425SAlexander Motin 	int	*t = DPCPU_ID_PTR(cpu, pcputicks);
580a157e425SAlexander Motin 
581a157e425SAlexander Motin 	*t = ticks;
582a157e425SAlexander Motin }
583a157e425SAlexander Motin 
584df8bae1dSRodney W. Grimes /*
585227ee8a1SPoul-Henning Kamp  * Compute number of ticks in the specified amount of time.
586df8bae1dSRodney W. Grimes  */
587df8bae1dSRodney W. Grimes int
588227ee8a1SPoul-Henning Kamp tvtohz(tv)
589df8bae1dSRodney W. Grimes 	struct timeval *tv;
590df8bae1dSRodney W. Grimes {
5916976af69SBruce Evans 	register unsigned long ticks;
5926976af69SBruce Evans 	register long sec, usec;
593df8bae1dSRodney W. Grimes 
594df8bae1dSRodney W. Grimes 	/*
5956976af69SBruce Evans 	 * If the number of usecs in the whole seconds part of the time
5966976af69SBruce Evans 	 * difference fits in a long, then the total number of usecs will
5976976af69SBruce Evans 	 * fit in an unsigned long.  Compute the total and convert it to
5986976af69SBruce Evans 	 * ticks, rounding up and adding 1 to allow for the current tick
5996976af69SBruce Evans 	 * to expire.  Rounding also depends on unsigned long arithmetic
6006976af69SBruce Evans 	 * to avoid overflow.
601df8bae1dSRodney W. Grimes 	 *
6026976af69SBruce Evans 	 * Otherwise, if the number of ticks in the whole seconds part of
6036976af69SBruce Evans 	 * the time difference fits in a long, then convert the parts to
6046976af69SBruce Evans 	 * ticks separately and add, using similar rounding methods and
6056976af69SBruce Evans 	 * overflow avoidance.  This method would work in the previous
6066976af69SBruce Evans 	 * case but it is slightly slower and assumes that hz is integral.
6076976af69SBruce Evans 	 *
6086976af69SBruce Evans 	 * Otherwise, round the time difference down to the maximum
6096976af69SBruce Evans 	 * representable value.
6106976af69SBruce Evans 	 *
6116976af69SBruce Evans 	 * If ints have 32 bits, then the maximum value for any timeout in
6126976af69SBruce Evans 	 * 10ms ticks is 248 days.
613df8bae1dSRodney W. Grimes 	 */
614227ee8a1SPoul-Henning Kamp 	sec = tv->tv_sec;
615227ee8a1SPoul-Henning Kamp 	usec = tv->tv_usec;
6166976af69SBruce Evans 	if (usec < 0) {
6176976af69SBruce Evans 		sec--;
6186976af69SBruce Evans 		usec += 1000000;
6196976af69SBruce Evans 	}
6206976af69SBruce Evans 	if (sec < 0) {
6216976af69SBruce Evans #ifdef DIAGNOSTIC
622b05dcf3cSPoul-Henning Kamp 		if (usec > 0) {
6237ec73f64SPoul-Henning Kamp 			sec++;
6247ec73f64SPoul-Henning Kamp 			usec -= 1000000;
6257ec73f64SPoul-Henning Kamp 		}
626227ee8a1SPoul-Henning Kamp 		printf("tvtohz: negative time difference %ld sec %ld usec\n",
6276976af69SBruce Evans 		       sec, usec);
6286976af69SBruce Evans #endif
6296976af69SBruce Evans 		ticks = 1;
6306976af69SBruce Evans 	} else if (sec <= LONG_MAX / 1000000)
6316976af69SBruce Evans 		ticks = (sec * 1000000 + (unsigned long)usec + (tick - 1))
6326976af69SBruce Evans 			/ tick + 1;
6336976af69SBruce Evans 	else if (sec <= LONG_MAX / hz)
6346976af69SBruce Evans 		ticks = sec * hz
6356976af69SBruce Evans 			+ ((unsigned long)usec + (tick - 1)) / tick + 1;
6366976af69SBruce Evans 	else
6376976af69SBruce Evans 		ticks = LONG_MAX;
6386976af69SBruce Evans 	if (ticks > INT_MAX)
6396976af69SBruce Evans 		ticks = INT_MAX;
640d6116663SAlexander Langer 	return ((int)ticks);
641df8bae1dSRodney W. Grimes }
642df8bae1dSRodney W. Grimes 
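#if 0
/*
 * Illustrative sketch, not part of this file: a common in-kernel use of
 * tvtohz() is converting a relative timeval into the tick count expected
 * by tsleep(9).  The helper name, wait channel, priority and wmesg are
 * hypothetical.  For example, with hz = 1000 (tick = 1000 usec), a
 * 2500 usec timeout gives (2500 + 999) / 1000 + 1 = 4 ticks.
 */
static int
example_sleep(void *chan, struct timeval *tv)
{

	return (tsleep(chan, PWAIT, "examwt", tvtohz(tv)));
}
#endif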
643df8bae1dSRodney W. Grimes /*
644df8bae1dSRodney W. Grimes  * Start profiling on a process.
645df8bae1dSRodney W. Grimes  *
646df8bae1dSRodney W. Grimes  * Kernel profiling passes proc0 which never exits and hence
647df8bae1dSRodney W. Grimes  * keeps the profile clock running constantly.
648df8bae1dSRodney W. Grimes  */
649df8bae1dSRodney W. Grimes void
650df8bae1dSRodney W. Grimes startprofclock(p)
651df8bae1dSRodney W. Grimes 	register struct proc *p;
652df8bae1dSRodney W. Grimes {
653df8bae1dSRodney W. Grimes 
6549752f794SJohn Baldwin 	PROC_LOCK_ASSERT(p, MA_OWNED);
6559752f794SJohn Baldwin 	if (p->p_flag & P_STOPPROF)
656a282253aSJulian Elischer 		return;
6579752f794SJohn Baldwin 	if ((p->p_flag & P_PROFIL) == 0) {
6589752f794SJohn Baldwin 		p->p_flag |= P_PROFIL;
659875b8844SAlexander Motin 		mtx_lock(&time_lock);
660238dd320SJake Burkholder 		if (++profprocs == 1)
661238dd320SJake Burkholder 			cpu_startprofclock();
662875b8844SAlexander Motin 		mtx_unlock(&time_lock);
663df8bae1dSRodney W. Grimes 	}
6649752f794SJohn Baldwin }
665df8bae1dSRodney W. Grimes 
666df8bae1dSRodney W. Grimes /*
667df8bae1dSRodney W. Grimes  * Stop profiling on a process.
668df8bae1dSRodney W. Grimes  */
669df8bae1dSRodney W. Grimes void
670df8bae1dSRodney W. Grimes stopprofclock(p)
671df8bae1dSRodney W. Grimes 	register struct proc *p;
672df8bae1dSRodney W. Grimes {
673df8bae1dSRodney W. Grimes 
674a282253aSJulian Elischer 	PROC_LOCK_ASSERT(p, MA_OWNED);
6759752f794SJohn Baldwin 	if (p->p_flag & P_PROFIL) {
6769752f794SJohn Baldwin 		if (p->p_profthreads != 0) {
6779752f794SJohn Baldwin 			p->p_flag |= P_STOPPROF;
6789752f794SJohn Baldwin 			while (p->p_profthreads != 0)
679a282253aSJulian Elischer 				msleep(&p->p_profthreads, &p->p_mtx, PPAUSE,
680a89ec05eSPeter Wemm 				    "stopprof", 0);
6819752f794SJohn Baldwin 			p->p_flag &= ~P_STOPPROF;
682a282253aSJulian Elischer 		}
683b62b2304SColin Percival 		if ((p->p_flag & P_PROFIL) == 0)
684b62b2304SColin Percival 			return;
6859752f794SJohn Baldwin 		p->p_flag &= ~P_PROFIL;
686875b8844SAlexander Motin 		mtx_lock(&time_lock);
687238dd320SJake Burkholder 		if (--profprocs == 0)
688238dd320SJake Burkholder 			cpu_stopprofclock();
689875b8844SAlexander Motin 		mtx_unlock(&time_lock);
690df8bae1dSRodney W. Grimes 	}
6919752f794SJohn Baldwin }
692df8bae1dSRodney W. Grimes 
693df8bae1dSRodney W. Grimes /*
6941c4bcd05SJeff Roberson  * Statistics clock.  Updates rusage information and calls the scheduler
6951c4bcd05SJeff Roberson  * to adjust priorities of the active thread.
6961c4bcd05SJeff Roberson  *
697238dd320SJake Burkholder  * This should be called by all active processors.
698df8bae1dSRodney W. Grimes  */
699df8bae1dSRodney W. Grimes void
700b439e431SJohn Baldwin statclock(int usermode)
701df8bae1dSRodney W. Grimes {
702bcfd016cSAlexander Motin 
703bcfd016cSAlexander Motin 	statclock_cnt(1, usermode);
704bcfd016cSAlexander Motin }
705bcfd016cSAlexander Motin 
706bcfd016cSAlexander Motin void
707bcfd016cSAlexander Motin statclock_cnt(int cnt, int usermode)
708bcfd016cSAlexander Motin {
7098a129caeSDavid Greenman 	struct rusage *ru;
7108a129caeSDavid Greenman 	struct vmspace *vm;
711238dd320SJake Burkholder 	struct thread *td;
712238dd320SJake Burkholder 	struct proc *p;
713238dd320SJake Burkholder 	long rss;
7147628402bSPeter Wemm 	long *cp_time;
7158a129caeSDavid Greenman 
716238dd320SJake Burkholder 	td = curthread;
717238dd320SJake Burkholder 	p = td->td_proc;
718238dd320SJake Burkholder 
7197628402bSPeter Wemm 	cp_time = (long *)PCPU_PTR(cp_time);
720b439e431SJohn Baldwin 	if (usermode) {
721df8bae1dSRodney W. Grimes 		/*
72271a62f8aSBruce Evans 		 * Charge the time as appropriate.
723df8bae1dSRodney W. Grimes 		 */
724bcfd016cSAlexander Motin 		td->td_uticks += cnt;
725fa885116SJulian Elischer 		if (p->p_nice > NZERO)
726bcfd016cSAlexander Motin 			cp_time[CP_NICE] += cnt;
727df8bae1dSRodney W. Grimes 		else
728bcfd016cSAlexander Motin 			cp_time[CP_USER] += cnt;
729df8bae1dSRodney W. Grimes 	} else {
730df8bae1dSRodney W. Grimes 		/*
731df8bae1dSRodney W. Grimes 		 * Came from kernel mode, so we were:
732df8bae1dSRodney W. Grimes 		 * - handling an interrupt,
733df8bae1dSRodney W. Grimes 		 * - doing syscall or trap work on behalf of the current
734df8bae1dSRodney W. Grimes 		 *   user process, or
735df8bae1dSRodney W. Grimes 		 * - spinning in the idle loop.
736df8bae1dSRodney W. Grimes 		 * Whichever it is, charge the time as appropriate.
737df8bae1dSRodney W. Grimes 		 * Note that we charge interrupts to the current process,
738df8bae1dSRodney W. Grimes 		 * regardless of whether they are ``for'' that process,
739df8bae1dSRodney W. Grimes 		 * so that we know how much of its real time was spent
740df8bae1dSRodney W. Grimes 		 * in ``non-process'' (i.e., interrupt) work.
741df8bae1dSRodney W. Grimes 		 */
742e0f66ef8SJohn Baldwin 		if ((td->td_pflags & TDP_ITHREAD) ||
743e0f66ef8SJohn Baldwin 		    td->td_intr_nesting_level >= 2) {
744bcfd016cSAlexander Motin 			td->td_iticks += cnt;
745bcfd016cSAlexander Motin 			cp_time[CP_INTR] += cnt;
7460384fff8SJason Evans 		} else {
747bcfd016cSAlexander Motin 			td->td_pticks += cnt;
748bcfd016cSAlexander Motin 			td->td_sticks += cnt;
749486a9414SJulian Elischer 			if (!TD_IS_IDLETHREAD(td))
750bcfd016cSAlexander Motin 				cp_time[CP_SYS] += cnt;
7510384fff8SJason Evans 			else
752bcfd016cSAlexander Motin 				cp_time[CP_IDLE] += cnt;
753df8bae1dSRodney W. Grimes 		}
7540384fff8SJason Evans 	}
755f5e9e8ecSBruce Evans 
756f5e9e8ecSBruce Evans 	/* Update resource usage integrals and maximums. */
75716f9f205SJohn Baldwin 	MPASS(p->p_vmspace != NULL);
75816f9f205SJohn Baldwin 	vm = p->p_vmspace;
7591c4bcd05SJeff Roberson 	ru = &td->td_ru;
760bcfd016cSAlexander Motin 	ru->ru_ixrss += pgtok(vm->vm_tsize) * cnt;
761bcfd016cSAlexander Motin 	ru->ru_idrss += pgtok(vm->vm_dsize) * cnt;
762bcfd016cSAlexander Motin 	ru->ru_isrss += pgtok(vm->vm_ssize) * cnt;
7631c6d46f9SLuoqi Chen 	rss = pgtok(vmspace_resident_count(vm));
764f5e9e8ecSBruce Evans 	if (ru->ru_maxrss < rss)
765f5e9e8ecSBruce Evans 		ru->ru_maxrss = rss;
7668f51ad55SJeff Roberson 	KTR_POINT2(KTR_SCHED, "thread", sched_tdname(td), "statclock",
7678f51ad55SJeff Roberson 	    "prio:%d", td->td_priority, "stathz:%d", (stathz)?stathz:hz);
768*b3e9e682SRyan Stone 	SDT_PROBE2(sched, , , tick, td, td->td_proc);
7697628402bSPeter Wemm 	thread_lock_flags(td, MTX_QUIET);
770bcfd016cSAlexander Motin 	for ( ; cnt > 0; cnt--)
77140acdeabSJeff Roberson 		sched_clock(td);
77240acdeabSJeff Roberson 	thread_unlock(td);
773f5f9340bSFabien Thomas #ifdef HWPMC_HOOKS
774f5f9340bSFabien Thomas 	if (td->td_intr_frame != NULL)
775f5f9340bSFabien Thomas 		PMC_SOFT_CALL_TF( , , clock, stat, td->td_intr_frame);
776f5f9340bSFabien Thomas #endif
7776caa8a15SJohn Baldwin }
7786c567274SJohn Baldwin 
7796caa8a15SJohn Baldwin void
780b439e431SJohn Baldwin profclock(int usermode, uintfptr_t pc)
7816caa8a15SJohn Baldwin {
782bcfd016cSAlexander Motin 
783bcfd016cSAlexander Motin 	profclock_cnt(1, usermode, pc);
784bcfd016cSAlexander Motin }
785bcfd016cSAlexander Motin 
786bcfd016cSAlexander Motin void
787bcfd016cSAlexander Motin profclock_cnt(int cnt, int usermode, uintfptr_t pc)
788bcfd016cSAlexander Motin {
789238dd320SJake Burkholder 	struct thread *td;
790238dd320SJake Burkholder #ifdef GPROF
791238dd320SJake Burkholder 	struct gmonparam *g;
7925c8b4441SJohn Baldwin 	uintfptr_t i;
793238dd320SJake Burkholder #endif
7946caa8a15SJohn Baldwin 
7954a338afdSJulian Elischer 	td = curthread;
796b439e431SJohn Baldwin 	if (usermode) {
797238dd320SJake Burkholder 		/*
798238dd320SJake Burkholder 		 * Came from user mode; CPU was in user state.
799238dd320SJake Burkholder 		 * If this process is being profiled, record the tick;
800a282253aSJulian Elischer 		 * if there is no related user location yet, don't
801a282253aSJulian Elischer 		 * bother trying to count it.
802238dd320SJake Burkholder 		 */
8039752f794SJohn Baldwin 		if (td->td_proc->p_flag & P_PROFIL)
804bcfd016cSAlexander Motin 			addupc_intr(td, pc, cnt);
805238dd320SJake Burkholder 	}
806238dd320SJake Burkholder #ifdef GPROF
807238dd320SJake Burkholder 	else {
808238dd320SJake Burkholder 		/*
809238dd320SJake Burkholder 		 * Kernel statistics are just like addupc_intr, only easier.
810238dd320SJake Burkholder 		 */
811238dd320SJake Burkholder 		g = &_gmonparam;
812b439e431SJohn Baldwin 		if (g->state == GMON_PROF_ON && pc >= g->lowpc) {
813b439e431SJohn Baldwin 			i = PC_TO_I(g, pc);
814238dd320SJake Burkholder 			if (i < g->textsize) {
815bcfd016cSAlexander Motin 				KCOUNT(g, i) += cnt;
816238dd320SJake Burkholder 			}
817238dd320SJake Burkholder 		}
818238dd320SJake Burkholder 	}
819238dd320SJake Burkholder #endif
820df8bae1dSRodney W. Grimes }
821df8bae1dSRodney W. Grimes 
822df8bae1dSRodney W. Grimes /*
823df8bae1dSRodney W. Grimes  * Return information about system clocks.
824df8bae1dSRodney W. Grimes  */
825787d58f2SPoul-Henning Kamp static int
82682d9ae4eSPoul-Henning Kamp sysctl_kern_clockrate(SYSCTL_HANDLER_ARGS)
827df8bae1dSRodney W. Grimes {
828df8bae1dSRodney W. Grimes 	struct clockinfo clkinfo;
829df8bae1dSRodney W. Grimes 	/*
830df8bae1dSRodney W. Grimes 	 * Construct clockinfo structure.
831df8bae1dSRodney W. Grimes 	 */
832a9a0f15aSBruce Evans 	bzero(&clkinfo, sizeof(clkinfo));
833df8bae1dSRodney W. Grimes 	clkinfo.hz = hz;
834df8bae1dSRodney W. Grimes 	clkinfo.tick = tick;
835df8bae1dSRodney W. Grimes 	clkinfo.profhz = profhz;
836df8bae1dSRodney W. Grimes 	clkinfo.stathz = stathz ? stathz : hz;
837ae0eb976SPoul-Henning Kamp 	return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req));
838df8bae1dSRodney W. Grimes }
8393f31c649SGarrett Wollman 
840c383c221SEd Schouten SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate,
841c383c221SEd Schouten 	CTLTYPE_STRUCT|CTLFLAG_RD|CTLFLAG_MPSAFE,
842af1408e3SLuigi Rizzo 	0, 0, sysctl_kern_clockrate, "S,clockinfo",
843af1408e3SLuigi Rizzo 	"Rate and period of various kernel clocks");
844370c3cb5SSean Kelly 
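#if 0
/*
 * Illustrative userland consumer, not part of this file: the handler above
 * exports hz, tick, profhz and stathz as a struct clockinfo behind the
 * {CTL_KERN, KERN_CLOCKRATE} MIB.  Error handling is left to the caller.
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/time.h>		/* struct clockinfo */

static int
example_read_clockrate(struct clockinfo *ci)
{
	int mib[2] = { CTL_KERN, KERN_CLOCKRATE };
	size_t len = sizeof(*ci);

	return (sysctl(mib, 2, ci, &len, NULL, 0));
}
#endif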
8454103b765SPoul-Henning Kamp #ifdef SW_WATCHDOG
8464103b765SPoul-Henning Kamp 
8474103b765SPoul-Henning Kamp static void
8489079fff5SNick Hibma watchdog_config(void *unused __unused, u_int cmd, int *error)
849370c3cb5SSean Kelly {
8504103b765SPoul-Henning Kamp 	u_int u;
851370c3cb5SSean Kelly 
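	/*
	 * watchdog(9) encodes the timeout in WD_INTERVAL as a power of two
	 * of nanoseconds, so (u - WD_TO_1SEC) is roughly log2 of the timeout
	 * in seconds and the tick count below doubles with each step.
	 */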
8524103b765SPoul-Henning Kamp 	u = cmd & WD_INTERVAL;
8539079fff5SNick Hibma 	if (u >= WD_TO_1SEC) {
8544103b765SPoul-Henning Kamp 		watchdog_ticks = (1 << (u - WD_TO_1SEC)) * hz;
8554103b765SPoul-Henning Kamp 		watchdog_enabled = 1;
8569079fff5SNick Hibma 		*error = 0;
8574103b765SPoul-Henning Kamp 	} else {
8584103b765SPoul-Henning Kamp 		watchdog_enabled = 0;
859370c3cb5SSean Kelly 	}
8604103b765SPoul-Henning Kamp }
861370c3cb5SSean Kelly 
862370c3cb5SSean Kelly /*
863370c3cb5SSean Kelly  * Handle a watchdog timeout by dumping interrupt information and
864911d16b8SEd Maste  * then either dropping to DDB or panicking.
865370c3cb5SSean Kelly  */
866370c3cb5SSean Kelly static void
867370c3cb5SSean Kelly watchdog_fire(void)
868370c3cb5SSean Kelly {
869370c3cb5SSean Kelly 	int nintr;
87060ae52f7SEd Schouten 	uint64_t inttotal;
871370c3cb5SSean Kelly 	u_long *curintr;
872370c3cb5SSean Kelly 	char *curname;
873370c3cb5SSean Kelly 
874370c3cb5SSean Kelly 	curintr = intrcnt;
875370c3cb5SSean Kelly 	curname = intrnames;
876370c3cb5SSean Kelly 	inttotal = 0;
877556a5850SAlexander Motin 	nintr = sintrcnt / sizeof(u_long);
878370c3cb5SSean Kelly 
879370c3cb5SSean Kelly 	printf("interrupt                   total\n");
880370c3cb5SSean Kelly 	while (--nintr >= 0) {
881370c3cb5SSean Kelly 		if (*curintr)
882370c3cb5SSean Kelly 			printf("%-12s %20lu\n", curname, *curintr);
883370c3cb5SSean Kelly 		curname += strlen(curname) + 1;
884370c3cb5SSean Kelly 		inttotal += *curintr++;
885370c3cb5SSean Kelly 	}
8866cda4155SSean Kelly 	printf("Total        %20ju\n", (uintmax_t)inttotal);
887911d16b8SEd Maste 
888911d16b8SEd Maste #if defined(KDB) && !defined(KDB_UNATTENDED)
889911d16b8SEd Maste 	kdb_backtrace();
8903de213ccSRobert Watson 	kdb_enter(KDB_WHY_WATCHDOG, "watchdog timeout");
891911d16b8SEd Maste #else
892370c3cb5SSean Kelly 	panic("watchdog timeout");
893911d16b8SEd Maste #endif
894370c3cb5SSean Kelly }
895370c3cb5SSean Kelly 
8964103b765SPoul-Henning Kamp #endif /* SW_WATCHDOG */
897