/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kdb.h"
#include "opt_device_polling.h"
#include "opt_hwpmc_hooks.h"
#include "opt_kdtrace.h"
#include "opt_ntp.h"
#include "opt_watchdog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/smp.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <sys/sysctl.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/limits.h>
#include <sys/timetc.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DEFINE( , , clock, hard);
PMC_SOFT_DEFINE( , , clock, stat);
PMC_SOFT_DEFINE( , , clock, prof);
#endif

#ifdef DEVICE_POLLING
extern void hardclock_device_poll(void);
#endif /* DEVICE_POLLING */

static void initclocks(void *dummy);
SYSINIT(clocks, SI_SUB_CLOCKS, SI_ORDER_FIRST, initclocks, NULL);

/* Mutex protecting profiling statistics. */
static struct mtx time_lock;

SDT_PROVIDER_DECLARE(sched);
SDT_PROBE_DEFINE2(sched, , , tick, tick, "struct thread *", "struct proc *");

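/*
 * Export the aggregated cp_time[] statistics via the kern.cp_time sysctl,
 * truncating the values to 32 bits for 32-bit compatibility requests.
 */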
static int
sysctl_kern_cp_time(SYSCTL_HANDLER_ARGS)
{
	int error;
	long cp_time[CPUSTATES];
#ifdef SCTL_MASK32
	int i;
	unsigned int cp_time32[CPUSTATES];
#endif

	read_cpu_time(cp_time);
#ifdef SCTL_MASK32
	if (req->flags & SCTL_MASK32) {
		if (!req->oldptr)
			return SYSCTL_OUT(req, 0, sizeof(cp_time32));
		for (i = 0; i < CPUSTATES; i++)
			cp_time32[i] = (unsigned int)cp_time[i];
		error = SYSCTL_OUT(req, cp_time32, sizeof(cp_time32));
	} else
#endif
	{
		if (!req->oldptr)
			return SYSCTL_OUT(req, 0, sizeof(cp_time));
		error = SYSCTL_OUT(req, cp_time, sizeof(cp_time));
	}
	return error;
}

SYSCTL_PROC(_kern, OID_AUTO, cp_time, CTLTYPE_LONG|CTLFLAG_RD|CTLFLAG_MPSAFE,
    0,0, sysctl_kern_cp_time, "LU", "CPU time statistics");

static long empty[CPUSTATES];

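/*
 * Export the per-CPU cp_time arrays via the kern.cp_times sysctl; absent
 * CPUs are reported as all-zero entries.
 */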
static int
sysctl_kern_cp_times(SYSCTL_HANDLER_ARGS)
{
	struct pcpu *pcpu;
	int error;
	int c;
	long *cp_time;
#ifdef SCTL_MASK32
	unsigned int cp_time32[CPUSTATES];
	int i;
#endif

	if (!req->oldptr) {
#ifdef SCTL_MASK32
		if (req->flags & SCTL_MASK32)
			return SYSCTL_OUT(req, 0, sizeof(cp_time32) * (mp_maxid + 1));
		else
#endif
			return SYSCTL_OUT(req, 0, sizeof(long) * CPUSTATES * (mp_maxid + 1));
	}
	for (error = 0, c = 0; error == 0 && c <= mp_maxid; c++) {
		if (!CPU_ABSENT(c)) {
			pcpu = pcpu_find(c);
			cp_time = pcpu->pc_cp_time;
		} else {
			cp_time = empty;
		}
#ifdef SCTL_MASK32
		if (req->flags & SCTL_MASK32) {
			for (i = 0; i < CPUSTATES; i++)
				cp_time32[i] = (unsigned int)cp_time[i];
			error = SYSCTL_OUT(req, cp_time32, sizeof(cp_time32));
		} else
#endif
			error = SYSCTL_OUT(req, cp_time, sizeof(long) * CPUSTATES);
	}
	return error;
}

SYSCTL_PROC(_kern, OID_AUTO, cp_times, CTLTYPE_LONG|CTLFLAG_RD|CTLFLAG_MPSAFE,
    0,0, sysctl_kern_cp_times, "LU", "per-CPU time statistics");

#ifdef DEADLKRES
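/*
 * Wait channel names that are allowed to sleep for a long time without
 * being flagged by the deadlock resolver.
 */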
static const char *blessed[] = {
	"getblk",
	"so_snd_sx",
	"so_rcv_sx",
	NULL
};
static int slptime_threshold = 1800;
static int blktime_threshold = 900;
static int sleepfreq = 3;

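/*
 * Deadlock resolver thread: periodically scan all threads and panic if
 * one has been blocked on a turnstile, or sleeping on a lock-related
 * sleepqueue, for longer than the configured thresholds.
 */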
static void
deadlkres(void)
{
	struct proc *p;
	struct thread *td;
	void *wchan;
	int blkticks, i, slpticks, slptype, tryl, tticks;

	tryl = 0;
	for (;;) {
		blkticks = blktime_threshold * hz;
		slpticks = slptime_threshold * hz;

		/*
		 * Avoid sleeping on the sx_lock in order to avoid a possible
		 * priority inversion problem leading to starvation.
		 * If the lock can't be acquired after 100 tries, panic.
		 */
		if (!sx_try_slock(&allproc_lock)) {
			if (tryl > 100)
		panic("%s: possible deadlock detected on allproc_lock\n",
				    __func__);
			tryl++;
			pause("allproc", sleepfreq * hz);
			continue;
		}
		tryl = 0;
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NEW) {
				PROC_UNLOCK(p);
				continue;
			}
			FOREACH_THREAD_IN_PROC(p, td) {

				/*
				 * Once a thread is found in an "interesting"
				 * state, a possible wrap-around of the ticks
				 * counter needs to be checked.
				 */
				thread_lock(td);
				if (TD_ON_LOCK(td) && ticks < td->td_blktick) {

					/*
					 * The thread should be blocked on a
					 * turnstile; simply check whether the
					 * turnstile channel is in good state.
					 */
					MPASS(td->td_blocked != NULL);

					tticks = ticks - td->td_blktick;
					thread_unlock(td);
					if (tticks > blkticks) {

						/*
						 * According to the provided
						 * thresholds, this thread has
						 * been stuck on a turnstile
						 * for too long.
						 */
						PROC_UNLOCK(p);
						sx_sunlock(&allproc_lock);
	panic("%s: possible deadlock detected for %p, blocked for %d ticks\n",
						    __func__, td, tticks);
					}
				} else if (TD_IS_SLEEPING(td) &&
				    TD_ON_SLEEPQ(td) &&
				    ticks < td->td_blktick) {

					/*
					 * Check whether the thread is sleeping
					 * on a lock, otherwise skip the check.
					 * Drop the thread lock in order to
					 * avoid a LOR with the sleepqueue
					 * spinlock.
					 */
					wchan = td->td_wchan;
					tticks = ticks - td->td_slptick;
					thread_unlock(td);
					slptype = sleepq_type(wchan);
					if ((slptype == SLEEPQ_SX ||
					    slptype == SLEEPQ_LK) &&
					    tticks > slpticks) {

						/*
						 * According to the provided
						 * thresholds, this thread has
						 * been stuck on a sleepqueue
						 * for too long.  However,
						 * being on a sleepqueue, we
						 * might still check the
						 * blessed list.
						 */
						tryl = 0;
						for (i = 0; blessed[i] != NULL;
						    i++) {
							if (!strcmp(blessed[i],
							    td->td_wmesg)) {
								tryl = 1;
								break;
							}
						}
						if (tryl != 0) {
							tryl = 0;
							continue;
						}
						PROC_UNLOCK(p);
						sx_sunlock(&allproc_lock);
	panic("%s: possible deadlock detected for %p, blocked for %d ticks\n",
						    __func__, td, tticks);
					}
				} else
					thread_unlock(td);
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);

		/* Sleep for sleepfreq seconds. */
		pause("-", sleepfreq * hz);
	}
}

static struct kthread_desc deadlkres_kd = {
	"deadlkres",
	deadlkres,
	(struct thread **)NULL
};

SYSINIT(deadlkres, SI_SUB_CLOCKS, SI_ORDER_ANY, kthread_start, &deadlkres_kd);

static SYSCTL_NODE(_debug, OID_AUTO, deadlkres, CTLFLAG_RW, 0,
    "Deadlock resolver");
SYSCTL_INT(_debug_deadlkres, OID_AUTO, slptime_threshold, CTLFLAG_RW,
    &slptime_threshold, 0,
    "Number of seconds within which it is valid to sleep on a sleepqueue");
SYSCTL_INT(_debug_deadlkres, OID_AUTO, blktime_threshold, CTLFLAG_RW,
    &blktime_threshold, 0,
    "Number of seconds within which it is valid to block on a turnstile");
SYSCTL_INT(_debug_deadlkres, OID_AUTO, sleepfreq, CTLFLAG_RW, &sleepfreq, 0,
    "Number of seconds between deadlock resolver thread runs");
#endif	/* DEADLKRES */

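/*
 * Aggregate the per-CPU cp_time arrays into the array provided by the
 * caller.
 */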
void
read_cpu_time(long *cp_time)
{
	struct pcpu *pc;
	int i, j;

	/* Sum up global cp_time[]. */
	bzero(cp_time, sizeof(long) * CPUSTATES);
	CPU_FOREACH(i) {
		pc = pcpu_find(i);
		for (j = 0; j < CPUSTATES; j++)
			cp_time[j] += pc->pc_cp_time[j];
	}
}

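/*
 * Optional software watchdog: configuration hook and timeout handler.
 */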
#ifdef SW_WATCHDOG
#include <sys/watchdog.h>

static int watchdog_ticks;
static int watchdog_enabled;
static void watchdog_fire(void);
static void watchdog_config(void *, u_int, int *);
#endif /* SW_WATCHDOG */

/*
 * Clock handling routines.
 *
 * This code is written to operate with two timers that run independently of
 * each other.
 *
 * The main timer, running hz times per second, is used to trigger interval
 * timers, timeouts and rescheduling as needed.
 *
 * The second timer handles kernel and user profiling,
 * and does resource use estimation.  If the second timer is programmable,
 * it is randomized to avoid aliasing between the two clocks.  For example,
 * the randomization prevents an adversary from always giving up the cpu
 * just before its quantum expires.  Otherwise, it would never accumulate
 * cpu ticks.  The mean frequency of the second timer is stathz.
 *
 * If no second timer exists, stathz will be zero; in this case we drive
 * profiling and statistics off the main clock.  This WILL NOT be accurate;
 * do not do it unless absolutely necessary.
 *
 * The statistics clock may (or may not) be run at a higher rate while
 * profiling.  This profile clock runs at profhz.  We require that profhz
 * be an integral multiple of stathz.
 *
 * If the statistics clock is running fast, it must be divided by the ratio
 * profhz/stathz for statistics.  (For profiling, every tick counts.)
 *
 * Time-of-day is maintained using a "timecounter", which may or may
 * not be related to the hardware generating the above mentioned
 * interrupts.
 */

int	stathz;
int	profhz;
int	profprocs;
volatile int	ticks;
int	psratio;

static DPCPU_DEFINE(int, pcputicks);	/* Per-CPU version of ticks. */
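/* Non-zero while one CPU is handling the global hardclock duties. */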
static int global_hardclock_run = 0;

/*
 * Initialize clock frequencies and start both clocks running.
 */
/* ARGSUSED*/
static void
initclocks(dummy)
	void *dummy;
{
	register int i;

	/*
	 * Set divisors to 1 (normal case) and let the machine-specific
	 * code do its bit.
	 */
	mtx_init(&time_lock, "time lock", NULL, MTX_DEF);
	cpu_initclocks();

	/*
	 * Compute profhz/stathz, and fix profhz if needed.
	 */
	i = stathz ? stathz : hz;
	if (profhz == 0)
		profhz = i;
	psratio = profhz / i;
#ifdef SW_WATCHDOG
	EVENTHANDLER_REGISTER(watchdog_list, watchdog_config, NULL, 0);
#endif
}

/*
 * Each time the real-time timer fires, this function is called on all CPUs.
 * Note that hardclock() calls hardclock_cpu() for the boot CPU, so only
 * the other CPUs in the system need to call this function.
 */
void
hardclock_cpu(int usermode)
{
	struct pstats *pstats;
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int flags;

	/*
	 * Run current process's virtual and profile time, as needed.
	 */
	pstats = p->p_stats;
	flags = 0;
	if (usermode &&
	    timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value)) {
		PROC_SLOCK(p);
		if (itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0)
			flags |= TDF_ALRMPEND | TDF_ASTPENDING;
		PROC_SUNLOCK(p);
	}
	if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value)) {
		PROC_SLOCK(p);
		if (itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0)
			flags |= TDF_PROFPEND | TDF_ASTPENDING;
		PROC_SUNLOCK(p);
	}
	thread_lock(td);
	sched_tick(1);
	td->td_flags |= flags;
	thread_unlock(td);

#ifdef HWPMC_HOOKS
	if (PMC_CPU_HAS_SAMPLES(PCPU_GET(cpuid)))
		PMC_CALL_HOOK_UNLOCKED(curthread, PMC_FN_DO_SAMPLES, NULL);
	if (td->td_intr_frame != NULL)
		PMC_SOFT_CALL_TF( , , clock, hard, td->td_intr_frame);
#endif
	callout_tick();
}

/*
 * The real-time timer, interrupting hz times per second.
 */
void
hardclock(int usermode, uintfptr_t pc)
{

	atomic_add_int(&ticks, 1);
	hardclock_cpu(usermode);
	tc_ticktock(1);
	cpu_tick_calibration();
	/*
	 * If no separate statistics clock is available, run it from here.
	 *
	 * XXX: this only works for UP
	 */
	if (stathz == 0) {
		profclock(usermode, pc);
		statclock(usermode);
	}
#ifdef DEVICE_POLLING
	hardclock_device_poll();	/* this is very short and quick */
#endif /* DEVICE_POLLING */
#ifdef SW_WATCHDOG
	if (watchdog_enabled > 0 && --watchdog_ticks <= 0)
		watchdog_fire();
#endif /* SW_WATCHDOG */
}

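/*
 * Multi-tick variant of hardclock(): account for "cnt" ticks at once,
 * merging the per-CPU tick count into the global ticks value.  Only the
 * CPU that advances the global value runs the global duties (timecounter
 * update, polling, software watchdog).
 */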
void
hardclock_cnt(int cnt, int usermode)
{
	struct pstats *pstats;
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int *t = DPCPU_PTR(pcputicks);
	int flags, global, newticks;
#ifdef SW_WATCHDOG
	int i;
#endif /* SW_WATCHDOG */

	/*
	 * Update per-CPU and possibly global ticks values.
	 */
	*t += cnt;
	do {
		global = ticks;
		newticks = *t - global;
		if (newticks <= 0) {
			if (newticks < -1)
				*t = global - 1;
			newticks = 0;
			break;
		}
	} while (!atomic_cmpset_int(&ticks, global, *t));

	/*
	 * Run current process's virtual and profile time, as needed.
	 */
	pstats = p->p_stats;
	flags = 0;
	if (usermode &&
	    timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value)) {
		PROC_SLOCK(p);
		if (itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL],
		    tick * cnt) == 0)
			flags |= TDF_ALRMPEND | TDF_ASTPENDING;
		PROC_SUNLOCK(p);
	}
	if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value)) {
		PROC_SLOCK(p);
		if (itimerdecr(&pstats->p_timer[ITIMER_PROF],
		    tick * cnt) == 0)
			flags |= TDF_PROFPEND | TDF_ASTPENDING;
		PROC_SUNLOCK(p);
	}
	thread_lock(td);
	sched_tick(cnt);
	td->td_flags |= flags;
	thread_unlock(td);

#ifdef	HWPMC_HOOKS
	if (PMC_CPU_HAS_SAMPLES(PCPU_GET(cpuid)))
		PMC_CALL_HOOK_UNLOCKED(curthread, PMC_FN_DO_SAMPLES, NULL);
	if (td->td_intr_frame != NULL)
		PMC_SOFT_CALL_TF( , , clock, hard, td->td_intr_frame);
#endif
	callout_tick();
	/* We are in charge of handling this tick duty. */
	if (newticks > 0) {
		/* It is dangerous and unnecessary to call these concurrently. */
		if (atomic_cmpset_acq_int(&global_hardclock_run, 0, 1)) {
			tc_ticktock(newticks);
#ifdef DEVICE_POLLING
			/* This is very short and quick. */
			hardclock_device_poll();
#endif /* DEVICE_POLLING */
			atomic_store_rel_int(&global_hardclock_run, 0);
		}
#ifdef SW_WATCHDOG
		if (watchdog_enabled > 0) {
			i = atomic_fetchadd_int(&watchdog_ticks, -newticks);
			if (i > 0 && i <= newticks)
				watchdog_fire();
		}
#endif /* SW_WATCHDOG */
	}
	if (curcpu == CPU_FIRST())
		cpu_tick_calibration();
}

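/*
 * Reset the given CPU's private tick counter to the current global ticks
 * value.
 */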
void
hardclock_sync(int cpu)
{
	int	*t = DPCPU_ID_PTR(cpu, pcputicks);

	*t = ticks;
}

/*
 * Compute number of ticks in the specified amount of time.
 */
int
tvtohz(tv)
	struct timeval *tv;
{
	register unsigned long ticks;
	register long sec, usec;

	/*
	 * If the number of usecs in the whole seconds part of the time
	 * difference fits in a long, then the total number of usecs will
	 * fit in an unsigned long.  Compute the total and convert it to
	 * ticks, rounding up and adding 1 to allow for the current tick
	 * to expire.  Rounding also depends on unsigned long arithmetic
	 * to avoid overflow.
	 *
	 * Otherwise, if the number of ticks in the whole seconds part of
	 * the time difference fits in a long, then convert the parts to
	 * ticks separately and add, using similar rounding methods and
	 * overflow avoidance.  This method would work in the previous
	 * case but it is slightly slower and assumes that hz is integral.
	 *
	 * Otherwise, round the time difference down to the maximum
	 * representable value.
	 *
	 * If ints have 32 bits, then the maximum value for any timeout in
	 * 10ms ticks is 248 days.
	 */
	sec = tv->tv_sec;
	usec = tv->tv_usec;
	if (usec < 0) {
		sec--;
		usec += 1000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (usec > 0) {
			sec++;
			usec -= 1000000;
		}
		printf("tvtohz: negative time difference %ld sec %ld usec\n",
		       sec, usec);
#endif
		ticks = 1;
	} else if (sec <= LONG_MAX / 1000000)
		ticks = (sec * 1000000 + (unsigned long)usec + (tick - 1))
			/ tick + 1;
	else if (sec <= LONG_MAX / hz)
		ticks = sec * hz
			+ ((unsigned long)usec + (tick - 1)) / tick + 1;
	else
		ticks = LONG_MAX;
	if (ticks > INT_MAX)
		ticks = INT_MAX;
	return ((int)ticks);
}

/*
 * Start profiling on a process.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(p)
	register struct proc *p;
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (p->p_flag & P_STOPPROF)
		return;
	if ((p->p_flag & P_PROFIL) == 0) {
		p->p_flag |= P_PROFIL;
		mtx_lock(&time_lock);
		if (++profprocs == 1)
			cpu_startprofclock();
		mtx_unlock(&time_lock);
	}
}

/*
 * Stop profiling on a process.
 */
void
stopprofclock(p)
	register struct proc *p;
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (p->p_flag & P_PROFIL) {
		if (p->p_profthreads != 0) {
			p->p_flag |= P_STOPPROF;
			while (p->p_profthreads != 0)
				msleep(&p->p_profthreads, &p->p_mtx, PPAUSE,
				    "stopprof", 0);
			p->p_flag &= ~P_STOPPROF;
		}
		if ((p->p_flag & P_PROFIL) == 0)
			return;
		p->p_flag &= ~P_PROFIL;
		mtx_lock(&time_lock);
		if (--profprocs == 0)
			cpu_stopprofclock();
		mtx_unlock(&time_lock);
	}
}

/*
 * Statistics clock.  Updates rusage information and calls the scheduler
 * to adjust priorities of the active thread.
 *
 * This should be called by all active processors.
 */
void
statclock(int usermode)
{

	statclock_cnt(1, usermode);
}

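/*
 * Multi-tick variant of statclock(): charge "cnt" statistics ticks to the
 * current thread.
 */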
void
statclock_cnt(int cnt, int usermode)
{
	struct rusage *ru;
	struct vmspace *vm;
	struct thread *td;
	struct proc *p;
	long rss;
	long *cp_time;

	td = curthread;
	p = td->td_proc;

	cp_time = (long *)PCPU_PTR(cp_time);
	if (usermode) {
		/*
		 * Charge the time as appropriate.
		 */
		td->td_uticks += cnt;
		if (p->p_nice > NZERO)
			cp_time[CP_NICE] += cnt;
		else
			cp_time[CP_USER] += cnt;
	} else {
		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 */
		if ((td->td_pflags & TDP_ITHREAD) ||
		    td->td_intr_nesting_level >= 2) {
			td->td_iticks += cnt;
			cp_time[CP_INTR] += cnt;
		} else {
			td->td_pticks += cnt;
			td->td_sticks += cnt;
			if (!TD_IS_IDLETHREAD(td))
				cp_time[CP_SYS] += cnt;
			else
				cp_time[CP_IDLE] += cnt;
		}
	}

	/* Update resource usage integrals and maximums. */
	MPASS(p->p_vmspace != NULL);
	vm = p->p_vmspace;
	ru = &td->td_ru;
	ru->ru_ixrss += pgtok(vm->vm_tsize) * cnt;
	ru->ru_idrss += pgtok(vm->vm_dsize) * cnt;
	ru->ru_isrss += pgtok(vm->vm_ssize) * cnt;
	rss = pgtok(vmspace_resident_count(vm));
	if (ru->ru_maxrss < rss)
		ru->ru_maxrss = rss;
	KTR_POINT2(KTR_SCHED, "thread", sched_tdname(td), "statclock",
	    "prio:%d", td->td_priority, "stathz:%d", (stathz)?stathz:hz);
	SDT_PROBE2(sched, , , tick, td, td->td_proc);
	thread_lock_flags(td, MTX_QUIET);
	for ( ; cnt > 0; cnt--)
		sched_clock(td);
	thread_unlock(td);
#ifdef HWPMC_HOOKS
	if (td->td_intr_frame != NULL)
		PMC_SOFT_CALL_TF( , , clock, stat, td->td_intr_frame);
#endif
}

void
profclock(int usermode, uintfptr_t pc)
{

	profclock_cnt(1, usermode, pc);
}

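/*
 * Multi-tick variant of profclock(): record "cnt" profiling ticks at the
 * given program counter.
 */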
void
profclock_cnt(int cnt, int usermode, uintfptr_t pc)
{
	struct thread *td;
#ifdef GPROF
	struct gmonparam *g;
	uintfptr_t i;
#endif

	td = curthread;
	if (usermode) {
		/*
		 * Came from user mode; CPU was in user state.
		 * If this process is being profiled, record the tick.
		 * If there is no related user location yet, don't
		 * bother trying to count it.
		 */
		if (td->td_proc->p_flag & P_PROFIL)
			addupc_intr(td, pc, cnt);
	}
#ifdef GPROF
	else {
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON && pc >= g->lowpc) {
			i = PC_TO_I(g, pc);
			if (i < g->textsize) {
				KCOUNT(g, i) += cnt;
			}
		}
	}
#endif
#ifdef HWPMC_HOOKS
	if (td->td_intr_frame != NULL)
		PMC_SOFT_CALL_TF( , , clock, prof, td->td_intr_frame);
#endif
}

/*
 * Return information about system clocks.
 */
static int
sysctl_kern_clockrate(SYSCTL_HANDLER_ARGS)
{
	struct clockinfo clkinfo;
	/*
	 * Construct clockinfo structure.
	 */
	bzero(&clkinfo, sizeof(clkinfo));
	clkinfo.hz = hz;
	clkinfo.tick = tick;
	clkinfo.profhz = profhz;
	clkinfo.stathz = stathz ? stathz : hz;
	return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req));
}

SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate,
	CTLTYPE_STRUCT|CTLFLAG_RD|CTLFLAG_MPSAFE,
	0, 0, sysctl_kern_clockrate, "S,clockinfo",
	"Rate and period of various kernel clocks");

#ifdef SW_WATCHDOG

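/*
 * Configure the software watchdog: translate the power-of-two timeout
 * encoded in the command into ticks and enable the watchdog, or disable
 * it if the requested interval is shorter than one second.
 */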
static void
watchdog_config(void *unused __unused, u_int cmd, int *error)
{
	u_int u;

	u = cmd & WD_INTERVAL;
	if (u >= WD_TO_1SEC) {
		watchdog_ticks = (1 << (u - WD_TO_1SEC)) * hz;
		watchdog_enabled = 1;
		*error = 0;
	} else {
		watchdog_enabled = 0;
	}
}

/*
 * Handle a watchdog timeout by dumping interrupt information and
 * then either dropping to DDB or panicking.
 */
static void
watchdog_fire(void)
{
	int nintr;
	uint64_t inttotal;
	u_long *curintr;
	char *curname;

	curintr = intrcnt;
	curname = intrnames;
	inttotal = 0;
	nintr = sintrcnt / sizeof(u_long);

	printf("interrupt                   total\n");
	while (--nintr >= 0) {
		if (*curintr)
			printf("%-12s %20lu\n", curname, *curintr);
		curname += strlen(curname) + 1;
		inttotal += *curintr++;
	}
	printf("Total        %20ju\n", (uintmax_t)inttotal);

#if defined(KDB) && !defined(KDB_UNATTENDED)
	kdb_backtrace();
	kdb_enter(KDB_WHY_WATCHDOG, "watchdog timeout");
#else
	panic("watchdog timeout");
#endif
}

#endif /* SW_WATCHDOG */