/* xref: /freebsd/sys/kern/kern_clock.c (revision 35a04710d7286aa9538917fd7f8e417dbee95b82) */
/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kdb.h"
#include "opt_device_polling.h"
#include "opt_hwpmc_hooks.h"
#include "opt_ntp.h"
#include "opt_watchdog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <sys/sysctl.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/limits.h>
#include <sys/timetc.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#ifdef DEVICE_POLLING
extern void hardclock_device_poll(void);
#endif /* DEVICE_POLLING */

static void initclocks(void *dummy);
SYSINIT(clocks, SI_SUB_CLOCKS, SI_ORDER_FIRST, initclocks, NULL);

/* Some of these don't belong here, but it's easiest to concentrate them. */
long cp_time[CPUSTATES];

/* Spin-lock protecting profiling statistics. */
static struct mtx time_lock;

static int
sysctl_kern_cp_time(SYSCTL_HANDLER_ARGS)
{
	int error;
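	/*
	 * For a 32-bit compatibility request (a 32-bit binary on a
	 * 64-bit kernel), each long counter is truncated to 32 bits
	 * before being copied out.
	 */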
#ifdef SCTL_MASK32
	int i;
	unsigned int cp_time32[CPUSTATES];

	if (req->flags & SCTL_MASK32) {
		if (!req->oldptr)
			return (SYSCTL_OUT(req, 0, sizeof(cp_time32)));
		for (i = 0; i < CPUSTATES; i++)
			cp_time32[i] = (unsigned int)cp_time[i];
		error = SYSCTL_OUT(req, cp_time32, sizeof(cp_time32));
	} else
#endif
	{
		if (!req->oldptr)
			return (SYSCTL_OUT(req, 0, sizeof(cp_time)));
		error = SYSCTL_OUT(req, cp_time, sizeof(cp_time));
	}
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, cp_time, CTLTYPE_LONG|CTLFLAG_RD,
    0, 0, sysctl_kern_cp_time, "LU", "CPU time statistics");

#ifdef SW_WATCHDOG
#include <sys/watchdog.h>

static int watchdog_ticks;
static int watchdog_enabled;
static void watchdog_fire(void);
static void watchdog_config(void *, u_int, int *);
#endif /* SW_WATCHDOG */

/*
 * Clock handling routines.
 *
 * This code is written to operate with two timers that run independently of
 * each other.
 *
 * The main timer, running hz times per second, is used to trigger interval
 * timers, timeouts and rescheduling as needed.
 *
 * The second timer handles kernel and user profiling,
 * and does resource use estimation.  If the second timer is programmable,
 * it is randomized to avoid aliasing between the two clocks.  For example,
 * the randomization prevents an adversary from always giving up the cpu
 * just before its quantum expires.  Otherwise, it would never accumulate
 * cpu ticks.  The mean frequency of the second timer is stathz.
 *
 * If no second timer exists, stathz will be zero; in this case we drive
 * profiling and statistics off the main clock.  This WILL NOT be accurate;
 * do not do it unless absolutely necessary.
 *
 * The statistics clock may (or may not) be run at a higher rate while
 * profiling.  This profile clock runs at profhz.  We require that profhz
 * be an integral multiple of stathz.
 *
 * If the statistics clock is running fast, it must be divided by the ratio
 * profhz/stathz for statistics.  (For profiling, every tick counts.)
 *
 * Time-of-day is maintained using a "timecounter", which may or may
 * not be related to the hardware generating the above mentioned
 * interrupts.
 */

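/*
 * stathz is the statistics clock frequency (0 if no separate clock
 * exists), profhz the profiling clock frequency, and psratio their
 * ratio.  profprocs counts processes currently being profiled, and
 * ticks counts hardclock() invocations since boot.
 */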
int	stathz;
int	profhz;
int	profprocs;
int	ticks;
int	psratio;

/*
 * Initialize clock frequencies and start both clocks running.
 */
static void
initclocks(void *dummy)
{
	int i;

	/*
	 * Set divisors to 1 (normal case) and let the machine-specific
	 * code do its bit.
	 */
	mtx_init(&time_lock, "time lock", NULL, MTX_SPIN);
	cpu_initclocks();

	/*
	 * Compute profhz/stathz, and fix profhz if needed.
	 */
	i = stathz ? stathz : hz;
	if (profhz == 0)
		profhz = i;
	psratio = profhz / i;
#ifdef SW_WATCHDOG
	EVENTHANDLER_REGISTER(watchdog_list, watchdog_config, NULL, 0);
#endif
}

/*
 * Each time the real-time timer fires, this function is called on all CPUs.
 * Note that hardclock() calls hardclock_cpu() for the boot CPU, so only
 * the other CPUs in the system need to call this function.
 */
void
hardclock_cpu(int usermode)
{
	struct pstats *pstats;
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int flags;

	/*
	 * Run current process's virtual and profile time, as needed.
	 */
	pstats = p->p_stats;
	flags = 0;
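	/*
	 * itimerdecr() returns 0 once the interval timer has expired;
	 * TDF_ASTPENDING forces an AST on the way back to user mode, at
	 * which point SIGVTALRM or SIGPROF is posted to the process.
	 */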
	if (usermode &&
	    timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value)) {
		PROC_SLOCK(p);
		if (itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0)
			flags |= TDF_ALRMPEND | TDF_ASTPENDING;
		PROC_SUNLOCK(p);
	}
	if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value)) {
		PROC_SLOCK(p);
		if (itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0)
			flags |= TDF_PROFPEND | TDF_ASTPENDING;
		PROC_SUNLOCK(p);
	}
	thread_lock(td);
	sched_tick();
	td->td_flags |= flags;
	thread_unlock(td);

#ifdef	HWPMC_HOOKS
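	/*
	 * If hwpmc(4) has sampled program counters buffered on this
	 * CPU, give the driver a chance to process them now.
	 */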
	if (PMC_CPU_HAS_SAMPLES(PCPU_GET(cpuid)))
		PMC_CALL_HOOK_UNLOCKED(curthread, PMC_FN_DO_SAMPLES, NULL);
#endif
}

/*
 * The real-time timer, interrupting hz times per second.
 */
void
hardclock(int usermode, uintfptr_t pc)
{
	int need_softclock = 0;

	hardclock_cpu(usermode);

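	/* Let the timecounter machinery update its time-of-day state. */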
	tc_ticktock();
	/*
	 * If no separate statistics clock is available, run it from here.
	 *
	 * XXX: this only works for UP
	 */
	if (stathz == 0) {
		profclock(usermode, pc);
		statclock(usermode);
	}

#ifdef DEVICE_POLLING
	hardclock_device_poll();	/* this is very short and quick */
#endif /* DEVICE_POLLING */

	/*
	 * Process callouts at a very low cpu priority, so we don't keep the
	 * relatively high clock interrupt priority any longer than necessary.
	 */
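	/*
	 * If the callwheel bucket for the new tick is empty and
	 * softclock() has otherwise caught up, advance softticks in
	 * place so the softclock thread need not run for this tick.
	 */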
	mtx_lock_spin_flags(&callout_lock, MTX_QUIET);
	ticks++;
	if (!TAILQ_EMPTY(&callwheel[ticks & callwheelmask])) {
		need_softclock = 1;
	} else if (softticks + 1 == ticks)
		++softticks;
	mtx_unlock_spin_flags(&callout_lock, MTX_QUIET);

	/*
	 * swi_sched acquires the thread lock, so we don't want to call it
	 * with callout_lock held; incorrect locking order.
	 */
	if (need_softclock)
		swi_sched(softclock_ih, 0);

#ifdef SW_WATCHDOG
	if (watchdog_enabled > 0 && --watchdog_ticks <= 0)
		watchdog_fire();
#endif /* SW_WATCHDOG */
}

/*
 * Compute number of ticks in the specified amount of time.
 */
int
tvtohz(struct timeval *tv)
{
	unsigned long ticks;
	long sec, usec;

	/*
	 * If the number of usecs in the whole seconds part of the time
	 * difference fits in a long, then the total number of usecs will
	 * fit in an unsigned long.  Compute the total and convert it to
	 * ticks, rounding up and adding 1 to allow for the current tick
	 * to expire.  Rounding also depends on unsigned long arithmetic
	 * to avoid overflow.
	 *
	 * Otherwise, if the number of ticks in the whole seconds part of
	 * the time difference fits in a long, then convert the parts to
	 * ticks separately and add, using similar rounding methods and
	 * overflow avoidance.  This method would work in the previous
	 * case but it is slightly slower and assumes that hz is integral.
	 *
	 * Otherwise, round the time difference down to the maximum
	 * representable value.
	 *
	 * If ints have 32 bits, then the maximum value for any timeout in
	 * 10ms ticks is 248 days.
	 */
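	/*
	 * For example, with hz = 1000 (tick = 1000 usec), a timeval of
	 * 1500 usec yields (1500 + 999) / 1000 + 1 = 3 ticks.
	 */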
	sec = tv->tv_sec;
	usec = tv->tv_usec;
	if (usec < 0) {
		sec--;
		usec += 1000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (usec > 0) {
			sec++;
			usec -= 1000000;
		}
		printf("tvtohz: negative time difference %ld sec %ld usec\n",
		       sec, usec);
#endif
		ticks = 1;
	} else if (sec <= LONG_MAX / 1000000)
		ticks = (sec * 1000000 + (unsigned long)usec + (tick - 1))
			/ tick + 1;
	else if (sec <= LONG_MAX / hz)
		ticks = sec * hz
			+ ((unsigned long)usec + (tick - 1)) / tick + 1;
	else
		ticks = LONG_MAX;
	if (ticks > INT_MAX)
		ticks = INT_MAX;
	return ((int)ticks);
}

/*
 * Start profiling on a process.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(struct proc *p)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (p->p_flag & P_STOPPROF)
		return;
	if ((p->p_flag & P_PROFIL) == 0) {
		p->p_flag |= P_PROFIL;
		mtx_lock_spin(&time_lock);
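		/* The first profiled process starts the profiling clock. */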
		if (++profprocs == 1)
			cpu_startprofclock();
		mtx_unlock_spin(&time_lock);
	}
}

/*
 * Stop profiling on a process.
 */
void
stopprofclock(struct proc *p)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (p->p_flag & P_PROFIL) {
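		/*
		 * Threads may still be inside a profiling update; flag
		 * the stop and sleep until the last of them drains out
		 * before turning profiling off.
		 */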
		if (p->p_profthreads != 0) {
			p->p_flag |= P_STOPPROF;
			while (p->p_profthreads != 0)
				msleep(&p->p_profthreads, &p->p_mtx, PPAUSE,
				    "stopprof", 0);
			p->p_flag &= ~P_STOPPROF;
		}
		if ((p->p_flag & P_PROFIL) == 0)
			return;
		p->p_flag &= ~P_PROFIL;
		mtx_lock_spin(&time_lock);
		if (--profprocs == 0)
			cpu_stopprofclock();
		mtx_unlock_spin(&time_lock);
	}
}

/*
 * Statistics clock.  Updates rusage information and calls the scheduler
 * to adjust priorities of the active thread.
 *
 * This should be called by all active processors.
 */
void
statclock(int usermode)
{
	struct rusage *ru;
	struct vmspace *vm;
	struct thread *td;
	struct proc *p;
	long rss;

	td = curthread;
	p = td->td_proc;

	thread_lock_flags(td, MTX_QUIET);
	if (usermode) {
		/*
		 * Charge the time as appropriate.
		 */
#ifdef KSE
		if (p->p_flag & P_SA)
			thread_statclock(1);
#endif
		td->td_uticks++;
		if (p->p_nice > NZERO)
			atomic_add_long(&cp_time[CP_NICE], 1);
		else
			atomic_add_long(&cp_time[CP_USER], 1);
	} else {
		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 */
		if ((td->td_pflags & TDP_ITHREAD) ||
		    td->td_intr_nesting_level >= 2) {
			td->td_iticks++;
			atomic_add_long(&cp_time[CP_INTR], 1);
		} else {
#ifdef KSE
			if (p->p_flag & P_SA)
				thread_statclock(0);
#endif
			td->td_pticks++;
			td->td_sticks++;
			if (!TD_IS_IDLETHREAD(td))
				atomic_add_long(&cp_time[CP_SYS], 1);
			else
				atomic_add_long(&cp_time[CP_IDLE], 1);
		}
	}

	/* Update resource usage integrals and maximums. */
	MPASS(p->p_vmspace != NULL);
	vm = p->p_vmspace;
	ru = &td->td_ru;
	ru->ru_ixrss += pgtok(vm->vm_tsize);
	ru->ru_idrss += pgtok(vm->vm_dsize);
	ru->ru_isrss += pgtok(vm->vm_ssize);
	rss = pgtok(vmspace_resident_count(vm));
	if (ru->ru_maxrss < rss)
		ru->ru_maxrss = rss;
	CTR4(KTR_SCHED, "statclock: %p(%s) prio %d stathz %d",
	    td, td->td_name, td->td_priority, stathz ? stathz : hz);
	sched_clock(td);
	thread_unlock(td);
}

void
profclock(int usermode, uintfptr_t pc)
{
	struct thread *td;
#ifdef GPROF
	struct gmonparam *g;
	uintfptr_t i;
#endif

	td = curthread;
	if (usermode) {
		/*
		 * Came from user mode; CPU was in user state.
		 * If this process is being profiled, record the tick.
		 * If there is no related user location yet, don't
		 * bother trying to count it.
		 */
		if (td->td_proc->p_flag & P_PROFIL)
			addupc_intr(td, pc, 1);
	}
#ifdef GPROF
	else {
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
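		/*
		 * PC_TO_I() converts the kernel text address to an index
		 * into the profiling histogram; ticks landing outside
		 * the profiled text range are simply dropped.
		 */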
		if (g->state == GMON_PROF_ON && pc >= g->lowpc) {
			i = PC_TO_I(g, pc);
			if (i < g->textsize) {
				KCOUNT(g, i)++;
			}
		}
	}
#endif
}

/*
 * Return information about system clocks.
 */
static int
sysctl_kern_clockrate(SYSCTL_HANDLER_ARGS)
{
	struct clockinfo clkinfo;

	/*
	 * Construct clockinfo structure.
	 */
	bzero(&clkinfo, sizeof(clkinfo));
	clkinfo.hz = hz;
	clkinfo.tick = tick;
	clkinfo.profhz = profhz;
	clkinfo.stathz = stathz ? stathz : hz;
	return (sysctl_handle_opaque(oidp, &clkinfo, sizeof(clkinfo), req));
}

SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate, CTLTYPE_STRUCT|CTLFLAG_RD,
	0, 0, sysctl_kern_clockrate, "S,clockinfo",
	"Rate and period of various kernel clocks");

#ifdef SW_WATCHDOG

static void
watchdog_config(void *unused __unused, u_int cmd, int *error)
{
	u_int u;

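	/*
	 * The watchdog interval is encoded as a power of two in
	 * nanoseconds; WD_TO_1SEC (2^30 ns) is roughly one second, so
	 * anything shorter (including a zero interval) disables the
	 * software watchdog.
	 */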
	u = cmd & WD_INTERVAL;
	if (u >= WD_TO_1SEC) {
		watchdog_ticks = (1 << (u - WD_TO_1SEC)) * hz;
		watchdog_enabled = 1;
		*error = 0;
	} else {
		watchdog_enabled = 0;
	}
}

/*
 * Handle a watchdog timeout by dumping interrupt information and
 * then either dropping to DDB or panicking.
 */
static void
watchdog_fire(void)
{
	int nintr;
	u_int64_t inttotal;
	u_long *curintr;
	char *curname;

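	/*
	 * intrcnt[] and eintrcnt bound the machine-dependent table of
	 * interrupt counters; intrnames holds the matching names as
	 * consecutive NUL-terminated strings.
	 */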
	curintr = intrcnt;
	curname = intrnames;
	inttotal = 0;
	nintr = eintrcnt - intrcnt;

	printf("interrupt                   total\n");
	while (--nintr >= 0) {
		if (*curintr)
			printf("%-12s %20lu\n", curname, *curintr);
		curname += strlen(curname) + 1;
		inttotal += *curintr++;
	}
	printf("Total        %20ju\n", (uintmax_t)inttotal);

#if defined(KDB) && !defined(KDB_UNATTENDED)
	kdb_backtrace();
	kdb_enter("watchdog timeout");
#else
	panic("watchdog timeout");
#endif
}

#endif /* SW_WATCHDOG */