/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_device_polling.h"
#include "opt_hwpmc_hooks.h"
#include "opt_ntp.h"
#include "opt_watchdog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <sys/sysctl.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/limits.h>
#include <sys/timetc.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#ifdef DEVICE_POLLING
extern void hardclock_device_poll(void);
#endif /* DEVICE_POLLING */

static void initclocks(void *dummy);
SYSINIT(clocks, SI_SUB_CLOCKS, SI_ORDER_FIRST, initclocks, NULL)

/* Some of these don't belong here, but it's easiest to concentrate them. */
long cp_time[CPUSTATES];

static int
sysctl_kern_cp_time(SYSCTL_HANDLER_ARGS)
{
	int error;
#ifdef SCTL_MASK32
	int i;
	unsigned int cp_time32[CPUSTATES];

	if (req->flags & SCTL_MASK32) {
		if (!req->oldptr)
			return (SYSCTL_OUT(req, 0, sizeof(cp_time32)));
		for (i = 0; i < CPUSTATES; i++)
			cp_time32[i] = (unsigned int)cp_time[i];
		error = SYSCTL_OUT(req, cp_time32, sizeof(cp_time32));
	} else
#endif
	{
		if (!req->oldptr)
			return (SYSCTL_OUT(req, 0, sizeof(cp_time)));
		error = SYSCTL_OUT(req, cp_time, sizeof(cp_time));
	}
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, cp_time, CTLTYPE_LONG|CTLFLAG_RD,
    0, 0, sysctl_kern_cp_time, "LU", "CPU time statistics");
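
/*
 * Usage sketch (userland, illustrative only): the statistics exported
 * above are normally read with sysctl(3).  A minimal reader, assuming
 * only the standard kern.cp_time interface:
 *
 *	#include <sys/types.h>
 *	#include <sys/resource.h>	// CPUSTATES, CP_USER, ...
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *
 *	long cp[CPUSTATES];
 *	size_t len = sizeof(cp);
 *
 *	if (sysctlbyname("kern.cp_time", cp, &len, NULL, 0) == 0)
 *		printf("user ticks: %ld\n", cp[CP_USER]);
 *
 * A 32-bit consumer on a 64-bit kernel takes the SCTL_MASK32 branch
 * in the handler and receives unsigned ints rather than longs.
 */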

#ifdef SW_WATCHDOG
#include <sys/watchdog.h>

static int watchdog_ticks;
static int watchdog_enabled;
static void watchdog_fire(void);
static void watchdog_config(void *, u_int, int *);
#endif /* SW_WATCHDOG */

/*
 * Clock handling routines.
 *
 * This code is written to operate with two timers that run independently of
 * each other.
 *
 * The main timer, running hz times per second, is used to trigger interval
 * timers, timeouts and rescheduling as needed.
 *
 * The second timer handles kernel and user profiling,
 * and does resource use estimation.  If the second timer is programmable,
 * it is randomized to avoid aliasing between the two clocks.  For example,
 * the randomization prevents an adversary from always giving up the cpu
 * just before its quantum expires.  Otherwise, it would never accumulate
 * cpu ticks.  The mean frequency of the second timer is stathz.
 *
 * If no second timer exists, stathz will be zero; in this case we drive
 * profiling and statistics off the main clock.  This WILL NOT be accurate;
 * do not do it unless absolutely necessary.
 *
 * The statistics clock may (or may not) be run at a higher rate while
 * profiling.  This profile clock runs at profhz.  We require that profhz
 * be an integral multiple of stathz.
 *
 * If the statistics clock is running fast, it must be divided by the ratio
 * profhz/stathz for statistics.  (For profiling, every tick counts.)
 *
 * Time-of-day is maintained using a "timecounter", which may or may
 * not be related to the hardware generating the above mentioned
 * interrupts.
 */

int	stathz;
int	profhz;
int	profprocs;
int	ticks;
int	psratio;

/*
 * Initialize clock frequencies and start both clocks running.
 */
/* ARGSUSED*/
static void
initclocks(void *dummy)
{
	int i;

	/*
	 * Set divisors to 1 (normal case) and let the machine-specific
	 * code do its bit.
	 */
	cpu_initclocks();

	/*
	 * Compute profhz/stathz, and fix profhz if needed.
	 */
	i = stathz ? stathz : hz;
	if (profhz == 0)
		profhz = i;
	psratio = profhz / i;
#ifdef SW_WATCHDOG
	EVENTHANDLER_REGISTER(watchdog_list, watchdog_config, NULL, 0);
#endif
}
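
/*
 * Worked example (hypothetical rates): with stathz = 128 and
 * profhz = 1024, the code above computes psratio = 1024 / 128 = 8,
 * the divisor statistics consumers apply while the fast profiling
 * clock is running.  With no separate statistics timer (stathz == 0),
 * i falls back to hz, profhz defaults to hz, and psratio is 1.
 */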

/*
 * Each time the real-time timer fires, this function is called on all CPUs.
 * Note that hardclock() calls hardclock_cpu() for the boot CPU, so only
 * the other CPUs in the system need to call this function.
 */
void
hardclock_cpu(int usermode)
{
	struct pstats *pstats;
	struct thread *td = curthread;
	struct proc *p = td->td_proc;

	/*
	 * Run current process's virtual and profile time, as needed.
	 */
	mtx_lock_spin_flags(&sched_lock, MTX_QUIET);
	if (p->p_flag & P_SA) {
		/* XXXKSE What to do? */
	} else {
		pstats = p->p_stats;
		if (usermode &&
		    timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) {
			p->p_sflag |= PS_ALRMPEND;
			td->td_flags |= TDF_ASTPENDING;
		}
		if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) {
			p->p_sflag |= PS_PROFPEND;
			td->td_flags |= TDF_ASTPENDING;
		}
	}
	mtx_unlock_spin_flags(&sched_lock, MTX_QUIET);

#ifdef HWPMC_HOOKS
	if (PMC_CPU_HAS_SAMPLES(PCPU_GET(cpuid)))
		PMC_CALL_HOOK_UNLOCKED(curthread, PMC_FN_DO_SAMPLES, NULL);
#endif
}

/*
 * The real-time timer, interrupting hz times per second.
 */
void
hardclock(int usermode, uintfptr_t pc)
{
	int need_softclock = 0;

	hardclock_cpu(usermode);

	tc_ticktock();
	/*
	 * If no separate statistics clock is available, run it from here.
	 *
	 * XXX: this only works for UP
	 */
	if (stathz == 0) {
		profclock(usermode, pc);
		statclock(usermode);
	}

#ifdef DEVICE_POLLING
	hardclock_device_poll();	/* this is very short and quick */
#endif /* DEVICE_POLLING */

	/*
	 * Process callouts at a very low cpu priority, so we don't keep the
	 * relatively high clock interrupt priority any longer than necessary.
	 */
	mtx_lock_spin_flags(&callout_lock, MTX_QUIET);
	ticks++;
	if (TAILQ_FIRST(&callwheel[ticks & callwheelmask]) != NULL) {
		need_softclock = 1;
	} else if (softticks + 1 == ticks)
		++softticks;
	mtx_unlock_spin_flags(&callout_lock, MTX_QUIET);

	/*
	 * swi_sched acquires sched_lock, so we don't want to call it with
	 * callout_lock held; incorrect locking order.
	 */
	if (need_softclock)
		swi_sched(softclock_ih, 0);

#ifdef SW_WATCHDOG
	if (watchdog_enabled > 0 && --watchdog_ticks <= 0)
		watchdog_fire();
#endif /* SW_WATCHDOG */
}
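
/*
 * Callwheel sketch (illustrative values): callwheelsize is a power of
 * two, so "ticks & callwheelmask" above hashes each tick into a fixed
 * bucket.  Assuming callwheelsize = 256 (callwheelmask = 255), a
 * callout scheduled 1000 ticks ahead at ticks = 5000 gets
 * c_time = 6000 and lives in bucket 6000 & 255 = 112; softclock()
 * skips it each time the wheel passes that bucket until softticks
 * actually reaches its c_time.
 */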

/*
 * Compute number of ticks in the specified amount of time.
 */
int
tvtohz(struct timeval *tv)
{
	unsigned long ticks;
	long sec, usec;

	/*
	 * If the number of usecs in the whole seconds part of the time
	 * difference fits in a long, then the total number of usecs will
	 * fit in an unsigned long.  Compute the total and convert it to
	 * ticks, rounding up and adding 1 to allow for the current tick
	 * to expire.  Rounding also depends on unsigned long arithmetic
	 * to avoid overflow.
	 *
	 * Otherwise, if the number of ticks in the whole seconds part of
	 * the time difference fits in a long, then convert the parts to
	 * ticks separately and add, using similar rounding methods and
	 * overflow avoidance.  This method would work in the previous
	 * case but it is slightly slower and assumes that hz is integral.
	 *
	 * Otherwise, round the time difference down to the maximum
	 * representable value.
	 *
	 * If ints have 32 bits, then the maximum value for any timeout in
	 * 10ms ticks is 248 days.
	 */
	sec = tv->tv_sec;
	usec = tv->tv_usec;
	if (usec < 0) {
		sec--;
		usec += 1000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (usec > 0) {
			sec++;
			usec -= 1000000;
		}
		printf("tvtohz: negative time difference %ld sec %ld usec\n",
		       sec, usec);
#endif
		ticks = 1;
	} else if (sec <= LONG_MAX / 1000000)
		ticks = (sec * 1000000 + (unsigned long)usec + (tick - 1))
			/ tick + 1;
	else if (sec <= LONG_MAX / hz)
		ticks = sec * hz
			+ ((unsigned long)usec + (tick - 1)) / tick + 1;
	else
		ticks = LONG_MAX;
	if (ticks > INT_MAX)
		ticks = INT_MAX;
	return ((int)ticks);
}
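
/*
 * Worked example (assuming hz = 100, so tick = 10000): a timeval of
 * { .tv_sec = 1, .tv_usec = 500 } takes the first branch above:
 * (1000000 + 500 + 9999) / 10000 + 1 = 101 + 1 = 102 ticks.  The
 * 1.0005 s interval rounds up to 101 ticks, plus one extra tick so
 * the timeout cannot fire before the full interval has elapsed.
 */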

/*
 * Start profiling on a process.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(struct proc *p)
{

	/*
	 * XXX: Right now sched_lock protects statclock(), but perhaps
	 * it should be protected later on by a time_lock, which would
	 * cover psdiv, etc. as well.
	 */
	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (p->p_flag & P_STOPPROF)
		return;
	if ((p->p_flag & P_PROFIL) == 0) {
		mtx_lock_spin(&sched_lock);
		p->p_flag |= P_PROFIL;
		if (++profprocs == 1)
			cpu_startprofclock();
		mtx_unlock_spin(&sched_lock);
	}
}

/*
 * Stop profiling on a process.
 */
void
stopprofclock(struct proc *p)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (p->p_flag & P_PROFIL) {
		if (p->p_profthreads != 0) {
			p->p_flag |= P_STOPPROF;
			while (p->p_profthreads != 0)
				msleep(&p->p_profthreads, &p->p_mtx, PPAUSE,
				    "stopprof", 0);
			p->p_flag &= ~P_STOPPROF;
		}
		if ((p->p_flag & P_PROFIL) == 0)
			return;
		mtx_lock_spin(&sched_lock);
		p->p_flag &= ~P_PROFIL;
		if (--profprocs == 0)
			cpu_stopprofclock();
		mtx_unlock_spin(&sched_lock);
	}
}
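
/*
 * Caller sketch (not the verbatim syscall code): both routines above
 * assert the process lock, so the profil(2) path does roughly:
 *
 *	PROC_LOCK(p);
 *	if (scale > 0)
 *		startprofclock(p);
 *	else
 *		stopprofclock(p);
 *	PROC_UNLOCK(p);
 */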

/*
 * Statistics clock.  Grab profile sample, and if divider reaches 0,
 * do process and kernel statistics.  Most of the statistics are only
 * used by user-level statistics programs.  The main exceptions are
 * td->td_uticks, p->p_rux.rux_sticks, p->p_rux.rux_iticks, and p->p_estcpu.
 * This should be called by all active processors.
 */
void
statclock(int usermode)
{
	struct rusage *ru;
	struct vmspace *vm;
	struct thread *td;
	struct proc *p;
	long rss;

	td = curthread;
	p = td->td_proc;

	mtx_lock_spin_flags(&sched_lock, MTX_QUIET);
	if (usermode) {
		/*
		 * Charge the time as appropriate.
		 */
		if (p->p_flag & P_SA)
			thread_statclock(1);
		td->td_uticks++;
		if (p->p_nice > NZERO)
			cp_time[CP_NICE]++;
		else
			cp_time[CP_USER]++;
	} else {
		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 */
		if ((td->td_pflags & TDP_ITHREAD) ||
		    td->td_intr_nesting_level >= 2) {
			td->td_iticks++;
			cp_time[CP_INTR]++;
		} else {
			if (p->p_flag & P_SA)
				thread_statclock(0);
			td->td_pticks++;
			td->td_sticks++;
			if (td != PCPU_GET(idlethread))
				cp_time[CP_SYS]++;
			else
				cp_time[CP_IDLE]++;
		}
	}
	CTR4(KTR_SCHED, "statclock: %p(%s) prio %d stathz %d",
	    td, td->td_proc->p_comm, td->td_priority, stathz ? stathz : hz);

	sched_clock(td);

	/* Update resource usage integrals and maximums. */
	MPASS(p->p_stats != NULL);
	MPASS(p->p_vmspace != NULL);
	vm = p->p_vmspace;
	ru = &p->p_stats->p_ru;
	ru->ru_ixrss += pgtok(vm->vm_tsize);
	ru->ru_idrss += pgtok(vm->vm_dsize);
	ru->ru_isrss += pgtok(vm->vm_ssize);
	rss = pgtok(vmspace_resident_count(vm));
	if (ru->ru_maxrss < rss)
		ru->ru_maxrss = rss;
	mtx_unlock_spin_flags(&sched_lock, MTX_QUIET);
}

void
profclock(int usermode, uintfptr_t pc)
{
	struct thread *td;
#ifdef GPROF
	struct gmonparam *g;
	uintfptr_t i;
#endif

	td = curthread;
	if (usermode) {
		/*
		 * Came from user mode; CPU was in user state.
		 * If this process is being profiled, record the tick.
		 * If there is no related user location yet, don't
		 * bother trying to count it.
		 */
		if (td->td_proc->p_flag & P_PROFIL)
			addupc_intr(td, pc, 1);
	}
#ifdef GPROF
	else {
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON && pc >= g->lowpc) {
			i = PC_TO_I(g, pc);
			if (i < g->textsize) {
				KCOUNT(g, i)++;
			}
		}
	}
#endif
}
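
/*
 * Example of the user-mode path above (hypothetical numbers): a
 * process that has armed profiling with
 *
 *	profil(buf, buflen, offset, scale);
 *
 * gets one addupc_intr() count per profclock tick taken in user mode,
 * so a fully CPU-bound process accumulates roughly profhz counts per
 * second, spread over whichever buffer slots its user pc values map
 * to.
 */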

/*
 * Return information about system clocks.
 */
static int
sysctl_kern_clockrate(SYSCTL_HANDLER_ARGS)
{
	struct clockinfo clkinfo;

	/*
	 * Construct clockinfo structure.
	 */
	bzero(&clkinfo, sizeof(clkinfo));
	clkinfo.hz = hz;
	clkinfo.tick = tick;
	clkinfo.profhz = profhz;
	clkinfo.stathz = stathz ? stathz : hz;
	return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req));
}

SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate, CTLTYPE_STRUCT|CTLFLAG_RD,
	0, 0, sysctl_kern_clockrate, "S,clockinfo",
	"Rate and period of various kernel clocks");
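
/*
 * Usage sketch (userland, illustrative only): the structure built
 * above is commonly fetched with sysctl(3):
 *
 *	#include <sys/time.h>	// struct clockinfo
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *
 *	struct clockinfo ci;
 *	size_t len = sizeof(ci);
 *
 *	if (sysctlbyname("kern.clockrate", &ci, &len, NULL, 0) == 0)
 *		printf("hz=%d stathz=%d profhz=%d\n",
 *		    ci.hz, ci.stathz, ci.profhz);
 *
 * This is how utilities that convert ticks to seconds learn the
 * kernel's clock rates.
 */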

#ifdef SW_WATCHDOG

static void
watchdog_config(void *unused __unused, u_int cmd, int *err)
{
	u_int u;

	u = cmd & WD_INTERVAL;
	if ((cmd & WD_ACTIVE) && u >= WD_TO_1SEC) {
		watchdog_ticks = (1 << (u - WD_TO_1SEC)) * hz;
		watchdog_enabled = 1;
		*err = 0;
	} else {
		watchdog_enabled = 0;
	}
}
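
/*
 * Worked example (assuming the sys/watchdog.h encoding, where each
 * WD_TO_* constant is a power-of-two-nanosecond exponent): a command
 * of (WD_ACTIVE | WD_TO_4SEC) gives u - WD_TO_1SEC == 2, so the code
 * above arms watchdog_ticks = (1 << 2) * hz = 4 * hz hardclock ticks,
 * roughly four seconds of silence before watchdog_fire() runs.
 */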

/*
 * Handle a watchdog timeout by dumping interrupt information and
 * then either dropping to DDB or panicking.
 */
static void
watchdog_fire(void)
{
	int nintr;
	u_int64_t inttotal;
	u_long *curintr;
	char *curname;

	curintr = intrcnt;
	curname = intrnames;
	inttotal = 0;
	nintr = eintrcnt - intrcnt;

	printf("interrupt                   total\n");
	while (--nintr >= 0) {
		if (*curintr)
			printf("%-12s %20lu\n", curname, *curintr);
		curname += strlen(curname) + 1;
		inttotal += *curintr++;
	}
	printf("Total        %20ju\n", (uintmax_t)inttotal);

#ifdef KDB
	kdb_backtrace();
	kdb_enter("watchdog timeout");
#else
	panic("watchdog timeout");
#endif /* KDB */
}

#endif /* SW_WATCHDOG */