xref: /freebsd/sys/kern/kern_time.c (revision 0de89efe5c443f213c7ea28773ef2dc6cf3af2ed)
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.1 (Berkeley) 6/10/93
 * $Id: kern_time.c,v 1.34 1997/09/02 20:05:49 bde Exp $
 */

#include <sys/param.h>
#include <sys/sysproto.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysent.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>

struct timezone tz;

/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */

static int	nanosleep1 __P((struct proc *p, struct timespec *rqt,
		    struct timespec *rmt));
static int	settime __P((struct timeval *));
static void	timevalfix __P((struct timeval *));

static int
settime(tv)
	struct timeval *tv;
{
	struct timeval delta;
	struct proc *p;
	int s;

	/*
	 * Must not set clock backwards in highly secure mode.
	 */
	s = splclock();
	delta.tv_sec = tv->tv_sec - time.tv_sec;
	delta.tv_usec = tv->tv_usec - time.tv_usec;
	splx(s);
	timevalfix(&delta);
	if (delta.tv_sec < 0 && securelevel > 1)
		return (EPERM);

	s = splclock();
	/*
	 * Recalculate delta directly to minimize clock interrupt
	 * latency.  Fix it after the ipl has been lowered.
	 */
	delta.tv_sec = tv->tv_sec - time.tv_sec;
	delta.tv_usec = tv->tv_usec - time.tv_usec;
	time = *tv;
	/*
	 * XXX should arrange for microtime() to agree with *tv if
	 * it is called now.  As it is, it may add up to about
	 * `tick' unwanted usec.
	 * Another problem is that clock interrupts may occur at
	 * other than multiples of `tick'.  It's not worth fixing
	 * this here, since the problem is also caused by tick
	 * adjustments.
	 */
	(void) splsoftclock();
	timevalfix(&delta);
	timevaladd(&boottime, &delta);
	timevaladd(&runtime, &delta);
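	/*
	 * Per-process real-time timers (it_value) and nanosleep() end
	 * times (p_sleepend) are stored as absolute times, so shift them
	 * by the same delta to keep them firing at the intended instants.
	 */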
	for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
		if (timerisset(&p->p_realtimer.it_value))
			timevaladd(&p->p_realtimer.it_value, &delta);
		if (p->p_sleepend)
			timevaladd(p->p_sleepend, &delta);
	}
#ifdef NFS
	lease_updatetime(delta.tv_sec);
#endif
	splx(s);
	resettodr();
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct clock_gettime_args {
	clockid_t clock_id;
	struct	timespec *tp;
};
#endif

/* ARGSUSED */
int
clock_gettime(p, uap, retval)
	struct proc *p;
	struct clock_gettime_args *uap;
	register_t *retval;
{
	struct timeval atv;
	struct timespec ats;

	if (SCARG(uap, clock_id) != CLOCK_REALTIME)
		return (EINVAL);
	microtime(&atv);
	TIMEVAL_TO_TIMESPEC(&atv, &ats);
	return (copyout(&ats, SCARG(uap, tp), sizeof(ats)));
}

#ifndef _SYS_SYSPROTO_H_
struct clock_settime_args {
	clockid_t clock_id;
	const struct	timespec *tp;
};
#endif

/* ARGSUSED */
int
clock_settime(p, uap, retval)
	struct proc *p;
	struct clock_settime_args *uap;
	register_t *retval;
{
	struct timeval atv;
	struct timespec ats;
	int error;

	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);
	if (SCARG(uap, clock_id) != CLOCK_REALTIME)
		return (EINVAL);
	if ((error = copyin(SCARG(uap, tp), &ats, sizeof(ats))) != 0)
		return (error);
	if (ats.tv_nsec < 0 || ats.tv_nsec >= 1000000000)
		return (EINVAL);
	TIMESPEC_TO_TIMEVAL(&atv, &ats);
	if ((error = settime(&atv)))
		return (error);
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct clock_getres_args {
	clockid_t clock_id;
	struct	timespec *tp;
};
#endif

int
clock_getres(p, uap, retval)
	struct proc *p;
	struct clock_getres_args *uap;
	register_t *retval;
{
	struct timespec ts;
	int error;

	if (SCARG(uap, clock_id) != CLOCK_REALTIME)
		return (EINVAL);
	error = 0;
	if (SCARG(uap, tp)) {
		ts.tv_sec = 0;
		ts.tv_nsec = 1000000000 / hz;
		error = copyout(&ts, SCARG(uap, tp), sizeof(ts));
	}
	return (error);
}

static int nanowait;

static int
nanosleep1(p, rqt, rmt)
	struct proc *p;
	struct timespec *rqt, *rmt;
{
	struct timeval atv, utv, rtv;
	int error, s, timo, i, n;

	if (rqt->tv_nsec < 0 || rqt->tv_nsec >= 1000000000)
		return (EINVAL);
	if (rqt->tv_sec < 0 || rqt->tv_sec == 0 && rqt->tv_nsec == 0)
		return (0);
	TIMESPEC_TO_TIMEVAL(&atv, rqt)

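	/*
	 * itimerfix() rejects timeouts longer than 100000000 seconds, so
	 * split oversized requests into n maximal chunks plus a remainder
	 * rtv; the loop below then sleeps once per chunk.
	 */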
	if (itimerfix(&atv)) {
		n = atv.tv_sec / 100000000;
		rtv = atv;
		rtv.tv_sec %= 100000000;
		(void)itimerfix(&rtv);
	} else
		n = 0;

	for (i = 0, error = EWOULDBLOCK; i <= n && error == EWOULDBLOCK; i++) {
		if (n > 0) {
			if (i == n)
				atv = rtv;
			else {
				atv.tv_sec = 100000000;
				atv.tv_usec = 0;
			}
		}
		/*
		 * XXX this is not as careful as settimeofday() about minimising
		 * interrupt latency.  The hzto() interface is inconvenient as usual.
		 */
		s = splclock();
		timevaladd(&atv, &time);
		timo = hzto(&atv);
		splx(s);

		p->p_sleepend = &atv;
		error = tsleep(&nanowait, PWAIT | PCATCH, "nanslp", timo);
		p->p_sleepend = NULL;
		if (error == ERESTART)
			error = EINTR;
		if (rmt != NULL && (i == n || error != EWOULDBLOCK)) {
			/*-
			 * XXX this is unnecessary and possibly wrong if the timeout
			 * expired.  Then the remaining time should be zero.  If the
			 * calculation gives a nonzero value, then we have a bug.
			 * (1) if settimeofday() was called, then the calculation is
			 *     probably wrong, since `time' has probably become
			 *     inconsistent with the ending time `atv'.
			 *     XXX (1) should be fixed now with p->p_sleepend;
			 * (2) otherwise, our calculation of `timo' was wrong, perhaps
			 *     due to `tick' being wrong when hzto() was called or
			 *     changing afterwards (it can be wrong or change due to
			 *     hzto() not knowing about adjtime(2) or tickadj(8)).
			 *     Then we should be sleeping again instead of
			 *     returning.  Rounding up in hzto() probably fixes this
			 *     problem for small timeouts, but the absolute error may
			 *     be large for large timeouts.
			 */
			s = splclock();
			utv = time;
			splx(s);
			timevalsub(&atv, &utv);
			if (atv.tv_sec < 0)
				timerclear(&atv);
			if (n > 0)
				atv.tv_sec += (n - i) * 100000000;
			TIMEVAL_TO_TIMESPEC(&atv, rmt);
		}
	}
	return (error == EWOULDBLOCK ? 0 : error);
}

#ifndef _SYS_SYSPROTO_H_
struct nanosleep_args {
	struct	timespec *rqtp;
	struct	timespec *rmtp;
};
#endif

/* ARGSUSED */
int
nanosleep(p, uap, retval)
	struct proc *p;
	struct nanosleep_args *uap;
	register_t *retval;
{
	struct timespec rmt, rqt;
	int error, error2;

	error = copyin(SCARG(uap, rqtp), &rqt, sizeof(rqt));
	if (error)
		return (error);
	if (SCARG(uap, rmtp))
		if (!useracc((caddr_t)SCARG(uap, rmtp), sizeof(rmt), B_WRITE))
			return (EFAULT);
	error = nanosleep1(p, &rqt, &rmt);
	if (SCARG(uap, rmtp)) {
		error2 = copyout(&rmt, SCARG(uap, rmtp), sizeof(rmt));
		if (error2)	/* XXX shouldn't happen, did useracc() above */
			return (error2);
	}
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct signanosleep_args {
	struct	timespec *rqtp;
	struct	timespec *rmtp;
	sigset_t *mask;
};
#endif

/* ARGSUSED */
int
signanosleep(p, uap, retval)
	struct proc *p;
	struct signanosleep_args *uap;
	register_t *retval;
{
	struct timespec rmt, rqt;
	int error, error2;
	struct sigacts *ps = p->p_sigacts;
	sigset_t mask;

	error = copyin(SCARG(uap, rqtp), &rqt, sizeof(rqt));
	if (error)
		return (error);
	if (SCARG(uap, rmtp))
		if (!useracc((caddr_t)SCARG(uap, rmtp), sizeof(rmt), B_WRITE))
			return (EFAULT);
	error = copyin(SCARG(uap, mask), &mask, sizeof(mask));
	if (error)
		return (error);

	/* See kern_sig.c:sigsuspend() for explanation. */
	ps->ps_oldmask = p->p_sigmask;
	ps->ps_flags |= SAS_OLDMASK;
	p->p_sigmask = mask &~ sigcantmask;

	error = nanosleep1(p, &rqt, &rmt);

	/* See kern_sig.c:sigsuspend() again. */
	p->p_sigmask = ps->ps_oldmask;	/* in case timeout rather than sig */
	ps->ps_flags &= ~SAS_OLDMASK;

	if (SCARG(uap, rmtp)) {
		error2 = copyout(&rmt, SCARG(uap, rmtp), sizeof(rmt));
		if (error2)	/* XXX shouldn't happen, did useracc() above */
			return (error2);
	}
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct gettimeofday_args {
	struct	timeval *tp;
	struct	timezone *tzp;
};
#endif
/* ARGSUSED */
int
gettimeofday(p, uap, retval)
	struct proc *p;
	register struct gettimeofday_args *uap;
	int *retval;
{
	struct timeval atv;
	int error = 0;

	if (uap->tp) {
		microtime(&atv);
		if ((error = copyout((caddr_t)&atv, (caddr_t)uap->tp,
		    sizeof (atv))))
			return (error);
	}
	if (uap->tzp)
		error = copyout((caddr_t)&tz, (caddr_t)uap->tzp,
		    sizeof (tz));
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct settimeofday_args {
	struct	timeval *tv;
	struct	timezone *tzp;
};
#endif
/* ARGSUSED */
int
settimeofday(p, uap, retval)
	struct proc *p;
	struct settimeofday_args *uap;
	int *retval;
{
	struct timeval atv;
	struct timezone atz;
	int error;

	if ((error = suser(p->p_ucred, &p->p_acflag)))
		return (error);
	/* Verify all parameters before changing time. */
	if (uap->tv) {
		if ((error = copyin((caddr_t)uap->tv, (caddr_t)&atv,
		    sizeof(atv))))
			return (error);
		if (atv.tv_usec < 0 || atv.tv_usec >= 1000000)
			return (EINVAL);
	}
	if (uap->tzp &&
	    (error = copyin((caddr_t)uap->tzp, (caddr_t)&atz, sizeof(atz))))
		return (error);
	if (uap->tv && (error = settime(&atv)))
		return (error);
	if (uap->tzp)
		tz = atz;
	return (0);
}

int	tickdelta;			/* current clock skew, us. per tick */
long	timedelta;			/* unapplied time correction, us. */
static long	bigadj = 1000000;	/* use 10x skew above bigadj us. */

#ifndef _SYS_SYSPROTO_H_
struct adjtime_args {
	struct timeval *delta;
	struct timeval *olddelta;
};
#endif
/* ARGSUSED */
int
adjtime(p, uap, retval)
	struct proc *p;
	register struct adjtime_args *uap;
	int *retval;
{
	struct timeval atv;
	register long ndelta, ntickdelta, odelta;
	int s, error;

	if ((error = suser(p->p_ucred, &p->p_acflag)))
		return (error);
	if ((error =
	    copyin((caddr_t)uap->delta, (caddr_t)&atv, sizeof(struct timeval))))
		return (error);

	/*
	 * Compute the total correction and the rate at which to apply it.
	 * Round the adjustment down to a whole multiple of the per-tick
	 * delta, so that after some number of incremental changes in
	 * hardclock(), tickdelta will become zero, lest the correction
	 * overshoot and start taking us away from the desired final time.
	 */
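	/*
	 * Illustrative example (assuming tickadj is 5 usec, a typical
	 * value at hz = 100): a requested delta of +2.000003 seconds
	 * gives ndelta = 2000003 usec.  That exceeds bigadj, so
	 * ntickdelta = 50 usec, and ndelta is rounded down to 2000000
	 * usec, which hardclock() then consumes in exactly 40000 ticks
	 * with no overshoot.
	 */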
	ndelta = atv.tv_sec * 1000000 + atv.tv_usec;
	if (ndelta > bigadj || ndelta < -bigadj)
		ntickdelta = 10 * tickadj;
	else
		ntickdelta = tickadj;
	if (ndelta % ntickdelta)
		ndelta = ndelta / ntickdelta * ntickdelta;

	/*
	 * To make hardclock()'s job easier, make the per-tick delta negative
	 * if we want time to run slower; then hardclock can simply compute
	 * tick + tickdelta, and subtract tickdelta from timedelta.
	 */
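	/*
	 * For example (again assuming tickadj is 5 usec), a requested
	 * slew of -1 second leaves ntickdelta = 5 here and flips it to
	 * -5 below, so each hardclock() tick is shortened by 5 usec
	 * until the -1000000 usec of timedelta has been repaid.
	 */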
	if (ndelta < 0)
		ntickdelta = -ntickdelta;
	s = splclock();
	odelta = timedelta;
	timedelta = ndelta;
	tickdelta = ntickdelta;
	splx(s);

	if (uap->olddelta) {
		atv.tv_sec = odelta / 1000000;
		atv.tv_usec = odelta % 1000000;
		(void) copyout((caddr_t)&atv, (caddr_t)uap->olddelta,
		    sizeof(struct timeval));
	}
	return (0);
}

/*
 * Get value of an interval timer.  The process virtual and
 * profiling virtual time timers are kept in the p_stats area, since
 * they can be swapped out.  These are kept internally in the
 * way they are specified externally: in time until they expire.
 *
 * The real time interval timer is kept in the process table slot
 * for the process, and its value (it_value) is kept as an
 * absolute time rather than as a delta, so that it is easy to keep
 * periodic real-time signals from drifting.
 *
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a timeout
 * routine, called from the softclock() routine.  Since a callout
 * may be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realitexpire, given below)
 * to be delayed in real time past when it is supposed to occur.  It
 * does not suffice, therefore, to reload the real timer's .it_value from the
 * real time timer's .it_interval.  Rather, we compute the next time in
 * absolute time the timer should go off.
 */
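/*
 * For example, a periodic real timer with it_interval = 1 second whose
 * callout should have run at t = 10.000 but was delayed until t = 10.004
 * is re-armed for the absolute time 11.000, not 11.004, so the callout
 * latency does not accumulate from period to period.
 */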
#ifndef _SYS_SYSPROTO_H_
struct getitimer_args {
	u_int	which;
	struct	itimerval *itv;
};
#endif
/* ARGSUSED */
int
getitimer(p, uap, retval)
	struct proc *p;
	register struct getitimer_args *uap;
	int *retval;
{
	struct itimerval aitv;
	int s;

	if (uap->which > ITIMER_PROF)
		return (EINVAL);
	s = splclock();
	if (uap->which == ITIMER_REAL) {
		/*
		 * Convert from absolute to relative time in .it_value
		 * part of real time timer.  If time for real time timer
		 * has passed, return 0; else return difference between
		 * current time and time for the timer to go off.
		 */
		aitv = p->p_realtimer;
		if (timerisset(&aitv.it_value))
			if (timercmp(&aitv.it_value, &time, <))
				timerclear(&aitv.it_value);
			else
				timevalsub(&aitv.it_value, &time);
	} else
		aitv = p->p_stats->p_timer[uap->which];
	splx(s);
	return (copyout((caddr_t)&aitv, (caddr_t)uap->itv,
	    sizeof (struct itimerval)));
}

#ifndef _SYS_SYSPROTO_H_
struct setitimer_args {
	u_int	which;
	struct	itimerval *itv, *oitv;
};
#endif
/* ARGSUSED */
int
setitimer(p, uap, retval)
	struct proc *p;
	register struct setitimer_args *uap;
	int *retval;
{
	struct itimerval aitv;
	register struct itimerval *itvp;
	int s, error;

	if (uap->which > ITIMER_PROF)
		return (EINVAL);
	itvp = uap->itv;
	if (itvp && (error = copyin((caddr_t)itvp, (caddr_t)&aitv,
	    sizeof(struct itimerval))))
		return (error);
	if ((uap->itv = uap->oitv) &&
	    (error = getitimer(p, (struct getitimer_args *)uap, retval)))
		return (error);
	if (itvp == 0)
		return (0);
	if (itimerfix(&aitv.it_value))
		return (EINVAL);
	if (!timerisset(&aitv.it_value))
		timerclear(&aitv.it_interval);
	else if (itimerfix(&aitv.it_interval))
		return (EINVAL);
	s = splclock();
	if (uap->which == ITIMER_REAL) {
		if (timerisset(&p->p_realtimer.it_value))
			untimeout(realitexpire, (caddr_t)p, p->p_ithandle);
		if (timerisset(&aitv.it_value)) {
			timevaladd(&aitv.it_value, &time);
			p->p_ithandle = timeout(realitexpire, (caddr_t)p,
						hzto(&aitv.it_value));
		}
		p->p_realtimer = aitv;
	} else
		p->p_stats->p_timer[uap->which] = aitv;
	splx(s);
	return (0);
}

/*
 * Real interval timer expired:
 * send process whose timer expired an alarm signal.
 * If the timer is not set up to reload, then just return.
 * Else compute the next time the timer should go off, which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 * hzto() always adds 1 to allow for the time until the next clock
 * interrupt being strictly less than 1 clock tick, but we don't want
 * that here since we want to appear to be in sync with the clock
 * interrupt even when we're delayed.
 */
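/*
 * For example, if a 1-second periodic timer's callout is delayed by 3.5
 * seconds, the loop below advances it_value by the interval until it is
 * in the future again, so the missed periods collapse into a single
 * SIGALRM rather than a burst of three signals.
 */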
void
realitexpire(arg)
	void *arg;
{
	register struct proc *p;
	int s;

	p = (struct proc *)arg;
	psignal(p, SIGALRM);
	if (!timerisset(&p->p_realtimer.it_interval)) {
		timerclear(&p->p_realtimer.it_value);
		return;
	}
	for (;;) {
		s = splclock();
		timevaladd(&p->p_realtimer.it_value,
		    &p->p_realtimer.it_interval);
		if (timercmp(&p->p_realtimer.it_value, &time, >)) {
			p->p_ithandle =
			    timeout(realitexpire, (caddr_t)p,
				    hzto(&p->p_realtimer.it_value) - 1);
			splx(s);
			return;
		}
		splx(s);
	}
}

/*
 * Check that a proposed value to load into the .it_value or
 * .it_interval part of an interval timer is acceptable, and
 * fix it to have at least a minimal value (i.e., if it is less
 * than the resolution of the clock, round it up).
 */
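/*
 * For example, at hz = 100 (tick = 10000 usec) a value of {0, 1} is
 * rounded up to {0, 10000}, while {0, 0} and {1, 500000} pass through
 * unchanged; {0, -1} and {100000001, 0} are rejected with EINVAL.
 */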
int
itimerfix(tv)
	struct timeval *tv;
{

	if (tv->tv_sec < 0 || tv->tv_sec > 100000000 ||
	    tv->tv_usec < 0 || tv->tv_usec >= 1000000)
		return (EINVAL);
	if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
		tv->tv_usec = tick;
	return (0);
}

/*
 * Decrement an interval timer by a specified number
 * of microseconds, which must be less than a second,
 * i.e. < 1000000.  If the timer expires, then reload
 * it.  In this case, carry over (usec - old value) to
 * reduce the value reloaded into the timer so that
 * the timer does not drift.  This routine assumes
 * that it is called in a context where the timers
 * on which it is operating cannot change in value.
 */
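/*
 * Worked example: with it_value = {0, 400}, it_interval = {0, 10000} and
 * usec = 1000, the timer expires 400 usec into this decrement, and the
 * remaining 600 usec are carried into the reload below: 10000 - 600 =
 * 9400 usec, so later expirations stay on the original 10000-usec grid.
 */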
int
itimerdecr(itp, usec)
	register struct itimerval *itp;
	int usec;
{

	if (itp->it_value.tv_usec < usec) {
		if (itp->it_value.tv_sec == 0) {
			/* expired, and already in next interval */
			usec -= itp->it_value.tv_usec;
			goto expire;
		}
		itp->it_value.tv_usec += 1000000;
		itp->it_value.tv_sec--;
	}
	itp->it_value.tv_usec -= usec;
	usec = 0;
	if (timerisset(&itp->it_value))
		return (1);
	/* expired, exactly at end of interval */
expire:
	if (timerisset(&itp->it_interval)) {
		itp->it_value = itp->it_interval;
		itp->it_value.tv_usec -= usec;
		if (itp->it_value.tv_usec < 0) {
			itp->it_value.tv_usec += 1000000;
			itp->it_value.tv_sec--;
		}
	} else
		itp->it_value.tv_usec = 0;		/* sec is already 0 */
	return (0);
}

/*
 * Add and subtract routines for timevals.
 * N.B.: the subtract routine doesn't deal with
 * results which are before the beginning;
 * it just gets very confused in this case.
 * Caveat emptor.
 */
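/*
 * For example, adding {1, 700000} and {2, 600000} first yields
 * {3, 1300000}, which timevalfix() normalizes to {4, 300000}.
 * Subtracting a later time from an earlier one leaves tv_sec negative,
 * which, as noted above, these routines make no attempt to handle.
 */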
void
timevaladd(t1, t2)
	struct timeval *t1, *t2;
{

	t1->tv_sec += t2->tv_sec;
	t1->tv_usec += t2->tv_usec;
	timevalfix(t1);
}

void
timevalsub(t1, t2)
	struct timeval *t1, *t2;
{

	t1->tv_sec -= t2->tv_sec;
	t1->tv_usec -= t2->tv_usec;
	timevalfix(t1);
}

static void
timevalfix(t1)
	struct timeval *t1;
{

	if (t1->tv_usec < 0) {
		t1->tv_sec--;
		t1->tv_usec += 1000000;
	}
	if (t1->tv_usec >= 1000000) {
		t1->tv_sec++;
		t1->tv_usec -= 1000000;
	}
}
746