xref: /freebsd/sys/kern/kern_time.c (revision 17d6c636720d00f77e5d098daf4c278f89d84f7b)
1 /*
2  * Copyright (c) 1982, 1986, 1989, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by the University of
16  *	California, Berkeley and its contributors.
17  * 4. Neither the name of the University nor the names of its contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31  * SUCH DAMAGE.
32  *
33  *	@(#)kern_time.c	8.1 (Berkeley) 6/10/93
34  * $FreeBSD$
35  */
36 
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/lock.h>
40 #include <sys/mutex.h>
41 #include <sys/sysproto.h>
42 #include <sys/resourcevar.h>
43 #include <sys/signalvar.h>
44 #include <sys/kernel.h>
46 #include <sys/sysent.h>
47 #include <sys/proc.h>
48 #include <sys/time.h>
49 #include <sys/timetc.h>
50 #include <sys/vnode.h>
51 
52 #include <vm/vm.h>
53 #include <vm/vm_extern.h>
54 
55 struct timezone tz;
56 
57 /*
58  * Time of day and interval timer support.
59  *
60  * These routines provide the kernel entry points to get and set
61  * the time-of-day and per-process interval timers.  Subroutines
62  * here provide support for adding and subtracting timeval structures
63  * and decrementing interval timers, optionally reloading the interval
64  * timers when they expire.
65  */
66 
67 static int	nanosleep1 __P((struct thread *td, struct timespec *rqt,
68 		    struct timespec *rmt));
69 static int	settime __P((struct proc *, struct timeval *));
70 static void	timevalfix __P((struct timeval *));
71 static void	no_lease_updatetime __P((int));
72 
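/*
 * Default no-op body for the lease_updatetime hook; subsystems that need
 * to learn about clock steps (historically the NQNFS lease code) may
 * override the function pointer below.
 */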
73 static void
74 no_lease_updatetime(deltat)
75 	int deltat;
76 {
77 }
78 
79 void (*lease_updatetime) __P((int))  = no_lease_updatetime;
80 
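/*
 * Step the time-of-day clock to the given value, applying the securelevel
 * restrictions described below, notifying lease_updatetime() of the step,
 * and resynchronizing the real-time clock hardware via resettodr().
 */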
81 static int
82 settime(p, tv)
83 	struct proc *p;
84 	struct timeval *tv;
85 {
86 	struct timeval delta, tv1, tv2;
87 	static struct timeval maxtime, laststep;
88 	struct timespec ts;
89 	int s;
90 
91 	s = splclock();
92 	microtime(&tv1);
93 	delta = *tv;
94 	timevalsub(&delta, &tv1);
95 
96 	/*
97 	 * If the system is secure, we do not allow the time to be
98 	 * set to a value earlier than 1 second less than the highest
99 	 * time we have yet seen. The worst a miscreant can do in
100 	 * this circumstance is "freeze" time. He cannot go
101 	 * back into the past.
102 	 *
103 	 * We similarly do not allow the clock to be stepped more
104 	 * than one second, nor more than once per second. This allows
105 	 * a miscreant to make the clock march double-time, but no worse.
106 	 */
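	/*
	 * For example, at securelevel 2 a request to step the clock ten
	 * seconds into the past is clamped so that the new time is no more
	 * than one second behind the latest time already observed.
	 */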
107 	if (securelevel_gt(p->p_ucred, 1) != 0) {
108 		if (delta.tv_sec < 0 || delta.tv_usec < 0) {
109 			/*
110 			 * Update maxtime to latest time we've seen.
111 			 */
112 			if (tv1.tv_sec > maxtime.tv_sec)
113 				maxtime = tv1;
114 			tv2 = *tv;
115 			timevalsub(&tv2, &maxtime);
116 			if (tv2.tv_sec < -1) {
117 				tv->tv_sec = maxtime.tv_sec - 1;
118 				printf("Time adjustment clamped to -1 second\n");
119 			}
120 		} else {
121 			if (tv1.tv_sec == laststep.tv_sec) {
122 				splx(s);
123 				return (EPERM);
124 			}
125 			if (delta.tv_sec > 1) {
126 				tv->tv_sec = tv1.tv_sec + 1;
127 				printf("Time adjustment clamped to +1 second\n");
128 			}
129 			laststep = *tv;
130 		}
131 	}
132 
133 	ts.tv_sec = tv->tv_sec;
134 	ts.tv_nsec = tv->tv_usec * 1000;
135 	tc_setclock(&ts);
136 	(void) splsoftclock();
137 	lease_updatetime(delta.tv_sec);
138 	splx(s);
139 	resettodr();
140 	return (0);
141 }
142 
143 #ifndef _SYS_SYSPROTO_H_
144 struct clock_gettime_args {
145 	clockid_t clock_id;
146 	struct	timespec *tp;
147 };
148 #endif
149 
150 /*
151  * MPSAFE
152  */
153 /* ARGSUSED */
154 int
155 clock_gettime(td, uap)
156 	struct thread *td;
157 	struct clock_gettime_args *uap;
158 {
159 	struct timespec ats;
160 
161 	if (SCARG(uap, clock_id) != CLOCK_REALTIME)
162 		return (EINVAL);
163 	mtx_lock(&Giant);
164 	nanotime(&ats);
165 	mtx_unlock(&Giant);
166 	return (copyout(&ats, SCARG(uap, tp), sizeof(ats)));
167 }
168 
169 #ifndef _SYS_SYSPROTO_H_
170 struct clock_settime_args {
171 	clockid_t clock_id;
172 	const struct	timespec *tp;
173 };
174 #endif
175 
176 /*
177  * MPSAFE
178  */
179 /* ARGSUSED */
180 int
181 clock_settime(td, uap)
182 	struct thread *td;
183 	struct clock_settime_args *uap;
184 {
185 	struct timeval atv;
186 	struct timespec ats;
187 	int error;
188 
189 	mtx_lock(&Giant);
190 	if ((error = suser_td(td)) != 0)
191 		goto done2;
192 	if (SCARG(uap, clock_id) != CLOCK_REALTIME) {
193 		error = EINVAL;
194 		goto done2;
195 	}
196 	if ((error = copyin(SCARG(uap, tp), &ats, sizeof(ats))) != 0)
197 		goto done2;
198 	if (ats.tv_nsec < 0 || ats.tv_nsec >= 1000000000) {
199 		error = EINVAL;
200 		goto done2;
201 	}
202 	/* XXX Don't convert nsec->usec and back */
203 	TIMESPEC_TO_TIMEVAL(&atv, &ats);
204 	error = settime(td->td_proc, &atv);
205 done2:
206 	mtx_unlock(&Giant);
207 	return (error);
208 }
209 
210 #ifndef _SYS_SYSPROTO_H_
211 struct clock_getres_args {
212 	clockid_t clock_id;
213 	struct	timespec *tp;
214 };
215 #endif
216 
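/*
 * Return the resolution of CLOCK_REALTIME, derived from the current
 * timecounter's frequency.  Other clock ids are rejected with EINVAL,
 * and a NULL tp simply skips the copyout.
 */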
217 int
218 clock_getres(td, uap)
219 	struct thread *td;
220 	struct clock_getres_args *uap;
221 {
222 	struct timespec ts;
223 	int error;
224 
225 	if (SCARG(uap, clock_id) != CLOCK_REALTIME)
226 		return (EINVAL);
227 	error = 0;
228 	if (SCARG(uap, tp)) {
229 		ts.tv_sec = 0;
230 		ts.tv_nsec = 1000000000 / timecounter->tc_frequency;
231 		error = copyout(&ts, SCARG(uap, tp), sizeof(ts));
232 	}
233 	return (error);
234 }
235 
236 static int nanowait;
237 
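/*
 * Common back end for nanosleep(): sleep for at least the requested
 * interval and, if the sleep is interrupted, report the unslept time
 * through *rmt.
 */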
238 static int
239 nanosleep1(td, rqt, rmt)
240 	struct thread *td;
241 	struct timespec *rqt, *rmt;
242 {
243 	struct timespec ts, ts2, ts3;
244 	struct timeval tv;
245 	int error;
246 
247 	if (rqt->tv_nsec < 0 || rqt->tv_nsec >= 1000000000)
248 		return (EINVAL);
249 	if (rqt->tv_sec < 0 || (rqt->tv_sec == 0 && rqt->tv_nsec == 0))
250 		return (0);
251 	getnanouptime(&ts);
252 	timespecadd(&ts, rqt);
253 	TIMESPEC_TO_TIMEVAL(&tv, rqt);
254 	for (;;) {
255 		error = tsleep(&nanowait, PWAIT | PCATCH, "nanslp",
256 		    tvtohz(&tv));
257 		getnanouptime(&ts2);
258 		if (error != EWOULDBLOCK) {
259 			if (error == ERESTART)
260 				error = EINTR;
261 			if (rmt != NULL) {
262 				timespecsub(&ts, &ts2);
263 				if (ts.tv_sec < 0)
264 					timespecclear(&ts);
265 				*rmt = ts;
266 			}
267 			return (error);
268 		}
269 		if (timespeccmp(&ts2, &ts, >=))
270 			return (0);
271 		ts3 = ts;
272 		timespecsub(&ts3, &ts2);
273 		TIMESPEC_TO_TIMEVAL(&tv, &ts3);
274 	}
275 }
276 
277 #ifndef _SYS_SYSPROTO_H_
278 struct nanosleep_args {
279 	struct	timespec *rqtp;
280 	struct	timespec *rmtp;
281 };
282 #endif
283 
284 /*
285  * MPSAFE
286  */
287 /* ARGSUSED */
288 int
289 nanosleep(td, uap)
290 	struct thread *td;
291 	struct nanosleep_args *uap;
292 {
293 	struct timespec rmt, rqt;
294 	int error;
295 
296 	error = copyin(SCARG(uap, rqtp), &rqt, sizeof(rqt));
297 	if (error)
298 		return (error);
299 
300 	mtx_lock(&Giant);
301 	if (SCARG(uap, rmtp)) {
302 		if (!useracc((caddr_t)SCARG(uap, rmtp), sizeof(rmt),
303 		    VM_PROT_WRITE)) {
304 			error = EFAULT;
305 			goto done2;
306 		}
307 	}
308 	error = nanosleep1(td, &rqt, &rmt);
309 	if (error && SCARG(uap, rmtp)) {
310 		int error2;
311 
312 		error2 = copyout(&rmt, SCARG(uap, rmtp), sizeof(rmt));
313 		if (error2)	/* XXX shouldn't happen, did useracc() above */
314 			error = error2;
315 	}
316 done2:
317 	mtx_unlock(&Giant);
318 	return (error);
319 }
320 
321 #ifndef _SYS_SYSPROTO_H_
322 struct gettimeofday_args {
323 	struct	timeval *tp;
324 	struct	timezone *tzp;
325 };
326 #endif
327 /*
328  * MPSAFE
329  */
330 /* ARGSUSED */
331 int
332 gettimeofday(td, uap)
333 	struct thread *td;
334 	register struct gettimeofday_args *uap;
335 {
336 	struct timeval atv;
337 	int error = 0;
338 
339 	mtx_lock(&Giant);
340 	if (uap->tp) {
341 		microtime(&atv);
342 		if ((error = copyout((caddr_t)&atv, (caddr_t)uap->tp,
343 		    sizeof (atv)))) {
344 			goto done2;
345 		}
346 	}
347 	if (uap->tzp) {
348 		error = copyout((caddr_t)&tz, (caddr_t)uap->tzp,
349 		    sizeof (tz));
350 	}
351 done2:
352 	mtx_unlock(&Giant);
353 	return (error);
354 }
355 
356 #ifndef _SYS_SYSPROTO_H_
357 struct settimeofday_args {
358 	struct	timeval *tv;
359 	struct	timezone *tzp;
360 };
361 #endif
362 /*
363  * MPSAFE
364  */
365 /* ARGSUSED */
366 int
367 settimeofday(td, uap)
368 	struct thread *td;
369 	struct settimeofday_args *uap;
370 {
371 	struct timeval atv;
372 	struct timezone atz;
373 	int error = 0;
374 
375 	mtx_lock(&Giant);
376 
377 	if ((error = suser_td(td)))
378 		goto done2;
379 	/* Verify all parameters before changing time. */
380 	if (uap->tv) {
381 		if ((error = copyin((caddr_t)uap->tv, (caddr_t)&atv,
382 		    sizeof(atv)))) {
383 			goto done2;
384 		}
385 		if (atv.tv_usec < 0 || atv.tv_usec >= 1000000) {
386 			error = EINVAL;
387 			goto done2;
388 		}
389 	}
390 	if (uap->tzp &&
391 	    (error = copyin((caddr_t)uap->tzp, (caddr_t)&atz, sizeof(atz)))) {
392 		goto done2;
393 	}
394 	if (uap->tv && (error = settime(td->td_proc, &atv)))
395 		goto done2;
396 	if (uap->tzp)
397 		tz = atz;
398 done2:
399 	mtx_unlock(&Giant);
400 	return (error);
401 }
402 
403 int	tickdelta;			/* current clock skew, us. per tick */
404 long	timedelta;			/* unapplied time correction, us. */
405 static long	bigadj = 1000000;	/* use 10x skew above bigadj us. */
406 
407 #ifndef _SYS_SYSPROTO_H_
408 struct adjtime_args {
409 	struct timeval *delta;
410 	struct timeval *olddelta;
411 };
412 #endif
413 /*
414  * MPSAFE
415  */
416 /* ARGSUSED */
417 int
418 adjtime(td, uap)
419 	struct thread *td;
420 	register struct adjtime_args *uap;
421 {
422 	struct timeval atv;
423 	register long ndelta, ntickdelta, odelta;
424 	int s, error;
425 
426 	mtx_lock(&Giant);
427 
428 	if ((error = suser_td(td)))
429 		goto done2;
430 	error = copyin((caddr_t)uap->delta, (caddr_t)&atv,
431 		    sizeof(struct timeval));
432 	if (error)
433 		goto done2;
434 
435 	/*
436 	 * Compute the total correction and the rate at which to apply it.
437 	 * Round the adjustment down to a whole multiple of the per-tick
438 	 * delta, so that after some number of incremental changes in
439 	 * hardclock(), tickdelta will become zero, lest the correction
440 	 * overshoot and start taking us away from the desired final time.
441 	 */
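	/*
	 * For example, with a hypothetical tickadj of 5 us and a requested
	 * correction of 1003 us, ndelta is rounded down to 1000 us so that
	 * exactly 200 ticks of adjustment leave tickdelta at zero.
	 */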
442 	ndelta = atv.tv_sec * 1000000 + atv.tv_usec;
443 	if (ndelta > bigadj || ndelta < -bigadj)
444 		ntickdelta = 10 * tickadj;
445 	else
446 		ntickdelta = tickadj;
447 	if (ndelta % ntickdelta)
448 		ndelta = ndelta / ntickdelta * ntickdelta;
449 
450 	/*
451 	 * To make hardclock()'s job easier, make the per-tick delta negative
452 	 * if we want time to run slower; then hardclock can simply compute
453 	 * tick + tickdelta, and subtract tickdelta from timedelta.
454 	 */
455 	if (ndelta < 0)
456 		ntickdelta = -ntickdelta;
457 	s = splclock();
458 	odelta = timedelta;
459 	timedelta = ndelta;
460 	tickdelta = ntickdelta;
461 	splx(s);
462 
463 	if (uap->olddelta) {
464 		atv.tv_sec = odelta / 1000000;
465 		atv.tv_usec = odelta % 1000000;
466 		(void) copyout((caddr_t)&atv, (caddr_t)uap->olddelta,
467 		    sizeof(struct timeval));
468 	}
469 done2:
470 	mtx_unlock(&Giant);
471 	return (error);
472 }
473 
474 /*
475  * Get value of an interval timer.  The process virtual and
476  * profiling virtual time timers are kept in the p_stats area, since
477  * they can be swapped out.  These are kept internally in the
478  * way they are specified externally: in time until they expire.
479  *
480  * The real time interval timer is kept in the process table slot
481  * for the process, and its value (it_value) is kept as an
482  * absolute time rather than as a delta, so that it is easy to keep
483  * periodic real-time signals from drifting.
484  *
485  * Virtual time timers are processed in the hardclock() routine of
486  * kern_clock.c.  The real time timer is processed by a timeout
487  * routine, called from the softclock() routine.  Since a callout
488  * may be delayed in real time due to interrupt processing in the system,
489  * it is possible for the real time timeout routine (realitexpire, given below)
490  * to be delayed in real time past when it is supposed to occur.  It
491  * does not suffice, therefore, to reload the real timer's .it_value from the
492  * real time timer's .it_interval.  Rather, we compute the next time in
493  * absolute time the timer should go off.
494  */
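/*
 * For example, a real time timer armed with an .it_value of 5 seconds
 * while getmicrouptime() reads 100.0 is stored as the absolute uptime
 * 105.0; getitimer() converts it back to a delta by subtracting the
 * current uptime.
 */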
495 #ifndef _SYS_SYSPROTO_H_
496 struct getitimer_args {
497 	u_int	which;
498 	struct	itimerval *itv;
499 };
500 #endif
501 /*
502  * MPSAFE
503  */
504 /* ARGSUSED */
505 int
506 getitimer(td, uap)
507 	struct thread *td;
508 	register struct getitimer_args *uap;
509 {
510 	struct proc *p = td->td_proc;
511 	struct timeval ctv;
512 	struct itimerval aitv;
513 	int s;
514 	int error;
515 
516 	if (uap->which > ITIMER_PROF)
517 		return (EINVAL);
518 
519 	mtx_lock(&Giant);
520 
521 	s = splclock(); /* XXX still needed ? */
522 	if (uap->which == ITIMER_REAL) {
523 		/*
524 		 * Convert from absolute to relative time in .it_value
525 		 * part of real time timer.  If time for real time timer
526 		 * has passed return 0, else return difference between
527 		 * current time and time for the timer to go off.
528 		 */
529 		aitv = p->p_realtimer;
530 		if (timevalisset(&aitv.it_value)) {
531 			getmicrouptime(&ctv);
532 			if (timevalcmp(&aitv.it_value, &ctv, <))
533 				timevalclear(&aitv.it_value);
534 			else
535 				timevalsub(&aitv.it_value, &ctv);
536 		}
537 	} else {
538 		aitv = p->p_stats->p_timer[uap->which];
539 	}
540 	splx(s);
541 	error = copyout((caddr_t)&aitv, (caddr_t)uap->itv,
542 	    sizeof (struct itimerval));
543 	mtx_unlock(&Giant);
544 	return(error);
545 }
546 
547 #ifndef _SYS_SYSPROTO_H_
548 struct setitimer_args {
549 	u_int	which;
550 	struct	itimerval *itv, *oitv;
551 };
552 #endif
553 /*
554  * MPSAFE
555  */
556 /* ARGSUSED */
557 int
558 setitimer(td, uap)
559 	struct thread *td;
560 	register struct setitimer_args *uap;
561 {
562 	struct proc *p = td->td_proc;
563 	struct itimerval aitv;
564 	struct timeval ctv;
565 	register struct itimerval *itvp;
566 	int s, error = 0;
567 
568 	if (uap->which > ITIMER_PROF)
569 		return (EINVAL);
570 	itvp = uap->itv;
571 	if (itvp && (error = copyin((caddr_t)itvp, (caddr_t)&aitv,
572 	    sizeof(struct itimerval))))
573 		return (error);
574 
575 	mtx_lock(&Giant);
576 
577 	if ((uap->itv = uap->oitv) &&
578 	    (error = getitimer(td, (struct getitimer_args *)uap))) {
579 		goto done2;
580 	}
581 	if (itvp == 0) {
582 		error = 0;
583 		goto done2;
584 	}
585 	if (itimerfix(&aitv.it_value)) {
586 		error = EINVAL;
587 		goto done2;
588 	}
589 	if (!timevalisset(&aitv.it_value)) {
590 		timevalclear(&aitv.it_interval);
591 	} else if (itimerfix(&aitv.it_interval)) {
592 		error = EINVAL;
593 		goto done2;
594 	}
595 	s = splclock(); /* XXX: still needed ? */
596 	if (uap->which == ITIMER_REAL) {
597 		if (timevalisset(&p->p_realtimer.it_value))
598 			callout_stop(&p->p_itcallout);
599 		if (timevalisset(&aitv.it_value))
600 			callout_reset(&p->p_itcallout, tvtohz(&aitv.it_value),
601 			    realitexpire, p);
602 		getmicrouptime(&ctv);
603 		timevaladd(&aitv.it_value, &ctv);
604 		p->p_realtimer = aitv;
605 	} else {
606 		p->p_stats->p_timer[uap->which] = aitv;
607 	}
608 	splx(s);
609 done2:
610 	mtx_unlock(&Giant);
611 	return (error);
612 }
613 
614 /*
615  * Real interval timer expired:
616  * send process whose timer expired an alarm signal.
617  * If time is not set up to reload, then just return.
618  * Else compute next time timer should go off which is > current time.
619  * This is where delay in processing this timeout causes multiple
620  * SIGALRM calls to be compressed into one.
621  * tvtohz() always adds 1 to allow for the time until the next clock
622  * interrupt being strictly less than 1 clock tick, but we don't want
623  * that here since we want to appear to be in sync with the clock
624  * interrupt even when we're delayed.
625  */
626 void
627 realitexpire(arg)
628 	void *arg;
629 {
630 	register struct proc *p;
631 	struct timeval ctv, ntv;
632 	int s;
633 
634 	p = (struct proc *)arg;
635 	PROC_LOCK(p);
636 	psignal(p, SIGALRM);
637 	if (!timevalisset(&p->p_realtimer.it_interval)) {
638 		timevalclear(&p->p_realtimer.it_value);
639 		PROC_UNLOCK(p);
640 		return;
641 	}
642 	for (;;) {
643 		s = splclock(); /* XXX: still needed ? */
644 		timevaladd(&p->p_realtimer.it_value,
645 		    &p->p_realtimer.it_interval);
646 		getmicrouptime(&ctv);
647 		if (timevalcmp(&p->p_realtimer.it_value, &ctv, >)) {
648 			ntv = p->p_realtimer.it_value;
649 			timevalsub(&ntv, &ctv);
650 			callout_reset(&p->p_itcallout, tvtohz(&ntv) - 1,
651 			    realitexpire, p);
652 			splx(s);
653 			PROC_UNLOCK(p);
654 			return;
655 		}
656 		splx(s);
657 	}
658 	/*NOTREACHED*/
659 }
660 
661 /*
662  * Check that a proposed value to load into the .it_value or
663  * .it_interval part of an interval timer is acceptable, and
664  * fix it to have at least a minimal value (i.e. if it is less
665  * than the resolution of the clock, round it up).
666  */
667 int
668 itimerfix(tv)
669 	struct timeval *tv;
670 {
671 
672 	if (tv->tv_sec < 0 || tv->tv_sec > 100000000 ||
673 	    tv->tv_usec < 0 || tv->tv_usec >= 1000000)
674 		return (EINVAL);
675 	if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
676 		tv->tv_usec = tick;
677 	return (0);
678 }
679 
680 /*
681  * Decrement an interval timer by a specified number
682  * of microseconds, which must be less than a second,
683  * i.e. < 1000000.  If the timer expires, then reload
684  * it.  In this case, carry over (usec - old value) to
685  * reduce the value reloaded into the timer so that
686  * the timer does not drift.  This routine assumes
687  * that it is called in a context where the timers
688  * on which it is operating cannot change in value.
689  */
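/*
 * For example, if .it_value holds 300 us and usec is 1000, the timer has
 * expired with 700 us of overrun; the reload path below copies .it_interval
 * into .it_value and subtracts that 700 us so the timer does not drift.
 */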
690 int
691 itimerdecr(itp, usec)
692 	register struct itimerval *itp;
693 	int usec;
694 {
695 
696 	if (itp->it_value.tv_usec < usec) {
697 		if (itp->it_value.tv_sec == 0) {
698 			/* expired, and already in next interval */
699 			usec -= itp->it_value.tv_usec;
700 			goto expire;
701 		}
702 		itp->it_value.tv_usec += 1000000;
703 		itp->it_value.tv_sec--;
704 	}
705 	itp->it_value.tv_usec -= usec;
706 	usec = 0;
707 	if (timevalisset(&itp->it_value))
708 		return (1);
709 	/* expired, exactly at end of interval */
710 expire:
711 	if (timevalisset(&itp->it_interval)) {
712 		itp->it_value = itp->it_interval;
713 		itp->it_value.tv_usec -= usec;
714 		if (itp->it_value.tv_usec < 0) {
715 			itp->it_value.tv_usec += 1000000;
716 			itp->it_value.tv_sec--;
717 		}
718 	} else
719 		itp->it_value.tv_usec = 0;		/* sec is already 0 */
720 	return (0);
721 }
722 
723 /*
724  * Add and subtract routines for timevals.
725  * N.B.: subtract routine doesn't deal with
726  * results which are before the beginning,
727  * it just gets very confused in this case.
728  * Caveat emptor.
729  */
730 void
731 timevaladd(t1, t2)
732 	struct timeval *t1, *t2;
733 {
734 
735 	t1->tv_sec += t2->tv_sec;
736 	t1->tv_usec += t2->tv_usec;
737 	timevalfix(t1);
738 }
739 
740 void
741 timevalsub(t1, t2)
742 	struct timeval *t1, *t2;
743 {
744 
745 	t1->tv_sec -= t2->tv_sec;
746 	t1->tv_usec -= t2->tv_usec;
747 	timevalfix(t1);
748 }
749 
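/*
 * Fold a tv_usec that has gone one second out of range back into
 * [0, 1000000), adjusting tv_sec accordingly.
 */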
750 static void
751 timevalfix(t1)
752 	struct timeval *t1;
753 {
754 
755 	if (t1->tv_usec < 0) {
756 		t1->tv_sec--;
757 		t1->tv_usec += 1000000;
758 	}
759 	if (t1->tv_usec >= 1000000) {
760 		t1->tv_sec++;
761 		t1->tv_usec -= 1000000;
762 	}
763 }
764