/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.1 (Berkeley) 6/10/93
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysproto.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysent.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/timetc.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

struct timezone tz;

/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */
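
/*
 * For illustration: the timeval arithmetic below assumes the canonical
 * form 0 <= tv_usec < 1000000.  Adding { 2, 600000 } (2.6 s) and
 * { 1, 700000 } (1.7 s) with timevaladd() first yields { 3, 1300000 },
 * which timevalfix() then normalizes to { 4, 300000 } (4.3 s).
 */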

static int	nanosleep1(struct thread *td, struct timespec *rqt,
		    struct timespec *rmt);
static int	settime(struct proc *, struct timeval *);
static void	timevalfix(struct timeval *);
static void	no_lease_updatetime(int);

static void
no_lease_updatetime(deltat)
	int deltat;
{
}

void (*lease_updatetime)(int) = no_lease_updatetime;

static int
settime(p, tv)
	struct proc *p;
	struct timeval *tv;
{
	struct timeval delta, tv1, tv2;
	static struct timeval maxtime, laststep;
	struct timespec ts;
	int s;

	s = splclock();
	microtime(&tv1);
	delta = *tv;
	timevalsub(&delta, &tv1);

	/*
	 * If the system is secure, we do not allow the time to be
	 * set to a value earlier than 1 second less than the highest
	 * time we have yet seen. The worst a miscreant can do in
	 * this circumstance is "freeze" time. He couldn't go
	 * back to the past.
	 *
	 * We similarly do not allow the clock to be stepped more
	 * than one second, nor more than once per second. This allows
	 * a miscreant to make the clock march double-time, but no worse.
	 */
	if (securelevel_gt(p->p_ucred, 1) != 0) {
		if (delta.tv_sec < 0 || delta.tv_usec < 0) {
			/*
			 * Update maxtime to latest time we've seen.
			 */
			if (tv1.tv_sec > maxtime.tv_sec)
				maxtime = tv1;
			tv2 = *tv;
			timevalsub(&tv2, &maxtime);
			if (tv2.tv_sec < -1) {
				tv->tv_sec = maxtime.tv_sec - 1;
				printf("Time adjustment clamped to -1 second\n");
			}
		} else {
			if (tv1.tv_sec == laststep.tv_sec) {
				splx(s);
				return (EPERM);
			}
			if (delta.tv_sec > 1) {
				tv->tv_sec = tv1.tv_sec + 1;
				printf("Time adjustment clamped to +1 second\n");
			}
			laststep = *tv;
		}
	}

	ts.tv_sec = tv->tv_sec;
	ts.tv_nsec = tv->tv_usec * 1000;
	tc_setclock(&ts);
	(void) splsoftclock();
	lease_updatetime(delta.tv_sec);
	splx(s);
	resettodr();
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct clock_gettime_args {
	clockid_t clock_id;
	struct	timespec *tp;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
clock_gettime(td, uap)
	struct thread *td;
	struct clock_gettime_args *uap;
{
	struct timespec ats;

	if (SCARG(uap, clock_id) != CLOCK_REALTIME)
		return (EINVAL);
	mtx_lock(&Giant);
	nanotime(&ats);
	mtx_unlock(&Giant);
	return (copyout(&ats, SCARG(uap, tp), sizeof(ats)));
}

#ifndef _SYS_SYSPROTO_H_
struct clock_settime_args {
	clockid_t clock_id;
	const struct	timespec *tp;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
clock_settime(td, uap)
	struct thread *td;
	struct clock_settime_args *uap;
{
	struct timeval atv;
	struct timespec ats;
	int error;

	mtx_lock(&Giant);
	if ((error = suser_td(td)) != 0)
		goto done2;
	if (SCARG(uap, clock_id) != CLOCK_REALTIME) {
		error = EINVAL;
		goto done2;
	}
	if ((error = copyin(SCARG(uap, tp), &ats, sizeof(ats))) != 0)
		goto done2;
	if (ats.tv_nsec < 0 || ats.tv_nsec >= 1000000000) {
		error = EINVAL;
		goto done2;
	}
	/* XXX Don't convert nsec->usec and back */
	TIMESPEC_TO_TIMEVAL(&atv, &ats);
	error = settime(td->td_proc, &atv);
done2:
	mtx_unlock(&Giant);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct clock_getres_args {
	clockid_t clock_id;
	struct	timespec *tp;
};
#endif
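
/*
 * The resolution reported below is simply the period of the active
 * timecounter, truncated to whole nanoseconds; e.g. with a timecounter
 * such as the i8254 running at 1193182 Hz, 1000000000 / tc_frequency
 * comes out to 838 ns.  A userland sketch of the call (illustrative only):
 *
 *	struct timespec res;
 *	if (clock_getres(CLOCK_REALTIME, &res) == 0)
 *		printf("resolution: %ld ns\n", res.tv_nsec);
 */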

int
clock_getres(td, uap)
	struct thread *td;
	struct clock_getres_args *uap;
{
	struct timespec ts;
	int error;

	if (SCARG(uap, clock_id) != CLOCK_REALTIME)
		return (EINVAL);
	error = 0;
	if (SCARG(uap, tp)) {
		ts.tv_sec = 0;
		ts.tv_nsec = 1000000000 / timecounter->tc_frequency;
		error = copyout(&ts, SCARG(uap, tp), sizeof(ts));
	}
	return (error);
}

static int nanowait;

static int
nanosleep1(td, rqt, rmt)
	struct thread *td;
	struct timespec *rqt, *rmt;
{
	struct timespec ts, ts2, ts3;
	struct timeval tv;
	int error;

	if (rqt->tv_nsec < 0 || rqt->tv_nsec >= 1000000000)
		return (EINVAL);
	if (rqt->tv_sec < 0 || (rqt->tv_sec == 0 && rqt->tv_nsec == 0))
		return (0);
	getnanouptime(&ts);
	timespecadd(&ts, rqt);
	TIMESPEC_TO_TIMEVAL(&tv, rqt);
	for (;;) {
		error = tsleep(&nanowait, PWAIT | PCATCH, "nanslp",
		    tvtohz(&tv));
		getnanouptime(&ts2);
		if (error != EWOULDBLOCK) {
			if (error == ERESTART)
				error = EINTR;
			if (rmt != NULL) {
				timespecsub(&ts, &ts2);
				if (ts.tv_sec < 0)
					timespecclear(&ts);
				*rmt = ts;
			}
			return (error);
		}
		if (timespeccmp(&ts2, &ts, >=))
			return (0);
		ts3 = ts;
		timespecsub(&ts3, &ts2);
		TIMESPEC_TO_TIMEVAL(&tv, &ts3);
	}
}

#ifndef _SYS_SYSPROTO_H_
struct nanosleep_args {
	struct	timespec *rqtp;
	struct	timespec *rmtp;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
nanosleep(td, uap)
	struct thread *td;
	struct nanosleep_args *uap;
{
	struct timespec rmt, rqt;
	int error;

	error = copyin(SCARG(uap, rqtp), &rqt, sizeof(rqt));
	if (error)
		return (error);

	mtx_lock(&Giant);
	if (SCARG(uap, rmtp)) {
		if (!useracc((caddr_t)SCARG(uap, rmtp), sizeof(rmt),
		    VM_PROT_WRITE)) {
			error = EFAULT;
			goto done2;
		}
	}
	error = nanosleep1(td, &rqt, &rmt);
	if (error && SCARG(uap, rmtp)) {
		int error2;

		error2 = copyout(&rmt, SCARG(uap, rmtp), sizeof(rmt));
		if (error2)	/* XXX shouldn't happen, did useracc() above */
			error = error2;
	}
done2:
	mtx_unlock(&Giant);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct gettimeofday_args {
	struct	timeval *tp;
	struct	timezone *tzp;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
gettimeofday(td, uap)
	struct thread *td;
	register struct gettimeofday_args *uap;
{
	struct timeval atv;
	int error = 0;

	if (uap->tp) {
		microtime(&atv);
		error = copyout((caddr_t)&atv, (caddr_t)uap->tp, sizeof (atv));
	}
	if (error == 0 && uap->tzp != NULL) {
		mtx_lock(&Giant);
		error = copyout((caddr_t)&tz, (caddr_t)uap->tzp,
		    sizeof (tz));
		mtx_unlock(&Giant);
	}
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct settimeofday_args {
	struct	timeval *tv;
	struct	timezone *tzp;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
settimeofday(td, uap)
	struct thread *td;
	struct settimeofday_args *uap;
{
	struct timeval atv;
	struct timezone atz;
	int error = 0;

	mtx_lock(&Giant);

	if ((error = suser_td(td)))
		goto done2;
	/* Verify all parameters before changing time. */
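	/*
	 * For example, a request with tv_usec == 1000000 is rejected with
	 * EINVAL here rather than being carried into tv_sec, so settime()
	 * is only ever handed a normalized timeval.
	 */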
	if (uap->tv) {
		if ((error = copyin((caddr_t)uap->tv, (caddr_t)&atv,
		    sizeof(atv)))) {
			goto done2;
		}
		if (atv.tv_usec < 0 || atv.tv_usec >= 1000000) {
			error = EINVAL;
			goto done2;
		}
	}
	if (uap->tzp &&
	    (error = copyin((caddr_t)uap->tzp, (caddr_t)&atz, sizeof(atz)))) {
		goto done2;
	}
	if (uap->tv && (error = settime(td->td_proc, &atv)))
		goto done2;
	if (uap->tzp)
		tz = atz;
done2:
	mtx_unlock(&Giant);
	return (error);
}

int	tickdelta;			/* current clock skew, us. per tick */
long	timedelta;			/* unapplied time correction, us. */
static long	bigadj = 1000000;	/* use 10x skew above bigadj us. */

#ifndef _SYS_SYSPROTO_H_
struct adjtime_args {
	struct timeval *delta;
	struct timeval *olddelta;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
adjtime(td, uap)
	struct thread *td;
	register struct adjtime_args *uap;
{
	struct timeval atv;
	register long ndelta, ntickdelta, odelta;
	int s, error;

	mtx_lock(&Giant);

	if ((error = suser_td(td)))
		goto done2;
	error = copyin((caddr_t)uap->delta, (caddr_t)&atv,
	    sizeof(struct timeval));
	if (error)
		goto done2;

	/*
	 * Compute the total correction and the rate at which to apply it.
	 * Round the adjustment down to a whole multiple of the per-tick
	 * delta, so that after some number of incremental changes in
	 * hardclock(), tickdelta will become zero, lest the correction
	 * overshoot and start taking us away from the desired final time.
	 */
	ndelta = atv.tv_sec * 1000000 + atv.tv_usec;
	if (ndelta > bigadj || ndelta < -bigadj)
		ntickdelta = 10 * tickadj;
	else
		ntickdelta = tickadj;
	if (ndelta % ntickdelta)
		ndelta = ndelta / ntickdelta * ntickdelta;

	/*
	 * To make hardclock()'s job easier, make the per-tick delta negative
	 * if we want time to run slower; then hardclock can simply compute
	 * tick + tickdelta, and subtract tickdelta from timedelta.
	 */
	if (ndelta < 0)
		ntickdelta = -ntickdelta;
	s = splclock();
	odelta = timedelta;
	timedelta = ndelta;
	tickdelta = ntickdelta;
	splx(s);

	if (uap->olddelta) {
		atv.tv_sec = odelta / 1000000;
		atv.tv_usec = odelta % 1000000;
		(void) copyout((caddr_t)&atv, (caddr_t)uap->olddelta,
		    sizeof(struct timeval));
	}
done2:
	mtx_unlock(&Giant);
	return (error);
}

/*
 * Get value of an interval timer.  The process virtual and
 * profiling virtual time timers are kept in the p_stats area, since
 * they can be swapped out.  These are kept internally in the
 * way they are specified externally: in time until they expire.
 *
 * The real time interval timer is kept in the process table slot
 * for the process, and its value (it_value) is kept as an
 * absolute time rather than as a delta, so that it is easy to keep
 * periodic real-time signals from drifting.
 *
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a timeout
 * routine, called from the softclock() routine.  Since a callout
 * may be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realitexpire, given
 * below) to be delayed in real time past when it is supposed to occur.  It
 * does not suffice, therefore, to reload the real timer .it_value from the
 * real time timer's .it_interval.  Rather, we compute the next time in
 * absolute time the timer should go off.
 */
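/*
 * A worked example of the absolute representation described above
 * (hypothetical numbers): at a system uptime of 100.000000 s, arming
 * ITIMER_REAL with an it_value of 2 s stores { 102, 0 } in
 * p_realtimer.it_value; a getitimer() call at uptime 101.250000 s
 * converts that back to the relative { 0, 750000 }.
 */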
#ifndef _SYS_SYSPROTO_H_
struct getitimer_args {
	u_int	which;
	struct	itimerval *itv;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
getitimer(td, uap)
	struct thread *td;
	register struct getitimer_args *uap;
{
	struct proc *p = td->td_proc;
	struct timeval ctv;
	struct itimerval aitv;
	int s;
	int error;

	if (uap->which > ITIMER_PROF)
		return (EINVAL);

	mtx_lock(&Giant);

	s = splclock();		/* XXX still needed ? */
	if (uap->which == ITIMER_REAL) {
		/*
		 * Convert from absolute to relative time in .it_value
		 * part of real time timer.  If time for real time timer
		 * has passed return 0, else return difference between
		 * current time and time for the timer to go off.
		 */
		aitv = p->p_realtimer;
		if (timevalisset(&aitv.it_value)) {
			getmicrouptime(&ctv);
			if (timevalcmp(&aitv.it_value, &ctv, <))
				timevalclear(&aitv.it_value);
			else
				timevalsub(&aitv.it_value, &ctv);
		}
	} else {
		aitv = p->p_stats->p_timer[uap->which];
	}
	splx(s);
	error = copyout((caddr_t)&aitv, (caddr_t)uap->itv,
	    sizeof (struct itimerval));
	mtx_unlock(&Giant);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct setitimer_args {
	u_int	which;
	struct	itimerval *itv, *oitv;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
setitimer(td, uap)
	struct thread *td;
	register struct setitimer_args *uap;
{
	struct proc *p = td->td_proc;
	struct itimerval aitv;
	struct timeval ctv;
	register struct itimerval *itvp;
	int s, error = 0;

	if (uap->which > ITIMER_PROF)
		return (EINVAL);
	itvp = uap->itv;
	if (itvp && (error = copyin((caddr_t)itvp, (caddr_t)&aitv,
	    sizeof(struct itimerval))))
		return (error);

	mtx_lock(&Giant);

	if ((uap->itv = uap->oitv) &&
	    (error = getitimer(td, (struct getitimer_args *)uap))) {
		goto done2;
	}
	if (itvp == 0) {
		error = 0;
		goto done2;
	}
	if (itimerfix(&aitv.it_value)) {
		error = EINVAL;
		goto done2;
	}
	if (!timevalisset(&aitv.it_value)) {
		timevalclear(&aitv.it_interval);
	} else if (itimerfix(&aitv.it_interval)) {
		error = EINVAL;
		goto done2;
	}
	s = splclock();		/* XXX: still needed ? */
	if (uap->which == ITIMER_REAL) {
		if (timevalisset(&p->p_realtimer.it_value))
			callout_stop(&p->p_itcallout);
		if (timevalisset(&aitv.it_value))
			callout_reset(&p->p_itcallout, tvtohz(&aitv.it_value),
			    realitexpire, p);
		getmicrouptime(&ctv);
		timevaladd(&aitv.it_value, &ctv);
		p->p_realtimer = aitv;
	} else {
		p->p_stats->p_timer[uap->which] = aitv;
	}
	splx(s);
done2:
	mtx_unlock(&Giant);
	return (error);
}

/*
 * Real interval timer expired:
 * send process whose timer expired an alarm signal.
 * If time is not set up to reload, then just return.
 * Else compute next time timer should go off which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 * tvtohz() always adds 1 to allow for the time until the next clock
 * interrupt being strictly less than 1 clock tick, but we don't want
 * that here since we want to appear to be in sync with the clock
 * interrupt even when we're delayed.
 */
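/*
 * For example, assuming hz = 100, a periodic 10 ms ITIMER_REAL that is
 * serviced 35 ms late still produces a single SIGALRM from the psignal()
 * below; the loop then advances .it_value in 10 ms steps until it is in
 * the future again before rescheduling the callout.
 */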
void
realitexpire(arg)
	void *arg;
{
	register struct proc *p;
	struct timeval ctv, ntv;
	int s;

	p = (struct proc *)arg;
	PROC_LOCK(p);
	psignal(p, SIGALRM);
	if (!timevalisset(&p->p_realtimer.it_interval)) {
		timevalclear(&p->p_realtimer.it_value);
		PROC_UNLOCK(p);
		return;
	}
	for (;;) {
		s = splclock();		/* XXX: still needed ? */
		timevaladd(&p->p_realtimer.it_value,
		    &p->p_realtimer.it_interval);
		getmicrouptime(&ctv);
		if (timevalcmp(&p->p_realtimer.it_value, &ctv, >)) {
			ntv = p->p_realtimer.it_value;
			timevalsub(&ntv, &ctv);
			callout_reset(&p->p_itcallout, tvtohz(&ntv) - 1,
			    realitexpire, p);
			splx(s);
			PROC_UNLOCK(p);
			return;
		}
		splx(s);
	}
	/*NOTREACHED*/
}

/*
 * Check that a proposed value to load into the .it_value or
 * .it_interval part of an interval timer is acceptable, and
 * fix it to have at least minimal value (i.e. if it is less
 * than the resolution of the clock, round it up.)
 */
int
itimerfix(tv)
	struct timeval *tv;
{

	if (tv->tv_sec < 0 || tv->tv_sec > 100000000 ||
	    tv->tv_usec < 0 || tv->tv_usec >= 1000000)
		return (EINVAL);
	if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
		tv->tv_usec = tick;
	return (0);
}

/*
 * Decrement an interval timer by a specified number
 * of microseconds, which must be less than a second,
 * i.e. < 1000000.  If the timer expires, then reload
 * it.  In this case, carry over (usec - old value) to
 * reduce the value reloaded into the timer so that
 * the timer does not drift.  This routine assumes
 * that it is called in a context where the timers
 * on which it is operating cannot change in value.
 */
int
itimerdecr(itp, usec)
	register struct itimerval *itp;
	int usec;
{

	if (itp->it_value.tv_usec < usec) {
		if (itp->it_value.tv_sec == 0) {
			/* expired, and already in next interval */
			usec -= itp->it_value.tv_usec;
			goto expire;
		}
		itp->it_value.tv_usec += 1000000;
		itp->it_value.tv_sec--;
	}
	itp->it_value.tv_usec -= usec;
	usec = 0;
	if (timevalisset(&itp->it_value))
		return (1);
	/* expired, exactly at end of interval */
expire:
	if (timevalisset(&itp->it_interval)) {
		itp->it_value = itp->it_interval;
		itp->it_value.tv_usec -= usec;
		if (itp->it_value.tv_usec < 0) {
			itp->it_value.tv_usec += 1000000;
			itp->it_value.tv_sec--;
		}
	} else
		itp->it_value.tv_usec = 0;	/* sec is already 0 */
	return (0);
}

/*
 * Add and subtract routines for timevals.
 * N.B.: subtract routine doesn't deal with
 * results which are before the beginning,
 * it just gets very confused in this case.
 * Caveat emptor.
 */
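/*
 * For example, subtracting { 3, 200000 } from { 2, 100000 } with
 * timevalsub() leaves { -2, 900000 } (numerically -1.1 s): consistent,
 * but not a form most callers expect, hence the caveat above.
 */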
void
timevaladd(t1, t2)
	struct timeval *t1, *t2;
{

	t1->tv_sec += t2->tv_sec;
	t1->tv_usec += t2->tv_usec;
	timevalfix(t1);
}

void
timevalsub(t1, t2)
	struct timeval *t1, *t2;
{

	t1->tv_sec -= t2->tv_sec;
	t1->tv_usec -= t2->tv_usec;
	timevalfix(t1);
}

static void
timevalfix(t1)
	struct timeval *t1;
{

	if (t1->tv_usec < 0) {
		t1->tv_sec--;
		t1->tv_usec += 1000000;
	}
	if (t1->tv_usec >= 1000000) {
		t1->tv_sec++;
		t1->tv_usec -= 1000000;
	}
}