/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.1 (Berkeley) 6/10/93
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysproto.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/sysent.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/timetc.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

struct timezone tz;

/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */
static int  nanosleep1(struct thread *td, struct timespec *rqt,
                struct timespec *rmt);
static int  settime(struct thread *, struct timeval *);
static void timevalfix(struct timeval *);
static void no_lease_updatetime(int);

static void
no_lease_updatetime(int deltat)
{
}

void (*lease_updatetime)(int) = no_lease_updatetime;

static int
settime(struct thread *td, struct timeval *tv)
{
        struct timeval delta, tv1, tv2;
        static struct timeval maxtime, laststep;
        struct timespec ts;
        int s;

        s = splclock();
        microtime(&tv1);
        delta = *tv;
        timevalsub(&delta, &tv1);

        /*
         * If the system is secure, we do not allow the time to be
         * set to a value earlier than 1 second less than the highest
         * time we have yet seen.  The worst a miscreant can do in
         * this circumstance is "freeze" time; he cannot go
         * back to the past.
         *
         * We similarly do not allow the clock to be stepped more
         * than one second, nor more than once per second.  This allows
         * a miscreant to make the clock march double-time, but no worse.
         */
        if (securelevel_gt(td->td_ucred, 1) != 0) {
                if (delta.tv_sec < 0 || delta.tv_usec < 0) {
                        /*
                         * Update maxtime to latest time we've seen.
                         */
                        if (tv1.tv_sec > maxtime.tv_sec)
                                maxtime = tv1;
                        tv2 = *tv;
                        timevalsub(&tv2, &maxtime);
                        if (tv2.tv_sec < -1) {
                                tv->tv_sec = maxtime.tv_sec - 1;
                                printf("Time adjustment clamped to -1 second\n");
                        }
                } else {
                        if (tv1.tv_sec == laststep.tv_sec) {
                                splx(s);
                                return (EPERM);
                        }
                        if (delta.tv_sec > 1) {
                                tv->tv_sec = tv1.tv_sec + 1;
                                printf("Time adjustment clamped to +1 second\n");
                        }
                        laststep = *tv;
                }
        }

        ts.tv_sec = tv->tv_sec;
        ts.tv_nsec = tv->tv_usec * 1000;
        mtx_lock(&Giant);
        tc_setclock(&ts);
        (void) splsoftclock();
        lease_updatetime(delta.tv_sec);
        splx(s);
        resettodr();
        mtx_unlock(&Giant);
        return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct clock_gettime_args {
        clockid_t clock_id;
        struct timespec *tp;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
clock_gettime(struct thread *td, struct clock_gettime_args *uap)
{
        struct timespec ats;

        if (SCARG(uap, clock_id) != CLOCK_REALTIME)
                return (EINVAL);
        mtx_lock(&Giant);
        nanotime(&ats);
        mtx_unlock(&Giant);
        return (copyout(&ats, SCARG(uap, tp), sizeof(ats)));
}

#ifndef _SYS_SYSPROTO_H_
struct clock_settime_args {
        clockid_t clock_id;
        const struct timespec *tp;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
clock_settime(struct thread *td, struct clock_settime_args *uap)
{
        struct timeval atv;
        struct timespec ats;
        int error;

        if ((error = suser(td)) != 0)
                return (error);
        if (SCARG(uap, clock_id) != CLOCK_REALTIME)
                return (EINVAL);
        if ((error = copyin(SCARG(uap, tp), &ats, sizeof(ats))) != 0)
                return (error);
        if (ats.tv_nsec < 0 || ats.tv_nsec >= 1000000000)
                return (EINVAL);
        /* XXX Don't convert nsec->usec and back */
        TIMESPEC_TO_TIMEVAL(&atv, &ats);
        error = settime(td, &atv);
        return (error);
}
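
/*
 * Illustrative userland sketch (not part of this file; the error
 * handling and values are assumptions): how the clock_gettime() and
 * clock_settime() entry points above are typically exercised from a
 * program.  Only CLOCK_REALTIME is accepted here, and setting the
 * clock additionally requires superuser credentials (and is clamped
 * by settime() at high securelevels).
 *
 *      #include <stdio.h>
 *      #include <time.h>
 *
 *      struct timespec ts;
 *
 *      if (clock_gettime(CLOCK_REALTIME, &ts) == 0) {
 *              ts.tv_sec += 1;                 // propose a time 1 s ahead
 *              if (clock_settime(CLOCK_REALTIME, &ts) == -1)
 *                      perror("clock_settime");  // EPERM without privilege
 *      }
 */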

#ifndef _SYS_SYSPROTO_H_
struct clock_getres_args {
        clockid_t clock_id;
        struct timespec *tp;
};
#endif

int
clock_getres(struct thread *td, struct clock_getres_args *uap)
{
        struct timespec ts;
        int error;

        if (SCARG(uap, clock_id) != CLOCK_REALTIME)
                return (EINVAL);
        error = 0;
        if (SCARG(uap, tp)) {
                ts.tv_sec = 0;
                /*
                 * Round up the result of the division cheaply by adding 1.
                 * Rounding up is especially important if rounding down
                 * would give 0.  Perfect rounding is unimportant.
                 */
                ts.tv_nsec = 1000000000 / tc_getfrequency() + 1;
                error = copyout(&ts, SCARG(uap, tp), sizeof(ts));
        }
        return (error);
}

static int nanowait;

static int
nanosleep1(struct thread *td, struct timespec *rqt, struct timespec *rmt)
{
        struct timespec ts, ts2, ts3;
        struct timeval tv;
        int error;

        if (rqt->tv_nsec < 0 || rqt->tv_nsec >= 1000000000)
                return (EINVAL);
        if (rqt->tv_sec < 0 || (rqt->tv_sec == 0 && rqt->tv_nsec == 0))
                return (0);
        getnanouptime(&ts);
        timespecadd(&ts, rqt);
        TIMESPEC_TO_TIMEVAL(&tv, rqt);
        for (;;) {
                error = tsleep(&nanowait, PWAIT | PCATCH, "nanslp",
                    tvtohz(&tv));
                getnanouptime(&ts2);
                if (error != EWOULDBLOCK) {
                        if (error == ERESTART)
                                error = EINTR;
                        if (rmt != NULL) {
                                timespecsub(&ts, &ts2);
                                if (ts.tv_sec < 0)
                                        timespecclear(&ts);
                                *rmt = ts;
                        }
                        return (error);
                }
                if (timespeccmp(&ts2, &ts, >=))
                        return (0);
                ts3 = ts;
                timespecsub(&ts3, &ts2);
                TIMESPEC_TO_TIMEVAL(&tv, &ts3);
        }
}

#ifndef _SYS_SYSPROTO_H_
struct nanosleep_args {
        struct timespec *rqtp;
        struct timespec *rmtp;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
nanosleep(struct thread *td, struct nanosleep_args *uap)
{
        struct timespec rmt, rqt;
        int error;

        error = copyin(SCARG(uap, rqtp), &rqt, sizeof(rqt));
        if (error)
                return (error);

        mtx_lock(&Giant);
        if (SCARG(uap, rmtp)) {
                if (!useracc((caddr_t)SCARG(uap, rmtp), sizeof(rmt),
                    VM_PROT_WRITE)) {
                        error = EFAULT;
                        goto done2;
                }
        }
        error = nanosleep1(td, &rqt, &rmt);
        if (error && SCARG(uap, rmtp)) {
                int error2;

                error2 = copyout(&rmt, SCARG(uap, rmtp), sizeof(rmt));
                if (error2)     /* XXX shouldn't happen, did useracc() above */
                        error = error2;
        }
done2:
        mtx_unlock(&Giant);
        return (error);
}
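
/*
 * Illustrative userland sketch (not part of this file; the durations
 * are assumptions): the intended use of the rmtp argument filled in by
 * nanosleep() above.  When the sleep is interrupted by a signal the
 * unslept time is copied out, so the caller can resume sleeping for
 * the remainder.
 *
 *      #include <errno.h>
 *      #include <time.h>
 *
 *      struct timespec req = { 2, 500000000 };         // 2.5 seconds
 *      struct timespec rem;
 *
 *      while (nanosleep(&req, &rem) == -1 && errno == EINTR)
 *              req = rem;                              // sleep for what is left
 */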

#ifndef _SYS_SYSPROTO_H_
struct gettimeofday_args {
        struct timeval *tp;
        struct timezone *tzp;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
gettimeofday(struct thread *td, struct gettimeofday_args *uap)
{
        struct timeval atv;
        int error = 0;

        if (uap->tp) {
                microtime(&atv);
                error = copyout(&atv, uap->tp, sizeof (atv));
        }
        if (error == 0 && uap->tzp != NULL) {
                mtx_lock(&Giant);
                error = copyout(&tz, uap->tzp, sizeof (tz));
                mtx_unlock(&Giant);
        }
        return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct settimeofday_args {
        struct timeval *tv;
        struct timezone *tzp;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
settimeofday(struct thread *td, struct settimeofday_args *uap)
{
        struct timeval atv;
        struct timezone atz;
        int error = 0;

        if ((error = suser(td)))
                return (error);
        /* Verify all parameters before changing time. */
        if (uap->tv) {
                if ((error = copyin(uap->tv, &atv, sizeof(atv))))
                        return (error);
                if (atv.tv_usec < 0 || atv.tv_usec >= 1000000)
                        return (EINVAL);
        }
        if (uap->tzp &&
            (error = copyin(uap->tzp, &atz, sizeof(atz))))
                return (error);

        if (uap->tv && (error = settime(td, &atv)))
                return (error);
        if (uap->tzp) {
                mtx_lock(&Giant);
                tz = atz;
                mtx_unlock(&Giant);
        }
        return (error);
}

/*
 * Get value of an interval timer.  The process virtual and
 * profiling virtual time timers are kept in the p_stats area, since
 * they can be swapped out.  These are kept internally in the
 * way they are specified externally: in time until they expire.
 *
 * The real time interval timer is kept in the process table slot
 * for the process, and its value (it_value) is kept as an
 * absolute time rather than as a delta, so that it is easy to keep
 * periodic real-time signals from drifting.
 *
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a timeout
 * routine, called from the softclock() routine.  Since a callout
 * may be delayed in real time due to interrupt processing in the system,
 * it is possible for the real time timeout routine (realitexpire, given below)
 * to be delayed in real time past when it is supposed to occur.  It
 * does not suffice, therefore, to reload the real timer .it_value from the
 * real time timer's .it_interval.  Rather, we compute the next time in
 * absolute time the timer should go off.
 */
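
/*
 * Illustrative userland sketch (not part of this file; the handler
 * name and periods are assumptions): arming a periodic real-time
 * interval timer serviced by getitimer()/setitimer() and
 * realitexpire() below.  A non-zero it_interval makes the timer
 * re-arm itself each time it fires, delivering SIGALRM on every
 * expiry.
 *
 *      #include <sys/time.h>
 *      #include <signal.h>
 *
 *      extern void handler(int);               // assumed to exist elsewhere
 *      struct itimerval itv;
 *
 *      signal(SIGALRM, handler);
 *      itv.it_value.tv_sec = 1;                // first expiry after 1 s
 *      itv.it_value.tv_usec = 0;
 *      itv.it_interval.tv_sec = 0;             // then every 250 ms
 *      itv.it_interval.tv_usec = 250000;
 *      setitimer(ITIMER_REAL, &itv, NULL);
 */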
#ifndef _SYS_SYSPROTO_H_
struct getitimer_args {
        u_int which;
        struct itimerval *itv;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
getitimer(struct thread *td, struct getitimer_args *uap)
{
        struct proc *p = td->td_proc;
        struct timeval ctv;
        struct itimerval aitv;
        int s;
        int error;

        if (uap->which > ITIMER_PROF)
                return (EINVAL);

        mtx_lock(&Giant);

        s = splclock(); /* XXX still needed? */
        if (uap->which == ITIMER_REAL) {
                /*
                 * Convert from absolute to relative time in .it_value
                 * part of real time timer.  If the time for the real
                 * time timer has passed, return 0; else return the
                 * difference between the current time and the time the
                 * timer is set to go off.
                 */
                aitv = p->p_realtimer;
                if (timevalisset(&aitv.it_value)) {
                        getmicrouptime(&ctv);
                        if (timevalcmp(&aitv.it_value, &ctv, <))
                                timevalclear(&aitv.it_value);
                        else
                                timevalsub(&aitv.it_value, &ctv);
                }
        } else {
                aitv = p->p_stats->p_timer[uap->which];
        }
        splx(s);
        error = copyout(&aitv, uap->itv, sizeof (struct itimerval));
        mtx_unlock(&Giant);
        return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct setitimer_args {
        u_int which;
        struct itimerval *itv, *oitv;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
setitimer(struct thread *td, struct setitimer_args *uap)
{
        struct proc *p = td->td_proc;
        struct itimerval aitv;
        struct timeval ctv;
        struct itimerval *itvp;
        int s, error = 0;

        if (uap->which > ITIMER_PROF)
                return (EINVAL);
        itvp = uap->itv;
        if (itvp && (error = copyin(itvp, &aitv, sizeof(struct itimerval))))
                return (error);

        mtx_lock(&Giant);

        if ((uap->itv = uap->oitv) &&
            (error = getitimer(td, (struct getitimer_args *)uap))) {
                goto done2;
        }
        if (itvp == 0) {
                error = 0;
                goto done2;
        }
        if (itimerfix(&aitv.it_value)) {
                error = EINVAL;
                goto done2;
        }
        if (!timevalisset(&aitv.it_value)) {
                timevalclear(&aitv.it_interval);
        } else if (itimerfix(&aitv.it_interval)) {
                error = EINVAL;
                goto done2;
        }
        s = splclock(); /* XXX: still needed? */
        if (uap->which == ITIMER_REAL) {
                if (timevalisset(&p->p_realtimer.it_value))
                        callout_stop(&p->p_itcallout);
                if (timevalisset(&aitv.it_value))
                        callout_reset(&p->p_itcallout, tvtohz(&aitv.it_value),
                            realitexpire, p);
                getmicrouptime(&ctv);
                timevaladd(&aitv.it_value, &ctv);
                p->p_realtimer = aitv;
        } else {
                p->p_stats->p_timer[uap->which] = aitv;
        }
        splx(s);
done2:
        mtx_unlock(&Giant);
        return (error);
}

/*
 * Real interval timer expired:
 * send process whose timer expired an alarm signal.
 * If time is not set up to reload, then just return.
 * Else compute next time timer should go off which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 * tvtohz() always adds 1 to allow for the time until the next clock
 * interrupt being strictly less than 1 clock tick, but we don't want
 * that here since we want to appear to be in sync with the clock
 * interrupt even when we're delayed.
 */
void
realitexpire(void *arg)
{
        struct proc *p;
        struct timeval ctv, ntv;
        int s;

        p = (struct proc *)arg;
        PROC_LOCK(p);
        psignal(p, SIGALRM);
        if (!timevalisset(&p->p_realtimer.it_interval)) {
                timevalclear(&p->p_realtimer.it_value);
                PROC_UNLOCK(p);
                return;
        }
        for (;;) {
                s = splclock(); /* XXX: still needed? */
                timevaladd(&p->p_realtimer.it_value,
                    &p->p_realtimer.it_interval);
                getmicrouptime(&ctv);
                if (timevalcmp(&p->p_realtimer.it_value, &ctv, >)) {
                        ntv = p->p_realtimer.it_value;
                        timevalsub(&ntv, &ctv);
                        callout_reset(&p->p_itcallout, tvtohz(&ntv) - 1,
                            realitexpire, p);
                        splx(s);
                        PROC_UNLOCK(p);
                        return;
                }
                splx(s);
        }
        /*NOTREACHED*/
}
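
/*
 * Worked example (illustrative figures): suppose it_interval is 0.5 s
 * and the callout servicing realitexpire() runs 1.3 s after the timer
 * was due.  The loop above adds 0.5 s to the absolute it_value three
 * times before it exceeds the current uptime, so the missed periods
 * are compressed into the single SIGALRM already sent, and the next
 * expiry stays on the original 0.5 s grid instead of drifting by the
 * amount of the delay.
 */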

/*
 * Check that a proposed value to load into the .it_value or
 * .it_interval part of an interval timer is acceptable, and
 * fix it to have at least minimal value (i.e. if it is less
 * than the resolution of the clock, round it up).
 */
int
itimerfix(struct timeval *tv)
{

        if (tv->tv_sec < 0 || tv->tv_sec > 100000000 ||
            tv->tv_usec < 0 || tv->tv_usec >= 1000000)
                return (EINVAL);
        if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
                tv->tv_usec = tick;
        return (0);
}

/*
 * Decrement an interval timer by a specified number
 * of microseconds, which must be less than a second,
 * i.e. < 1000000.  If the timer expires, then reload
 * it.  In this case, carry over (usec - old value) to
 * reduce the value reloaded into the timer so that
 * the timer does not drift.  This routine assumes
 * that it is called in a context where the timers
 * on which it is operating cannot change in value.
 */
int
itimerdecr(struct itimerval *itp, int usec)
{

        if (itp->it_value.tv_usec < usec) {
                if (itp->it_value.tv_sec == 0) {
                        /* expired, and already in next interval */
                        usec -= itp->it_value.tv_usec;
                        goto expire;
                }
                itp->it_value.tv_usec += 1000000;
                itp->it_value.tv_sec--;
        }
        itp->it_value.tv_usec -= usec;
        usec = 0;
        if (timevalisset(&itp->it_value))
                return (1);
        /* expired, exactly at end of interval */
expire:
        if (timevalisset(&itp->it_interval)) {
                itp->it_value = itp->it_interval;
                itp->it_value.tv_usec -= usec;
                if (itp->it_value.tv_usec < 0) {
                        itp->it_value.tv_usec += 1000000;
                        itp->it_value.tv_sec--;
                }
        } else
                itp->it_value.tv_usec = 0;      /* sec is already 0 */
        return (0);
}

/*
 * Add and subtract routines for timevals.
 * N.B.: subtract routine doesn't deal with
 * results which are before the beginning;
 * it just gets very confused in this case.
 * Caveat emptor.
 */
void
timevaladd(struct timeval *t1, struct timeval *t2)
{

        t1->tv_sec += t2->tv_sec;
        t1->tv_usec += t2->tv_usec;
        timevalfix(t1);
}

void
timevalsub(struct timeval *t1, struct timeval *t2)
{

        t1->tv_sec -= t2->tv_sec;
        t1->tv_usec -= t2->tv_usec;
        timevalfix(t1);
}

static void
timevalfix(struct timeval *t1)
{

        if (t1->tv_usec < 0) {
                t1->tv_sec--;
                t1->tv_usec += 1000000;
        }
        if (t1->tv_usec >= 1000000) {
                t1->tv_sec++;
                t1->tv_usec -= 1000000;
        }
}
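
/*
 * Worked example (illustrative figures): subtracting 1.900000 s from
 * 3.200000 s with timevalsub() first yields tv_sec = 2,
 * tv_usec = -700000; timevalfix() then borrows one second, giving the
 * normalized result tv_sec = 1, tv_usec = 300000.
 */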