/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/clock.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysproto.h>
#include <sys/eventhandler.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/posix4.h>
#include <sys/time.h>
#include <sys/timers.h>
#include <sys/timetc.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

#define	MAX_CLOCKS	(CLOCK_MONOTONIC+1)
#define	CPUCLOCK_BIT		0x80000000
#define	CPUCLOCK_PROCESS_BIT	0x40000000
#define	CPUCLOCK_ID_MASK	(~(CPUCLOCK_BIT|CPUCLOCK_PROCESS_BIT))
#define	MAKE_THREAD_CPUCLOCK(tid)	(CPUCLOCK_BIT|(tid))
#define	MAKE_PROCESS_CPUCLOCK(pid)	\
	(CPUCLOCK_BIT|CPUCLOCK_PROCESS_BIT|(pid))

static struct kclock	posix_clocks[MAX_CLOCKS];
static uma_zone_t	itimer_zone = NULL;

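/*
 * Illustrative note (added commentary, not part of the original file):
 * CPU-time clock IDs are negative when viewed as a signed clockid_t,
 * because CPUCLOCK_BIT is the sign bit.  For example:
 *
 *	MAKE_THREAD_CPUCLOCK(100)  == 0x80000064
 *	MAKE_PROCESS_CPUCLOCK(100) == 0xc0000064
 *
 * kern_clock_gettime() relies on this encoding: any clock_id that is
 * negative as an int is dispatched to get_cputime() rather than being
 * rejected as an out-of-range clock.
 */
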
/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */

static int	settime(struct thread *, struct timeval *);
static void	timevalfix(struct timeval *);

static void	itimer_start(void);
static int	itimer_init(void *, int, int);
static void	itimer_fini(void *, int);
static void	itimer_enter(struct itimer *);
static void	itimer_leave(struct itimer *);
static struct itimer *itimer_find(struct proc *, int);
static void	itimers_alloc(struct proc *);
static void	itimers_event_hook_exec(void *arg, struct proc *p,
		    struct image_params *imgp);
static void	itimers_event_hook_exit(void *arg, struct proc *p);
static int	realtimer_create(struct itimer *);
static int	realtimer_gettime(struct itimer *, struct itimerspec *);
static int	realtimer_settime(struct itimer *, int,
		    struct itimerspec *, struct itimerspec *);
static int	realtimer_delete(struct itimer *);
static void	realtimer_clocktime(clockid_t, struct timespec *);
static void	realtimer_expire(void *);
static int	kern_timer_create(struct thread *, clockid_t,
		    struct sigevent *, int *, int);
static int	kern_timer_delete(struct thread *, int);

int		register_posix_clock(int, struct kclock *);
void		itimer_fire(struct itimer *it);
int		itimespecfix(struct timespec *ts);

#define	CLOCK_CALL(clock, call, arglist)		\
	((*posix_clocks[clock].call) arglist)

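/*
 * Illustrative note (added commentary): CLOCK_CALL() is a simple
 * indirect dispatch through the posix_clocks[] table.  For example,
 *
 *	CLOCK_CALL(clock_id, timer_create, (it));
 *
 * expands to
 *
 *	((*posix_clocks[clock_id].timer_create) (it));
 *
 * so each kclock registered via register_posix_clock() supplies its
 * own timer backend.
 */
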
SYSINIT(posix_timer, SI_SUB_P1003_1B, SI_ORDER_FIRST+4, itimer_start, NULL);


static int
settime(struct thread *td, struct timeval *tv)
{
	struct timeval delta, tv1, tv2;
	static struct timeval maxtime, laststep;
	struct timespec ts;
	int s;

	s = splclock();
	microtime(&tv1);
	delta = *tv;
	timevalsub(&delta, &tv1);

	/*
	 * If the system is secure, we do not allow the time to be
	 * set to a value earlier than 1 second less than the highest
	 * time we have yet seen.  The worst a miscreant can do in
	 * this circumstance is "freeze" time.  He couldn't go
	 * back to the past.
	 *
	 * We similarly do not allow the clock to be stepped more
	 * than one second, nor more than once per second.  This allows
	 * a miscreant to make the clock march double-time, but no worse.
	 */
	if (securelevel_gt(td->td_ucred, 1) != 0) {
		if (delta.tv_sec < 0 || delta.tv_usec < 0) {
			/*
			 * Update maxtime to latest time we've seen.
			 */
			if (tv1.tv_sec > maxtime.tv_sec)
				maxtime = tv1;
			tv2 = *tv;
			timevalsub(&tv2, &maxtime);
			if (tv2.tv_sec < -1) {
				tv->tv_sec = maxtime.tv_sec - 1;
				printf("Time adjustment clamped to -1 second\n");
			}
		} else {
			if (tv1.tv_sec == laststep.tv_sec) {
				splx(s);
				return (EPERM);
			}
			if (delta.tv_sec > 1) {
				tv->tv_sec = tv1.tv_sec + 1;
				printf("Time adjustment clamped to +1 second\n");
			}
			laststep = *tv;
		}
	}

	ts.tv_sec = tv->tv_sec;
	ts.tv_nsec = tv->tv_usec * 1000;
	mtx_lock(&Giant);
	tc_setclock(&ts);
	resettodr();
	mtx_unlock(&Giant);
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct clock_getcpuclockid2_args {
	id_t id;
	int which;
	clockid_t *clock_id;
};
#endif
/* ARGSUSED */
int
sys_clock_getcpuclockid2(struct thread *td, struct clock_getcpuclockid2_args *uap)
{
	clockid_t clk_id;
	struct proc *p;
	pid_t pid;
	lwpid_t tid;
	int error;

	switch (uap->which) {
	case CPUCLOCK_WHICH_PID:
		if (uap->id != 0) {
			p = pfind(uap->id);
			if (p == NULL)
				return (ESRCH);
			error = p_cansee(td, p);
			PROC_UNLOCK(p);
			if (error)
				return (error);
			pid = uap->id;
		} else {
			pid = td->td_proc->p_pid;
		}
		clk_id = MAKE_PROCESS_CPUCLOCK(pid);
		break;
	case CPUCLOCK_WHICH_TID:
		if (uap->id == 0)
			tid = td->td_tid;
		else
			tid = uap->id;
		clk_id = MAKE_THREAD_CPUCLOCK(tid);
		break;
	default:
		return (EINVAL);
	}
	return (copyout(&clk_id, uap->clock_id, sizeof(clockid_t)));
}

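/*
 * Illustrative note (added commentary): from userland the syscall
 * above is typically reached through clock_getcpuclockid(3).  A
 * minimal sketch, assuming the standard POSIX prototypes:
 *
 *	clockid_t clk;
 *	struct timespec ts;
 *
 *	if (clock_getcpuclockid(pid, &clk) == 0 &&
 *	    clock_gettime(clk, &ts) == 0)
 *		printf("%jd.%09ld\n", (intmax_t)ts.tv_sec, ts.tv_nsec);
 */
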
#ifndef _SYS_SYSPROTO_H_
struct clock_gettime_args {
	clockid_t clock_id;
	struct	timespec *tp;
};
#endif
/* ARGSUSED */
int
sys_clock_gettime(struct thread *td, struct clock_gettime_args *uap)
{
	struct timespec ats;
	int error;

	error = kern_clock_gettime(td, uap->clock_id, &ats);
	if (error == 0)
		error = copyout(&ats, uap->tp, sizeof(ats));

	return (error);
}

static inline void
cputick2timespec(uint64_t runtime, struct timespec *ats)
{
	runtime = cputick2usec(runtime);
	ats->tv_sec = runtime / 1000000;
	ats->tv_nsec = runtime % 1000000 * 1000;
}

static void
get_thread_cputime(struct thread *targettd, struct timespec *ats)
{
	uint64_t runtime, curtime, switchtime;

	if (targettd == NULL) {	/* current thread */
		critical_enter();
		switchtime = PCPU_GET(switchtime);
		curtime = cpu_ticks();
		runtime = curthread->td_runtime;
		critical_exit();
		runtime += curtime - switchtime;
	} else {
		thread_lock(targettd);
		runtime = targettd->td_runtime;
		thread_unlock(targettd);
	}
	cputick2timespec(runtime, ats);
}

static void
get_process_cputime(struct proc *targetp, struct timespec *ats)
{
	uint64_t runtime;
	struct rusage ru;

	PROC_SLOCK(targetp);
	rufetch(targetp, &ru);
	runtime = targetp->p_rux.rux_runtime;
	PROC_SUNLOCK(targetp);
	cputick2timespec(runtime, ats);
}

static int
get_cputime(struct thread *td, clockid_t clock_id, struct timespec *ats)
{
	struct proc *p, *p2;
	struct thread *td2;
	lwpid_t tid;
	pid_t pid;
	int error;

	p = td->td_proc;
	if ((clock_id & CPUCLOCK_PROCESS_BIT) == 0) {
		tid = clock_id & CPUCLOCK_ID_MASK;
		td2 = tdfind(tid, p->p_pid);
		if (td2 == NULL)
			return (EINVAL);
		get_thread_cputime(td2, ats);
		PROC_UNLOCK(td2->td_proc);
	} else {
		pid = clock_id & CPUCLOCK_ID_MASK;
		p2 = pfind(pid);
		if (p2 == NULL)
			return (EINVAL);
		error = p_cansee(td, p2);
		if (error) {
			PROC_UNLOCK(p2);
			return (EINVAL);
		}
		get_process_cputime(p2, ats);
		PROC_UNLOCK(p2);
	}
	return (0);
}

int
kern_clock_gettime(struct thread *td, clockid_t clock_id, struct timespec *ats)
{
	struct timeval sys, user;
	struct proc *p;

	p = td->td_proc;
	switch (clock_id) {
	case CLOCK_REALTIME:		/* Default to precise. */
	case CLOCK_REALTIME_PRECISE:
		nanotime(ats);
		break;
	case CLOCK_REALTIME_FAST:
		getnanotime(ats);
		break;
	case CLOCK_VIRTUAL:
		PROC_LOCK(p);
		PROC_SLOCK(p);
		calcru(p, &user, &sys);
		PROC_SUNLOCK(p);
		PROC_UNLOCK(p);
		TIMEVAL_TO_TIMESPEC(&user, ats);
		break;
	case CLOCK_PROF:
		PROC_LOCK(p);
		PROC_SLOCK(p);
		calcru(p, &user, &sys);
		PROC_SUNLOCK(p);
		PROC_UNLOCK(p);
		timevaladd(&user, &sys);
		TIMEVAL_TO_TIMESPEC(&user, ats);
		break;
	case CLOCK_MONOTONIC:		/* Default to precise. */
	case CLOCK_MONOTONIC_PRECISE:
	case CLOCK_UPTIME:
	case CLOCK_UPTIME_PRECISE:
		nanouptime(ats);
		break;
	case CLOCK_UPTIME_FAST:
	case CLOCK_MONOTONIC_FAST:
		getnanouptime(ats);
		break;
	case CLOCK_SECOND:
		ats->tv_sec = time_second;
		ats->tv_nsec = 0;
		break;
	case CLOCK_THREAD_CPUTIME_ID:
		get_thread_cputime(NULL, ats);
		break;
	case CLOCK_PROCESS_CPUTIME_ID:
		PROC_LOCK(p);
		get_process_cputime(p, ats);
		PROC_UNLOCK(p);
		break;
	default:
		if ((int)clock_id >= 0)
			return (EINVAL);
		return (get_cputime(td, clock_id, ats));
	}
	return (0);
}

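/*
 * Illustrative note (added commentary): the _FAST clock variants trade
 * precision for speed by returning the cached timehands value
 * (getnanotime()/getnanouptime()), which is only refreshed when the
 * timecounter winds up, while the default and _PRECISE variants read
 * the hardware timecounter on every call (nanotime()/nanouptime()).
 * CLOCK_SECOND is cheaper still: it just copies time_second.
 */
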
#ifndef _SYS_SYSPROTO_H_
struct clock_settime_args {
	clockid_t clock_id;
	const struct	timespec *tp;
};
#endif
/* ARGSUSED */
int
sys_clock_settime(struct thread *td, struct clock_settime_args *uap)
{
	struct timespec ats;
	int error;

	if ((error = copyin(uap->tp, &ats, sizeof(ats))) != 0)
		return (error);
	return (kern_clock_settime(td, uap->clock_id, &ats));
}

int
kern_clock_settime(struct thread *td, clockid_t clock_id, struct timespec *ats)
{
	struct timeval atv;
	int error;

	if ((error = priv_check(td, PRIV_CLOCK_SETTIME)) != 0)
		return (error);
	if (clock_id != CLOCK_REALTIME)
		return (EINVAL);
	if (ats->tv_nsec < 0 || ats->tv_nsec >= 1000000000)
		return (EINVAL);
	/* XXX Don't convert nsec->usec and back */
	TIMESPEC_TO_TIMEVAL(&atv, ats);
	error = settime(td, &atv);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct clock_getres_args {
	clockid_t clock_id;
	struct	timespec *tp;
};
#endif
int
sys_clock_getres(struct thread *td, struct clock_getres_args *uap)
{
	struct timespec ts;
	int error;

	if (uap->tp == NULL)
		return (0);

	error = kern_clock_getres(td, uap->clock_id, &ts);
	if (error == 0)
		error = copyout(&ts, uap->tp, sizeof(ts));
	return (error);
}

int
kern_clock_getres(struct thread *td, clockid_t clock_id, struct timespec *ts)
{

	ts->tv_sec = 0;
	switch (clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_REALTIME_FAST:
	case CLOCK_REALTIME_PRECISE:
	case CLOCK_MONOTONIC:
	case CLOCK_MONOTONIC_FAST:
	case CLOCK_MONOTONIC_PRECISE:
	case CLOCK_UPTIME:
	case CLOCK_UPTIME_FAST:
	case CLOCK_UPTIME_PRECISE:
		/*
		 * Round up the result of the division cheaply by adding 1.
		 * Rounding up is especially important if rounding down
		 * would give 0.  Perfect rounding is unimportant.
		 */
		ts->tv_nsec = 1000000000 / tc_getfrequency() + 1;
		break;
	case CLOCK_VIRTUAL:
	case CLOCK_PROF:
		/* Accurately round up here because we can do so cheaply. */
		ts->tv_nsec = (1000000000 + hz - 1) / hz;
		break;
	case CLOCK_SECOND:
		ts->tv_sec = 1;
		ts->tv_nsec = 0;
		break;
	case CLOCK_THREAD_CPUTIME_ID:
	case CLOCK_PROCESS_CPUTIME_ID:
	cputime:
		/* sync with cputick2usec */
		ts->tv_nsec = 1000000 / cpu_tickrate();
		if (ts->tv_nsec == 0)
			ts->tv_nsec = 1000;
		break;
	default:
		if ((int)clock_id < 0)
			goto cputime;
		return (EINVAL);
	}
	return (0);
}

static int nanowait;

int
kern_nanosleep(struct thread *td, struct timespec *rqt, struct timespec *rmt)
{
	struct timespec ts, ts2, ts3;
	struct timeval tv;
	int error;

	if (rqt->tv_nsec < 0 || rqt->tv_nsec >= 1000000000)
		return (EINVAL);
	if (rqt->tv_sec < 0 || (rqt->tv_sec == 0 && rqt->tv_nsec == 0))
		return (0);
	getnanouptime(&ts);
	timespecadd(&ts, rqt);
	TIMESPEC_TO_TIMEVAL(&tv, rqt);
	for (;;) {
		error = tsleep(&nanowait, PWAIT | PCATCH, "nanslp",
		    tvtohz(&tv));
		getnanouptime(&ts2);
		if (error != EWOULDBLOCK) {
			if (error == ERESTART)
				error = EINTR;
			if (rmt != NULL) {
				timespecsub(&ts, &ts2);
				if (ts.tv_sec < 0)
					timespecclear(&ts);
				*rmt = ts;
			}
			return (error);
		}
		if (timespeccmp(&ts2, &ts, >=))
			return (0);
		ts3 = ts;
		timespecsub(&ts3, &ts2);
		TIMESPEC_TO_TIMEVAL(&tv, &ts3);
	}
}

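/*
 * Illustrative note (added commentary): kern_nanosleep() sleeps on the
 * shared &nanowait channel and re-checks the uptime clock after every
 * wakeup, so a premature wakeup simply shortens the next tsleep().  A
 * userland sketch of the remainder contract, assuming the standard
 * nanosleep(2) prototype:
 *
 *	struct timespec req = { 1, 500000000 }, rem;
 *
 *	while (nanosleep(&req, &rem) == -1 && errno == EINTR)
 *		req = rem;	(resume the unslept portion)
 */
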
#ifndef _SYS_SYSPROTO_H_
struct nanosleep_args {
	struct	timespec *rqtp;
	struct	timespec *rmtp;
};
#endif
/* ARGSUSED */
int
sys_nanosleep(struct thread *td, struct nanosleep_args *uap)
{
	struct timespec rmt, rqt;
	int error;

	error = copyin(uap->rqtp, &rqt, sizeof(rqt));
	if (error)
		return (error);

	if (uap->rmtp &&
	    !useracc((caddr_t)uap->rmtp, sizeof(rmt), VM_PROT_WRITE))
		return (EFAULT);
	error = kern_nanosleep(td, &rqt, &rmt);
	if (error && uap->rmtp) {
		int error2;

		error2 = copyout(&rmt, uap->rmtp, sizeof(rmt));
		if (error2)
			error = error2;
	}
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct gettimeofday_args {
	struct	timeval *tp;
	struct	timezone *tzp;
};
#endif
/* ARGSUSED */
int
sys_gettimeofday(struct thread *td, struct gettimeofday_args *uap)
{
	struct timeval atv;
	struct timezone rtz;
	int error = 0;

	if (uap->tp) {
		microtime(&atv);
		error = copyout(&atv, uap->tp, sizeof (atv));
	}
	if (error == 0 && uap->tzp != NULL) {
		rtz.tz_minuteswest = tz_minuteswest;
		rtz.tz_dsttime = tz_dsttime;
		error = copyout(&rtz, uap->tzp, sizeof (rtz));
	}
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct settimeofday_args {
	struct	timeval *tv;
	struct	timezone *tzp;
};
#endif
/* ARGSUSED */
int
sys_settimeofday(struct thread *td, struct settimeofday_args *uap)
{
	struct timeval atv, *tvp;
	struct timezone atz, *tzp;
	int error;

	if (uap->tv) {
		error = copyin(uap->tv, &atv, sizeof(atv));
		if (error)
			return (error);
		tvp = &atv;
	} else
		tvp = NULL;
	if (uap->tzp) {
		error = copyin(uap->tzp, &atz, sizeof(atz));
		if (error)
			return (error);
		tzp = &atz;
	} else
		tzp = NULL;
	return (kern_settimeofday(td, tvp, tzp));
}

int
kern_settimeofday(struct thread *td, struct timeval *tv, struct timezone *tzp)
{
	int error;

	error = priv_check(td, PRIV_SETTIMEOFDAY);
	if (error)
		return (error);
	/* Verify all parameters before changing time. */
	if (tv) {
		if (tv->tv_usec < 0 || tv->tv_usec >= 1000000)
			return (EINVAL);
		error = settime(td, tv);
	}
	if (tzp && error == 0) {
		tz_minuteswest = tzp->tz_minuteswest;
		tz_dsttime = tzp->tz_dsttime;
	}
	return (error);
}

/*
 * Get value of an interval timer.  The process virtual and profiling virtual
 * time timers are kept in the p_stats area, since they can be swapped out.
 * These are kept internally in the way they are specified externally: in
 * time until they expire.
 *
 * The real time interval timer is kept in the process table slot for the
 * process, and its value (it_value) is kept as an absolute time rather than
 * as a delta, so that it is easy to keep periodic real-time signals from
 * drifting.
 *
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a timeout routine,
 * called from the softclock() routine.  Since a callout may be delayed in
 * real time due to interrupt processing in the system, it is possible for
 * the real time timeout routine (realitexpire, given below), to be delayed
 * in real time past when it is supposed to occur.  It does not suffice,
 * therefore, to reload the real timer .it_value from the real time timers
 * .it_interval.  Rather, we compute the next time in absolute time the timer
 * should go off.
 */

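/*
 * Illustrative note (added commentary): keeping it_value absolute makes
 * the periodic reload drift-free.  In pseudo-form, the reload step in
 * realitexpire() below is
 *
 *	it_value += it_interval;	(next absolute expiry)
 *	arm callout for (it_value - now);
 *
 * rather than re-arming for a fixed relative it_interval, which would
 * accumulate callout latency on every expiration.
 */
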
#ifndef _SYS_SYSPROTO_H_
struct getitimer_args {
	u_int	which;
	struct	itimerval *itv;
};
#endif
int
sys_getitimer(struct thread *td, struct getitimer_args *uap)
{
	struct itimerval aitv;
	int error;

	error = kern_getitimer(td, uap->which, &aitv);
	if (error != 0)
		return (error);
	return (copyout(&aitv, uap->itv, sizeof (struct itimerval)));
}

int
kern_getitimer(struct thread *td, u_int which, struct itimerval *aitv)
{
	struct proc *p = td->td_proc;
	struct timeval ctv;

	if (which > ITIMER_PROF)
		return (EINVAL);

	if (which == ITIMER_REAL) {
		/*
		 * Convert from absolute to relative time in .it_value
		 * part of real time timer.  If time for real time timer
		 * has passed return 0, else return difference between
		 * current time and time for the timer to go off.
		 */
		PROC_LOCK(p);
		*aitv = p->p_realtimer;
		PROC_UNLOCK(p);
		if (timevalisset(&aitv->it_value)) {
			getmicrouptime(&ctv);
			if (timevalcmp(&aitv->it_value, &ctv, <))
				timevalclear(&aitv->it_value);
			else
				timevalsub(&aitv->it_value, &ctv);
		}
	} else {
		PROC_SLOCK(p);
		*aitv = p->p_stats->p_timer[which];
		PROC_SUNLOCK(p);
	}
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct setitimer_args {
	u_int	which;
	struct	itimerval *itv, *oitv;
};
#endif
int
sys_setitimer(struct thread *td, struct setitimer_args *uap)
{
	struct itimerval aitv, oitv;
	int error;

	if (uap->itv == NULL) {
		uap->itv = uap->oitv;
		return (sys_getitimer(td, (struct getitimer_args *)uap));
	}

	if ((error = copyin(uap->itv, &aitv, sizeof(struct itimerval))))
		return (error);
	error = kern_setitimer(td, uap->which, &aitv, &oitv);
	if (error != 0 || uap->oitv == NULL)
		return (error);
	return (copyout(&oitv, uap->oitv, sizeof(struct itimerval)));
}

int
kern_setitimer(struct thread *td, u_int which, struct itimerval *aitv,
    struct itimerval *oitv)
{
	struct proc *p = td->td_proc;
	struct timeval ctv;

	if (aitv == NULL)
		return (kern_getitimer(td, which, oitv));

	if (which > ITIMER_PROF)
		return (EINVAL);
	if (itimerfix(&aitv->it_value))
		return (EINVAL);
	if (!timevalisset(&aitv->it_value))
		timevalclear(&aitv->it_interval);
	else if (itimerfix(&aitv->it_interval))
		return (EINVAL);

	if (which == ITIMER_REAL) {
		PROC_LOCK(p);
		if (timevalisset(&p->p_realtimer.it_value))
			callout_stop(&p->p_itcallout);
		getmicrouptime(&ctv);
		if (timevalisset(&aitv->it_value)) {
			callout_reset(&p->p_itcallout, tvtohz(&aitv->it_value),
			    realitexpire, p);
			timevaladd(&aitv->it_value, &ctv);
		}
		*oitv = p->p_realtimer;
		p->p_realtimer = *aitv;
		PROC_UNLOCK(p);
		if (timevalisset(&oitv->it_value)) {
			if (timevalcmp(&oitv->it_value, &ctv, <))
				timevalclear(&oitv->it_value);
			else
				timevalsub(&oitv->it_value, &ctv);
		}
	} else {
		PROC_SLOCK(p);
		*oitv = p->p_stats->p_timer[which];
		p->p_stats->p_timer[which] = *aitv;
		PROC_SUNLOCK(p);
	}
	return (0);
}

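/*
 * Illustrative note (added commentary): a userland sketch of the
 * setitimer(2) path above, assuming the standard prototypes:
 *
 *	struct itimerval itv = {
 *		.it_value    = { 0, 500000 },	(first fire in 500us)
 *		.it_interval = { 1, 0 },	(then every second)
 *	};
 *
 *	signal(SIGALRM, handler);
 *	setitimer(ITIMER_REAL, &itv, NULL);
 *
 * Values below the clock resolution are rounded up by itimerfix()
 * below.
 */
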
/*
 * Real interval timer expired:
 * send process whose timer expired an alarm signal.
 * If time is not set up to reload, then just return.
 * Else compute next time timer should go off which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 * tvtohz() always adds 1 to allow for the time until the next clock
 * interrupt being strictly less than 1 clock tick, but we don't want
 * that here since we want to appear to be in sync with the clock
 * interrupt even when we're delayed.
 */
void
realitexpire(void *arg)
{
	struct proc *p;
	struct timeval ctv, ntv;

	p = (struct proc *)arg;
	PROC_LOCK(p);
	kern_psignal(p, SIGALRM);
	if (!timevalisset(&p->p_realtimer.it_interval)) {
		timevalclear(&p->p_realtimer.it_value);
		if (p->p_flag & P_WEXIT)
			wakeup(&p->p_itcallout);
		PROC_UNLOCK(p);
		return;
	}
	for (;;) {
		timevaladd(&p->p_realtimer.it_value,
		    &p->p_realtimer.it_interval);
		getmicrouptime(&ctv);
		if (timevalcmp(&p->p_realtimer.it_value, &ctv, >)) {
			ntv = p->p_realtimer.it_value;
			timevalsub(&ntv, &ctv);
			callout_reset(&p->p_itcallout, tvtohz(&ntv) - 1,
			    realitexpire, p);
			PROC_UNLOCK(p);
			return;
		}
	}
	/*NOTREACHED*/
}

/*
 * Check that a proposed value to load into the .it_value or
 * .it_interval part of an interval timer is acceptable, and
 * fix it to have at least minimal value (i.e. if it is less
 * than the resolution of the clock, round it up.)
 */
int
itimerfix(struct timeval *tv)
{

	if (tv->tv_sec < 0 || tv->tv_usec < 0 || tv->tv_usec >= 1000000)
		return (EINVAL);
	if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
		tv->tv_usec = tick;
	return (0);
}

/*
 * Decrement an interval timer by a specified number
 * of microseconds, which must be less than a second,
 * i.e. < 1000000.  If the timer expires, then reload
 * it.  In this case, carry over (usec - old value) to
 * reduce the value reloaded into the timer so that
 * the timer does not drift.  This routine assumes
 * that it is called in a context where the timers
 * on which it is operating cannot change in value.
 */
int
itimerdecr(struct itimerval *itp, int usec)
{

	if (itp->it_value.tv_usec < usec) {
		if (itp->it_value.tv_sec == 0) {
			/* expired, and already in next interval */
			usec -= itp->it_value.tv_usec;
			goto expire;
		}
		itp->it_value.tv_usec += 1000000;
		itp->it_value.tv_sec--;
	}
	itp->it_value.tv_usec -= usec;
	usec = 0;
	if (timevalisset(&itp->it_value))
		return (1);
	/* expired, exactly at end of interval */
expire:
	if (timevalisset(&itp->it_interval)) {
		itp->it_value = itp->it_interval;
		itp->it_value.tv_usec -= usec;
		if (itp->it_value.tv_usec < 0) {
			itp->it_value.tv_usec += 1000000;
			itp->it_value.tv_sec--;
		}
	} else
		itp->it_value.tv_usec = 0;		/* sec is already 0 */
	return (0);
}

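/*
 * Illustrative note (added commentary): a worked example of the
 * carry-over in itimerdecr().  With it_value = 300us, it_interval =
 * 10000us and usec = 500, the timer expires 200us "late"; the reload
 * becomes 10000 - 200 = 9800us, so the next expiry still lands on the
 * original 10000us grid and the timer does not drift.
 */
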
/*
 * Add and subtract routines for timevals.
 * N.B.: subtract routine doesn't deal with
 * results which are before the beginning,
 * it just gets very confused in this case.
 * Caveat emptor.
 */
void
timevaladd(struct timeval *t1, const struct timeval *t2)
{

	t1->tv_sec += t2->tv_sec;
	t1->tv_usec += t2->tv_usec;
	timevalfix(t1);
}

void
timevalsub(struct timeval *t1, const struct timeval *t2)
{

	t1->tv_sec -= t2->tv_sec;
	t1->tv_usec -= t2->tv_usec;
	timevalfix(t1);
}

static void
timevalfix(struct timeval *t1)
{

	if (t1->tv_usec < 0) {
		t1->tv_sec--;
		t1->tv_usec += 1000000;
	}
	if (t1->tv_usec >= 1000000) {
		t1->tv_sec++;
		t1->tv_usec -= 1000000;
	}
}

/*
 * ratecheck(): simple time-based rate-limit checking.
 */
int
ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
{
	struct timeval tv, delta;
	int rv = 0;

	getmicrouptime(&tv);		/* NB: 10ms precision */
	delta = tv;
	timevalsub(&delta, lasttime);

	/*
	 * check for 0,0 is so that the message will be seen at least once,
	 * even if interval is huge.
	 */
	if (timevalcmp(&delta, mininterval, >=) ||
	    (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
		*lasttime = tv;
		rv = 1;
	}

	return (rv);
}

/*
 * ppsratecheck(): packets (or events) per second limitation.
 *
 * Return 0 if the limit is to be enforced (e.g. the caller
 * should drop a packet because of the rate limitation).
 *
 * maxpps of 0 always causes zero to be returned.  maxpps of -1
 * always causes 1 to be returned; this effectively defeats rate
 * limiting.
 *
 * Note that we maintain the struct timeval for compatibility
 * with other bsd systems.  We reuse the storage and just monitor
 * clock ticks for minimal overhead.
 */
int
ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
{
	int now;

	/*
	 * Reset the last time and counter if this is the first call
	 * or more than a second has passed since the last update of
	 * lasttime.
	 */
	now = ticks;
	if (lasttime->tv_sec == 0 || (u_int)(now - lasttime->tv_sec) >= hz) {
		lasttime->tv_sec = now;
		*curpps = 1;
		return (maxpps != 0);
	} else {
		(*curpps)++;		/* NB: ignore potential overflow */
		return (maxpps < 0 || *curpps < maxpps);
	}
}

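/*
 * Illustrative note (added commentary): typical use of the rate
 * limiters above, as commonly seen in drivers (a sketch):
 *
 *	static struct timeval lasttv;
 *	static int curpps;
 *
 *	if (ppsratecheck(&lasttv, &curpps, 10))
 *		printf("dropped packet\n");	(at most ~10 logs/sec)
 *
 * ratecheck() is similar but takes a minimum interval between events
 * instead of a per-second budget.
 */
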
static void
itimer_start(void)
{
	struct kclock rt_clock = {
		.timer_create = realtimer_create,
		.timer_delete = realtimer_delete,
		.timer_settime = realtimer_settime,
		.timer_gettime = realtimer_gettime,
		.event_hook = NULL
	};

	itimer_zone = uma_zcreate("itimer", sizeof(struct itimer),
	    NULL, NULL, itimer_init, itimer_fini, UMA_ALIGN_PTR, 0);
	register_posix_clock(CLOCK_REALTIME, &rt_clock);
	register_posix_clock(CLOCK_MONOTONIC, &rt_clock);
	p31b_setcfg(CTL_P1003_1B_TIMERS, 200112L);
	p31b_setcfg(CTL_P1003_1B_DELAYTIMER_MAX, INT_MAX);
	p31b_setcfg(CTL_P1003_1B_TIMER_MAX, TIMER_MAX);
	EVENTHANDLER_REGISTER(process_exit, itimers_event_hook_exit,
	    (void *)ITIMER_EV_EXIT, EVENTHANDLER_PRI_ANY);
	EVENTHANDLER_REGISTER(process_exec, itimers_event_hook_exec,
	    (void *)ITIMER_EV_EXEC, EVENTHANDLER_PRI_ANY);
}

int
register_posix_clock(int clockid, struct kclock *clk)
{
	if ((unsigned)clockid >= MAX_CLOCKS) {
		printf("%s: invalid clockid\n", __func__);
		return (0);
	}
	posix_clocks[clockid] = *clk;
	return (1);
}

static int
itimer_init(void *mem, int size, int flags)
{
	struct itimer *it;

	it = (struct itimer *)mem;
	mtx_init(&it->it_mtx, "itimer lock", NULL, MTX_DEF);
	return (0);
}

static void
itimer_fini(void *mem, int size)
{
	struct itimer *it;

	it = (struct itimer *)mem;
	mtx_destroy(&it->it_mtx);
}

static void
itimer_enter(struct itimer *it)
{

	mtx_assert(&it->it_mtx, MA_OWNED);
	it->it_usecount++;
}

static void
itimer_leave(struct itimer *it)
{

	mtx_assert(&it->it_mtx, MA_OWNED);
	KASSERT(it->it_usecount > 0, ("invalid it_usecount"));

	if (--it->it_usecount == 0 && (it->it_flags & ITF_WANTED) != 0)
		wakeup(it);
}

#ifndef _SYS_SYSPROTO_H_
struct ktimer_create_args {
	clockid_t clock_id;
	struct sigevent *evp;
	int *timerid;
};
#endif
int
sys_ktimer_create(struct thread *td, struct ktimer_create_args *uap)
{
	struct sigevent *evp1, ev;
	int id;
	int error;

	if (uap->evp != NULL) {
		error = copyin(uap->evp, &ev, sizeof(ev));
		if (error != 0)
			return (error);
		evp1 = &ev;
	} else
		evp1 = NULL;

	error = kern_timer_create(td, uap->clock_id, evp1, &id, -1);

	if (error == 0) {
		error = copyout(&id, uap->timerid, sizeof(int));
		if (error != 0)
			kern_timer_delete(td, id);
	}
	return (error);
}

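/*
 * Illustrative note (added commentary): a userland sketch of the POSIX
 * interface backed by the syscall above, assuming the standard
 * timer_create(2) prototypes:
 *
 *	struct sigevent ev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo  = SIGUSR1,
 *	};
 *	timer_t tid;
 *
 *	timer_create(CLOCK_REALTIME, &ev, &tid);
 *
 * Passing evp == NULL gets the per-clock default signal chosen in
 * kern_timer_create() below (SIGALRM for CLOCK_REALTIME).
 */
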
static int
kern_timer_create(struct thread *td, clockid_t clock_id,
    struct sigevent *evp, int *timerid, int preset_id)
{
	struct proc *p = td->td_proc;
	struct itimer *it;
	int id;
	int error;

	if (clock_id < 0 || clock_id >= MAX_CLOCKS)
		return (EINVAL);

	if (posix_clocks[clock_id].timer_create == NULL)
		return (EINVAL);

	if (evp != NULL) {
		if (evp->sigev_notify != SIGEV_NONE &&
		    evp->sigev_notify != SIGEV_SIGNAL &&
		    evp->sigev_notify != SIGEV_THREAD_ID)
			return (EINVAL);
		if ((evp->sigev_notify == SIGEV_SIGNAL ||
		    evp->sigev_notify == SIGEV_THREAD_ID) &&
		    !_SIG_VALID(evp->sigev_signo))
			return (EINVAL);
	}

	if (p->p_itimers == NULL)
		itimers_alloc(p);

	it = uma_zalloc(itimer_zone, M_WAITOK);
	it->it_flags = 0;
	it->it_usecount = 0;
	it->it_active = 0;
	timespecclear(&it->it_time.it_value);
	timespecclear(&it->it_time.it_interval);
	it->it_overrun = 0;
	it->it_overrun_last = 0;
	it->it_clockid = clock_id;
	it->it_timerid = -1;
	it->it_proc = p;
	ksiginfo_init(&it->it_ksi);
	it->it_ksi.ksi_flags |= KSI_INS | KSI_EXT;
	error = CLOCK_CALL(clock_id, timer_create, (it));
	if (error != 0)
		goto out;

	PROC_LOCK(p);
	if (preset_id != -1) {
		KASSERT(preset_id >= 0 && preset_id < 3, ("invalid preset_id"));
		id = preset_id;
		if (p->p_itimers->its_timers[id] != NULL) {
			PROC_UNLOCK(p);
			error = 0;
			goto out;
		}
	} else {
		/*
		 * Find a free timer slot, skipping those reserved
		 * for setitimer().
		 */
		for (id = 3; id < TIMER_MAX; id++)
			if (p->p_itimers->its_timers[id] == NULL)
				break;
		if (id == TIMER_MAX) {
			PROC_UNLOCK(p);
			error = EAGAIN;
			goto out;
		}
	}
	it->it_timerid = id;
	p->p_itimers->its_timers[id] = it;
	if (evp != NULL)
		it->it_sigev = *evp;
	else {
		it->it_sigev.sigev_notify = SIGEV_SIGNAL;
		switch (clock_id) {
		default:
		case CLOCK_REALTIME:
			it->it_sigev.sigev_signo = SIGALRM;
			break;
		case CLOCK_VIRTUAL:
			it->it_sigev.sigev_signo = SIGVTALRM;
			break;
		case CLOCK_PROF:
			it->it_sigev.sigev_signo = SIGPROF;
			break;
		}
		it->it_sigev.sigev_value.sival_int = id;
	}

	if (it->it_sigev.sigev_notify == SIGEV_SIGNAL ||
	    it->it_sigev.sigev_notify == SIGEV_THREAD_ID) {
		it->it_ksi.ksi_signo = it->it_sigev.sigev_signo;
		it->it_ksi.ksi_code = SI_TIMER;
		it->it_ksi.ksi_value = it->it_sigev.sigev_value;
		it->it_ksi.ksi_timerid = id;
	}
	PROC_UNLOCK(p);
	*timerid = id;
	return (0);

out:
	ITIMER_LOCK(it);
	CLOCK_CALL(it->it_clockid, timer_delete, (it));
	ITIMER_UNLOCK(it);
	uma_zfree(itimer_zone, it);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ktimer_delete_args {
	int timerid;
};
#endif
int
sys_ktimer_delete(struct thread *td, struct ktimer_delete_args *uap)
{
	return (kern_timer_delete(td, uap->timerid));
}

static struct itimer *
itimer_find(struct proc *p, int timerid)
{
	struct itimer *it;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((p->p_itimers == NULL) ||
	    (timerid < 0) || (timerid >= TIMER_MAX) ||
	    (it = p->p_itimers->its_timers[timerid]) == NULL) {
		return (NULL);
	}
	ITIMER_LOCK(it);
	if ((it->it_flags & ITF_DELETING) != 0) {
		ITIMER_UNLOCK(it);
		it = NULL;
	}
	return (it);
}

static int
kern_timer_delete(struct thread *td, int timerid)
{
	struct proc *p = td->td_proc;
	struct itimer *it;

	PROC_LOCK(p);
	it = itimer_find(p, timerid);
	if (it == NULL) {
		PROC_UNLOCK(p);
		return (EINVAL);
	}
	PROC_UNLOCK(p);

	it->it_flags |= ITF_DELETING;
	while (it->it_usecount > 0) {
		it->it_flags |= ITF_WANTED;
		msleep(it, &it->it_mtx, PPAUSE, "itimer", 0);
	}
	it->it_flags &= ~ITF_WANTED;
	CLOCK_CALL(it->it_clockid, timer_delete, (it));
	ITIMER_UNLOCK(it);

	PROC_LOCK(p);
	if (KSI_ONQ(&it->it_ksi))
		sigqueue_take(&it->it_ksi);
	p->p_itimers->its_timers[timerid] = NULL;
	PROC_UNLOCK(p);
	uma_zfree(itimer_zone, it);
	return (0);
}

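/*
 * Illustrative note (added commentary): itimer lifetime is guarded by
 * a small hand-rolled protocol rather than a general refcount:
 * itimer_enter()/itimer_leave() bump it_usecount around backend calls,
 * itimer_find() refuses timers marked ITF_DELETING, and
 * kern_timer_delete() sets ITF_DELETING and then msleep()s until the
 * last user drops it_usecount and wakes it via ITF_WANTED.
 */
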
#ifndef _SYS_SYSPROTO_H_
struct ktimer_settime_args {
	int timerid;
	int flags;
	const struct itimerspec *value;
	struct itimerspec *ovalue;
};
#endif
int
sys_ktimer_settime(struct thread *td, struct ktimer_settime_args *uap)
{
	struct proc *p = td->td_proc;
	struct itimer *it;
	struct itimerspec val, oval, *ovalp;
	int error;

	error = copyin(uap->value, &val, sizeof(val));
	if (error != 0)
		return (error);

	if (uap->ovalue != NULL)
		ovalp = &oval;
	else
		ovalp = NULL;

	PROC_LOCK(p);
	if (uap->timerid < 3 ||
	    (it = itimer_find(p, uap->timerid)) == NULL) {
		PROC_UNLOCK(p);
		error = EINVAL;
	} else {
		PROC_UNLOCK(p);
		itimer_enter(it);
		error = CLOCK_CALL(it->it_clockid, timer_settime,
		    (it, uap->flags, &val, ovalp));
		itimer_leave(it);
		ITIMER_UNLOCK(it);
	}
	if (error == 0 && uap->ovalue != NULL)
		error = copyout(ovalp, uap->ovalue, sizeof(*ovalp));
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ktimer_gettime_args {
	int timerid;
	struct itimerspec *value;
};
#endif
int
sys_ktimer_gettime(struct thread *td, struct ktimer_gettime_args *uap)
{
	struct proc *p = td->td_proc;
	struct itimer *it;
	struct itimerspec val;
	int error;

	PROC_LOCK(p);
	if (uap->timerid < 3 ||
	    (it = itimer_find(p, uap->timerid)) == NULL) {
		PROC_UNLOCK(p);
		error = EINVAL;
	} else {
		PROC_UNLOCK(p);
		itimer_enter(it);
		error = CLOCK_CALL(it->it_clockid, timer_gettime,
		    (it, &val));
		itimer_leave(it);
		ITIMER_UNLOCK(it);
	}
	if (error == 0)
		error = copyout(&val, uap->value, sizeof(val));
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ktimer_getoverrun_args {
	int timerid;
};
#endif
int
sys_ktimer_getoverrun(struct thread *td, struct ktimer_getoverrun_args *uap)
{
	struct proc *p = td->td_proc;
	struct itimer *it;
	int error;

	PROC_LOCK(p);
	if (uap->timerid < 3 ||
	    (it = itimer_find(p, uap->timerid)) == NULL) {
		PROC_UNLOCK(p);
		error = EINVAL;
	} else {
		td->td_retval[0] = it->it_overrun_last;
		ITIMER_UNLOCK(it);
		PROC_UNLOCK(p);
		error = 0;
	}
	return (error);
}

static int
realtimer_create(struct itimer *it)
{
	callout_init_mtx(&it->it_callout, &it->it_mtx, 0);
	return (0);
}

static int
realtimer_delete(struct itimer *it)
{
	mtx_assert(&it->it_mtx, MA_OWNED);

	/*
	 * clear timer's value and interval to tell realtimer_expire
	 * to not rearm the timer.
	 */
	timespecclear(&it->it_time.it_value);
	timespecclear(&it->it_time.it_interval);
	ITIMER_UNLOCK(it);
	callout_drain(&it->it_callout);
	ITIMER_LOCK(it);
	return (0);
}

static int
realtimer_gettime(struct itimer *it, struct itimerspec *ovalue)
{
	struct timespec cts;

	mtx_assert(&it->it_mtx, MA_OWNED);

	realtimer_clocktime(it->it_clockid, &cts);
	*ovalue = it->it_time;
	if (ovalue->it_value.tv_sec != 0 || ovalue->it_value.tv_nsec != 0) {
		timespecsub(&ovalue->it_value, &cts);
		if (ovalue->it_value.tv_sec < 0 ||
		    (ovalue->it_value.tv_sec == 0 &&
		     ovalue->it_value.tv_nsec == 0)) {
			ovalue->it_value.tv_sec = 0;
			ovalue->it_value.tv_nsec = 1;
		}
	}
	return (0);
}

static int
realtimer_settime(struct itimer *it, int flags,
    struct itimerspec *value, struct itimerspec *ovalue)
{
	struct timespec cts, ts;
	struct timeval tv;
	struct itimerspec val;

	mtx_assert(&it->it_mtx, MA_OWNED);

	val = *value;
	if (itimespecfix(&val.it_value))
		return (EINVAL);

	if (timespecisset(&val.it_value)) {
		if (itimespecfix(&val.it_interval))
			return (EINVAL);
	} else {
		timespecclear(&val.it_interval);
	}

	if (ovalue != NULL)
		realtimer_gettime(it, ovalue);

	it->it_time = val;
	if (timespecisset(&val.it_value)) {
		realtimer_clocktime(it->it_clockid, &cts);
		ts = val.it_value;
		if ((flags & TIMER_ABSTIME) == 0) {
			/* Convert to absolute time. */
			timespecadd(&it->it_time.it_value, &cts);
		} else {
			timespecsub(&ts, &cts);
			/*
			 * We don't care if ts is negative, tvtohz will
			 * fix it.
			 */
		}
		TIMESPEC_TO_TIMEVAL(&tv, &ts);
		callout_reset(&it->it_callout, tvtohz(&tv),
		    realtimer_expire, it);
	} else {
		callout_stop(&it->it_callout);
	}

	return (0);
}

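/*
 * Illustrative note (added commentary): realtimer_settime() stores
 * it_value as an absolute time in both cases.  A relative request is
 * converted by adding the current clock time; an absolute request
 * (TIMER_ABSTIME) is instead converted to a relative delta only for
 * the callout_reset() argument.  A delta that has already passed is
 * harmless there because tvtohz() clamps it to a minimum of one tick.
 */
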
static void
realtimer_clocktime(clockid_t id, struct timespec *ts)
{
	if (id == CLOCK_REALTIME)
		getnanotime(ts);
	else	/* CLOCK_MONOTONIC */
		getnanouptime(ts);
}

int
itimer_accept(struct proc *p, int timerid, ksiginfo_t *ksi)
{
	struct itimer *it;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	it = itimer_find(p, timerid);
	if (it != NULL) {
		ksi->ksi_overrun = it->it_overrun;
		it->it_overrun_last = it->it_overrun;
		it->it_overrun = 0;
		ITIMER_UNLOCK(it);
		return (0);
	}
	return (EINVAL);
}

int
itimespecfix(struct timespec *ts)
{

	if (ts->tv_sec < 0 || ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000)
		return (EINVAL);
	if (ts->tv_sec == 0 && ts->tv_nsec != 0 && ts->tv_nsec < tick * 1000)
		ts->tv_nsec = tick * 1000;
	return (0);
}

/* Timeout callback for realtime timer */
static void
realtimer_expire(void *arg)
{
	struct timespec cts, ts;
	struct timeval tv;
	struct itimer *it;

	it = (struct itimer *)arg;

	realtimer_clocktime(it->it_clockid, &cts);
	/* Only fire if time is reached. */
	if (timespeccmp(&cts, &it->it_time.it_value, >=)) {
		if (timespecisset(&it->it_time.it_interval)) {
			timespecadd(&it->it_time.it_value,
			    &it->it_time.it_interval);
			while (timespeccmp(&cts, &it->it_time.it_value, >=)) {
				if (it->it_overrun < INT_MAX)
					it->it_overrun++;
				else
					it->it_ksi.ksi_errno = ERANGE;
				timespecadd(&it->it_time.it_value,
				    &it->it_time.it_interval);
			}
		} else {
			/* single shot timer ? */
			timespecclear(&it->it_time.it_value);
		}
		if (timespecisset(&it->it_time.it_value)) {
			ts = it->it_time.it_value;
			timespecsub(&ts, &cts);
			TIMESPEC_TO_TIMEVAL(&tv, &ts);
			callout_reset(&it->it_callout, tvtohz(&tv),
			    realtimer_expire, it);
		}
		itimer_enter(it);
		ITIMER_UNLOCK(it);
		itimer_fire(it);
		ITIMER_LOCK(it);
		itimer_leave(it);
	} else if (timespecisset(&it->it_time.it_value)) {
		ts = it->it_time.it_value;
		timespecsub(&ts, &cts);
		TIMESPEC_TO_TIMEVAL(&tv, &ts);
		callout_reset(&it->it_callout, tvtohz(&tv), realtimer_expire,
		    it);
	}
}

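/*
 * Illustrative note (added commentary): the while loop above advances
 * it_value by whole intervals until it is in the future, counting one
 * overrun per skipped interval; timer_getoverrun(2) later reports the
 * value captured into it_overrun_last by itimer_accept().  Saturating
 * at INT_MAX sets ksi_errno to ERANGE instead of wrapping.
 */
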
void
itimer_fire(struct itimer *it)
{
	struct proc *p = it->it_proc;
	struct thread *td;

	if (it->it_sigev.sigev_notify == SIGEV_SIGNAL ||
	    it->it_sigev.sigev_notify == SIGEV_THREAD_ID) {
		if (sigev_findtd(p, &it->it_sigev, &td) != 0) {
			ITIMER_LOCK(it);
			timespecclear(&it->it_time.it_value);
			timespecclear(&it->it_time.it_interval);
			callout_stop(&it->it_callout);
			ITIMER_UNLOCK(it);
			return;
		}
		if (!KSI_ONQ(&it->it_ksi)) {
			it->it_ksi.ksi_errno = 0;
			ksiginfo_set_sigev(&it->it_ksi, &it->it_sigev);
			tdsendsignal(p, td, it->it_ksi.ksi_signo, &it->it_ksi);
		} else {
			if (it->it_overrun < INT_MAX)
				it->it_overrun++;
			else
				it->it_ksi.ksi_errno = ERANGE;
		}
		PROC_UNLOCK(p);
	}
}

static void
itimers_alloc(struct proc *p)
{
	struct itimers *its;
	int i;

	its = malloc(sizeof (struct itimers), M_SUBPROC, M_WAITOK | M_ZERO);
	LIST_INIT(&its->its_virtual);
	LIST_INIT(&its->its_prof);
	TAILQ_INIT(&its->its_worklist);
	for (i = 0; i < TIMER_MAX; i++)
		its->its_timers[i] = NULL;
	PROC_LOCK(p);
	if (p->p_itimers == NULL) {
		p->p_itimers = its;
		PROC_UNLOCK(p);
	} else {
		PROC_UNLOCK(p);
		free(its, M_SUBPROC);
	}
}

static void
itimers_event_hook_exec(void *arg, struct proc *p,
    struct image_params *imgp __unused)
{
	itimers_event_hook_exit(arg, p);
}

/* Clean up timers when some process events are being triggered. */
static void
itimers_event_hook_exit(void *arg, struct proc *p)
{
	struct itimers *its;
	struct itimer *it;
	int event = (int)(intptr_t)arg;
	int i;

	if (p->p_itimers != NULL) {
		its = p->p_itimers;
		for (i = 0; i < MAX_CLOCKS; ++i) {
			if (posix_clocks[i].event_hook != NULL)
				CLOCK_CALL(i, event_hook, (p, i, event));
		}
		/*
		 * According to susv3, XSI interval timers should be inherited
		 * by new image.
		 */
		if (event == ITIMER_EV_EXEC)
			i = 3;
		else if (event == ITIMER_EV_EXIT)
			i = 0;
		else
			panic("unhandled event");
		for (; i < TIMER_MAX; ++i) {
			if ((it = its->its_timers[i]) != NULL)
				kern_timer_delete(curthread, i);
		}
		if (its->its_timers[0] == NULL &&
		    its->its_timers[1] == NULL &&
		    its->its_timers[2] == NULL) {
			free(its, M_SUBPROC);
			p->p_itimers = NULL;
		}
	}
}