/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/clock.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysproto.h>
#include <sys/eventhandler.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/sleepqueue.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/posix4.h>
#include <sys/time.h>
#include <sys/timers.h>
#include <sys/timetc.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

#define MAX_CLOCKS	(CLOCK_MONOTONIC+1)
#define CPUCLOCK_BIT		0x80000000
#define CPUCLOCK_PROCESS_BIT	0x40000000
#define CPUCLOCK_ID_MASK	(~(CPUCLOCK_BIT|CPUCLOCK_PROCESS_BIT))
#define MAKE_THREAD_CPUCLOCK(tid)	(CPUCLOCK_BIT|(tid))
#define MAKE_PROCESS_CPUCLOCK(pid)	\
	(CPUCLOCK_BIT|CPUCLOCK_PROCESS_BIT|(pid))

static struct kclock	posix_clocks[MAX_CLOCKS];
static uma_zone_t	itimer_zone = NULL;

/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */

static int	settime(struct thread *, struct timeval *);
static void	timevalfix(struct timeval *);

static void	itimer_start(void);
static int	itimer_init(void *, int, int);
static void	itimer_fini(void *, int);
static void	itimer_enter(struct itimer *);
static void	itimer_leave(struct itimer *);
static struct itimer *itimer_find(struct proc *, int);
static void	itimers_alloc(struct proc *);
static void	itimers_event_hook_exec(void *arg, struct proc *p,
		    struct image_params *imgp);
static void	itimers_event_hook_exit(void *arg, struct proc *p);
static int	realtimer_create(struct itimer *);
static int	realtimer_gettime(struct itimer *, struct itimerspec *);
static int	realtimer_settime(struct itimer *, int,
		    struct itimerspec *, struct itimerspec *);
static int	realtimer_delete(struct itimer *);
static void	realtimer_clocktime(clockid_t, struct timespec *);
static void	realtimer_expire(void *);

int		register_posix_clock(int, struct kclock *);
void		itimer_fire(struct itimer *it);
int		itimespecfix(struct timespec *ts);

#define CLOCK_CALL(clock, call, arglist)		\
	((*posix_clocks[clock].call) arglist)

SYSINIT(posix_timer, SI_SUB_P1003_1B, SI_ORDER_FIRST+4, itimer_start, NULL);

static int
settime(struct thread *td, struct timeval *tv)
{
	struct timeval delta, tv1, tv2;
	static struct timeval maxtime, laststep;
	struct timespec ts;
	int s;

	s = splclock();
	microtime(&tv1);
	delta = *tv;
	timevalsub(&delta, &tv1);

	/*
	 * If the system is secure, we do not allow the time to be
	 * set to a value earlier than 1 second less than the highest
	 * time we have yet seen.  The worst a miscreant can do in
	 * this circumstance is "freeze" time.  He couldn't go
	 * back to the past.
	 *
	 * We similarly do not allow the clock to be stepped more
	 * than one second, nor more than once per second.  This allows
	 * a miscreant to make the clock march double-time, but no worse.
	 */
	if (securelevel_gt(td->td_ucred, 1) != 0) {
		if (delta.tv_sec < 0 || delta.tv_usec < 0) {
			/*
			 * Update maxtime to latest time we've seen.
			 */
			if (tv1.tv_sec > maxtime.tv_sec)
				maxtime = tv1;
			tv2 = *tv;
			timevalsub(&tv2, &maxtime);
			if (tv2.tv_sec < -1) {
				tv->tv_sec = maxtime.tv_sec - 1;
				printf("Time adjustment clamped to -1 second\n");
			}
		} else {
			if (tv1.tv_sec == laststep.tv_sec) {
				splx(s);
				return (EPERM);
			}
			if (delta.tv_sec > 1) {
				tv->tv_sec = tv1.tv_sec + 1;
				printf("Time adjustment clamped to +1 second\n");
			}
			laststep = *tv;
		}
	}

	ts.tv_sec = tv->tv_sec;
	ts.tv_nsec = tv->tv_usec * 1000;
	mtx_lock(&Giant);
	tc_setclock(&ts);
	resettodr();
	mtx_unlock(&Giant);
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct clock_getcpuclockid2_args {
	id_t id;
	int which;
	clockid_t *clock_id;
};
#endif
/* ARGSUSED */
int
sys_clock_getcpuclockid2(struct thread *td, struct clock_getcpuclockid2_args *uap)
{
	clockid_t clk_id;
	int error;

	error = kern_clock_getcpuclockid2(td, uap->id, uap->which, &clk_id);
	if (error == 0)
		error = copyout(&clk_id, uap->clock_id, sizeof(clockid_t));
	return (error);
}

int
kern_clock_getcpuclockid2(struct thread *td, id_t id, int which,
    clockid_t *clk_id)
{
	struct proc *p;
	pid_t pid;
	lwpid_t tid;
	int error;

	switch (which) {
	case CPUCLOCK_WHICH_PID:
		if (id != 0) {
			error = pget(id, PGET_CANSEE | PGET_NOTID, &p);
			if (error != 0)
				return (error);
			PROC_UNLOCK(p);
			pid = id;
		} else {
			pid = td->td_proc->p_pid;
		}
		*clk_id = MAKE_PROCESS_CPUCLOCK(pid);
		return (0);
	case CPUCLOCK_WHICH_TID:
		tid = id == 0 ? td->td_tid : id;
		*clk_id = MAKE_THREAD_CPUCLOCK(tid);
		return (0);
	default:
		return (EINVAL);
	}
}
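
/*
 * Illustrative sketch (not from the original sources): the CPU-clock
 * clockid_t encoding above deliberately sets the sign bit, so, e.g.,
 * MAKE_PROCESS_CPUCLOCK(100) == 0xc0000064.  Cast to int it is
 * negative, which is how kern_clock_gettime() below routes such ids
 * to get_cputime() while the plain CLOCK_* constants stay non-negative:
 *
 *	clockid_t clk;
 *	kern_clock_getcpuclockid2(td, 100, CPUCLOCK_WHICH_PID, &clk);
 *	// (int)clk < 0, and (clk & CPUCLOCK_ID_MASK) == 100 (the pid)
 */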

#ifndef _SYS_SYSPROTO_H_
struct clock_gettime_args {
	clockid_t clock_id;
	struct timespec *tp;
};
#endif
/* ARGSUSED */
int
sys_clock_gettime(struct thread *td, struct clock_gettime_args *uap)
{
	struct timespec ats;
	int error;

	error = kern_clock_gettime(td, uap->clock_id, &ats);
	if (error == 0)
		error = copyout(&ats, uap->tp, sizeof(ats));

	return (error);
}

static inline void
cputick2timespec(uint64_t runtime, struct timespec *ats)
{
	runtime = cputick2usec(runtime);
	ats->tv_sec = runtime / 1000000;
	ats->tv_nsec = runtime % 1000000 * 1000;
}

static void
get_thread_cputime(struct thread *targettd, struct timespec *ats)
{
	uint64_t runtime, curtime, switchtime;

	if (targettd == NULL) {	/* current thread */
		critical_enter();
		switchtime = PCPU_GET(switchtime);
		curtime = cpu_ticks();
		runtime = curthread->td_runtime;
		critical_exit();
		runtime += curtime - switchtime;
	} else {
		thread_lock(targettd);
		runtime = targettd->td_runtime;
		thread_unlock(targettd);
	}
	cputick2timespec(runtime, ats);
}

static void
get_process_cputime(struct proc *targetp, struct timespec *ats)
{
	uint64_t runtime;
	struct rusage ru;

	PROC_STATLOCK(targetp);
	rufetch(targetp, &ru);
	runtime = targetp->p_rux.rux_runtime;
	PROC_STATUNLOCK(targetp);
	cputick2timespec(runtime, ats);
}

static int
get_cputime(struct thread *td, clockid_t clock_id, struct timespec *ats)
{
	struct proc *p, *p2;
	struct thread *td2;
	lwpid_t tid;
	pid_t pid;
	int error;

	p = td->td_proc;
	if ((clock_id & CPUCLOCK_PROCESS_BIT) == 0) {
		tid = clock_id & CPUCLOCK_ID_MASK;
		td2 = tdfind(tid, p->p_pid);
		if (td2 == NULL)
			return (EINVAL);
		get_thread_cputime(td2, ats);
		PROC_UNLOCK(td2->td_proc);
	} else {
		pid = clock_id & CPUCLOCK_ID_MASK;
		error = pget(pid, PGET_CANSEE, &p2);
		if (error != 0)
			return (EINVAL);
		get_process_cputime(p2, ats);
		PROC_UNLOCK(p2);
	}
	return (0);
}

int
kern_clock_gettime(struct thread *td, clockid_t clock_id, struct timespec *ats)
{
	struct timeval sys, user;
	struct proc *p;

	p = td->td_proc;
	switch (clock_id) {
	case CLOCK_REALTIME:		/* Default to precise. */
	case CLOCK_REALTIME_PRECISE:
		nanotime(ats);
		break;
	case CLOCK_REALTIME_FAST:
		getnanotime(ats);
		break;
	case CLOCK_VIRTUAL:
		PROC_LOCK(p);
		PROC_STATLOCK(p);
		calcru(p, &user, &sys);
		PROC_STATUNLOCK(p);
		PROC_UNLOCK(p);
		TIMEVAL_TO_TIMESPEC(&user, ats);
		break;
	case CLOCK_PROF:
		PROC_LOCK(p);
		PROC_STATLOCK(p);
		calcru(p, &user, &sys);
		PROC_STATUNLOCK(p);
		PROC_UNLOCK(p);
		timevaladd(&user, &sys);
		TIMEVAL_TO_TIMESPEC(&user, ats);
		break;
	case CLOCK_MONOTONIC:		/* Default to precise. */
	case CLOCK_MONOTONIC_PRECISE:
	case CLOCK_UPTIME:
	case CLOCK_UPTIME_PRECISE:
		nanouptime(ats);
		break;
	case CLOCK_UPTIME_FAST:
	case CLOCK_MONOTONIC_FAST:
		getnanouptime(ats);
		break;
	case CLOCK_SECOND:
		ats->tv_sec = time_second;
		ats->tv_nsec = 0;
		break;
	case CLOCK_THREAD_CPUTIME_ID:
		get_thread_cputime(NULL, ats);
		break;
	case CLOCK_PROCESS_CPUTIME_ID:
		PROC_LOCK(p);
		get_process_cputime(p, ats);
		PROC_UNLOCK(p);
		break;
	default:
		if ((int)clock_id >= 0)
			return (EINVAL);
		return (get_cputime(td, clock_id, ats));
	}
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct clock_settime_args {
	clockid_t clock_id;
	const struct timespec *tp;
};
#endif
/* ARGSUSED */
int
sys_clock_settime(struct thread *td, struct clock_settime_args *uap)
{
	struct timespec ats;
	int error;

	if ((error = copyin(uap->tp, &ats, sizeof(ats))) != 0)
		return (error);
	return (kern_clock_settime(td, uap->clock_id, &ats));
}

int
kern_clock_settime(struct thread *td, clockid_t clock_id, struct timespec *ats)
{
	struct timeval atv;
	int error;

	if ((error = priv_check(td, PRIV_CLOCK_SETTIME)) != 0)
		return (error);
	if (clock_id != CLOCK_REALTIME)
		return (EINVAL);
	if (ats->tv_nsec < 0 || ats->tv_nsec >= 1000000000 ||
	    ats->tv_sec < 0)
		return (EINVAL);
	/* XXX Don't convert nsec->usec and back */
	TIMESPEC_TO_TIMEVAL(&atv, ats);
	error = settime(td, &atv);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct clock_getres_args {
	clockid_t clock_id;
	struct timespec *tp;
};
#endif
int
sys_clock_getres(struct thread *td, struct clock_getres_args *uap)
{
	struct timespec ts;
	int error;

	if (uap->tp == NULL)
		return (0);

	error = kern_clock_getres(td, uap->clock_id, &ts);
	if (error == 0)
		error = copyout(&ts, uap->tp, sizeof(ts));
	return (error);
}

int
kern_clock_getres(struct thread *td, clockid_t clock_id, struct timespec *ts)
{

	ts->tv_sec = 0;
	switch (clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_REALTIME_FAST:
	case CLOCK_REALTIME_PRECISE:
	case CLOCK_MONOTONIC:
	case CLOCK_MONOTONIC_FAST:
	case CLOCK_MONOTONIC_PRECISE:
	case CLOCK_UPTIME:
	case CLOCK_UPTIME_FAST:
	case CLOCK_UPTIME_PRECISE:
		/*
		 * Round up the result of the division cheaply by adding 1.
		 * Rounding up is especially important if rounding down
		 * would give 0.  Perfect rounding is unimportant.
		 */
		ts->tv_nsec = 1000000000 / tc_getfrequency() + 1;
		break;
	case CLOCK_VIRTUAL:
	case CLOCK_PROF:
		/* Accurately round up here because we can do so cheaply. */
		ts->tv_nsec = (1000000000 + hz - 1) / hz;
		break;
	case CLOCK_SECOND:
		ts->tv_sec = 1;
		ts->tv_nsec = 0;
		break;
	case CLOCK_THREAD_CPUTIME_ID:
	case CLOCK_PROCESS_CPUTIME_ID:
	cputime:
		/* sync with cputick2usec */
		ts->tv_nsec = 1000000 / cpu_tickrate();
		if (ts->tv_nsec == 0)
			ts->tv_nsec = 1000;
		break;
	default:
		if ((int)clock_id < 0)
			goto cputime;
		return (EINVAL);
	}
	return (0);
}
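
/*
 * Worked example with assumed numbers (not from the original sources):
 * with an ACPI timecounter at tc_getfrequency() == 3579545 Hz, the
 * realtime/monotonic cases above report a resolution of
 *
 *	1000000000 / 3579545 + 1 == 280 ns,
 *
 * and with the common hz == 1000, CLOCK_VIRTUAL/CLOCK_PROF report
 * (1000000000 + 999) / 1000 == 1000000 ns, i.e. one hardclock tick.
 */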

static uint8_t nanowait[MAXCPU];

int
kern_nanosleep(struct thread *td, struct timespec *rqt, struct timespec *rmt)
{
	struct timespec ts;
	sbintime_t sbt, sbtt, prec, tmp;
	time_t over;
	int error;

	if (rqt->tv_nsec < 0 || rqt->tv_nsec >= 1000000000)
		return (EINVAL);
	if (rqt->tv_sec < 0 || (rqt->tv_sec == 0 && rqt->tv_nsec == 0))
		return (0);
	ts = *rqt;
	if (ts.tv_sec > INT32_MAX / 2) {
		over = ts.tv_sec - INT32_MAX / 2;
		ts.tv_sec -= over;
	} else
		over = 0;
	tmp = tstosbt(ts);
	prec = tmp;
	prec >>= tc_precexp;
	if (TIMESEL(&sbt, tmp))
		sbt += tc_tick_sbt;
	sbt += tmp;
	error = tsleep_sbt(&nanowait[curcpu], PWAIT | PCATCH, "nanslp",
	    sbt, prec, C_ABSOLUTE);
	if (error != EWOULDBLOCK) {
		if (error == ERESTART)
			error = EINTR;
		TIMESEL(&sbtt, tmp);
		if (rmt != NULL) {
			ts = sbttots(sbt - sbtt);
			ts.tv_sec += over;
			if (ts.tv_sec < 0)
				timespecclear(&ts);
			*rmt = ts;
		}
		if (sbtt >= sbt)
			return (0);
		return (error);
	}
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct nanosleep_args {
	struct timespec *rqtp;
	struct timespec *rmtp;
};
#endif
/* ARGSUSED */
int
sys_nanosleep(struct thread *td, struct nanosleep_args *uap)
{
	struct timespec rmt, rqt;
	int error;

	error = copyin(uap->rqtp, &rqt, sizeof(rqt));
	if (error)
		return (error);

	if (uap->rmtp &&
	    !useracc((caddr_t)uap->rmtp, sizeof(rmt), VM_PROT_WRITE))
		return (EFAULT);
	error = kern_nanosleep(td, &rqt, &rmt);
	if (error && uap->rmtp) {
		int error2;

		error2 = copyout(&rmt, uap->rmtp, sizeof(rmt));
		if (error2)
			error = error2;
	}
	return (error);
}
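
/*
 * Illustrative userland sketch (not part of this file): the rmtp
 * updating above is what makes the classic "restart after a signal"
 * idiom work:
 *
 *	struct timespec req = { 5, 0 }, rem;
 *	while (nanosleep(&req, &rem) == -1 && errno == EINTR)
 *		req = rem;	// sleep only the remainder after a signal
 */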

#ifndef _SYS_SYSPROTO_H_
struct gettimeofday_args {
	struct timeval *tp;
	struct timezone *tzp;
};
#endif
/* ARGSUSED */
int
sys_gettimeofday(struct thread *td, struct gettimeofday_args *uap)
{
	struct timeval atv;
	struct timezone rtz;
	int error = 0;

	if (uap->tp) {
		microtime(&atv);
		error = copyout(&atv, uap->tp, sizeof (atv));
	}
	if (error == 0 && uap->tzp != NULL) {
		rtz.tz_minuteswest = tz_minuteswest;
		rtz.tz_dsttime = tz_dsttime;
		error = copyout(&rtz, uap->tzp, sizeof (rtz));
	}
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct settimeofday_args {
	struct timeval *tv;
	struct timezone *tzp;
};
#endif
/* ARGSUSED */
int
sys_settimeofday(struct thread *td, struct settimeofday_args *uap)
{
	struct timeval atv, *tvp;
	struct timezone atz, *tzp;
	int error;

	if (uap->tv) {
		error = copyin(uap->tv, &atv, sizeof(atv));
		if (error)
			return (error);
		tvp = &atv;
	} else
		tvp = NULL;
	if (uap->tzp) {
		error = copyin(uap->tzp, &atz, sizeof(atz));
		if (error)
			return (error);
		tzp = &atz;
	} else
		tzp = NULL;
	return (kern_settimeofday(td, tvp, tzp));
}

int
kern_settimeofday(struct thread *td, struct timeval *tv, struct timezone *tzp)
{
	int error;

	error = priv_check(td, PRIV_SETTIMEOFDAY);
	if (error)
		return (error);
	/* Verify all parameters before changing time. */
	if (tv) {
		if (tv->tv_usec < 0 || tv->tv_usec >= 1000000 ||
		    tv->tv_sec < 0)
			return (EINVAL);
		error = settime(td, tv);
	}
	if (tzp && error == 0) {
		tz_minuteswest = tzp->tz_minuteswest;
		tz_dsttime = tzp->tz_dsttime;
	}
	return (error);
}

/*
 * Get value of an interval timer.  The process virtual and profiling virtual
 * time timers are kept in the p_stats area, since they can be swapped out.
 * These are kept internally in the way they are specified externally: in
 * time until they expire.
 *
 * The real time interval timer is kept in the process table slot for the
 * process, and its value (it_value) is kept as an absolute time rather than
 * as a delta, so that it is easy to keep periodic real-time signals from
 * drifting.
 *
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a timeout routine,
 * called from the softclock() routine.  Since a callout may be delayed in
 * real time due to interrupt processing in the system, it is possible for
 * the real time timeout routine (realitexpire, given below) to be delayed
 * in real time past when it is supposed to occur.  It does not suffice,
 * therefore, to reload the real timer .it_value from the real time timer's
 * .it_interval.  Rather, we compute the next time in absolute time the timer
 * should go off.
 */

#ifndef _SYS_SYSPROTO_H_
struct getitimer_args {
	u_int which;
	struct itimerval *itv;
};
#endif
int
sys_getitimer(struct thread *td, struct getitimer_args *uap)
{
	struct itimerval aitv;
	int error;

	error = kern_getitimer(td, uap->which, &aitv);
	if (error != 0)
		return (error);
	return (copyout(&aitv, uap->itv, sizeof (struct itimerval)));
}

int
kern_getitimer(struct thread *td, u_int which, struct itimerval *aitv)
{
	struct proc *p = td->td_proc;
	struct timeval ctv;

	if (which > ITIMER_PROF)
		return (EINVAL);

	if (which == ITIMER_REAL) {
		/*
		 * Convert from absolute to relative time in .it_value
		 * part of real time timer.  If time for real time timer
		 * has passed return 0, else return difference between
		 * current time and time for the timer to go off.
		 */
		PROC_LOCK(p);
		*aitv = p->p_realtimer;
		PROC_UNLOCK(p);
		if (timevalisset(&aitv->it_value)) {
			microuptime(&ctv);
			if (timevalcmp(&aitv->it_value, &ctv, <))
				timevalclear(&aitv->it_value);
			else
				timevalsub(&aitv->it_value, &ctv);
		}
	} else {
		PROC_ITIMLOCK(p);
		*aitv = p->p_stats->p_timer[which];
		PROC_ITIMUNLOCK(p);
	}
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct setitimer_args {
	u_int which;
	struct itimerval *itv, *oitv;
};
#endif
int
sys_setitimer(struct thread *td, struct setitimer_args *uap)
{
	struct itimerval aitv, oitv;
	int error;

	if (uap->itv == NULL) {
		uap->itv = uap->oitv;
		return (sys_getitimer(td, (struct getitimer_args *)uap));
	}

	if ((error = copyin(uap->itv, &aitv, sizeof(struct itimerval))))
		return (error);
	error = kern_setitimer(td, uap->which, &aitv, &oitv);
	if (error != 0 || uap->oitv == NULL)
		return (error);
	return (copyout(&oitv, uap->oitv, sizeof(struct itimerval)));
}

int
kern_setitimer(struct thread *td, u_int which, struct itimerval *aitv,
    struct itimerval *oitv)
{
	struct proc *p = td->td_proc;
	struct timeval ctv;
	sbintime_t sbt, pr;

	if (aitv == NULL)
		return (kern_getitimer(td, which, oitv));

	if (which > ITIMER_PROF)
		return (EINVAL);
	if (itimerfix(&aitv->it_value) ||
	    aitv->it_value.tv_sec > INT32_MAX / 2)
		return (EINVAL);
	if (!timevalisset(&aitv->it_value))
		timevalclear(&aitv->it_interval);
	else if (itimerfix(&aitv->it_interval) ||
	    aitv->it_interval.tv_sec > INT32_MAX / 2)
		return (EINVAL);

	if (which == ITIMER_REAL) {
		PROC_LOCK(p);
		if (timevalisset(&p->p_realtimer.it_value))
			callout_stop(&p->p_itcallout);
		microuptime(&ctv);
		if (timevalisset(&aitv->it_value)) {
			pr = tvtosbt(aitv->it_value) >> tc_precexp;
			timevaladd(&aitv->it_value, &ctv);
			sbt = tvtosbt(aitv->it_value);
			callout_reset_sbt(&p->p_itcallout, sbt, pr,
			    realitexpire, p, C_ABSOLUTE);
		}
		*oitv = p->p_realtimer;
		p->p_realtimer = *aitv;
		PROC_UNLOCK(p);
		if (timevalisset(&oitv->it_value)) {
			if (timevalcmp(&oitv->it_value, &ctv, <))
				timevalclear(&oitv->it_value);
			else
				timevalsub(&oitv->it_value, &ctv);
		}
	} else {
		if (aitv->it_interval.tv_sec == 0 &&
		    aitv->it_interval.tv_usec != 0 &&
		    aitv->it_interval.tv_usec < tick)
			aitv->it_interval.tv_usec = tick;
		if (aitv->it_value.tv_sec == 0 &&
		    aitv->it_value.tv_usec != 0 &&
		    aitv->it_value.tv_usec < tick)
			aitv->it_value.tv_usec = tick;
		PROC_ITIMLOCK(p);
		*oitv = p->p_stats->p_timer[which];
		p->p_stats->p_timer[which] = *aitv;
		PROC_ITIMUNLOCK(p);
	}
	return (0);
}
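
/*
 * Illustrative userland sketch (not part of this file): arming a
 * periodic 100 ms ITIMER_REAL through the path above.  it_value is
 * stored as an absolute uptime in p_realtimer, and kern_getitimer()
 * converts it back to the delta userland expects:
 *
 *	struct itimerval itv;
 *	itv.it_value.tv_sec = itv.it_interval.tv_sec = 0;
 *	itv.it_value.tv_usec = itv.it_interval.tv_usec = 100000;
 *	setitimer(ITIMER_REAL, &itv, NULL);
 *	// delivers SIGALRM every 100 ms
 */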

/*
 * Real interval timer expired:
 * send process whose timer expired an alarm signal.
 * If time is not set up to reload, then just return.
 * Else compute next time timer should go off which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 * tvtohz() always adds 1 to allow for the time until the next clock
 * interrupt being strictly less than 1 clock tick, but we don't want
 * that here since we want to appear to be in sync with the clock
 * interrupt even when we're delayed.
 */
void
realitexpire(void *arg)
{
	struct proc *p;
	struct timeval ctv;
	sbintime_t isbt;

	p = (struct proc *)arg;
	kern_psignal(p, SIGALRM);
	if (!timevalisset(&p->p_realtimer.it_interval)) {
		timevalclear(&p->p_realtimer.it_value);
		if (p->p_flag & P_WEXIT)
			wakeup(&p->p_itcallout);
		return;
	}
	isbt = tvtosbt(p->p_realtimer.it_interval);
	if (isbt >= sbt_timethreshold)
		getmicrouptime(&ctv);
	else
		microuptime(&ctv);
	do {
		timevaladd(&p->p_realtimer.it_value,
		    &p->p_realtimer.it_interval);
	} while (timevalcmp(&p->p_realtimer.it_value, &ctv, <=));
	callout_reset_sbt(&p->p_itcallout, tvtosbt(p->p_realtimer.it_value),
	    isbt >> tc_precexp, realitexpire, p, C_ABSOLUTE);
}
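
/*
 * Worked example with assumed numbers (not from the original sources):
 * with it_interval = 0.1 s and an absolute it_value of 10.0 s, a callout
 * that does not run until uptime 10.35 s sends one SIGALRM, and the
 * do/while loop above advances it_value 10.1 -> 10.2 -> 10.3 -> 10.4
 * before rearming for 10.4 s: the missed expirations are compressed
 * into the single signal and the timer's phase does not drift.
 */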

/*
 * Check that a proposed value to load into the .it_value or
 * .it_interval part of an interval timer is acceptable, and
 * fix it to have at least minimal value (i.e. if it is less
 * than the resolution of the clock, round it up.)
 */
int
itimerfix(struct timeval *tv)
{

	if (tv->tv_sec < 0 || tv->tv_usec < 0 || tv->tv_usec >= 1000000)
		return (EINVAL);
	if (tv->tv_sec == 0 && tv->tv_usec != 0 &&
	    tv->tv_usec < (u_int)tick / 16)
		tv->tv_usec = (u_int)tick / 16;
	return (0);
}

/*
 * Decrement an interval timer by a specified number
 * of microseconds, which must be less than a second,
 * i.e. < 1000000.  If the timer expires, then reload
 * it.  In this case, carry over (usec - old value) to
 * reduce the value reloaded into the timer so that
 * the timer does not drift.  This routine assumes
 * that it is called in a context where the timers
 * on which it is operating cannot change in value.
 */
int
itimerdecr(struct itimerval *itp, int usec)
{

	if (itp->it_value.tv_usec < usec) {
		if (itp->it_value.tv_sec == 0) {
			/* expired, and already in next interval */
			usec -= itp->it_value.tv_usec;
			goto expire;
		}
		itp->it_value.tv_usec += 1000000;
		itp->it_value.tv_sec--;
	}
	itp->it_value.tv_usec -= usec;
	usec = 0;
	if (timevalisset(&itp->it_value))
		return (1);
	/* expired, exactly at end of interval */
expire:
	if (timevalisset(&itp->it_interval)) {
		itp->it_value = itp->it_interval;
		itp->it_value.tv_usec -= usec;
		if (itp->it_value.tv_usec < 0) {
			itp->it_value.tv_usec += 1000000;
			itp->it_value.tv_sec--;
		}
	} else
		itp->it_value.tv_usec = 0;		/* sec is already 0 */
	return (0);
}
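
/*
 * Worked example with assumed numbers (not from the original sources):
 * it_value = 0.000300 s, it_interval = 0.010000 s, usec = 1000.
 * The timer is past due, so the 700 us overshoot (1000 - 300) is
 * carried into the reload: it_value becomes 0.010000 - 0.000700 =
 * 0.009300 s, keeping the next expiration on the original 10 ms grid.
 */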

/*
 * Add and subtract routines for timevals.
 * N.B.: subtract routine doesn't deal with
 * results which are before the beginning,
 * it just gets very confused in this case.
 * Caveat emptor.
 */
void
timevaladd(struct timeval *t1, const struct timeval *t2)
{

	t1->tv_sec += t2->tv_sec;
	t1->tv_usec += t2->tv_usec;
	timevalfix(t1);
}

void
timevalsub(struct timeval *t1, const struct timeval *t2)
{

	t1->tv_sec -= t2->tv_sec;
	t1->tv_usec -= t2->tv_usec;
	timevalfix(t1);
}

static void
timevalfix(struct timeval *t1)
{

	if (t1->tv_usec < 0) {
		t1->tv_sec--;
		t1->tv_usec += 1000000;
	}
	if (t1->tv_usec >= 1000000) {
		t1->tv_sec++;
		t1->tv_usec -= 1000000;
	}
}

/*
 * ratecheck(): simple time-based rate-limit checking.
 */
int
ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
{
	struct timeval tv, delta;
	int rv = 0;

	getmicrouptime(&tv);		/* NB: 10ms precision */
	delta = tv;
	timevalsub(&delta, lasttime);

	/*
	 * The check for 0,0 is so that the message will be seen at least
	 * once, even if interval is huge.
	 */
	if (timevalcmp(&delta, mininterval, >=) ||
	    (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
		*lasttime = tv;
		rv = 1;
	}

	return (rv);
}

/*
 * ppsratecheck(): packets (or events) per second limitation.
 *
 * Return 0 if the limit is to be enforced (e.g. the caller
 * should drop a packet because of the rate limitation).
 *
 * maxpps of 0 always causes zero to be returned.  maxpps of -1
 * always causes 1 to be returned; this effectively defeats rate
 * limiting.
 *
 * Note that we maintain the struct timeval for compatibility
 * with other BSD systems.  We reuse the storage and just monitor
 * clock ticks for minimal overhead.
 */
int
ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
{
	int now;

	/*
	 * Reset the last time and counter if this is the first call
	 * or more than a second has passed since the last update of
	 * lasttime.
	 */
	now = ticks;
	if (lasttime->tv_sec == 0 || (u_int)(now - lasttime->tv_sec) >= hz) {
		lasttime->tv_sec = now;
		*curpps = 1;
		return (maxpps != 0);
	} else {
		(*curpps)++;		/* NB: ignore potential overflow */
		return (maxpps < 0 || *curpps <= maxpps);
	}
}
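
/*
 * Illustrative usage sketch (not from the original sources; names are
 * made up): a driver that wants at most one diagnostic every 10 seconds
 * might keep
 *
 *	static struct timeval lastmsg;
 *	static const struct timeval interval = { 10, 0 };
 *
 *	if (ratecheck(&lastmsg, &interval))
 *		printf("device wedged\n");
 *
 * ppsratecheck() is the same idea keyed to a per-second event count,
 * e.g. ppsratecheck(&lasttv, &curpps, 100) returns nonzero for at most
 * roughly 100 events in any one-second window.
 */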

static void
itimer_start(void)
{
	struct kclock rt_clock = {
		.timer_create = realtimer_create,
		.timer_delete = realtimer_delete,
		.timer_settime = realtimer_settime,
		.timer_gettime = realtimer_gettime,
		.event_hook = NULL
	};

	itimer_zone = uma_zcreate("itimer", sizeof(struct itimer),
	    NULL, NULL, itimer_init, itimer_fini, UMA_ALIGN_PTR, 0);
	register_posix_clock(CLOCK_REALTIME, &rt_clock);
	register_posix_clock(CLOCK_MONOTONIC, &rt_clock);
	p31b_setcfg(CTL_P1003_1B_TIMERS, 200112L);
	p31b_setcfg(CTL_P1003_1B_DELAYTIMER_MAX, INT_MAX);
	p31b_setcfg(CTL_P1003_1B_TIMER_MAX, TIMER_MAX);
	EVENTHANDLER_REGISTER(process_exit, itimers_event_hook_exit,
	    (void *)ITIMER_EV_EXIT, EVENTHANDLER_PRI_ANY);
	EVENTHANDLER_REGISTER(process_exec, itimers_event_hook_exec,
	    (void *)ITIMER_EV_EXEC, EVENTHANDLER_PRI_ANY);
}

int
register_posix_clock(int clockid, struct kclock *clk)
{
	if ((unsigned)clockid >= MAX_CLOCKS) {
		printf("%s: invalid clockid\n", __func__);
		return (0);
	}
	posix_clocks[clockid] = *clk;
	return (1);
}

static int
itimer_init(void *mem, int size, int flags)
{
	struct itimer *it;

	it = (struct itimer *)mem;
	mtx_init(&it->it_mtx, "itimer lock", NULL, MTX_DEF);
	return (0);
}

static void
itimer_fini(void *mem, int size)
{
	struct itimer *it;

	it = (struct itimer *)mem;
	mtx_destroy(&it->it_mtx);
}

static void
itimer_enter(struct itimer *it)
{

	mtx_assert(&it->it_mtx, MA_OWNED);
	it->it_usecount++;
}

static void
itimer_leave(struct itimer *it)
{

	mtx_assert(&it->it_mtx, MA_OWNED);
	KASSERT(it->it_usecount > 0, ("invalid it_usecount"));

	if (--it->it_usecount == 0 && (it->it_flags & ITF_WANTED) != 0)
		wakeup(it);
}

#ifndef _SYS_SYSPROTO_H_
struct ktimer_create_args {
	clockid_t clock_id;
	struct sigevent *evp;
	int *timerid;
};
#endif
int
sys_ktimer_create(struct thread *td, struct ktimer_create_args *uap)
{
	struct sigevent *evp, ev;
	int id;
	int error;

	if (uap->evp == NULL) {
		evp = NULL;
	} else {
		error = copyin(uap->evp, &ev, sizeof(ev));
		if (error != 0)
			return (error);
		evp = &ev;
	}
	error = kern_ktimer_create(td, uap->clock_id, evp, &id, -1);
	if (error == 0) {
		error = copyout(&id, uap->timerid, sizeof(int));
		if (error != 0)
			kern_ktimer_delete(td, id);
	}
	return (error);
}

int
kern_ktimer_create(struct thread *td, clockid_t clock_id, struct sigevent *evp,
    int *timerid, int preset_id)
{
	struct proc *p = td->td_proc;
	struct itimer *it;
	int id;
	int error;

	if (clock_id < 0 || clock_id >= MAX_CLOCKS)
		return (EINVAL);

	if (posix_clocks[clock_id].timer_create == NULL)
		return (EINVAL);

	if (evp != NULL) {
		if (evp->sigev_notify != SIGEV_NONE &&
		    evp->sigev_notify != SIGEV_SIGNAL &&
		    evp->sigev_notify != SIGEV_THREAD_ID)
			return (EINVAL);
		if ((evp->sigev_notify == SIGEV_SIGNAL ||
		    evp->sigev_notify == SIGEV_THREAD_ID) &&
		    !_SIG_VALID(evp->sigev_signo))
			return (EINVAL);
	}

	if (p->p_itimers == NULL)
		itimers_alloc(p);

	it = uma_zalloc(itimer_zone, M_WAITOK);
	it->it_flags = 0;
	it->it_usecount = 0;
	it->it_active = 0;
	timespecclear(&it->it_time.it_value);
	timespecclear(&it->it_time.it_interval);
	it->it_overrun = 0;
	it->it_overrun_last = 0;
	it->it_clockid = clock_id;
	it->it_timerid = -1;
	it->it_proc = p;
	ksiginfo_init(&it->it_ksi);
	it->it_ksi.ksi_flags |= KSI_INS | KSI_EXT;
	error = CLOCK_CALL(clock_id, timer_create, (it));
	if (error != 0)
		goto out;

	PROC_LOCK(p);
	if (preset_id != -1) {
		KASSERT(preset_id >= 0 && preset_id < 3, ("invalid preset_id"));
		id = preset_id;
		if (p->p_itimers->its_timers[id] != NULL) {
			PROC_UNLOCK(p);
			error = 0;
			goto out;
		}
	} else {
		/*
		 * Find a free timer slot, skipping those reserved
		 * for setitimer().
		 */
		for (id = 3; id < TIMER_MAX; id++)
			if (p->p_itimers->its_timers[id] == NULL)
				break;
		if (id == TIMER_MAX) {
			PROC_UNLOCK(p);
			error = EAGAIN;
			goto out;
		}
	}
	it->it_timerid = id;
	p->p_itimers->its_timers[id] = it;
	if (evp != NULL)
		it->it_sigev = *evp;
	else {
		it->it_sigev.sigev_notify = SIGEV_SIGNAL;
		switch (clock_id) {
		default:
		case CLOCK_REALTIME:
			it->it_sigev.sigev_signo = SIGALRM;
			break;
		case CLOCK_VIRTUAL:
			it->it_sigev.sigev_signo = SIGVTALRM;
			break;
		case CLOCK_PROF:
			it->it_sigev.sigev_signo = SIGPROF;
			break;
		}
		it->it_sigev.sigev_value.sival_int = id;
	}

	if (it->it_sigev.sigev_notify == SIGEV_SIGNAL ||
	    it->it_sigev.sigev_notify == SIGEV_THREAD_ID) {
		it->it_ksi.ksi_signo = it->it_sigev.sigev_signo;
		it->it_ksi.ksi_code = SI_TIMER;
		it->it_ksi.ksi_value = it->it_sigev.sigev_value;
		it->it_ksi.ksi_timerid = id;
	}
	PROC_UNLOCK(p);
	*timerid = id;
	return (0);

out:
	ITIMER_LOCK(it);
	CLOCK_CALL(it->it_clockid, timer_delete, (it));
	ITIMER_UNLOCK(it);
	uma_zfree(itimer_zone, it);
	return (error);
}
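
/*
 * Illustrative userland sketch (not from the original sources): with a
 * NULL sigevent the defaults chosen above apply, so a CLOCK_REALTIME
 * timer delivers SIGALRM with sival_int equal to the timer id:
 *
 *	timer_t tid;
 *	timer_create(CLOCK_REALTIME, NULL, &tid);
 *	// the id is >= 3: slots 0-2 are reserved for setitimer()
 */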

#ifndef _SYS_SYSPROTO_H_
struct ktimer_delete_args {
	int timerid;
};
#endif
int
sys_ktimer_delete(struct thread *td, struct ktimer_delete_args *uap)
{

	return (kern_ktimer_delete(td, uap->timerid));
}

static struct itimer *
itimer_find(struct proc *p, int timerid)
{
	struct itimer *it;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((p->p_itimers == NULL) ||
	    (timerid < 0) || (timerid >= TIMER_MAX) ||
	    (it = p->p_itimers->its_timers[timerid]) == NULL) {
		return (NULL);
	}
	ITIMER_LOCK(it);
	if ((it->it_flags & ITF_DELETING) != 0) {
		ITIMER_UNLOCK(it);
		it = NULL;
	}
	return (it);
}

int
kern_ktimer_delete(struct thread *td, int timerid)
{
	struct proc *p = td->td_proc;
	struct itimer *it;

	PROC_LOCK(p);
	it = itimer_find(p, timerid);
	if (it == NULL) {
		PROC_UNLOCK(p);
		return (EINVAL);
	}
	PROC_UNLOCK(p);

	it->it_flags |= ITF_DELETING;
	while (it->it_usecount > 0) {
		it->it_flags |= ITF_WANTED;
		msleep(it, &it->it_mtx, PPAUSE, "itimer", 0);
	}
	it->it_flags &= ~ITF_WANTED;
	CLOCK_CALL(it->it_clockid, timer_delete, (it));
	ITIMER_UNLOCK(it);

	PROC_LOCK(p);
	if (KSI_ONQ(&it->it_ksi))
		sigqueue_take(&it->it_ksi);
	p->p_itimers->its_timers[timerid] = NULL;
	PROC_UNLOCK(p);
	uma_zfree(itimer_zone, it);
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct ktimer_settime_args {
	int timerid;
	int flags;
	const struct itimerspec *value;
	struct itimerspec *ovalue;
};
#endif
int
sys_ktimer_settime(struct thread *td, struct ktimer_settime_args *uap)
{
	struct itimerspec val, oval, *ovalp;
	int error;

	error = copyin(uap->value, &val, sizeof(val));
	if (error != 0)
		return (error);
	ovalp = uap->ovalue != NULL ? &oval : NULL;
	error = kern_ktimer_settime(td, uap->timerid, uap->flags, &val, ovalp);
	if (error == 0 && uap->ovalue != NULL)
		error = copyout(ovalp, uap->ovalue, sizeof(*ovalp));
	return (error);
}

int
kern_ktimer_settime(struct thread *td, int timer_id, int flags,
    struct itimerspec *val, struct itimerspec *oval)
{
	struct proc *p;
	struct itimer *it;
	int error;

	p = td->td_proc;
	PROC_LOCK(p);
	if (timer_id < 3 || (it = itimer_find(p, timer_id)) == NULL) {
		PROC_UNLOCK(p);
		error = EINVAL;
	} else {
		PROC_UNLOCK(p);
		itimer_enter(it);
		error = CLOCK_CALL(it->it_clockid, timer_settime, (it,
		    flags, val, oval));
		itimer_leave(it);
		ITIMER_UNLOCK(it);
	}
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ktimer_gettime_args {
	int timerid;
	struct itimerspec *value;
};
#endif
int
sys_ktimer_gettime(struct thread *td, struct ktimer_gettime_args *uap)
{
	struct itimerspec val;
	int error;

	error = kern_ktimer_gettime(td, uap->timerid, &val);
	if (error == 0)
		error = copyout(&val, uap->value, sizeof(val));
	return (error);
}

int
kern_ktimer_gettime(struct thread *td, int timer_id, struct itimerspec *val)
{
	struct proc *p;
	struct itimer *it;
	int error;

	p = td->td_proc;
	PROC_LOCK(p);
	if (timer_id < 3 || (it = itimer_find(p, timer_id)) == NULL) {
		PROC_UNLOCK(p);
		error = EINVAL;
	} else {
		PROC_UNLOCK(p);
		itimer_enter(it);
		error = CLOCK_CALL(it->it_clockid, timer_gettime, (it, val));
		itimer_leave(it);
		ITIMER_UNLOCK(it);
	}
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ktimer_getoverrun_args {
	int timerid;
};
#endif
int
sys_ktimer_getoverrun(struct thread *td, struct ktimer_getoverrun_args *uap)
{

	return (kern_ktimer_getoverrun(td, uap->timerid));
}

int
kern_ktimer_getoverrun(struct thread *td, int timer_id)
{
	struct proc *p = td->td_proc;
	struct itimer *it;
	int error;

	PROC_LOCK(p);
	if (timer_id < 3 ||
	    (it = itimer_find(p, timer_id)) == NULL) {
		PROC_UNLOCK(p);
		error = EINVAL;
	} else {
		td->td_retval[0] = it->it_overrun_last;
		ITIMER_UNLOCK(it);
		PROC_UNLOCK(p);
		error = 0;
	}
	return (error);
}

static int
realtimer_create(struct itimer *it)
{
	callout_init_mtx(&it->it_callout, &it->it_mtx, 0);
	return (0);
}

static int
realtimer_delete(struct itimer *it)
{
	mtx_assert(&it->it_mtx, MA_OWNED);

	/*
	 * Clear timer's value and interval to tell realtimer_expire
	 * to not rearm the timer.
	 */
	timespecclear(&it->it_time.it_value);
	timespecclear(&it->it_time.it_interval);
	ITIMER_UNLOCK(it);
	callout_drain(&it->it_callout);
	ITIMER_LOCK(it);
	return (0);
}

static int
realtimer_gettime(struct itimer *it, struct itimerspec *ovalue)
{
	struct timespec cts;

	mtx_assert(&it->it_mtx, MA_OWNED);

	realtimer_clocktime(it->it_clockid, &cts);
	*ovalue = it->it_time;
	if (ovalue->it_value.tv_sec != 0 || ovalue->it_value.tv_nsec != 0) {
		timespecsub(&ovalue->it_value, &cts);
		if (ovalue->it_value.tv_sec < 0 ||
		    (ovalue->it_value.tv_sec == 0 &&
		     ovalue->it_value.tv_nsec == 0)) {
			ovalue->it_value.tv_sec = 0;
			ovalue->it_value.tv_nsec = 1;
		}
	}
	return (0);
}

static int
realtimer_settime(struct itimer *it, int flags,
    struct itimerspec *value, struct itimerspec *ovalue)
{
	struct timespec cts, ts;
	struct timeval tv;
	struct itimerspec val;

	mtx_assert(&it->it_mtx, MA_OWNED);

	val = *value;
	if (itimespecfix(&val.it_value))
		return (EINVAL);

	if (timespecisset(&val.it_value)) {
		if (itimespecfix(&val.it_interval))
			return (EINVAL);
	} else {
		timespecclear(&val.it_interval);
	}

	if (ovalue != NULL)
		realtimer_gettime(it, ovalue);

	it->it_time = val;
	if (timespecisset(&val.it_value)) {
		realtimer_clocktime(it->it_clockid, &cts);
		ts = val.it_value;
		if ((flags & TIMER_ABSTIME) == 0) {
			/* Convert to absolute time. */
			timespecadd(&it->it_time.it_value, &cts);
		} else {
			timespecsub(&ts, &cts);
			/*
			 * We don't care if ts is negative, tvtohz() will
			 * fix it.
			 */
		}
		TIMESPEC_TO_TIMEVAL(&tv, &ts);
		callout_reset(&it->it_callout, tvtohz(&tv),
		    realtimer_expire, it);
	} else {
		callout_stop(&it->it_callout);
	}

	return (0);
}

static void
realtimer_clocktime(clockid_t id, struct timespec *ts)
{
	if (id == CLOCK_REALTIME)
		getnanotime(ts);
	else	/* CLOCK_MONOTONIC */
		getnanouptime(ts);
}
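
/*
 * Worked example with assumed numbers (not from the original sources):
 * in realtimer_settime() above, a relative 2 s arm issued at clock time
 * 100.0 s stores it_time.it_value = 102.0 (absolute) but hands the
 * relative 2 s to tvtohz(); with TIMER_ABSTIME, a request for 102.0 is
 * stored as-is and 102.0 - 100.0 = 2.0 s is what reaches the callout.
 */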

int
itimer_accept(struct proc *p, int timerid, ksiginfo_t *ksi)
{
	struct itimer *it;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	it = itimer_find(p, timerid);
	if (it != NULL) {
		ksi->ksi_overrun = it->it_overrun;
		it->it_overrun_last = it->it_overrun;
		it->it_overrun = 0;
		ITIMER_UNLOCK(it);
		return (0);
	}
	return (EINVAL);
}

int
itimespecfix(struct timespec *ts)
{

	if (ts->tv_sec < 0 || ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000)
		return (EINVAL);
	if (ts->tv_sec == 0 && ts->tv_nsec != 0 && ts->tv_nsec < tick * 1000)
		ts->tv_nsec = tick * 1000;
	return (0);
}

/* Timeout callback for realtime timer */
static void
realtimer_expire(void *arg)
{
	struct timespec cts, ts;
	struct timeval tv;
	struct itimer *it;

	it = (struct itimer *)arg;

	realtimer_clocktime(it->it_clockid, &cts);
	/* Only fire if time is reached. */
	if (timespeccmp(&cts, &it->it_time.it_value, >=)) {
		if (timespecisset(&it->it_time.it_interval)) {
			timespecadd(&it->it_time.it_value,
			    &it->it_time.it_interval);
			while (timespeccmp(&cts, &it->it_time.it_value, >=)) {
				if (it->it_overrun < INT_MAX)
					it->it_overrun++;
				else
					it->it_ksi.ksi_errno = ERANGE;
				timespecadd(&it->it_time.it_value,
				    &it->it_time.it_interval);
			}
		} else {
			/* single shot timer? */
			timespecclear(&it->it_time.it_value);
		}
		if (timespecisset(&it->it_time.it_value)) {
			ts = it->it_time.it_value;
			timespecsub(&ts, &cts);
			TIMESPEC_TO_TIMEVAL(&tv, &ts);
			callout_reset(&it->it_callout, tvtohz(&tv),
			    realtimer_expire, it);
		}
		itimer_enter(it);
		ITIMER_UNLOCK(it);
		itimer_fire(it);
		ITIMER_LOCK(it);
		itimer_leave(it);
	} else if (timespecisset(&it->it_time.it_value)) {
		ts = it->it_time.it_value;
		timespecsub(&ts, &cts);
		TIMESPEC_TO_TIMEVAL(&tv, &ts);
		callout_reset(&it->it_callout, tvtohz(&tv), realtimer_expire,
		    it);
	}
}

void
itimer_fire(struct itimer *it)
{
	struct proc *p = it->it_proc;
	struct thread *td;

	if (it->it_sigev.sigev_notify == SIGEV_SIGNAL ||
	    it->it_sigev.sigev_notify == SIGEV_THREAD_ID) {
		if (sigev_findtd(p, &it->it_sigev, &td) != 0) {
			ITIMER_LOCK(it);
			timespecclear(&it->it_time.it_value);
			timespecclear(&it->it_time.it_interval);
			callout_stop(&it->it_callout);
			ITIMER_UNLOCK(it);
			return;
		}
		if (!KSI_ONQ(&it->it_ksi)) {
			it->it_ksi.ksi_errno = 0;
			ksiginfo_set_sigev(&it->it_ksi, &it->it_sigev);
			tdsendsignal(p, td, it->it_ksi.ksi_signo, &it->it_ksi);
		} else {
			if (it->it_overrun < INT_MAX)
				it->it_overrun++;
			else
				it->it_ksi.ksi_errno = ERANGE;
		}
		PROC_UNLOCK(p);
	}
}

static void
itimers_alloc(struct proc *p)
{
	struct itimers *its;
	int i;

	its = malloc(sizeof (struct itimers), M_SUBPROC, M_WAITOK | M_ZERO);
	LIST_INIT(&its->its_virtual);
	LIST_INIT(&its->its_prof);
	TAILQ_INIT(&its->its_worklist);
	for (i = 0; i < TIMER_MAX; i++)
		its->its_timers[i] = NULL;
	PROC_LOCK(p);
	if (p->p_itimers == NULL) {
		p->p_itimers = its;
		PROC_UNLOCK(p);
	} else {
		PROC_UNLOCK(p);
		free(its, M_SUBPROC);
	}
}

static void
itimers_event_hook_exec(void *arg, struct proc *p,
    struct image_params *imgp __unused)
{
	itimers_event_hook_exit(arg, p);
}

/* Clean up timers when some process events are being triggered. */
static void
itimers_event_hook_exit(void *arg, struct proc *p)
{
	struct itimers *its;
	struct itimer *it;
	int event = (int)(intptr_t)arg;
	int i;

	if (p->p_itimers != NULL) {
		its = p->p_itimers;
		for (i = 0; i < MAX_CLOCKS; ++i) {
			if (posix_clocks[i].event_hook != NULL)
				CLOCK_CALL(i, event_hook, (p, i, event));
		}
		/*
		 * According to SUSv3, XSI interval timers should be
		 * inherited by the new image.
		 */
		if (event == ITIMER_EV_EXEC)
			i = 3;
		else if (event == ITIMER_EV_EXIT)
			i = 0;
		else
			panic("unhandled event");
		for (; i < TIMER_MAX; ++i) {
			if ((it = its->its_timers[i]) != NULL)
				kern_ktimer_delete(curthread, i);
		}
		if (its->its_timers[0] == NULL &&
		    its->its_timers[1] == NULL &&
		    its->its_timers[2] == NULL) {
			free(its, M_SUBPROC);
			p->p_itimers = NULL;
		}
	}
}