/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/clock.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysproto.h>
#include <sys/eventhandler.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/sleepqueue.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/posix4.h>
#include <sys/time.h>
#include <sys/timers.h>
#include <sys/timetc.h>
#include <sys/vnode.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/vm.h>
#include <vm/vm_extern.h>

#define MAX_CLOCKS	(CLOCK_MONOTONIC+1)
#define CPUCLOCK_BIT		0x80000000
#define CPUCLOCK_PROCESS_BIT	0x40000000
#define CPUCLOCK_ID_MASK	(~(CPUCLOCK_BIT|CPUCLOCK_PROCESS_BIT))
#define MAKE_THREAD_CPUCLOCK(tid)	(CPUCLOCK_BIT|(tid))
#define MAKE_PROCESS_CPUCLOCK(pid)	\
	(CPUCLOCK_BIT|CPUCLOCK_PROCESS_BIT|(pid))

static struct kclock	posix_clocks[MAX_CLOCKS];
static uma_zone_t	itimer_zone = NULL;

/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */
static int	settime(struct thread *, struct timeval *);
static void	timevalfix(struct timeval *);

static void	itimer_start(void);
static int	itimer_init(void *, int, int);
static void	itimer_fini(void *, int);
static void	itimer_enter(struct itimer *);
static void	itimer_leave(struct itimer *);
static struct itimer *itimer_find(struct proc *, int);
static void	itimers_alloc(struct proc *);
static void	itimers_event_hook_exec(void *arg, struct proc *p,
		    struct image_params *imgp);
static void	itimers_event_hook_exit(void *arg, struct proc *p);
static int	realtimer_create(struct itimer *);
static int	realtimer_gettime(struct itimer *, struct itimerspec *);
static int	realtimer_settime(struct itimer *, int,
		    struct itimerspec *, struct itimerspec *);
static int	realtimer_delete(struct itimer *);
static void	realtimer_clocktime(clockid_t, struct timespec *);
static void	realtimer_expire(void *);

int		register_posix_clock(int, struct kclock *);
void		itimer_fire(struct itimer *it);
int		itimespecfix(struct timespec *ts);

#define CLOCK_CALL(clock, call, arglist)	\
	((*posix_clocks[clock].call) arglist)

SYSINIT(posix_timer, SI_SUB_P1003_1B, SI_ORDER_FIRST+4, itimer_start, NULL);
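
/*
 * Step the wall clock to the given time, subject to the securelevel
 * policy spelled out in the comment below, then push the new time
 * into the timecounter and the real-time clock via resettodr().
 */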
static int
settime(struct thread *td, struct timeval *tv)
{
	struct timeval delta, tv1, tv2;
	static struct timeval maxtime, laststep;
	struct timespec ts;

	microtime(&tv1);
	delta = *tv;
	timevalsub(&delta, &tv1);

	/*
	 * If the system is secure, we do not allow the time to be
	 * set to a value earlier than 1 second less than the highest
	 * time we have yet seen. The worst a miscreant can do in
	 * this circumstance is "freeze" time. He couldn't go
	 * back to the past.
	 *
	 * We similarly do not allow the clock to be stepped more
	 * than one second, nor more than once per second. This allows
	 * a miscreant to make the clock march double-time, but no worse.
	 */
	if (securelevel_gt(td->td_ucred, 1) != 0) {
		if (delta.tv_sec < 0 || delta.tv_usec < 0) {
			/*
			 * Update maxtime to latest time we've seen.
			 */
			if (tv1.tv_sec > maxtime.tv_sec)
				maxtime = tv1;
			tv2 = *tv;
			timevalsub(&tv2, &maxtime);
			if (tv2.tv_sec < -1) {
				tv->tv_sec = maxtime.tv_sec - 1;
				printf("Time adjustment clamped to -1 second\n");
			}
		} else {
			if (tv1.tv_sec == laststep.tv_sec)
				return (EPERM);
			if (delta.tv_sec > 1) {
				tv->tv_sec = tv1.tv_sec + 1;
				printf("Time adjustment clamped to +1 second\n");
			}
			laststep = *tv;
		}
	}

	ts.tv_sec = tv->tv_sec;
	ts.tv_nsec = tv->tv_usec * 1000;
	tc_setclock(&ts);
	resettodr();
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct clock_getcpuclockid2_args {
	id_t id;
	int which;
	clockid_t *clock_id;
};
#endif
/* ARGSUSED */
int
sys_clock_getcpuclockid2(struct thread *td, struct clock_getcpuclockid2_args *uap)
{
	clockid_t clk_id;
	int error;

	error = kern_clock_getcpuclockid2(td, uap->id, uap->which, &clk_id);
	if (error == 0)
		error = copyout(&clk_id, uap->clock_id, sizeof(clockid_t));
	return (error);
}

int
kern_clock_getcpuclockid2(struct thread *td, id_t id, int which,
    clockid_t *clk_id)
{
	struct proc *p;
	pid_t pid;
	lwpid_t tid;
	int error;

	switch (which) {
	case CPUCLOCK_WHICH_PID:
		if (id != 0) {
			error = pget(id, PGET_CANSEE | PGET_NOTID, &p);
			if (error != 0)
				return (error);
			PROC_UNLOCK(p);
			pid = id;
		} else {
			pid = td->td_proc->p_pid;
		}
		*clk_id = MAKE_PROCESS_CPUCLOCK(pid);
		return (0);
	case CPUCLOCK_WHICH_TID:
		tid = id == 0 ? td->td_tid : id;
		*clk_id = MAKE_THREAD_CPUCLOCK(tid);
		return (0);
	default:
		return (EINVAL);
	}
}

#ifndef _SYS_SYSPROTO_H_
struct clock_gettime_args {
	clockid_t clock_id;
	struct timespec *tp;
};
#endif
/* ARGSUSED */
int
sys_clock_gettime(struct thread *td, struct clock_gettime_args *uap)
{
	struct timespec ats;
	int error;

	error = kern_clock_gettime(td, uap->clock_id, &ats);
	if (error == 0)
		error = copyout(&ats, uap->tp, sizeof(ats));

	return (error);
}

static inline void
cputick2timespec(uint64_t runtime, struct timespec *ats)
{
	runtime = cputick2usec(runtime);
	ats->tv_sec = runtime / 1000000;
	ats->tv_nsec = runtime % 1000000 * 1000;
}

static void
get_thread_cputime(struct thread *targettd, struct timespec *ats)
{
	uint64_t runtime, curtime, switchtime;

	if (targettd == NULL) { /* current thread */
		critical_enter();
		switchtime = PCPU_GET(switchtime);
		curtime = cpu_ticks();
		runtime = curthread->td_runtime;
		critical_exit();
		runtime += curtime - switchtime;
	} else {
		thread_lock(targettd);
		runtime = targettd->td_runtime;
		thread_unlock(targettd);
	}
	cputick2timespec(runtime, ats);
}

static void
get_process_cputime(struct proc *targetp, struct timespec *ats)
{
	uint64_t runtime;
	struct rusage ru;

	PROC_STATLOCK(targetp);
	rufetch(targetp, &ru);
	runtime = targetp->p_rux.rux_runtime;
	PROC_STATUNLOCK(targetp);
	cputick2timespec(runtime, ats);
}
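
/*
 * Decode a CPU-time clock id built by MAKE_THREAD_CPUCLOCK() or
 * MAKE_PROCESS_CPUCLOCK() above and fetch the CPU time of the named
 * thread or process.
 */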
static int
get_cputime(struct thread *td, clockid_t clock_id, struct timespec *ats)
{
	struct proc *p, *p2;
	struct thread *td2;
	lwpid_t tid;
	pid_t pid;
	int error;

	p = td->td_proc;
	if ((clock_id & CPUCLOCK_PROCESS_BIT) == 0) {
		tid = clock_id & CPUCLOCK_ID_MASK;
		td2 = tdfind(tid, p->p_pid);
		if (td2 == NULL)
			return (EINVAL);
		get_thread_cputime(td2, ats);
		PROC_UNLOCK(td2->td_proc);
	} else {
		pid = clock_id & CPUCLOCK_ID_MASK;
		error = pget(pid, PGET_CANSEE, &p2);
		if (error != 0)
			return (EINVAL);
		get_process_cputime(p2, ats);
		PROC_UNLOCK(p2);
	}
	return (0);
}

int
kern_clock_gettime(struct thread *td, clockid_t clock_id, struct timespec *ats)
{
	struct timeval sys, user;
	struct proc *p;

	p = td->td_proc;
	switch (clock_id) {
	case CLOCK_REALTIME:		/* Default to precise. */
	case CLOCK_REALTIME_PRECISE:
		nanotime(ats);
		break;
	case CLOCK_REALTIME_FAST:
		getnanotime(ats);
		break;
	case CLOCK_VIRTUAL:
		PROC_LOCK(p);
		PROC_STATLOCK(p);
		calcru(p, &user, &sys);
		PROC_STATUNLOCK(p);
		PROC_UNLOCK(p);
		TIMEVAL_TO_TIMESPEC(&user, ats);
		break;
	case CLOCK_PROF:
		PROC_LOCK(p);
		PROC_STATLOCK(p);
		calcru(p, &user, &sys);
		PROC_STATUNLOCK(p);
		PROC_UNLOCK(p);
		timevaladd(&user, &sys);
		TIMEVAL_TO_TIMESPEC(&user, ats);
		break;
	case CLOCK_MONOTONIC:		/* Default to precise. */
	case CLOCK_MONOTONIC_PRECISE:
	case CLOCK_UPTIME:
	case CLOCK_UPTIME_PRECISE:
		nanouptime(ats);
		break;
	case CLOCK_UPTIME_FAST:
	case CLOCK_MONOTONIC_FAST:
		getnanouptime(ats);
		break;
	case CLOCK_SECOND:
		ats->tv_sec = time_second;
		ats->tv_nsec = 0;
		break;
	case CLOCK_THREAD_CPUTIME_ID:
		get_thread_cputime(NULL, ats);
		break;
	case CLOCK_PROCESS_CPUTIME_ID:
		PROC_LOCK(p);
		get_process_cputime(p, ats);
		PROC_UNLOCK(p);
		break;
	default:
		if ((int)clock_id >= 0)
			return (EINVAL);
		return (get_cputime(td, clock_id, ats));
	}
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct clock_settime_args {
	clockid_t clock_id;
	const struct timespec *tp;
};
#endif
/* ARGSUSED */
int
sys_clock_settime(struct thread *td, struct clock_settime_args *uap)
{
	struct timespec ats;
	int error;

	if ((error = copyin(uap->tp, &ats, sizeof(ats))) != 0)
		return (error);
	return (kern_clock_settime(td, uap->clock_id, &ats));
}

static int allow_insane_settime = 0;
SYSCTL_INT(_debug, OID_AUTO, allow_insane_settime, CTLFLAG_RWTUN,
    &allow_insane_settime, 0,
    "do not perform possibly restrictive checks on settime(2) args");

int
kern_clock_settime(struct thread *td, clockid_t clock_id, struct timespec *ats)
{
	struct timeval atv;
	int error;

	if ((error = priv_check(td, PRIV_CLOCK_SETTIME)) != 0)
		return (error);
	if (clock_id != CLOCK_REALTIME)
		return (EINVAL);
	if (ats->tv_nsec < 0 || ats->tv_nsec >= 1000000000 ||
	    ats->tv_sec < 0)
		return (EINVAL);
	if (!allow_insane_settime && ats->tv_sec > 9999ULL * 366 * 24 * 60 * 60)
		return (EINVAL);
	/* XXX Don't convert nsec->usec and back */
	TIMESPEC_TO_TIMEVAL(&atv, ats);
	error = settime(td, &atv);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct clock_getres_args {
	clockid_t clock_id;
	struct timespec *tp;
};
#endif
int
sys_clock_getres(struct thread *td, struct clock_getres_args *uap)
{
	struct timespec ts;
	int error;

	if (uap->tp == NULL)
		return (0);

	error = kern_clock_getres(td, uap->clock_id, &ts);
	if (error == 0)
		error = copyout(&ts, uap->tp, sizeof(ts));
	return (error);
}
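
/*
 * Worked example (illustrative only): with hz = 1000, the
 * CLOCK_VIRTUAL and CLOCK_PROF cases below report
 * howmany(1000000000, 1000) = 1000000 ns, i.e. one scheduler tick
 * of resolution.
 */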
int
kern_clock_getres(struct thread *td, clockid_t clock_id, struct timespec *ts)
{

	ts->tv_sec = 0;
	switch (clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_REALTIME_FAST:
	case CLOCK_REALTIME_PRECISE:
	case CLOCK_MONOTONIC:
	case CLOCK_MONOTONIC_FAST:
	case CLOCK_MONOTONIC_PRECISE:
	case CLOCK_UPTIME:
	case CLOCK_UPTIME_FAST:
	case CLOCK_UPTIME_PRECISE:
		/*
		 * Round up the result of the division cheaply by adding 1.
		 * Rounding up is especially important if rounding down
		 * would give 0.  Perfect rounding is unimportant.
		 */
		ts->tv_nsec = 1000000000 / tc_getfrequency() + 1;
		break;
	case CLOCK_VIRTUAL:
	case CLOCK_PROF:
		/* Accurately round up here because we can do so cheaply. */
		ts->tv_nsec = howmany(1000000000, hz);
		break;
	case CLOCK_SECOND:
		ts->tv_sec = 1;
		ts->tv_nsec = 0;
		break;
	case CLOCK_THREAD_CPUTIME_ID:
	case CLOCK_PROCESS_CPUTIME_ID:
	cputime:
		/* sync with cputick2usec */
		ts->tv_nsec = 1000000 / cpu_tickrate();
		if (ts->tv_nsec == 0)
			ts->tv_nsec = 1000;
		break;
	default:
		if ((int)clock_id < 0)
			goto cputime;
		return (EINVAL);
	}
	return (0);
}

static uint8_t nanowait[MAXCPU];

int
kern_nanosleep(struct thread *td, struct timespec *rqt, struct timespec *rmt)
{
	struct timespec ts;
	sbintime_t sbt, sbtt, prec, tmp;
	time_t over;
	int error;

	if (rqt->tv_nsec < 0 || rqt->tv_nsec >= 1000000000)
		return (EINVAL);
	if (rqt->tv_sec < 0 || (rqt->tv_sec == 0 && rqt->tv_nsec == 0))
		return (0);
	ts = *rqt;
	if (ts.tv_sec > INT32_MAX / 2) {
		over = ts.tv_sec - INT32_MAX / 2;
		ts.tv_sec -= over;
	} else
		over = 0;
	tmp = tstosbt(ts);
	prec = tmp;
	prec >>= tc_precexp;
	if (TIMESEL(&sbt, tmp))
		sbt += tc_tick_sbt;
	sbt += tmp;
	error = tsleep_sbt(&nanowait[curcpu], PWAIT | PCATCH, "nanslp",
	    sbt, prec, C_ABSOLUTE);
	if (error != EWOULDBLOCK) {
		if (error == ERESTART)
			error = EINTR;
		TIMESEL(&sbtt, tmp);
		if (rmt != NULL) {
			ts = sbttots(sbt - sbtt);
			ts.tv_sec += over;
			if (ts.tv_sec < 0)
				timespecclear(&ts);
			*rmt = ts;
		}
		if (sbtt >= sbt)
			return (0);
		return (error);
	}
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct nanosleep_args {
	struct timespec *rqtp;
	struct timespec *rmtp;
};
#endif
/* ARGSUSED */
int
sys_nanosleep(struct thread *td, struct nanosleep_args *uap)
{
	struct timespec rmt, rqt;
	int error;

	error = copyin(uap->rqtp, &rqt, sizeof(rqt));
	if (error)
		return (error);

	if (uap->rmtp &&
	    !useracc((caddr_t)uap->rmtp, sizeof(rmt), VM_PROT_WRITE))
		return (EFAULT);
	error = kern_nanosleep(td, &rqt, &rmt);
	if (error && uap->rmtp) {
		int error2;

		error2 = copyout(&rmt, uap->rmtp, sizeof(rmt));
		if (error2)
			error = error2;
	}
	return (error);
}
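
/*
 * Illustrative userland sketch (not part of this file): a caller
 * that wants to sleep the full interval across signal interruptions
 * can loop on the remaining time reported through rmtp:
 *
 *	struct timespec req = { 1, 500000000 }, rem;
 *
 *	while (nanosleep(&req, &rem) == -1 && errno == EINTR)
 *		req = rem;
 */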
#ifndef _SYS_SYSPROTO_H_
struct gettimeofday_args {
	struct timeval *tp;
	struct timezone *tzp;
};
#endif
/* ARGSUSED */
int
sys_gettimeofday(struct thread *td, struct gettimeofday_args *uap)
{
	struct timeval atv;
	struct timezone rtz;
	int error = 0;

	if (uap->tp) {
		microtime(&atv);
		error = copyout(&atv, uap->tp, sizeof (atv));
	}
	if (error == 0 && uap->tzp != NULL) {
		rtz.tz_minuteswest = tz_minuteswest;
		rtz.tz_dsttime = tz_dsttime;
		error = copyout(&rtz, uap->tzp, sizeof (rtz));
	}
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct settimeofday_args {
	struct timeval *tv;
	struct timezone *tzp;
};
#endif
/* ARGSUSED */
int
sys_settimeofday(struct thread *td, struct settimeofday_args *uap)
{
	struct timeval atv, *tvp;
	struct timezone atz, *tzp;
	int error;

	if (uap->tv) {
		error = copyin(uap->tv, &atv, sizeof(atv));
		if (error)
			return (error);
		tvp = &atv;
	} else
		tvp = NULL;
	if (uap->tzp) {
		error = copyin(uap->tzp, &atz, sizeof(atz));
		if (error)
			return (error);
		tzp = &atz;
	} else
		tzp = NULL;
	return (kern_settimeofday(td, tvp, tzp));
}

int
kern_settimeofday(struct thread *td, struct timeval *tv, struct timezone *tzp)
{
	int error;

	error = priv_check(td, PRIV_SETTIMEOFDAY);
	if (error)
		return (error);
	/* Verify all parameters before changing time. */
	if (tv) {
		if (tv->tv_usec < 0 || tv->tv_usec >= 1000000 ||
		    tv->tv_sec < 0)
			return (EINVAL);
		error = settime(td, tv);
	}
	if (tzp && error == 0) {
		tz_minuteswest = tzp->tz_minuteswest;
		tz_dsttime = tzp->tz_dsttime;
	}
	return (error);
}

/*
 * Get value of an interval timer.  The process virtual and profiling virtual
 * time timers are kept in the p_stats area, since they can be swapped out.
 * These are kept internally in the way they are specified externally: in
 * time until they expire.
 *
 * The real time interval timer is kept in the process table slot for the
 * process, and its value (it_value) is kept as an absolute time rather than
 * as a delta, so that it is easy to keep periodic real-time signals from
 * drifting.
 *
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a timeout routine,
 * called from the softclock() routine.  Since a callout may be delayed in
 * real time due to interrupt processing in the system, it is possible for
 * the real time timeout routine (realitexpire, given below), to be delayed
 * in real time past when it is supposed to occur.  It does not suffice,
 * therefore, to reload the real timer .it_value from the real time timers
 * .it_interval.  Rather, we compute the next time in absolute time the timer
 * should go off.
 */
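
/*
 * Worked example (illustrative): with a 0.5 s interval armed at an
 * uptime of 100.0 s, a callout that is delayed until uptime 102.3 s
 * advances .it_value in 0.5 s steps until it exceeds the current
 * time, landing on 102.5 s, so later expirations stay in phase
 * instead of inheriting the 2.3 s of accumulated delay.
 */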
#ifndef _SYS_SYSPROTO_H_
struct getitimer_args {
	u_int	which;
	struct itimerval *itv;
};
#endif
int
sys_getitimer(struct thread *td, struct getitimer_args *uap)
{
	struct itimerval aitv;
	int error;

	error = kern_getitimer(td, uap->which, &aitv);
	if (error != 0)
		return (error);
	return (copyout(&aitv, uap->itv, sizeof (struct itimerval)));
}

int
kern_getitimer(struct thread *td, u_int which, struct itimerval *aitv)
{
	struct proc *p = td->td_proc;
	struct timeval ctv;

	if (which > ITIMER_PROF)
		return (EINVAL);

	if (which == ITIMER_REAL) {
		/*
		 * Convert from absolute to relative time in .it_value
		 * part of real time timer.  If time for real time timer
		 * has passed return 0, else return difference between
		 * current time and time for the timer to go off.
		 */
		PROC_LOCK(p);
		*aitv = p->p_realtimer;
		PROC_UNLOCK(p);
		if (timevalisset(&aitv->it_value)) {
			microuptime(&ctv);
			if (timevalcmp(&aitv->it_value, &ctv, <))
				timevalclear(&aitv->it_value);
			else
				timevalsub(&aitv->it_value, &ctv);
		}
	} else {
		PROC_ITIMLOCK(p);
		*aitv = p->p_stats->p_timer[which];
		PROC_ITIMUNLOCK(p);
	}
#ifdef KTRACE
	if (KTRPOINT(td, KTR_STRUCT))
		ktritimerval(aitv);
#endif
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct setitimer_args {
	u_int	which;
	struct itimerval *itv, *oitv;
};
#endif
int
sys_setitimer(struct thread *td, struct setitimer_args *uap)
{
	struct itimerval aitv, oitv;
	int error;

	if (uap->itv == NULL) {
		uap->itv = uap->oitv;
		return (sys_getitimer(td, (struct getitimer_args *)uap));
	}

	if ((error = copyin(uap->itv, &aitv, sizeof(struct itimerval))))
		return (error);
	error = kern_setitimer(td, uap->which, &aitv, &oitv);
	if (error != 0 || uap->oitv == NULL)
		return (error);
	return (copyout(&oitv, uap->oitv, sizeof(struct itimerval)));
}

int
kern_setitimer(struct thread *td, u_int which, struct itimerval *aitv,
    struct itimerval *oitv)
{
	struct proc *p = td->td_proc;
	struct timeval ctv;
	sbintime_t sbt, pr;

	if (aitv == NULL)
		return (kern_getitimer(td, which, oitv));

	if (which > ITIMER_PROF)
		return (EINVAL);
#ifdef KTRACE
	if (KTRPOINT(td, KTR_STRUCT))
		ktritimerval(aitv);
#endif
	if (itimerfix(&aitv->it_value) ||
	    aitv->it_value.tv_sec > INT32_MAX / 2)
		return (EINVAL);
	if (!timevalisset(&aitv->it_value))
		timevalclear(&aitv->it_interval);
	else if (itimerfix(&aitv->it_interval) ||
	    aitv->it_interval.tv_sec > INT32_MAX / 2)
		return (EINVAL);

	if (which == ITIMER_REAL) {
		PROC_LOCK(p);
		if (timevalisset(&p->p_realtimer.it_value))
			callout_stop(&p->p_itcallout);
		microuptime(&ctv);
		if (timevalisset(&aitv->it_value)) {
			pr = tvtosbt(aitv->it_value) >> tc_precexp;
			timevaladd(&aitv->it_value, &ctv);
			sbt = tvtosbt(aitv->it_value);
			callout_reset_sbt(&p->p_itcallout, sbt, pr,
			    realitexpire, p, C_ABSOLUTE);
		}
		*oitv = p->p_realtimer;
		p->p_realtimer = *aitv;
		PROC_UNLOCK(p);
		if (timevalisset(&oitv->it_value)) {
			if (timevalcmp(&oitv->it_value, &ctv, <))
				timevalclear(&oitv->it_value);
			else
				timevalsub(&oitv->it_value, &ctv);
		}
	} else {
		if (aitv->it_interval.tv_sec == 0 &&
		    aitv->it_interval.tv_usec != 0 &&
		    aitv->it_interval.tv_usec < tick)
			aitv->it_interval.tv_usec = tick;
		if (aitv->it_value.tv_sec == 0 &&
		    aitv->it_value.tv_usec != 0 &&
		    aitv->it_value.tv_usec < tick)
			aitv->it_value.tv_usec = tick;
		PROC_ITIMLOCK(p);
		*oitv = p->p_stats->p_timer[which];
		p->p_stats->p_timer[which] = *aitv;
		PROC_ITIMUNLOCK(p);
	}
#ifdef KTRACE
	if (KTRPOINT(td, KTR_STRUCT))
		ktritimerval(oitv);
#endif
	return (0);
}
/*
 * Real interval timer expired:
 * send process whose timer expired an alarm signal.
 * If time is not set up to reload, then just return.
 * Else compute next time timer should go off which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 * tvtohz() always adds 1 to allow for the time until the next clock
 * interrupt being strictly less than 1 clock tick, but we don't want
 * that here since we want to appear to be in sync with the clock
 * interrupt even when we're delayed.
 */
void
realitexpire(void *arg)
{
	struct proc *p;
	struct timeval ctv;
	sbintime_t isbt;

	p = (struct proc *)arg;
	kern_psignal(p, SIGALRM);
	if (!timevalisset(&p->p_realtimer.it_interval)) {
		timevalclear(&p->p_realtimer.it_value);
		if (p->p_flag & P_WEXIT)
			wakeup(&p->p_itcallout);
		return;
	}
	isbt = tvtosbt(p->p_realtimer.it_interval);
	if (isbt >= sbt_timethreshold)
		getmicrouptime(&ctv);
	else
		microuptime(&ctv);
	do {
		timevaladd(&p->p_realtimer.it_value,
		    &p->p_realtimer.it_interval);
	} while (timevalcmp(&p->p_realtimer.it_value, &ctv, <=));
	callout_reset_sbt(&p->p_itcallout, tvtosbt(p->p_realtimer.it_value),
	    isbt >> tc_precexp, realitexpire, p, C_ABSOLUTE);
}

/*
 * Check that a proposed value to load into the .it_value or
 * .it_interval part of an interval timer is acceptable, and
 * fix it to have at least minimal value (i.e. if it is less
 * than the resolution of the clock, round it up.)
 */
int
itimerfix(struct timeval *tv)
{

	if (tv->tv_sec < 0 || tv->tv_usec < 0 || tv->tv_usec >= 1000000)
		return (EINVAL);
	if (tv->tv_sec == 0 && tv->tv_usec != 0 &&
	    tv->tv_usec < (u_int)tick / 16)
		tv->tv_usec = (u_int)tick / 16;
	return (0);
}

/*
 * Decrement an interval timer by a specified number
 * of microseconds, which must be less than a second,
 * i.e. < 1000000.  If the timer expires, then reload
 * it.  In this case, carry over (usec - old value) to
 * reduce the value reloaded into the timer so that
 * the timer does not drift.  This routine assumes
 * that it is called in a context where the timers
 * on which it is operating cannot change in value.
 */
int
itimerdecr(struct itimerval *itp, int usec)
{

	if (itp->it_value.tv_usec < usec) {
		if (itp->it_value.tv_sec == 0) {
			/* expired, and already in next interval */
			usec -= itp->it_value.tv_usec;
			goto expire;
		}
		itp->it_value.tv_usec += 1000000;
		itp->it_value.tv_sec--;
	}
	itp->it_value.tv_usec -= usec;
	usec = 0;
	if (timevalisset(&itp->it_value))
		return (1);
	/* expired, exactly at end of interval */
expire:
	if (timevalisset(&itp->it_interval)) {
		itp->it_value = itp->it_interval;
		itp->it_value.tv_usec -= usec;
		if (itp->it_value.tv_usec < 0) {
			itp->it_value.tv_usec += 1000000;
			itp->it_value.tv_sec--;
		}
	} else
		itp->it_value.tv_usec = 0;	/* sec is already 0 */
	return (0);
}
/*
 * Add and subtract routines for timevals.
 * N.B.: subtract routine doesn't deal with
 * results which are before the beginning,
 * it just gets very confused in this case.
 * Caveat emptor.
 */
void
timevaladd(struct timeval *t1, const struct timeval *t2)
{

	t1->tv_sec += t2->tv_sec;
	t1->tv_usec += t2->tv_usec;
	timevalfix(t1);
}

void
timevalsub(struct timeval *t1, const struct timeval *t2)
{

	t1->tv_sec -= t2->tv_sec;
	t1->tv_usec -= t2->tv_usec;
	timevalfix(t1);
}

static void
timevalfix(struct timeval *t1)
{

	if (t1->tv_usec < 0) {
		t1->tv_sec--;
		t1->tv_usec += 1000000;
	}
	if (t1->tv_usec >= 1000000) {
		t1->tv_sec++;
		t1->tv_usec -= 1000000;
	}
}

/*
 * ratecheck(): simple time-based rate-limit checking.
 */
int
ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
{
	struct timeval tv, delta;
	int rv = 0;

	getmicrouptime(&tv);		/* NB: 10ms precision */
	delta = tv;
	timevalsub(&delta, lasttime);

	/*
	 * check for 0,0 is so that the message will be seen at least once,
	 * even if interval is huge.
	 */
	if (timevalcmp(&delta, mininterval, >=) ||
	    (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
		*lasttime = tv;
		rv = 1;
	}

	return (rv);
}
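
/*
 * Typical use of ratecheck() above (illustrative sketch): emit a
 * diagnostic at most once per second.
 *
 *	static struct timeval lastwarn;
 *	static const struct timeval warninterval = { 1, 0 };
 *
 *	if (ratecheck(&lastwarn, &warninterval))
 *		printf("something noteworthy happened\n");
 */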
/*
 * ppsratecheck(): packets (or events) per second limitation.
 *
 * Return 0 if the limit is to be enforced (e.g. the caller
 * should drop a packet because of the rate limitation).
 *
 * maxpps of 0 always causes zero to be returned.  maxpps of -1
 * always causes 1 to be returned; this effectively defeats rate
 * limiting.
 *
 * Note that we maintain the struct timeval for compatibility
 * with other bsd systems.  We reuse the storage and just monitor
 * clock ticks for minimal overhead.
 */
int
ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
{
	int now;

	/*
	 * Reset the last time and counter if this is the first call
	 * or more than a second has passed since the last update of
	 * lasttime.
	 */
	now = ticks;
	if (lasttime->tv_sec == 0 || (u_int)(now - lasttime->tv_sec) >= hz) {
		lasttime->tv_sec = now;
		*curpps = 1;
		return (maxpps != 0);
	} else {
		(*curpps)++;		/* NB: ignore potential overflow */
		return (maxpps < 0 || *curpps <= maxpps);
	}
}

static void
itimer_start(void)
{
	struct kclock rt_clock = {
		.timer_create  = realtimer_create,
		.timer_delete  = realtimer_delete,
		.timer_settime = realtimer_settime,
		.timer_gettime = realtimer_gettime,
		.event_hook    = NULL
	};

	itimer_zone = uma_zcreate("itimer", sizeof(struct itimer),
	    NULL, NULL, itimer_init, itimer_fini, UMA_ALIGN_PTR, 0);
	register_posix_clock(CLOCK_REALTIME, &rt_clock);
	register_posix_clock(CLOCK_MONOTONIC, &rt_clock);
	p31b_setcfg(CTL_P1003_1B_TIMERS, 200112L);
	p31b_setcfg(CTL_P1003_1B_DELAYTIMER_MAX, INT_MAX);
	p31b_setcfg(CTL_P1003_1B_TIMER_MAX, TIMER_MAX);
	EVENTHANDLER_REGISTER(process_exit, itimers_event_hook_exit,
	    (void *)ITIMER_EV_EXIT, EVENTHANDLER_PRI_ANY);
	EVENTHANDLER_REGISTER(process_exec, itimers_event_hook_exec,
	    (void *)ITIMER_EV_EXEC, EVENTHANDLER_PRI_ANY);
}

int
register_posix_clock(int clockid, struct kclock *clk)
{
	if ((unsigned)clockid >= MAX_CLOCKS) {
		printf("%s: invalid clockid\n", __func__);
		return (0);
	}
	posix_clocks[clockid] = *clk;
	return (1);
}

static int
itimer_init(void *mem, int size, int flags)
{
	struct itimer *it;

	it = (struct itimer *)mem;
	mtx_init(&it->it_mtx, "itimer lock", NULL, MTX_DEF);
	return (0);
}

static void
itimer_fini(void *mem, int size)
{
	struct itimer *it;

	it = (struct itimer *)mem;
	mtx_destroy(&it->it_mtx);
}

static void
itimer_enter(struct itimer *it)
{

	mtx_assert(&it->it_mtx, MA_OWNED);
	it->it_usecount++;
}

static void
itimer_leave(struct itimer *it)
{

	mtx_assert(&it->it_mtx, MA_OWNED);
	KASSERT(it->it_usecount > 0, ("invalid it_usecount"));

	if (--it->it_usecount == 0 && (it->it_flags & ITF_WANTED) != 0)
		wakeup(it);
}

#ifndef _SYS_SYSPROTO_H_
struct ktimer_create_args {
	clockid_t clock_id;
	struct sigevent * evp;
	int * timerid;
};
#endif
int
sys_ktimer_create(struct thread *td, struct ktimer_create_args *uap)
{
	struct sigevent *evp, ev;
	int id;
	int error;

	if (uap->evp == NULL) {
		evp = NULL;
	} else {
		error = copyin(uap->evp, &ev, sizeof(ev));
		if (error != 0)
			return (error);
		evp = &ev;
	}
	error = kern_ktimer_create(td, uap->clock_id, evp, &id, -1);
	if (error == 0) {
		error = copyout(&id, uap->timerid, sizeof(int));
		if (error != 0)
			kern_ktimer_delete(td, id);
	}
	return (error);
}
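
/*
 * Create a POSIX timer.  Slots 0..2 of its_timers[] are reserved for
 * the setitimer() family, so dynamically allocated timer ids start
 * at 3; a caller may claim one of the reserved slots explicitly via
 * preset_id.
 */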
int
kern_ktimer_create(struct thread *td, clockid_t clock_id, struct sigevent *evp,
    int *timerid, int preset_id)
{
	struct proc *p = td->td_proc;
	struct itimer *it;
	int id;
	int error;

	if (clock_id < 0 || clock_id >= MAX_CLOCKS)
		return (EINVAL);

	if (posix_clocks[clock_id].timer_create == NULL)
		return (EINVAL);

	if (evp != NULL) {
		if (evp->sigev_notify != SIGEV_NONE &&
		    evp->sigev_notify != SIGEV_SIGNAL &&
		    evp->sigev_notify != SIGEV_THREAD_ID)
			return (EINVAL);
		if ((evp->sigev_notify == SIGEV_SIGNAL ||
		    evp->sigev_notify == SIGEV_THREAD_ID) &&
		    !_SIG_VALID(evp->sigev_signo))
			return (EINVAL);
	}

	if (p->p_itimers == NULL)
		itimers_alloc(p);

	it = uma_zalloc(itimer_zone, M_WAITOK);
	it->it_flags = 0;
	it->it_usecount = 0;
	it->it_active = 0;
	timespecclear(&it->it_time.it_value);
	timespecclear(&it->it_time.it_interval);
	it->it_overrun = 0;
	it->it_overrun_last = 0;
	it->it_clockid = clock_id;
	it->it_timerid = -1;
	it->it_proc = p;
	ksiginfo_init(&it->it_ksi);
	it->it_ksi.ksi_flags |= KSI_INS | KSI_EXT;
	error = CLOCK_CALL(clock_id, timer_create, (it));
	if (error != 0)
		goto out;

	PROC_LOCK(p);
	if (preset_id != -1) {
		KASSERT(preset_id >= 0 && preset_id < 3, ("invalid preset_id"));
		id = preset_id;
		if (p->p_itimers->its_timers[id] != NULL) {
			PROC_UNLOCK(p);
			error = 0;
			goto out;
		}
	} else {
		/*
		 * Find a free timer slot, skipping those reserved
		 * for setitimer().
		 */
		for (id = 3; id < TIMER_MAX; id++)
			if (p->p_itimers->its_timers[id] == NULL)
				break;
		if (id == TIMER_MAX) {
			PROC_UNLOCK(p);
			error = EAGAIN;
			goto out;
		}
	}
	it->it_timerid = id;
	p->p_itimers->its_timers[id] = it;
	if (evp != NULL)
		it->it_sigev = *evp;
	else {
		it->it_sigev.sigev_notify = SIGEV_SIGNAL;
		switch (clock_id) {
		default:
		case CLOCK_REALTIME:
			it->it_sigev.sigev_signo = SIGALRM;
			break;
		case CLOCK_VIRTUAL:
			it->it_sigev.sigev_signo = SIGVTALRM;
			break;
		case CLOCK_PROF:
			it->it_sigev.sigev_signo = SIGPROF;
			break;
		}
		it->it_sigev.sigev_value.sival_int = id;
	}

	if (it->it_sigev.sigev_notify == SIGEV_SIGNAL ||
	    it->it_sigev.sigev_notify == SIGEV_THREAD_ID) {
		it->it_ksi.ksi_signo = it->it_sigev.sigev_signo;
		it->it_ksi.ksi_code = SI_TIMER;
		it->it_ksi.ksi_value = it->it_sigev.sigev_value;
		it->it_ksi.ksi_timerid = id;
	}
	PROC_UNLOCK(p);
	*timerid = id;
	return (0);

out:
	ITIMER_LOCK(it);
	CLOCK_CALL(it->it_clockid, timer_delete, (it));
	ITIMER_UNLOCK(it);
	uma_zfree(itimer_zone, it);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ktimer_delete_args {
	int timerid;
};
#endif
int
sys_ktimer_delete(struct thread *td, struct ktimer_delete_args *uap)
{

	return (kern_ktimer_delete(td, uap->timerid));
}

static struct itimer *
itimer_find(struct proc *p, int timerid)
{
	struct itimer *it;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((p->p_itimers == NULL) ||
	    (timerid < 0) || (timerid >= TIMER_MAX) ||
	    (it = p->p_itimers->its_timers[timerid]) == NULL) {
		return (NULL);
	}
	ITIMER_LOCK(it);
	if ((it->it_flags & ITF_DELETING) != 0) {
		ITIMER_UNLOCK(it);
		it = NULL;
	}
	return (it);
}
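
/*
 * Delete a timer: mark it ITF_DELETING so that itimer_find() stops
 * handing out references, wait for the use count driven by
 * itimer_enter()/itimer_leave() to drain, then tear the timer down
 * and release its slot.
 */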
int
kern_ktimer_delete(struct thread *td, int timerid)
{
	struct proc *p = td->td_proc;
	struct itimer *it;

	PROC_LOCK(p);
	it = itimer_find(p, timerid);
	if (it == NULL) {
		PROC_UNLOCK(p);
		return (EINVAL);
	}
	PROC_UNLOCK(p);

	it->it_flags |= ITF_DELETING;
	while (it->it_usecount > 0) {
		it->it_flags |= ITF_WANTED;
		msleep(it, &it->it_mtx, PPAUSE, "itimer", 0);
	}
	it->it_flags &= ~ITF_WANTED;
	CLOCK_CALL(it->it_clockid, timer_delete, (it));
	ITIMER_UNLOCK(it);

	PROC_LOCK(p);
	if (KSI_ONQ(&it->it_ksi))
		sigqueue_take(&it->it_ksi);
	p->p_itimers->its_timers[timerid] = NULL;
	PROC_UNLOCK(p);
	uma_zfree(itimer_zone, it);
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct ktimer_settime_args {
	int timerid;
	int flags;
	const struct itimerspec * value;
	struct itimerspec * ovalue;
};
#endif
int
sys_ktimer_settime(struct thread *td, struct ktimer_settime_args *uap)
{
	struct itimerspec val, oval, *ovalp;
	int error;

	error = copyin(uap->value, &val, sizeof(val));
	if (error != 0)
		return (error);
	ovalp = uap->ovalue != NULL ? &oval : NULL;
	error = kern_ktimer_settime(td, uap->timerid, uap->flags, &val, ovalp);
	if (error == 0 && uap->ovalue != NULL)
		error = copyout(ovalp, uap->ovalue, sizeof(*ovalp));
	return (error);
}

int
kern_ktimer_settime(struct thread *td, int timer_id, int flags,
    struct itimerspec *val, struct itimerspec *oval)
{
	struct proc *p;
	struct itimer *it;
	int error;

	p = td->td_proc;
	PROC_LOCK(p);
	if (timer_id < 3 || (it = itimer_find(p, timer_id)) == NULL) {
		PROC_UNLOCK(p);
		error = EINVAL;
	} else {
		PROC_UNLOCK(p);
		itimer_enter(it);
		error = CLOCK_CALL(it->it_clockid, timer_settime, (it,
		    flags, val, oval));
		itimer_leave(it);
		ITIMER_UNLOCK(it);
	}
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ktimer_gettime_args {
	int timerid;
	struct itimerspec * value;
};
#endif
int
sys_ktimer_gettime(struct thread *td, struct ktimer_gettime_args *uap)
{
	struct itimerspec val;
	int error;

	error = kern_ktimer_gettime(td, uap->timerid, &val);
	if (error == 0)
		error = copyout(&val, uap->value, sizeof(val));
	return (error);
}

int
kern_ktimer_gettime(struct thread *td, int timer_id, struct itimerspec *val)
{
	struct proc *p;
	struct itimer *it;
	int error;

	p = td->td_proc;
	PROC_LOCK(p);
	if (timer_id < 3 || (it = itimer_find(p, timer_id)) == NULL) {
		PROC_UNLOCK(p);
		error = EINVAL;
	} else {
		PROC_UNLOCK(p);
		itimer_enter(it);
		error = CLOCK_CALL(it->it_clockid, timer_gettime, (it, val));
		itimer_leave(it);
		ITIMER_UNLOCK(it);
	}
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct timer_getoverrun_args {
	int timerid;
};
#endif
int
sys_ktimer_getoverrun(struct thread *td, struct ktimer_getoverrun_args *uap)
{

	return (kern_ktimer_getoverrun(td, uap->timerid));
}

int
kern_ktimer_getoverrun(struct thread *td, int timer_id)
{
	struct proc *p = td->td_proc;
	struct itimer *it;
	int error;

	PROC_LOCK(p);
	if (timer_id < 3 ||
	    (it = itimer_find(p, timer_id)) == NULL) {
		PROC_UNLOCK(p);
		error = EINVAL;
	} else {
		td->td_retval[0] = it->it_overrun_last;
		ITIMER_UNLOCK(it);
		PROC_UNLOCK(p);
		error = 0;
	}
	return (error);
}

static int
realtimer_create(struct itimer *it)
{
	callout_init_mtx(&it->it_callout, &it->it_mtx, 0);
	return (0);
}
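
/*
 * Tear down a realtime timer.  The itimer lock is dropped around
 * callout_drain() so that a concurrently running realtimer_expire(),
 * which is invoked with it_mtx held, can finish instead of
 * deadlocking against us.
 */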
static int
realtimer_delete(struct itimer *it)
{
	mtx_assert(&it->it_mtx, MA_OWNED);

	/*
	 * clear timer's value and interval to tell realtimer_expire
	 * to not rearm the timer.
	 */
	timespecclear(&it->it_time.it_value);
	timespecclear(&it->it_time.it_interval);
	ITIMER_UNLOCK(it);
	callout_drain(&it->it_callout);
	ITIMER_LOCK(it);
	return (0);
}

static int
realtimer_gettime(struct itimer *it, struct itimerspec *ovalue)
{
	struct timespec cts;

	mtx_assert(&it->it_mtx, MA_OWNED);

	realtimer_clocktime(it->it_clockid, &cts);
	*ovalue = it->it_time;
	if (ovalue->it_value.tv_sec != 0 || ovalue->it_value.tv_nsec != 0) {
		timespecsub(&ovalue->it_value, &cts);
		if (ovalue->it_value.tv_sec < 0 ||
		    (ovalue->it_value.tv_sec == 0 &&
		    ovalue->it_value.tv_nsec == 0)) {
			ovalue->it_value.tv_sec = 0;
			ovalue->it_value.tv_nsec = 1;
		}
	}
	return (0);
}

static int
realtimer_settime(struct itimer *it, int flags,
    struct itimerspec *value, struct itimerspec *ovalue)
{
	struct timespec cts, ts;
	struct timeval tv;
	struct itimerspec val;

	mtx_assert(&it->it_mtx, MA_OWNED);

	val = *value;
	if (itimespecfix(&val.it_value))
		return (EINVAL);

	if (timespecisset(&val.it_value)) {
		if (itimespecfix(&val.it_interval))
			return (EINVAL);
	} else {
		timespecclear(&val.it_interval);
	}

	if (ovalue != NULL)
		realtimer_gettime(it, ovalue);

	it->it_time = val;
	if (timespecisset(&val.it_value)) {
		realtimer_clocktime(it->it_clockid, &cts);
		ts = val.it_value;
		if ((flags & TIMER_ABSTIME) == 0) {
			/* Convert to absolute time. */
			timespecadd(&it->it_time.it_value, &cts);
		} else {
			timespecsub(&ts, &cts);
			/*
			 * We don't care if ts is negative, tvtohz will
			 * fix it.
			 */
		}
		TIMESPEC_TO_TIMEVAL(&tv, &ts);
		callout_reset(&it->it_callout, tvtohz(&tv),
		    realtimer_expire, it);
	} else {
		callout_stop(&it->it_callout);
	}

	return (0);
}

static void
realtimer_clocktime(clockid_t id, struct timespec *ts)
{
	if (id == CLOCK_REALTIME)
		getnanotime(ts);
	else	/* CLOCK_MONOTONIC */
		getnanouptime(ts);
}

int
itimer_accept(struct proc *p, int timerid, ksiginfo_t *ksi)
{
	struct itimer *it;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	it = itimer_find(p, timerid);
	if (it != NULL) {
		ksi->ksi_overrun = it->it_overrun;
		it->it_overrun_last = it->it_overrun;
		it->it_overrun = 0;
		ITIMER_UNLOCK(it);
		return (0);
	}
	return (EINVAL);
}

int
itimespecfix(struct timespec *ts)
{

	if (ts->tv_sec < 0 || ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000)
		return (EINVAL);
	if (ts->tv_sec == 0 && ts->tv_nsec != 0 && ts->tv_nsec < tick * 1000)
		ts->tv_nsec = tick * 1000;
	return (0);
}
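
/*
 * Note on overrun accounting in realtimer_expire() below: each
 * interval the callout lags behind bumps it_overrun, which
 * itimer_accept() above latches into it_overrun_last when the signal
 * is delivered and kern_ktimer_getoverrun() reports to userland;
 * once the count would pass INT_MAX the condition is flagged through
 * ksi_errno = ERANGE instead.
 */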
/* Timeout callback for realtime timer */
static void
realtimer_expire(void *arg)
{
	struct timespec cts, ts;
	struct timeval tv;
	struct itimer *it;

	it = (struct itimer *)arg;

	realtimer_clocktime(it->it_clockid, &cts);
	/* Only fire if time is reached. */
	if (timespeccmp(&cts, &it->it_time.it_value, >=)) {
		if (timespecisset(&it->it_time.it_interval)) {
			timespecadd(&it->it_time.it_value,
			    &it->it_time.it_interval);
			while (timespeccmp(&cts, &it->it_time.it_value, >=)) {
				if (it->it_overrun < INT_MAX)
					it->it_overrun++;
				else
					it->it_ksi.ksi_errno = ERANGE;
				timespecadd(&it->it_time.it_value,
				    &it->it_time.it_interval);
			}
		} else {
			/* single shot timer ? */
			timespecclear(&it->it_time.it_value);
		}
		if (timespecisset(&it->it_time.it_value)) {
			ts = it->it_time.it_value;
			timespecsub(&ts, &cts);
			TIMESPEC_TO_TIMEVAL(&tv, &ts);
			callout_reset(&it->it_callout, tvtohz(&tv),
			    realtimer_expire, it);
		}
		itimer_enter(it);
		ITIMER_UNLOCK(it);
		itimer_fire(it);
		ITIMER_LOCK(it);
		itimer_leave(it);
	} else if (timespecisset(&it->it_time.it_value)) {
		ts = it->it_time.it_value;
		timespecsub(&ts, &cts);
		TIMESPEC_TO_TIMEVAL(&tv, &ts);
		callout_reset(&it->it_callout, tvtohz(&tv), realtimer_expire,
		    it);
	}
}

void
itimer_fire(struct itimer *it)
{
	struct proc *p = it->it_proc;
	struct thread *td;

	if (it->it_sigev.sigev_notify == SIGEV_SIGNAL ||
	    it->it_sigev.sigev_notify == SIGEV_THREAD_ID) {
		if (sigev_findtd(p, &it->it_sigev, &td) != 0) {
			ITIMER_LOCK(it);
			timespecclear(&it->it_time.it_value);
			timespecclear(&it->it_time.it_interval);
			callout_stop(&it->it_callout);
			ITIMER_UNLOCK(it);
			return;
		}
		if (!KSI_ONQ(&it->it_ksi)) {
			it->it_ksi.ksi_errno = 0;
			ksiginfo_set_sigev(&it->it_ksi, &it->it_sigev);
			tdsendsignal(p, td, it->it_ksi.ksi_signo, &it->it_ksi);
		} else {
			if (it->it_overrun < INT_MAX)
				it->it_overrun++;
			else
				it->it_ksi.ksi_errno = ERANGE;
		}
		PROC_UNLOCK(p);
	}
}

static void
itimers_alloc(struct proc *p)
{
	struct itimers *its;
	int i;

	its = malloc(sizeof (struct itimers), M_SUBPROC, M_WAITOK | M_ZERO);
	LIST_INIT(&its->its_virtual);
	LIST_INIT(&its->its_prof);
	TAILQ_INIT(&its->its_worklist);
	for (i = 0; i < TIMER_MAX; i++)
		its->its_timers[i] = NULL;
	PROC_LOCK(p);
	if (p->p_itimers == NULL) {
		p->p_itimers = its;
		PROC_UNLOCK(p);
	}
	else {
		PROC_UNLOCK(p);
		free(its, M_SUBPROC);
	}
}

static void
itimers_event_hook_exec(void *arg, struct proc *p, struct image_params *imgp __unused)
{
	itimers_event_hook_exit(arg, p);
}
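
/*
 * exec shares the exit cleanup path below; the ITIMER_EV_* code
 * passed in arg tells itimers_event_hook_exit() which timers to
 * destroy.
 */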
/* Clean up timers when some process events are being triggered. */
static void
itimers_event_hook_exit(void *arg, struct proc *p)
{
	struct itimers *its;
	struct itimer *it;
	int event = (int)(intptr_t)arg;
	int i;

	if (p->p_itimers != NULL) {
		its = p->p_itimers;
		for (i = 0; i < MAX_CLOCKS; ++i) {
			if (posix_clocks[i].event_hook != NULL)
				CLOCK_CALL(i, event_hook, (p, i, event));
		}
		/*
		 * According to susv3, XSI interval timers should be
		 * inherited by new image.
		 */
		if (event == ITIMER_EV_EXEC)
			i = 3;
		else if (event == ITIMER_EV_EXIT)
			i = 0;
		else
			panic("unhandled event");
		for (; i < TIMER_MAX; ++i) {
			if ((it = its->its_timers[i]) != NULL)
				kern_ktimer_delete(curthread, i);
		}
		if (its->its_timers[0] == NULL &&
		    its->its_timers[1] == NULL &&
		    its->its_timers[2] == NULL) {
			free(its, M_SUBPROC);
			p->p_itimers = NULL;
		}
	}
}