/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/clock.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysproto.h>
#include <sys/eventhandler.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/sleepqueue.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/posix4.h>
#include <sys/time.h>
#include <sys/timers.h>
#include <sys/timetc.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

#define MAX_CLOCKS	(CLOCK_MONOTONIC+1)
#define CPUCLOCK_BIT		0x80000000
#define CPUCLOCK_PROCESS_BIT	0x40000000
#define CPUCLOCK_ID_MASK	(~(CPUCLOCK_BIT|CPUCLOCK_PROCESS_BIT))
#define MAKE_THREAD_CPUCLOCK(tid)	(CPUCLOCK_BIT|(tid))
#define MAKE_PROCESS_CPUCLOCK(pid)	\
	(CPUCLOCK_BIT|CPUCLOCK_PROCESS_BIT|(pid))
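
/*
 * Illustrative sketch (not part of the original source): the macros above
 * pack a thread or process id into the low bits of a clockid_t and use the
 * two high bits as tags, so a CPU-time clock id is always negative when
 * viewed as an int:
 *
 *	clockid_t cid = MAKE_THREAD_CPUCLOCK(100042);
 *	// (int)cid < 0, so kern_clock_gettime() routes it to get_cputime();
 *	// (cid & CPUCLOCK_PROCESS_BIT) == 0 marks it as a thread clock, and
 *	// (cid & CPUCLOCK_ID_MASK) recovers the tid, 100042.
 */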

static struct kclock	posix_clocks[MAX_CLOCKS];
static uma_zone_t	itimer_zone = NULL;

/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */

static int	settime(struct thread *, struct timeval *);
static void	timevalfix(struct timeval *);

static void	itimer_start(void);
static int	itimer_init(void *, int, int);
static void	itimer_fini(void *, int);
static void	itimer_enter(struct itimer *);
static void	itimer_leave(struct itimer *);
static struct itimer *itimer_find(struct proc *, int);
static void	itimers_alloc(struct proc *);
static void	itimers_event_hook_exec(void *arg, struct proc *p,
		    struct image_params *imgp);
static void	itimers_event_hook_exit(void *arg, struct proc *p);
static int	realtimer_create(struct itimer *);
static int	realtimer_gettime(struct itimer *, struct itimerspec *);
static int	realtimer_settime(struct itimer *, int,
		    struct itimerspec *, struct itimerspec *);
static int	realtimer_delete(struct itimer *);
static void	realtimer_clocktime(clockid_t, struct timespec *);
static void	realtimer_expire(void *);

int		register_posix_clock(int, struct kclock *);
void		itimer_fire(struct itimer *it);
int		itimespecfix(struct timespec *ts);

#define CLOCK_CALL(clock, call, arglist)		\
	((*posix_clocks[clock].call) arglist)

SYSINIT(posix_timer, SI_SUB_P1003_1B, SI_ORDER_FIRST+4, itimer_start, NULL);

static int
settime(struct thread *td, struct timeval *tv)
{
	struct timeval delta, tv1, tv2;
	static struct timeval maxtime, laststep;
	struct timespec ts;
	int s;

	s = splclock();
	microtime(&tv1);
	delta = *tv;
	timevalsub(&delta, &tv1);

	/*
	 * If the system is secure, we do not allow the time to be
	 * set to a value earlier than 1 second less than the highest
	 * time we have yet seen.  The worst a miscreant can do in
	 * this circumstance is "freeze" time; he cannot go back to
	 * the past.
	 *
	 * We similarly do not allow the clock to be stepped more
	 * than one second, nor more than once per second.  This allows
	 * a miscreant to make the clock march double-time, but no worse.
	 */
	if (securelevel_gt(td->td_ucred, 1) != 0) {
		if (delta.tv_sec < 0 || delta.tv_usec < 0) {
			/*
			 * Update maxtime to latest time we've seen.
			 */
			if (tv1.tv_sec > maxtime.tv_sec)
				maxtime = tv1;
			tv2 = *tv;
			timevalsub(&tv2, &maxtime);
			if (tv2.tv_sec < -1) {
				tv->tv_sec = maxtime.tv_sec - 1;
				printf("Time adjustment clamped to -1 second\n");
			}
		} else {
			if (tv1.tv_sec == laststep.tv_sec) {
				splx(s);
				return (EPERM);
			}
			if (delta.tv_sec > 1) {
				tv->tv_sec = tv1.tv_sec + 1;
				printf("Time adjustment clamped to +1 second\n");
			}
			laststep = *tv;
		}
	}

	ts.tv_sec = tv->tv_sec;
	ts.tv_nsec = tv->tv_usec * 1000;
	mtx_lock(&Giant);
	tc_setclock(&ts);
	resettodr();
	mtx_unlock(&Giant);
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct clock_getcpuclockid2_args {
	id_t id;
	int which;
	clockid_t *clock_id;
};
#endif
/* ARGSUSED */
int
sys_clock_getcpuclockid2(struct thread *td, struct clock_getcpuclockid2_args *uap)
{
	clockid_t clk_id;
	int error;

	error = kern_clock_getcpuclockid2(td, uap->id, uap->which, &clk_id);
	if (error == 0)
		error = copyout(&clk_id, uap->clock_id, sizeof(clockid_t));
	return (error);
}

int
kern_clock_getcpuclockid2(struct thread *td, id_t id, int which,
    clockid_t *clk_id)
{
	struct proc *p;
	pid_t pid;
	lwpid_t tid;
	int error;

	switch (which) {
	case CPUCLOCK_WHICH_PID:
		if (id != 0) {
			error = pget(id, PGET_CANSEE | PGET_NOTID, &p);
			if (error != 0)
				return (error);
			PROC_UNLOCK(p);
			pid = id;
		} else {
			pid = td->td_proc->p_pid;
		}
		*clk_id = MAKE_PROCESS_CPUCLOCK(pid);
		return (0);
	case CPUCLOCK_WHICH_TID:
		tid = id == 0 ? td->td_tid : id;
		*clk_id = MAKE_THREAD_CPUCLOCK(tid);
		return (0);
	default:
		return (EINVAL);
	}
}
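
/*
 * Usage sketch (illustrative, not part of the original source): userland
 * reaches this through clock_getcpuclockid(3) and then reads the clock
 * with clock_gettime(2), e.g.:
 *
 *	clockid_t cid;
 *	struct timespec ts;
 *
 *	if (clock_getcpuclockid(getpid(), &cid) == 0 &&
 *	    clock_gettime(cid, &ts) == 0)
 *		printf("CPU time: %jd.%09ld\n", (intmax_t)ts.tv_sec,
 *		    ts.tv_nsec);
 */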

#ifndef _SYS_SYSPROTO_H_
struct clock_gettime_args {
	clockid_t clock_id;
	struct timespec *tp;
};
#endif
/* ARGSUSED */
int
sys_clock_gettime(struct thread *td, struct clock_gettime_args *uap)
{
	struct timespec ats;
	int error;

	error = kern_clock_gettime(td, uap->clock_id, &ats);
	if (error == 0)
		error = copyout(&ats, uap->tp, sizeof(ats));

	return (error);
}

static inline void
cputick2timespec(uint64_t runtime, struct timespec *ats)
{
	runtime = cputick2usec(runtime);
	ats->tv_sec = runtime / 1000000;
	ats->tv_nsec = runtime % 1000000 * 1000;
}
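
/*
 * Worked example (illustrative, not part of the original source): assuming
 * a 2 GHz cputick rate, a runtime of 3,000,000,000 ticks is converted by
 * cputick2usec() to 1,500,000 us, which the division and remainder above
 * split into ats->tv_sec = 1 and ats->tv_nsec = 500000 * 1000, i.e. 1.5 s.
 */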

static void
get_thread_cputime(struct thread *targettd, struct timespec *ats)
{
	uint64_t runtime, curtime, switchtime;

	if (targettd == NULL) { /* current thread */
		critical_enter();
		switchtime = PCPU_GET(switchtime);
		curtime = cpu_ticks();
		runtime = curthread->td_runtime;
		critical_exit();
		runtime += curtime - switchtime;
	} else {
		thread_lock(targettd);
		runtime = targettd->td_runtime;
		thread_unlock(targettd);
	}
	cputick2timespec(runtime, ats);
}

static void
get_process_cputime(struct proc *targetp, struct timespec *ats)
{
	uint64_t runtime;
	struct rusage ru;

	PROC_STATLOCK(targetp);
	rufetch(targetp, &ru);
	runtime = targetp->p_rux.rux_runtime;
	PROC_STATUNLOCK(targetp);
	cputick2timespec(runtime, ats);
}

static int
get_cputime(struct thread *td, clockid_t clock_id, struct timespec *ats)
{
	struct proc *p, *p2;
	struct thread *td2;
	lwpid_t tid;
	pid_t pid;
	int error;

	p = td->td_proc;
	if ((clock_id & CPUCLOCK_PROCESS_BIT) == 0) {
		tid = clock_id & CPUCLOCK_ID_MASK;
		td2 = tdfind(tid, p->p_pid);
		if (td2 == NULL)
			return (EINVAL);
		get_thread_cputime(td2, ats);
		PROC_UNLOCK(td2->td_proc);
	} else {
		pid = clock_id & CPUCLOCK_ID_MASK;
		error = pget(pid, PGET_CANSEE, &p2);
		if (error != 0)
			return (EINVAL);
		get_process_cputime(p2, ats);
		PROC_UNLOCK(p2);
	}
	return (0);
}

int
kern_clock_gettime(struct thread *td, clockid_t clock_id, struct timespec *ats)
{
	struct timeval sys, user;
	struct proc *p;

	p = td->td_proc;
	switch (clock_id) {
	case CLOCK_REALTIME:		/* Default to precise. */
	case CLOCK_REALTIME_PRECISE:
		nanotime(ats);
		break;
	case CLOCK_REALTIME_FAST:
		getnanotime(ats);
		break;
	case CLOCK_VIRTUAL:
		PROC_LOCK(p);
		PROC_STATLOCK(p);
		calcru(p, &user, &sys);
		PROC_STATUNLOCK(p);
		PROC_UNLOCK(p);
		TIMEVAL_TO_TIMESPEC(&user, ats);
		break;
	case CLOCK_PROF:
		PROC_LOCK(p);
		PROC_STATLOCK(p);
		calcru(p, &user, &sys);
		PROC_STATUNLOCK(p);
		PROC_UNLOCK(p);
		timevaladd(&user, &sys);
		TIMEVAL_TO_TIMESPEC(&user, ats);
		break;
	case CLOCK_MONOTONIC:		/* Default to precise. */
	case CLOCK_MONOTONIC_PRECISE:
	case CLOCK_UPTIME:
	case CLOCK_UPTIME_PRECISE:
		nanouptime(ats);
		break;
	case CLOCK_UPTIME_FAST:
	case CLOCK_MONOTONIC_FAST:
		getnanouptime(ats);
		break;
	case CLOCK_SECOND:
		ats->tv_sec = time_second;
		ats->tv_nsec = 0;
		break;
	case CLOCK_THREAD_CPUTIME_ID:
		get_thread_cputime(NULL, ats);
		break;
	case CLOCK_PROCESS_CPUTIME_ID:
		PROC_LOCK(p);
		get_process_cputime(p, ats);
		PROC_UNLOCK(p);
		break;
	default:
		if ((int)clock_id >= 0)
			return (EINVAL);
		return (get_cputime(td, clock_id, ats));
	}
	return (0);
}
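
/*
 * Usage sketch (illustrative, not part of the original source): the _FAST
 * clocks trade accuracy for speed by reading the cached timehands value
 * (roughly 1/HZ precision) instead of querying the hardware timecounter:
 *
 *	struct timespec ts;
 *
 *	clock_gettime(CLOCK_MONOTONIC, &ts);		// nanouptime(), precise
 *	clock_gettime(CLOCK_MONOTONIC_FAST, &ts);	// getnanouptime(), cheap
 */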

#ifndef _SYS_SYSPROTO_H_
struct clock_settime_args {
	clockid_t clock_id;
	const struct timespec *tp;
};
#endif
/* ARGSUSED */
int
sys_clock_settime(struct thread *td, struct clock_settime_args *uap)
{
	struct timespec ats;
	int error;

	if ((error = copyin(uap->tp, &ats, sizeof(ats))) != 0)
		return (error);
	return (kern_clock_settime(td, uap->clock_id, &ats));
}

int
kern_clock_settime(struct thread *td, clockid_t clock_id, struct timespec *ats)
{
	struct timeval atv;
	int error;

	if ((error = priv_check(td, PRIV_CLOCK_SETTIME)) != 0)
		return (error);
	if (clock_id != CLOCK_REALTIME)
		return (EINVAL);
	if (ats->tv_nsec < 0 || ats->tv_nsec >= 1000000000)
		return (EINVAL);
	/* XXX Don't convert nsec->usec and back */
	TIMESPEC_TO_TIMEVAL(&atv, ats);
	error = settime(td, &atv);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct clock_getres_args {
	clockid_t clock_id;
	struct timespec *tp;
};
#endif
int
sys_clock_getres(struct thread *td, struct clock_getres_args *uap)
{
	struct timespec ts;
	int error;

	if (uap->tp == NULL)
		return (0);

	error = kern_clock_getres(td, uap->clock_id, &ts);
	if (error == 0)
		error = copyout(&ts, uap->tp, sizeof(ts));
	return (error);
}

int
kern_clock_getres(struct thread *td, clockid_t clock_id, struct timespec *ts)
{

	ts->tv_sec = 0;
	switch (clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_REALTIME_FAST:
	case CLOCK_REALTIME_PRECISE:
	case CLOCK_MONOTONIC:
	case CLOCK_MONOTONIC_FAST:
	case CLOCK_MONOTONIC_PRECISE:
	case CLOCK_UPTIME:
	case CLOCK_UPTIME_FAST:
	case CLOCK_UPTIME_PRECISE:
		/*
		 * Round up the result of the division cheaply by adding 1.
		 * Rounding up is especially important if rounding down
		 * would give 0.  Perfect rounding is unimportant.
		 */
		ts->tv_nsec = 1000000000 / tc_getfrequency() + 1;
		break;
	case CLOCK_VIRTUAL:
	case CLOCK_PROF:
		/* Accurately round up here because we can do so cheaply. */
		ts->tv_nsec = (1000000000 + hz - 1) / hz;
		break;
	case CLOCK_SECOND:
		ts->tv_sec = 1;
		ts->tv_nsec = 0;
		break;
	case CLOCK_THREAD_CPUTIME_ID:
	case CLOCK_PROCESS_CPUTIME_ID:
	cputime:
		/* sync with cputick2usec */
		ts->tv_nsec = 1000000 / cpu_tickrate();
		if (ts->tv_nsec == 0)
			ts->tv_nsec = 1000;
		break;
	default:
		if ((int)clock_id < 0)
			goto cputime;
		return (EINVAL);
	}
	return (0);
}

static uint8_t nanowait[MAXCPU];

int
kern_nanosleep(struct thread *td, struct timespec *rqt, struct timespec *rmt)
{
	struct timespec ts;
	sbintime_t sbt, sbtt, prec, tmp;
	time_t over;
	int error;

	if (rqt->tv_nsec < 0 || rqt->tv_nsec >= 1000000000)
		return (EINVAL);
	if (rqt->tv_sec < 0 || (rqt->tv_sec == 0 && rqt->tv_nsec == 0))
		return (0);
	ts = *rqt;
	if (ts.tv_sec > INT32_MAX / 2) {
		over = ts.tv_sec - INT32_MAX / 2;
		ts.tv_sec -= over;
	} else
		over = 0;
	tmp = tstosbt(ts);
	prec = tmp;
	prec >>= tc_precexp;
	if (TIMESEL(&sbt, tmp))
		sbt += tc_tick_sbt;
	sbt += tmp;
	error = tsleep_sbt(&nanowait[curcpu], PWAIT | PCATCH, "nanslp",
	    sbt, prec, C_ABSOLUTE);
	if (error != EWOULDBLOCK) {
		if (error == ERESTART)
			error = EINTR;
		TIMESEL(&sbtt, tmp);
		if (rmt != NULL) {
			ts = sbttots(sbt - sbtt);
			ts.tv_sec += over;
			if (ts.tv_sec < 0)
				timespecclear(&ts);
			*rmt = ts;
		}
		if (sbtt >= sbt)
			return (0);
		return (error);
	}
	return (0);
}
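
/*
 * Usage sketch (illustrative, not part of the original source): because the
 * sleep is interruptible (PCATCH), userland typically restarts nanosleep(2)
 * with the remainder written out through rmt until it completes:
 *
 *	struct timespec req = { 1, 500000000 };		// 1.5 s
 *	struct timespec rem;
 *
 *	while (nanosleep(&req, &rem) == -1 && errno == EINTR)
 *		req = rem;				// sleep what is left
 */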

#ifndef _SYS_SYSPROTO_H_
struct nanosleep_args {
	struct timespec *rqtp;
	struct timespec *rmtp;
};
#endif
/* ARGSUSED */
int
sys_nanosleep(struct thread *td, struct nanosleep_args *uap)
{
	struct timespec rmt, rqt;
	int error;

	error = copyin(uap->rqtp, &rqt, sizeof(rqt));
	if (error)
		return (error);

	if (uap->rmtp &&
	    !useracc((caddr_t)uap->rmtp, sizeof(rmt), VM_PROT_WRITE))
		return (EFAULT);
	error = kern_nanosleep(td, &rqt, &rmt);
	if (error && uap->rmtp) {
		int error2;

		error2 = copyout(&rmt, uap->rmtp, sizeof(rmt));
		if (error2)
			error = error2;
	}
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct gettimeofday_args {
	struct timeval *tp;
	struct timezone *tzp;
};
#endif
/* ARGSUSED */
int
sys_gettimeofday(struct thread *td, struct gettimeofday_args *uap)
{
	struct timeval atv;
	struct timezone rtz;
	int error = 0;

	if (uap->tp) {
		microtime(&atv);
		error = copyout(&atv, uap->tp, sizeof (atv));
	}
	if (error == 0 && uap->tzp != NULL) {
		rtz.tz_minuteswest = tz_minuteswest;
		rtz.tz_dsttime = tz_dsttime;
		error = copyout(&rtz, uap->tzp, sizeof (rtz));
	}
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct settimeofday_args {
	struct timeval *tv;
	struct timezone *tzp;
};
#endif
/* ARGSUSED */
int
sys_settimeofday(struct thread *td, struct settimeofday_args *uap)
{
	struct timeval atv, *tvp;
	struct timezone atz, *tzp;
	int error;

	if (uap->tv) {
		error = copyin(uap->tv, &atv, sizeof(atv));
		if (error)
			return (error);
		tvp = &atv;
	} else
		tvp = NULL;
	if (uap->tzp) {
		error = copyin(uap->tzp, &atz, sizeof(atz));
		if (error)
			return (error);
		tzp = &atz;
	} else
		tzp = NULL;
	return (kern_settimeofday(td, tvp, tzp));
}

int
kern_settimeofday(struct thread *td, struct timeval *tv, struct timezone *tzp)
{
	int error;

	error = priv_check(td, PRIV_SETTIMEOFDAY);
	if (error)
		return (error);
	/* Verify all parameters before changing time. */
	if (tv) {
		if (tv->tv_usec < 0 || tv->tv_usec >= 1000000)
			return (EINVAL);
		error = settime(td, tv);
	}
	if (tzp && error == 0) {
		tz_minuteswest = tzp->tz_minuteswest;
		tz_dsttime = tzp->tz_dsttime;
	}
	return (error);
}

/*
 * Get value of an interval timer.  The process virtual and profiling virtual
 * time timers are kept in the p_stats area, since they can be swapped out.
 * These are kept internally in the way they are specified externally: in
 * time until they expire.
 *
 * The real time interval timer is kept in the process table slot for the
 * process, and its value (it_value) is kept as an absolute time rather than
 * as a delta, so that it is easy to keep periodic real-time signals from
 * drifting.
 *
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a timeout routine,
 * called from the softclock() routine.  Since a callout may be delayed in
 * real time due to interrupt processing in the system, it is possible for
 * the real time timeout routine (realitexpire, given below) to be delayed
 * in real time past when it is supposed to occur.  It does not suffice,
 * therefore, to reload the real timer .it_value from the real time timers
 * .it_interval.  Rather, we compute the next time in absolute time the timer
 * should go off.
 */
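
/*
 * Usage sketch (illustrative, not part of the original source): userland
 * arms the per-process real-time timer through setitimer(2); the kernel
 * converts it_value to an absolute uptime internally, as described above:
 *
 *	struct itimerval itv;
 *
 *	itv.it_value.tv_sec = 2;	// first SIGALRM after 2 s
 *	itv.it_value.tv_usec = 0;
 *	itv.it_interval.tv_sec = 1;	// then every 1 s, without drift
 *	itv.it_interval.tv_usec = 0;
 *	setitimer(ITIMER_REAL, &itv, NULL);
 */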

#ifndef _SYS_SYSPROTO_H_
struct getitimer_args {
	u_int	which;
	struct	itimerval *itv;
};
#endif
int
sys_getitimer(struct thread *td, struct getitimer_args *uap)
{
	struct itimerval aitv;
	int error;

	error = kern_getitimer(td, uap->which, &aitv);
	if (error != 0)
		return (error);
	return (copyout(&aitv, uap->itv, sizeof (struct itimerval)));
}

int
kern_getitimer(struct thread *td, u_int which, struct itimerval *aitv)
{
	struct proc *p = td->td_proc;
	struct timeval ctv;

	if (which > ITIMER_PROF)
		return (EINVAL);

	if (which == ITIMER_REAL) {
		/*
		 * Convert from absolute to relative time in .it_value
		 * part of real time timer.  If time for real time timer
		 * has passed return 0, else return difference between
		 * current time and time for the timer to go off.
		 */
		PROC_LOCK(p);
		*aitv = p->p_realtimer;
		PROC_UNLOCK(p);
		if (timevalisset(&aitv->it_value)) {
			microuptime(&ctv);
			if (timevalcmp(&aitv->it_value, &ctv, <))
				timevalclear(&aitv->it_value);
			else
				timevalsub(&aitv->it_value, &ctv);
		}
	} else {
		PROC_ITIMLOCK(p);
		*aitv = p->p_stats->p_timer[which];
		PROC_ITIMUNLOCK(p);
	}
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct setitimer_args {
	u_int	which;
	struct	itimerval *itv, *oitv;
};
#endif
int
sys_setitimer(struct thread *td, struct setitimer_args *uap)
{
	struct itimerval aitv, oitv;
	int error;

	if (uap->itv == NULL) {
		uap->itv = uap->oitv;
		return (sys_getitimer(td, (struct getitimer_args *)uap));
	}

	if ((error = copyin(uap->itv, &aitv, sizeof(struct itimerval))))
		return (error);
	error = kern_setitimer(td, uap->which, &aitv, &oitv);
	if (error != 0 || uap->oitv == NULL)
		return (error);
	return (copyout(&oitv, uap->oitv, sizeof(struct itimerval)));
}

int
kern_setitimer(struct thread *td, u_int which, struct itimerval *aitv,
    struct itimerval *oitv)
{
	struct proc *p = td->td_proc;
	struct timeval ctv;
	sbintime_t sbt, pr;

	if (aitv == NULL)
		return (kern_getitimer(td, which, oitv));

	if (which > ITIMER_PROF)
		return (EINVAL);
	if (itimerfix(&aitv->it_value) ||
	    aitv->it_value.tv_sec > INT32_MAX / 2)
		return (EINVAL);
	if (!timevalisset(&aitv->it_value))
		timevalclear(&aitv->it_interval);
	else if (itimerfix(&aitv->it_interval) ||
	    aitv->it_interval.tv_sec > INT32_MAX / 2)
		return (EINVAL);

	if (which == ITIMER_REAL) {
		PROC_LOCK(p);
		if (timevalisset(&p->p_realtimer.it_value))
			callout_stop(&p->p_itcallout);
		microuptime(&ctv);
		if (timevalisset(&aitv->it_value)) {
			pr = tvtosbt(aitv->it_value) >> tc_precexp;
			timevaladd(&aitv->it_value, &ctv);
			sbt = tvtosbt(aitv->it_value);
			callout_reset_sbt(&p->p_itcallout, sbt, pr,
			    realitexpire, p, C_ABSOLUTE);
		}
		*oitv = p->p_realtimer;
		p->p_realtimer = *aitv;
		PROC_UNLOCK(p);
		if (timevalisset(&oitv->it_value)) {
			if (timevalcmp(&oitv->it_value, &ctv, <))
				timevalclear(&oitv->it_value);
			else
				timevalsub(&oitv->it_value, &ctv);
		}
	} else {
		if (aitv->it_interval.tv_sec == 0 &&
		    aitv->it_interval.tv_usec != 0 &&
		    aitv->it_interval.tv_usec < tick)
			aitv->it_interval.tv_usec = tick;
		if (aitv->it_value.tv_sec == 0 &&
		    aitv->it_value.tv_usec != 0 &&
		    aitv->it_value.tv_usec < tick)
			aitv->it_value.tv_usec = tick;
		PROC_ITIMLOCK(p);
		*oitv = p->p_stats->p_timer[which];
		p->p_stats->p_timer[which] = *aitv;
		PROC_ITIMUNLOCK(p);
	}
	return (0);
}

/*
 * Real interval timer expired:
 * send process whose timer expired an alarm signal.
 * If time is not set up to reload, then just return.
 * Else compute next time timer should go off which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 * tvtohz() always adds 1 to allow for the time until the next clock
 * interrupt being strictly less than 1 clock tick, but we don't want
 * that here since we want to appear to be in sync with the clock
 * interrupt even when we're delayed.
 */
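
/*
 * Worked example (illustrative, not part of the original source): a timer
 * with it_interval = 10 ms whose callout is delayed by 35 ms delivers one
 * SIGALRM here, and the do/while loop below advances it_value by four
 * intervals so the next expiry is again strictly in the future.
 */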

void
realitexpire(void *arg)
{
	struct proc *p;
	struct timeval ctv;
	sbintime_t isbt;

	p = (struct proc *)arg;
	kern_psignal(p, SIGALRM);
	if (!timevalisset(&p->p_realtimer.it_interval)) {
		timevalclear(&p->p_realtimer.it_value);
		if (p->p_flag & P_WEXIT)
			wakeup(&p->p_itcallout);
		return;
	}
	isbt = tvtosbt(p->p_realtimer.it_interval);
	if (isbt >= sbt_timethreshold)
		getmicrouptime(&ctv);
	else
		microuptime(&ctv);
	do {
		timevaladd(&p->p_realtimer.it_value,
		    &p->p_realtimer.it_interval);
	} while (timevalcmp(&p->p_realtimer.it_value, &ctv, <=));
	callout_reset_sbt(&p->p_itcallout, tvtosbt(p->p_realtimer.it_value),
	    isbt >> tc_precexp, realitexpire, p, C_ABSOLUTE);
}

/*
 * Check that a proposed value to load into the .it_value or
 * .it_interval part of an interval timer is acceptable, and
 * fix it to have at least minimal value (i.e. if it is less
 * than the resolution of the clock, round it up.)
 */
int
itimerfix(struct timeval *tv)
{

	if (tv->tv_sec < 0 || tv->tv_usec < 0 || tv->tv_usec >= 1000000)
		return (EINVAL);
	if (tv->tv_sec == 0 && tv->tv_usec != 0 &&
	    tv->tv_usec < (u_int)tick / 16)
		tv->tv_usec = (u_int)tick / 16;
	return (0);
}

/*
 * Decrement an interval timer by a specified number
 * of microseconds, which must be less than a second,
 * i.e. < 1000000.  If the timer expires, then reload
 * it.  In this case, carry over (usec - old value) to
 * reduce the value reloaded into the timer so that
 * the timer does not drift.  This routine assumes
 * that it is called in a context where the timers
 * on which it is operating cannot change in value.
 */
int
itimerdecr(struct itimerval *itp, int usec)
{

	if (itp->it_value.tv_usec < usec) {
		if (itp->it_value.tv_sec == 0) {
			/* expired, and already in next interval */
			usec -= itp->it_value.tv_usec;
			goto expire;
		}
		itp->it_value.tv_usec += 1000000;
		itp->it_value.tv_sec--;
	}
	itp->it_value.tv_usec -= usec;
	usec = 0;
	if (timevalisset(&itp->it_value))
		return (1);
	/* expired, exactly at end of interval */
expire:
	if (timevalisset(&itp->it_interval)) {
		itp->it_value = itp->it_interval;
		itp->it_value.tv_usec -= usec;
		if (itp->it_value.tv_usec < 0) {
			itp->it_value.tv_usec += 1000000;
			itp->it_value.tv_sec--;
		}
	} else
		itp->it_value.tv_usec = 0;		/* sec is already 0 */
	return (0);
}
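
/*
 * Worked example (illustrative, not part of the original source): with
 * it_value = 300 us left and a decrement of usec = 1000, the timer expires
 * with a 700 us overshoot; the reload path above subtracts that carry from
 * it_interval, so an interval of 10,000 us is reloaded as 9,300 us and the
 * long-run period stays at 10 ms with no drift.
 */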

/*
 * Add and subtract routines for timevals.
 * N.B.: subtract routine doesn't deal with
 * results which are before the beginning,
 * it just gets very confused in this case.
 * Caveat emptor.
 */
void
timevaladd(struct timeval *t1, const struct timeval *t2)
{

	t1->tv_sec += t2->tv_sec;
	t1->tv_usec += t2->tv_usec;
	timevalfix(t1);
}

void
timevalsub(struct timeval *t1, const struct timeval *t2)
{

	t1->tv_sec -= t2->tv_sec;
	t1->tv_usec -= t2->tv_usec;
	timevalfix(t1);
}

static void
timevalfix(struct timeval *t1)
{

	if (t1->tv_usec < 0) {
		t1->tv_sec--;
		t1->tv_usec += 1000000;
	}
	if (t1->tv_usec >= 1000000) {
		t1->tv_sec++;
		t1->tv_usec -= 1000000;
	}
}

/*
 * ratecheck(): simple time-based rate-limit checking.
 */
int
ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
{
	struct timeval tv, delta;
	int rv = 0;

	getmicrouptime(&tv);		/* NB: 10ms precision */
	delta = tv;
	timevalsub(&delta, lasttime);

	/*
	 * check for 0,0 is so that the message will be seen at least once,
	 * even if interval is huge.
	 */
	if (timevalcmp(&delta, mininterval, >=) ||
	    (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
		*lasttime = tv;
		rv = 1;
	}

	return (rv);
}

/*
 * ppsratecheck(): packets (or events) per second limitation.
 *
 * Return 0 if the limit is to be enforced (e.g. the caller
 * should drop a packet because of the rate limitation).
 *
 * maxpps of 0 always causes zero to be returned.  maxpps of -1
 * always causes 1 to be returned; this effectively defeats rate
 * limiting.
 *
 * Note that we maintain the struct timeval for compatibility
 * with other bsd systems.  We reuse the storage and just monitor
 * clock ticks for minimal overhead.
 */
int
ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
{
	int now;

	/*
	 * Reset the last time and counter if this is the first call
	 * or more than a second has passed since the last update of
	 * lasttime.
	 */
	now = ticks;
	if (lasttime->tv_sec == 0 || (u_int)(now - lasttime->tv_sec) >= hz) {
		lasttime->tv_sec = now;
		*curpps = 1;
		return (maxpps != 0);
	} else {
		(*curpps)++;		/* NB: ignore potential overflow */
		return (maxpps < 0 || *curpps <= maxpps);
	}
}
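
/*
 * Usage sketch (illustrative, not part of the original source): a driver
 * can cap diagnostic output to at most 10 messages per second:
 *
 *	static struct timeval lasterr;
 *	static int curerr;
 *
 *	if (ppsratecheck(&lasterr, &curerr, 10))
 *		printf("mydev0: transfer error\n");	// hypothetical device
 */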

static void
itimer_start(void)
{
	struct kclock rt_clock = {
		.timer_create = realtimer_create,
		.timer_delete = realtimer_delete,
		.timer_settime = realtimer_settime,
		.timer_gettime = realtimer_gettime,
		.event_hook = NULL
	};

	itimer_zone = uma_zcreate("itimer", sizeof(struct itimer),
	    NULL, NULL, itimer_init, itimer_fini, UMA_ALIGN_PTR, 0);
	register_posix_clock(CLOCK_REALTIME, &rt_clock);
	register_posix_clock(CLOCK_MONOTONIC, &rt_clock);
	p31b_setcfg(CTL_P1003_1B_TIMERS, 200112L);
	p31b_setcfg(CTL_P1003_1B_DELAYTIMER_MAX, INT_MAX);
	p31b_setcfg(CTL_P1003_1B_TIMER_MAX, TIMER_MAX);
	EVENTHANDLER_REGISTER(process_exit, itimers_event_hook_exit,
	    (void *)ITIMER_EV_EXIT, EVENTHANDLER_PRI_ANY);
	EVENTHANDLER_REGISTER(process_exec, itimers_event_hook_exec,
	    (void *)ITIMER_EV_EXEC, EVENTHANDLER_PRI_ANY);
}

int
register_posix_clock(int clockid, struct kclock *clk)
{
	if ((unsigned)clockid >= MAX_CLOCKS) {
		printf("%s: invalid clockid\n", __func__);
		return (0);
	}
	posix_clocks[clockid] = *clk;
	return (1);
}

static int
itimer_init(void *mem, int size, int flags)
{
	struct itimer *it;

	it = (struct itimer *)mem;
	mtx_init(&it->it_mtx, "itimer lock", NULL, MTX_DEF);
	return (0);
}

static void
itimer_fini(void *mem, int size)
{
	struct itimer *it;

	it = (struct itimer *)mem;
	mtx_destroy(&it->it_mtx);
}

static void
itimer_enter(struct itimer *it)
{

	mtx_assert(&it->it_mtx, MA_OWNED);
	it->it_usecount++;
}

static void
itimer_leave(struct itimer *it)
{

	mtx_assert(&it->it_mtx, MA_OWNED);
	KASSERT(it->it_usecount > 0, ("invalid it_usecount"));

	if (--it->it_usecount == 0 && (it->it_flags & ITF_WANTED) != 0)
		wakeup(it);
}

#ifndef _SYS_SYSPROTO_H_
struct ktimer_create_args {
	clockid_t clock_id;
	struct sigevent *evp;
	int *timerid;
};
#endif
int
sys_ktimer_create(struct thread *td, struct ktimer_create_args *uap)
{
	struct sigevent *evp, ev;
	int id;
	int error;

	if (uap->evp == NULL) {
		evp = NULL;
	} else {
		error = copyin(uap->evp, &ev, sizeof(ev));
		if (error != 0)
			return (error);
		evp = &ev;
	}
	error = kern_ktimer_create(td, uap->clock_id, evp, &id, -1);
	if (error == 0) {
		error = copyout(&id, uap->timerid, sizeof(int));
		if (error != 0)
			kern_ktimer_delete(td, id);
	}
	return (error);
}
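
/*
 * Usage sketch (illustrative, not part of the original source): userland
 * reaches sys_ktimer_create() through timer_create(2), typically asking
 * for a signal with a cookie attached:
 *
 *	struct sigevent sev;
 *	timer_t tid;
 *
 *	sev.sigev_notify = SIGEV_SIGNAL;
 *	sev.sigev_signo = SIGUSR1;
 *	sev.sigev_value.sival_int = 42;		// arbitrary cookie
 *	if (timer_create(CLOCK_MONOTONIC, &sev, &tid) == -1)
 *		err(1, "timer_create");
 */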

int
kern_ktimer_create(struct thread *td, clockid_t clock_id, struct sigevent *evp,
    int *timerid, int preset_id)
{
	struct proc *p = td->td_proc;
	struct itimer *it;
	int id;
	int error;

	if (clock_id < 0 || clock_id >= MAX_CLOCKS)
		return (EINVAL);

	if (posix_clocks[clock_id].timer_create == NULL)
		return (EINVAL);

	if (evp != NULL) {
		if (evp->sigev_notify != SIGEV_NONE &&
		    evp->sigev_notify != SIGEV_SIGNAL &&
		    evp->sigev_notify != SIGEV_THREAD_ID)
			return (EINVAL);
		if ((evp->sigev_notify == SIGEV_SIGNAL ||
		    evp->sigev_notify == SIGEV_THREAD_ID) &&
		    !_SIG_VALID(evp->sigev_signo))
			return (EINVAL);
	}

	if (p->p_itimers == NULL)
		itimers_alloc(p);

	it = uma_zalloc(itimer_zone, M_WAITOK);
	it->it_flags = 0;
	it->it_usecount = 0;
	it->it_active = 0;
	timespecclear(&it->it_time.it_value);
	timespecclear(&it->it_time.it_interval);
	it->it_overrun = 0;
	it->it_overrun_last = 0;
	it->it_clockid = clock_id;
	it->it_timerid = -1;
	it->it_proc = p;
	ksiginfo_init(&it->it_ksi);
	it->it_ksi.ksi_flags |= KSI_INS | KSI_EXT;
	error = CLOCK_CALL(clock_id, timer_create, (it));
	if (error != 0)
		goto out;

	PROC_LOCK(p);
	if (preset_id != -1) {
		KASSERT(preset_id >= 0 && preset_id < 3, ("invalid preset_id"));
		id = preset_id;
		if (p->p_itimers->its_timers[id] != NULL) {
			PROC_UNLOCK(p);
			error = 0;
			goto out;
		}
	} else {
		/*
		 * Find a free timer slot, skipping those reserved
		 * for setitimer().
		 */
		for (id = 3; id < TIMER_MAX; id++)
			if (p->p_itimers->its_timers[id] == NULL)
				break;
		if (id == TIMER_MAX) {
			PROC_UNLOCK(p);
			error = EAGAIN;
			goto out;
		}
	}
	it->it_timerid = id;
	p->p_itimers->its_timers[id] = it;
	if (evp != NULL)
		it->it_sigev = *evp;
	else {
		it->it_sigev.sigev_notify = SIGEV_SIGNAL;
		switch (clock_id) {
		default:
		case CLOCK_REALTIME:
			it->it_sigev.sigev_signo = SIGALRM;
			break;
		case CLOCK_VIRTUAL:
			it->it_sigev.sigev_signo = SIGVTALRM;
			break;
		case CLOCK_PROF:
			it->it_sigev.sigev_signo = SIGPROF;
			break;
		}
		it->it_sigev.sigev_value.sival_int = id;
	}

	if (it->it_sigev.sigev_notify == SIGEV_SIGNAL ||
	    it->it_sigev.sigev_notify == SIGEV_THREAD_ID) {
		it->it_ksi.ksi_signo = it->it_sigev.sigev_signo;
		it->it_ksi.ksi_code = SI_TIMER;
		it->it_ksi.ksi_value = it->it_sigev.sigev_value;
		it->it_ksi.ksi_timerid = id;
	}
	PROC_UNLOCK(p);
	*timerid = id;
	return (0);

out:
	ITIMER_LOCK(it);
	CLOCK_CALL(it->it_clockid, timer_delete, (it));
	ITIMER_UNLOCK(it);
	uma_zfree(itimer_zone, it);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ktimer_delete_args {
	int timerid;
};
#endif
int
sys_ktimer_delete(struct thread *td, struct ktimer_delete_args *uap)
{

	return (kern_ktimer_delete(td, uap->timerid));
}

static struct itimer *
itimer_find(struct proc *p, int timerid)
{
	struct itimer *it;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((p->p_itimers == NULL) ||
	    (timerid < 0) || (timerid >= TIMER_MAX) ||
	    (it = p->p_itimers->its_timers[timerid]) == NULL) {
		return (NULL);
	}
	ITIMER_LOCK(it);
	if ((it->it_flags & ITF_DELETING) != 0) {
		ITIMER_UNLOCK(it);
		it = NULL;
	}
	return (it);
}

int
kern_ktimer_delete(struct thread *td, int timerid)
{
	struct proc *p = td->td_proc;
	struct itimer *it;

	PROC_LOCK(p);
	it = itimer_find(p, timerid);
	if (it == NULL) {
		PROC_UNLOCK(p);
		return (EINVAL);
	}
	PROC_UNLOCK(p);

	it->it_flags |= ITF_DELETING;
	while (it->it_usecount > 0) {
		it->it_flags |= ITF_WANTED;
		msleep(it, &it->it_mtx, PPAUSE, "itimer", 0);
	}
	it->it_flags &= ~ITF_WANTED;
	CLOCK_CALL(it->it_clockid, timer_delete, (it));
	ITIMER_UNLOCK(it);

	PROC_LOCK(p);
	if (KSI_ONQ(&it->it_ksi))
		sigqueue_take(&it->it_ksi);
	p->p_itimers->its_timers[timerid] = NULL;
	PROC_UNLOCK(p);
	uma_zfree(itimer_zone, it);
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct ktimer_settime_args {
	int timerid;
	int flags;
	const struct itimerspec *value;
	struct itimerspec *ovalue;
};
#endif
int
sys_ktimer_settime(struct thread *td, struct ktimer_settime_args *uap)
{
	struct itimerspec val, oval, *ovalp;
	int error;

	error = copyin(uap->value, &val, sizeof(val));
	if (error != 0)
		return (error);
	ovalp = uap->ovalue != NULL ? &oval : NULL;
	error = kern_ktimer_settime(td, uap->timerid, uap->flags, &val, ovalp);
	if (error == 0 && uap->ovalue != NULL)
		error = copyout(ovalp, uap->ovalue, sizeof(*ovalp));
	return (error);
}

int
kern_ktimer_settime(struct thread *td, int timer_id, int flags,
    struct itimerspec *val, struct itimerspec *oval)
{
	struct proc *p;
	struct itimer *it;
	int error;

	p = td->td_proc;
	PROC_LOCK(p);
	if (timer_id < 3 || (it = itimer_find(p, timer_id)) == NULL) {
		PROC_UNLOCK(p);
		error = EINVAL;
	} else {
		PROC_UNLOCK(p);
		itimer_enter(it);
		error = CLOCK_CALL(it->it_clockid, timer_settime, (it,
		    flags, val, oval));
		itimer_leave(it);
		ITIMER_UNLOCK(it);
	}
	return (error);
}
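
/*
 * Usage sketch (illustrative, not part of the original source): arming a
 * one-shot absolute wakeup through timer_settime(2) with TIMER_ABSTIME:
 *
 *	struct itimerspec its = { 0 };
 *	struct timespec now;
 *
 *	clock_gettime(CLOCK_MONOTONIC, &now);
 *	its.it_value = now;
 *	its.it_value.tv_sec += 5;	// fire once, 5 s from now
 *	timer_settime(tid, TIMER_ABSTIME, &its, NULL);	// tid from timer_create
 */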

#ifndef _SYS_SYSPROTO_H_
struct ktimer_gettime_args {
	int timerid;
	struct itimerspec *value;
};
#endif
int
sys_ktimer_gettime(struct thread *td, struct ktimer_gettime_args *uap)
{
	struct itimerspec val;
	int error;

	error = kern_ktimer_gettime(td, uap->timerid, &val);
	if (error == 0)
		error = copyout(&val, uap->value, sizeof(val));
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ktimer_getoverrun_args {
	int timerid;
};
#endif
int
sys_ktimer_getoverrun(struct thread *td, struct ktimer_getoverrun_args *uap)
{

	return (kern_ktimer_getoverrun(td, uap->timerid));
}

int
kern_ktimer_getoverrun(struct thread *td, int timer_id)
{
	struct proc *p = td->td_proc;
	struct itimer *it;
	int error;

	PROC_LOCK(p);
	if (timer_id < 3 ||
	    (it = itimer_find(p, timer_id)) == NULL) {
		PROC_UNLOCK(p);
		error = EINVAL;
	} else {
		td->td_retval[0] = it->it_overrun_last;
		ITIMER_UNLOCK(it);
		PROC_UNLOCK(p);
		error = 0;
	}
	return (error);
}
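
/*
 * Worked example (illustrative, not part of the original source): if a
 * 10 ms periodic timer expires 4 more times while its first signal is
 * still queued, it_overrun reaches 4; when the signal is accepted that
 * count moves to it_overrun_last, and timer_getoverrun(2) returns 4.
 */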

static int
realtimer_create(struct itimer *it)
{
	callout_init_mtx(&it->it_callout, &it->it_mtx, 0);
	return (0);
}

static int
realtimer_delete(struct itimer *it)
{
	mtx_assert(&it->it_mtx, MA_OWNED);

	/*
	 * Clear the timer's value and interval to tell realtimer_expire
	 * not to rearm the timer.
	 */
	timespecclear(&it->it_time.it_value);
	timespecclear(&it->it_time.it_interval);
	ITIMER_UNLOCK(it);
	callout_drain(&it->it_callout);
	ITIMER_LOCK(it);
	return (0);
}

static int
realtimer_gettime(struct itimer *it, struct itimerspec *ovalue)
{
	struct timespec cts;

	mtx_assert(&it->it_mtx, MA_OWNED);

	realtimer_clocktime(it->it_clockid, &cts);
	*ovalue = it->it_time;
	if (ovalue->it_value.tv_sec != 0 || ovalue->it_value.tv_nsec != 0) {
		timespecsub(&ovalue->it_value, &cts);
		if (ovalue->it_value.tv_sec < 0 ||
		    (ovalue->it_value.tv_sec == 0 &&
		    ovalue->it_value.tv_nsec == 0)) {
			ovalue->it_value.tv_sec = 0;
			ovalue->it_value.tv_nsec = 1;
		}
	}
	return (0);
}

static int
realtimer_settime(struct itimer *it, int flags,
    struct itimerspec *value, struct itimerspec *ovalue)
{
	struct timespec cts, ts;
	struct timeval tv;
	struct itimerspec val;

	mtx_assert(&it->it_mtx, MA_OWNED);

	val = *value;
	if (itimespecfix(&val.it_value))
		return (EINVAL);

	if (timespecisset(&val.it_value)) {
		if (itimespecfix(&val.it_interval))
			return (EINVAL);
	} else {
		timespecclear(&val.it_interval);
	}

	if (ovalue != NULL)
		realtimer_gettime(it, ovalue);

	it->it_time = val;
	if (timespecisset(&val.it_value)) {
		realtimer_clocktime(it->it_clockid, &cts);
		ts = val.it_value;
		if ((flags & TIMER_ABSTIME) == 0) {
			/* Convert to absolute time. */
			timespecadd(&it->it_time.it_value, &cts);
		} else {
			timespecsub(&ts, &cts);
			/*
			 * We don't care if ts is negative, tvtohz()
			 * will fix it.
			 */
		}
		TIMESPEC_TO_TIMEVAL(&tv, &ts);
		callout_reset(&it->it_callout, tvtohz(&tv),
		    realtimer_expire, it);
	} else {
		callout_stop(&it->it_callout);
	}

	return (0);
}

static void
realtimer_clocktime(clockid_t id, struct timespec *ts)
{
	if (id == CLOCK_REALTIME)
		getnanotime(ts);
	else	/* CLOCK_MONOTONIC */
		getnanouptime(ts);
}

int
itimer_accept(struct proc *p, int timerid, ksiginfo_t *ksi)
{
	struct itimer *it;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	it = itimer_find(p, timerid);
	if (it != NULL) {
		ksi->ksi_overrun = it->it_overrun;
		it->it_overrun_last = it->it_overrun;
		it->it_overrun = 0;
		ITIMER_UNLOCK(it);
		return (0);
	}
	return (EINVAL);
}

int
itimespecfix(struct timespec *ts)
{

	if (ts->tv_sec < 0 || ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000)
		return (EINVAL);
	if (ts->tv_sec == 0 && ts->tv_nsec != 0 && ts->tv_nsec < tick * 1000)
		ts->tv_nsec = tick * 1000;
	return (0);
}
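
/*
 * Worked example (illustrative, not part of the original source): with
 * hz = 1000, tick is 1000 us, so a requested it_value of 500,000 ns is
 * below one clock tick and itimespecfix() rounds it up to tick * 1000 =
 * 1,000,000 ns, the finest period the tick-based callout can honor here.
 */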

/* Timeout callback for realtime timer */
static void
realtimer_expire(void *arg)
{
	struct timespec cts, ts;
	struct timeval tv;
	struct itimer *it;

	it = (struct itimer *)arg;

	realtimer_clocktime(it->it_clockid, &cts);
	/* Only fire if time is reached. */
	if (timespeccmp(&cts, &it->it_time.it_value, >=)) {
		if (timespecisset(&it->it_time.it_interval)) {
			timespecadd(&it->it_time.it_value,
			    &it->it_time.it_interval);
			while (timespeccmp(&cts, &it->it_time.it_value, >=)) {
				if (it->it_overrun < INT_MAX)
					it->it_overrun++;
				else
					it->it_ksi.ksi_errno = ERANGE;
				timespecadd(&it->it_time.it_value,
				    &it->it_time.it_interval);
			}
		} else {
			/* Single-shot timer: clear it. */
			timespecclear(&it->it_time.it_value);
		}
		if (timespecisset(&it->it_time.it_value)) {
			ts = it->it_time.it_value;
			timespecsub(&ts, &cts);
			TIMESPEC_TO_TIMEVAL(&tv, &ts);
			callout_reset(&it->it_callout, tvtohz(&tv),
			    realtimer_expire, it);
		}
		itimer_enter(it);
		ITIMER_UNLOCK(it);
		itimer_fire(it);
		ITIMER_LOCK(it);
		itimer_leave(it);
	} else if (timespecisset(&it->it_time.it_value)) {
		ts = it->it_time.it_value;
		timespecsub(&ts, &cts);
		TIMESPEC_TO_TIMEVAL(&tv, &ts);
		callout_reset(&it->it_callout, tvtohz(&tv), realtimer_expire,
		    it);
	}
}

void
itimer_fire(struct itimer *it)
{
	struct proc *p = it->it_proc;
	struct thread *td;

	if (it->it_sigev.sigev_notify == SIGEV_SIGNAL ||
	    it->it_sigev.sigev_notify == SIGEV_THREAD_ID) {
		if (sigev_findtd(p, &it->it_sigev, &td) != 0) {
			ITIMER_LOCK(it);
			timespecclear(&it->it_time.it_value);
			timespecclear(&it->it_time.it_interval);
			callout_stop(&it->it_callout);
			ITIMER_UNLOCK(it);
			return;
		}
		if (!KSI_ONQ(&it->it_ksi)) {
			it->it_ksi.ksi_errno = 0;
			ksiginfo_set_sigev(&it->it_ksi, &it->it_sigev);
			tdsendsignal(p, td, it->it_ksi.ksi_signo, &it->it_ksi);
		} else {
			if (it->it_overrun < INT_MAX)
				it->it_overrun++;
			else
				it->it_ksi.ksi_errno = ERANGE;
		}
		PROC_UNLOCK(p);
	}
}

static void
itimers_alloc(struct proc *p)
{
	struct itimers *its;
	int i;

	its = malloc(sizeof (struct itimers), M_SUBPROC, M_WAITOK | M_ZERO);
	LIST_INIT(&its->its_virtual);
	LIST_INIT(&its->its_prof);
	TAILQ_INIT(&its->its_worklist);
	for (i = 0; i < TIMER_MAX; i++)
		its->its_timers[i] = NULL;
	PROC_LOCK(p);
	if (p->p_itimers == NULL) {
		p->p_itimers = its;
		PROC_UNLOCK(p);
	} else {
		PROC_UNLOCK(p);
		free(its, M_SUBPROC);
	}
}

static void
itimers_event_hook_exec(void *arg, struct proc *p,
    struct image_params *imgp __unused)
{
	itimers_event_hook_exit(arg, p);
}

/* Clean up timers when certain process events are triggered. */
static void
itimers_event_hook_exit(void *arg, struct proc *p)
{
	struct itimers *its;
	struct itimer *it;
	int event = (int)(intptr_t)arg;
	int i;

	if (p->p_itimers != NULL) {
		its = p->p_itimers;
		for (i = 0; i < MAX_CLOCKS; ++i) {
			if (posix_clocks[i].event_hook != NULL)
				CLOCK_CALL(i, event_hook, (p, i, event));
		}
		/*
		 * According to SUSv3, XSI interval timers should be
		 * inherited by the new image.
		 */
		if (event == ITIMER_EV_EXEC)
			i = 3;
		else if (event == ITIMER_EV_EXIT)
			i = 0;
		else
			panic("unhandled event");
		for (; i < TIMER_MAX; ++i) {
			if ((it = its->its_timers[i]) != NULL)
				kern_ktimer_delete(curthread, i);
		}
		if (its->its_timers[0] == NULL &&
		    its->its_timers[1] == NULL &&
		    its->its_timers[2] == NULL) {
			free(its, M_SUBPROC);
			p->p_itimers = NULL;
		}
	}
}