/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/clock.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysproto.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/sleepqueue.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/posix4.h>
#include <sys/time.h>
#include <sys/timers.h>
#include <sys/timetc.h>
#include <sys/vnode.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>		/* needed for uma_zone_t and uma_zcreate() below */

#define	MAX_CLOCKS	(CLOCK_MONOTONIC+1)
#define	CPUCLOCK_BIT		0x80000000
#define	CPUCLOCK_PROCESS_BIT	0x40000000
#define	CPUCLOCK_ID_MASK	(~(CPUCLOCK_BIT|CPUCLOCK_PROCESS_BIT))
#define	MAKE_THREAD_CPUCLOCK(tid)	(CPUCLOCK_BIT|(tid))
#define	MAKE_PROCESS_CPUCLOCK(pid)	\
	(CPUCLOCK_BIT|CPUCLOCK_PROCESS_BIT|(pid))

static struct kclock	posix_clocks[MAX_CLOCKS];
static uma_zone_t	itimer_zone = NULL;

/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */

static int	settime(struct thread *, struct timeval *);
static void	timevalfix(struct timeval *);
static int	user_clock_nanosleep(struct thread *td, clockid_t clock_id,
		    int flags, const struct timespec *ua_rqtp,
		    struct timespec *ua_rmtp);

static void	itimer_start(void);
static int	itimer_init(void *, int, int);
static void	itimer_fini(void *, int);
static void	itimer_enter(struct itimer *);
static void	itimer_leave(struct itimer *);
static struct itimer *itimer_find(struct proc *, int);
static void	itimers_alloc(struct proc *);
static int	realtimer_create(struct itimer *);
static int	realtimer_gettime(struct itimer *, struct itimerspec *);
static int	realtimer_settime(struct itimer *, int,
		    struct itimerspec *, struct itimerspec *);
static int	realtimer_delete(struct itimer *);
static void	realtimer_clocktime(clockid_t, struct timespec *);
static void	realtimer_expire(void *);

static int	register_posix_clock(int, const struct kclock *);
void		itimer_fire(struct itimer *it);
int		itimespecfix(struct timespec *ts);

#define	CLOCK_CALL(clock, call, arglist)		\
	((*posix_clocks[clock].call) arglist)

SYSINIT(posix_timer, SI_SUB_P1003_1B, SI_ORDER_FIRST+4, itimer_start, NULL);

static int
settime(struct thread *td, struct timeval *tv)
{
	struct timeval delta, tv1, tv2;
	static struct timeval maxtime, laststep;
	struct timespec ts;

	microtime(&tv1);
	delta = *tv;
	timevalsub(&delta, &tv1);

	/*
	 * If the system is secure, we do not allow the time to be
	 * set to a value earlier than 1 second less than the highest
	 * time we have yet seen.  The worst a miscreant can do in
	 * this circumstance is "freeze" time.  He couldn't go
	 * back to the past.
	 *
	 * We similarly do not allow the clock to be stepped more
	 * than one second, nor more than once per second.  This allows
	 * a miscreant to make the clock march double-time, but no worse.
	 */
	if (securelevel_gt(td->td_ucred, 1) != 0) {
		if (delta.tv_sec < 0 || delta.tv_usec < 0) {
			/*
			 * Update maxtime to latest time we've seen.
			 */
			if (tv1.tv_sec > maxtime.tv_sec)
				maxtime = tv1;
			tv2 = *tv;
			timevalsub(&tv2, &maxtime);
			if (tv2.tv_sec < -1) {
				tv->tv_sec = maxtime.tv_sec - 1;
				printf("Time adjustment clamped to -1 second\n");
			}
		} else {
			if (tv1.tv_sec == laststep.tv_sec)
				return (EPERM);
			if (delta.tv_sec > 1) {
				tv->tv_sec = tv1.tv_sec + 1;
				printf("Time adjustment clamped to +1 second\n");
			}
			laststep = *tv;
		}
	}

	ts.tv_sec = tv->tv_sec;
	ts.tv_nsec = tv->tv_usec * 1000;
	tc_setclock(&ts);
	resettodr();
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct clock_getcpuclockid2_args {
	id_t id;
	int which;
	clockid_t *clock_id;
};
#endif
/* ARGSUSED */
int
sys_clock_getcpuclockid2(struct thread *td, struct clock_getcpuclockid2_args *uap)
{
	clockid_t clk_id;
	int error;

	error = kern_clock_getcpuclockid2(td, uap->id, uap->which, &clk_id);
	if (error == 0)
		error = copyout(&clk_id, uap->clock_id, sizeof(clockid_t));
	return (error);
}

int
kern_clock_getcpuclockid2(struct thread *td, id_t id, int which,
    clockid_t *clk_id)
{
	struct proc *p;
	pid_t pid;
	lwpid_t tid;
	int error;

	switch (which) {
	case CPUCLOCK_WHICH_PID:
		if (id != 0) {
			error = pget(id, PGET_CANSEE | PGET_NOTID, &p);
			if (error != 0)
				return (error);
			PROC_UNLOCK(p);
			pid = id;
		} else {
			pid = td->td_proc->p_pid;
		}
		*clk_id = MAKE_PROCESS_CPUCLOCK(pid);
		return (0);
	case CPUCLOCK_WHICH_TID:
		tid = id == 0 ? td->td_tid : id;
		*clk_id = MAKE_THREAD_CPUCLOCK(tid);
		return (0);
	default:
		return (EINVAL);
	}
}
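/*
 * Example (illustrative only, userland; assumes the libc wrapper
 * clock_getcpuclockid2() and CPUCLOCK_WHICH_PID are visible to the
 * caller): obtain the CPU-time clock id for the current process and
 * sample it via the default: branch of kern_clock_gettime().
 *
 *	clockid_t cid;
 *	struct timespec ts;
 *
 *	if (clock_getcpuclockid2(0, CPUCLOCK_WHICH_PID, &cid) == 0 &&
 *	    clock_gettime(cid, &ts) == 0)
 *		printf("cpu time: %jd.%09ld s\n",
 *		    (intmax_t)ts.tv_sec, ts.tv_nsec);
 */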
#ifndef _SYS_SYSPROTO_H_
struct clock_gettime_args {
	clockid_t clock_id;
	struct timespec *tp;
};
#endif
/* ARGSUSED */
int
sys_clock_gettime(struct thread *td, struct clock_gettime_args *uap)
{
	struct timespec ats;
	int error;

	error = kern_clock_gettime(td, uap->clock_id, &ats);
	if (error == 0)
		error = copyout(&ats, uap->tp, sizeof(ats));

	return (error);
}

static inline void
cputick2timespec(uint64_t runtime, struct timespec *ats)
{
	runtime = cputick2usec(runtime);
	ats->tv_sec = runtime / 1000000;
	ats->tv_nsec = runtime % 1000000 * 1000;
}
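/*
 * Worked example for cputick2timespec() above (hypothetical numbers):
 * a runtime that cputick2usec() converts to 2,500,000 us yields
 *
 *	ats->tv_sec  = 2500000 / 1000000        = 2
 *	ats->tv_nsec = 2500000 % 1000000 * 1000 = 500000000
 *
 * i.e. 2.5 seconds of accumulated CPU time.
 */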
void
kern_thread_cputime(struct thread *targettd, struct timespec *ats)
{
	uint64_t runtime, curtime, switchtime;

	if (targettd == NULL) {	/* current thread */
		critical_enter();
		switchtime = PCPU_GET(switchtime);
		curtime = cpu_ticks();
		runtime = curthread->td_runtime;
		critical_exit();
		runtime += curtime - switchtime;
	} else {
		PROC_LOCK_ASSERT(targettd->td_proc, MA_OWNED);
		thread_lock(targettd);
		runtime = targettd->td_runtime;
		thread_unlock(targettd);
	}
	cputick2timespec(runtime, ats);
}

void
kern_process_cputime(struct proc *targetp, struct timespec *ats)
{
	uint64_t runtime;
	struct rusage ru;

	PROC_LOCK_ASSERT(targetp, MA_OWNED);
	PROC_STATLOCK(targetp);
	rufetch(targetp, &ru);
	runtime = targetp->p_rux.rux_runtime;
	if (curthread->td_proc == targetp)
		runtime += cpu_ticks() - PCPU_GET(switchtime);
	PROC_STATUNLOCK(targetp);
	cputick2timespec(runtime, ats);
}

static int
get_cputime(struct thread *td, clockid_t clock_id, struct timespec *ats)
{
	struct proc *p, *p2;
	struct thread *td2;
	lwpid_t tid;
	pid_t pid;
	int error;

	p = td->td_proc;
	if ((clock_id & CPUCLOCK_PROCESS_BIT) == 0) {
		tid = clock_id & CPUCLOCK_ID_MASK;
		td2 = tdfind(tid, p->p_pid);
		if (td2 == NULL)
			return (EINVAL);
		kern_thread_cputime(td2, ats);
		PROC_UNLOCK(td2->td_proc);
	} else {
		pid = clock_id & CPUCLOCK_ID_MASK;
		error = pget(pid, PGET_CANSEE, &p2);
		if (error != 0)
			return (EINVAL);
		kern_process_cputime(p2, ats);
		PROC_UNLOCK(p2);
	}
	return (0);
}

int
kern_clock_gettime(struct thread *td, clockid_t clock_id, struct timespec *ats)
{
	struct timeval sys, user;
	struct proc *p;

	p = td->td_proc;
	switch (clock_id) {
	case CLOCK_REALTIME:		/* Default to precise. */
	case CLOCK_REALTIME_PRECISE:
		nanotime(ats);
		break;
	case CLOCK_REALTIME_FAST:
		getnanotime(ats);
		break;
	case CLOCK_VIRTUAL:
		PROC_LOCK(p);
		PROC_STATLOCK(p);
		calcru(p, &user, &sys);
		PROC_STATUNLOCK(p);
		PROC_UNLOCK(p);
		TIMEVAL_TO_TIMESPEC(&user, ats);
		break;
	case CLOCK_PROF:
		PROC_LOCK(p);
		PROC_STATLOCK(p);
		calcru(p, &user, &sys);
		PROC_STATUNLOCK(p);
		PROC_UNLOCK(p);
		timevaladd(&user, &sys);
		TIMEVAL_TO_TIMESPEC(&user, ats);
		break;
	case CLOCK_MONOTONIC:		/* Default to precise. */
	case CLOCK_MONOTONIC_PRECISE:
	case CLOCK_UPTIME:
	case CLOCK_UPTIME_PRECISE:
		nanouptime(ats);
		break;
	case CLOCK_UPTIME_FAST:
	case CLOCK_MONOTONIC_FAST:
		getnanouptime(ats);
		break;
	case CLOCK_SECOND:
		ats->tv_sec = time_second;
		ats->tv_nsec = 0;
		break;
	case CLOCK_THREAD_CPUTIME_ID:
		kern_thread_cputime(NULL, ats);
		break;
	case CLOCK_PROCESS_CPUTIME_ID:
		PROC_LOCK(p);
		kern_process_cputime(p, ats);
		PROC_UNLOCK(p);
		break;
	default:
		if ((int)clock_id >= 0)
			return (EINVAL);
		return (get_cputime(td, clock_id, ats));
	}
	return (0);
}
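/*
 * Example (illustrative only, userland): the *_FAST clocks above map to
 * getnanotime()/getnanouptime(), which return a cached value updated at
 * roughly 1/hz granularity, while the default/_PRECISE variants read the
 * hardware timecounter on every call.
 *
 *	struct timespec coarse, precise;
 *
 *	clock_gettime(CLOCK_MONOTONIC_FAST, &coarse);	// cheap, ~tick res
 *	clock_gettime(CLOCK_MONOTONIC, &precise);	// full resolution
 */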
#ifndef _SYS_SYSPROTO_H_
struct clock_settime_args {
	clockid_t clock_id;
	const struct timespec *tp;
};
#endif
/* ARGSUSED */
int
sys_clock_settime(struct thread *td, struct clock_settime_args *uap)
{
	struct timespec ats;
	int error;

	if ((error = copyin(uap->tp, &ats, sizeof(ats))) != 0)
		return (error);
	return (kern_clock_settime(td, uap->clock_id, &ats));
}

static int allow_insane_settime = 0;
SYSCTL_INT(_debug, OID_AUTO, allow_insane_settime, CTLFLAG_RWTUN,
    &allow_insane_settime, 0,
    "do not perform possibly restrictive checks on settime(2) args");

int
kern_clock_settime(struct thread *td, clockid_t clock_id, struct timespec *ats)
{
	struct timeval atv;
	int error;

	if ((error = priv_check(td, PRIV_CLOCK_SETTIME)) != 0)
		return (error);
	if (clock_id != CLOCK_REALTIME)
		return (EINVAL);
	if (ats->tv_nsec < 0 || ats->tv_nsec >= 1000000000 ||
	    ats->tv_sec < 0)
		return (EINVAL);
	if (!allow_insane_settime &&
	    (ats->tv_sec > 8000ULL * 365 * 24 * 60 * 60 ||
	    ats->tv_sec < utc_offset()))
		return (EINVAL);
	/* XXX Don't convert nsec->usec and back */
	TIMESPEC_TO_TIMEVAL(&atv, ats);
	error = settime(td, &atv);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct clock_getres_args {
	clockid_t clock_id;
	struct timespec *tp;
};
#endif
int
sys_clock_getres(struct thread *td, struct clock_getres_args *uap)
{
	struct timespec ts;
	int error;

	if (uap->tp == NULL)
		return (0);

	error = kern_clock_getres(td, uap->clock_id, &ts);
	if (error == 0)
		error = copyout(&ts, uap->tp, sizeof(ts));
	return (error);
}

int
kern_clock_getres(struct thread *td, clockid_t clock_id, struct timespec *ts)
{

	ts->tv_sec = 0;
	switch (clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_REALTIME_FAST:
	case CLOCK_REALTIME_PRECISE:
	case CLOCK_MONOTONIC:
	case CLOCK_MONOTONIC_FAST:
	case CLOCK_MONOTONIC_PRECISE:
	case CLOCK_UPTIME:
	case CLOCK_UPTIME_FAST:
	case CLOCK_UPTIME_PRECISE:
		/*
		 * Round up the result of the division cheaply by adding 1.
		 * Rounding up is especially important if rounding down
		 * would give 0.  Perfect rounding is unimportant.
		 */
		ts->tv_nsec = 1000000000 / tc_getfrequency() + 1;
		break;
	case CLOCK_VIRTUAL:
	case CLOCK_PROF:
		/* Accurately round up here because we can do so cheaply. */
		ts->tv_nsec = howmany(1000000000, hz);
		break;
	case CLOCK_SECOND:
		ts->tv_sec = 1;
		ts->tv_nsec = 0;
		break;
	case CLOCK_THREAD_CPUTIME_ID:
	case CLOCK_PROCESS_CPUTIME_ID:
	cputime:
		/* sync with cputick2usec */
		ts->tv_nsec = 1000000 / cpu_tickrate();
		if (ts->tv_nsec == 0)
			ts->tv_nsec = 1000;
		break;
	default:
		if ((int)clock_id < 0)
			goto cputime;
		return (EINVAL);
	}
	return (0);
}
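/*
 * Worked example for kern_clock_getres() (hypothetical hardware): with a
 * 1 GHz timecounter, tc_getfrequency() == 1000000000, so the
 * timecounter-backed clocks report 1000000000 / 1000000000 + 1 = 2 ns;
 * with hz = 1000, CLOCK_VIRTUAL/CLOCK_PROF report
 * howmany(1000000000, 1000) = 1000000 ns, i.e. one scheduler tick.
 */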
int
kern_nanosleep(struct thread *td, struct timespec *rqt, struct timespec *rmt)
{

	return (kern_clock_nanosleep(td, CLOCK_REALTIME, TIMER_RELTIME, rqt,
	    rmt));
}

static uint8_t nanowait[MAXCPU];

int
kern_clock_nanosleep(struct thread *td, clockid_t clock_id, int flags,
    const struct timespec *rqt, struct timespec *rmt)
{
	struct timespec ts, now;
	sbintime_t sbt, sbtt, prec, tmp;
	time_t over;
	int error;
	bool is_abs_real;

	if (rqt->tv_nsec < 0 || rqt->tv_nsec >= 1000000000)
		return (EINVAL);
	if ((flags & ~TIMER_ABSTIME) != 0)
		return (EINVAL);
	switch (clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_REALTIME_PRECISE:
	case CLOCK_REALTIME_FAST:
	case CLOCK_SECOND:
		is_abs_real = (flags & TIMER_ABSTIME) != 0;
		break;
	case CLOCK_MONOTONIC:
	case CLOCK_MONOTONIC_PRECISE:
	case CLOCK_MONOTONIC_FAST:
	case CLOCK_UPTIME:
	case CLOCK_UPTIME_PRECISE:
	case CLOCK_UPTIME_FAST:
		is_abs_real = false;
		break;
	case CLOCK_VIRTUAL:
	case CLOCK_PROF:
	case CLOCK_PROCESS_CPUTIME_ID:
		return (ENOTSUP);
	case CLOCK_THREAD_CPUTIME_ID:
	default:
		return (EINVAL);
	}
	do {
		ts = *rqt;
		if ((flags & TIMER_ABSTIME) != 0) {
			if (is_abs_real)
				td->td_rtcgen =
				    atomic_load_acq_int(&rtc_generation);
			error = kern_clock_gettime(td, clock_id, &now);
			KASSERT(error == 0, ("kern_clock_gettime: %d", error));
			timespecsub(&ts, &now, &ts);
		}
		if (ts.tv_sec < 0 || (ts.tv_sec == 0 && ts.tv_nsec == 0)) {
			error = EWOULDBLOCK;
			break;
		}
		if (ts.tv_sec > INT32_MAX / 2) {
			over = ts.tv_sec - INT32_MAX / 2;
			ts.tv_sec -= over;
		} else
			over = 0;
		tmp = tstosbt(ts);
		prec = tmp;
		prec >>= tc_precexp;
		if (TIMESEL(&sbt, tmp))
			sbt += tc_tick_sbt;
		sbt += tmp;
		error = tsleep_sbt(&nanowait[curcpu], PWAIT | PCATCH, "nanslp",
		    sbt, prec, C_ABSOLUTE);
	} while (error == 0 && is_abs_real && td->td_rtcgen == 0);
	td->td_rtcgen = 0;
	if (error != EWOULDBLOCK) {
		if (TIMESEL(&sbtt, tmp))
			sbtt += tc_tick_sbt;
		if (sbtt >= sbt)
			return (0);
		if (error == ERESTART)
			error = EINTR;
		if ((flags & TIMER_ABSTIME) == 0 && rmt != NULL) {
			ts = sbttots(sbt - sbtt);
			ts.tv_sec += over;
			if (ts.tv_sec < 0)
				timespecclear(&ts);
			*rmt = ts;
		}
		return (error);
	}
	return (0);
}
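/*
 * Example (illustrative only, userland): a drift-free periodic loop using
 * TIMER_ABSTIME, the absolute-time path served above.  do_periodic_work()
 * is a hypothetical callback.
 *
 *	struct timespec next;
 *
 *	clock_gettime(CLOCK_MONOTONIC, &next);
 *	for (;;) {
 *		next.tv_sec += 1;	// wake at exact 1 s boundaries
 *		while (clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME,
 *		    &next, NULL) == EINTR)
 *			;		// retry with same absolute target
 *		do_periodic_work();
 *	}
 */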
#ifndef _SYS_SYSPROTO_H_
struct nanosleep_args {
	struct timespec *rqtp;
	struct timespec *rmtp;
};
#endif
/* ARGSUSED */
int
sys_nanosleep(struct thread *td, struct nanosleep_args *uap)
{

	return (user_clock_nanosleep(td, CLOCK_REALTIME, TIMER_RELTIME,
	    uap->rqtp, uap->rmtp));
}

#ifndef _SYS_SYSPROTO_H_
struct clock_nanosleep_args {
	clockid_t clock_id;
	int flags;
	struct timespec *rqtp;
	struct timespec *rmtp;
};
#endif
/* ARGSUSED */
int
sys_clock_nanosleep(struct thread *td, struct clock_nanosleep_args *uap)
{
	int error;

	error = user_clock_nanosleep(td, uap->clock_id, uap->flags, uap->rqtp,
	    uap->rmtp);
	return (kern_posix_error(td, error));
}

static int
user_clock_nanosleep(struct thread *td, clockid_t clock_id, int flags,
    const struct timespec *ua_rqtp, struct timespec *ua_rmtp)
{
	struct timespec rmt, rqt;
	int error, error2;

	error = copyin(ua_rqtp, &rqt, sizeof(rqt));
	if (error)
		return (error);
	error = kern_clock_nanosleep(td, clock_id, flags, &rqt, &rmt);
	if (error == EINTR && ua_rmtp != NULL && (flags & TIMER_ABSTIME) == 0) {
		error2 = copyout(&rmt, ua_rmtp, sizeof(rmt));
		if (error2 != 0)
			error = error2;
	}
	return (error);
}
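/*
 * Example (illustrative only, userland): resuming an interrupted relative
 * nanosleep().  Only the relative form reports remaining time via rmtp,
 * matching the TIMER_ABSTIME check in user_clock_nanosleep() above.
 *
 *	struct timespec req = { 5, 0 }, rem;
 *
 *	while (nanosleep(&req, &rem) == -1 && errno == EINTR)
 *		req = rem;	// continue with whatever time is left
 */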
#ifndef _SYS_SYSPROTO_H_
struct gettimeofday_args {
	struct timeval *tp;
	struct timezone *tzp;
};
#endif
/* ARGSUSED */
int
sys_gettimeofday(struct thread *td, struct gettimeofday_args *uap)
{
	struct timeval atv;
	struct timezone rtz;
	int error = 0;

	if (uap->tp) {
		microtime(&atv);
		error = copyout(&atv, uap->tp, sizeof (atv));
	}
	if (error == 0 && uap->tzp != NULL) {
		rtz.tz_minuteswest = 0;
		rtz.tz_dsttime = 0;
		error = copyout(&rtz, uap->tzp, sizeof (rtz));
	}
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct settimeofday_args {
	struct timeval *tv;
	struct timezone *tzp;
};
#endif
/* ARGSUSED */
int
sys_settimeofday(struct thread *td, struct settimeofday_args *uap)
{
	struct timeval atv, *tvp;
	struct timezone atz, *tzp;
	int error;

	if (uap->tv) {
		error = copyin(uap->tv, &atv, sizeof(atv));
		if (error)
			return (error);
		tvp = &atv;
	} else
		tvp = NULL;
	if (uap->tzp) {
		error = copyin(uap->tzp, &atz, sizeof(atz));
		if (error)
			return (error);
		tzp = &atz;
	} else
		tzp = NULL;
	return (kern_settimeofday(td, tvp, tzp));
}

int
kern_settimeofday(struct thread *td, struct timeval *tv, struct timezone *tzp)
{
	int error;

	error = priv_check(td, PRIV_SETTIMEOFDAY);
	if (error)
		return (error);
	/* Verify all parameters before changing time. */
	if (tv) {
		if (tv->tv_usec < 0 || tv->tv_usec >= 1000000 ||
		    tv->tv_sec < 0)
			return (EINVAL);
		error = settime(td, tv);
	}
	return (error);
}

/*
 * Get value of an interval timer.  The process virtual and profiling virtual
 * time timers are kept in the p_stats area, since they can be swapped out.
 * These are kept internally in the way they are specified externally: in
 * time until they expire.
 *
 * The real time interval timer is kept in the process table slot for the
 * process, and its value (it_value) is kept as an absolute time rather than
 * as a delta, so that it is easy to keep periodic real-time signals from
 * drifting.
 *
 * Virtual time timers are processed in the hardclock() routine of
 * kern_clock.c.  The real time timer is processed by a timeout routine,
 * called from the softclock() routine.  Since a callout may be delayed in
 * real time due to interrupt processing in the system, it is possible for
 * the real time timeout routine (realitexpire, given below), to be delayed
 * in real time past when it is supposed to occur.  It does not suffice,
 * therefore, to reload the real timer .it_value from the real time timers
 * .it_interval.  Rather, we compute the next time in absolute time the timer
 * should go off.
 */
#ifndef _SYS_SYSPROTO_H_
struct getitimer_args {
	u_int which;
	struct itimerval *itv;
};
#endif
int
sys_getitimer(struct thread *td, struct getitimer_args *uap)
{
	struct itimerval aitv;
	int error;

	error = kern_getitimer(td, uap->which, &aitv);
	if (error != 0)
		return (error);
	return (copyout(&aitv, uap->itv, sizeof (struct itimerval)));
}

int
kern_getitimer(struct thread *td, u_int which, struct itimerval *aitv)
{
	struct proc *p = td->td_proc;
	struct timeval ctv;

	if (which > ITIMER_PROF)
		return (EINVAL);

	if (which == ITIMER_REAL) {
		/*
		 * Convert from absolute to relative time in .it_value
		 * part of real time timer.  If time for real time timer
		 * has passed return 0, else return difference between
		 * current time and time for the timer to go off.
		 */
		PROC_LOCK(p);
		*aitv = p->p_realtimer;
		PROC_UNLOCK(p);
		if (timevalisset(&aitv->it_value)) {
			microuptime(&ctv);
			if (timevalcmp(&aitv->it_value, &ctv, <))
				timevalclear(&aitv->it_value);
			else
				timevalsub(&aitv->it_value, &ctv);
		}
	} else {
		PROC_ITIMLOCK(p);
		*aitv = p->p_stats->p_timer[which];
		PROC_ITIMUNLOCK(p);
	}
#ifdef KTRACE
	if (KTRPOINT(td, KTR_STRUCT))
		ktritimerval(aitv);
#endif
	return (0);
}
#ifndef _SYS_SYSPROTO_H_
struct setitimer_args {
	u_int which;
	struct itimerval *itv, *oitv;
};
#endif
int
sys_setitimer(struct thread *td, struct setitimer_args *uap)
{
	struct itimerval aitv, oitv;
	int error;

	if (uap->itv == NULL) {
		uap->itv = uap->oitv;
		return (sys_getitimer(td, (struct getitimer_args *)uap));
	}

	if ((error = copyin(uap->itv, &aitv, sizeof(struct itimerval))))
		return (error);
	error = kern_setitimer(td, uap->which, &aitv, &oitv);
	if (error != 0 || uap->oitv == NULL)
		return (error);
	return (copyout(&oitv, uap->oitv, sizeof(struct itimerval)));
}

int
kern_setitimer(struct thread *td, u_int which, struct itimerval *aitv,
    struct itimerval *oitv)
{
	struct proc *p = td->td_proc;
	struct timeval ctv;
	sbintime_t sbt, pr;

	if (aitv == NULL)
		return (kern_getitimer(td, which, oitv));

	if (which > ITIMER_PROF)
		return (EINVAL);
#ifdef KTRACE
	if (KTRPOINT(td, KTR_STRUCT))
		ktritimerval(aitv);
#endif
	if (itimerfix(&aitv->it_value) ||
	    aitv->it_value.tv_sec > INT32_MAX / 2)
		return (EINVAL);
	if (!timevalisset(&aitv->it_value))
		timevalclear(&aitv->it_interval);
	else if (itimerfix(&aitv->it_interval) ||
	    aitv->it_interval.tv_sec > INT32_MAX / 2)
		return (EINVAL);

	if (which == ITIMER_REAL) {
		PROC_LOCK(p);
		if (timevalisset(&p->p_realtimer.it_value))
			callout_stop(&p->p_itcallout);
		microuptime(&ctv);
		if (timevalisset(&aitv->it_value)) {
			pr = tvtosbt(aitv->it_value) >> tc_precexp;
			timevaladd(&aitv->it_value, &ctv);
			sbt = tvtosbt(aitv->it_value);
			callout_reset_sbt(&p->p_itcallout, sbt, pr,
			    realitexpire, p, C_ABSOLUTE);
		}
		*oitv = p->p_realtimer;
		p->p_realtimer = *aitv;
		PROC_UNLOCK(p);
		if (timevalisset(&oitv->it_value)) {
			if (timevalcmp(&oitv->it_value, &ctv, <))
				timevalclear(&oitv->it_value);
			else
				timevalsub(&oitv->it_value, &ctv);
		}
	} else {
		if (aitv->it_interval.tv_sec == 0 &&
		    aitv->it_interval.tv_usec != 0 &&
		    aitv->it_interval.tv_usec < tick)
			aitv->it_interval.tv_usec = tick;
		if (aitv->it_value.tv_sec == 0 &&
		    aitv->it_value.tv_usec != 0 &&
		    aitv->it_value.tv_usec < tick)
			aitv->it_value.tv_usec = tick;
		PROC_ITIMLOCK(p);
		*oitv = p->p_stats->p_timer[which];
		p->p_stats->p_timer[which] = *aitv;
		PROC_ITIMUNLOCK(p);
	}
#ifdef KTRACE
	if (KTRPOINT(td, KTR_STRUCT))
		ktritimerval(oitv);
#endif
	return (0);
}
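/*
 * Example (illustrative only, userland): arm a periodic 1.5 s real-time
 * interval timer; each expiry delivers SIGALRM, driven in the kernel by
 * realitexpire() below.  handler is a hypothetical signal handler.
 *
 *	struct itimerval itv = {
 *		.it_value    = { 1, 500000 },	// first expiry in 1.5 s
 *		.it_interval = { 1, 500000 },	// then every 1.5 s
 *	};
 *
 *	signal(SIGALRM, handler);
 *	setitimer(ITIMER_REAL, &itv, NULL);
 */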
/*
 * Real interval timer expired:
 * send process whose timer expired an alarm signal.
 * If time is not set up to reload, then just return.
 * Else compute next time timer should go off which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 * tvtohz() always adds 1 to allow for the time until the next clock
 * interrupt being strictly less than 1 clock tick, but we don't want
 * that here since we want to appear to be in sync with the clock
 * interrupt even when we're delayed.
 */
void
realitexpire(void *arg)
{
	struct proc *p;
	struct timeval ctv;
	sbintime_t isbt;

	p = (struct proc *)arg;
	kern_psignal(p, SIGALRM);
	if (!timevalisset(&p->p_realtimer.it_interval)) {
		timevalclear(&p->p_realtimer.it_value);
		if (p->p_flag & P_WEXIT)
			wakeup(&p->p_itcallout);
		return;
	}
	isbt = tvtosbt(p->p_realtimer.it_interval);
	if (isbt >= sbt_timethreshold)
		getmicrouptime(&ctv);
	else
		microuptime(&ctv);
	do {
		timevaladd(&p->p_realtimer.it_value,
		    &p->p_realtimer.it_interval);
	} while (timevalcmp(&p->p_realtimer.it_value, &ctv, <=));
	callout_reset_sbt(&p->p_itcallout, tvtosbt(p->p_realtimer.it_value),
	    isbt >> tc_precexp, realitexpire, p, C_ABSOLUTE);
}
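/*
 * Worked example for the realitexpire() catch-up loop above (hypothetical
 * numbers): with it_value = 5 s, it_interval = 2 s and a callout delayed
 * until uptime 10.3 s, the loop advances it_value 5 -> 7 -> 9 -> 11 and
 * rearms the callout at the absolute time 11 s.  Only one SIGALRM is sent
 * for the missed expiries, and the timer stays phase-aligned with its
 * original schedule instead of drifting by the delay.
 */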
/*
 * Check that a proposed value to load into the .it_value or
 * .it_interval part of an interval timer is acceptable, and
 * fix it to have at least minimal value (i.e. if it is less
 * than the resolution of the clock, round it up.)
 */
int
itimerfix(struct timeval *tv)
{

	if (tv->tv_sec < 0 || tv->tv_usec < 0 || tv->tv_usec >= 1000000)
		return (EINVAL);
	if (tv->tv_sec == 0 && tv->tv_usec != 0 &&
	    tv->tv_usec < (u_int)tick / 16)
		tv->tv_usec = (u_int)tick / 16;
	return (0);
}

/*
 * Decrement an interval timer by a specified number
 * of microseconds, which must be less than a second,
 * i.e. < 1000000.  If the timer expires, then reload
 * it.  In this case, carry over (usec - old value) to
 * reduce the value reloaded into the timer so that
 * the timer does not drift.  This routine assumes
 * that it is called in a context where the timers
 * on which it is operating cannot change in value.
 */
int
itimerdecr(struct itimerval *itp, int usec)
{

	if (itp->it_value.tv_usec < usec) {
		if (itp->it_value.tv_sec == 0) {
			/* expired, and already in next interval */
			usec -= itp->it_value.tv_usec;
			goto expire;
		}
		itp->it_value.tv_usec += 1000000;
		itp->it_value.tv_sec--;
	}
	itp->it_value.tv_usec -= usec;
	usec = 0;
	if (timevalisset(&itp->it_value))
		return (1);
	/* expired, exactly at end of interval */
expire:
	if (timevalisset(&itp->it_interval)) {
		itp->it_value = itp->it_interval;
		itp->it_value.tv_usec -= usec;
		if (itp->it_value.tv_usec < 0) {
			itp->it_value.tv_usec += 1000000;
			itp->it_value.tv_sec--;
		}
	} else
		itp->it_value.tv_usec = 0;	/* sec is already 0 */
	return (0);
}

/*
 * Add and subtract routines for timevals.
 * N.B.: subtract routine doesn't deal with
 * results which are before the beginning,
 * it just gets very confused in this case.
 * Caveat emptor.
 */
void
timevaladd(struct timeval *t1, const struct timeval *t2)
{

	t1->tv_sec += t2->tv_sec;
	t1->tv_usec += t2->tv_usec;
	timevalfix(t1);
}

void
timevalsub(struct timeval *t1, const struct timeval *t2)
{

	t1->tv_sec -= t2->tv_sec;
	t1->tv_usec -= t2->tv_usec;
	timevalfix(t1);
}

static void
timevalfix(struct timeval *t1)
{

	if (t1->tv_usec < 0) {
		t1->tv_sec--;
		t1->tv_usec += 1000000;
	}
	if (t1->tv_usec >= 1000000) {
		t1->tv_sec++;
		t1->tv_usec -= 1000000;
	}
}

/*
 * ratecheck(): simple time-based rate-limit checking.
 */
int
ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
{
	struct timeval tv, delta;
	int rv = 0;

	getmicrouptime(&tv);		/* NB: 10ms precision */
	delta = tv;
	timevalsub(&delta, lasttime);

	/*
	 * check for 0,0 is so that the message will be seen at least once,
	 * even if interval is huge.
	 */
	if (timevalcmp(&delta, mininterval, >=) ||
	    (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
		*lasttime = tv;
		rv = 1;
	}

	return (rv);
}
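/*
 * Example (illustrative only): a typical in-kernel use of ratecheck() to
 * throttle a diagnostic message to at most once every 5 seconds.  The
 * state variables must persist across calls.
 *
 *	static struct timeval lastwarn;
 *	static const struct timeval warninterval = { 5, 0 };
 *
 *	if (ratecheck(&lastwarn, &warninterval))
 *		printf("device: too many widget errors\n");
 */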
/*
 * ppsratecheck(): packets (or events) per second limitation.
 *
 * Return 0 if the limit is to be enforced (e.g. the caller
 * should drop a packet because of the rate limitation).
 *
 * maxpps of 0 always causes zero to be returned.  maxpps of -1
 * always causes 1 to be returned; this effectively defeats rate
 * limiting.
 *
 * Note that we maintain the struct timeval for compatibility
 * with other bsd systems.  We reuse the storage and just monitor
 * clock ticks for minimal overhead.
 */
int
ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
{
	int now;

	/*
	 * Reset the last time and counter if this is the first call
	 * or more than a second has passed since the last update of
	 * lasttime.
	 */
	now = ticks;
	if (lasttime->tv_sec == 0 || (u_int)(now - lasttime->tv_sec) >= hz) {
		lasttime->tv_sec = now;
		*curpps = 1;
		return (maxpps != 0);
	} else {
		(*curpps)++;		/* NB: ignore potential overflow */
		return (maxpps < 0 || *curpps <= maxpps);
	}
}
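/*
 * Example (illustrative only): limiting log output to at most 10 events
 * per second with ppsratecheck(); as with ratecheck(), lasttime/curpps
 * must persist across calls.
 *
 *	static struct timeval lastlog;
 *	static int curlogs;
 *
 *	if (ppsratecheck(&lastlog, &curlogs, 10))
 *		log(LOG_WARNING, "dropping bad packet\n");
 */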
static void
itimer_start(void)
{
	static const struct kclock rt_clock = {
		.timer_create  = realtimer_create,
		.timer_delete  = realtimer_delete,
		.timer_settime = realtimer_settime,
		.timer_gettime = realtimer_gettime,
	};

	itimer_zone = uma_zcreate("itimer", sizeof(struct itimer),
	    NULL, NULL, itimer_init, itimer_fini, UMA_ALIGN_PTR, 0);
	register_posix_clock(CLOCK_REALTIME, &rt_clock);
	register_posix_clock(CLOCK_MONOTONIC, &rt_clock);
	p31b_setcfg(CTL_P1003_1B_TIMERS, 200112L);
	p31b_setcfg(CTL_P1003_1B_DELAYTIMER_MAX, INT_MAX);
	p31b_setcfg(CTL_P1003_1B_TIMER_MAX, TIMER_MAX);
}

static int
register_posix_clock(int clockid, const struct kclock *clk)
{
	if ((unsigned)clockid >= MAX_CLOCKS) {
		printf("%s: invalid clockid\n", __func__);
		return (0);
	}
	posix_clocks[clockid] = *clk;
	return (1);
}

static int
itimer_init(void *mem, int size, int flags)
{
	struct itimer *it;

	it = (struct itimer *)mem;
	mtx_init(&it->it_mtx, "itimer lock", NULL, MTX_DEF);
	return (0);
}

static void
itimer_fini(void *mem, int size)
{
	struct itimer *it;

	it = (struct itimer *)mem;
	mtx_destroy(&it->it_mtx);
}

static void
itimer_enter(struct itimer *it)
{

	mtx_assert(&it->it_mtx, MA_OWNED);
	it->it_usecount++;
}

static void
itimer_leave(struct itimer *it)
{

	mtx_assert(&it->it_mtx, MA_OWNED);
	KASSERT(it->it_usecount > 0, ("invalid it_usecount"));

	if (--it->it_usecount == 0 && (it->it_flags & ITF_WANTED) != 0)
		wakeup(it);
}

#ifndef _SYS_SYSPROTO_H_
struct ktimer_create_args {
	clockid_t clock_id;
	struct sigevent *evp;
	int *timerid;
};
#endif
int
sys_ktimer_create(struct thread *td, struct ktimer_create_args *uap)
{
	struct sigevent *evp, ev;
	int id;
	int error;

	if (uap->evp == NULL) {
		evp = NULL;
	} else {
		error = copyin(uap->evp, &ev, sizeof(ev));
		if (error != 0)
			return (error);
		evp = &ev;
	}
	error = kern_ktimer_create(td, uap->clock_id, evp, &id, -1);
	if (error == 0) {
		error = copyout(&id, uap->timerid, sizeof(int));
		if (error != 0)
			kern_ktimer_delete(td, id);
	}
	return (error);
}

int
kern_ktimer_create(struct thread *td, clockid_t clock_id, struct sigevent *evp,
    int *timerid, int preset_id)
{
	struct proc *p = td->td_proc;
	struct itimer *it;
	int id;
	int error;

	if (clock_id < 0 || clock_id >= MAX_CLOCKS)
		return (EINVAL);

	if (posix_clocks[clock_id].timer_create == NULL)
		return (EINVAL);

	if (evp != NULL) {
		if (evp->sigev_notify != SIGEV_NONE &&
		    evp->sigev_notify != SIGEV_SIGNAL &&
		    evp->sigev_notify != SIGEV_THREAD_ID)
			return (EINVAL);
		if ((evp->sigev_notify == SIGEV_SIGNAL ||
		    evp->sigev_notify == SIGEV_THREAD_ID) &&
		    !_SIG_VALID(evp->sigev_signo))
			return (EINVAL);
	}

	if (p->p_itimers == NULL)
		itimers_alloc(p);

	it = uma_zalloc(itimer_zone, M_WAITOK);
	it->it_flags = 0;
	it->it_usecount = 0;
	it->it_active = 0;
	timespecclear(&it->it_time.it_value);
	timespecclear(&it->it_time.it_interval);
	it->it_overrun = 0;
	it->it_overrun_last = 0;
	it->it_clockid = clock_id;
	it->it_timerid = -1;
	it->it_proc = p;
	ksiginfo_init(&it->it_ksi);
	it->it_ksi.ksi_flags |= KSI_INS | KSI_EXT;
	error = CLOCK_CALL(clock_id, timer_create, (it));
	if (error != 0)
		goto out;

	PROC_LOCK(p);
	if (preset_id != -1) {
		KASSERT(preset_id >= 0 && preset_id < 3, ("invalid preset_id"));
		id = preset_id;
		if (p->p_itimers->its_timers[id] != NULL) {
			PROC_UNLOCK(p);
			error = 0;
			goto out;
		}
	} else {
		/*
		 * Find a free timer slot, skipping those reserved
		 * for setitimer().
		 */
		for (id = 3; id < TIMER_MAX; id++)
			if (p->p_itimers->its_timers[id] == NULL)
				break;
		if (id == TIMER_MAX) {
			PROC_UNLOCK(p);
			error = EAGAIN;
			goto out;
		}
	}
	it->it_timerid = id;
	p->p_itimers->its_timers[id] = it;
	if (evp != NULL)
		it->it_sigev = *evp;
	else {
		it->it_sigev.sigev_notify = SIGEV_SIGNAL;
		switch (clock_id) {
		default:
		case CLOCK_REALTIME:
			it->it_sigev.sigev_signo = SIGALRM;
			break;
		case CLOCK_VIRTUAL:
			it->it_sigev.sigev_signo = SIGVTALRM;
			break;
		case CLOCK_PROF:
			it->it_sigev.sigev_signo = SIGPROF;
			break;
		}
		it->it_sigev.sigev_value.sival_int = id;
	}

	if (it->it_sigev.sigev_notify == SIGEV_SIGNAL ||
	    it->it_sigev.sigev_notify == SIGEV_THREAD_ID) {
		it->it_ksi.ksi_signo = it->it_sigev.sigev_signo;
		it->it_ksi.ksi_code = SI_TIMER;
		it->it_ksi.ksi_value = it->it_sigev.sigev_value;
		it->it_ksi.ksi_timerid = id;
	}
	PROC_UNLOCK(p);
	*timerid = id;
	return (0);

out:
	ITIMER_LOCK(it);
	CLOCK_CALL(it->it_clockid, timer_delete, (it));
	ITIMER_UNLOCK(it);
	uma_zfree(itimer_zone, it);
	return (error);
}
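/*
 * Example (illustrative only, userland): create a POSIX timer on
 * CLOCK_REALTIME that delivers SIGUSR1, i.e. the SIGEV_SIGNAL path
 * validated in kern_ktimer_create() above.
 *
 *	struct sigevent sev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo  = SIGUSR1,
 *	};
 *	timer_t tid;
 *
 *	if (timer_create(CLOCK_REALTIME, &sev, &tid) == -1)
 *		err(1, "timer_create");
 */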
#ifndef _SYS_SYSPROTO_H_
struct ktimer_delete_args {
	int timerid;
};
#endif
int
sys_ktimer_delete(struct thread *td, struct ktimer_delete_args *uap)
{

	return (kern_ktimer_delete(td, uap->timerid));
}

static struct itimer *
itimer_find(struct proc *p, int timerid)
{
	struct itimer *it;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((p->p_itimers == NULL) ||
	    (timerid < 0) || (timerid >= TIMER_MAX) ||
	    (it = p->p_itimers->its_timers[timerid]) == NULL) {
		return (NULL);
	}
	ITIMER_LOCK(it);
	if ((it->it_flags & ITF_DELETING) != 0) {
		ITIMER_UNLOCK(it);
		it = NULL;
	}
	return (it);
}

int
kern_ktimer_delete(struct thread *td, int timerid)
{
	struct proc *p = td->td_proc;
	struct itimer *it;

	PROC_LOCK(p);
	it = itimer_find(p, timerid);
	if (it == NULL) {
		PROC_UNLOCK(p);
		return (EINVAL);
	}
	PROC_UNLOCK(p);

	it->it_flags |= ITF_DELETING;
	while (it->it_usecount > 0) {
		it->it_flags |= ITF_WANTED;
		msleep(it, &it->it_mtx, PPAUSE, "itimer", 0);
	}
	it->it_flags &= ~ITF_WANTED;
	CLOCK_CALL(it->it_clockid, timer_delete, (it));
	ITIMER_UNLOCK(it);

	PROC_LOCK(p);
	if (KSI_ONQ(&it->it_ksi))
		sigqueue_take(&it->it_ksi);
	p->p_itimers->its_timers[timerid] = NULL;
	PROC_UNLOCK(p);
	uma_zfree(itimer_zone, it);
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct ktimer_settime_args {
	int timerid;
	int flags;
	const struct itimerspec *value;
	struct itimerspec *ovalue;
};
#endif
int
sys_ktimer_settime(struct thread *td, struct ktimer_settime_args *uap)
{
	struct itimerspec val, oval, *ovalp;
	int error;

	error = copyin(uap->value, &val, sizeof(val));
	if (error != 0)
		return (error);
	ovalp = uap->ovalue != NULL ? &oval : NULL;
	error = kern_ktimer_settime(td, uap->timerid, uap->flags, &val, ovalp);
	if (error == 0 && uap->ovalue != NULL)
		error = copyout(ovalp, uap->ovalue, sizeof(*ovalp));
	return (error);
}

int
kern_ktimer_settime(struct thread *td, int timer_id, int flags,
    struct itimerspec *val, struct itimerspec *oval)
{
	struct proc *p;
	struct itimer *it;
	int error;

	p = td->td_proc;
	PROC_LOCK(p);
	if (timer_id < 3 || (it = itimer_find(p, timer_id)) == NULL) {
		PROC_UNLOCK(p);
		error = EINVAL;
	} else {
		PROC_UNLOCK(p);
		itimer_enter(it);
		error = CLOCK_CALL(it->it_clockid, timer_settime, (it,
		    flags, val, oval));
		itimer_leave(it);
		ITIMER_UNLOCK(it);
	}
	return (error);
}
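/*
 * Example (illustrative only, userland): arm the timer created above
 * (hypothetical tid) for a 100 ms one-shot followed by 100 ms periodic
 * expiries; this reaches realtimer_settime() below via
 * kern_ktimer_settime().
 *
 *	struct itimerspec its = {
 *		.it_value    = { 0, 100000000 },	// first expiry: 100 ms
 *		.it_interval = { 0, 100000000 },	// period: 100 ms
 *	};
 *
 *	if (timer_settime(tid, 0, &its, NULL) == -1)	// 0: relative time
 *		err(1, "timer_settime");
 */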
#ifndef _SYS_SYSPROTO_H_
struct ktimer_gettime_args {
	int timerid;
	struct itimerspec *value;
};
#endif
int
sys_ktimer_gettime(struct thread *td, struct ktimer_gettime_args *uap)
{
	struct itimerspec val;
	int error;

	error = kern_ktimer_gettime(td, uap->timerid, &val);
	if (error == 0)
		error = copyout(&val, uap->value, sizeof(val));
	return (error);
}

int
kern_ktimer_gettime(struct thread *td, int timer_id, struct itimerspec *val)
{
	struct proc *p;
	struct itimer *it;
	int error;

	p = td->td_proc;
	PROC_LOCK(p);
	if (timer_id < 3 || (it = itimer_find(p, timer_id)) == NULL) {
		PROC_UNLOCK(p);
		error = EINVAL;
	} else {
		PROC_UNLOCK(p);
		itimer_enter(it);
		error = CLOCK_CALL(it->it_clockid, timer_gettime, (it, val));
		itimer_leave(it);
		ITIMER_UNLOCK(it);
	}
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ktimer_getoverrun_args {
	int timerid;
};
#endif
int
sys_ktimer_getoverrun(struct thread *td, struct ktimer_getoverrun_args *uap)
{

	return (kern_ktimer_getoverrun(td, uap->timerid));
}

int
kern_ktimer_getoverrun(struct thread *td, int timer_id)
{
	struct proc *p = td->td_proc;
	struct itimer *it;
	int error;

	PROC_LOCK(p);
	if (timer_id < 3 ||
	    (it = itimer_find(p, timer_id)) == NULL) {
		PROC_UNLOCK(p);
		error = EINVAL;
	} else {
		td->td_retval[0] = it->it_overrun_last;
		ITIMER_UNLOCK(it);
		PROC_UNLOCK(p);
		error = 0;
	}
	return (error);
}
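/*
 * Example (illustrative only, userland): reading the overrun count that
 * kern_ktimer_getoverrun() returns from it_overrun_last.  If signal
 * delivery is delayed while, say, three additional expiries occur, the
 * handler can recover the missed count:
 *
 *	int missed = timer_getoverrun(tid);	// would report 3 here
 */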
static int
realtimer_create(struct itimer *it)
{
	callout_init_mtx(&it->it_callout, &it->it_mtx, 0);
	return (0);
}

static int
realtimer_delete(struct itimer *it)
{
	mtx_assert(&it->it_mtx, MA_OWNED);

	/*
	 * clear timer's value and interval to tell realtimer_expire
	 * to not rearm the timer.
	 */
	timespecclear(&it->it_time.it_value);
	timespecclear(&it->it_time.it_interval);
	ITIMER_UNLOCK(it);
	callout_drain(&it->it_callout);
	ITIMER_LOCK(it);
	return (0);
}

static int
realtimer_gettime(struct itimer *it, struct itimerspec *ovalue)
{
	struct timespec cts;

	mtx_assert(&it->it_mtx, MA_OWNED);

	realtimer_clocktime(it->it_clockid, &cts);
	*ovalue = it->it_time;
	if (ovalue->it_value.tv_sec != 0 || ovalue->it_value.tv_nsec != 0) {
		timespecsub(&ovalue->it_value, &cts, &ovalue->it_value);
		if (ovalue->it_value.tv_sec < 0 ||
		    (ovalue->it_value.tv_sec == 0 &&
		    ovalue->it_value.tv_nsec == 0)) {
			ovalue->it_value.tv_sec = 0;
			ovalue->it_value.tv_nsec = 1;
		}
	}
	return (0);
}

static int
realtimer_settime(struct itimer *it, int flags,
    struct itimerspec *value, struct itimerspec *ovalue)
{
	struct timespec cts, ts;
	struct timeval tv;
	struct itimerspec val;

	mtx_assert(&it->it_mtx, MA_OWNED);

	val = *value;
	if (itimespecfix(&val.it_value))
		return (EINVAL);

	if (timespecisset(&val.it_value)) {
		if (itimespecfix(&val.it_interval))
			return (EINVAL);
	} else {
		timespecclear(&val.it_interval);
	}

	if (ovalue != NULL)
		realtimer_gettime(it, ovalue);

	it->it_time = val;
	if (timespecisset(&val.it_value)) {
		realtimer_clocktime(it->it_clockid, &cts);
		ts = val.it_value;
		if ((flags & TIMER_ABSTIME) == 0) {
			/* Convert to absolute time. */
			timespecadd(&it->it_time.it_value, &cts,
			    &it->it_time.it_value);
		} else {
			timespecsub(&ts, &cts, &ts);
			/*
			 * We don't care if ts is negative, tvtohz will
			 * fix it.
			 */
		}
		TIMESPEC_TO_TIMEVAL(&tv, &ts);
		callout_reset(&it->it_callout, tvtohz(&tv),
		    realtimer_expire, it);
	} else {
		callout_stop(&it->it_callout);
	}

	return (0);
}

static void
realtimer_clocktime(clockid_t id, struct timespec *ts)
{
	if (id == CLOCK_REALTIME)
		getnanotime(ts);
	else	/* CLOCK_MONOTONIC */
		getnanouptime(ts);
}

int
itimer_accept(struct proc *p, int timerid, ksiginfo_t *ksi)
{
	struct itimer *it;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	it = itimer_find(p, timerid);
	if (it != NULL) {
		ksi->ksi_overrun = it->it_overrun;
		it->it_overrun_last = it->it_overrun;
		it->it_overrun = 0;
		ITIMER_UNLOCK(it);
		return (0);
	}
	return (EINVAL);
}

int
itimespecfix(struct timespec *ts)
{

	if (ts->tv_sec < 0 || ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000)
		return (EINVAL);
	if (ts->tv_sec == 0 && ts->tv_nsec != 0 && ts->tv_nsec < tick * 1000)
		ts->tv_nsec = tick * 1000;
	return (0);
}

/* Timeout callback for realtime timer */
static void
realtimer_expire(void *arg)
{
	struct timespec cts, ts;
	struct timeval tv;
	struct itimer *it;

	it = (struct itimer *)arg;

	realtimer_clocktime(it->it_clockid, &cts);
	/* Only fire if time is reached. */
	if (timespeccmp(&cts, &it->it_time.it_value, >=)) {
		if (timespecisset(&it->it_time.it_interval)) {
			timespecadd(&it->it_time.it_value,
			    &it->it_time.it_interval,
			    &it->it_time.it_value);
			while (timespeccmp(&cts, &it->it_time.it_value, >=)) {
				if (it->it_overrun < INT_MAX)
					it->it_overrun++;
				else
					it->it_ksi.ksi_errno = ERANGE;
				timespecadd(&it->it_time.it_value,
				    &it->it_time.it_interval,
				    &it->it_time.it_value);
			}
		} else {
			/* single shot timer ? */
			timespecclear(&it->it_time.it_value);
		}
		if (timespecisset(&it->it_time.it_value)) {
			timespecsub(&it->it_time.it_value, &cts, &ts);
			TIMESPEC_TO_TIMEVAL(&tv, &ts);
			callout_reset(&it->it_callout, tvtohz(&tv),
			    realtimer_expire, it);
		}
		itimer_enter(it);
		ITIMER_UNLOCK(it);
		itimer_fire(it);
		ITIMER_LOCK(it);
		itimer_leave(it);
	} else if (timespecisset(&it->it_time.it_value)) {
		ts = it->it_time.it_value;
		timespecsub(&ts, &cts, &ts);
		TIMESPEC_TO_TIMEVAL(&tv, &ts);
		callout_reset(&it->it_callout, tvtohz(&tv), realtimer_expire,
		    it);
	}
}

void
itimer_fire(struct itimer *it)
{
	struct proc *p = it->it_proc;
	struct thread *td;

	if (it->it_sigev.sigev_notify == SIGEV_SIGNAL ||
	    it->it_sigev.sigev_notify == SIGEV_THREAD_ID) {
		if (sigev_findtd(p, &it->it_sigev, &td) != 0) {
			ITIMER_LOCK(it);
			timespecclear(&it->it_time.it_value);
			timespecclear(&it->it_time.it_interval);
			callout_stop(&it->it_callout);
			ITIMER_UNLOCK(it);
			return;
		}
		if (!KSI_ONQ(&it->it_ksi)) {
			it->it_ksi.ksi_errno = 0;
			ksiginfo_set_sigev(&it->it_ksi, &it->it_sigev);
			tdsendsignal(p, td, it->it_ksi.ksi_signo, &it->it_ksi);
		} else {
			if (it->it_overrun < INT_MAX)
				it->it_overrun++;
			else
				it->it_ksi.ksi_errno = ERANGE;
		}
		PROC_UNLOCK(p);
	}
}

static void
itimers_alloc(struct proc *p)
{
	struct itimers *its;
	int i;

	its = malloc(sizeof (struct itimers), M_SUBPROC, M_WAITOK | M_ZERO);
	LIST_INIT(&its->its_virtual);
	LIST_INIT(&its->its_prof);
	TAILQ_INIT(&its->its_worklist);
	for (i = 0; i < TIMER_MAX; i++)
		its->its_timers[i] = NULL;
	PROC_LOCK(p);
	if (p->p_itimers == NULL) {
		p->p_itimers = its;
		PROC_UNLOCK(p);
	} else {
		PROC_UNLOCK(p);
		free(its, M_SUBPROC);
	}
}

/* Clean up timers when some process events are being triggered. */
static void
itimers_event_exit_exec(int start_idx, struct proc *p)
{
	struct itimers *its;
	struct itimer *it;
	int i;

	its = p->p_itimers;
	if (its == NULL)
		return;

	for (i = start_idx; i < TIMER_MAX; ++i) {
		if ((it = its->its_timers[i]) != NULL)
			kern_ktimer_delete(curthread, i);
	}
	if (its->its_timers[0] == NULL && its->its_timers[1] == NULL &&
	    its->its_timers[2] == NULL) {
		free(its, M_SUBPROC);
		p->p_itimers = NULL;
	}
}

void
itimers_exec(struct proc *p)
{
	/*
	 * According to susv3, XSI interval timers should be inherited
	 * by new image.
	 */
	itimers_event_exit_exec(3, p);
}

void
itimers_exit(struct proc *p)
{
	itimers_event_exit_exec(0, p);
}