1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2007 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 /* 28 * Copyright (c) 2012, Joyent, Inc. All rights reserved. 29 */ 30 31 #include <sys/timer.h> 32 #include <sys/systm.h> 33 #include <sys/param.h> 34 #include <sys/kmem.h> 35 #include <sys/debug.h> 36 #include <sys/policy.h> 37 #include <sys/port_impl.h> 38 #include <sys/port_kernel.h> 39 #include <sys/contract/process_impl.h> 40 41 static kmem_cache_t *clock_timer_cache; 42 static clock_backend_t *clock_backend[CLOCK_MAX]; 43 static int timer_port_callback(void *, int *, pid_t, int, void *); 44 static void timer_close_port(void *, int, pid_t, int); 45 46 #define CLOCK_BACKEND(clk) \ 47 ((clk) < CLOCK_MAX && (clk) >= 0 ? clock_backend[(clk)] : NULL) 48 49 /* 50 * Tunable to increase the maximum number of POSIX timers per-process. This 51 * may _only_ be tuned in /etc/system or by patching the kernel binary; it 52 * _cannot_ be tuned on a running system. 53 */ 54 int timer_max = _TIMER_MAX; 55 56 /* 57 * timer_lock() locks the specified interval timer. 
 * It doesn't look at the
 * ITLK_REMOVE bit; it's up to callers to look at this if they need to
 * care.  p_lock must be held on entry; it may be dropped and reacquired
 * (cv_wait() on the timer's condition variable drops it), but
 * timer_lock() will always return with p_lock held.
 *
 * Note that timer_create() doesn't call timer_lock(); it creates timers
 * with the ITLK_LOCKED bit explicitly set.
 */
static void
timer_lock(proc_t *p, itimer_t *it)
{
	ASSERT(MUTEX_HELD(&p->p_lock));

	/*
	 * While another thread holds ITLK_LOCKED, advertise ourselves in
	 * it_blockers and sleep; timer_delete_locked() relies on
	 * it_blockers to know when all would-be lockers have drained.
	 */
	while (it->it_lock & ITLK_LOCKED) {
		it->it_blockers++;
		cv_wait(&it->it_cv, &p->p_lock);
		it->it_blockers--;
	}

	it->it_lock |= ITLK_LOCKED;
}

/*
 * timer_unlock() unlocks the specified interval timer, waking up any
 * waiters.  p_lock must be held on entry; it will not be dropped by
 * timer_unlock().
 */
static void
timer_unlock(proc_t *p, itimer_t *it)
{
	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(it->it_lock & ITLK_LOCKED);
	it->it_lock &= ~ITLK_LOCKED;
	cv_signal(&it->it_cv);
}

/*
 * timer_delete_locked() takes a proc pointer, timer ID and locked interval
 * timer, and deletes the specified timer.  It must be called with p_lock
 * held, and cannot be called on a timer which already has ITLK_REMOVE set;
 * the caller must check this.  timer_delete_locked() will set the ITLK_REMOVE
 * bit and will iteratively unlock and lock the interval timer until all
 * blockers have seen the ITLK_REMOVE and cleared out.  It will then zero
 * out the specified entry in the p_itimer array, and call into the clock
 * backend to complete the deletion.
 *
 * This function will always return with p_lock held (though it drops it
 * while calling into the clock backend and the event-port code).
 */
static void
timer_delete_locked(proc_t *p, timer_t tid, itimer_t *it)
{
	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(!(it->it_lock & ITLK_REMOVE));
	ASSERT(it->it_lock & ITLK_LOCKED);

	it->it_lock |= ITLK_REMOVE;

	/*
	 * If there are threads waiting to lock this timer, we'll unlock
	 * the timer, and block on the cv.  Threads blocking our removal will
	 * have the opportunity to run; when they see the ITLK_REMOVE flag
	 * set, they will immediately unlock the timer.
	 */
	while (it->it_blockers) {
		timer_unlock(p, it);
		cv_wait(&it->it_cv, &p->p_lock);
		timer_lock(p, it);
	}

	ASSERT(p->p_itimer[tid] == it);
	p->p_itimer[tid] = NULL;

	/*
	 * No one is blocked on this timer, and no one will be (we've set
	 * p_itimer[tid] to be NULL; no one can find it).  Now we call into
	 * the clock backend to delete the timer; it is up to the backend to
	 * guarantee that timer_fire() has completed (and will never again
	 * be called) for this timer.
	 */
	mutex_exit(&p->p_lock);

	it->it_backend->clk_timer_delete(it);

	/*
	 * Unlocked check first, then re-check under it_mutex: it_portev is
	 * cleared under it_mutex (here and in timer_close_port()), so the
	 * double check avoids taking the mutex in the common no-port case.
	 */
	if (it->it_portev) {
		mutex_enter(&it->it_mutex);
		if (it->it_portev) {
			port_kevent_t *pev;

			/* dissociate timer from the event port */
			(void) port_dissociate_ksource(it->it_portfd,
			    PORT_SOURCE_TIMER, (port_source_t *)it->it_portsrc);
			pev = (port_kevent_t *)it->it_portev;
			it->it_portev = NULL;
			it->it_flags &= ~IT_PORT;
			it->it_pending = 0;
			mutex_exit(&it->it_mutex);
			(void) port_remove_done_event(pev);
			port_free_event(pev);
		} else {
			mutex_exit(&it->it_mutex);
		}
	}

	mutex_enter(&p->p_lock);

	/*
	 * We need to be careful freeing the sigqueue for this timer;
	 * if a signal is pending, the sigqueue needs to be freed
	 * synchronously in siginfofree().  The need to free the sigqueue
	 * in siginfofree() is indicated by setting sq_func to NULL.
	 */
	if (it->it_pending > 0) {
		it->it_sigq->sq_func = NULL;
	} else {
		kmem_free(it->it_sigq, sizeof (sigqueue_t));
	}

	ASSERT(it->it_blockers == 0);
	kmem_cache_free(clock_timer_cache, it);
}

/*
 * timer_grab() and its companion routine, timer_release(), are wrappers
 * around timer_lock()/_unlock() which allow the timer_*(3R) routines to
 * (a) share error handling code and (b) not grab p_lock themselves.  Routines
 * which are called with p_lock held (e.g. timer_lwpbind(), timer_lwpexit())
 * must call timer_lock()/_unlock() explicitly.
 *
 * timer_grab() takes a proc and a timer ID, and returns a pointer to a
 * locked interval timer.  p_lock must _not_ be held on entry; timer_grab()
 * may acquire p_lock, but will always return with p_lock dropped.
 *
 * If timer_grab() fails, it will return NULL.  timer_grab() will fail if
 * one or more of the following is true:
 *
 * (a)	The specified timer ID is out of range.
 *
 * (b)	The specified timer ID does not correspond to a timer ID returned
 *	from timer_create(3R).
 *
 * (c)	The specified timer ID is currently being removed.
 */
static itimer_t *
timer_grab(proc_t *p, timer_t tid)
{
	itimer_t **itp, *it;

	if (tid >= timer_max || tid < 0)
		return (NULL);

	mutex_enter(&p->p_lock);

	if ((itp = p->p_itimer) == NULL || (it = itp[tid]) == NULL) {
		mutex_exit(&p->p_lock);
		return (NULL);
	}

	timer_lock(p, it);

	if (it->it_lock & ITLK_REMOVE) {
		/*
		 * Someone is removing this timer; it will soon be invalid.
		 */
		timer_unlock(p, it);
		mutex_exit(&p->p_lock);
		return (NULL);
	}

	mutex_exit(&p->p_lock);

	return (it);
}

/*
 * timer_release() releases a timer acquired with timer_grab().  p_lock
 * should not be held on entry; timer_release() will acquire p_lock but
 * will drop it before returning.
234 */ 235 static void 236 timer_release(proc_t *p, itimer_t *it) 237 { 238 mutex_enter(&p->p_lock); 239 timer_unlock(p, it); 240 mutex_exit(&p->p_lock); 241 } 242 243 /* 244 * timer_delete_grabbed() deletes a timer acquired with timer_grab(). 245 * p_lock should not be held on entry; timer_delete_grabbed() will acquire 246 * p_lock, but will drop it before returning. 247 */ 248 static void 249 timer_delete_grabbed(proc_t *p, timer_t tid, itimer_t *it) 250 { 251 mutex_enter(&p->p_lock); 252 timer_delete_locked(p, tid, it); 253 mutex_exit(&p->p_lock); 254 } 255 256 void 257 clock_timer_init() 258 { 259 clock_timer_cache = kmem_cache_create("timer_cache", 260 sizeof (itimer_t), 0, NULL, NULL, NULL, NULL, NULL, 0); 261 } 262 263 void 264 clock_add_backend(clockid_t clock, clock_backend_t *backend) 265 { 266 ASSERT(clock >= 0 && clock < CLOCK_MAX); 267 ASSERT(clock_backend[clock] == NULL); 268 269 clock_backend[clock] = backend; 270 } 271 272 int 273 clock_settime(clockid_t clock, timespec_t *tp) 274 { 275 timespec_t t; 276 clock_backend_t *backend; 277 int error; 278 279 if ((backend = CLOCK_BACKEND(clock)) == NULL) 280 return (set_errno(EINVAL)); 281 282 if (secpolicy_settime(CRED()) != 0) 283 return (set_errno(EPERM)); 284 285 if (get_udatamodel() == DATAMODEL_NATIVE) { 286 if (copyin(tp, &t, sizeof (timespec_t)) != 0) 287 return (set_errno(EFAULT)); 288 } else { 289 timespec32_t t32; 290 291 if (copyin(tp, &t32, sizeof (timespec32_t)) != 0) 292 return (set_errno(EFAULT)); 293 294 TIMESPEC32_TO_TIMESPEC(&t, &t32); 295 } 296 297 if (itimerspecfix(&t)) 298 return (set_errno(EINVAL)); 299 300 error = backend->clk_clock_settime(&t); 301 302 if (error) 303 return (set_errno(error)); 304 305 return (0); 306 } 307 308 int 309 clock_gettime(clockid_t clock, timespec_t *tp) 310 { 311 timespec_t t; 312 clock_backend_t *backend; 313 int error; 314 315 if ((backend = CLOCK_BACKEND(clock)) == NULL) 316 return (set_errno(EINVAL)); 317 318 error = backend->clk_clock_gettime(&t); 
319 320 if (error) 321 return (set_errno(error)); 322 323 if (get_udatamodel() == DATAMODEL_NATIVE) { 324 if (copyout(&t, tp, sizeof (timespec_t)) != 0) 325 return (set_errno(EFAULT)); 326 } else { 327 timespec32_t t32; 328 329 if (TIMESPEC_OVERFLOW(&t)) 330 return (set_errno(EOVERFLOW)); 331 TIMESPEC_TO_TIMESPEC32(&t32, &t); 332 333 if (copyout(&t32, tp, sizeof (timespec32_t)) != 0) 334 return (set_errno(EFAULT)); 335 } 336 337 return (0); 338 } 339 340 int 341 clock_getres(clockid_t clock, timespec_t *tp) 342 { 343 timespec_t t; 344 clock_backend_t *backend; 345 int error; 346 347 /* 348 * Strangely, the standard defines clock_getres() with a NULL tp 349 * to do nothing (regardless of the validity of the specified 350 * clock_id). Go figure. 351 */ 352 if (tp == NULL) 353 return (0); 354 355 if ((backend = CLOCK_BACKEND(clock)) == NULL) 356 return (set_errno(EINVAL)); 357 358 error = backend->clk_clock_getres(&t); 359 360 if (error) 361 return (set_errno(error)); 362 363 if (get_udatamodel() == DATAMODEL_NATIVE) { 364 if (copyout(&t, tp, sizeof (timespec_t)) != 0) 365 return (set_errno(EFAULT)); 366 } else { 367 timespec32_t t32; 368 369 if (TIMESPEC_OVERFLOW(&t)) 370 return (set_errno(EOVERFLOW)); 371 TIMESPEC_TO_TIMESPEC32(&t32, &t); 372 373 if (copyout(&t32, tp, sizeof (timespec32_t)) != 0) 374 return (set_errno(EFAULT)); 375 } 376 377 return (0); 378 } 379 380 void 381 timer_signal(sigqueue_t *sigq) 382 { 383 itimer_t *it = (itimer_t *)sigq->sq_backptr; 384 385 /* 386 * There are some conditions during a fork or an exit when we can 387 * call siginfofree() without p_lock held. To prevent a race 388 * between timer_signal() and timer_fire() with regard to it_pending, 389 * we therefore acquire it_mutex in both paths. 390 */ 391 mutex_enter(&it->it_mutex); 392 ASSERT(it->it_pending > 0); 393 it->it_overrun = it->it_pending - 1; 394 it->it_pending = 0; 395 mutex_exit(&it->it_mutex); 396 } 397 398 /* 399 * This routine is called from the clock backend. 
 */
void
timer_fire(itimer_t *it)
{
	proc_t *p;
	int proc_lock_held;

	if (it->it_flags & IT_SIGNAL) {
		/*
		 * See the comment in timer_signal() for why it is not
		 * sufficient to only grab p_lock here.  Because p_lock can be
		 * held on entry to timer_signal(), the lock ordering is
		 * necessarily p_lock before it_mutex.
		 */
		p = it->it_proc;
		proc_lock_held = 1;
		mutex_enter(&p->p_lock);
	} else {
		/*
		 * IT_PORT:
		 * If a timer was ever programmed to send events to a port,
		 * the IT_PORT flag will remain set until:
		 * a) the timer is deleted (see timer_delete_locked()) or
		 * b) the port is being closed (see timer_close_port()).
		 * Both cases are synchronized with the it_mutex.
		 * We don't need to use the p_lock because it is only
		 * required in the IT_SIGNAL case.
		 * If IT_PORT was set and the port is being closed then
		 * the timer notification is set to NONE.  In such a case
		 * the timer itself and the it_pending counter remain active
		 * until the application deletes the counter or the process
		 * exits.
		 */
		proc_lock_held = 0;
	}
	mutex_enter(&it->it_mutex);

	if (it->it_pending > 0) {
		/*
		 * A prior firing has not yet been consumed; just count the
		 * overflow, saturating at INT_MAX.
		 */
		if (it->it_pending < INT_MAX)
			it->it_pending++;
		mutex_exit(&it->it_mutex);
	} else {
		/*
		 * First (undelivered) firing: deliver via the event port or
		 * by queueing the preallocated sigqueue, per it_flags.  Note
		 * that it_mutex is dropped before sigaddqa() per the lock
		 * ordering above.
		 */
		if (it->it_flags & IT_PORT) {
			it->it_pending = 1;
			port_send_event((port_kevent_t *)it->it_portev);
			mutex_exit(&it->it_mutex);
		} else if (it->it_flags & IT_SIGNAL) {
			it->it_pending = 1;
			mutex_exit(&it->it_mutex);
			sigaddqa(p, NULL, it->it_sigq);
		} else {
			mutex_exit(&it->it_mutex);
		}
	}

	if (proc_lock_held)
		mutex_exit(&p->p_lock);
}

/*
 * timer_create(3R) entry point: validate the clock and sigevent, allocate
 * an itimer_t and its sigqueue, find a free slot in p_itimer, wire up the
 * chosen notification mechanism (signal or event port), and call into the
 * clock backend to create the underlying timer.  On success the new timer
 * ID is copied out to *tid.
 */
int
timer_create(clockid_t clock, struct sigevent *evp, timer_t *tid)
{
	struct sigevent ev;
	proc_t *p = curproc;
	clock_backend_t *backend;
	itimer_t *it, **itp;
	sigqueue_t *sigq;
	cred_t *cr = CRED();
	int error = 0;
	timer_t i;
	port_notify_t tim_pnevp;
	port_kevent_t *pkevp = NULL;

	if ((backend = CLOCK_BACKEND(clock)) == NULL)
		return (set_errno(EINVAL));

	if (evp != NULL) {
		/*
		 * short copyin() for binary compatibility
		 * fetch oldsigevent to determine how much to copy in.
		 */
		if (get_udatamodel() == DATAMODEL_NATIVE) {
			if (copyin(evp, &ev, sizeof (struct oldsigevent)))
				return (set_errno(EFAULT));

			if (ev.sigev_notify == SIGEV_PORT ||
			    ev.sigev_notify == SIGEV_THREAD) {
				if (copyin(ev.sigev_value.sival_ptr, &tim_pnevp,
				    sizeof (port_notify_t)))
					return (set_errno(EFAULT));
			}
#ifdef	_SYSCALL32_IMPL
		} else {
			struct sigevent32 ev32;
			port_notify32_t tim_pnevp32;

			if (copyin(evp, &ev32, sizeof (struct oldsigevent32)))
				return (set_errno(EFAULT));
			ev.sigev_notify = ev32.sigev_notify;
			ev.sigev_signo = ev32.sigev_signo;
			/*
			 * See comment in sigqueue32() on handling of 32-bit
			 * sigvals in a 64-bit kernel.
			 */
			ev.sigev_value.sival_int = ev32.sigev_value.sival_int;
			if (ev.sigev_notify == SIGEV_PORT ||
			    ev.sigev_notify == SIGEV_THREAD) {
				if (copyin((void *)(uintptr_t)
				    ev32.sigev_value.sival_ptr,
				    (void *)&tim_pnevp32,
				    sizeof (port_notify32_t)))
					return (set_errno(EFAULT));
				tim_pnevp.portnfy_port =
				    tim_pnevp32.portnfy_port;
				tim_pnevp.portnfy_user =
				    (void *)(uintptr_t)tim_pnevp32.portnfy_user;
			}
#endif
		}
		switch (ev.sigev_notify) {
		case SIGEV_NONE:
			break;
		case SIGEV_SIGNAL:
			if (ev.sigev_signo < 1 || ev.sigev_signo >= NSIG)
				return (set_errno(EINVAL));
			break;
		case SIGEV_THREAD:
		case SIGEV_PORT:
			break;
		default:
			return (set_errno(EINVAL));
		}
	} else {
		/*
		 * Use the clock's default sigevent (this is a structure copy).
		 */
		ev = backend->clk_default;
	}

	/*
	 * We'll allocate our timer and sigqueue now, before we grab p_lock.
	 * If we can't find an empty slot, we'll free them before returning.
	 */
	it = kmem_cache_alloc(clock_timer_cache, KM_SLEEP);
	bzero(it, sizeof (itimer_t));
	mutex_init(&it->it_mutex, NULL, MUTEX_DEFAULT, NULL);
	sigq = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);

	mutex_enter(&p->p_lock);

	/*
	 * If this is this process' first timer, we need to attempt to allocate
	 * an array of timerstr_t pointers.  We drop p_lock to perform the
	 * allocation; if we return to discover that p_itimer is non-NULL
	 * (someone else raced us and installed theirs), we will free our
	 * allocation and drive on.
	 */
	if ((itp = p->p_itimer) == NULL) {
		mutex_exit(&p->p_lock);
		itp = kmem_zalloc(timer_max * sizeof (itimer_t *), KM_SLEEP);
		mutex_enter(&p->p_lock);

		if (p->p_itimer == NULL)
			p->p_itimer = itp;
		else {
			kmem_free(itp, timer_max * sizeof (itimer_t *));
			itp = p->p_itimer;
		}
	}

	/* find the first free slot; timers are created ITLK_LOCKED */
	for (i = 0; i < timer_max && itp[i] != NULL; i++)
		continue;

	if (i == timer_max) {
		/*
		 * We couldn't find a slot.  Drop p_lock, free the preallocated
		 * timer and sigqueue, and return an error.
		 */
		mutex_exit(&p->p_lock);
		kmem_cache_free(clock_timer_cache, it);
		kmem_free(sigq, sizeof (sigqueue_t));

		return (set_errno(EAGAIN));
	}

	ASSERT(i < timer_max && itp[i] == NULL);

	/*
	 * If we develop other notification mechanisms, this will need
	 * to call into (yet another) backend.
	 */
	sigq->sq_info.si_signo = ev.sigev_signo;
	if (evp == NULL)
		sigq->sq_info.si_value.sival_int = i;
	else
		sigq->sq_info.si_value = ev.sigev_value;
	sigq->sq_info.si_code = SI_TIMER;
	sigq->sq_info.si_pid = p->p_pid;
	sigq->sq_info.si_ctid = PRCTID(p);
	sigq->sq_info.si_zoneid = getzoneid();
	sigq->sq_info.si_uid = crgetruid(cr);
	sigq->sq_func = timer_signal;
	sigq->sq_next = NULL;
	sigq->sq_backptr = it;
	it->it_sigq = sigq;
	it->it_backend = backend;
	it->it_lock = ITLK_LOCKED;
	itp[i] = it;

	if (ev.sigev_notify == SIGEV_THREAD ||
	    ev.sigev_notify == SIGEV_PORT) {
		int port;

		/*
		 * This timer is programmed to use event port notification when
		 * the timer fires:
		 * - allocate a port event structure and prepare it to be sent
		 *   to the port as soon as the timer fires.
		 * - when the timer fires :
		 *   - if event structure was already sent to the port then this
		 *	is a timer fire overflow => increment overflow counter.
		 *   - otherwise send pre-allocated event structure to the port.
		 * - the events field of the port_event_t structure counts the
		 *   number of timer fired events.
		 * - The event structured is allocated using the
		 *   PORT_ALLOC_CACHED flag.
		 *   This flag indicates that the timer itself will manage and
		 *   free the event structure when required.
		 */

		it->it_flags |= IT_PORT;
		port = tim_pnevp.portnfy_port;

		/* associate timer as event source with the port */
		error = port_associate_ksource(port, PORT_SOURCE_TIMER,
		    (port_source_t **)&it->it_portsrc, timer_close_port,
		    (void *)it, NULL);
		if (error) {
			itp[i] = NULL;		/* clear slot */
			mutex_exit(&p->p_lock);
			kmem_cache_free(clock_timer_cache, it);
			kmem_free(sigq, sizeof (sigqueue_t));
			return (set_errno(error));
		}

		/* allocate an event structure/slot */
		error = port_alloc_event(port, PORT_ALLOC_SCACHED,
		    PORT_SOURCE_TIMER, &pkevp);
		if (error) {
			(void) port_dissociate_ksource(port, PORT_SOURCE_TIMER,
			    (port_source_t *)it->it_portsrc);
			itp[i] = NULL;		/* clear slot */
			mutex_exit(&p->p_lock);
			kmem_cache_free(clock_timer_cache, it);
			kmem_free(sigq, sizeof (sigqueue_t));
			return (set_errno(error));
		}

		/* initialize event data */
		port_init_event(pkevp, i, tim_pnevp.portnfy_user,
		    timer_port_callback, it);
		it->it_portev = pkevp;
		it->it_portfd = port;
	} else {
		if (ev.sigev_notify == SIGEV_SIGNAL)
			it->it_flags |= IT_SIGNAL;
	}

	mutex_exit(&p->p_lock);

	/*
	 * Call on the backend to verify the event argument (or return
	 * EINVAL if this clock type does not support timers).
	 */
	if ((error = backend->clk_timer_create(it, &ev)) != 0)
		goto err;

	it->it_lwp = ttolwp(curthread);
	it->it_proc = p;

	if (copyout(&i, tid, sizeof (timer_t)) != 0) {
		error = EFAULT;
		goto err;
	}

	/*
	 * If we're here, then we have successfully created the timer; we
	 * just need to release the timer and return.
	 */
	timer_release(p, it);

	return (0);

err:
	/*
	 * If we're here, an error has occurred late in the timer creation
	 * process.  We need to regrab p_lock, and delete the incipient timer.
	 * Since we never unlocked the timer (it was born locked), it's
	 * impossible for a removal to be pending.
	 */
	ASSERT(!(it->it_lock & ITLK_REMOVE));
	timer_delete_grabbed(p, i, it);

	return (set_errno(error));
}

/*
 * timer_gettime(3R) entry point: fetch the timer's current value and
 * interval from the backend and copy them out, converting for 32-bit
 * callers (EOVERFLOW if the value doesn't fit).
 */
int
timer_gettime(timer_t tid, itimerspec_t *val)
{
	proc_t *p = curproc;
	itimer_t *it;
	itimerspec_t when;
	int error;

	if ((it = timer_grab(p, tid)) == NULL)
		return (set_errno(EINVAL));

	error = it->it_backend->clk_timer_gettime(it, &when);

	timer_release(p, it);

	if (error == 0) {
		if (get_udatamodel() == DATAMODEL_NATIVE) {
			if (copyout(&when, val, sizeof (itimerspec_t)))
				error = EFAULT;
		} else {
			if (ITIMERSPEC_OVERFLOW(&when))
				error = EOVERFLOW;
			else {
				itimerspec32_t w32;

				ITIMERSPEC_TO_ITIMERSPEC32(&w32, &when)
				if (copyout(&w32, val, sizeof (itimerspec32_t)))
					error = EFAULT;
			}
		}
	}

	return (error ?
	    set_errno(error) : 0);
}

/*
 * timer_settime(3R) entry point: optionally return the previous setting
 * via timer_gettime(), copy in and validate the new setting (handling
 * 32-bit callers), and hand it to the clock backend.
 */
int
timer_settime(timer_t tid, int flags, itimerspec_t *val, itimerspec_t *oval)
{
	itimerspec_t when;
	itimer_t *it;
	proc_t *p = curproc;
	int error;

	if (oval != NULL) {
		if ((error = timer_gettime(tid, oval)) != 0)
			return (error);
	}

	if (get_udatamodel() == DATAMODEL_NATIVE) {
		if (copyin(val, &when, sizeof (itimerspec_t)))
			return (set_errno(EFAULT));
	} else {
		itimerspec32_t w32;

		if (copyin(val, &w32, sizeof (itimerspec32_t)))
			return (set_errno(EFAULT));

		ITIMERSPEC32_TO_ITIMERSPEC(&when, &w32);
	}

	/*
	 * A bad interval is only an error when the timer is actually being
	 * armed (it_value is non-zero); a disarm ignores the interval.
	 */
	if (itimerspecfix(&when.it_value) ||
	    (itimerspecfix(&when.it_interval) &&
	    timerspecisset(&when.it_value))) {
		return (set_errno(EINVAL));
	}

	if ((it = timer_grab(p, tid)) == NULL)
		return (set_errno(EINVAL));

	error = it->it_backend->clk_timer_settime(it, flags, &when);

	timer_release(p, it);

	return (error ? set_errno(error) : 0);
}

/*
 * timer_delete(3R) entry point: look up and delete the specified timer.
 */
int
timer_delete(timer_t tid)
{
	proc_t *p = curproc;
	itimer_t *it;

	if ((it = timer_grab(p, tid)) == NULL)
		return (set_errno(EINVAL));

	timer_delete_grabbed(p, tid, it);

	return (0);
}

/*
 * timer_getoverrun(3R) entry point: return the overrun count recorded at
 * the last signal delivery (see timer_signal()).
 */
int
timer_getoverrun(timer_t tid)
{
	int overrun;
	proc_t *p = curproc;
	itimer_t *it;

	if ((it = timer_grab(p, tid)) == NULL)
		return (set_errno(EINVAL));

	/*
	 * The it_overrun field is protected by p_lock; we need to acquire
	 * it before looking at the value.
	 */
	mutex_enter(&p->p_lock);
	overrun = it->it_overrun;
	mutex_exit(&p->p_lock);

	timer_release(p, it);

	return (overrun);
}

/*
 * Called when an LWP exits: dissociate this LWP from any timers it created.
 * Entered/exited with p_lock held, but will repeatedly drop and regrab
 * p_lock (timer_lock() may cv_wait()).
 */
void
timer_lwpexit(void)
{
	timer_t i;
	proc_t *p = curproc;
	klwp_t *lwp = ttolwp(curthread);
	itimer_t *it, **itp;

	ASSERT(MUTEX_HELD(&p->p_lock));

	if ((itp = p->p_itimer) == NULL)
		return;

	for (i = 0; i < timer_max; i++) {
		if ((it = itp[i]) == NULL)
			continue;

		timer_lock(p, it);

		if ((it->it_lock & ITLK_REMOVE) || it->it_lwp != lwp) {
			/*
			 * This timer is either being removed or it isn't
			 * associated with this lwp.
			 */
			timer_unlock(p, it);
			continue;
		}

		/*
		 * The LWP that created this timer is going away.  To the user,
		 * our behavior here is explicitly undefined.  We will simply
		 * null out the it_lwp field; if the LWP was bound to a CPU,
		 * the cyclic will stay bound to that CPU until the process
		 * exits.
		 */
		it->it_lwp = NULL;
		timer_unlock(p, it);
	}
}

/*
 * Called to notify of an LWP binding change.  Entered/exited with p_lock
 * held, but will repeatedly drop and regrab p_lock.
 */
void
timer_lwpbind()
{
	timer_t i;
	proc_t *p = curproc;
	klwp_t *lwp = ttolwp(curthread);
	itimer_t *it, **itp;

	ASSERT(MUTEX_HELD(&p->p_lock));

	if ((itp = p->p_itimer) == NULL)
		return;

	for (i = 0; i < timer_max; i++) {
		if ((it = itp[i]) == NULL)
			continue;

		timer_lock(p, it);

		if (!(it->it_lock & ITLK_REMOVE) && it->it_lwp == lwp) {
			/*
			 * Drop p_lock and jump into the backend.
			 */
			mutex_exit(&p->p_lock);
			it->it_backend->clk_timer_lwpbind(it);
			mutex_enter(&p->p_lock);
		}

		timer_unlock(p, it);
	}
}

/*
 * Called at process exit to delete all of the process's timers and free
 * the p_itimer array.  This function should only be called if p_itimer
 * is non-NULL.
 */
void
timer_exit(void)
{
	timer_t i;
	proc_t *p = curproc;

	ASSERT(p->p_itimer != NULL);

	for (i = 0; i < timer_max; i++)
		(void) timer_delete(i);

	kmem_free(p->p_itimer, timer_max * sizeof (itimer_t *));
	p->p_itimer = NULL;
}

/*
 * timer_port_callback() is a callback function which is associated with the
 * timer event and is activated just before the event is delivered to the user.
 * The timer uses this function to update/set the overflow counter and
 * to reenable the use of the event structure.
 */

/* ARGSUSED */
static int
timer_port_callback(void *arg, int *events, pid_t pid, int flag, void *evp)
{
	itimer_t *it = arg;

	mutex_enter(&it->it_mutex);
	if (curproc != it->it_proc) {
		/* can not deliver timer events to another proc */
		mutex_exit(&it->it_mutex);
		return (EACCES);
	}
	*events = it->it_pending;	/* 1 = 1 event, >1 # of overflows */
	it->it_pending = 0;		/* reinit overflow counter */
	/*
	 * This function can also be activated when the port is being closed
	 * and a timer event is already submitted to the port.
	 * In such a case the event port framework will use the
	 * close-callback function to notify the events sources.
	 * The timer close-callback function is timer_close_port() which
	 * will free all allocated resources (including the allocated
	 * port event structure).
	 * For that reason we don't need to check the value of flag here.
	 */
	mutex_exit(&it->it_mutex);
	return (0);
}

/*
 * port is being closed ... free all allocated port event structures
 * The delivered arg currently corresponds to the first timer associated with
 * the port and it is not useable in this case.
 * We have to scan the list of activated timers in the current proc and
 * compare them with the delivered port id.
 */

/* ARGSUSED */
static void
timer_close_port(void *arg, int port, pid_t pid, int lastclose)
{
	proc_t *p = curproc;
	timer_t tid;
	itimer_t *it;

	for (tid = 0; tid < timer_max; tid++) {
		if ((it = timer_grab(p, tid)) == NULL)
			continue;
		/*
		 * Unlocked check, then re-check the port binding under
		 * it_mutex before tearing down the event (it_portev is
		 * cleared under it_mutex).
		 */
		if (it->it_portev) {
			mutex_enter(&it->it_mutex);
			if (it->it_portfd == port) {
				port_kevent_t *pev;

				pev = (port_kevent_t *)it->it_portev;
				it->it_portev = NULL;
				it->it_flags &= ~IT_PORT;
				mutex_exit(&it->it_mutex);
				(void) port_remove_done_event(pev);
				port_free_event(pev);
			} else {
				mutex_exit(&it->it_mutex);
			}
		}
		timer_release(p, it);
	}
}