/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
 */

#include <sys/timer.h>
#include <sys/systm.h>
#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/debug.h>
#include <sys/policy.h>
#include <sys/port_impl.h>
#include <sys/port_kernel.h>
#include <sys/contract/process_impl.h>

static kmem_cache_t *clock_timer_cache;
static clock_backend_t *clock_backend[CLOCK_MAX];
static int timer_port_callback(void *, int *, pid_t, int, void *);
static void timer_close_port(void *, int, pid_t, int);

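/*
 * CLOCK_BACKEND() maps a clockid to its registered backend, evaluating to
 * NULL if the clockid is out of range or no backend has been registered
 * via clock_add_backend().
 */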
#define	CLOCK_BACKEND(clk) \
	((clk) < CLOCK_MAX && (clk) >= 0 ? clock_backend[(clk)] : NULL)

/*
 * Tunable to increase the maximum number of POSIX timers per-process. This
 * may _only_ be tuned in /etc/system or by patching the kernel binary; it
 * _cannot_ be tuned on a running system.
 */
int timer_max = _TIMER_MAX;

/*
 * timer_lock() locks the specified interval timer. It doesn't look at the
 * ITLK_REMOVE bit; it's up to callers to look at this if they need to
 * care. p_lock must be held on entry; it may be dropped and reacquired,
 * but timer_lock() will always return with p_lock held.
 *
 * Note that timer_create() doesn't call timer_lock(); it creates timers
 * with the ITLK_LOCKED bit explicitly set.
 */
static void
timer_lock(proc_t *p, itimer_t *it)
{
	ASSERT(MUTEX_HELD(&p->p_lock));

	while (it->it_lock & ITLK_LOCKED) {
		it->it_blockers++;
		cv_wait(&it->it_cv, &p->p_lock);
		it->it_blockers--;
	}

	it->it_lock |= ITLK_LOCKED;
}

/*
 * timer_unlock() unlocks the specified interval timer, waking up any
 * waiters. p_lock must be held on entry; it will not be dropped by
 * timer_unlock().
 */
static void
timer_unlock(proc_t *p, itimer_t *it)
{
	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(it->it_lock & ITLK_LOCKED);
	it->it_lock &= ~ITLK_LOCKED;
	cv_signal(&it->it_cv);
}

/*
 * timer_delete_locked() takes a proc pointer, timer ID and locked interval
 * timer, and deletes the specified timer. It must be called with p_lock
 * held, and cannot be called on a timer which already has ITLK_REMOVE set;
 * the caller must check this. timer_delete_locked() will set the ITLK_REMOVE
 * bit and will iteratively unlock and lock the interval timer until all
 * blockers have seen the ITLK_REMOVE and cleared out. It will then zero
 * out the specified entry in the p_itimer array, and call into the clock
 * backend to complete the deletion.
 *
 * This function will always return with p_lock held.
 */
static void
timer_delete_locked(proc_t *p, timer_t tid, itimer_t *it)
{
	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT(!(it->it_lock & ITLK_REMOVE));
	ASSERT(it->it_lock & ITLK_LOCKED);

	it->it_lock |= ITLK_REMOVE;

	/*
	 * If there are threads waiting to lock this timer, we'll unlock
	 * the timer, and block on the cv. Threads blocking our removal will
	 * have the opportunity to run; when they see the ITLK_REMOVE flag
	 * set, they will immediately unlock the timer.
	 */
	while (it->it_blockers) {
		timer_unlock(p, it);
		cv_wait(&it->it_cv, &p->p_lock);
		timer_lock(p, it);
	}

	ASSERT(p->p_itimer[tid] == it);
	p->p_itimer[tid] = NULL;

	/*
	 * No one is blocked on this timer, and no one will be (we've set
	 * p_itimer[tid] to be NULL; no one can find it). Now we call into
	 * the clock backend to delete the timer; it is up to the backend to
	 * guarantee that timer_fire() has completed (and will never again
	 * be called) for this timer.
	 */
	mutex_exit(&p->p_lock);

	it->it_backend->clk_timer_delete(it);

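	/*
	 * If the timer was associated with an event port, tear that
	 * association down. it_portev is rechecked under it_mutex because
	 * timer_close_port() may clear it concurrently.
	 */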
	if (it->it_portev) {
		mutex_enter(&it->it_mutex);
		if (it->it_portev) {
			port_kevent_t *pev;
			/* dissociate timer from the event port */
			(void) port_dissociate_ksource(it->it_portfd,
			    PORT_SOURCE_TIMER, (port_source_t *)it->it_portsrc);
			pev = (port_kevent_t *)it->it_portev;
			it->it_portev = NULL;
			it->it_flags &= ~IT_PORT;
			it->it_pending = 0;
			mutex_exit(&it->it_mutex);
			(void) port_remove_done_event(pev);
			port_free_event(pev);
		} else {
			mutex_exit(&it->it_mutex);
		}
	}

	mutex_enter(&p->p_lock);

	/*
	 * We need to be careful freeing the sigqueue for this timer;
	 * if a signal is pending, the sigqueue needs to be freed
	 * synchronously in siginfofree(). The need to free the sigqueue
	 * in siginfofree() is indicated by setting sq_func to NULL.
	 */
	if (it->it_pending > 0) {
		it->it_sigq->sq_func = NULL;
	} else {
		kmem_free(it->it_sigq, sizeof (sigqueue_t));
	}

	ASSERT(it->it_blockers == 0);
	kmem_cache_free(clock_timer_cache, it);
}

/*
 * timer_grab() and its companion routine, timer_release(), are wrappers
 * around timer_lock()/_unlock() which allow the timer_*(3R) routines to
 * (a) share error handling code and (b) not grab p_lock themselves. Routines
 * which are called with p_lock held (e.g. timer_lwpbind(), timer_lwpexit())
 * must call timer_lock()/_unlock() explicitly.
 *
 * timer_grab() takes a proc and a timer ID, and returns a pointer to a
 * locked interval timer. p_lock must _not_ be held on entry; timer_grab()
 * may acquire p_lock, but will always return with p_lock dropped.
 *
 * If timer_grab() fails, it will return NULL. timer_grab() will fail if
 * one or more of the following is true:
 *
 * (a)	The specified timer ID is out of range.
 *
 * (b)	The specified timer ID does not correspond to a timer ID returned
 *	from timer_create(3R).
 *
 * (c)	The specified timer ID is currently being removed.
 */
static itimer_t *
timer_grab(proc_t *p, timer_t tid)
{
	itimer_t **itp, *it;

	if (tid >= timer_max || tid < 0)
		return (NULL);

	mutex_enter(&p->p_lock);

	if ((itp = p->p_itimer) == NULL || (it = itp[tid]) == NULL) {
		mutex_exit(&p->p_lock);
		return (NULL);
	}

	timer_lock(p, it);

	if (it->it_lock & ITLK_REMOVE) {
		/*
		 * Someone is removing this timer; it will soon be invalid.
		 */
		timer_unlock(p, it);
		mutex_exit(&p->p_lock);
		return (NULL);
	}

	mutex_exit(&p->p_lock);

	return (it);
}

/*
 * timer_release() releases a timer acquired with timer_grab(). p_lock
 * should not be held on entry; timer_release() will acquire p_lock but
 * will drop it before returning.
 */
static void
timer_release(proc_t *p, itimer_t *it)
{
	mutex_enter(&p->p_lock);
	timer_unlock(p, it);
	mutex_exit(&p->p_lock);
}

/*
 * timer_delete_grabbed() deletes a timer acquired with timer_grab().
 * p_lock should not be held on entry; timer_delete_grabbed() will acquire
 * p_lock, but will drop it before returning.
 */
static void
timer_delete_grabbed(proc_t *p, timer_t tid, itimer_t *it)
{
	mutex_enter(&p->p_lock);
	timer_delete_locked(p, tid, it);
	mutex_exit(&p->p_lock);
}

void
clock_timer_init()
{
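	/*
	 * No constructor or destructor is needed: timer_create() bzero()s
	 * each itimer_t it allocates from this cache.
	 */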
	clock_timer_cache = kmem_cache_create("timer_cache",
	    sizeof (itimer_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
}

void
clock_add_backend(clockid_t clock, clock_backend_t *backend)
{
	ASSERT(clock >= 0 && clock < CLOCK_MAX);
	ASSERT(clock_backend[clock] == NULL);

	clock_backend[clock] = backend;
}

clock_backend_t *
clock_get_backend(clockid_t clock)
{
	if (clock < 0 || clock >= CLOCK_MAX)
		return (NULL);

	return (clock_backend[clock]);
}

int
clock_settime(clockid_t clock, timespec_t *tp)
{
	timespec_t t;
	clock_backend_t *backend;
	int error;

	if ((backend = CLOCK_BACKEND(clock)) == NULL)
		return (set_errno(EINVAL));

	if (secpolicy_settime(CRED()) != 0)
		return (set_errno(EPERM));

	if (get_udatamodel() == DATAMODEL_NATIVE) {
		if (copyin(tp, &t, sizeof (timespec_t)) != 0)
			return (set_errno(EFAULT));
	} else {
		timespec32_t t32;

		if (copyin(tp, &t32, sizeof (timespec32_t)) != 0)
			return (set_errno(EFAULT));

		TIMESPEC32_TO_TIMESPEC(&t, &t32);
	}

	if (itimerspecfix(&t))
		return (set_errno(EINVAL));

	error = backend->clk_clock_settime(&t);

	if (error)
		return (set_errno(error));

	return (0);
}

int
clock_gettime(clockid_t clock, timespec_t *tp)
{
	timespec_t t;
	clock_backend_t *backend;
	int error;

	if ((backend = CLOCK_BACKEND(clock)) == NULL)
		return (set_errno(EINVAL));

	error = backend->clk_clock_gettime(&t);

	if (error)
		return (set_errno(error));

	if (get_udatamodel() == DATAMODEL_NATIVE) {
		if (copyout(&t, tp, sizeof (timespec_t)) != 0)
			return (set_errno(EFAULT));
	} else {
		timespec32_t t32;

		if (TIMESPEC_OVERFLOW(&t))
			return (set_errno(EOVERFLOW));
		TIMESPEC_TO_TIMESPEC32(&t32, &t);

		if (copyout(&t32, tp, sizeof (timespec32_t)) != 0)
			return (set_errno(EFAULT));
	}

	return (0);
}

int
clock_getres(clockid_t clock, timespec_t *tp)
{
	timespec_t t;
	clock_backend_t *backend;
	int error;

	/*
	 * Strangely, the standard defines clock_getres() with a NULL tp
	 * to do nothing (regardless of the validity of the specified
	 * clock_id). Go figure.
	 */
	if (tp == NULL)
		return (0);

	if ((backend = CLOCK_BACKEND(clock)) == NULL)
		return (set_errno(EINVAL));

	error = backend->clk_clock_getres(&t);

	if (error)
		return (set_errno(error));

	if (get_udatamodel() == DATAMODEL_NATIVE) {
		if (copyout(&t, tp, sizeof (timespec_t)) != 0)
			return (set_errno(EFAULT));
	} else {
		timespec32_t t32;

		if (TIMESPEC_OVERFLOW(&t))
			return (set_errno(EOVERFLOW));
		TIMESPEC_TO_TIMESPEC32(&t32, &t);

		if (copyout(&t32, tp, sizeof (timespec32_t)) != 0)
			return (set_errno(EFAULT));
	}

	return (0);
}

void
timer_signal(sigqueue_t *sigq)
{
	itimer_t *it = (itimer_t *)sigq->sq_backptr;

	/*
	 * There are some conditions during a fork or an exit when we can
	 * call siginfofree() without p_lock held. To prevent a race
	 * between timer_signal() and timer_fire() with regard to it_pending,
	 * we therefore acquire it_mutex in both paths.
	 */
	mutex_enter(&it->it_mutex);
	ASSERT(it->it_pending > 0);
	it->it_overrun = it->it_pending - 1;
	it->it_pending = 0;
	mutex_exit(&it->it_mutex);
}

/*
 * This routine is called from the clock backend.
 */
static void
timer_fire(itimer_t *it)
{
	proc_t *p;
	int proc_lock_held;

	if (it->it_flags & IT_SIGNAL) {
		/*
		 * See the comment in timer_signal() for why it is not
		 * sufficient to only grab p_lock here. Because p_lock can be
		 * held on entry to timer_signal(), the lock ordering is
		 * necessarily p_lock before it_mutex.
		 */

		p = it->it_proc;
		proc_lock_held = 1;
		mutex_enter(&p->p_lock);
	} else {
		/*
		 * IT_PORT:
		 * If a timer was ever programmed to send events to a port,
		 * the IT_PORT flag will remain set until:
		 * a) the timer is deleted (see timer_delete_locked()) or
		 * b) the port is being closed (see timer_close_port()).
		 * Both cases are synchronized with the it_mutex.
		 * We don't need to use the p_lock because it is only
		 * required in the IT_SIGNAL case.
		 * If IT_PORT was set and the port is being closed then
		 * the timer notification is set to NONE. In such a case
		 * the timer itself and the it_pending counter remain active
		 * until the application deletes the counter or the process
		 * exits.
		 */
		proc_lock_held = 0;
	}
	mutex_enter(&it->it_mutex);

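	/*
	 * If a previous firing is still pending, count this one as an
	 * overrun (saturating at INT_MAX) rather than queueing another
	 * notification; timer_signal() folds the pending count into
	 * it_overrun.
	 */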
	if (it->it_pending > 0) {
		if (it->it_pending < INT_MAX)
			it->it_pending++;
		mutex_exit(&it->it_mutex);
	} else {
		if (it->it_flags & IT_PORT) {
			it->it_pending = 1;
			port_send_event((port_kevent_t *)it->it_portev);
			mutex_exit(&it->it_mutex);
		} else if (it->it_flags & IT_SIGNAL) {
			it->it_pending = 1;
			mutex_exit(&it->it_mutex);
			sigaddqa(p, NULL, it->it_sigq);
		} else {
			mutex_exit(&it->it_mutex);
		}
	}

	if (proc_lock_held)
		mutex_exit(&p->p_lock);
}

int
timer_create(clockid_t clock, struct sigevent *evp, timer_t *tid)
{
	struct sigevent ev;
	proc_t *p = curproc;
	clock_backend_t *backend;
	itimer_t *it, **itp;
	sigqueue_t *sigq;
	cred_t *cr = CRED();
	int error = 0;
	timer_t i;
	port_notify_t tim_pnevp;
	port_kevent_t *pkevp = NULL;

	if ((backend = CLOCK_BACKEND(clock)) == NULL)
		return (set_errno(EINVAL));

	if (evp != NULL) {
		/*
		 * Short copyin() for binary compatibility: fetch an
		 * oldsigevent to determine how much to copy in.
		 */
		if (get_udatamodel() == DATAMODEL_NATIVE) {
			if (copyin(evp, &ev, sizeof (struct oldsigevent)))
				return (set_errno(EFAULT));

			if (ev.sigev_notify == SIGEV_PORT ||
			    ev.sigev_notify == SIGEV_THREAD) {
				if (copyin(ev.sigev_value.sival_ptr, &tim_pnevp,
				    sizeof (port_notify_t)))
					return (set_errno(EFAULT));
			}
#ifdef	_SYSCALL32_IMPL
		} else {
			struct sigevent32 ev32;
			port_notify32_t tim_pnevp32;

			if (copyin(evp, &ev32, sizeof (struct oldsigevent32)))
				return (set_errno(EFAULT));
			ev.sigev_notify = ev32.sigev_notify;
			ev.sigev_signo = ev32.sigev_signo;
			/*
			 * See comment in sigqueue32() on handling of 32-bit
			 * sigvals in a 64-bit kernel.
			 */
			ev.sigev_value.sival_int = ev32.sigev_value.sival_int;
			if (ev.sigev_notify == SIGEV_PORT ||
			    ev.sigev_notify == SIGEV_THREAD) {
				if (copyin((void *)(uintptr_t)
				    ev32.sigev_value.sival_ptr,
				    (void *)&tim_pnevp32,
				    sizeof (port_notify32_t)))
					return (set_errno(EFAULT));
				tim_pnevp.portnfy_port =
				    tim_pnevp32.portnfy_port;
				tim_pnevp.portnfy_user =
				    (void *)(uintptr_t)tim_pnevp32.portnfy_user;
			}
#endif
		}
		switch (ev.sigev_notify) {
		case SIGEV_NONE:
			break;
		case SIGEV_SIGNAL:
			if (ev.sigev_signo < 1 || ev.sigev_signo >= NSIG)
				return (set_errno(EINVAL));
			break;
		case SIGEV_THREAD:
		case SIGEV_PORT:
			break;
		default:
			return (set_errno(EINVAL));
		}
	} else {
		/*
		 * Use the clock's default sigevent (this is a structure copy).
		 */
		ev = backend->clk_default;
	}

	/*
	 * We'll allocate our timer and sigqueue now, before we grab p_lock.
	 * If we can't find an empty slot, we'll free them before returning.
	 */
	it = kmem_cache_alloc(clock_timer_cache, KM_SLEEP);
	bzero(it, sizeof (itimer_t));
	mutex_init(&it->it_mutex, NULL, MUTEX_DEFAULT, NULL);
	sigq = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);

	mutex_enter(&p->p_lock);

	/*
	 * If this is this process' first timer, we need to attempt to allocate
	 * an array of itimer_t pointers. We drop p_lock to perform the
	 * allocation; if we return to discover that p_itimer is non-NULL,
	 * we will free our allocation and drive on.
	 */
	if ((itp = p->p_itimer) == NULL) {
		mutex_exit(&p->p_lock);
		itp = kmem_zalloc(timer_max * sizeof (itimer_t *), KM_SLEEP);
		mutex_enter(&p->p_lock);

		if (p->p_itimer == NULL)
			p->p_itimer = itp;
		else {
			kmem_free(itp, timer_max * sizeof (itimer_t *));
			itp = p->p_itimer;
		}
	}

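	/* Find the first unused slot in the timer array. */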
	for (i = 0; i < timer_max && itp[i] != NULL; i++)
		continue;

	if (i == timer_max) {
		/*
		 * We couldn't find a slot. Drop p_lock, free the preallocated
		 * timer and sigqueue, and return an error.
		 */
		mutex_exit(&p->p_lock);
		kmem_cache_free(clock_timer_cache, it);
		kmem_free(sigq, sizeof (sigqueue_t));

		return (set_errno(EAGAIN));
	}

	ASSERT(i < timer_max && itp[i] == NULL);

	/*
	 * If we develop other notification mechanisms, this will need
	 * to call into (yet another) backend.
	 */
	sigq->sq_info.si_signo = ev.sigev_signo;
	if (evp == NULL)
		sigq->sq_info.si_value.sival_int = i;
	else
		sigq->sq_info.si_value = ev.sigev_value;
	sigq->sq_info.si_code = SI_TIMER;
	sigq->sq_info.si_pid = p->p_pid;
	sigq->sq_info.si_ctid = PRCTID(p);
	sigq->sq_info.si_zoneid = getzoneid();
	sigq->sq_info.si_uid = crgetruid(cr);
	sigq->sq_func = timer_signal;
	sigq->sq_next = NULL;
	sigq->sq_backptr = it;
	it->it_sigq = sigq;
	it->it_backend = backend;
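	/* Timers are born locked; see the block comment above timer_lock(). */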
	it->it_lock = ITLK_LOCKED;
	itp[i] = it;

	if (ev.sigev_notify == SIGEV_THREAD ||
	    ev.sigev_notify == SIGEV_PORT) {
		int port;

		/*
		 * This timer is programmed to use event port notification when
		 * the timer fires:
		 * - allocate a port event structure and prepare it to be sent
		 *   to the port as soon as the timer fires.
		 * - when the timer fires:
		 *   - if the event structure was already sent to the port
		 *     then this is a timer fire overflow => increment the
		 *     overflow counter.
		 *   - otherwise send the pre-allocated event structure to the
		 *     port.
		 * - the events field of the port_event_t structure counts the
		 *   number of timer fired events.
		 * - the event structure is allocated using the
		 *   PORT_ALLOC_SCACHED flag.
		 *   This flag indicates that the timer itself will manage and
		 *   free the event structure when required.
		 */

		it->it_flags |= IT_PORT;
		port = tim_pnevp.portnfy_port;

		/* associate timer as event source with the port */
		error = port_associate_ksource(port, PORT_SOURCE_TIMER,
		    (port_source_t **)&it->it_portsrc, timer_close_port,
		    (void *)it, NULL);
		if (error) {
			itp[i] = NULL;		/* clear slot */
			mutex_exit(&p->p_lock);
			kmem_cache_free(clock_timer_cache, it);
			kmem_free(sigq, sizeof (sigqueue_t));
			return (set_errno(error));
		}

		/* allocate an event structure/slot */
		error = port_alloc_event(port, PORT_ALLOC_SCACHED,
		    PORT_SOURCE_TIMER, &pkevp);
		if (error) {
			(void) port_dissociate_ksource(port, PORT_SOURCE_TIMER,
			    (port_source_t *)it->it_portsrc);
			itp[i] = NULL;		/* clear slot */
			mutex_exit(&p->p_lock);
			kmem_cache_free(clock_timer_cache, it);
			kmem_free(sigq, sizeof (sigqueue_t));
			return (set_errno(error));
		}

		/* initialize event data */
		port_init_event(pkevp, i, tim_pnevp.portnfy_user,
		    timer_port_callback, it);
		it->it_portev = pkevp;
		it->it_portfd = port;
	} else {
		if (ev.sigev_notify == SIGEV_SIGNAL)
			it->it_flags |= IT_SIGNAL;
	}

	mutex_exit(&p->p_lock);

	/*
	 * Call on the backend to verify the event argument (or return
	 * EINVAL if this clock type does not support timers).
	 */
	if ((error = backend->clk_timer_create(it, timer_fire)) != 0)
		goto err;

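	/*
	 * Record the creating LWP and process; timer_lwpexit() and
	 * timer_lwpbind() key off of it_lwp.
	 */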
	it->it_lwp = ttolwp(curthread);
	it->it_proc = p;

	if (copyout(&i, tid, sizeof (timer_t)) != 0) {
		error = EFAULT;
		goto err;
	}

	/*
	 * If we're here, then we have successfully created the timer; we
	 * just need to release the timer and return.
	 */
	timer_release(p, it);

	return (0);

err:
	/*
	 * If we're here, an error has occurred late in the timer creation
	 * process. We need to regrab p_lock, and delete the incipient timer.
	 * Since we never unlocked the timer (it was born locked), it's
	 * impossible for a removal to be pending.
	 */
	ASSERT(!(it->it_lock & ITLK_REMOVE));
	timer_delete_grabbed(p, i, it);

	return (set_errno(error));
}

int
timer_gettime(timer_t tid, itimerspec_t *val)
{
	proc_t *p = curproc;
	itimer_t *it;
	itimerspec_t when;
	int error;

	if ((it = timer_grab(p, tid)) == NULL)
		return (set_errno(EINVAL));

	error = it->it_backend->clk_timer_gettime(it, &when);

	timer_release(p, it);

	if (error == 0) {
		if (get_udatamodel() == DATAMODEL_NATIVE) {
			if (copyout(&when, val, sizeof (itimerspec_t)))
				error = EFAULT;
		} else {
			if (ITIMERSPEC_OVERFLOW(&when))
				error = EOVERFLOW;
			else {
				itimerspec32_t w32;

				ITIMERSPEC_TO_ITIMERSPEC32(&w32, &when)
				if (copyout(&w32, val, sizeof (itimerspec32_t)))
					error = EFAULT;
			}
		}
	}

	return (error ? set_errno(error) : 0);
}

int
timer_settime(timer_t tid, int flags, itimerspec_t *val, itimerspec_t *oval)
{
	itimerspec_t when;
	itimer_t *it;
	proc_t *p = curproc;
	int error;

	if (oval != NULL) {
		if ((error = timer_gettime(tid, oval)) != 0)
			return (error);
	}

	if (get_udatamodel() == DATAMODEL_NATIVE) {
		if (copyin(val, &when, sizeof (itimerspec_t)))
			return (set_errno(EFAULT));
	} else {
		itimerspec32_t w32;

		if (copyin(val, &w32, sizeof (itimerspec32_t)))
			return (set_errno(EFAULT));

		ITIMERSPEC32_TO_ITIMERSPEC(&when, &w32);
	}

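	/*
	 * The value must be valid; the interval need only be valid if the
	 * timer is actually being armed (i.e. it_value is non-zero).
	 */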
	if (itimerspecfix(&when.it_value) ||
	    (itimerspecfix(&when.it_interval) &&
	    timerspecisset(&when.it_value))) {
		return (set_errno(EINVAL));
	}

	if ((it = timer_grab(p, tid)) == NULL)
		return (set_errno(EINVAL));

	error = it->it_backend->clk_timer_settime(it, flags, &when);

	timer_release(p, it);

	return (error ? set_errno(error) : 0);
}

int
timer_delete(timer_t tid)
{
	proc_t *p = curproc;
	itimer_t *it;

	if ((it = timer_grab(p, tid)) == NULL)
		return (set_errno(EINVAL));

	timer_delete_grabbed(p, tid, it);

	return (0);
}

int
timer_getoverrun(timer_t tid)
{
	int overrun;
	proc_t *p = curproc;
	itimer_t *it;

	if ((it = timer_grab(p, tid)) == NULL)
		return (set_errno(EINVAL));

	/*
	 * The it_overrun field is protected by p_lock; we need to acquire
	 * it before looking at the value.
	 */
	mutex_enter(&p->p_lock);
	overrun = it->it_overrun;
	mutex_exit(&p->p_lock);

	timer_release(p, it);

	return (overrun);
}

/*
 * Entered/exited with p_lock held, but will repeatedly drop and regrab p_lock.
 */
void
timer_lwpexit(void)
{
	timer_t i;
	proc_t *p = curproc;
	klwp_t *lwp = ttolwp(curthread);
	itimer_t *it, **itp;

	ASSERT(MUTEX_HELD(&p->p_lock));

	if ((itp = p->p_itimer) == NULL)
		return;

	for (i = 0; i < timer_max; i++) {
		if ((it = itp[i]) == NULL)
			continue;

		timer_lock(p, it);

		if ((it->it_lock & ITLK_REMOVE) || it->it_lwp != lwp) {
			/*
			 * This timer is either being removed or it isn't
			 * associated with this lwp.
			 */
			timer_unlock(p, it);
			continue;
		}

		/*
		 * The LWP that created this timer is going away. To the user,
		 * our behavior here is explicitly undefined. We will simply
		 * null out the it_lwp field; if the LWP was bound to a CPU,
		 * the cyclic will stay bound to that CPU until the process
		 * exits.
		 */
		it->it_lwp = NULL;
		timer_unlock(p, it);
	}
}

/*
 * Called to notify of an LWP binding change. Entered/exited with p_lock
 * held, but will repeatedly drop and regrab p_lock.
 */
void
timer_lwpbind()
{
	timer_t i;
	proc_t *p = curproc;
	klwp_t *lwp = ttolwp(curthread);
	itimer_t *it, **itp;

	ASSERT(MUTEX_HELD(&p->p_lock));

	if ((itp = p->p_itimer) == NULL)
		return;

	for (i = 0; i < timer_max; i++) {
		if ((it = itp[i]) == NULL)
			continue;

		timer_lock(p, it);

		if (!(it->it_lock & ITLK_REMOVE) && it->it_lwp == lwp) {
			/*
			 * Drop p_lock and jump into the backend.
			 */
			mutex_exit(&p->p_lock);
			it->it_backend->clk_timer_lwpbind(it);
			mutex_enter(&p->p_lock);
		}

		timer_unlock(p, it);
	}
}

/*
 * This function should only be called if p_itimer is non-NULL.
 */
void
timer_exit(void)
{
	timer_t i;
	proc_t *p = curproc;

	ASSERT(p->p_itimer != NULL);

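	/*
	 * timer_delete() fails benignly (EINVAL) for unused slots; we
	 * ignore its return value.
	 */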
	for (i = 0; i < timer_max; i++)
		(void) timer_delete(i);

	kmem_free(p->p_itimer, timer_max * sizeof (itimer_t *));
	p->p_itimer = NULL;
}

/*
 * timer_port_callback() is a callback function which is associated with the
 * timer event and is activated just before the event is delivered to the user.
 * The timer uses this function to update/set the overflow counter and
 * to reenable the use of the event structure.
 */

/* ARGSUSED */
static int
timer_port_callback(void *arg, int *events, pid_t pid, int flag, void *evp)
{
	itimer_t *it = arg;

	mutex_enter(&it->it_mutex);
	if (curproc != it->it_proc) {
		/* cannot deliver timer events to another proc */
		mutex_exit(&it->it_mutex);
		return (EACCES);
	}
	*events = it->it_pending;	/* 1 = 1 event, >1 # of overflows */
	it->it_pending = 0;		/* reinit overflow counter */
	/*
	 * This function can also be activated when the port is being closed
	 * and a timer event is already submitted to the port.
	 * In such a case the event port framework will use the
	 * close-callback function to notify the events sources.
	 * The timer close-callback function is timer_close_port() which
	 * will free all allocated resources (including the allocated
	 * port event structure).
	 * For that reason we don't need to check the value of flag here.
	 */
	mutex_exit(&it->it_mutex);
	return (0);
}

/*
 * The port is being closed; free all allocated port event structures.
 * The arg delivered here corresponds to the first timer associated with
 * the port and is not usable in this case.
 * We have to scan the list of activated timers in the current proc and
 * compare them with the delivered port id.
 */

/* ARGSUSED */
static void
timer_close_port(void *arg, int port, pid_t pid, int lastclose)
{
	proc_t *p = curproc;
	timer_t tid;
	itimer_t *it;

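	/*
	 * Walk every timer in the process; timer_grab() skips empty slots
	 * and timers that are already being removed.
	 */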
	for (tid = 0; tid < timer_max; tid++) {
		if ((it = timer_grab(p, tid)) == NULL)
			continue;
		if (it->it_portev) {
			mutex_enter(&it->it_mutex);
			if (it->it_portfd == port) {
				port_kevent_t *pev;
				pev = (port_kevent_t *)it->it_portev;
				it->it_portev = NULL;
				it->it_flags &= ~IT_PORT;
				mutex_exit(&it->it_mutex);
				(void) port_remove_done_event(pev);
				port_free_event(pev);
			} else {
				mutex_exit(&it->it_mutex);
			}
		}
		timer_release(p, it);
	}
}