// SPDX-License-Identifier: GPL-2.0+
/*
 * 2002-10-15  Posix Clocks & timers
 *             by George Anzinger george@mvista.com
 *             Copyright (C) 2002 2003 by MontaVista Software.
 *
 * 2004-06-01  Fix CLOCK_REALTIME clock/timer TIMER_ABSTIME bug.
 *             Copyright (C) 2004 Boris Hu
 *
 * These are all the functions necessary to implement POSIX clocks & timers
 */
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/sched/task.h>

#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/hash.h>
#include <linux/posix-clock.h>
#include <linux/posix-timers.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/export.h>
#include <linux/hashtable.h>
#include <linux/compat.h>
#include <linux/nospec.h>
#include <linux/time_namespace.h>

#include "timekeeping.h"
#include "posix-timers.h"

static struct kmem_cache *posix_timers_cache;

/*
 * Timers are managed in a hash table for lockless lookup. The hash key is
 * constructed from current::signal and the timer ID and the timer is
 * matched against current::signal and the timer ID when walking the hash
 * bucket list.
 *
 * This allows checkpoint/restore to reconstruct the exact timer IDs for
 * a process.
 */
static DEFINE_HASHTABLE(posix_timers_hashtable, 9);
static DEFINE_SPINLOCK(hash_lock);

static const struct k_clock * const posix_clocks[];
static const struct k_clock *clockid_to_kclock(const clockid_t id);
static const struct k_clock clock_realtime, clock_monotonic;

/* SIGEV_THREAD_ID cannot share a bit with the other SIGEV values. */
#if SIGEV_THREAD_ID != (SIGEV_THREAD_ID & \
			~(SIGEV_SIGNAL | SIGEV_NONE | SIGEV_THREAD))
#error "SIGEV_THREAD_ID must not share bit with other SIGEV values!"
#endif

static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags);

#define lock_timer(tid, flags)						\
({	struct k_itimer *__timr;					\
	__cond_lock(&__timr->it_lock, __timr = __lock_timer(tid, flags)); \
	__timr;								\
})

static int hash(struct signal_struct *sig, unsigned int nr)
{
	return hash_32(hash32_ptr(sig) ^ nr, HASH_BITS(posix_timers_hashtable));
}

static struct k_itimer *__posix_timers_find(struct hlist_head *head,
					    struct signal_struct *sig,
					    timer_t id)
{
	struct k_itimer *timer;

	hlist_for_each_entry_rcu(timer, head, t_hash, lockdep_is_held(&hash_lock)) {
		/* timer->it_signal can be set concurrently */
		if ((READ_ONCE(timer->it_signal) == sig) && (timer->it_id == id))
			return timer;
	}
	return NULL;
}

static struct k_itimer *posix_timer_by_id(timer_t id)
{
	struct signal_struct *sig = current->signal;
	struct hlist_head *head = &posix_timers_hashtable[hash(sig, id)];

	return __posix_timers_find(head, sig, id);
}

static int posix_timer_add(struct k_itimer *timer)
{
	struct signal_struct *sig = current->signal;
	struct hlist_head *head;
	unsigned int cnt, id;

	/*
	 * FIXME: Replace this by a per signal struct xarray once there is
	 * a plan to handle the resulting CRIU regression gracefully.
	 */
	for (cnt = 0; cnt <= INT_MAX; cnt++) {
		spin_lock(&hash_lock);
		id = sig->next_posix_timer_id;

		/* Write the next ID back. Clamp it to the positive space */
		sig->next_posix_timer_id = (id + 1) & INT_MAX;

		head = &posix_timers_hashtable[hash(sig, id)];
		if (!__posix_timers_find(head, sig, id)) {
			hlist_add_head_rcu(&timer->t_hash, head);
			spin_unlock(&hash_lock);
			return id;
		}
		spin_unlock(&hash_lock);
	}
	/* POSIX return code when no timer ID could be allocated */
	return -EAGAIN;
}
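/*
 * Illustrative user space sketch (not kernel code): because the allocator
 * above hands out the lowest free IDs sequentially per process, the IDs
 * returned by timer_create(2) are small, predictable integers, which is
 * what lets checkpoint/restore recreate them. A minimal demo, assuming a
 * Linux system where the libc passes the kernel timer ID through in
 * timer_t (link with -lrt on older glibc):
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	int main(void)
 *	{
 *		struct sigevent sev = { .sigev_notify = SIGEV_NONE };
 *		timer_t t1, t2;
 *
 *		if (timer_create(CLOCK_MONOTONIC, &sev, &t1) ||
 *		    timer_create(CLOCK_MONOTONIC, &sev, &t2))
 *			return 1;
 *		// Typically prints "0 1": IDs are allocated sequentially.
 *		printf("%ld %ld\n", (long)t1, (long)t2);
 *		return 0;
 *	}
 */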
static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
{
	spin_unlock_irqrestore(&timr->it_lock, flags);
}

static int posix_get_realtime_timespec(clockid_t which_clock, struct timespec64 *tp)
{
	ktime_get_real_ts64(tp);
	return 0;
}

static ktime_t posix_get_realtime_ktime(clockid_t which_clock)
{
	return ktime_get_real();
}

static int posix_clock_realtime_set(const clockid_t which_clock,
				    const struct timespec64 *tp)
{
	return do_sys_settimeofday64(tp, NULL);
}

static int posix_clock_realtime_adj(const clockid_t which_clock,
				    struct __kernel_timex *t)
{
	return do_adjtimex(t);
}

static int posix_get_monotonic_timespec(clockid_t which_clock, struct timespec64 *tp)
{
	ktime_get_ts64(tp);
	timens_add_monotonic(tp);
	return 0;
}

static ktime_t posix_get_monotonic_ktime(clockid_t which_clock)
{
	return ktime_get();
}

static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec64 *tp)
{
	ktime_get_raw_ts64(tp);
	timens_add_monotonic(tp);
	return 0;
}

static int posix_get_realtime_coarse(clockid_t which_clock, struct timespec64 *tp)
{
	ktime_get_coarse_real_ts64(tp);
	return 0;
}

static int posix_get_monotonic_coarse(clockid_t which_clock,
				      struct timespec64 *tp)
{
	ktime_get_coarse_ts64(tp);
	timens_add_monotonic(tp);
	return 0;
}

static int posix_get_coarse_res(const clockid_t which_clock, struct timespec64 *tp)
{
	*tp = ktime_to_timespec64(KTIME_LOW_RES);
	return 0;
}

static int posix_get_boottime_timespec(const clockid_t which_clock, struct timespec64 *tp)
{
	ktime_get_boottime_ts64(tp);
	timens_add_boottime(tp);
	return 0;
}

static ktime_t posix_get_boottime_ktime(const clockid_t which_clock)
{
	return ktime_get_boottime();
}

static int posix_get_tai_timespec(clockid_t which_clock, struct timespec64 *tp)
{
	ktime_get_clocktai_ts64(tp);
	return 0;
}

static ktime_t posix_get_tai_ktime(clockid_t which_clock)
{
	return ktime_get_clocktai();
}

static int posix_get_hrtimer_res(clockid_t which_clock, struct timespec64 *tp)
{
	tp->tv_sec = 0;
	tp->tv_nsec = hrtimer_resolution;
	return 0;
}

static __init int init_posix_timers(void)
{
	posix_timers_cache = kmem_cache_create("posix_timers_cache",
					       sizeof(struct k_itimer), 0,
					       SLAB_PANIC | SLAB_ACCOUNT, NULL);
	return 0;
}
__initcall(init_posix_timers);

/*
 * The siginfo si_overrun field and the return value of timer_getoverrun(2)
 * are of type int. Clamp the overrun value to INT_MAX
 */
static inline int timer_overrun_to_int(struct k_itimer *timr, int baseval)
{
	s64 sum = timr->it_overrun_last + (s64)baseval;

	return sum > (s64)INT_MAX ? INT_MAX : (int)sum;
}
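/*
 * Illustrative user space sketch (not kernel code): the getters above are
 * what clock_gettime(2) dispatches to for the various clock IDs. A small
 * demo reading a few of them on a POSIX system:
 *
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	static void show(const char *name, clockid_t id)
 *	{
 *		struct timespec ts;
 *
 *		if (!clock_gettime(id, &ts))
 *			printf("%-22s %lld.%09ld\n", name,
 *			       (long long)ts.tv_sec, ts.tv_nsec);
 *	}
 *
 *	int main(void)
 *	{
 *		show("CLOCK_REALTIME", CLOCK_REALTIME);
 *		show("CLOCK_MONOTONIC", CLOCK_MONOTONIC);
 *		show("CLOCK_MONOTONIC_RAW", CLOCK_MONOTONIC_RAW);
 *		show("CLOCK_BOOTTIME", CLOCK_BOOTTIME);
 *		show("CLOCK_TAI", CLOCK_TAI);
 *		return 0;
 *	}
 */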
static void common_hrtimer_rearm(struct k_itimer *timr)
{
	struct hrtimer *timer = &timr->it.real.timer;

	timr->it_overrun += hrtimer_forward(timer, timer->base->get_time(),
					    timr->it_interval);
	hrtimer_restart(timer);
}

/*
 * This function is called from the signal delivery code. It decides
 * whether the signal should be dropped and rearms interval timers.
 */
bool posixtimer_deliver_signal(struct kernel_siginfo *info)
{
	struct k_itimer *timr;
	unsigned long flags;
	bool ret = false;

	/*
	 * Release siglock to ensure proper locking order versus
	 * timr::it_lock. Keep interrupts disabled.
	 */
	spin_unlock(&current->sighand->siglock);

	timr = lock_timer(info->si_tid, &flags);
	if (!timr)
		goto out;

	if (timr->it_interval && timr->it_signal_seq == info->si_sys_private) {
		timr->kclock->timer_rearm(timr);

		timr->it_status = POSIX_TIMER_ARMED;
		timr->it_overrun_last = timr->it_overrun;
		timr->it_overrun = -1LL;
		++timr->it_signal_seq;

		info->si_overrun = timer_overrun_to_int(timr, info->si_overrun);
	}
	ret = true;

	unlock_timer(timr, flags);
out:
	spin_lock(&current->sighand->siglock);

	/* Don't expose the si_sys_private value to userspace */
	info->si_sys_private = 0;
	return ret;
}

int posix_timer_queue_signal(struct k_itimer *timr)
{
	enum posix_timer_state state = POSIX_TIMER_DISARMED;
	int ret, si_private = 0;
	enum pid_type type;

	lockdep_assert_held(&timr->it_lock);

	if (timr->it_interval) {
		state = POSIX_TIMER_REQUEUE_PENDING;
		si_private = ++timr->it_signal_seq;
	}
	timr->it_status = state;

	type = !(timr->it_sigev_notify & SIGEV_THREAD_ID) ? PIDTYPE_TGID : PIDTYPE_PID;
	ret = send_sigqueue(timr->sigq, timr->it_pid, type, si_private);
	/* If we failed to send the signal the timer stops. */
	return ret > 0;
}

/*
 * This function gets called when a POSIX.1b interval timer expires from
 * the HRTIMER interrupt (soft interrupt on RT kernels).
 *
 * Handles CLOCK_REALTIME, CLOCK_MONOTONIC, CLOCK_BOOTTIME and CLOCK_TAI
 * based timers.
 */
static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
{
	struct k_itimer *timr = container_of(timer, struct k_itimer, it.real.timer);
	enum hrtimer_restart ret = HRTIMER_NORESTART;
	unsigned long flags;

	spin_lock_irqsave(&timr->it_lock, flags);

	if (posix_timer_queue_signal(timr)) {
		/*
		 * The signal was not queued due to SIG_IGN. As a
		 * consequence the timer is not going to be rearmed from
		 * the signal delivery path. But as a real signal handler
		 * can be installed later the timer must be rearmed here.
		 */
		if (timr->it_interval != 0) {
			ktime_t now = hrtimer_cb_get_time(timer);

			/*
			 * FIXME: What we really want is to stop this
			 * timer completely and restart it in case the
			 * SIG_IGN is removed. This is a non trivial
			 * change to the signal handling code.
			 *
			 * For now let timers with an interval less than a
			 * jiffy expire every jiffy and recheck for a
			 * valid signal handler.
			 *
			 * This avoids interrupt starvation in case of a
			 * very small interval, which would expire the
			 * timer immediately again.
			 *
			 * Moving now ahead of time by one jiffy tricks
			 * hrtimer_forward() to expire the timer later,
			 * while it still maintains the overrun accuracy
			 * for the price of a slight inconsistency in the
			 * timer_gettime() case. This is at least better
			 * than a timer storm.
			 *
			 * Only required when high resolution timers are
			 * enabled as the periodic tick based timers are
			 * automatically aligned to the next tick.
			 */
			if (IS_ENABLED(CONFIG_HIGH_RES_TIMERS)) {
				ktime_t kj = TICK_NSEC;

				if (timr->it_interval < kj)
					now = ktime_add(now, kj);
			}

			timr->it_overrun += hrtimer_forward(timer, now, timr->it_interval);
			ret = HRTIMER_RESTART;
			++timr->it_signal_seq;
			timr->it_status = POSIX_TIMER_ARMED;
		}
	}

	unlock_timer(timr, flags);
	return ret;
}
static struct pid *good_sigevent(sigevent_t *event)
{
	struct pid *pid = task_tgid(current);
	struct task_struct *rtn;

	switch (event->sigev_notify) {
	case SIGEV_SIGNAL | SIGEV_THREAD_ID:
		pid = find_vpid(event->sigev_notify_thread_id);
		rtn = pid_task(pid, PIDTYPE_PID);
		if (!rtn || !same_thread_group(rtn, current))
			return NULL;
		fallthrough;
	case SIGEV_SIGNAL:
	case SIGEV_THREAD:
		if (event->sigev_signo <= 0 || event->sigev_signo > SIGRTMAX)
			return NULL;
		fallthrough;
	case SIGEV_NONE:
		return pid;
	default:
		return NULL;
	}
}
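/*
 * Illustrative user space sketch (not kernel code): good_sigevent() above
 * accepts SIGEV_SIGNAL | SIGEV_THREAD_ID only for a thread in the caller's
 * own thread group. A sketch directing the timer signal at the calling
 * thread; note that not every libc exposes the sigev_notify_thread_id
 * member, so this assumes a reasonably recent glibc (older headers spell
 * it sigevent::_sigev_un._tid):
 *
 *	#define _GNU_SOURCE
 *	#include <signal.h>
 *	#include <time.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	int make_thread_timer(timer_t *id)
 *	{
 *		struct sigevent sev = {
 *			.sigev_notify = SIGEV_SIGNAL | SIGEV_THREAD_ID,
 *			.sigev_signo  = SIGRTMIN,
 *		};
 *
 *		// Deliver to this thread only, not to the whole process.
 *		sev.sigev_notify_thread_id = syscall(SYS_gettid);
 *		return timer_create(CLOCK_MONOTONIC, &sev, id);
 *	}
 */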
static struct k_itimer *alloc_posix_timer(void)
{
	struct k_itimer *tmr = kmem_cache_zalloc(posix_timers_cache, GFP_KERNEL);

	if (!tmr)
		return tmr;
	if (unlikely(!(tmr->sigq = sigqueue_alloc()))) {
		kmem_cache_free(posix_timers_cache, tmr);
		return NULL;
	}
	clear_siginfo(&tmr->sigq->info);
	return tmr;
}

static void posix_timer_free(struct k_itimer *tmr)
{
	put_pid(tmr->it_pid);
	sigqueue_free(tmr->sigq);
	kfree_rcu(tmr, rcu);
}

static void posix_timer_unhash_and_free(struct k_itimer *tmr)
{
	spin_lock(&hash_lock);
	hlist_del_rcu(&tmr->t_hash);
	spin_unlock(&hash_lock);
	posix_timer_free(tmr);
}

static int common_timer_create(struct k_itimer *new_timer)
{
	hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock, 0);
	return 0;
}

/* Create a POSIX.1b interval timer. */
static int do_timer_create(clockid_t which_clock, struct sigevent *event,
			   timer_t __user *created_timer_id)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct k_itimer *new_timer;
	int error, new_timer_id;

	if (!kc)
		return -EINVAL;
	if (!kc->timer_create)
		return -EOPNOTSUPP;

	new_timer = alloc_posix_timer();
	if (unlikely(!new_timer))
		return -EAGAIN;

	spin_lock_init(&new_timer->it_lock);

	/*
	 * Add the timer to the hash table. The timer is not yet valid
	 * because new_timer::it_signal is still NULL. The timer id is also
	 * not yet visible to user space.
	 */
	new_timer_id = posix_timer_add(new_timer);
	if (new_timer_id < 0) {
		posix_timer_free(new_timer);
		return new_timer_id;
	}

	new_timer->it_id = (timer_t) new_timer_id;
	new_timer->it_clock = which_clock;
	new_timer->kclock = kc;
	new_timer->it_overrun = -1LL;

	if (event) {
		rcu_read_lock();
		new_timer->it_pid = get_pid(good_sigevent(event));
		rcu_read_unlock();
		if (!new_timer->it_pid) {
			error = -EINVAL;
			goto out;
		}
		new_timer->it_sigev_notify = event->sigev_notify;
		new_timer->sigq->info.si_signo = event->sigev_signo;
		new_timer->sigq->info.si_value = event->sigev_value;
	} else {
		new_timer->it_sigev_notify = SIGEV_SIGNAL;
		new_timer->sigq->info.si_signo = SIGALRM;
		memset(&new_timer->sigq->info.si_value, 0, sizeof(sigval_t));
		new_timer->sigq->info.si_value.sival_int = new_timer->it_id;
		new_timer->it_pid = get_pid(task_tgid(current));
	}

	new_timer->sigq->info.si_tid = new_timer->it_id;
	new_timer->sigq->info.si_code = SI_TIMER;

	if (copy_to_user(created_timer_id, &new_timer_id, sizeof(new_timer_id))) {
		error = -EFAULT;
		goto out;
	}
	/*
	 * After successful copy out, the timer ID is visible to user space
	 * now but not yet valid because new_timer::it_signal is still NULL.
	 *
	 * Complete the initialization with the clock specific create
	 * callback.
	 */
	error = kc->timer_create(new_timer);
	if (error)
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	/* This makes the timer valid in the hash table */
	WRITE_ONCE(new_timer->it_signal, current->signal);
	hlist_add_head(&new_timer->list, &current->signal->posix_timers);
	spin_unlock_irq(&current->sighand->siglock);
	/*
	 * After unlocking sighand::siglock @new_timer is subject to
	 * concurrent removal and cannot be touched anymore
	 */
	return 0;
out:
	posix_timer_unhash_and_free(new_timer);
	return error;
}

SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
		struct sigevent __user *, timer_event_spec,
		timer_t __user *, created_timer_id)
{
	if (timer_event_spec) {
		sigevent_t event;

		if (copy_from_user(&event, timer_event_spec, sizeof(event)))
			return -EFAULT;
		return do_timer_create(which_clock, &event, created_timer_id);
	}
	return do_timer_create(which_clock, NULL, created_timer_id);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(timer_create, clockid_t, which_clock,
		       struct compat_sigevent __user *, timer_event_spec,
		       timer_t __user *, created_timer_id)
{
	if (timer_event_spec) {
		sigevent_t event;

		if (get_compat_sigevent(&event, timer_event_spec))
			return -EFAULT;
		return do_timer_create(which_clock, &event, created_timer_id);
	}
	return do_timer_create(which_clock, NULL, created_timer_id);
}
#endif
static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags)
{
	struct k_itimer *timr;

	/*
	 * timer_t could be any type >= int and we want to make sure any
	 * @timer_id outside positive int range fails lookup.
	 */
	if ((unsigned long long)timer_id > INT_MAX)
		return NULL;

	/*
	 * The hash lookup and the timers are RCU protected.
	 *
	 * Timers are added to the hash in invalid state where
	 * timr::it_signal == NULL. timer::it_signal is only set after the
	 * rest of the initialization succeeded.
	 *
	 * Timer destruction happens in steps:
	 *  1) Set timr::it_signal to NULL with timr::it_lock held
	 *  2) Release timr::it_lock
	 *  3) Remove from the hash under hash_lock
	 *  4) Call RCU for removal after the grace period
	 *
	 * Holding rcu_read_lock() across the lookup ensures that
	 * the timer cannot be freed.
	 *
	 * The lookup validates locklessly that timr::it_signal ==
	 * current::it_signal and timr::it_id == @timer_id. timr::it_id
	 * can't change, but timr::it_signal becomes NULL during
	 * destruction.
	 */
	rcu_read_lock();
	timr = posix_timer_by_id(timer_id);
	if (timr) {
		spin_lock_irqsave(&timr->it_lock, *flags);
		/*
		 * Validate under timr::it_lock that timr::it_signal is
		 * still valid. Pairs with #1 above.
		 */
		if (timr->it_signal == current->signal) {
			rcu_read_unlock();
			return timr;
		}
		spin_unlock_irqrestore(&timr->it_lock, *flags);
	}
	rcu_read_unlock();

	return NULL;
}

static ktime_t common_hrtimer_remaining(struct k_itimer *timr, ktime_t now)
{
	struct hrtimer *timer = &timr->it.real.timer;

	return __hrtimer_expires_remaining_adjusted(timer, now);
}

static s64 common_hrtimer_forward(struct k_itimer *timr, ktime_t now)
{
	struct hrtimer *timer = &timr->it.real.timer;

	return hrtimer_forward(timer, now, timr->it_interval);
}

/*
 * Get the time remaining on a POSIX.1b interval timer.
 *
 * Two issues to handle here:
 *
 *  1) The timer has a requeue pending. The return value must appear as
 *     if the timer has been requeued right now.
 *
 *  2) The timer is a SIGEV_NONE timer. These timers are never enqueued
 *     into the hrtimer queue and therefore never expired. Emulate expiry
 *     here taking #1 into account.
 */
void common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting)
{
	const struct k_clock *kc = timr->kclock;
	ktime_t now, remaining, iv;
	bool sig_none;

	sig_none = timr->it_sigev_notify == SIGEV_NONE;
	iv = timr->it_interval;

	/* interval timer ? */
	if (iv) {
		cur_setting->it_interval = ktime_to_timespec64(iv);
	} else if (timr->it_status == POSIX_TIMER_DISARMED) {
		/*
		 * SIGEV_NONE oneshot timers are never queued and therefore
		 * timr->it_status is always DISARMED. The check below
		 * vs. remaining time will handle this case.
		 *
		 * For all other timers there is nothing to update here, so
		 * return.
		 */
		if (!sig_none)
			return;
	}

	now = kc->clock_get_ktime(timr->it_clock);

	/*
	 * If this is an interval timer and either has requeue pending or
	 * is a SIGEV_NONE timer move the expiry time forward by intervals,
	 * so expiry is > now.
	 */
	if (iv && (timr->it_signal_seq & REQUEUE_PENDING || sig_none))
		timr->it_overrun += kc->timer_forward(timr, now);

	remaining = kc->timer_remaining(timr, now);
	/*
	 * As @now is retrieved before a possible timer_forward() and
	 * cannot be reevaluated by the compiler @remaining is based on the
	 * same @now value. Therefore @remaining is consistent vs. @now.
	 *
	 * Consequently all interval timers, i.e. @iv > 0, cannot have a
	 * remaining time <= 0 because timer_forward() guarantees to move
	 * them forward so that the next timer expiry is > @now.
	 */
	if (remaining <= 0) {
		/*
		 * A single shot SIGEV_NONE timer must return 0, when it is
		 * expired! Timers which have a real signal delivery mode
		 * must return a remaining time greater than 0 because the
		 * signal has not yet been delivered.
		 */
		if (!sig_none)
			cur_setting->it_value.tv_nsec = 1;
	} else {
		cur_setting->it_value = ktime_to_timespec64(remaining);
	}
}

static int do_timer_gettime(timer_t timer_id, struct itimerspec64 *setting)
{
	const struct k_clock *kc;
	struct k_itimer *timr;
	unsigned long flags;
	int ret = 0;

	timr = lock_timer(timer_id, &flags);
	if (!timr)
		return -EINVAL;

	memset(setting, 0, sizeof(*setting));
	kc = timr->kclock;
	if (WARN_ON_ONCE(!kc || !kc->timer_get))
		ret = -EINVAL;
	else
		kc->timer_get(timr, setting);

	unlock_timer(timr, flags);
	return ret;
}

/* Get the time remaining on a POSIX.1b interval timer. */
SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
		struct __kernel_itimerspec __user *, setting)
{
	struct itimerspec64 cur_setting;

	int ret = do_timer_gettime(timer_id, &cur_setting);
	if (!ret) {
		if (put_itimerspec64(&cur_setting, setting))
			ret = -EFAULT;
	}
	return ret;
}

#ifdef CONFIG_COMPAT_32BIT_TIME

SYSCALL_DEFINE2(timer_gettime32, timer_t, timer_id,
		struct old_itimerspec32 __user *, setting)
{
	struct itimerspec64 cur_setting;

	int ret = do_timer_gettime(timer_id, &cur_setting);
	if (!ret) {
		if (put_old_itimerspec32(&cur_setting, setting))
			ret = -EFAULT;
	}
	return ret;
}

#endif
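/*
 * Illustrative user space sketch (not kernel code): a SIGEV_NONE timer is
 * never queued in the kernel, so the countdown seen via timer_gettime(2)
 * is emulated by common_timer_get() above. A one-shot SIGEV_NONE timer
 * reads back a shrinking it_value and eventually 0 once it has "expired":
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <time.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		struct sigevent sev = { .sigev_notify = SIGEV_NONE };
 *		struct itimerspec its = { .it_value.tv_sec = 1 };
 *		timer_t id;
 *
 *		if (timer_create(CLOCK_MONOTONIC, &sev, &id) ||
 *		    timer_settime(id, 0, &its, NULL))
 *			return 1;
 *		sleep(2);
 *		timer_gettime(id, &its);
 *		// Prints 0.000000000: the one-shot timer has expired.
 *		printf("%lld.%09ld\n", (long long)its.it_value.tv_sec,
 *		       its.it_value.tv_nsec);
 *		return 0;
 *	}
 */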
/**
 * sys_timer_getoverrun - Get the number of overruns of a POSIX.1b interval timer
 * @timer_id:	The timer ID which identifies the timer
 *
 * The "overrun count" of a timer is one plus the number of expiration
 * intervals which have elapsed between the first expiry, which queues the
 * signal, and the actual signal delivery. On signal delivery the "overrun
 * count" is calculated and cached, so it can be returned directly here.
 *
 * As this is relative to the last queued signal the returned overrun count
 * is meaningless outside of the signal delivery path and even there it
 * does not accurately reflect the current state when user space evaluates
 * it.
 *
 * Returns:
 *	-EINVAL		@timer_id is invalid
 *	1..INT_MAX	The number of overruns related to the last delivered signal
 */
SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
{
	struct k_itimer *timr;
	unsigned long flags;
	int overrun;

	timr = lock_timer(timer_id, &flags);
	if (!timr)
		return -EINVAL;

	overrun = timer_overrun_to_int(timr, 0);
	unlock_timer(timr, flags);

	return overrun;
}
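/*
 * Illustrative user space sketch (not kernel code): overruns accumulate
 * while a periodic timer's signal sits blocked. Blocking SIGALRM, letting
 * a 10ms interval timer run for a second and then fetching the signal
 * makes the missed expirations visible via timer_getoverrun(2):
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <time.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		struct itimerspec its = {
 *			.it_value.tv_nsec    = 10 * 1000 * 1000,
 *			.it_interval.tv_nsec = 10 * 1000 * 1000,
 *		};
 *		sigset_t set;
 *		siginfo_t si;
 *		timer_t id;
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGALRM);
 *		sigprocmask(SIG_BLOCK, &set, NULL);
 *
 *		// NULL sigevent: SIGEV_SIGNAL with SIGALRM, see do_timer_create().
 *		if (timer_create(CLOCK_MONOTONIC, NULL, &id) ||
 *		    timer_settime(id, 0, &its, NULL))
 *			return 1;
 *		sleep(1);
 *		sigwaitinfo(&set, &si);
 *		// Roughly 99 expirations were missed while blocked.
 *		printf("overrun: %d\n", timer_getoverrun(id));
 *		return 0;
 *	}
 */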
static void common_hrtimer_arm(struct k_itimer *timr, ktime_t expires,
			       bool absolute, bool sigev_none)
{
	struct hrtimer *timer = &timr->it.real.timer;
	enum hrtimer_mode mode;

	mode = absolute ? HRTIMER_MODE_ABS : HRTIMER_MODE_REL;
	/*
	 * Posix magic: Relative CLOCK_REALTIME timers are not affected by
	 * clock modifications, so they become CLOCK_MONOTONIC based under the
	 * hood. See hrtimer_init(). Update timr->kclock, so the generic
	 * functions which use timr->kclock->clock_get_*() work.
	 *
	 * Note: it_clock stays unmodified, because the next timer_set() might
	 * use ABSTIME, so it needs to switch back.
	 */
	if (timr->it_clock == CLOCK_REALTIME)
		timr->kclock = absolute ? &clock_realtime : &clock_monotonic;

	hrtimer_init(&timr->it.real.timer, timr->it_clock, mode);
	timr->it.real.timer.function = posix_timer_fn;

	if (!absolute)
		expires = ktime_add_safe(expires, timer->base->get_time());
	hrtimer_set_expires(timer, expires);

	if (!sigev_none)
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}

static int common_hrtimer_try_to_cancel(struct k_itimer *timr)
{
	return hrtimer_try_to_cancel(&timr->it.real.timer);
}

static void common_timer_wait_running(struct k_itimer *timer)
{
	hrtimer_cancel_wait_running(&timer->it.real.timer);
}

/*
 * On PREEMPT_RT this prevents priority inversion and a potential livelock
 * against the ksoftirqd thread in case that ksoftirqd gets preempted while
 * executing a hrtimer callback.
 *
 * See the comments in hrtimer_cancel_wait_running(). For PREEMPT_RT=n this
 * just results in a cpu_relax().
 *
 * For POSIX CPU timers with CONFIG_POSIX_CPU_TIMERS_TASK_WORK=n this is
 * just a cpu_relax(). With CONFIG_POSIX_CPU_TIMERS_TASK_WORK=y this
 * prevents spinning on an eventually scheduled out task and a livelock
 * when the task which tries to delete or disarm the timer has preempted
 * the task which runs the expiry in task work context.
 */
static struct k_itimer *timer_wait_running(struct k_itimer *timer,
					   unsigned long *flags)
{
	const struct k_clock *kc = READ_ONCE(timer->kclock);
	timer_t timer_id = READ_ONCE(timer->it_id);

	/* Prevent kfree(timer) after dropping the lock */
	rcu_read_lock();
	unlock_timer(timer, *flags);

	/*
	 * kc->timer_wait_running() might drop RCU lock. So @timer
	 * cannot be touched anymore after the function returns!
	 */
	if (!WARN_ON_ONCE(!kc->timer_wait_running))
		kc->timer_wait_running(timer);

	rcu_read_unlock();
	/* Relock the timer. It might be no longer hashed. */
	return lock_timer(timer_id, flags);
}

/*
 * Set up the new interval and reset the signal delivery data
 */
void posix_timer_set_common(struct k_itimer *timer, struct itimerspec64 *new_setting)
{
	if (new_setting->it_value.tv_sec || new_setting->it_value.tv_nsec)
		timer->it_interval = timespec64_to_ktime(new_setting->it_interval);
	else
		timer->it_interval = 0;

	/* Prevent reloading in case there is a signal pending */
	timer->it_signal_seq = (timer->it_signal_seq + 2) & ~REQUEUE_PENDING;
	/* Reset overrun accounting */
	timer->it_overrun_last = 0;
	timer->it_overrun = -1LL;
}
/* Set a POSIX.1b interval timer. */
int common_timer_set(struct k_itimer *timr, int flags,
		     struct itimerspec64 *new_setting,
		     struct itimerspec64 *old_setting)
{
	const struct k_clock *kc = timr->kclock;
	bool sigev_none;
	ktime_t expires;

	if (old_setting)
		common_timer_get(timr, old_setting);

	/* Prevent rearming by clearing the interval */
	timr->it_interval = 0;
	/*
	 * Careful here. On SMP systems the timer expiry function could be
	 * active and spinning on timr->it_lock.
	 */
	if (kc->timer_try_to_cancel(timr) < 0)
		return TIMER_RETRY;

	timr->it_status = POSIX_TIMER_DISARMED;
	posix_timer_set_common(timr, new_setting);

	/* Keep timer disarmed when it_value is zero */
	if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec)
		return 0;

	expires = timespec64_to_ktime(new_setting->it_value);
	if (flags & TIMER_ABSTIME)
		expires = timens_ktime_to_host(timr->it_clock, expires);
	sigev_none = timr->it_sigev_notify == SIGEV_NONE;

	kc->timer_arm(timr, expires, flags & TIMER_ABSTIME, sigev_none);
	if (!sigev_none)
		timr->it_status = POSIX_TIMER_ARMED;
	return 0;
}

static int do_timer_settime(timer_t timer_id, int tmr_flags,
			    struct itimerspec64 *new_spec64,
			    struct itimerspec64 *old_spec64)
{
	const struct k_clock *kc;
	struct k_itimer *timr;
	unsigned long flags;
	int error;

	if (!timespec64_valid(&new_spec64->it_interval) ||
	    !timespec64_valid(&new_spec64->it_value))
		return -EINVAL;

	if (old_spec64)
		memset(old_spec64, 0, sizeof(*old_spec64));

	timr = lock_timer(timer_id, &flags);
retry:
	if (!timr)
		return -EINVAL;

	if (old_spec64)
		old_spec64->it_interval = ktime_to_timespec64(timr->it_interval);

	kc = timr->kclock;
	if (WARN_ON_ONCE(!kc || !kc->timer_set))
		error = -EINVAL;
	else
		error = kc->timer_set(timr, tmr_flags, new_spec64, old_spec64);

	if (error == TIMER_RETRY) {
		// We already got the old time...
		old_spec64 = NULL;
		/* Unlocks and relocks the timer if it still exists */
		timr = timer_wait_running(timr, &flags);
		goto retry;
	}
	unlock_timer(timr, flags);

	return error;
}

/* Set a POSIX.1b interval timer */
SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
		const struct __kernel_itimerspec __user *, new_setting,
		struct __kernel_itimerspec __user *, old_setting)
{
	struct itimerspec64 new_spec, old_spec, *rtn;
	int error = 0;

	if (!new_setting)
		return -EINVAL;

	if (get_itimerspec64(&new_spec, new_setting))
		return -EFAULT;

	rtn = old_setting ? &old_spec : NULL;
	error = do_timer_settime(timer_id, flags, &new_spec, rtn);
	if (!error && old_setting) {
		if (put_itimerspec64(&old_spec, old_setting))
			error = -EFAULT;
	}
	return error;
}
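/*
 * Illustrative user space sketch (not kernel code): TIMER_ABSTIME arms the
 * timer against an absolute point on the timer's clock instead of a
 * relative delta, which avoids drift between reading the clock and arming
 * the timer. A sketch arming a timer for "now + 2s" on CLOCK_MONOTONIC:
 *
 *	#include <signal.h>
 *	#include <time.h>
 *
 *	int arm_absolute(timer_t id)
 *	{
 *		struct itimerspec its = { 0 };
 *		struct timespec now;
 *
 *		clock_gettime(CLOCK_MONOTONIC, &now);
 *		its.it_value.tv_sec  = now.tv_sec + 2;
 *		its.it_value.tv_nsec = now.tv_nsec;
 *		// Absolute expiry: unaffected by time spent between
 *		// clock_gettime() and timer_settime().
 *		return timer_settime(id, TIMER_ABSTIME, &its, NULL);
 *	}
 */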
#ifdef CONFIG_COMPAT_32BIT_TIME
SYSCALL_DEFINE4(timer_settime32, timer_t, timer_id, int, flags,
		struct old_itimerspec32 __user *, new,
		struct old_itimerspec32 __user *, old)
{
	struct itimerspec64 new_spec, old_spec;
	struct itimerspec64 *rtn = old ? &old_spec : NULL;
	int error = 0;

	if (!new)
		return -EINVAL;
	if (get_old_itimerspec32(&new_spec, new))
		return -EFAULT;

	error = do_timer_settime(timer_id, flags, &new_spec, rtn);
	if (!error && old) {
		if (put_old_itimerspec32(&old_spec, old))
			error = -EFAULT;
	}
	return error;
}
#endif

int common_timer_del(struct k_itimer *timer)
{
	const struct k_clock *kc = timer->kclock;

	timer->it_interval = 0;
	if (kc->timer_try_to_cancel(timer) < 0)
		return TIMER_RETRY;
	timer->it_status = POSIX_TIMER_DISARMED;
	return 0;
}

static inline int timer_delete_hook(struct k_itimer *timer)
{
	const struct k_clock *kc = timer->kclock;

	if (WARN_ON_ONCE(!kc || !kc->timer_del))
		return -EINVAL;
	return kc->timer_del(timer);
}

/* Delete a POSIX.1b interval timer. */
SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
{
	struct k_itimer *timer;
	unsigned long flags;

	timer = lock_timer(timer_id, &flags);

retry_delete:
	if (!timer)
		return -EINVAL;

	if (unlikely(timer_delete_hook(timer) == TIMER_RETRY)) {
		/* Unlocks and relocks the timer if it still exists */
		timer = timer_wait_running(timer, &flags);
		goto retry_delete;
	}

	spin_lock(&current->sighand->siglock);
	hlist_del(&timer->list);
	spin_unlock(&current->sighand->siglock);
	/*
	 * A concurrent lookup could check timer::it_signal lockless. It
	 * will reevaluate with timer::it_lock held and observe the NULL.
	 */
	WRITE_ONCE(timer->it_signal, NULL);

	unlock_timer(timer, flags);
	posix_timer_unhash_and_free(timer);
	return 0;
}

/*
 * Delete a timer if it is armed, remove it from the hash and schedule it
 * for RCU freeing.
 */
static void itimer_delete(struct k_itimer *timer)
{
	unsigned long flags;

	/*
	 * irqsave is required to make timer_wait_running() work.
	 */
	spin_lock_irqsave(&timer->it_lock, flags);

retry_delete:
	/*
	 * Even if the timer is no longer accessible from other tasks
	 * it still might be armed and queued in the underlying timer
	 * mechanism. Worse, that timer mechanism might run the expiry
	 * function concurrently.
	 */
	if (timer_delete_hook(timer) == TIMER_RETRY) {
		/*
		 * Timer is expired concurrently, prevent livelocks
		 * and pointless spinning on RT.
		 *
		 * timer_wait_running() drops timer::it_lock, which opens
		 * the possibility for another task to delete the timer.
		 *
		 * That's not possible here because this is invoked from
		 * do_exit() only for the last thread of the thread group.
		 * So no other task can access and delete that timer.
		 */
		if (WARN_ON_ONCE(timer_wait_running(timer, &flags) != timer))
			return;

		goto retry_delete;
	}
	hlist_del(&timer->list);

	/*
	 * Setting timer::it_signal to NULL is technically not required
	 * here as nothing can access the timer anymore legitimately via
	 * the hash table. Set it to NULL nevertheless so that all deletion
	 * paths are consistent.
	 */
	WRITE_ONCE(timer->it_signal, NULL);

	spin_unlock_irqrestore(&timer->it_lock, flags);
	posix_timer_unhash_and_free(timer);
}
/*
 * Invoked from do_exit() when the last thread of a thread group exits.
 * At that point no other task can access the timers of the dying
 * task anymore.
 */
void exit_itimers(struct task_struct *tsk)
{
	struct hlist_head timers;

	if (hlist_empty(&tsk->signal->posix_timers))
		return;

	/* Protect against concurrent read via /proc/$PID/timers */
	spin_lock_irq(&tsk->sighand->siglock);
	hlist_move_list(&tsk->signal->posix_timers, &timers);
	spin_unlock_irq(&tsk->sighand->siglock);

	/* The timers are no longer accessible via tsk::signal */
	while (!hlist_empty(&timers))
		itimer_delete(hlist_entry(timers.first, struct k_itimer, list));
}

SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
		const struct __kernel_timespec __user *, tp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 new_tp;

	if (!kc || !kc->clock_set)
		return -EINVAL;

	if (get_timespec64(&new_tp, tp))
		return -EFAULT;

	/*
	 * Permission checks have to be done inside the clock specific
	 * setter callback.
	 */
	return kc->clock_set(which_clock, &new_tp);
}

SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock,
		struct __kernel_timespec __user *, tp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 kernel_tp;
	int error;

	if (!kc)
		return -EINVAL;

	error = kc->clock_get_timespec(which_clock, &kernel_tp);

	if (!error && put_timespec64(&kernel_tp, tp))
		error = -EFAULT;

	return error;
}

int do_clock_adjtime(const clockid_t which_clock, struct __kernel_timex *ktx)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);

	if (!kc)
		return -EINVAL;
	if (!kc->clock_adj)
		return -EOPNOTSUPP;

	return kc->clock_adj(which_clock, ktx);
}

SYSCALL_DEFINE2(clock_adjtime, const clockid_t, which_clock,
		struct __kernel_timex __user *, utx)
{
	struct __kernel_timex ktx;
	int err;

	if (copy_from_user(&ktx, utx, sizeof(ktx)))
		return -EFAULT;

	err = do_clock_adjtime(which_clock, &ktx);

	if (err >= 0 && copy_to_user(utx, &ktx, sizeof(ktx)))
		return -EFAULT;

	return err;
}
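/*
 * Illustrative user space sketch (not kernel code): with modes == 0,
 * clock_adjtime(2) only reads the current adjustment state, so it needs no
 * privileges. A sketch querying the NTP frequency offset of
 * CLOCK_REALTIME, assuming a glibc that exposes clock_adjtime() in
 * <sys/timex.h>:
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/timex.h>
 *	#include <time.h>
 *
 *	int main(void)
 *	{
 *		struct timex tx;
 *
 *		memset(&tx, 0, sizeof(tx));	// modes == 0: read-only query
 *		if (clock_adjtime(CLOCK_REALTIME, &tx) < 0)
 *			return 1;
 *		// freq is scaled ppm with a 16-bit fractional part.
 *		printf("freq: %.3f ppm\n", tx.freq / 65536.0);
 *		return 0;
 *	}
 */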
/**
 * sys_clock_getres - Get the resolution of a clock
 * @which_clock:	The clock to get the resolution for
 * @tp:			Pointer to a user space timespec64 for storage
 *
 * POSIX defines:
 *
 * "The clock_getres() function shall return the resolution of any
 * clock. Clock resolutions are implementation-defined and cannot be set by
 * a process. If the argument res is not NULL, the resolution of the
 * specified clock shall be stored in the location pointed to by res. If
 * res is NULL, the clock resolution is not returned. If the time argument
 * of clock_settime() is not a multiple of res, then the value is truncated
 * to a multiple of res."
 *
 * Due to the various hardware constraints the real resolution can vary
 * wildly and even change during runtime when the underlying devices are
 * replaced. The kernel also can use hardware devices with different
 * resolutions for reading the time and for arming timers.
 *
 * The kernel therefore deviates from the POSIX spec in various aspects:
 *
 * 1) The resolution returned to user space
 *
 *    For CLOCK_REALTIME, CLOCK_MONOTONIC, CLOCK_BOOTTIME, CLOCK_TAI,
 *    CLOCK_REALTIME_ALARM, CLOCK_BOOTTIME_ALARM and CLOCK_MONOTONIC_RAW
 *    the kernel differentiates only two cases:
 *
 *    I)  Low resolution mode:
 *
 *	  When high resolution timers are disabled at compile or runtime
 *	  the resolution returned is nanoseconds per tick, which represents
 *	  the precision at which timers expire.
 *
 *    II) High resolution mode:
 *
 *	  When high resolution timers are enabled the resolution returned
 *	  is always one nanosecond independent of the actual resolution of
 *	  the underlying hardware devices.
 *
 *	  For CLOCK_*_ALARM the actual resolution depends on system
 *	  state. When the system is running the resolution is the same as
 *	  the resolution of the other clocks. During suspend the actual
 *	  resolution is the resolution of the underlying RTC device which
 *	  might be way less precise than the clockevent device used during
 *	  running state.
 *
 *    For CLOCK_REALTIME_COARSE and CLOCK_MONOTONIC_COARSE the resolution
 *    returned is always nanoseconds per tick.
 *
 *    For CLOCK_PROCESS_CPUTIME and CLOCK_THREAD_CPUTIME the resolution
 *    returned is always one nanosecond under the assumption that the
 *    underlying scheduler clock has a better resolution than nanoseconds
 *    per tick.
 *
 *    For dynamic POSIX clocks (PTP devices) the resolution returned is
 *    always one nanosecond.
 *
 * 2) Effect on sys_clock_settime()
 *
 *    The kernel does not truncate the time which is handed in to
 *    sys_clock_settime(). The kernel internal timekeeping is always using
 *    nanoseconds precision independent of the clocksource device which is
 *    used to read the time from. The resolution of that device only
 *    affects the precision of the time returned by sys_clock_gettime().
 *
 * Returns:
 *	0		Success. @tp contains the resolution
 *	-EINVAL		@which_clock is not a valid clock ID
 *	-EFAULT		Copying the resolution to @tp faulted
 *	-ENODEV		Dynamic POSIX clock is not backed by a device
 *	-EOPNOTSUPP	Dynamic POSIX clock does not support getres()
 */
SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock,
		struct __kernel_timespec __user *, tp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 rtn_tp;
	int error;

	if (!kc)
		return -EINVAL;

	error = kc->clock_getres(which_clock, &rtn_tp);

	if (!error && tp && put_timespec64(&rtn_tp, tp))
		error = -EFAULT;

	return error;
}
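/*
 * Illustrative user space sketch (not kernel code): comparing the reported
 * resolutions makes the cases described above visible. On a kernel with
 * high resolution timers enabled this typically prints 1 ns for
 * CLOCK_MONOTONIC and nanoseconds per tick (e.g. 4000000 at HZ=250) for
 * the coarse clock:
 *
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	int main(void)
 *	{
 *		struct timespec res;
 *
 *		clock_getres(CLOCK_MONOTONIC, &res);
 *		printf("CLOCK_MONOTONIC:        %ld ns\n", res.tv_nsec);
 *		clock_getres(CLOCK_MONOTONIC_COARSE, &res);
 *		printf("CLOCK_MONOTONIC_COARSE: %ld ns\n", res.tv_nsec);
 *		return 0;
 *	}
 */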
#ifdef CONFIG_COMPAT_32BIT_TIME

SYSCALL_DEFINE2(clock_settime32, clockid_t, which_clock,
		struct old_timespec32 __user *, tp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 ts;

	if (!kc || !kc->clock_set)
		return -EINVAL;

	if (get_old_timespec32(&ts, tp))
		return -EFAULT;

	return kc->clock_set(which_clock, &ts);
}

SYSCALL_DEFINE2(clock_gettime32, clockid_t, which_clock,
		struct old_timespec32 __user *, tp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 ts;
	int err;

	if (!kc)
		return -EINVAL;

	err = kc->clock_get_timespec(which_clock, &ts);

	if (!err && put_old_timespec32(&ts, tp))
		err = -EFAULT;

	return err;
}

SYSCALL_DEFINE2(clock_adjtime32, clockid_t, which_clock,
		struct old_timex32 __user *, utp)
{
	struct __kernel_timex ktx;
	int err;

	err = get_old_timex32(&ktx, utp);
	if (err)
		return err;

	err = do_clock_adjtime(which_clock, &ktx);

	if (err >= 0 && put_old_timex32(utp, &ktx))
		return -EFAULT;

	return err;
}

SYSCALL_DEFINE2(clock_getres_time32, clockid_t, which_clock,
		struct old_timespec32 __user *, tp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 ts;
	int err;

	if (!kc)
		return -EINVAL;

	err = kc->clock_getres(which_clock, &ts);
	if (!err && tp && put_old_timespec32(&ts, tp))
		return -EFAULT;

	return err;
}

#endif

/*
 * sys_clock_nanosleep() for CLOCK_REALTIME and CLOCK_TAI
 */
static int common_nsleep(const clockid_t which_clock, int flags,
			 const struct timespec64 *rqtp)
{
	ktime_t texp = timespec64_to_ktime(*rqtp);

	return hrtimer_nanosleep(texp, flags & TIMER_ABSTIME ?
				 HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
				 which_clock);
}

/*
 * sys_clock_nanosleep() for CLOCK_MONOTONIC and CLOCK_BOOTTIME
 *
 * Absolute nanosleeps for these clocks are time-namespace adjusted.
 */
static int common_nsleep_timens(const clockid_t which_clock, int flags,
				const struct timespec64 *rqtp)
{
	ktime_t texp = timespec64_to_ktime(*rqtp);

	if (flags & TIMER_ABSTIME)
		texp = timens_ktime_to_host(which_clock, texp);

	return hrtimer_nanosleep(texp, flags & TIMER_ABSTIME ?
				 HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
				 which_clock);
}

SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
		const struct __kernel_timespec __user *, rqtp,
		struct __kernel_timespec __user *, rmtp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 t;

	if (!kc)
		return -EINVAL;
	if (!kc->nsleep)
		return -EOPNOTSUPP;

	if (get_timespec64(&t, rqtp))
		return -EFAULT;

	if (!timespec64_valid(&t))
		return -EINVAL;
	if (flags & TIMER_ABSTIME)
		rmtp = NULL;
	current->restart_block.fn = do_no_restart_syscall;
	current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE;
	current->restart_block.nanosleep.rmtp = rmtp;

	return kc->nsleep(which_clock, flags, &t);
}
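/*
 * Illustrative user space sketch (not kernel code): with TIMER_ABSTIME the
 * remaining-time pointer is ignored (rmtp is forced to NULL above), so an
 * interrupted absolute sleep can simply be restarted with the same
 * deadline. That makes a drift-free periodic loop straightforward; note
 * that clock_nanosleep(2) returns the error number directly instead of
 * setting errno:
 *
 *	#include <errno.h>
 *	#include <time.h>
 *
 *	void periodic_100ms(void (*work)(void))
 *	{
 *		struct timespec next;
 *
 *		clock_gettime(CLOCK_MONOTONIC, &next);
 *		for (;;) {
 *			next.tv_nsec += 100 * 1000 * 1000;
 *			if (next.tv_nsec >= 1000000000) {
 *				next.tv_nsec -= 1000000000;
 *				next.tv_sec++;
 *			}
 *			// Retry with the unchanged absolute deadline on EINTR.
 *			while (clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME,
 *					       &next, NULL) == EINTR)
 *				;
 *			work();
 *		}
 *	}
 */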
#ifdef CONFIG_COMPAT_32BIT_TIME

SYSCALL_DEFINE4(clock_nanosleep_time32, clockid_t, which_clock, int, flags,
		struct old_timespec32 __user *, rqtp,
		struct old_timespec32 __user *, rmtp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 t;

	if (!kc)
		return -EINVAL;
	if (!kc->nsleep)
		return -EOPNOTSUPP;

	if (get_old_timespec32(&t, rqtp))
		return -EFAULT;

	if (!timespec64_valid(&t))
		return -EINVAL;
	if (flags & TIMER_ABSTIME)
		rmtp = NULL;
	current->restart_block.fn = do_no_restart_syscall;
	current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE;
	current->restart_block.nanosleep.compat_rmtp = rmtp;

	return kc->nsleep(which_clock, flags, &t);
}

#endif

static const struct k_clock clock_realtime = {
	.clock_getres		= posix_get_hrtimer_res,
	.clock_get_timespec	= posix_get_realtime_timespec,
	.clock_get_ktime	= posix_get_realtime_ktime,
	.clock_set		= posix_clock_realtime_set,
	.clock_adj		= posix_clock_realtime_adj,
	.nsleep			= common_nsleep,
	.timer_create		= common_timer_create,
	.timer_set		= common_timer_set,
	.timer_get		= common_timer_get,
	.timer_del		= common_timer_del,
	.timer_rearm		= common_hrtimer_rearm,
	.timer_forward		= common_hrtimer_forward,
	.timer_remaining	= common_hrtimer_remaining,
	.timer_try_to_cancel	= common_hrtimer_try_to_cancel,
	.timer_wait_running	= common_timer_wait_running,
	.timer_arm		= common_hrtimer_arm,
};

static const struct k_clock clock_monotonic = {
	.clock_getres		= posix_get_hrtimer_res,
	.clock_get_timespec	= posix_get_monotonic_timespec,
	.clock_get_ktime	= posix_get_monotonic_ktime,
	.nsleep			= common_nsleep_timens,
	.timer_create		= common_timer_create,
	.timer_set		= common_timer_set,
	.timer_get		= common_timer_get,
	.timer_del		= common_timer_del,
	.timer_rearm		= common_hrtimer_rearm,
	.timer_forward		= common_hrtimer_forward,
	.timer_remaining	= common_hrtimer_remaining,
	.timer_try_to_cancel	= common_hrtimer_try_to_cancel,
	.timer_wait_running	= common_timer_wait_running,
	.timer_arm		= common_hrtimer_arm,
};

static const struct k_clock clock_monotonic_raw = {
	.clock_getres		= posix_get_hrtimer_res,
	.clock_get_timespec	= posix_get_monotonic_raw,
};

static const struct k_clock clock_realtime_coarse = {
	.clock_getres		= posix_get_coarse_res,
	.clock_get_timespec	= posix_get_realtime_coarse,
};

static const struct k_clock clock_monotonic_coarse = {
	.clock_getres		= posix_get_coarse_res,
	.clock_get_timespec	= posix_get_monotonic_coarse,
};

static const struct k_clock clock_tai = {
	.clock_getres		= posix_get_hrtimer_res,
	.clock_get_ktime	= posix_get_tai_ktime,
	.clock_get_timespec	= posix_get_tai_timespec,
	.nsleep			= common_nsleep,
	.timer_create		= common_timer_create,
	.timer_set		= common_timer_set,
	.timer_get		= common_timer_get,
	.timer_del		= common_timer_del,
	.timer_rearm		= common_hrtimer_rearm,
	.timer_forward		= common_hrtimer_forward,
	.timer_remaining	= common_hrtimer_remaining,
	.timer_try_to_cancel	= common_hrtimer_try_to_cancel,
	.timer_wait_running	= common_timer_wait_running,
	.timer_arm		= common_hrtimer_arm,
};

static const struct k_clock clock_boottime = {
	.clock_getres		= posix_get_hrtimer_res,
	.clock_get_ktime	= posix_get_boottime_ktime,
	.clock_get_timespec	= posix_get_boottime_timespec,
	.nsleep			= common_nsleep_timens,
	.timer_create		= common_timer_create,
	.timer_set		= common_timer_set,
	.timer_get		= common_timer_get,
	.timer_del		= common_timer_del,
	.timer_rearm		= common_hrtimer_rearm,
	.timer_forward		= common_hrtimer_forward,
	.timer_remaining	= common_hrtimer_remaining,
	.timer_try_to_cancel	= common_hrtimer_try_to_cancel,
	.timer_wait_running	= common_timer_wait_running,
	.timer_arm		= common_hrtimer_arm,
};

static const struct k_clock * const posix_clocks[] = {
	[CLOCK_REALTIME]		= &clock_realtime,
	[CLOCK_MONOTONIC]		= &clock_monotonic,
	[CLOCK_PROCESS_CPUTIME_ID]	= &clock_process,
	[CLOCK_THREAD_CPUTIME_ID]	= &clock_thread,
	[CLOCK_MONOTONIC_RAW]		= &clock_monotonic_raw,
	[CLOCK_REALTIME_COARSE]		= &clock_realtime_coarse,
	[CLOCK_MONOTONIC_COARSE]	= &clock_monotonic_coarse,
	[CLOCK_BOOTTIME]		= &clock_boottime,
	[CLOCK_REALTIME_ALARM]		= &alarm_clock,
	[CLOCK_BOOTTIME_ALARM]		= &alarm_clock,
	[CLOCK_TAI]			= &clock_tai,
};

static const struct k_clock *clockid_to_kclock(const clockid_t id)
{
	clockid_t idx = id;

	if (id < 0) {
		return (id & CLOCKFD_MASK) == CLOCKFD ?
			&clock_posix_dynamic : &clock_posix_cpu;
	}

	if (id >= ARRAY_SIZE(posix_clocks))
		return NULL;

	return posix_clocks[array_index_nospec(idx, ARRAY_SIZE(posix_clocks))];
}
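/*
 * Illustrative user space sketch (not kernel code): negative clock IDs are
 * dispatched above to the dynamic clock code (CLOCKFD) or to the CPU clock
 * code. User space reaches a dynamic clock, e.g. a PTP device, by encoding
 * an open file descriptor into a clockid_t; the FD_TO_CLOCKID() macro
 * below mirrors the kernel's CLOCKFD encoding:
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <time.h>
 *	#include <unistd.h>
 *
 *	#define CLOCKFD			3
 *	#define FD_TO_CLOCKID(fd)	((~(clockid_t)(fd) << 3) | CLOCKFD)
 *
 *	int main(void)
 *	{
 *		int fd = open("/dev/ptp0", O_RDONLY);
 *		struct timespec ts;
 *
 *		if (fd < 0)
 *			return 1;
 *		if (!clock_gettime(FD_TO_CLOCKID(fd), &ts))
 *			printf("%lld.%09ld\n", (long long)ts.tv_sec,
 *			       ts.tv_nsec);
 *		close(fd);
 *		return 0;
 *	}
 */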