/*
 * This file contains the procedures for the handling of select and poll
 *
 * Created for Linux based loosely upon Mathius Lattner's minix
 * patches by Peter MacDonald. Heavily edited by Linus.
 *
 *  4 February 1994
 *     COFF/ELF binary emulation. If the process has the STICKY_TIMEOUTS
 *     flag set in its personality we do *not* modify the given timeout
 *     parameter to reflect time remaining.
 *
 *  24 January 2000
 *     Changed sys_poll()/do_poll() to use PAGE_SIZE chunk-based allocation
 *     of fds to overcome nfds < 16390 descriptors limit (Tigran Aivazian).
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/personality.h> /* for STICKY_TIMEOUTS */
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>
#include <linux/sched/rt.h>
#include <linux/freezer.h>
#include <net/busy_poll.h>

#include <asm/uaccess.h>


/*
 * Estimate expected accuracy in ns from a timeval.
 *
 * After quite a bit of churning around, we've settled on
 * a simple thing of taking 0.1% of the timeout as the
 * slack, with a cap of 100 msec.
 * "nice" tasks get a 0.5% slack instead.
 *
 * Consider this comment an open invitation to come up with even
 * better solutions..
 */

#define MAX_SLACK	(100 * NSEC_PER_MSEC)

static long __estimate_accuracy(struct timespec64 *tv)
{
	long slack;
	int divfactor = 1000;

	if (tv->tv_sec < 0)
		return 0;

	if (task_nice(current) > 0)
		divfactor = divfactor / 5;

	if (tv->tv_sec > MAX_SLACK / (NSEC_PER_SEC/divfactor))
		return MAX_SLACK;

	slack = tv->tv_nsec / divfactor;
	slack += tv->tv_sec * (NSEC_PER_SEC/divfactor);

	if (slack > MAX_SLACK)
		return MAX_SLACK;

	return slack;
}

u64 select_estimate_accuracy(struct timespec64 *tv)
{
	u64 ret;
	struct timespec64 now;

	/*
	 * Realtime tasks get a slack of 0 for obvious reasons.
	 */

	if (rt_task(current))
		return 0;

	ktime_get_ts64(&now);
	now = timespec64_sub(*tv, now);
	ret = __estimate_accuracy(&now);
	if (ret < current->timer_slack_ns)
		return current->timer_slack_ns;
	return ret;
}



struct poll_table_page {
	struct poll_table_page * next;
	struct poll_table_entry * entry;
	struct poll_table_entry entries[0];
};

#define POLL_TABLE_FULL(table) \
	((unsigned long)((table)->entry+1) > PAGE_SIZE + (unsigned long)(table))

/*
 * Ok, Peter made a complicated, but straightforward multiple_wait() function.
 * I have rewritten this, taking some shortcuts: This code may not be easy to
 * follow, but it should be free of race-conditions, and it's practical. If you
 * understand what I'm doing here, then you understand how the linux
 * sleep/wakeup mechanism works.
 *
 * Two very simple procedures, poll_wait() and poll_freewait() do all the
 * work.  poll_wait() is an inline-function defined in <linux/poll.h>,
 * as all select/poll functions have to call it to add an entry to the
 * poll table.
 */
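/*
 * For illustration only (not part of this file's API): a minimal sketch
 * of the ->poll method a driver would supply, showing the expected
 * poll_wait() usage.  The my_dev/my_waitq/data_ready/space_to_write
 * names are assumptions, not real kernel symbols.
 *
 *	static unsigned int my_poll(struct file *file, poll_table *wait)
 *	{
 *		struct my_dev *dev = file->private_data;
 *		unsigned int mask = 0;
 *
 *		poll_wait(file, &dev->my_waitq, wait);
 *		if (data_ready(dev))
 *			mask |= POLLIN | POLLRDNORM;
 *		if (space_to_write(dev))
 *			mask |= POLLOUT | POLLWRNORM;
 *		return mask;
 *	}
 */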
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
		       poll_table *p);

void poll_initwait(struct poll_wqueues *pwq)
{
	init_poll_funcptr(&pwq->pt, __pollwait);
	pwq->polling_task = current;
	pwq->triggered = 0;
	pwq->error = 0;
	pwq->table = NULL;
	pwq->inline_index = 0;
}
EXPORT_SYMBOL(poll_initwait);

static void free_poll_entry(struct poll_table_entry *entry)
{
	remove_wait_queue(entry->wait_address, &entry->wait);
	fput(entry->filp);
}

void poll_freewait(struct poll_wqueues *pwq)
{
	struct poll_table_page * p = pwq->table;
	int i;
	for (i = 0; i < pwq->inline_index; i++)
		free_poll_entry(pwq->inline_entries + i);
	while (p) {
		struct poll_table_entry * entry;
		struct poll_table_page *old;

		entry = p->entry;
		do {
			entry--;
			free_poll_entry(entry);
		} while (entry > p->entries);
		old = p;
		p = p->next;
		free_page((unsigned long) old);
	}
}
EXPORT_SYMBOL(poll_freewait);

static struct poll_table_entry *poll_get_entry(struct poll_wqueues *p)
{
	struct poll_table_page *table = p->table;

	if (p->inline_index < N_INLINE_POLL_ENTRIES)
		return p->inline_entries + p->inline_index++;

	if (!table || POLL_TABLE_FULL(table)) {
		struct poll_table_page *new_table;

		new_table = (struct poll_table_page *) __get_free_page(GFP_KERNEL);
		if (!new_table) {
			p->error = -ENOMEM;
			return NULL;
		}
		new_table->entry = new_table->entries;
		new_table->next = table;
		p->table = new_table;
		table = new_table;
	}

	return table->entry++;
}

static int __pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_wqueues *pwq = wait->private;
	DECLARE_WAITQUEUE(dummy_wait, pwq->polling_task);

	/*
	 * Although this function is called under waitqueue lock, LOCK
	 * doesn't imply write barrier and the users expect write
	 * barrier semantics on wakeup functions.  The following
	 * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
	 * and is paired with smp_store_mb() in poll_schedule_timeout.
	 */
	smp_wmb();
	pwq->triggered = 1;

	/*
	 * Perform the default wake up operation using a dummy
	 * waitqueue.
	 *
	 * TODO: This is hacky but there currently is no interface to
	 * pass in @sync.  @sync is scheduled to be removed and once
	 * that happens, wake_up_process() can be used directly.
	 */
	return default_wake_function(&dummy_wait, mode, sync, key);
}
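/*
 * Illustration only (a sketch mirroring what do_select()/do_poll() below
 * actually do): the canonical pairing of the helpers above.
 *
 *	struct poll_wqueues table;
 *
 *	poll_initwait(&table);
 *	for (;;) {
 *		// call each watched file's ->poll with &table.pt;
 *		// __pollwait() queues us on every waitqueue handed
 *		// to poll_wait()
 *		if (found events || timed out || signal_pending(current))
 *			break;
 *		poll_schedule_timeout(&table, TASK_INTERRUPTIBLE, to, slack);
 *	}
 *	poll_freewait(&table);	// dequeue everywhere, drop file refs
 */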
static int pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_table_entry *entry;

	entry = container_of(wait, struct poll_table_entry, wait);
	if (key && !((unsigned long)key & entry->key))
		return 0;
	return __pollwake(wait, mode, sync, key);
}

/* Add a new entry */
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
				poll_table *p)
{
	struct poll_wqueues *pwq = container_of(p, struct poll_wqueues, pt);
	struct poll_table_entry *entry = poll_get_entry(pwq);
	if (!entry)
		return;
	entry->filp = get_file(filp);
	entry->wait_address = wait_address;
	entry->key = p->_key;
	init_waitqueue_func_entry(&entry->wait, pollwake);
	entry->wait.private = pwq;
	add_wait_queue(wait_address, &entry->wait);
}

int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
			  ktime_t *expires, unsigned long slack)
{
	int rc = -EINTR;

	set_current_state(state);
	if (!pwq->triggered)
		rc = schedule_hrtimeout_range(expires, slack, HRTIMER_MODE_ABS);
	__set_current_state(TASK_RUNNING);

	/*
	 * Prepare for the next iteration.
	 *
	 * The following smp_store_mb() serves two purposes.  First, it's
	 * the counterpart rmb of the wmb in pollwake() such that data
	 * written before wake up is always visible after wake up.
	 * Second, the full barrier guarantees that triggered clearing
	 * doesn't pass event check of the next iteration.  Note that
	 * this problem doesn't exist for the first iteration as
	 * add_wait_queue() has full barrier semantics.
	 */
	smp_store_mb(pwq->triggered, 0);

	return rc;
}
EXPORT_SYMBOL(poll_schedule_timeout);
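/*
 * Illustration only: an informal timeline of the barrier pairing the
 * comments above describe (no additional code, just the ordering):
 *
 *	waker (__pollwake)		sleeper (poll_schedule_timeout)
 *	------------------		-------------------------------
 *	make event state visible	set_current_state(state)
 *	smp_wmb()			if (!pwq->triggered) schedule...
 *	pwq->triggered = 1		__set_current_state(TASK_RUNNING)
 *	default_wake_function()		smp_store_mb(pwq->triggered, 0)
 *					re-check events (must observe them)
 */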
/**
 * poll_select_set_timeout - helper function to setup the timeout value
 * @to:		pointer to timespec64 variable for the final timeout
 * @sec:	seconds (from user space)
 * @nsec:	nanoseconds (from user space)
 *
 * Note, we do not use a timespec for the user space value here.  That
 * way we can use the function for timeval and compat interfaces as well.
 *
 * Returns -EINVAL if sec/nsec are not normalized. Otherwise 0.
 */
int poll_select_set_timeout(struct timespec64 *to, time64_t sec, long nsec)
{
	struct timespec64 ts = {.tv_sec = sec, .tv_nsec = nsec};

	if (!timespec64_valid(&ts))
		return -EINVAL;

	/* Optimize for the zero timeout value here */
	if (!sec && !nsec) {
		to->tv_sec = to->tv_nsec = 0;
	} else {
		ktime_get_ts64(to);
		*to = timespec64_add_safe(*to, ts);
	}
	return 0;
}

static int poll_select_copy_remaining(struct timespec64 *end_time,
				      void __user *p,
				      int timeval, int ret)
{
	struct timespec64 rts64;
	struct timespec rts;
	struct timeval rtv;

	if (!p)
		return ret;

	if (current->personality & STICKY_TIMEOUTS)
		goto sticky;

	/* No update for zero timeout */
	if (!end_time->tv_sec && !end_time->tv_nsec)
		return ret;

	ktime_get_ts64(&rts64);
	rts64 = timespec64_sub(*end_time, rts64);
	if (rts64.tv_sec < 0)
		rts64.tv_sec = rts64.tv_nsec = 0;

	rts = timespec64_to_timespec(rts64);

	if (timeval) {
		if (sizeof(rtv) > sizeof(rtv.tv_sec) + sizeof(rtv.tv_usec))
			memset(&rtv, 0, sizeof(rtv));
		rtv.tv_sec = rts64.tv_sec;
		rtv.tv_usec = rts64.tv_nsec / NSEC_PER_USEC;

		if (!copy_to_user(p, &rtv, sizeof(rtv)))
			return ret;

	} else if (!copy_to_user(p, &rts, sizeof(rts)))
		return ret;

	/*
	 * If an application puts its timeval in read-only memory, we
	 * don't want the Linux-specific update to the timeval to
	 * cause a fault after the select has completed
	 * successfully. However, because we're not updating the
	 * timeval, we can't restart the system call.
	 */

sticky:
	if (ret == -ERESTARTNOHAND)
		ret = -EINTR;
	return ret;
}

#define FDS_IN(fds, n)		(fds->in + n)
#define FDS_OUT(fds, n)		(fds->out + n)
#define FDS_EX(fds, n)		(fds->ex + n)

#define BITS(fds, n)	(*FDS_IN(fds, n)|*FDS_OUT(fds, n)|*FDS_EX(fds, n))

static int max_select_fd(unsigned long n, fd_set_bits *fds)
{
	unsigned long *open_fds;
	unsigned long set;
	int max;
	struct fdtable *fdt;

	/* handle last incomplete long-word first */
	set = ~(~0UL << (n & (BITS_PER_LONG-1)));
	n /= BITS_PER_LONG;
	fdt = files_fdtable(current->files);
	open_fds = fdt->open_fds + n;
	max = 0;
	if (set) {
		set &= BITS(fds, n);
		if (set) {
			if (!(set & ~*open_fds))
				goto get_max;
			return -EBADF;
		}
	}
	while (n) {
		open_fds--;
		n--;
		set = BITS(fds, n);
		if (!set)
			continue;
		if (set & ~*open_fds)
			return -EBADF;
		if (max)
			continue;
get_max:
		do {
			max++;
			set >>= 1;
		} while (set);
		max += n * BITS_PER_LONG;
	}

	return max;
}

#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
#define POLLEX_SET (POLLPRI)

static inline void wait_key_set(poll_table *wait, unsigned long in,
				unsigned long out, unsigned long bit,
				unsigned int ll_flag)
{
	wait->_key = POLLEX_SET | ll_flag;
	if (in & bit)
		wait->_key |= POLLIN_SET;
	if (out & bit)
		wait->_key |= POLLOUT_SET;
}
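/*
 * Worked example (illustration only): for a descriptor present in both
 * the read and write sets, wait_key_set() above builds
 *
 *	_key = POLLEX_SET | POLLIN_SET | POLLOUT_SET | ll_flag
 *
 * so a ->poll return of POLLHUP (part of POLLIN_SET) is enough for
 * do_select() below to report the descriptor as readable.
 */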
int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time)
{
	ktime_t expire, *to = NULL;
	struct poll_wqueues table;
	poll_table *wait;
	int retval, i, timed_out = 0;
	u64 slack = 0;
	unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
	unsigned long busy_end = 0;

	rcu_read_lock();
	retval = max_select_fd(n, fds);
	rcu_read_unlock();

	if (retval < 0)
		return retval;
	n = retval;

	poll_initwait(&table);
	wait = &table.pt;
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		wait->_qproc = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = select_estimate_accuracy(end_time);

	retval = 0;
	for (;;) {
		unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;
		bool can_busy_loop = false;

		inp = fds->in; outp = fds->out; exp = fds->ex;
		rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex;

		for (i = 0; i < n; ++rinp, ++routp, ++rexp) {
			unsigned long in, out, ex, all_bits, bit = 1, mask, j;
			unsigned long res_in = 0, res_out = 0, res_ex = 0;

			in = *inp++; out = *outp++; ex = *exp++;
			all_bits = in | out | ex;
			if (all_bits == 0) {
				i += BITS_PER_LONG;
				continue;
			}

			for (j = 0; j < BITS_PER_LONG; ++j, ++i, bit <<= 1) {
				struct fd f;
				if (i >= n)
					break;
				if (!(bit & all_bits))
					continue;
				f = fdget(i);
				if (f.file) {
					const struct file_operations *f_op;
					f_op = f.file->f_op;
					mask = DEFAULT_POLLMASK;
					if (f_op->poll) {
						wait_key_set(wait, in, out,
							     bit, busy_flag);
						mask = (*f_op->poll)(f.file, wait);
					}
					fdput(f);
					if ((mask & POLLIN_SET) && (in & bit)) {
						res_in |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					if ((mask & POLLOUT_SET) && (out & bit)) {
						res_out |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					if ((mask & POLLEX_SET) && (ex & bit)) {
						res_ex |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					/* got something, stop busy polling */
					if (retval) {
						can_busy_loop = false;
						busy_flag = 0;

					/*
					 * only remember a returned
					 * POLL_BUSY_LOOP if we asked for it
					 */
					} else if (busy_flag & mask)
						can_busy_loop = true;

				}
			}
			if (res_in)
				*rinp = res_in;
			if (res_out)
				*routp = res_out;
			if (res_ex)
				*rexp = res_ex;
			cond_resched();
		}
		wait->_qproc = NULL;
		if (retval || timed_out || signal_pending(current))
			break;
		if (table.error) {
			retval = table.error;
			break;
		}

		/* only if found POLL_BUSY_LOOP sockets && not out of time */
		if (can_busy_loop && !need_resched()) {
			if (!busy_end) {
				busy_end = busy_loop_end_time();
				continue;
			}
			if (!busy_loop_timeout(busy_end))
				continue;
		}
		busy_flag = 0;

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec64_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE,
					   to, slack))
			timed_out = 1;
	}

	poll_freewait(&table);

	return retval;
}
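/*
 * Worked example (illustration only): suppose fd 3 is watched in both the
 * read and write sets and its ->poll returns POLLOUT | POLLWRNORM.  In the
 * scan above, res_out gains bit 3 and retval becomes 1 while res_in stays
 * clear, so userspace sees fd 3 as writable but not readable.
 */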
/*
 * We can actually return ERESTARTSYS instead of EINTR, but I'd
 * like to be certain this leads to no problems. So I return
 * EINTR just for safety.
 *
 * Update: ERESTARTSYS breaks at least the xview clock binary, so
 * I'm trying ERESTARTNOHAND which restarts only when you want to.
 */
int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
			   fd_set __user *exp, struct timespec64 *end_time)
{
	fd_set_bits fds;
	void *bits;
	int ret, max_fds;
	unsigned int size;
	struct fdtable *fdt;
	/* Allocate small arguments on the stack to save memory and be faster */
	long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];

	ret = -EINVAL;
	if (n < 0)
		goto out_nofds;

	/* max_fds can increase, so grab it once to avoid race */
	rcu_read_lock();
	fdt = files_fdtable(current->files);
	max_fds = fdt->max_fds;
	rcu_read_unlock();
	if (n > max_fds)
		n = max_fds;

	/*
	 * We need 6 bitmaps (in/out/ex for both incoming and outgoing),
	 * and since we used fdset we need to allocate memory in units of
	 * long-words.
	 */
	size = FDS_BYTES(n);
	bits = stack_fds;
	if (size > sizeof(stack_fds) / 6) {
		/* Not enough space in on-stack array; must use kmalloc */
		ret = -ENOMEM;
		bits = kmalloc(6 * size, GFP_KERNEL);
		if (!bits)
			goto out_nofds;
	}
	fds.in      = bits;
	fds.out     = bits +   size;
	fds.ex      = bits + 2*size;
	fds.res_in  = bits + 3*size;
	fds.res_out = bits + 4*size;
	fds.res_ex  = bits + 5*size;

	if ((ret = get_fd_set(n, inp, fds.in)) ||
	    (ret = get_fd_set(n, outp, fds.out)) ||
	    (ret = get_fd_set(n, exp, fds.ex)))
		goto out;
	zero_fd_set(n, fds.res_in);
	zero_fd_set(n, fds.res_out);
	zero_fd_set(n, fds.res_ex);

	ret = do_select(n, &fds, end_time);

	if (ret < 0)
		goto out;
	if (!ret) {
		ret = -ERESTARTNOHAND;
		if (signal_pending(current))
			goto out;
		ret = 0;
	}

	if (set_fd_set(n, inp, fds.res_in) ||
	    set_fd_set(n, outp, fds.res_out) ||
	    set_fd_set(n, exp, fds.res_ex))
		ret = -EFAULT;

out:
	if (bits != stack_fds)
		kfree(bits);
out_nofds:
	return ret;
}

SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct timeval __user *, tvp)
{
	struct timespec64 end_time, *to = NULL;
	struct timeval tv;
	int ret;

	if (tvp) {
		if (copy_from_user(&tv, tvp, sizeof(tv)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to,
				tv.tv_sec + (tv.tv_usec / USEC_PER_SEC),
				(tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC))
			return -EINVAL;
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	ret = poll_select_copy_remaining(&end_time, tvp, 1, ret);

	return ret;
}
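/*
 * Userspace view (illustration only, not kernel code): because
 * poll_select_copy_remaining() writes back the time not slept, a portable
 * caller must reinitialize the timeval on every call, unless the task's
 * personality has STICKY_TIMEOUTS:
 *
 *	struct timeval tv = { .tv_sec = 2, .tv_usec = 500000 };
 *	int r = select(nfds, &rset, NULL, NULL, &tv);
 *	// on Linux, tv now holds the remaining time (possibly {0, 0})
 */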
static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp,
		       fd_set __user *exp, struct timespec __user *tsp,
		       const sigset_t __user *sigmask, size_t sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec ts;
	struct timespec64 ts64, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (copy_from_user(&ts, tsp, sizeof(ts)))
			return -EFAULT;
		ts64 = timespec_to_timespec64(ts);

		to = &end_time;
		if (poll_select_set_timeout(to, ts64.tv_sec, ts64.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		/* XXX: Don't preclude handling different sized sigset_t's.  */
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

	if (ret == -ERESTARTNOHAND) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
			       sizeof(sigsaved));
			set_restore_sigmask();
		}
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return ret;
}

/*
 * Most architectures can't handle 7-argument syscalls. So we provide a
 * 6-argument version where the sixth argument is a pointer to a structure
 * which has a pointer to the sigset_t itself followed by a size_t containing
 * the sigset size.
 */
SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct timespec __user *, tsp,
		void __user *, sig)
{
	size_t sigsetsize = 0;
	sigset_t __user *up = NULL;

	if (sig) {
		if (!access_ok(VERIFY_READ, sig, sizeof(void *)+sizeof(size_t))
		    || __get_user(up, (sigset_t __user * __user *)sig)
		    || __get_user(sigsetsize,
				(size_t __user *)(sig+sizeof(void *))))
			return -EFAULT;
	}

	return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize);
}

#ifdef __ARCH_WANT_SYS_OLD_SELECT
struct sel_arg_struct {
	unsigned long n;
	fd_set __user *inp, *outp, *exp;
	struct timeval __user *tvp;
};

SYSCALL_DEFINE1(old_select, struct sel_arg_struct __user *, arg)
{
	struct sel_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}
#endif

struct poll_list {
	struct poll_list *next;
	int len;
	struct pollfd entries[0];
};

#define POLLFD_PER_PAGE  ((PAGE_SIZE-sizeof(struct poll_list)) / sizeof(struct pollfd))
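/*
 * Worked example (illustration only; the numbers assume 4096-byte pages
 * and a 64-bit build where sizeof(struct poll_list) == 16 and
 * sizeof(struct pollfd) == 8): POLLFD_PER_PAGE is (4096 - 16) / 8 = 510,
 * so do_sys_poll() below splits e.g. nfds == 1200 into an on-stack block
 * of N_STACK_PPS entries followed by kmalloc'd chunks of at most 510.
 */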
/*
 * Fish for pollable events on the pollfd->fd file descriptor. We're only
 * interested in events matching the pollfd->events mask, and the result
 * matching that mask is both recorded in pollfd->revents and returned. The
 * pwait poll_table will be used by the fd-provided poll handler for waiting,
 * if pwait->_qproc is non-NULL.
 */
static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait,
				     bool *can_busy_poll,
				     unsigned int busy_flag)
{
	unsigned int mask;
	int fd;

	mask = 0;
	fd = pollfd->fd;
	if (fd >= 0) {
		struct fd f = fdget(fd);
		mask = POLLNVAL;
		if (f.file) {
			mask = DEFAULT_POLLMASK;
			if (f.file->f_op->poll) {
				pwait->_key = pollfd->events|POLLERR|POLLHUP;
				pwait->_key |= busy_flag;
				mask = f.file->f_op->poll(f.file, pwait);
				if (mask & busy_flag)
					*can_busy_poll = true;
			}
			/* Mask out unneeded events. */
			mask &= pollfd->events | POLLERR | POLLHUP;
			fdput(f);
		}
	}
	pollfd->revents = mask;

	return mask;
}
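/*
 * Worked example (illustration only): events == POLLIN on a socket whose
 * peer has closed.  If its ->poll returns POLLIN|POLLRDNORM|POLLHUP, the
 * masking above yields revents == POLLIN|POLLHUP: POLLERR and POLLHUP are
 * always reported, even when not requested in events.
 */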
static int do_poll(struct poll_list *list, struct poll_wqueues *wait,
		   struct timespec64 *end_time)
{
	poll_table* pt = &wait->pt;
	ktime_t expire, *to = NULL;
	int timed_out = 0, count = 0;
	u64 slack = 0;
	unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
	unsigned long busy_end = 0;

	/* Optimise the no-wait case */
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		pt->_qproc = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = select_estimate_accuracy(end_time);

	for (;;) {
		struct poll_list *walk;
		bool can_busy_loop = false;

		for (walk = list; walk != NULL; walk = walk->next) {
			struct pollfd * pfd, * pfd_end;

			pfd = walk->entries;
			pfd_end = pfd + walk->len;
			for (; pfd != pfd_end; pfd++) {
				/*
				 * Fish for events. If we found one, record it
				 * and kill poll_table->_qproc, so we don't
				 * needlessly register any other waiters after
				 * this. They'll get immediately deregistered
				 * when we break out and return.
				 */
				if (do_pollfd(pfd, pt, &can_busy_loop,
					      busy_flag)) {
					count++;
					pt->_qproc = NULL;
					/* found something, stop busy polling */
					busy_flag = 0;
					can_busy_loop = false;
				}
			}
		}
		/*
		 * All waiters have already been registered, so don't provide
		 * a poll_table->_qproc to them on the next loop iteration.
		 */
		pt->_qproc = NULL;
		if (!count) {
			count = wait->error;
			if (signal_pending(current))
				count = -EINTR;
		}
		if (count || timed_out)
			break;

		/* only if found POLL_BUSY_LOOP sockets && not out of time */
		if (can_busy_loop && !need_resched()) {
			if (!busy_end) {
				busy_end = busy_loop_end_time();
				continue;
			}
			if (!busy_loop_timeout(busy_end))
				continue;
		}
		busy_flag = 0;

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec64_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(wait, TASK_INTERRUPTIBLE, to, slack))
			timed_out = 1;
	}
	return count;
}

#define N_STACK_PPS ((sizeof(stack_pps) - sizeof(struct poll_list))  / \
			sizeof(struct pollfd))

int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
		struct timespec64 *end_time)
{
	struct poll_wqueues table;
	int err = -EFAULT, fdcount, len, size;
	/* Allocate small arguments on the stack to save memory and be
	   faster - use long to make sure the buffer is aligned properly
	   on 64 bit archs to avoid unaligned access */
	long stack_pps[POLL_STACK_ALLOC/sizeof(long)];
	struct poll_list *const head = (struct poll_list *)stack_pps;
	struct poll_list *walk = head;
	unsigned long todo = nfds;

	if (nfds > rlimit(RLIMIT_NOFILE))
		return -EINVAL;

	len = min_t(unsigned int, nfds, N_STACK_PPS);
	for (;;) {
		walk->next = NULL;
		walk->len = len;
		if (!len)
			break;

		if (copy_from_user(walk->entries, ufds + nfds-todo,
					sizeof(struct pollfd) * walk->len))
			goto out_fds;

		todo -= walk->len;
		if (!todo)
			break;

		len = min(todo, POLLFD_PER_PAGE);
		size = sizeof(struct poll_list) + sizeof(struct pollfd) * len;
		walk = walk->next = kmalloc(size, GFP_KERNEL);
		if (!walk) {
			err = -ENOMEM;
			goto out_fds;
		}
	}

	poll_initwait(&table);
	fdcount = do_poll(head, &table, end_time);
	poll_freewait(&table);

	for (walk = head; walk; walk = walk->next) {
		struct pollfd *fds = walk->entries;
		int j;

		for (j = 0; j < walk->len; j++, ufds++)
			if (__put_user(fds[j].revents, &ufds->revents))
				goto out_fds;
	}

	err = fdcount;
out_fds:
	walk = head->next;
	while (walk) {
		struct poll_list *pos = walk;
		walk = walk->next;
		kfree(pos);
	}

	return err;
}

static long do_restart_poll(struct restart_block *restart_block)
{
	struct pollfd __user *ufds = restart_block->poll.ufds;
	int nfds = restart_block->poll.nfds;
	struct timespec64 *to = NULL, end_time;
	int ret;

	if (restart_block->poll.has_timeout) {
		end_time.tv_sec = restart_block->poll.tv_sec;
		end_time.tv_nsec = restart_block->poll.tv_nsec;
		to = &end_time;
	}

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -EINTR) {
		restart_block->fn = do_restart_poll;
		ret = -ERESTART_RESTARTBLOCK;
	}
	return ret;
}

SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
		int, timeout_msecs)
{
	struct timespec64 end_time, *to = NULL;
	int ret;

	if (timeout_msecs >= 0) {
		to = &end_time;
		poll_select_set_timeout(to, timeout_msecs / MSEC_PER_SEC,
			NSEC_PER_MSEC * (timeout_msecs % MSEC_PER_SEC));
	}

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -EINTR) {
		struct restart_block *restart_block;

		restart_block = &current->restart_block;
		restart_block->fn = do_restart_poll;
		restart_block->poll.ufds = ufds;
		restart_block->poll.nfds = nfds;

		if (timeout_msecs >= 0) {
			restart_block->poll.tv_sec = end_time.tv_sec;
			restart_block->poll.tv_nsec = end_time.tv_nsec;
			restart_block->poll.has_timeout = 1;
		} else
			restart_block->poll.has_timeout = 0;

		ret = -ERESTART_RESTARTBLOCK;
	}
	return ret;
}
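/*
 * Illustration only: poll(2) never honors SA_RESTART (a caught signal
 * always yields EINTR to userspace), but when a signal is delivered with
 * no user handler, e.g. SIGSTOP/SIGCONT, the syscall is transparently
 * restarted through the restart_block set up above.  Because end_time was
 * saved as an absolute expiry, a poll(fds, nfds, 2500) stopped after one
 * second resumes waiting only the remaining ~1.5 seconds, not a fresh 2.5.
 */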
SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds,
		struct timespec __user *, tsp, const sigset_t __user *, sigmask,
		size_t, sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec ts;
	struct timespec64 end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (copy_from_user(&ts, tsp, sizeof(ts)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		/* XXX: Don't preclude handling different sized sigset_t's.  */
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = do_sys_poll(ufds, nfds, to);

	/* We can restart this syscall, usually */
	if (ret == -EINTR) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
			       sizeof(sigsaved));
			set_restore_sigmask();
		}
		ret = -ERESTARTNOHAND;
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

	return ret;
}
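/*
 * Userspace view (illustration only, not kernel code): ppoll() installs
 * *sigmask atomically for the duration of the wait, closing the race that
 * a separate sigprocmask()+poll() sequence would leave open:
 *
 *	sigset_t block, orig;
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGINT);
 *	sigprocmask(SIG_BLOCK, &block, &orig);
 *	// ... check a flag set by the SIGINT handler ...
 *	ppoll(fds, nfds, NULL, &orig);	// SIGINT deliverable only here
 */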