// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains the procedures for the handling of select and poll
 *
 * Created for Linux based loosely upon Mathius Lattner's minix
 * patches by Peter MacDonald. Heavily edited by Linus.
 *
 *  4 February 1994
 *     COFF/ELF binary emulation. If the process has the STICKY_TIMEOUTS
 *     flag set in its personality we do *not* modify the given timeout
 *     parameter to reflect time remaining.
 *
 *  24 January 2000
 *     Changed sys_poll()/do_poll() to use PAGE_SIZE chunk-based allocation
 *     of fds to overcome nfds < 16390 descriptors limit (Tigran Aivazian).
 */

#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/personality.h> /* for STICKY_TIMEOUTS */
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>
#include <linux/freezer.h>
#include <net/busy_poll.h>
#include <linux/vmalloc.h>

#include <linux/uaccess.h>


/*
 * Estimate expected accuracy in ns from a timeval.
 *
 * After quite a bit of churning around, we've settled on
 * a simple thing of taking 0.1% of the timeout as the
 * slack, with a cap of 100 msec.
 * "nice" tasks get a 0.5% slack instead.
 *
 * Consider this comment an open invitation to come up with even
 * better solutions..
 */

#define MAX_SLACK	(100 * NSEC_PER_MSEC)

static long __estimate_accuracy(struct timespec64 *tv)
{
	long slack;
	int divfactor = 1000;

	if (tv->tv_sec < 0)
		return 0;

	if (task_nice(current) > 0)
		divfactor = divfactor / 5;

	if (tv->tv_sec > MAX_SLACK / (NSEC_PER_SEC/divfactor))
		return MAX_SLACK;

	slack = tv->tv_nsec / divfactor;
	slack += tv->tv_sec * (NSEC_PER_SEC/divfactor);

	if (slack > MAX_SLACK)
		return MAX_SLACK;

	return slack;
}

u64 select_estimate_accuracy(struct timespec64 *tv)
{
	u64 ret;
	struct timespec64 now;

	/*
	 * Realtime tasks get a slack of 0 for obvious reasons.
	 */

	if (rt_task(current))
		return 0;

	ktime_get_ts64(&now);
	now = timespec64_sub(*tv, now);
	ret = __estimate_accuracy(&now);
	if (ret < current->timer_slack_ns)
		return current->timer_slack_ns;
	return ret;
}



struct poll_table_page {
	struct poll_table_page * next;
	struct poll_table_entry * entry;
	struct poll_table_entry entries[0];
};

#define POLL_TABLE_FULL(table) \
	((unsigned long)((table)->entry+1) > PAGE_SIZE + (unsigned long)(table))

/*
 * Ok, Peter made a complicated, but straightforward multiple_wait() function.
 * I have rewritten this, taking some shortcuts: This code may not be easy to
 * follow, but it should be free of race-conditions, and it's practical. If you
 * understand what I'm doing here, then you understand how the linux
 * sleep/wakeup mechanism works.
 *
 * Two very simple procedures, poll_wait() and poll_freewait(), do all the
 * work. poll_wait() is an inline-function defined in <linux/poll.h>,
 * as all select/poll functions have to call it to add an entry to the
 * poll table.
 */
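/*
 * Worked example (illustration only): with the 0.1% rule above, a 2 s
 * relative timeout gets 2 ms of slack at nice 0 and 10 ms (0.5%) for a
 * positively niced task; anything larger is clamped to MAX_SLACK.  The
 * hypothetical helper below is compiled out and just exercises
 * __estimate_accuracy().
 */
#if 0	/* illustration, not built */
static long example_two_second_slack(void)
{
	struct timespec64 tv = { .tv_sec = 2, .tv_nsec = 0 };

	return __estimate_accuracy(&tv);	/* 2000000 ns at nice 0 */
}
#endif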
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
		       poll_table *p);

void poll_initwait(struct poll_wqueues *pwq)
{
	init_poll_funcptr(&pwq->pt, __pollwait);
	pwq->polling_task = current;
	pwq->triggered = 0;
	pwq->error = 0;
	pwq->table = NULL;
	pwq->inline_index = 0;
}
EXPORT_SYMBOL(poll_initwait);

static void free_poll_entry(struct poll_table_entry *entry)
{
	remove_wait_queue(entry->wait_address, &entry->wait);
	fput(entry->filp);
}

void poll_freewait(struct poll_wqueues *pwq)
{
	struct poll_table_page * p = pwq->table;
	int i;
	for (i = 0; i < pwq->inline_index; i++)
		free_poll_entry(pwq->inline_entries + i);
	while (p) {
		struct poll_table_entry * entry;
		struct poll_table_page *old;

		entry = p->entry;
		do {
			entry--;
			free_poll_entry(entry);
		} while (entry > p->entries);
		old = p;
		p = p->next;
		free_page((unsigned long) old);
	}
}
EXPORT_SYMBOL(poll_freewait);

static struct poll_table_entry *poll_get_entry(struct poll_wqueues *p)
{
	struct poll_table_page *table = p->table;

	if (p->inline_index < N_INLINE_POLL_ENTRIES)
		return p->inline_entries + p->inline_index++;

	if (!table || POLL_TABLE_FULL(table)) {
		struct poll_table_page *new_table;

		new_table = (struct poll_table_page *) __get_free_page(GFP_KERNEL);
		if (!new_table) {
			p->error = -ENOMEM;
			return NULL;
		}
		new_table->entry = new_table->entries;
		new_table->next = table;
		p->table = new_table;
		table = new_table;
	}

	return table->entry++;
}
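/*
 * Sizing sketch (illustrative arithmetic only): poll_get_entry() hands
 * out the N_INLINE_POLL_ENTRIES entries embedded in struct poll_wqueues
 * first, then falls back to whole pages.  Assuming, hypothetically, a
 * 4 KiB page, a 16-byte poll_table_page header and a 72-byte
 * poll_table_entry, each overflow page holds (4096 - 16) / 72 = 56
 * entries; POLL_TABLE_FULL() then triggers allocation of the next page.
 * Exact counts depend on the architecture's structure sizes.
 */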
static int __pollwake(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_wqueues *pwq = wait->private;
	DECLARE_WAITQUEUE(dummy_wait, pwq->polling_task);

	/*
	 * Although this function is called under waitqueue lock, LOCK
	 * doesn't imply write barrier and the users expect write
	 * barrier semantics on wakeup functions.  The following
	 * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
	 * and is paired with smp_store_mb() in poll_schedule_timeout.
	 */
	smp_wmb();
	pwq->triggered = 1;

	/*
	 * Perform the default wake up operation using a dummy
	 * waitqueue.
	 *
	 * TODO: This is hacky but there currently is no interface to
	 * pass in @sync.  @sync is scheduled to be removed and once
	 * that happens, wake_up_process() can be used directly.
	 */
	return default_wake_function(&dummy_wait, mode, sync, key);
}

static int pollwake(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_table_entry *entry;

	entry = container_of(wait, struct poll_table_entry, wait);
	if (key && !((unsigned long)key & entry->key))
		return 0;
	return __pollwake(wait, mode, sync, key);
}

/* Add a new entry */
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
				poll_table *p)
{
	struct poll_wqueues *pwq = container_of(p, struct poll_wqueues, pt);
	struct poll_table_entry *entry = poll_get_entry(pwq);
	if (!entry)
		return;
	entry->filp = get_file(filp);
	entry->wait_address = wait_address;
	entry->key = p->_key;
	init_waitqueue_func_entry(&entry->wait, pollwake);
	entry->wait.private = pwq;
	add_wait_queue(wait_address, &entry->wait);
}

int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
			  ktime_t *expires, unsigned long slack)
{
	int rc = -EINTR;

	set_current_state(state);
	if (!pwq->triggered)
		rc = schedule_hrtimeout_range(expires, slack, HRTIMER_MODE_ABS);
	__set_current_state(TASK_RUNNING);

	/*
	 * Prepare for the next iteration.
	 *
	 * The following smp_store_mb() serves two purposes.  First, it's
	 * the counterpart rmb of the wmb in pollwake() such that data
	 * written before wake up is always visible after wake up.
	 * Second, the full barrier guarantees that triggered clearing
	 * doesn't pass event check of the next iteration.  Note that
	 * this problem doesn't exist for the first iteration as
	 * add_wait_queue() has full barrier semantics.
	 */
	smp_store_mb(pwq->triggered, 0);

	return rc;
}
EXPORT_SYMBOL(poll_schedule_timeout);
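/*
 * Usage sketch (illustration only, compiled out): poll_select_set_timeout()
 * below turns a user-supplied *relative* timeout into an *absolute*
 * monotonic expiry, so a 1.5 s timeout is passed as sec = 1,
 * nsec = 500000000 and comes back as "now + 1.5 s".
 */
#if 0	/* illustration, not built */
static int example_absolute_expiry(struct timespec64 *end_time)
{
	/* returns -EINVAL if the pair is not a normalized timespec */
	return poll_select_set_timeout(end_time, 1, 500 * NSEC_PER_MSEC);
}
#endif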
/**
 * poll_select_set_timeout - helper function to setup the timeout value
 * @to:		pointer to timespec64 variable for the final timeout
 * @sec:	seconds (from user space)
 * @nsec:	nanoseconds (from user space)
 *
 * Note, we do not use a timespec for the user space value here.  That
 * way we can use the function for timeval and compat interfaces as well.
 *
 * Returns -EINVAL if sec/nsec are not normalized.  Otherwise 0.
 */
int poll_select_set_timeout(struct timespec64 *to, time64_t sec, long nsec)
{
	struct timespec64 ts = {.tv_sec = sec, .tv_nsec = nsec};

	if (!timespec64_valid(&ts))
		return -EINVAL;

	/* Optimize for the zero timeout value here */
	if (!sec && !nsec) {
		to->tv_sec = to->tv_nsec = 0;
	} else {
		ktime_get_ts64(to);
		*to = timespec64_add_safe(*to, ts);
	}
	return 0;
}

static int poll_select_copy_remaining(struct timespec64 *end_time,
				      void __user *p,
				      int timeval, int ret)
{
	struct timespec64 rts64;
	struct timespec rts;
	struct timeval rtv;

	if (!p)
		return ret;

	if (current->personality & STICKY_TIMEOUTS)
		goto sticky;

	/* No update for zero timeout */
	if (!end_time->tv_sec && !end_time->tv_nsec)
		return ret;

	ktime_get_ts64(&rts64);
	rts64 = timespec64_sub(*end_time, rts64);
	if (rts64.tv_sec < 0)
		rts64.tv_sec = rts64.tv_nsec = 0;

	rts = timespec64_to_timespec(rts64);

	if (timeval) {
		if (sizeof(rtv) > sizeof(rtv.tv_sec) + sizeof(rtv.tv_usec))
			memset(&rtv, 0, sizeof(rtv));
		rtv.tv_sec = rts64.tv_sec;
		rtv.tv_usec = rts64.tv_nsec / NSEC_PER_USEC;

		if (!copy_to_user(p, &rtv, sizeof(rtv)))
			return ret;

	} else if (!copy_to_user(p, &rts, sizeof(rts)))
		return ret;

	/*
	 * If an application puts its timeval in read-only memory, we
	 * don't want the Linux-specific update to the timeval to
	 * cause a fault after the select has completed
	 * successfully.  However, because we're not updating the
	 * timeval, we can't restart the system call.
	 */

sticky:
	if (ret == -ERESTARTNOHAND)
		ret = -EINTR;
	return ret;
}

/*
 * Scalable version of the fd_set.
 */

typedef struct {
	unsigned long *in, *out, *ex;
	unsigned long *res_in, *res_out, *res_ex;
} fd_set_bits;

/*
 * How many longwords for "nr" bits?
 */
#define FDS_BITPERLONG	(8*sizeof(long))
#define FDS_LONGS(nr)	(((nr)+FDS_BITPERLONG-1)/FDS_BITPERLONG)
#define FDS_BYTES(nr)	(FDS_LONGS(nr)*sizeof(long))
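/*
 * Worked example: on a 64-bit arch FDS_LONGS(1024) = (1024 + 63) / 64
 * = 16 longwords, so FDS_BYTES(1024) = 128 bytes per bitmap; a single
 * extra bit (nr = 65) still rounds up to two longwords (16 bytes).
 */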
/*
 * We do a VERIFY_WRITE here even though we are only reading this time:
 * we'll write to it eventually..
 *
 * Use "unsigned long" accesses to let user-mode fd_set's be long-aligned.
 */
static inline
int get_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)
{
	nr = FDS_BYTES(nr);
	if (ufdset)
		return copy_from_user(fdset, ufdset, nr) ? -EFAULT : 0;

	memset(fdset, 0, nr);
	return 0;
}

static inline unsigned long __must_check
set_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)
{
	if (ufdset)
		return __copy_to_user(ufdset, fdset, FDS_BYTES(nr));
	return 0;
}

static inline
void zero_fd_set(unsigned long nr, unsigned long *fdset)
{
	memset(fdset, 0, FDS_BYTES(nr));
}

#define FDS_IN(fds, n)		(fds->in + n)
#define FDS_OUT(fds, n)		(fds->out + n)
#define FDS_EX(fds, n)		(fds->ex + n)

#define BITS(fds, n)	(*FDS_IN(fds, n)|*FDS_OUT(fds, n)|*FDS_EX(fds, n))

static int max_select_fd(unsigned long n, fd_set_bits *fds)
{
	unsigned long *open_fds;
	unsigned long set;
	int max;
	struct fdtable *fdt;

	/* handle last in-complete long-word first */
	set = ~(~0UL << (n & (BITS_PER_LONG-1)));
	n /= BITS_PER_LONG;
	fdt = files_fdtable(current->files);
	open_fds = fdt->open_fds + n;
	max = 0;
	if (set) {
		set &= BITS(fds, n);
		if (set) {
			if (!(set & ~*open_fds))
				goto get_max;
			return -EBADF;
		}
	}
	while (n) {
		open_fds--;
		n--;
		set = BITS(fds, n);
		if (!set)
			continue;
		if (set & ~*open_fds)
			return -EBADF;
		if (max)
			continue;
get_max:
		do {
			max++;
			set >>= 1;
		} while (set);
		max += n * BITS_PER_LONG;
	}

	return max;
}

#define POLLIN_SET	(POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
#define POLLOUT_SET	(POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
#define POLLEX_SET	(POLLPRI)

static inline void wait_key_set(poll_table *wait, unsigned long in,
				unsigned long out, unsigned long bit,
				unsigned int ll_flag)
{
	wait->_key = POLLEX_SET | ll_flag;
	if (in & bit)
		wait->_key |= POLLIN_SET;
	if (out & bit)
		wait->_key |= POLLOUT_SET;
}
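/*
 * Worked example: for a descriptor that select() watches for reading
 * only, wait_key_set() builds _key = POLLEX_SET | POLLIN_SET, i.e.
 * POLLPRI | POLLIN | POLLRDNORM | POLLRDBAND | POLLHUP | POLLERR, so
 * pollwake() still fires on hangup or error even though the caller only
 * asked about reads.
 */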
static int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time)
{
	ktime_t expire, *to = NULL;
	struct poll_wqueues table;
	poll_table *wait;
	int retval, i, timed_out = 0;
	u64 slack = 0;
	unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
	unsigned long busy_start = 0;

	rcu_read_lock();
	retval = max_select_fd(n, fds);
	rcu_read_unlock();

	if (retval < 0)
		return retval;
	n = retval;

	poll_initwait(&table);
	wait = &table.pt;
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		wait->_qproc = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = select_estimate_accuracy(end_time);

	retval = 0;
	for (;;) {
		unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;
		bool can_busy_loop = false;

		inp = fds->in; outp = fds->out; exp = fds->ex;
		rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex;

		for (i = 0; i < n; ++rinp, ++routp, ++rexp) {
			unsigned long in, out, ex, all_bits, bit = 1, mask, j;
			unsigned long res_in = 0, res_out = 0, res_ex = 0;

			in = *inp++; out = *outp++; ex = *exp++;
			all_bits = in | out | ex;
			if (all_bits == 0) {
				i += BITS_PER_LONG;
				continue;
			}

			for (j = 0; j < BITS_PER_LONG; ++j, ++i, bit <<= 1) {
				struct fd f;
				if (i >= n)
					break;
				if (!(bit & all_bits))
					continue;
				f = fdget(i);
				if (f.file) {
					const struct file_operations *f_op;
					f_op = f.file->f_op;
					mask = DEFAULT_POLLMASK;
					if (f_op->poll) {
						wait_key_set(wait, in, out,
							     bit, busy_flag);
						mask = (*f_op->poll)(f.file, wait);
					}
					fdput(f);
					if ((mask & POLLIN_SET) && (in & bit)) {
						res_in |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					if ((mask & POLLOUT_SET) && (out & bit)) {
						res_out |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					if ((mask & POLLEX_SET) && (ex & bit)) {
						res_ex |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					/* got something, stop busy polling */
					if (retval) {
						can_busy_loop = false;
						busy_flag = 0;

					/*
					 * only remember a returned
					 * POLL_BUSY_LOOP if we asked for it
					 */
					} else if (busy_flag & mask)
						can_busy_loop = true;

				}
			}
			if (res_in)
				*rinp = res_in;
			if (res_out)
				*routp = res_out;
			if (res_ex)
				*rexp = res_ex;
			cond_resched();
		}
		wait->_qproc = NULL;
		if (retval || timed_out || signal_pending(current))
			break;
		if (table.error) {
			retval = table.error;
			break;
		}

		/* only if found POLL_BUSY_LOOP sockets && not out of time */
		if (can_busy_loop && !need_resched()) {
			if (!busy_start) {
				busy_start = busy_loop_current_time();
				continue;
			}
			if (!busy_loop_timeout(busy_start))
				continue;
		}
		busy_flag = 0;

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec64_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE,
					   to, slack))
			timed_out = 1;
	}

	poll_freewait(&table);

	return retval;
}
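/*
 * Sizing sketch (illustrative arithmetic only): core_sys_select() below
 * carves one buffer into six equal bitmaps (in/out/ex plus their result
 * sets).  Assuming SELECT_STACK_ALLOC is 256 bytes, the on-stack path
 * covers FDS_BYTES(n) <= 256 / 6 = 42, i.e. five longwords or up to
 * n = 320 descriptors on a 64-bit arch; anything larger falls back to
 * kvmalloc().
 */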
/*
 * We can actually return ERESTARTSYS instead of EINTR, but I'd
 * like to be certain this leads to no problems.  So I return
 * EINTR just for safety.
 *
 * Update: ERESTARTSYS breaks at least the xview clock binary, so
 * I'm trying ERESTARTNOHAND which restarts only when you want to.
 */
int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
			   fd_set __user *exp, struct timespec64 *end_time)
{
	fd_set_bits fds;
	void *bits;
	int ret, max_fds;
	size_t size, alloc_size;
	struct fdtable *fdt;
	/* Allocate small arguments on the stack to save memory and be faster */
	long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];

	ret = -EINVAL;
	if (n < 0)
		goto out_nofds;

	/* max_fds can increase, so grab it once to avoid race */
	rcu_read_lock();
	fdt = files_fdtable(current->files);
	max_fds = fdt->max_fds;
	rcu_read_unlock();
	if (n > max_fds)
		n = max_fds;

	/*
	 * We need 6 bitmaps (in/out/ex for both incoming and outgoing),
	 * and since we use fd_sets we need to allocate memory in units of
	 * long-words.
	 */
	size = FDS_BYTES(n);
	bits = stack_fds;
	if (size > sizeof(stack_fds) / 6) {
		/* Not enough space in on-stack array; must use kmalloc */
		ret = -ENOMEM;
		if (size > (SIZE_MAX / 6))
			goto out_nofds;

		alloc_size = 6 * size;
		bits = kvmalloc(alloc_size, GFP_KERNEL);
		if (!bits)
			goto out_nofds;
	}
	fds.in      = bits;
	fds.out     = bits +   size;
	fds.ex      = bits + 2*size;
	fds.res_in  = bits + 3*size;
	fds.res_out = bits + 4*size;
	fds.res_ex  = bits + 5*size;

	if ((ret = get_fd_set(n, inp, fds.in)) ||
	    (ret = get_fd_set(n, outp, fds.out)) ||
	    (ret = get_fd_set(n, exp, fds.ex)))
		goto out;
	zero_fd_set(n, fds.res_in);
	zero_fd_set(n, fds.res_out);
	zero_fd_set(n, fds.res_ex);

	ret = do_select(n, &fds, end_time);

	if (ret < 0)
		goto out;
	if (!ret) {
		ret = -ERESTARTNOHAND;
		if (signal_pending(current))
			goto out;
		ret = 0;
	}

	if (set_fd_set(n, inp, fds.res_in) ||
	    set_fd_set(n, outp, fds.res_out) ||
	    set_fd_set(n, exp, fds.res_ex))
		ret = -EFAULT;

out:
	if (bits != stack_fds)
		kvfree(bits);
out_nofds:
	return ret;
}

SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct timeval __user *, tvp)
{
	struct timespec64 end_time, *to = NULL;
	struct timeval tv;
	int ret;

	if (tvp) {
		if (copy_from_user(&tv, tvp, sizeof(tv)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to,
				tv.tv_sec + (tv.tv_usec / USEC_PER_SEC),
				(tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC))
			return -EINVAL;
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	ret = poll_select_copy_remaining(&end_time, tvp, 1, ret);

	return ret;
}
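/*
 * Worked example: the tv_sec/tv_usec split above also normalizes
 * denormal user input, e.g. tv = { .tv_sec = 1, .tv_usec = 2500000 }
 * becomes sec = 1 + 2 = 3 and nsec = 500000 * 1000 = 500000000, which
 * then passes timespec64_valid() inside poll_select_set_timeout().
 */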
static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp,
		       fd_set __user *exp, struct timespec __user *tsp,
		       const sigset_t __user *sigmask, size_t sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec ts;
	struct timespec64 ts64, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (copy_from_user(&ts, tsp, sizeof(ts)))
			return -EFAULT;
		ts64 = timespec_to_timespec64(ts);

		to = &end_time;
		if (poll_select_set_timeout(to, ts64.tv_sec, ts64.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		/* XXX: Don't preclude handling different sized sigset_t's. */
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

	if (ret == -ERESTARTNOHAND) {
		/*
		 * Don't restore the signal mask yet.  Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
			       sizeof(sigsaved));
			set_restore_sigmask();
		}
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return ret;
}

/*
 * Most architectures can't handle 7-argument syscalls.  So we provide a
 * 6-argument version where the sixth argument is a pointer to a structure
 * which has a pointer to the sigset_t itself followed by a size_t containing
 * the sigset size.
 */
SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct timespec __user *, tsp,
		void __user *, sig)
{
	size_t sigsetsize = 0;
	sigset_t __user *up = NULL;

	if (sig) {
		if (!access_ok(VERIFY_READ, sig, sizeof(void *)+sizeof(size_t))
		    || __get_user(up, (sigset_t __user * __user *)sig)
		    || __get_user(sigsetsize,
				(size_t __user *)(sig+sizeof(void *))))
			return -EFAULT;
	}

	return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize);
}

#ifdef __ARCH_WANT_SYS_OLD_SELECT
struct sel_arg_struct {
	unsigned long n;
	fd_set __user *inp, *outp, *exp;
	struct timeval __user *tvp;
};

SYSCALL_DEFINE1(old_select, struct sel_arg_struct __user *, arg)
{
	struct sel_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}
#endif

struct poll_list {
	struct poll_list *next;
	int len;
	struct pollfd entries[0];
};

#define POLLFD_PER_PAGE  ((PAGE_SIZE-sizeof(struct poll_list)) / sizeof(struct pollfd))
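/*
 * Worked example (illustrative arithmetic only): on a 64-bit arch with
 * 4 KiB pages, sizeof(struct poll_list) is 16 bytes (pointer, int,
 * padding) and sizeof(struct pollfd) is 8, so POLLFD_PER_PAGE works out
 * to (4096 - 16) / 8 = 510 pollfds per chained chunk.
 */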
/*
 * Fish for pollable events on the pollfd->fd file descriptor.  We're only
 * interested in events matching the pollfd->events mask, and the result
 * matching that mask is both recorded in pollfd->revents and returned.  The
 * pwait poll_table will be used by the fd-provided poll handler for waiting,
 * if pwait->_qproc is non-NULL.
 */
static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait,
				     bool *can_busy_poll,
				     unsigned int busy_flag)
{
	unsigned int mask;
	int fd;

	mask = 0;
	fd = pollfd->fd;
	if (fd >= 0) {
		struct fd f = fdget(fd);
		mask = POLLNVAL;
		if (f.file) {
			mask = DEFAULT_POLLMASK;
			if (f.file->f_op->poll) {
				pwait->_key = pollfd->events|POLLERR|POLLHUP;
				pwait->_key |= busy_flag;
				mask = f.file->f_op->poll(f.file, pwait);
				if (mask & busy_flag)
					*can_busy_poll = true;
			}
			/* Mask out unneeded events. */
			mask &= pollfd->events | POLLERR | POLLHUP;
			fdput(f);
		}
	}
	pollfd->revents = mask;

	return mask;
}
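/*
 * Behaviour sketch: a pollfd with a negative fd is skipped (revents
 * ends up 0), a closed/stale fd yields POLLNVAL, and for a live fd the
 * driver's mask is filtered so only the requested events plus the
 * always-reported POLLERR | POLLHUP survive into revents; e.g.
 * events = POLLIN never reports POLLOUT.
 */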
static int do_poll(struct poll_list *list, struct poll_wqueues *wait,
		   struct timespec64 *end_time)
{
	poll_table* pt = &wait->pt;
	ktime_t expire, *to = NULL;
	int timed_out = 0, count = 0;
	u64 slack = 0;
	unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
	unsigned long busy_start = 0;

	/* Optimise the no-wait case */
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		pt->_qproc = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = select_estimate_accuracy(end_time);

	for (;;) {
		struct poll_list *walk;
		bool can_busy_loop = false;

		for (walk = list; walk != NULL; walk = walk->next) {
			struct pollfd * pfd, * pfd_end;

			pfd = walk->entries;
			pfd_end = pfd + walk->len;
			for (; pfd != pfd_end; pfd++) {
				/*
				 * Fish for events.  If we found one, record it
				 * and kill poll_table->_qproc, so we don't
				 * needlessly register any other waiters after
				 * this.  They'll get immediately deregistered
				 * when we break out and return.
				 */
				if (do_pollfd(pfd, pt, &can_busy_loop,
					      busy_flag)) {
					count++;
					pt->_qproc = NULL;
					/* found something, stop busy polling */
					busy_flag = 0;
					can_busy_loop = false;
				}
			}
		}
		/*
		 * All waiters have already been registered, so don't provide
		 * a poll_table->_qproc to them on the next loop iteration.
		 */
		pt->_qproc = NULL;
		if (!count) {
			count = wait->error;
			if (signal_pending(current))
				count = -EINTR;
		}
		if (count || timed_out)
			break;

		/* only if found POLL_BUSY_LOOP sockets && not out of time */
		if (can_busy_loop && !need_resched()) {
			if (!busy_start) {
				busy_start = busy_loop_current_time();
				continue;
			}
			if (!busy_loop_timeout(busy_start))
				continue;
		}
		busy_flag = 0;

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec64_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(wait, TASK_INTERRUPTIBLE, to, slack))
			timed_out = 1;
	}
	return count;
}

#define N_STACK_PPS ((sizeof(stack_pps) - sizeof(struct poll_list)) / \
			sizeof(struct pollfd))

static int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
		struct timespec64 *end_time)
{
	struct poll_wqueues table;
	int err = -EFAULT, fdcount, len, size;
	/* Allocate small arguments on the stack to save memory and be
	   faster - use long to make sure the buffer is aligned properly
	   on 64 bit archs to avoid unaligned access */
	long stack_pps[POLL_STACK_ALLOC/sizeof(long)];
	struct poll_list *const head = (struct poll_list *)stack_pps;
	struct poll_list *walk = head;
	unsigned long todo = nfds;

	if (nfds > rlimit(RLIMIT_NOFILE))
		return -EINVAL;

	len = min_t(unsigned int, nfds, N_STACK_PPS);
	for (;;) {
		walk->next = NULL;
		walk->len = len;
		if (!len)
			break;

		if (copy_from_user(walk->entries, ufds + nfds-todo,
					sizeof(struct pollfd) * walk->len))
			goto out_fds;

		todo -= walk->len;
		if (!todo)
			break;

		len = min(todo, POLLFD_PER_PAGE);
		size = sizeof(struct poll_list) + sizeof(struct pollfd) * len;
		walk = walk->next = kmalloc(size, GFP_KERNEL);
		if (!walk) {
			err = -ENOMEM;
			goto out_fds;
		}
	}

	poll_initwait(&table);
	fdcount = do_poll(head, &table, end_time);
	poll_freewait(&table);

	for (walk = head; walk; walk = walk->next) {
		struct pollfd *fds = walk->entries;
		int j;

		for (j = 0; j < walk->len; j++, ufds++)
			if (__put_user(fds[j].revents, &ufds->revents))
				goto out_fds;
	}

	err = fdcount;
out_fds:
	walk = head->next;
	while (walk) {
		struct poll_list *pos = walk;
		walk = walk->next;
		kfree(pos);
	}

	return err;
}

static long do_restart_poll(struct restart_block *restart_block)
{
	struct pollfd __user *ufds = restart_block->poll.ufds;
	int nfds = restart_block->poll.nfds;
	struct timespec64 *to = NULL, end_time;
	int ret;

	if (restart_block->poll.has_timeout) {
		end_time.tv_sec = restart_block->poll.tv_sec;
		end_time.tv_nsec = restart_block->poll.tv_nsec;
		to = &end_time;
	}

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -EINTR) {
		restart_block->fn = do_restart_poll;
		ret = -ERESTART_RESTARTBLOCK;
	}
	return ret;
}

SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
		int, timeout_msecs)
{
	struct timespec64 end_time, *to = NULL;
	int ret;

	if (timeout_msecs >= 0) {
		to = &end_time;
		poll_select_set_timeout(to, timeout_msecs / MSEC_PER_SEC,
			NSEC_PER_MSEC * (timeout_msecs % MSEC_PER_SEC));
	}

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -EINTR) {
		struct restart_block *restart_block;

		restart_block = &current->restart_block;
		restart_block->fn = do_restart_poll;
		restart_block->poll.ufds = ufds;
		restart_block->poll.nfds = nfds;

		if (timeout_msecs >= 0) {
			restart_block->poll.tv_sec = end_time.tv_sec;
			restart_block->poll.tv_nsec = end_time.tv_nsec;
			restart_block->poll.has_timeout = 1;
		} else
			restart_block->poll.has_timeout = 0;

		ret = -ERESTART_RESTARTBLOCK;
	}
	return ret;
}

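/*
 * Worked example: sys_poll's millisecond timeout is decomposed above,
 * e.g. timeout_msecs = 2500 becomes sec = 2 and
 * nsec = 500 * NSEC_PER_MSEC = 500000000; a negative timeout leaves
 * "to" NULL, which poll_schedule_timeout() treats as "sleep until
 * woken".
 */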
SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds,
		struct timespec __user *, tsp, const sigset_t __user *, sigmask,
		size_t, sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec ts;
	struct timespec64 end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (copy_from_user(&ts, tsp, sizeof(ts)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		/* XXX: Don't preclude handling different sized sigset_t's. */
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = do_sys_poll(ufds, nfds, to);

	/* We can restart this syscall, usually */
	if (ret == -EINTR) {
		/*
		 * Don't restore the signal mask yet.  Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
			       sizeof(sigsaved));
			set_restore_sigmask();
		}
		ret = -ERESTARTNOHAND;
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

	return ret;
}

#ifdef CONFIG_COMPAT
#define __COMPAT_NFDBITS       (8 * sizeof(compat_ulong_t))

static
int compat_poll_select_copy_remaining(struct timespec64 *end_time, void __user *p,
				      int timeval, int ret)
{
	struct timespec64 ts;

	if (!p)
		return ret;

	if (current->personality & STICKY_TIMEOUTS)
		goto sticky;

	/* No update for zero timeout */
	if (!end_time->tv_sec && !end_time->tv_nsec)
		return ret;

	ktime_get_ts64(&ts);
	ts = timespec64_sub(*end_time, ts);
	if (ts.tv_sec < 0)
		ts.tv_sec = ts.tv_nsec = 0;

	if (timeval) {
		struct compat_timeval rtv;

		rtv.tv_sec = ts.tv_sec;
		rtv.tv_usec = ts.tv_nsec / NSEC_PER_USEC;

		if (!copy_to_user(p, &rtv, sizeof(rtv)))
			return ret;
	} else {
		struct compat_timespec rts;

		rts.tv_sec = ts.tv_sec;
		rts.tv_nsec = ts.tv_nsec;

		if (!copy_to_user(p, &rts, sizeof(rts)))
			return ret;
	}
	/*
	 * If an application puts its timeval in read-only memory, we
	 * don't want the Linux-specific update to the timeval to
	 * cause a fault after the select has completed
	 * successfully.  However, because we're not updating the
	 * timeval, we can't restart the system call.
	 */

sticky:
	if (ret == -ERESTARTNOHAND)
		ret = -EINTR;
	return ret;
}
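/*
 * Worked example (little-endian sketch): a compat fd_set is an array of
 * 32-bit words.  On a little-endian 64-bit kernel, the user words
 * { 0x00000001, 0x00000002 } (fds 0 and 33 set) are merged by
 * compat_get_bitmap() into the single native long 0x0000000200000001,
 * so the generic do_select() bit-walking works unchanged.
 */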
/*
 * Ooo, nasty.  We need here to frob 32-bit unsigned longs to
 * 64-bit unsigned longs.
 */
static
int compat_get_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
			unsigned long *fdset)
{
	if (ufdset) {
		return compat_get_bitmap(fdset, ufdset, nr);
	} else {
		zero_fd_set(nr, fdset);
		return 0;
	}
}

static
int compat_set_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
		      unsigned long *fdset)
{
	if (!ufdset)
		return 0;
	return compat_put_bitmap(ufdset, fdset, nr);
}


/*
 * This is a virtual copy of sys_select from fs/select.c and probably
 * should be compared to it from time to time
 */

/*
 * We can actually return ERESTARTSYS instead of EINTR, but I'd
 * like to be certain this leads to no problems.  So I return
 * EINTR just for safety.
 *
 * Update: ERESTARTSYS breaks at least the xview clock binary, so
 * I'm trying ERESTARTNOHAND which restarts only when you want to.
 */
static int compat_core_sys_select(int n, compat_ulong_t __user *inp,
	compat_ulong_t __user *outp, compat_ulong_t __user *exp,
	struct timespec64 *end_time)
{
	fd_set_bits fds;
	void *bits;
	int size, max_fds, ret = -EINVAL;
	struct fdtable *fdt;
	long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];

	if (n < 0)
		goto out_nofds;

	/* max_fds can increase, so grab it once to avoid race */
	rcu_read_lock();
	fdt = files_fdtable(current->files);
	max_fds = fdt->max_fds;
	rcu_read_unlock();
	if (n > max_fds)
		n = max_fds;

	/*
	 * We need 6 bitmaps (in/out/ex for both incoming and outgoing),
	 * and since we use fd_sets we need to allocate memory in units of
	 * long-words.
	 */
	size = FDS_BYTES(n);
	bits = stack_fds;
	if (size > sizeof(stack_fds) / 6) {
		bits = kmalloc(6 * size, GFP_KERNEL);
		ret = -ENOMEM;
		if (!bits)
			goto out_nofds;
	}
	fds.in      = (unsigned long *)  bits;
	fds.out     = (unsigned long *) (bits +   size);
	fds.ex      = (unsigned long *) (bits + 2*size);
	fds.res_in  = (unsigned long *) (bits + 3*size);
	fds.res_out = (unsigned long *) (bits + 4*size);
	fds.res_ex  = (unsigned long *) (bits + 5*size);

	if ((ret = compat_get_fd_set(n, inp, fds.in)) ||
	    (ret = compat_get_fd_set(n, outp, fds.out)) ||
	    (ret = compat_get_fd_set(n, exp, fds.ex)))
		goto out;
	zero_fd_set(n, fds.res_in);
	zero_fd_set(n, fds.res_out);
	zero_fd_set(n, fds.res_ex);

	ret = do_select(n, &fds, end_time);

	if (ret < 0)
		goto out;
	if (!ret) {
		ret = -ERESTARTNOHAND;
		if (signal_pending(current))
			goto out;
		ret = 0;
	}

	if (compat_set_fd_set(n, inp, fds.res_in) ||
	    compat_set_fd_set(n, outp, fds.res_out) ||
	    compat_set_fd_set(n, exp, fds.res_ex))
		ret = -EFAULT;
out:
	if (bits != stack_fds)
		kfree(bits);
out_nofds:
	return ret;
}
COMPAT_SYSCALL_DEFINE5(select, int, n, compat_ulong_t __user *, inp,
	compat_ulong_t __user *, outp, compat_ulong_t __user *, exp,
	struct compat_timeval __user *, tvp)
{
	struct timespec64 end_time, *to = NULL;
	struct compat_timeval tv;
	int ret;

	if (tvp) {
		if (copy_from_user(&tv, tvp, sizeof(tv)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to,
				tv.tv_sec + (tv.tv_usec / USEC_PER_SEC),
				(tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC))
			return -EINVAL;
	}

	ret = compat_core_sys_select(n, inp, outp, exp, to);
	ret = compat_poll_select_copy_remaining(&end_time, tvp, 1, ret);

	return ret;
}

struct compat_sel_arg_struct {
	compat_ulong_t n;
	compat_uptr_t inp;
	compat_uptr_t outp;
	compat_uptr_t exp;
	compat_uptr_t tvp;
};

COMPAT_SYSCALL_DEFINE1(old_select, struct compat_sel_arg_struct __user *, arg)
{
	struct compat_sel_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	return compat_sys_select(a.n, compat_ptr(a.inp), compat_ptr(a.outp),
				 compat_ptr(a.exp), compat_ptr(a.tvp));
}
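/*
 * Layout sketch (illustration only): for the compat pselect6 entry
 * point below, a 32-bit libc packs the sixth argument as two 32-bit
 * fields, effectively { compat_uptr_t sigmask; compat_size_t
 * sigsetsize; }; compat pselect6 unpacks them with __get_user(), and
 * sigset_from_compat() then widens the 32-bit mask to a native
 * sigset_t.
 */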
static long do_compat_pselect(int n, compat_ulong_t __user *inp,
	compat_ulong_t __user *outp, compat_ulong_t __user *exp,
	struct compat_timespec __user *tsp, compat_sigset_t __user *sigmask,
	compat_size_t sigsetsize)
{
	compat_sigset_t ss32;
	sigset_t ksigmask, sigsaved;
	struct compat_timespec ts;
	struct timespec64 end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (copy_from_user(&ts, tsp, sizeof(ts)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		if (sigsetsize != sizeof(compat_sigset_t))
			return -EINVAL;
		if (copy_from_user(&ss32, sigmask, sizeof(ss32)))
			return -EFAULT;
		sigset_from_compat(&ksigmask, &ss32);

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = compat_core_sys_select(n, inp, outp, exp, to);
	ret = compat_poll_select_copy_remaining(&end_time, tsp, 0, ret);

	if (ret == -ERESTARTNOHAND) {
		/*
		 * Don't restore the signal mask yet.  Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
			       sizeof(sigsaved));
			set_restore_sigmask();
		}
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return ret;
}

COMPAT_SYSCALL_DEFINE6(pselect6, int, n, compat_ulong_t __user *, inp,
	compat_ulong_t __user *, outp, compat_ulong_t __user *, exp,
	struct compat_timespec __user *, tsp, void __user *, sig)
{
	compat_size_t sigsetsize = 0;
	compat_uptr_t up = 0;

	if (sig) {
		if (!access_ok(VERIFY_READ, sig,
				sizeof(compat_uptr_t)+sizeof(compat_size_t)) ||
		    __get_user(up, (compat_uptr_t __user *)sig) ||
		    __get_user(sigsetsize,
				(compat_size_t __user *)(sig+sizeof(up))))
			return -EFAULT;
	}
	return do_compat_pselect(n, inp, outp, exp, tsp, compat_ptr(up),
				 sigsetsize);
}

COMPAT_SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds,
	unsigned int,  nfds, struct compat_timespec __user *, tsp,
	const compat_sigset_t __user *, sigmask, compat_size_t, sigsetsize)
{
	compat_sigset_t ss32;
	sigset_t ksigmask, sigsaved;
	struct compat_timespec ts;
	struct timespec64 end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (copy_from_user(&ts, tsp, sizeof(ts)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		if (sigsetsize != sizeof(compat_sigset_t))
			return -EINVAL;
		if (copy_from_user(&ss32, sigmask, sizeof(ss32)))
			return -EFAULT;
		sigset_from_compat(&ksigmask, &ss32);

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = do_sys_poll(ufds, nfds, to);

	/* We can restart this syscall, usually */
	if (ret == -EINTR) {
		/*
		 * Don't restore the signal mask yet.  Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
			       sizeof(sigsaved));
			set_restore_sigmask();
		}
		ret = -ERESTARTNOHAND;
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	ret = compat_poll_select_copy_remaining(&end_time, tsp, 0, ret);

	return ret;
}
#endif