1 /*- 2 * Copyright (c) 1982, 1986, 1989, 1993 3 * The Regents of the University of California. All rights reserved. 4 * (c) UNIX System Laboratories, Inc. 5 * All or some portions of this file are derived from material licensed 6 * to the University of California by American Telephone and Telegraph 7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 8 * the permission of UNIX System Laboratories, Inc. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 4. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 * 34 * @(#)sys_generic.c 8.5 (Berkeley) 1/21/94 35 */ 36 37 #include <sys/cdefs.h> 38 __FBSDID("$FreeBSD$"); 39 40 #include "opt_compat.h" 41 #include "opt_ktrace.h" 42 43 #include <sys/param.h> 44 #include <sys/systm.h> 45 #include <sys/sysproto.h> 46 #include <sys/filedesc.h> 47 #include <sys/filio.h> 48 #include <sys/fcntl.h> 49 #include <sys/file.h> 50 #include <sys/proc.h> 51 #include <sys/signalvar.h> 52 #include <sys/socketvar.h> 53 #include <sys/uio.h> 54 #include <sys/kernel.h> 55 #include <sys/ktr.h> 56 #include <sys/limits.h> 57 #include <sys/malloc.h> 58 #include <sys/poll.h> 59 #include <sys/resourcevar.h> 60 #include <sys/selinfo.h> 61 #include <sys/sleepqueue.h> 62 #include <sys/syscallsubr.h> 63 #include <sys/sysctl.h> 64 #include <sys/sysent.h> 65 #include <sys/vnode.h> 66 #include <sys/bio.h> 67 #include <sys/buf.h> 68 #include <sys/condvar.h> 69 #ifdef KTRACE 70 #include <sys/ktrace.h> 71 #endif 72 73 #include <security/audit/audit.h> 74 75 static MALLOC_DEFINE(M_IOCTLOPS, "ioctlops", "ioctl data buffer"); 76 static MALLOC_DEFINE(M_SELECT, "select", "select() buffer"); 77 MALLOC_DEFINE(M_IOV, "iov", "large iov's"); 78 79 static int pollscan(struct thread *, struct pollfd *, u_int); 80 static int pollrescan(struct thread *); 81 static int selscan(struct thread *, fd_mask **, fd_mask **, int); 82 static int selrescan(struct thread *, fd_mask **, fd_mask **); 83 static void selfdalloc(struct thread *, void *); 84 static void selfdfree(struct seltd *, struct selfd *); 85 static int dofileread(struct thread *, int, struct file *, struct uio *, 86 off_t, int); 87 static int dofilewrite(struct thread *, int, struct file *, struct uio *, 88 off_t, int); 89 static void doselwakeup(struct selinfo *, int); 90 static void seltdinit(struct thread *); 91 static int seltdwait(struct thread *, int); 92 static void seltdclear(struct thread *); 93 94 /* 95 * One seltd per-thread allocated on demand as needed. 96 * 97 * t - protected by st_mtx 98 * k - Only accessed by curthread or read-only 99 */ 100 struct seltd { 101 STAILQ_HEAD(, selfd) st_selq; /* (k) List of selfds. */ 102 struct selfd *st_free1; /* (k) free fd for read set. */ 103 struct selfd *st_free2; /* (k) free fd for write set. */ 104 struct mtx st_mtx; /* Protects struct seltd */ 105 struct cv st_wait; /* (t) Wait channel. */ 106 int st_flags; /* (t) SELTD_ flags. */ 107 }; 108 109 #define SELTD_PENDING 0x0001 /* We have pending events. */ 110 #define SELTD_RESCAN 0x0002 /* Doing a rescan. */ 111 112 /* 113 * One selfd allocated per-thread per-file-descriptor. 114 * f - protected by sf_mtx 115 */ 116 struct selfd { 117 STAILQ_ENTRY(selfd) sf_link; /* (k) fds owned by this td. */ 118 TAILQ_ENTRY(selfd) sf_threads; /* (f) fds on this selinfo. */ 119 struct selinfo *sf_si; /* (f) selinfo when linked. */ 120 struct mtx *sf_mtx; /* Pointer to selinfo mtx. */ 121 struct seltd *sf_td; /* (k) owning seltd. */ 122 void *sf_cookie; /* (k) fd or pollfd. */ 123 }; 124 125 static uma_zone_t selfd_zone; 126 127 #ifndef _SYS_SYSPROTO_H_ 128 struct read_args { 129 int fd; 130 void *buf; 131 size_t nbyte; 132 }; 133 #endif 134 int 135 read(td, uap) 136 struct thread *td; 137 struct read_args *uap; 138 { 139 struct uio auio; 140 struct iovec aiov; 141 int error; 142 143 if (uap->nbyte > INT_MAX) 144 return (EINVAL); 145 aiov.iov_base = uap->buf; 146 aiov.iov_len = uap->nbyte; 147 auio.uio_iov = &aiov; 148 auio.uio_iovcnt = 1; 149 auio.uio_resid = uap->nbyte; 150 auio.uio_segflg = UIO_USERSPACE; 151 error = kern_readv(td, uap->fd, &auio); 152 return(error); 153 } 154 155 /* 156 * Positioned read system call 157 */ 158 #ifndef _SYS_SYSPROTO_H_ 159 struct pread_args { 160 int fd; 161 void *buf; 162 size_t nbyte; 163 int pad; 164 off_t offset; 165 }; 166 #endif 167 int 168 pread(td, uap) 169 struct thread *td; 170 struct pread_args *uap; 171 { 172 struct uio auio; 173 struct iovec aiov; 174 int error; 175 176 if (uap->nbyte > INT_MAX) 177 return (EINVAL); 178 aiov.iov_base = uap->buf; 179 aiov.iov_len = uap->nbyte; 180 auio.uio_iov = &aiov; 181 auio.uio_iovcnt = 1; 182 auio.uio_resid = uap->nbyte; 183 auio.uio_segflg = UIO_USERSPACE; 184 error = kern_preadv(td, uap->fd, &auio, uap->offset); 185 return(error); 186 } 187 188 int 189 freebsd6_pread(td, uap) 190 struct thread *td; 191 struct freebsd6_pread_args *uap; 192 { 193 struct pread_args oargs; 194 195 oargs.fd = uap->fd; 196 oargs.buf = uap->buf; 197 oargs.nbyte = uap->nbyte; 198 oargs.offset = uap->offset; 199 return (pread(td, &oargs)); 200 } 201 202 /* 203 * Scatter read system call. 204 */ 205 #ifndef _SYS_SYSPROTO_H_ 206 struct readv_args { 207 int fd; 208 struct iovec *iovp; 209 u_int iovcnt; 210 }; 211 #endif 212 int 213 readv(struct thread *td, struct readv_args *uap) 214 { 215 struct uio *auio; 216 int error; 217 218 error = copyinuio(uap->iovp, uap->iovcnt, &auio); 219 if (error) 220 return (error); 221 error = kern_readv(td, uap->fd, auio); 222 free(auio, M_IOV); 223 return (error); 224 } 225 226 int 227 kern_readv(struct thread *td, int fd, struct uio *auio) 228 { 229 struct file *fp; 230 int error; 231 232 error = fget_read(td, fd, &fp); 233 if (error) 234 return (error); 235 error = dofileread(td, fd, fp, auio, (off_t)-1, 0); 236 fdrop(fp, td); 237 return (error); 238 } 239 240 /* 241 * Scatter positioned read system call. 242 */ 243 #ifndef _SYS_SYSPROTO_H_ 244 struct preadv_args { 245 int fd; 246 struct iovec *iovp; 247 u_int iovcnt; 248 off_t offset; 249 }; 250 #endif 251 int 252 preadv(struct thread *td, struct preadv_args *uap) 253 { 254 struct uio *auio; 255 int error; 256 257 error = copyinuio(uap->iovp, uap->iovcnt, &auio); 258 if (error) 259 return (error); 260 error = kern_preadv(td, uap->fd, auio, uap->offset); 261 free(auio, M_IOV); 262 return (error); 263 } 264 265 int 266 kern_preadv(td, fd, auio, offset) 267 struct thread *td; 268 int fd; 269 struct uio *auio; 270 off_t offset; 271 { 272 struct file *fp; 273 int error; 274 275 error = fget_read(td, fd, &fp); 276 if (error) 277 return (error); 278 if (!(fp->f_ops->fo_flags & DFLAG_SEEKABLE)) 279 error = ESPIPE; 280 else if (offset < 0 && fp->f_vnode->v_type != VCHR) 281 error = EINVAL; 282 else 283 error = dofileread(td, fd, fp, auio, offset, FOF_OFFSET); 284 fdrop(fp, td); 285 return (error); 286 } 287 288 /* 289 * Common code for readv and preadv that reads data in 290 * from a file using the passed in uio, offset, and flags. 291 */ 292 static int 293 dofileread(td, fd, fp, auio, offset, flags) 294 struct thread *td; 295 int fd; 296 struct file *fp; 297 struct uio *auio; 298 off_t offset; 299 int flags; 300 { 301 ssize_t cnt; 302 int error; 303 #ifdef KTRACE 304 struct uio *ktruio = NULL; 305 #endif 306 307 /* Finish zero length reads right here */ 308 if (auio->uio_resid == 0) { 309 td->td_retval[0] = 0; 310 return(0); 311 } 312 auio->uio_rw = UIO_READ; 313 auio->uio_offset = offset; 314 auio->uio_td = td; 315 #ifdef KTRACE 316 if (KTRPOINT(td, KTR_GENIO)) 317 ktruio = cloneuio(auio); 318 #endif 319 cnt = auio->uio_resid; 320 if ((error = fo_read(fp, auio, td->td_ucred, flags, td))) { 321 if (auio->uio_resid != cnt && (error == ERESTART || 322 error == EINTR || error == EWOULDBLOCK)) 323 error = 0; 324 } 325 cnt -= auio->uio_resid; 326 #ifdef KTRACE 327 if (ktruio != NULL) { 328 ktruio->uio_resid = cnt; 329 ktrgenio(fd, UIO_READ, ktruio, error); 330 } 331 #endif 332 td->td_retval[0] = cnt; 333 return (error); 334 } 335 336 #ifndef _SYS_SYSPROTO_H_ 337 struct write_args { 338 int fd; 339 const void *buf; 340 size_t nbyte; 341 }; 342 #endif 343 int 344 write(td, uap) 345 struct thread *td; 346 struct write_args *uap; 347 { 348 struct uio auio; 349 struct iovec aiov; 350 int error; 351 352 if (uap->nbyte > INT_MAX) 353 return (EINVAL); 354 aiov.iov_base = (void *)(uintptr_t)uap->buf; 355 aiov.iov_len = uap->nbyte; 356 auio.uio_iov = &aiov; 357 auio.uio_iovcnt = 1; 358 auio.uio_resid = uap->nbyte; 359 auio.uio_segflg = UIO_USERSPACE; 360 error = kern_writev(td, uap->fd, &auio); 361 return(error); 362 } 363 364 /* 365 * Positioned write system call. 366 */ 367 #ifndef _SYS_SYSPROTO_H_ 368 struct pwrite_args { 369 int fd; 370 const void *buf; 371 size_t nbyte; 372 int pad; 373 off_t offset; 374 }; 375 #endif 376 int 377 pwrite(td, uap) 378 struct thread *td; 379 struct pwrite_args *uap; 380 { 381 struct uio auio; 382 struct iovec aiov; 383 int error; 384 385 if (uap->nbyte > INT_MAX) 386 return (EINVAL); 387 aiov.iov_base = (void *)(uintptr_t)uap->buf; 388 aiov.iov_len = uap->nbyte; 389 auio.uio_iov = &aiov; 390 auio.uio_iovcnt = 1; 391 auio.uio_resid = uap->nbyte; 392 auio.uio_segflg = UIO_USERSPACE; 393 error = kern_pwritev(td, uap->fd, &auio, uap->offset); 394 return(error); 395 } 396 397 int 398 freebsd6_pwrite(td, uap) 399 struct thread *td; 400 struct freebsd6_pwrite_args *uap; 401 { 402 struct pwrite_args oargs; 403 404 oargs.fd = uap->fd; 405 oargs.buf = uap->buf; 406 oargs.nbyte = uap->nbyte; 407 oargs.offset = uap->offset; 408 return (pwrite(td, &oargs)); 409 } 410 411 /* 412 * Gather write system call. 413 */ 414 #ifndef _SYS_SYSPROTO_H_ 415 struct writev_args { 416 int fd; 417 struct iovec *iovp; 418 u_int iovcnt; 419 }; 420 #endif 421 int 422 writev(struct thread *td, struct writev_args *uap) 423 { 424 struct uio *auio; 425 int error; 426 427 error = copyinuio(uap->iovp, uap->iovcnt, &auio); 428 if (error) 429 return (error); 430 error = kern_writev(td, uap->fd, auio); 431 free(auio, M_IOV); 432 return (error); 433 } 434 435 int 436 kern_writev(struct thread *td, int fd, struct uio *auio) 437 { 438 struct file *fp; 439 int error; 440 441 error = fget_write(td, fd, &fp); 442 if (error) 443 return (error); 444 error = dofilewrite(td, fd, fp, auio, (off_t)-1, 0); 445 fdrop(fp, td); 446 return (error); 447 } 448 449 /* 450 * Gather positioned write system call. 451 */ 452 #ifndef _SYS_SYSPROTO_H_ 453 struct pwritev_args { 454 int fd; 455 struct iovec *iovp; 456 u_int iovcnt; 457 off_t offset; 458 }; 459 #endif 460 int 461 pwritev(struct thread *td, struct pwritev_args *uap) 462 { 463 struct uio *auio; 464 int error; 465 466 error = copyinuio(uap->iovp, uap->iovcnt, &auio); 467 if (error) 468 return (error); 469 error = kern_pwritev(td, uap->fd, auio, uap->offset); 470 free(auio, M_IOV); 471 return (error); 472 } 473 474 int 475 kern_pwritev(td, fd, auio, offset) 476 struct thread *td; 477 struct uio *auio; 478 int fd; 479 off_t offset; 480 { 481 struct file *fp; 482 int error; 483 484 error = fget_write(td, fd, &fp); 485 if (error) 486 return (error); 487 if (!(fp->f_ops->fo_flags & DFLAG_SEEKABLE)) 488 error = ESPIPE; 489 else if (offset < 0 && fp->f_vnode->v_type != VCHR) 490 error = EINVAL; 491 else 492 error = dofilewrite(td, fd, fp, auio, offset, FOF_OFFSET); 493 fdrop(fp, td); 494 return (error); 495 } 496 497 /* 498 * Common code for writev and pwritev that writes data to 499 * a file using the passed in uio, offset, and flags. 500 */ 501 static int 502 dofilewrite(td, fd, fp, auio, offset, flags) 503 struct thread *td; 504 int fd; 505 struct file *fp; 506 struct uio *auio; 507 off_t offset; 508 int flags; 509 { 510 ssize_t cnt; 511 int error; 512 #ifdef KTRACE 513 struct uio *ktruio = NULL; 514 #endif 515 516 auio->uio_rw = UIO_WRITE; 517 auio->uio_td = td; 518 auio->uio_offset = offset; 519 #ifdef KTRACE 520 if (KTRPOINT(td, KTR_GENIO)) 521 ktruio = cloneuio(auio); 522 #endif 523 cnt = auio->uio_resid; 524 if (fp->f_type == DTYPE_VNODE) 525 bwillwrite(); 526 if ((error = fo_write(fp, auio, td->td_ucred, flags, td))) { 527 if (auio->uio_resid != cnt && (error == ERESTART || 528 error == EINTR || error == EWOULDBLOCK)) 529 error = 0; 530 /* Socket layer is responsible for issuing SIGPIPE. */ 531 if (fp->f_type != DTYPE_SOCKET && error == EPIPE) { 532 PROC_LOCK(td->td_proc); 533 psignal(td->td_proc, SIGPIPE); 534 PROC_UNLOCK(td->td_proc); 535 } 536 } 537 cnt -= auio->uio_resid; 538 #ifdef KTRACE 539 if (ktruio != NULL) { 540 ktruio->uio_resid = cnt; 541 ktrgenio(fd, UIO_WRITE, ktruio, error); 542 } 543 #endif 544 td->td_retval[0] = cnt; 545 return (error); 546 } 547 548 /* 549 * Truncate a file given a file descriptor. 550 * 551 * Can't use fget_write() here, since must return EINVAL and not EBADF if the 552 * descriptor isn't writable. 553 */ 554 int 555 kern_ftruncate(td, fd, length) 556 struct thread *td; 557 int fd; 558 off_t length; 559 { 560 struct file *fp; 561 int error; 562 563 AUDIT_ARG(fd, fd); 564 if (length < 0) 565 return (EINVAL); 566 error = fget(td, fd, &fp); 567 if (error) 568 return (error); 569 AUDIT_ARG(file, td->td_proc, fp); 570 if (!(fp->f_flag & FWRITE)) { 571 fdrop(fp, td); 572 return (EINVAL); 573 } 574 error = fo_truncate(fp, length, td->td_ucred, td); 575 fdrop(fp, td); 576 return (error); 577 } 578 579 #ifndef _SYS_SYSPROTO_H_ 580 struct ftruncate_args { 581 int fd; 582 int pad; 583 off_t length; 584 }; 585 #endif 586 int 587 ftruncate(td, uap) 588 struct thread *td; 589 struct ftruncate_args *uap; 590 { 591 592 return (kern_ftruncate(td, uap->fd, uap->length)); 593 } 594 595 #if defined(COMPAT_43) 596 #ifndef _SYS_SYSPROTO_H_ 597 struct oftruncate_args { 598 int fd; 599 long length; 600 }; 601 #endif 602 int 603 oftruncate(td, uap) 604 struct thread *td; 605 struct oftruncate_args *uap; 606 { 607 608 return (kern_ftruncate(td, uap->fd, uap->length)); 609 } 610 #endif /* COMPAT_43 */ 611 612 #ifndef _SYS_SYSPROTO_H_ 613 struct ioctl_args { 614 int fd; 615 u_long com; 616 caddr_t data; 617 }; 618 #endif 619 /* ARGSUSED */ 620 int 621 ioctl(struct thread *td, struct ioctl_args *uap) 622 { 623 u_long com; 624 int arg, error; 625 u_int size; 626 caddr_t data; 627 628 if (uap->com > 0xffffffff) { 629 printf( 630 "WARNING pid %d (%s): ioctl sign-extension ioctl %lx\n", 631 td->td_proc->p_pid, td->td_name, uap->com); 632 uap->com &= 0xffffffff; 633 } 634 com = uap->com; 635 636 /* 637 * Interpret high order word to find amount of data to be 638 * copied to/from the user's address space. 639 */ 640 size = IOCPARM_LEN(com); 641 if ((size > IOCPARM_MAX) || 642 ((com & (IOC_VOID | IOC_IN | IOC_OUT)) == 0) || 643 #if defined(COMPAT_FREEBSD5) || defined(COMPAT_FREEBSD4) || defined(COMPAT_43) 644 ((com & IOC_OUT) && size == 0) || 645 #else 646 ((com & (IOC_IN | IOC_OUT)) && size == 0) || 647 #endif 648 ((com & IOC_VOID) && size > 0 && size != sizeof(int))) 649 return (ENOTTY); 650 651 if (size > 0) { 652 if (!(com & IOC_VOID)) 653 data = malloc((u_long)size, M_IOCTLOPS, M_WAITOK); 654 else { 655 /* Integer argument. */ 656 arg = (intptr_t)uap->data; 657 data = (void *)&arg; 658 size = 0; 659 } 660 } else 661 data = (void *)&uap->data; 662 if (com & IOC_IN) { 663 error = copyin(uap->data, data, (u_int)size); 664 if (error) { 665 if (size > 0) 666 free(data, M_IOCTLOPS); 667 return (error); 668 } 669 } else if (com & IOC_OUT) { 670 /* 671 * Zero the buffer so the user always 672 * gets back something deterministic. 673 */ 674 bzero(data, size); 675 } 676 677 error = kern_ioctl(td, uap->fd, com, data); 678 679 if (error == 0 && (com & IOC_OUT)) 680 error = copyout(data, uap->data, (u_int)size); 681 682 if (size > 0) 683 free(data, M_IOCTLOPS); 684 return (error); 685 } 686 687 int 688 kern_ioctl(struct thread *td, int fd, u_long com, caddr_t data) 689 { 690 struct file *fp; 691 struct filedesc *fdp; 692 int error; 693 int tmp; 694 695 if ((error = fget(td, fd, &fp)) != 0) 696 return (error); 697 if ((fp->f_flag & (FREAD | FWRITE)) == 0) { 698 fdrop(fp, td); 699 return (EBADF); 700 } 701 fdp = td->td_proc->p_fd; 702 switch (com) { 703 case FIONCLEX: 704 FILEDESC_XLOCK(fdp); 705 fdp->fd_ofileflags[fd] &= ~UF_EXCLOSE; 706 FILEDESC_XUNLOCK(fdp); 707 goto out; 708 case FIOCLEX: 709 FILEDESC_XLOCK(fdp); 710 fdp->fd_ofileflags[fd] |= UF_EXCLOSE; 711 FILEDESC_XUNLOCK(fdp); 712 goto out; 713 case FIONBIO: 714 if ((tmp = *(int *)data)) 715 atomic_set_int(&fp->f_flag, FNONBLOCK); 716 else 717 atomic_clear_int(&fp->f_flag, FNONBLOCK); 718 data = (void *)&tmp; 719 break; 720 case FIOASYNC: 721 if ((tmp = *(int *)data)) 722 atomic_set_int(&fp->f_flag, FASYNC); 723 else 724 atomic_clear_int(&fp->f_flag, FASYNC); 725 data = (void *)&tmp; 726 break; 727 } 728 729 error = fo_ioctl(fp, com, data, td->td_ucred, td); 730 out: 731 fdrop(fp, td); 732 return (error); 733 } 734 735 #ifndef _SYS_SYSPROTO_H_ 736 struct select_args { 737 int nd; 738 fd_set *in, *ou, *ex; 739 struct timeval *tv; 740 }; 741 #endif 742 int 743 select(td, uap) 744 register struct thread *td; 745 register struct select_args *uap; 746 { 747 struct timeval tv, *tvp; 748 int error; 749 750 if (uap->tv != NULL) { 751 error = copyin(uap->tv, &tv, sizeof(tv)); 752 if (error) 753 return (error); 754 tvp = &tv; 755 } else 756 tvp = NULL; 757 758 return (kern_select(td, uap->nd, uap->in, uap->ou, uap->ex, tvp)); 759 } 760 761 int 762 kern_select(struct thread *td, int nd, fd_set *fd_in, fd_set *fd_ou, 763 fd_set *fd_ex, struct timeval *tvp) 764 { 765 struct filedesc *fdp; 766 /* 767 * The magic 2048 here is chosen to be just enough for FD_SETSIZE 768 * infds with the new FD_SETSIZE of 1024, and more than enough for 769 * FD_SETSIZE infds, outfds and exceptfds with the old FD_SETSIZE 770 * of 256. 771 */ 772 fd_mask s_selbits[howmany(2048, NFDBITS)]; 773 fd_mask *ibits[3], *obits[3], *selbits, *sbp; 774 struct timeval atv, rtv, ttv; 775 int error, timo; 776 u_int nbufbytes, ncpbytes, nfdbits; 777 778 if (nd < 0) 779 return (EINVAL); 780 fdp = td->td_proc->p_fd; 781 782 FILEDESC_SLOCK(fdp); 783 if (nd > td->td_proc->p_fd->fd_nfiles) 784 nd = td->td_proc->p_fd->fd_nfiles; /* forgiving; slightly wrong */ 785 FILEDESC_SUNLOCK(fdp); 786 787 /* 788 * Allocate just enough bits for the non-null fd_sets. Use the 789 * preallocated auto buffer if possible. 790 */ 791 nfdbits = roundup(nd, NFDBITS); 792 ncpbytes = nfdbits / NBBY; 793 nbufbytes = 0; 794 if (fd_in != NULL) 795 nbufbytes += 2 * ncpbytes; 796 if (fd_ou != NULL) 797 nbufbytes += 2 * ncpbytes; 798 if (fd_ex != NULL) 799 nbufbytes += 2 * ncpbytes; 800 if (nbufbytes <= sizeof s_selbits) 801 selbits = &s_selbits[0]; 802 else 803 selbits = malloc(nbufbytes, M_SELECT, M_WAITOK); 804 805 /* 806 * Assign pointers into the bit buffers and fetch the input bits. 807 * Put the output buffers together so that they can be bzeroed 808 * together. 809 */ 810 sbp = selbits; 811 #define getbits(name, x) \ 812 do { \ 813 if (name == NULL) \ 814 ibits[x] = NULL; \ 815 else { \ 816 ibits[x] = sbp + nbufbytes / 2 / sizeof *sbp; \ 817 obits[x] = sbp; \ 818 sbp += ncpbytes / sizeof *sbp; \ 819 error = copyin(name, ibits[x], ncpbytes); \ 820 if (error != 0) \ 821 goto done; \ 822 } \ 823 } while (0) 824 getbits(fd_in, 0); 825 getbits(fd_ou, 1); 826 getbits(fd_ex, 2); 827 #undef getbits 828 if (nbufbytes != 0) 829 bzero(selbits, nbufbytes / 2); 830 831 if (tvp != NULL) { 832 atv = *tvp; 833 if (itimerfix(&atv)) { 834 error = EINVAL; 835 goto done; 836 } 837 getmicrouptime(&rtv); 838 timevaladd(&atv, &rtv); 839 } else { 840 atv.tv_sec = 0; 841 atv.tv_usec = 0; 842 } 843 timo = 0; 844 seltdinit(td); 845 /* Iterate until the timeout expires or descriptors become ready. */ 846 for (;;) { 847 error = selscan(td, ibits, obits, nd); 848 if (error || td->td_retval[0] != 0) 849 break; 850 if (atv.tv_sec || atv.tv_usec) { 851 getmicrouptime(&rtv); 852 if (timevalcmp(&rtv, &atv, >=)) 853 break; 854 ttv = atv; 855 timevalsub(&ttv, &rtv); 856 timo = ttv.tv_sec > 24 * 60 * 60 ? 857 24 * 60 * 60 * hz : tvtohz(&ttv); 858 } 859 error = seltdwait(td, timo); 860 if (error) 861 break; 862 error = selrescan(td, ibits, obits); 863 if (error || td->td_retval[0] != 0) 864 break; 865 } 866 seltdclear(td); 867 868 done: 869 /* select is not restarted after signals... */ 870 if (error == ERESTART) 871 error = EINTR; 872 if (error == EWOULDBLOCK) 873 error = 0; 874 #define putbits(name, x) \ 875 if (name && (error2 = copyout(obits[x], name, ncpbytes))) \ 876 error = error2; 877 if (error == 0) { 878 int error2; 879 880 putbits(fd_in, 0); 881 putbits(fd_ou, 1); 882 putbits(fd_ex, 2); 883 #undef putbits 884 } 885 if (selbits != &s_selbits[0]) 886 free(selbits, M_SELECT); 887 888 return (error); 889 } 890 891 /* 892 * Traverse the list of fds attached to this thread's seltd and check for 893 * completion. 894 */ 895 static int 896 selrescan(struct thread *td, fd_mask **ibits, fd_mask **obits) 897 { 898 struct seltd *stp; 899 struct selfd *sfp; 900 struct selfd *sfn; 901 struct selinfo *si; 902 struct file *fp; 903 int msk, fd; 904 int n = 0; 905 /* Note: backend also returns POLLHUP/POLLERR if appropriate. */ 906 static int flag[3] = { POLLRDNORM, POLLWRNORM, POLLRDBAND }; 907 struct filedesc *fdp = td->td_proc->p_fd; 908 909 stp = td->td_sel; 910 FILEDESC_SLOCK(fdp); 911 STAILQ_FOREACH_SAFE(sfp, &stp->st_selq, sf_link, sfn) { 912 fd = (int)(uintptr_t)sfp->sf_cookie; 913 si = sfp->sf_si; 914 selfdfree(stp, sfp); 915 /* If the selinfo wasn't cleared the event didn't fire. */ 916 if (si != NULL) 917 continue; 918 if ((fp = fget_locked(fdp, fd)) == NULL) { 919 FILEDESC_SUNLOCK(fdp); 920 return (EBADF); 921 } 922 for (msk = 0; msk < 3; msk++) { 923 if (ibits[msk] == NULL) 924 continue; 925 if ((ibits[msk][fd/NFDBITS] & 926 ((fd_mask) 1 << (fd % NFDBITS))) == 0) 927 continue; 928 if (fo_poll(fp, flag[msk], td->td_ucred, td)) { 929 obits[msk][(fd)/NFDBITS] |= 930 ((fd_mask)1 << ((fd) % NFDBITS)); 931 n++; 932 } 933 } 934 } 935 FILEDESC_SUNLOCK(fdp); 936 stp->st_flags = 0; 937 td->td_retval[0] = n; 938 return (0); 939 } 940 941 /* 942 * Perform the initial filedescriptor scan and register ourselves with 943 * each selinfo. 944 */ 945 static int 946 selscan(td, ibits, obits, nfd) 947 struct thread *td; 948 fd_mask **ibits, **obits; 949 int nfd; 950 { 951 int msk, i, fd; 952 fd_mask bits; 953 struct file *fp; 954 int n = 0; 955 /* Note: backend also returns POLLHUP/POLLERR if appropriate. */ 956 static int flag[3] = { POLLRDNORM, POLLWRNORM, POLLRDBAND }; 957 struct filedesc *fdp = td->td_proc->p_fd; 958 959 FILEDESC_SLOCK(fdp); 960 for (msk = 0; msk < 3; msk++) { 961 if (ibits[msk] == NULL) 962 continue; 963 for (i = 0; i < nfd; i += NFDBITS) { 964 bits = ibits[msk][i/NFDBITS]; 965 /* ffs(int mask) not portable, fd_mask is long */ 966 for (fd = i; bits && fd < nfd; fd++, bits >>= 1) { 967 if (!(bits & 1)) 968 continue; 969 if ((fp = fget_locked(fdp, fd)) == NULL) { 970 FILEDESC_SUNLOCK(fdp); 971 return (EBADF); 972 } 973 selfdalloc(td, (void *)(uintptr_t)fd); 974 if (fo_poll(fp, flag[msk], td->td_ucred, 975 td)) { 976 obits[msk][(fd)/NFDBITS] |= 977 ((fd_mask)1 << ((fd) % NFDBITS)); 978 n++; 979 } 980 } 981 } 982 } 983 FILEDESC_SUNLOCK(fdp); 984 td->td_retval[0] = n; 985 return (0); 986 } 987 988 #ifndef _SYS_SYSPROTO_H_ 989 struct poll_args { 990 struct pollfd *fds; 991 u_int nfds; 992 int timeout; 993 }; 994 #endif 995 int 996 poll(td, uap) 997 struct thread *td; 998 struct poll_args *uap; 999 { 1000 struct pollfd *bits; 1001 struct pollfd smallbits[32]; 1002 struct timeval atv, rtv, ttv; 1003 int error = 0, timo; 1004 u_int nfds; 1005 size_t ni; 1006 1007 nfds = uap->nfds; 1008 1009 /* 1010 * This is kinda bogus. We have fd limits, but that is not 1011 * really related to the size of the pollfd array. Make sure 1012 * we let the process use at least FD_SETSIZE entries and at 1013 * least enough for the current limits. We want to be reasonably 1014 * safe, but not overly restrictive. 1015 */ 1016 PROC_LOCK(td->td_proc); 1017 if ((nfds > lim_cur(td->td_proc, RLIMIT_NOFILE)) && 1018 (nfds > FD_SETSIZE)) { 1019 PROC_UNLOCK(td->td_proc); 1020 return (EINVAL); 1021 } 1022 PROC_UNLOCK(td->td_proc); 1023 ni = nfds * sizeof(struct pollfd); 1024 if (ni > sizeof(smallbits)) 1025 bits = malloc(ni, M_TEMP, M_WAITOK); 1026 else 1027 bits = smallbits; 1028 error = copyin(uap->fds, bits, ni); 1029 if (error) 1030 goto done; 1031 if (uap->timeout != INFTIM) { 1032 atv.tv_sec = uap->timeout / 1000; 1033 atv.tv_usec = (uap->timeout % 1000) * 1000; 1034 if (itimerfix(&atv)) { 1035 error = EINVAL; 1036 goto done; 1037 } 1038 getmicrouptime(&rtv); 1039 timevaladd(&atv, &rtv); 1040 } else { 1041 atv.tv_sec = 0; 1042 atv.tv_usec = 0; 1043 } 1044 timo = 0; 1045 seltdinit(td); 1046 /* Iterate until the timeout expires or descriptors become ready. */ 1047 for (;;) { 1048 error = pollscan(td, bits, nfds); 1049 if (error || td->td_retval[0] != 0) 1050 break; 1051 if (atv.tv_sec || atv.tv_usec) { 1052 getmicrouptime(&rtv); 1053 if (timevalcmp(&rtv, &atv, >=)) 1054 break; 1055 ttv = atv; 1056 timevalsub(&ttv, &rtv); 1057 timo = ttv.tv_sec > 24 * 60 * 60 ? 1058 24 * 60 * 60 * hz : tvtohz(&ttv); 1059 } 1060 error = seltdwait(td, timo); 1061 if (error) 1062 break; 1063 error = pollrescan(td); 1064 if (error || td->td_retval[0] != 0) 1065 break; 1066 } 1067 seltdclear(td); 1068 1069 done: 1070 /* poll is not restarted after signals... */ 1071 if (error == ERESTART) 1072 error = EINTR; 1073 if (error == EWOULDBLOCK) 1074 error = 0; 1075 if (error == 0) { 1076 error = copyout(bits, uap->fds, ni); 1077 if (error) 1078 goto out; 1079 } 1080 out: 1081 if (ni > sizeof(smallbits)) 1082 free(bits, M_TEMP); 1083 return (error); 1084 } 1085 1086 static int 1087 pollrescan(struct thread *td) 1088 { 1089 struct seltd *stp; 1090 struct selfd *sfp; 1091 struct selfd *sfn; 1092 struct selinfo *si; 1093 struct filedesc *fdp; 1094 struct file *fp; 1095 struct pollfd *fd; 1096 int n; 1097 1098 n = 0; 1099 fdp = td->td_proc->p_fd; 1100 stp = td->td_sel; 1101 FILEDESC_SLOCK(fdp); 1102 STAILQ_FOREACH_SAFE(sfp, &stp->st_selq, sf_link, sfn) { 1103 fd = (struct pollfd *)sfp->sf_cookie; 1104 si = sfp->sf_si; 1105 selfdfree(stp, sfp); 1106 /* If the selinfo wasn't cleared the event didn't fire. */ 1107 if (si != NULL) 1108 continue; 1109 fp = fdp->fd_ofiles[fd->fd]; 1110 if (fp == NULL) { 1111 fd->revents = POLLNVAL; 1112 n++; 1113 continue; 1114 } 1115 /* 1116 * Note: backend also returns POLLHUP and 1117 * POLLERR if appropriate. 1118 */ 1119 fd->revents = fo_poll(fp, fd->events, td->td_ucred, td); 1120 if (fd->revents != 0) 1121 n++; 1122 } 1123 FILEDESC_SUNLOCK(fdp); 1124 stp->st_flags = 0; 1125 td->td_retval[0] = n; 1126 return (0); 1127 } 1128 1129 1130 static int 1131 pollscan(td, fds, nfd) 1132 struct thread *td; 1133 struct pollfd *fds; 1134 u_int nfd; 1135 { 1136 struct filedesc *fdp = td->td_proc->p_fd; 1137 int i; 1138 struct file *fp; 1139 int n = 0; 1140 1141 FILEDESC_SLOCK(fdp); 1142 for (i = 0; i < nfd; i++, fds++) { 1143 if (fds->fd >= fdp->fd_nfiles) { 1144 fds->revents = POLLNVAL; 1145 n++; 1146 } else if (fds->fd < 0) { 1147 fds->revents = 0; 1148 } else { 1149 fp = fdp->fd_ofiles[fds->fd]; 1150 if (fp == NULL) { 1151 fds->revents = POLLNVAL; 1152 n++; 1153 } else { 1154 /* 1155 * Note: backend also returns POLLHUP and 1156 * POLLERR if appropriate. 1157 */ 1158 selfdalloc(td, fds); 1159 fds->revents = fo_poll(fp, fds->events, 1160 td->td_ucred, td); 1161 if (fds->revents != 0) 1162 n++; 1163 } 1164 } 1165 } 1166 FILEDESC_SUNLOCK(fdp); 1167 td->td_retval[0] = n; 1168 return (0); 1169 } 1170 1171 /* 1172 * OpenBSD poll system call. 1173 * 1174 * XXX this isn't quite a true representation.. OpenBSD uses select ops. 1175 */ 1176 #ifndef _SYS_SYSPROTO_H_ 1177 struct openbsd_poll_args { 1178 struct pollfd *fds; 1179 u_int nfds; 1180 int timeout; 1181 }; 1182 #endif 1183 int 1184 openbsd_poll(td, uap) 1185 register struct thread *td; 1186 register struct openbsd_poll_args *uap; 1187 { 1188 return (poll(td, (struct poll_args *)uap)); 1189 } 1190 1191 /* 1192 * XXX This was created specifically to support netncp and netsmb. This 1193 * allows the caller to specify a socket to wait for events on. It returns 1194 * 0 if any events matched and an error otherwise. There is no way to 1195 * determine which events fired. 1196 */ 1197 int 1198 selsocket(struct socket *so, int events, struct timeval *tvp, struct thread *td) 1199 { 1200 struct timeval atv, rtv, ttv; 1201 int error, timo; 1202 1203 if (tvp != NULL) { 1204 atv = *tvp; 1205 if (itimerfix(&atv)) 1206 return (EINVAL); 1207 getmicrouptime(&rtv); 1208 timevaladd(&atv, &rtv); 1209 } else { 1210 atv.tv_sec = 0; 1211 atv.tv_usec = 0; 1212 } 1213 1214 timo = 0; 1215 seltdinit(td); 1216 /* 1217 * Iterate until the timeout expires or the socket becomes ready. 1218 */ 1219 for (;;) { 1220 selfdalloc(td, NULL); 1221 error = sopoll(so, events, NULL, td); 1222 /* error here is actually the ready events. */ 1223 if (error) 1224 return (0); 1225 if (atv.tv_sec || atv.tv_usec) { 1226 getmicrouptime(&rtv); 1227 if (timevalcmp(&rtv, &atv, >=)) { 1228 seltdclear(td); 1229 return (EWOULDBLOCK); 1230 } 1231 ttv = atv; 1232 timevalsub(&ttv, &rtv); 1233 timo = ttv.tv_sec > 24 * 60 * 60 ? 1234 24 * 60 * 60 * hz : tvtohz(&ttv); 1235 } 1236 error = seltdwait(td, timo); 1237 seltdclear(td); 1238 if (error) 1239 break; 1240 } 1241 /* XXX Duplicates ncp/smb behavior. */ 1242 if (error == ERESTART) 1243 error = 0; 1244 return (error); 1245 } 1246 1247 /* 1248 * Preallocate two selfds associated with 'cookie'. Some fo_poll routines 1249 * have two select sets, one for read and another for write. 1250 */ 1251 static void 1252 selfdalloc(struct thread *td, void *cookie) 1253 { 1254 struct seltd *stp; 1255 1256 stp = td->td_sel; 1257 if (stp->st_free1 == NULL) 1258 stp->st_free1 = uma_zalloc(selfd_zone, M_WAITOK|M_ZERO); 1259 stp->st_free1->sf_td = stp; 1260 stp->st_free1->sf_cookie = cookie; 1261 if (stp->st_free2 == NULL) 1262 stp->st_free2 = uma_zalloc(selfd_zone, M_WAITOK|M_ZERO); 1263 stp->st_free2->sf_td = stp; 1264 stp->st_free2->sf_cookie = cookie; 1265 } 1266 1267 static void 1268 selfdfree(struct seltd *stp, struct selfd *sfp) 1269 { 1270 STAILQ_REMOVE(&stp->st_selq, sfp, selfd, sf_link); 1271 mtx_lock(sfp->sf_mtx); 1272 if (sfp->sf_si) 1273 TAILQ_REMOVE(&sfp->sf_si->si_tdlist, sfp, sf_threads); 1274 mtx_unlock(sfp->sf_mtx); 1275 uma_zfree(selfd_zone, sfp); 1276 } 1277 1278 /* 1279 * Record a select request. 1280 */ 1281 void 1282 selrecord(selector, sip) 1283 struct thread *selector; 1284 struct selinfo *sip; 1285 { 1286 struct selfd *sfp; 1287 struct seltd *stp; 1288 struct mtx *mtxp; 1289 1290 stp = selector->td_sel; 1291 /* 1292 * Don't record when doing a rescan. 1293 */ 1294 if (stp->st_flags & SELTD_RESCAN) 1295 return; 1296 /* 1297 * Grab one of the preallocated descriptors. 1298 */ 1299 sfp = NULL; 1300 if ((sfp = stp->st_free1) != NULL) 1301 stp->st_free1 = NULL; 1302 else if ((sfp = stp->st_free2) != NULL) 1303 stp->st_free2 = NULL; 1304 else 1305 panic("selrecord: No free selfd on selq"); 1306 mtxp = mtx_pool_find(mtxpool_sleep, sip); 1307 /* 1308 * Initialize the sfp and queue it in the thread. 1309 */ 1310 sfp->sf_si = sip; 1311 sfp->sf_mtx = mtxp; 1312 STAILQ_INSERT_TAIL(&stp->st_selq, sfp, sf_link); 1313 /* 1314 * Now that we've locked the sip, check for initialization. 1315 */ 1316 mtx_lock(mtxp); 1317 if (sip->si_mtx == NULL) { 1318 sip->si_mtx = mtxp; 1319 TAILQ_INIT(&sip->si_tdlist); 1320 } 1321 /* 1322 * Add this thread to the list of selfds listening on this selinfo. 1323 */ 1324 TAILQ_INSERT_TAIL(&sip->si_tdlist, sfp, sf_threads); 1325 mtx_unlock(sip->si_mtx); 1326 } 1327 1328 /* Wake up a selecting thread. */ 1329 void 1330 selwakeup(sip) 1331 struct selinfo *sip; 1332 { 1333 doselwakeup(sip, -1); 1334 } 1335 1336 /* Wake up a selecting thread, and set its priority. */ 1337 void 1338 selwakeuppri(sip, pri) 1339 struct selinfo *sip; 1340 int pri; 1341 { 1342 doselwakeup(sip, pri); 1343 } 1344 1345 /* 1346 * Do a wakeup when a selectable event occurs. 1347 */ 1348 static void 1349 doselwakeup(sip, pri) 1350 struct selinfo *sip; 1351 int pri; 1352 { 1353 struct selfd *sfp; 1354 struct selfd *sfn; 1355 struct seltd *stp; 1356 1357 /* If it's not initialized there can't be any waiters. */ 1358 if (sip->si_mtx == NULL) 1359 return; 1360 /* 1361 * Locking the selinfo locks all selfds associated with it. 1362 */ 1363 mtx_lock(sip->si_mtx); 1364 TAILQ_FOREACH_SAFE(sfp, &sip->si_tdlist, sf_threads, sfn) { 1365 /* 1366 * Once we remove this sfp from the list and clear the 1367 * sf_si seltdclear will know to ignore this si. 1368 */ 1369 TAILQ_REMOVE(&sip->si_tdlist, sfp, sf_threads); 1370 sfp->sf_si = NULL; 1371 stp = sfp->sf_td; 1372 mtx_lock(&stp->st_mtx); 1373 stp->st_flags |= SELTD_PENDING; 1374 cv_broadcastpri(&stp->st_wait, pri); 1375 mtx_unlock(&stp->st_mtx); 1376 } 1377 mtx_unlock(sip->si_mtx); 1378 } 1379 1380 static void 1381 seltdinit(struct thread *td) 1382 { 1383 struct seltd *stp; 1384 1385 if ((stp = td->td_sel) != NULL) 1386 goto out; 1387 td->td_sel = stp = malloc(sizeof(*stp), M_SELECT, M_WAITOK|M_ZERO); 1388 mtx_init(&stp->st_mtx, "sellck", NULL, MTX_DEF); 1389 cv_init(&stp->st_wait, "select"); 1390 out: 1391 stp->st_flags = 0; 1392 STAILQ_INIT(&stp->st_selq); 1393 } 1394 1395 static int 1396 seltdwait(struct thread *td, int timo) 1397 { 1398 struct seltd *stp; 1399 int error; 1400 1401 stp = td->td_sel; 1402 /* 1403 * An event of interest may occur while we do not hold the seltd 1404 * locked so check the pending flag before we sleep. 1405 */ 1406 mtx_lock(&stp->st_mtx); 1407 /* 1408 * Any further calls to selrecord will be a rescan. 1409 */ 1410 stp->st_flags |= SELTD_RESCAN; 1411 if (stp->st_flags & SELTD_PENDING) { 1412 mtx_unlock(&stp->st_mtx); 1413 return (0); 1414 } 1415 if (timo > 0) 1416 error = cv_timedwait_sig(&stp->st_wait, &stp->st_mtx, timo); 1417 else 1418 error = cv_wait_sig(&stp->st_wait, &stp->st_mtx); 1419 mtx_unlock(&stp->st_mtx); 1420 1421 return (error); 1422 } 1423 1424 void 1425 seltdfini(struct thread *td) 1426 { 1427 struct seltd *stp; 1428 1429 stp = td->td_sel; 1430 if (stp == NULL) 1431 return; 1432 if (stp->st_free1) 1433 uma_zfree(selfd_zone, stp->st_free1); 1434 if (stp->st_free2) 1435 uma_zfree(selfd_zone, stp->st_free2); 1436 td->td_sel = NULL; 1437 free(stp, M_SELECT); 1438 } 1439 1440 /* 1441 * Remove the references to the thread from all of the objects we were 1442 * polling. 1443 */ 1444 static void 1445 seltdclear(struct thread *td) 1446 { 1447 struct seltd *stp; 1448 struct selfd *sfp; 1449 struct selfd *sfn; 1450 1451 stp = td->td_sel; 1452 STAILQ_FOREACH_SAFE(sfp, &stp->st_selq, sf_link, sfn) 1453 selfdfree(stp, sfp); 1454 stp->st_flags = 0; 1455 } 1456 1457 static void selectinit(void *); 1458 SYSINIT(select, SI_SUB_SYSCALLS, SI_ORDER_ANY, selectinit, NULL); 1459 static void 1460 selectinit(void *dummy __unused) 1461 { 1462 selfd_zone = uma_zcreate("selfd", sizeof(struct selfd), NULL, NULL, 1463 NULL, NULL, UMA_ALIGN_PTR, 0); 1464 } 1465