1 /* 2 * Copyright (c) 1982, 1986, 1989, 1993 3 * The Regents of the University of California. All rights reserved. 4 * (c) UNIX System Laboratories, Inc. 5 * All or some portions of this file are derived from material licensed 6 * to the University of California by American Telephone and Telegraph 7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 8 * the permission of UNIX System Laboratories, Inc. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the University of 21 * California, Berkeley and its contributors. 22 * 4. Neither the name of the University nor the names of its contributors 23 * may be used to endorse or promote products derived from this software 24 * without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 36 * SUCH DAMAGE. 37 * 38 * @(#)sys_generic.c 8.5 (Berkeley) 1/21/94 39 */ 40 41 #include <sys/cdefs.h> 42 __FBSDID("$FreeBSD$"); 43 44 #include "opt_ktrace.h" 45 46 #include <sys/param.h> 47 #include <sys/systm.h> 48 #include <sys/sysproto.h> 49 #include <sys/filedesc.h> 50 #include <sys/filio.h> 51 #include <sys/fcntl.h> 52 #include <sys/file.h> 53 #include <sys/proc.h> 54 #include <sys/signalvar.h> 55 #include <sys/socketvar.h> 56 #include <sys/uio.h> 57 #include <sys/kernel.h> 58 #include <sys/limits.h> 59 #include <sys/malloc.h> 60 #include <sys/poll.h> 61 #include <sys/resourcevar.h> 62 #include <sys/selinfo.h> 63 #include <sys/syscallsubr.h> 64 #include <sys/sysctl.h> 65 #include <sys/sysent.h> 66 #include <sys/bio.h> 67 #include <sys/buf.h> 68 #include <sys/condvar.h> 69 #ifdef KTRACE 70 #include <sys/ktrace.h> 71 #endif 72 #include <vm/vm.h> 73 #include <vm/vm_page.h> 74 75 static MALLOC_DEFINE(M_IOCTLOPS, "ioctlops", "ioctl data buffer"); 76 static MALLOC_DEFINE(M_SELECT, "select", "select() buffer"); 77 MALLOC_DEFINE(M_IOV, "iov", "large iov's"); 78 79 static int pollscan(struct thread *, struct pollfd *, u_int); 80 static int selscan(struct thread *, fd_mask **, fd_mask **, int); 81 static int dofileread(struct thread *, struct file *, int, void *, 82 size_t, off_t, int); 83 static int dofilewrite(struct thread *, struct file *, int, 84 const void *, size_t, off_t, int); 85 86 /* 87 * Read system call. 88 */ 89 #ifndef _SYS_SYSPROTO_H_ 90 struct read_args { 91 int fd; 92 void *buf; 93 size_t nbyte; 94 }; 95 #endif 96 /* 97 * MPSAFE 98 */ 99 int 100 read(td, uap) 101 struct thread *td; 102 struct read_args *uap; 103 { 104 struct file *fp; 105 int error; 106 107 if ((error = fget_read(td, uap->fd, &fp)) == 0) { 108 error = dofileread(td, fp, uap->fd, uap->buf, 109 uap->nbyte, (off_t)-1, 0); 110 fdrop(fp, td); 111 } 112 return(error); 113 } 114 115 /* 116 * Pread system call 117 */ 118 #ifndef _SYS_SYSPROTO_H_ 119 struct pread_args { 120 int fd; 121 void *buf; 122 size_t nbyte; 123 int pad; 124 off_t offset; 125 }; 126 #endif 127 /* 128 * MPSAFE 129 */ 130 int 131 pread(td, uap) 132 struct thread *td; 133 struct pread_args *uap; 134 { 135 struct file *fp; 136 int error; 137 138 if ((error = fget_read(td, uap->fd, &fp)) != 0) 139 return (error); 140 if (!(fp->f_ops->fo_flags & DFLAG_SEEKABLE)) { 141 error = ESPIPE; 142 } else { 143 error = dofileread(td, fp, uap->fd, uap->buf, uap->nbyte, 144 uap->offset, FOF_OFFSET); 145 } 146 fdrop(fp, td); 147 return(error); 148 } 149 150 /* 151 * Code common for read and pread 152 */ 153 static int 154 dofileread(td, fp, fd, buf, nbyte, offset, flags) 155 struct thread *td; 156 struct file *fp; 157 int fd, flags; 158 void *buf; 159 size_t nbyte; 160 off_t offset; 161 { 162 struct uio auio; 163 struct iovec aiov; 164 long cnt, error = 0; 165 #ifdef KTRACE 166 struct iovec ktriov; 167 struct uio ktruio; 168 int didktr = 0; 169 #endif 170 171 aiov.iov_base = buf; 172 aiov.iov_len = nbyte; 173 auio.uio_iov = &aiov; 174 auio.uio_iovcnt = 1; 175 auio.uio_offset = offset; 176 if (nbyte > INT_MAX) 177 return (EINVAL); 178 auio.uio_resid = nbyte; 179 auio.uio_rw = UIO_READ; 180 auio.uio_segflg = UIO_USERSPACE; 181 auio.uio_td = td; 182 #ifdef KTRACE 183 /* 184 * if tracing, save a copy of iovec 185 */ 186 if (KTRPOINT(td, KTR_GENIO)) { 187 ktriov = aiov; 188 ktruio = auio; 189 didktr = 1; 190 } 191 #endif 192 cnt = nbyte; 193 194 if ((error = fo_read(fp, &auio, td->td_ucred, flags, td))) { 195 if (auio.uio_resid != cnt && (error == ERESTART || 196 error == EINTR || error == EWOULDBLOCK)) 197 error = 0; 198 } 199 cnt -= auio.uio_resid; 200 #ifdef KTRACE 201 if (didktr && error == 0) { 202 ktruio.uio_iov = &ktriov; 203 ktruio.uio_resid = cnt; 204 ktrgenio(fd, UIO_READ, &ktruio, error); 205 } 206 #endif 207 td->td_retval[0] = cnt; 208 return (error); 209 } 210 211 /* 212 * Scatter read system call. 213 */ 214 #ifndef _SYS_SYSPROTO_H_ 215 struct readv_args { 216 int fd; 217 struct iovec *iovp; 218 u_int iovcnt; 219 }; 220 #endif 221 /* 222 * MPSAFE 223 */ 224 int 225 readv(td, uap) 226 struct thread *td; 227 struct readv_args *uap; 228 { 229 struct file *fp; 230 struct uio auio; 231 struct iovec *iov; 232 struct iovec *needfree; 233 struct iovec aiov[UIO_SMALLIOV]; 234 long i, cnt; 235 int error; 236 u_int iovlen; 237 #ifdef KTRACE 238 struct iovec *ktriov = NULL; 239 struct uio ktruio; 240 #endif 241 242 if ((error = fget_read(td, uap->fd, &fp)) != 0) 243 return (error); 244 needfree = NULL; 245 /* note: can't use iovlen until iovcnt is validated */ 246 iovlen = uap->iovcnt * sizeof (struct iovec); 247 if (uap->iovcnt > UIO_SMALLIOV) { 248 if (uap->iovcnt > UIO_MAXIOV) { 249 error = EINVAL; 250 goto done; 251 } 252 MALLOC(iov, struct iovec *, iovlen, M_IOV, M_WAITOK); 253 needfree = iov; 254 } else 255 iov = aiov; 256 auio.uio_iov = iov; 257 auio.uio_iovcnt = uap->iovcnt; 258 auio.uio_rw = UIO_READ; 259 auio.uio_segflg = UIO_USERSPACE; 260 auio.uio_td = td; 261 auio.uio_offset = -1; 262 if ((error = copyin(uap->iovp, iov, iovlen))) 263 goto done; 264 auio.uio_resid = 0; 265 for (i = 0; i < uap->iovcnt; i++) { 266 if (iov->iov_len > INT_MAX - auio.uio_resid) { 267 error = EINVAL; 268 goto done; 269 } 270 auio.uio_resid += iov->iov_len; 271 iov++; 272 } 273 #ifdef KTRACE 274 /* 275 * if tracing, save a copy of iovec 276 */ 277 if (KTRPOINT(td, KTR_GENIO)) { 278 MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, M_WAITOK); 279 bcopy(auio.uio_iov, ktriov, iovlen); 280 ktruio = auio; 281 } 282 #endif 283 cnt = auio.uio_resid; 284 if ((error = fo_read(fp, &auio, td->td_ucred, 0, td))) { 285 if (auio.uio_resid != cnt && (error == ERESTART || 286 error == EINTR || error == EWOULDBLOCK)) 287 error = 0; 288 } 289 cnt -= auio.uio_resid; 290 #ifdef KTRACE 291 if (ktriov != NULL) { 292 if (error == 0) { 293 ktruio.uio_iov = ktriov; 294 ktruio.uio_resid = cnt; 295 ktrgenio(uap->fd, UIO_READ, &ktruio, error); 296 } 297 FREE(ktriov, M_TEMP); 298 } 299 #endif 300 td->td_retval[0] = cnt; 301 done: 302 fdrop(fp, td); 303 if (needfree) 304 FREE(needfree, M_IOV); 305 return (error); 306 } 307 308 /* 309 * Write system call 310 */ 311 #ifndef _SYS_SYSPROTO_H_ 312 struct write_args { 313 int fd; 314 const void *buf; 315 size_t nbyte; 316 }; 317 #endif 318 /* 319 * MPSAFE 320 */ 321 int 322 write(td, uap) 323 struct thread *td; 324 struct write_args *uap; 325 { 326 struct file *fp; 327 int error; 328 329 if ((error = fget_write(td, uap->fd, &fp)) == 0) { 330 error = dofilewrite(td, fp, uap->fd, uap->buf, uap->nbyte, 331 (off_t)-1, 0); 332 fdrop(fp, td); 333 } else { 334 error = EBADF; /* XXX this can't be right */ 335 } 336 return(error); 337 } 338 339 /* 340 * Pwrite system call 341 */ 342 #ifndef _SYS_SYSPROTO_H_ 343 struct pwrite_args { 344 int fd; 345 const void *buf; 346 size_t nbyte; 347 int pad; 348 off_t offset; 349 }; 350 #endif 351 /* 352 * MPSAFE 353 */ 354 int 355 pwrite(td, uap) 356 struct thread *td; 357 struct pwrite_args *uap; 358 { 359 struct file *fp; 360 int error; 361 362 if ((error = fget_write(td, uap->fd, &fp)) == 0) { 363 if (!(fp->f_ops->fo_flags & DFLAG_SEEKABLE)) { 364 error = ESPIPE; 365 } else { 366 error = dofilewrite(td, fp, uap->fd, uap->buf, 367 uap->nbyte, uap->offset, FOF_OFFSET); 368 } 369 fdrop(fp, td); 370 } else { 371 error = EBADF; /* this can't be right */ 372 } 373 return(error); 374 } 375 376 static int 377 dofilewrite(td, fp, fd, buf, nbyte, offset, flags) 378 struct thread *td; 379 struct file *fp; 380 int fd, flags; 381 const void *buf; 382 size_t nbyte; 383 off_t offset; 384 { 385 struct uio auio; 386 struct iovec aiov; 387 long cnt, error = 0; 388 #ifdef KTRACE 389 struct iovec ktriov; 390 struct uio ktruio; 391 int didktr = 0; 392 #endif 393 394 aiov.iov_base = (void *)(uintptr_t)buf; 395 aiov.iov_len = nbyte; 396 auio.uio_iov = &aiov; 397 auio.uio_iovcnt = 1; 398 auio.uio_offset = offset; 399 if (nbyte > INT_MAX) 400 return (EINVAL); 401 auio.uio_resid = nbyte; 402 auio.uio_rw = UIO_WRITE; 403 auio.uio_segflg = UIO_USERSPACE; 404 auio.uio_td = td; 405 #ifdef KTRACE 406 /* 407 * if tracing, save a copy of iovec and uio 408 */ 409 if (KTRPOINT(td, KTR_GENIO)) { 410 ktriov = aiov; 411 ktruio = auio; 412 didktr = 1; 413 } 414 #endif 415 cnt = nbyte; 416 if (fp->f_type == DTYPE_VNODE) 417 bwillwrite(); 418 if ((error = fo_write(fp, &auio, td->td_ucred, flags, td))) { 419 if (auio.uio_resid != cnt && (error == ERESTART || 420 error == EINTR || error == EWOULDBLOCK)) 421 error = 0; 422 /* Socket layer is responsible for issuing SIGPIPE. */ 423 if (error == EPIPE && fp->f_type != DTYPE_SOCKET) { 424 PROC_LOCK(td->td_proc); 425 psignal(td->td_proc, SIGPIPE); 426 PROC_UNLOCK(td->td_proc); 427 } 428 } 429 cnt -= auio.uio_resid; 430 #ifdef KTRACE 431 if (didktr && error == 0) { 432 ktruio.uio_iov = &ktriov; 433 ktruio.uio_resid = cnt; 434 ktrgenio(fd, UIO_WRITE, &ktruio, error); 435 } 436 #endif 437 td->td_retval[0] = cnt; 438 return (error); 439 } 440 441 /* 442 * Gather write system call 443 */ 444 #ifndef _SYS_SYSPROTO_H_ 445 struct writev_args { 446 int fd; 447 struct iovec *iovp; 448 u_int iovcnt; 449 }; 450 #endif 451 /* 452 * MPSAFE 453 */ 454 int 455 writev(td, uap) 456 struct thread *td; 457 register struct writev_args *uap; 458 { 459 struct file *fp; 460 struct uio auio; 461 register struct iovec *iov; 462 struct iovec *needfree; 463 struct iovec aiov[UIO_SMALLIOV]; 464 long i, cnt, error = 0; 465 u_int iovlen; 466 #ifdef KTRACE 467 struct iovec *ktriov = NULL; 468 struct uio ktruio; 469 #endif 470 471 mtx_lock(&Giant); 472 if ((error = fget_write(td, uap->fd, &fp)) != 0) { 473 error = EBADF; 474 goto done2; 475 } 476 /* note: can't use iovlen until iovcnt is validated */ 477 iovlen = uap->iovcnt * sizeof (struct iovec); 478 if (uap->iovcnt > UIO_SMALLIOV) { 479 if (uap->iovcnt > UIO_MAXIOV) { 480 needfree = NULL; 481 error = EINVAL; 482 goto done; 483 } 484 MALLOC(iov, struct iovec *, iovlen, M_IOV, M_WAITOK); 485 needfree = iov; 486 } else { 487 iov = aiov; 488 needfree = NULL; 489 } 490 auio.uio_iov = iov; 491 auio.uio_iovcnt = uap->iovcnt; 492 auio.uio_rw = UIO_WRITE; 493 auio.uio_segflg = UIO_USERSPACE; 494 auio.uio_td = td; 495 auio.uio_offset = -1; 496 if ((error = copyin(uap->iovp, iov, iovlen))) 497 goto done; 498 auio.uio_resid = 0; 499 for (i = 0; i < uap->iovcnt; i++) { 500 if (iov->iov_len > INT_MAX - auio.uio_resid) { 501 error = EINVAL; 502 goto done; 503 } 504 auio.uio_resid += iov->iov_len; 505 iov++; 506 } 507 #ifdef KTRACE 508 /* 509 * if tracing, save a copy of iovec and uio 510 */ 511 if (KTRPOINT(td, KTR_GENIO)) { 512 MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, M_WAITOK); 513 bcopy(auio.uio_iov, ktriov, iovlen); 514 ktruio = auio; 515 } 516 #endif 517 cnt = auio.uio_resid; 518 if (fp->f_type == DTYPE_VNODE) 519 bwillwrite(); 520 if ((error = fo_write(fp, &auio, td->td_ucred, 0, td))) { 521 if (auio.uio_resid != cnt && (error == ERESTART || 522 error == EINTR || error == EWOULDBLOCK)) 523 error = 0; 524 if (error == EPIPE) { 525 PROC_LOCK(td->td_proc); 526 psignal(td->td_proc, SIGPIPE); 527 PROC_UNLOCK(td->td_proc); 528 } 529 } 530 cnt -= auio.uio_resid; 531 #ifdef KTRACE 532 if (ktriov != NULL) { 533 if (error == 0) { 534 ktruio.uio_iov = ktriov; 535 ktruio.uio_resid = cnt; 536 ktrgenio(uap->fd, UIO_WRITE, &ktruio, error); 537 } 538 FREE(ktriov, M_TEMP); 539 } 540 #endif 541 td->td_retval[0] = cnt; 542 done: 543 fdrop(fp, td); 544 if (needfree) 545 FREE(needfree, M_IOV); 546 done2: 547 mtx_unlock(&Giant); 548 return (error); 549 } 550 551 /* 552 * Ioctl system call 553 */ 554 #ifndef _SYS_SYSPROTO_H_ 555 struct ioctl_args { 556 int fd; 557 u_long com; 558 caddr_t data; 559 }; 560 #endif 561 /* 562 * MPSAFE 563 */ 564 /* ARGSUSED */ 565 int 566 ioctl(td, uap) 567 struct thread *td; 568 register struct ioctl_args *uap; 569 { 570 struct file *fp; 571 register struct filedesc *fdp; 572 register u_long com; 573 int error = 0; 574 register u_int size; 575 caddr_t data, memp; 576 int tmp; 577 #define STK_PARAMS 128 578 union { 579 char stkbuf[STK_PARAMS]; 580 long align; 581 } ubuf; 582 583 if ((error = fget(td, uap->fd, &fp)) != 0) 584 return (error); 585 mtx_lock(&Giant); 586 if ((fp->f_flag & (FREAD | FWRITE)) == 0) { 587 fdrop(fp, td); 588 mtx_unlock(&Giant); 589 return (EBADF); 590 } 591 fdp = td->td_proc->p_fd; 592 switch (com = uap->com) { 593 case FIONCLEX: 594 FILEDESC_LOCK(fdp); 595 fdp->fd_ofileflags[uap->fd] &= ~UF_EXCLOSE; 596 FILEDESC_UNLOCK(fdp); 597 fdrop(fp, td); 598 mtx_unlock(&Giant); 599 return (0); 600 case FIOCLEX: 601 FILEDESC_LOCK(fdp); 602 fdp->fd_ofileflags[uap->fd] |= UF_EXCLOSE; 603 FILEDESC_UNLOCK(fdp); 604 fdrop(fp, td); 605 mtx_unlock(&Giant); 606 return (0); 607 } 608 609 /* 610 * Interpret high order word to find amount of data to be 611 * copied to/from the user's address space. 612 */ 613 size = IOCPARM_LEN(com); 614 if (size > IOCPARM_MAX) { 615 fdrop(fp, td); 616 mtx_unlock(&Giant); 617 return (ENOTTY); 618 } 619 620 memp = NULL; 621 if (size > sizeof (ubuf.stkbuf)) { 622 memp = malloc((u_long)size, M_IOCTLOPS, M_WAITOK); 623 data = memp; 624 } else { 625 data = ubuf.stkbuf; 626 } 627 if (com&IOC_IN) { 628 if (size) { 629 error = copyin(uap->data, data, (u_int)size); 630 if (error) { 631 if (memp) 632 free(memp, M_IOCTLOPS); 633 fdrop(fp, td); 634 goto done; 635 } 636 } else { 637 *(caddr_t *)data = uap->data; 638 } 639 } else if ((com&IOC_OUT) && size) { 640 /* 641 * Zero the buffer so the user always 642 * gets back something deterministic. 643 */ 644 bzero(data, size); 645 } else if (com&IOC_VOID) { 646 *(caddr_t *)data = uap->data; 647 } 648 649 switch (com) { 650 651 case FIONBIO: 652 FILE_LOCK(fp); 653 if ((tmp = *(int *)data)) 654 fp->f_flag |= FNONBLOCK; 655 else 656 fp->f_flag &= ~FNONBLOCK; 657 FILE_UNLOCK(fp); 658 error = fo_ioctl(fp, FIONBIO, &tmp, td->td_ucred, td); 659 break; 660 661 case FIOASYNC: 662 FILE_LOCK(fp); 663 if ((tmp = *(int *)data)) 664 fp->f_flag |= FASYNC; 665 else 666 fp->f_flag &= ~FASYNC; 667 FILE_UNLOCK(fp); 668 error = fo_ioctl(fp, FIOASYNC, &tmp, td->td_ucred, td); 669 break; 670 671 default: 672 error = fo_ioctl(fp, com, data, td->td_ucred, td); 673 /* 674 * Copy any data to user, size was 675 * already set and checked above. 676 */ 677 if (error == 0 && (com&IOC_OUT) && size) 678 error = copyout(data, uap->data, (u_int)size); 679 break; 680 } 681 if (memp) 682 free(memp, M_IOCTLOPS); 683 fdrop(fp, td); 684 done: 685 mtx_unlock(&Giant); 686 return (error); 687 } 688 689 /* 690 * sellock and selwait are initialized in selectinit() via SYSINIT. 691 */ 692 struct mtx sellock; 693 struct cv selwait; 694 u_int nselcoll; /* Select collisions since boot */ 695 SYSCTL_UINT(_kern, OID_AUTO, nselcoll, CTLFLAG_RD, &nselcoll, 0, ""); 696 697 /* 698 * Select system call. 699 */ 700 #ifndef _SYS_SYSPROTO_H_ 701 struct select_args { 702 int nd; 703 fd_set *in, *ou, *ex; 704 struct timeval *tv; 705 }; 706 #endif 707 /* 708 * MPSAFE 709 */ 710 int 711 select(td, uap) 712 register struct thread *td; 713 register struct select_args *uap; 714 { 715 struct timeval tv, *tvp; 716 int error; 717 718 if (uap->tv != NULL) { 719 error = copyin(uap->tv, &tv, sizeof(tv)); 720 if (error) 721 return (error); 722 tvp = &tv; 723 } else 724 tvp = NULL; 725 726 return (kern_select(td, uap->nd, uap->in, uap->ou, uap->ex, tvp)); 727 } 728 729 int 730 kern_select(struct thread *td, int nd, fd_set *fd_in, fd_set *fd_ou, 731 fd_set *fd_ex, struct timeval *tvp) 732 { 733 struct filedesc *fdp; 734 /* 735 * The magic 2048 here is chosen to be just enough for FD_SETSIZE 736 * infds with the new FD_SETSIZE of 1024, and more than enough for 737 * FD_SETSIZE infds, outfds and exceptfds with the old FD_SETSIZE 738 * of 256. 739 */ 740 fd_mask s_selbits[howmany(2048, NFDBITS)]; 741 fd_mask *ibits[3], *obits[3], *selbits, *sbp; 742 struct timeval atv, rtv, ttv; 743 int error, timo; 744 u_int ncoll, nbufbytes, ncpbytes, nfdbits; 745 746 if (nd < 0) 747 return (EINVAL); 748 fdp = td->td_proc->p_fd; 749 mtx_lock(&Giant); 750 FILEDESC_LOCK(fdp); 751 752 if (nd > td->td_proc->p_fd->fd_nfiles) 753 nd = td->td_proc->p_fd->fd_nfiles; /* forgiving; slightly wrong */ 754 FILEDESC_UNLOCK(fdp); 755 756 /* 757 * Allocate just enough bits for the non-null fd_sets. Use the 758 * preallocated auto buffer if possible. 759 */ 760 nfdbits = roundup(nd, NFDBITS); 761 ncpbytes = nfdbits / NBBY; 762 nbufbytes = 0; 763 if (fd_in != NULL) 764 nbufbytes += 2 * ncpbytes; 765 if (fd_ou != NULL) 766 nbufbytes += 2 * ncpbytes; 767 if (fd_ex != NULL) 768 nbufbytes += 2 * ncpbytes; 769 if (nbufbytes <= sizeof s_selbits) 770 selbits = &s_selbits[0]; 771 else 772 selbits = malloc(nbufbytes, M_SELECT, M_WAITOK); 773 774 /* 775 * Assign pointers into the bit buffers and fetch the input bits. 776 * Put the output buffers together so that they can be bzeroed 777 * together. 778 */ 779 sbp = selbits; 780 #define getbits(name, x) \ 781 do { \ 782 if (name == NULL) \ 783 ibits[x] = NULL; \ 784 else { \ 785 ibits[x] = sbp + nbufbytes / 2 / sizeof *sbp; \ 786 obits[x] = sbp; \ 787 sbp += ncpbytes / sizeof *sbp; \ 788 error = copyin(name, ibits[x], ncpbytes); \ 789 if (error != 0) \ 790 goto done_nosellock; \ 791 } \ 792 } while (0) 793 getbits(fd_in, 0); 794 getbits(fd_ou, 1); 795 getbits(fd_ex, 2); 796 #undef getbits 797 if (nbufbytes != 0) 798 bzero(selbits, nbufbytes / 2); 799 800 if (tvp != NULL) { 801 atv = *tvp; 802 if (itimerfix(&atv)) { 803 error = EINVAL; 804 goto done_nosellock; 805 } 806 getmicrouptime(&rtv); 807 timevaladd(&atv, &rtv); 808 } else { 809 atv.tv_sec = 0; 810 atv.tv_usec = 0; 811 } 812 timo = 0; 813 TAILQ_INIT(&td->td_selq); 814 mtx_lock(&sellock); 815 retry: 816 ncoll = nselcoll; 817 mtx_lock_spin(&sched_lock); 818 td->td_flags |= TDF_SELECT; 819 mtx_unlock_spin(&sched_lock); 820 mtx_unlock(&sellock); 821 822 error = selscan(td, ibits, obits, nd); 823 mtx_lock(&sellock); 824 if (error || td->td_retval[0]) 825 goto done; 826 if (atv.tv_sec || atv.tv_usec) { 827 getmicrouptime(&rtv); 828 if (timevalcmp(&rtv, &atv, >=)) 829 goto done; 830 ttv = atv; 831 timevalsub(&ttv, &rtv); 832 timo = ttv.tv_sec > 24 * 60 * 60 ? 833 24 * 60 * 60 * hz : tvtohz(&ttv); 834 } 835 836 /* 837 * An event of interest may occur while we do not hold 838 * sellock, so check TDF_SELECT and the number of 839 * collisions and rescan the file descriptors if 840 * necessary. 841 */ 842 mtx_lock_spin(&sched_lock); 843 if ((td->td_flags & TDF_SELECT) == 0 || nselcoll != ncoll) { 844 mtx_unlock_spin(&sched_lock); 845 goto retry; 846 } 847 mtx_unlock_spin(&sched_lock); 848 849 if (timo > 0) 850 error = cv_timedwait_sig(&selwait, &sellock, timo); 851 else 852 error = cv_wait_sig(&selwait, &sellock); 853 854 if (error == 0) 855 goto retry; 856 857 done: 858 clear_selinfo_list(td); 859 mtx_lock_spin(&sched_lock); 860 td->td_flags &= ~TDF_SELECT; 861 mtx_unlock_spin(&sched_lock); 862 mtx_unlock(&sellock); 863 864 done_nosellock: 865 /* select is not restarted after signals... */ 866 if (error == ERESTART) 867 error = EINTR; 868 if (error == EWOULDBLOCK) 869 error = 0; 870 #define putbits(name, x) \ 871 if (name && (error2 = copyout(obits[x], name, ncpbytes))) \ 872 error = error2; 873 if (error == 0) { 874 int error2; 875 876 putbits(fd_in, 0); 877 putbits(fd_ou, 1); 878 putbits(fd_ex, 2); 879 #undef putbits 880 } 881 if (selbits != &s_selbits[0]) 882 free(selbits, M_SELECT); 883 884 mtx_unlock(&Giant); 885 return (error); 886 } 887 888 static int 889 selscan(td, ibits, obits, nfd) 890 struct thread *td; 891 fd_mask **ibits, **obits; 892 int nfd; 893 { 894 int msk, i, fd; 895 fd_mask bits; 896 struct file *fp; 897 int n = 0; 898 /* Note: backend also returns POLLHUP/POLLERR if appropriate. */ 899 static int flag[3] = { POLLRDNORM, POLLWRNORM, POLLRDBAND }; 900 struct filedesc *fdp = td->td_proc->p_fd; 901 902 FILEDESC_LOCK(fdp); 903 for (msk = 0; msk < 3; msk++) { 904 if (ibits[msk] == NULL) 905 continue; 906 for (i = 0; i < nfd; i += NFDBITS) { 907 bits = ibits[msk][i/NFDBITS]; 908 /* ffs(int mask) not portable, fd_mask is long */ 909 for (fd = i; bits && fd < nfd; fd++, bits >>= 1) { 910 if (!(bits & 1)) 911 continue; 912 if ((fp = fget_locked(fdp, fd)) == NULL) { 913 FILEDESC_UNLOCK(fdp); 914 return (EBADF); 915 } 916 if (fo_poll(fp, flag[msk], td->td_ucred, 917 td)) { 918 obits[msk][(fd)/NFDBITS] |= 919 ((fd_mask)1 << ((fd) % NFDBITS)); 920 n++; 921 } 922 } 923 } 924 } 925 FILEDESC_UNLOCK(fdp); 926 td->td_retval[0] = n; 927 return (0); 928 } 929 930 /* 931 * Poll system call. 932 */ 933 #ifndef _SYS_SYSPROTO_H_ 934 struct poll_args { 935 struct pollfd *fds; 936 u_int nfds; 937 int timeout; 938 }; 939 #endif 940 /* 941 * MPSAFE 942 */ 943 int 944 poll(td, uap) 945 struct thread *td; 946 struct poll_args *uap; 947 { 948 caddr_t bits; 949 char smallbits[32 * sizeof(struct pollfd)]; 950 struct timeval atv, rtv, ttv; 951 int error = 0, timo; 952 u_int ncoll, nfds; 953 size_t ni; 954 955 nfds = uap->nfds; 956 957 mtx_lock(&Giant); 958 /* 959 * This is kinda bogus. We have fd limits, but that is not 960 * really related to the size of the pollfd array. Make sure 961 * we let the process use at least FD_SETSIZE entries and at 962 * least enough for the current limits. We want to be reasonably 963 * safe, but not overly restrictive. 964 */ 965 if ((nfds > td->td_proc->p_rlimit[RLIMIT_NOFILE].rlim_cur) && 966 (nfds > FD_SETSIZE)) { 967 error = EINVAL; 968 goto done2; 969 } 970 ni = nfds * sizeof(struct pollfd); 971 if (ni > sizeof(smallbits)) 972 bits = malloc(ni, M_TEMP, M_WAITOK); 973 else 974 bits = smallbits; 975 error = copyin(uap->fds, bits, ni); 976 if (error) 977 goto done_nosellock; 978 if (uap->timeout != INFTIM) { 979 atv.tv_sec = uap->timeout / 1000; 980 atv.tv_usec = (uap->timeout % 1000) * 1000; 981 if (itimerfix(&atv)) { 982 error = EINVAL; 983 goto done_nosellock; 984 } 985 getmicrouptime(&rtv); 986 timevaladd(&atv, &rtv); 987 } else { 988 atv.tv_sec = 0; 989 atv.tv_usec = 0; 990 } 991 timo = 0; 992 TAILQ_INIT(&td->td_selq); 993 mtx_lock(&sellock); 994 retry: 995 ncoll = nselcoll; 996 mtx_lock_spin(&sched_lock); 997 td->td_flags |= TDF_SELECT; 998 mtx_unlock_spin(&sched_lock); 999 mtx_unlock(&sellock); 1000 1001 error = pollscan(td, (struct pollfd *)bits, nfds); 1002 mtx_lock(&sellock); 1003 if (error || td->td_retval[0]) 1004 goto done; 1005 if (atv.tv_sec || atv.tv_usec) { 1006 getmicrouptime(&rtv); 1007 if (timevalcmp(&rtv, &atv, >=)) 1008 goto done; 1009 ttv = atv; 1010 timevalsub(&ttv, &rtv); 1011 timo = ttv.tv_sec > 24 * 60 * 60 ? 1012 24 * 60 * 60 * hz : tvtohz(&ttv); 1013 } 1014 /* 1015 * An event of interest may occur while we do not hold 1016 * sellock, so check TDF_SELECT and the number of collisions 1017 * and rescan the file descriptors if necessary. 1018 */ 1019 mtx_lock_spin(&sched_lock); 1020 if ((td->td_flags & TDF_SELECT) == 0 || nselcoll != ncoll) { 1021 mtx_unlock_spin(&sched_lock); 1022 goto retry; 1023 } 1024 mtx_unlock_spin(&sched_lock); 1025 1026 if (timo > 0) 1027 error = cv_timedwait_sig(&selwait, &sellock, timo); 1028 else 1029 error = cv_wait_sig(&selwait, &sellock); 1030 1031 if (error == 0) 1032 goto retry; 1033 1034 done: 1035 clear_selinfo_list(td); 1036 mtx_lock_spin(&sched_lock); 1037 td->td_flags &= ~TDF_SELECT; 1038 mtx_unlock_spin(&sched_lock); 1039 mtx_unlock(&sellock); 1040 1041 done_nosellock: 1042 /* poll is not restarted after signals... */ 1043 if (error == ERESTART) 1044 error = EINTR; 1045 if (error == EWOULDBLOCK) 1046 error = 0; 1047 if (error == 0) { 1048 error = copyout(bits, uap->fds, ni); 1049 if (error) 1050 goto out; 1051 } 1052 out: 1053 if (ni > sizeof(smallbits)) 1054 free(bits, M_TEMP); 1055 done2: 1056 mtx_unlock(&Giant); 1057 return (error); 1058 } 1059 1060 static int 1061 pollscan(td, fds, nfd) 1062 struct thread *td; 1063 struct pollfd *fds; 1064 u_int nfd; 1065 { 1066 register struct filedesc *fdp = td->td_proc->p_fd; 1067 int i; 1068 struct file *fp; 1069 int n = 0; 1070 1071 FILEDESC_LOCK(fdp); 1072 for (i = 0; i < nfd; i++, fds++) { 1073 if (fds->fd >= fdp->fd_nfiles) { 1074 fds->revents = POLLNVAL; 1075 n++; 1076 } else if (fds->fd < 0) { 1077 fds->revents = 0; 1078 } else { 1079 fp = fdp->fd_ofiles[fds->fd]; 1080 if (fp == NULL) { 1081 fds->revents = POLLNVAL; 1082 n++; 1083 } else { 1084 /* 1085 * Note: backend also returns POLLHUP and 1086 * POLLERR if appropriate. 1087 */ 1088 fds->revents = fo_poll(fp, fds->events, 1089 td->td_ucred, td); 1090 if (fds->revents != 0) 1091 n++; 1092 } 1093 } 1094 } 1095 FILEDESC_UNLOCK(fdp); 1096 td->td_retval[0] = n; 1097 return (0); 1098 } 1099 1100 /* 1101 * OpenBSD poll system call. 1102 * XXX this isn't quite a true representation.. OpenBSD uses select ops. 1103 */ 1104 #ifndef _SYS_SYSPROTO_H_ 1105 struct openbsd_poll_args { 1106 struct pollfd *fds; 1107 u_int nfds; 1108 int timeout; 1109 }; 1110 #endif 1111 /* 1112 * MPSAFE 1113 */ 1114 int 1115 openbsd_poll(td, uap) 1116 register struct thread *td; 1117 register struct openbsd_poll_args *uap; 1118 { 1119 return (poll(td, (struct poll_args *)uap)); 1120 } 1121 1122 /* 1123 * Remove the references to the thread from all of the objects 1124 * we were polling. 1125 * 1126 * This code assumes that the underlying owner of the selinfo 1127 * structure will hold sellock before it changes it, and that 1128 * it will unlink itself from our list if it goes away. 1129 */ 1130 void 1131 clear_selinfo_list(td) 1132 struct thread *td; 1133 { 1134 struct selinfo *si; 1135 1136 mtx_assert(&sellock, MA_OWNED); 1137 TAILQ_FOREACH(si, &td->td_selq, si_thrlist) 1138 si->si_thread = NULL; 1139 TAILQ_INIT(&td->td_selq); 1140 } 1141 1142 /*ARGSUSED*/ 1143 int 1144 seltrue(dev, events, td) 1145 dev_t dev; 1146 int events; 1147 struct thread *td; 1148 { 1149 1150 return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM)); 1151 } 1152 1153 /* 1154 * Record a select request. 1155 */ 1156 void 1157 selrecord(selector, sip) 1158 struct thread *selector; 1159 struct selinfo *sip; 1160 { 1161 1162 mtx_lock(&sellock); 1163 /* 1164 * If the selinfo's thread pointer is NULL then take ownership of it. 1165 * 1166 * If the thread pointer is not NULL and it points to another 1167 * thread, then we have a collision. 1168 * 1169 * If the thread pointer is not NULL and points back to us then leave 1170 * it alone as we've already added pointed it at us and added it to 1171 * our list. 1172 */ 1173 if (sip->si_thread == NULL) { 1174 sip->si_thread = selector; 1175 TAILQ_INSERT_TAIL(&selector->td_selq, sip, si_thrlist); 1176 } else if (sip->si_thread != selector) { 1177 sip->si_flags |= SI_COLL; 1178 } 1179 1180 mtx_unlock(&sellock); 1181 } 1182 1183 /* 1184 * Do a wakeup when a selectable event occurs. 1185 */ 1186 void 1187 selwakeup(sip) 1188 struct selinfo *sip; 1189 { 1190 struct thread *td; 1191 1192 mtx_lock(&sellock); 1193 td = sip->si_thread; 1194 if ((sip->si_flags & SI_COLL) != 0) { 1195 nselcoll++; 1196 sip->si_flags &= ~SI_COLL; 1197 cv_broadcast(&selwait); 1198 } 1199 if (td == NULL) { 1200 mtx_unlock(&sellock); 1201 return; 1202 } 1203 TAILQ_REMOVE(&td->td_selq, sip, si_thrlist); 1204 sip->si_thread = NULL; 1205 mtx_lock_spin(&sched_lock); 1206 if (td->td_wchan == &selwait) { 1207 cv_waitq_remove(td); 1208 TD_CLR_SLEEPING(td); 1209 setrunnable(td); 1210 } else 1211 td->td_flags &= ~TDF_SELECT; 1212 mtx_unlock_spin(&sched_lock); 1213 mtx_unlock(&sellock); 1214 } 1215 1216 static void selectinit(void *); 1217 SYSINIT(select, SI_SUB_LOCK, SI_ORDER_FIRST, selectinit, NULL) 1218 1219 /* ARGSUSED*/ 1220 static void 1221 selectinit(dummy) 1222 void *dummy; 1223 { 1224 cv_init(&selwait, "select"); 1225 mtx_init(&sellock, "sellck", NULL, MTX_DEF); 1226 } 1227