/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)sys_generic.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_capsicum.h"
#include "opt_compat.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/capsicum.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/socketvar.h>
#include <sys/uio.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/resourcevar.h>
#include <sys/selinfo.h>
#include <sys/sleepqueue.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vnode.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/condvar.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <security/audit/audit.h>

/*
 * The following macro defines how many bytes will be allocated from
 * the stack instead of memory allocated when passing the IOCTL data
 * structures from userspace to the kernel.  Some IOCTLs with small
 * data structures are used very frequently, and this small on-stack
 * buffer gives a significant speedup for those requests.  The value of
 * this define should be greater than or equal to 64 bytes and should
 * also be a power of two.  The data structure is currently
 * hard-aligned to an 8-byte boundary on the stack.  This should be
 * sufficient for all supported platforms.
 */
#define	SYS_IOCTL_SMALL_SIZE	128	/* bytes */
#define	SYS_IOCTL_SMALL_ALIGN	8	/* bytes */
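/*
 * Compile-time sanity check of the constraints stated above (a sketch;
 * it assumes powerof2() from <sys/param.h> and CTASSERT() from
 * <sys/systm.h>, both of which are included above).
 */
CTASSERT(SYS_IOCTL_SMALL_SIZE >= 64 && powerof2(SYS_IOCTL_SMALL_SIZE));
CTASSERT(powerof2(SYS_IOCTL_SMALL_ALIGN));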
int iosize_max_clamp = 0;
SYSCTL_INT(_debug, OID_AUTO, iosize_max_clamp, CTLFLAG_RW,
    &iosize_max_clamp, 0, "Clamp max i/o size to INT_MAX");
int devfs_iosize_max_clamp = 1;
SYSCTL_INT(_debug, OID_AUTO, devfs_iosize_max_clamp, CTLFLAG_RW,
    &devfs_iosize_max_clamp, 0, "Clamp max i/o size to INT_MAX for devices");

/*
 * Assert that the return value of read(2) and write(2) syscalls fits
 * into a register.  If not, an architecture will need to provide the
 * usermode wrappers to reconstruct the result.
 */
CTASSERT(sizeof(register_t) >= sizeof(size_t));

static MALLOC_DEFINE(M_IOCTLOPS, "ioctlops", "ioctl data buffer");
static MALLOC_DEFINE(M_SELECT, "select", "select() buffer");
MALLOC_DEFINE(M_IOV, "iov", "large iov's");

static int	pollout(struct thread *, struct pollfd *, struct pollfd *,
		    u_int);
static int	pollscan(struct thread *, struct pollfd *, u_int);
static int	pollrescan(struct thread *);
static int	selscan(struct thread *, fd_mask **, fd_mask **, int);
static int	selrescan(struct thread *, fd_mask **, fd_mask **);
static void	selfdalloc(struct thread *, void *);
static void	selfdfree(struct seltd *, struct selfd *);
static int	dofileread(struct thread *, int, struct file *, struct uio *,
		    off_t, int);
static int	dofilewrite(struct thread *, int, struct file *, struct uio *,
		    off_t, int);
static void	doselwakeup(struct selinfo *, int);
static void	seltdinit(struct thread *);
static int	seltdwait(struct thread *, sbintime_t, sbintime_t);
static void	seltdclear(struct thread *);

/*
 * One seltd per-thread allocated on demand as needed.
 *
 *	t - protected by st_mtx
 *	k - Only accessed by curthread or read-only
 */
struct seltd {
	STAILQ_HEAD(, selfd)	st_selq;	/* (k) List of selfds. */
	struct selfd		*st_free1;	/* (k) free fd for read set. */
	struct selfd		*st_free2;	/* (k) free fd for write set. */
	struct mtx		st_mtx;		/* Protects struct seltd */
	struct cv		st_wait;	/* (t) Wait channel. */
	int			st_flags;	/* (t) SELTD_ flags. */
};

#define	SELTD_PENDING	0x0001		/* We have pending events. */
#define	SELTD_RESCAN	0x0002		/* Doing a rescan. */

/*
 * One selfd allocated per-thread per-file-descriptor.
 *	f - protected by sf_mtx
 */
struct selfd {
	STAILQ_ENTRY(selfd)	sf_link;	/* (k) fds owned by this td. */
	TAILQ_ENTRY(selfd)	sf_threads;	/* (f) fds on this selinfo. */
	struct selinfo		*sf_si;		/* (f) selinfo when linked. */
	struct mtx		*sf_mtx;	/* Pointer to selinfo mtx. */
	struct seltd		*sf_td;		/* (k) owning seltd. */
	void			*sf_cookie;	/* (k) fd or pollfd. */
};
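/*
 * Life cycle of a selfd, in brief (a sketch of the flow implemented
 * below): selfdalloc() preallocates it, selrecord() links it to both
 * the thread (st_selq) and the polled object (si_tdlist), doselwakeup()
 * or selfdfree() unlinks it from the selinfo, and selfdfree() finally
 * returns it to selfd_zone.
 */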
static uma_zone_t selfd_zone;
static struct mtx_pool *mtxpool_select;

#ifndef _SYS_SYSPROTO_H_
struct read_args {
	int	fd;
	void	*buf;
	size_t	nbyte;
};
#endif
int
sys_read(struct thread *td, struct read_args *uap)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if (uap->nbyte > IOSIZE_MAX)
		return (EINVAL);
	aiov.iov_base = uap->buf;
	aiov.iov_len = uap->nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = uap->nbyte;
	auio.uio_segflg = UIO_USERSPACE;
	error = kern_readv(td, uap->fd, &auio);
	return (error);
}

/*
 * Positioned read system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct pread_args {
	int	fd;
	void	*buf;
	size_t	nbyte;
	int	pad;
	off_t	offset;
};
#endif
int
sys_pread(struct thread *td, struct pread_args *uap)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if (uap->nbyte > IOSIZE_MAX)
		return (EINVAL);
	aiov.iov_base = uap->buf;
	aiov.iov_len = uap->nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = uap->nbyte;
	auio.uio_segflg = UIO_USERSPACE;
	error = kern_preadv(td, uap->fd, &auio, uap->offset);
	return (error);
}

int
freebsd6_pread(struct thread *td, struct freebsd6_pread_args *uap)
{
	struct pread_args oargs;

	oargs.fd = uap->fd;
	oargs.buf = uap->buf;
	oargs.nbyte = uap->nbyte;
	oargs.offset = uap->offset;
	return (sys_pread(td, &oargs));
}

/*
 * Scatter read system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct readv_args {
	int	fd;
	struct	iovec *iovp;
	u_int	iovcnt;
};
#endif
int
sys_readv(struct thread *td, struct readv_args *uap)
{
	struct uio *auio;
	int error;

	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
	if (error)
		return (error);
	error = kern_readv(td, uap->fd, auio);
	free(auio, M_IOV);
	return (error);
}

int
kern_readv(struct thread *td, int fd, struct uio *auio)
{
	struct file *fp;
	cap_rights_t rights;
	int error;

	error = fget_read(td, fd, cap_rights_init(&rights, CAP_READ), &fp);
	if (error)
		return (error);
	error = dofileread(td, fd, fp, auio, (off_t)-1, 0);
	fdrop(fp, td);
	return (error);
}
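/*
 * Userland view of the scatter read path, for orientation (a sketch,
 * not kernel code; 'hdr' and 'body' are hypothetical buffers):
 *
 *	struct iovec iov[2] = {
 *		{ .iov_base = hdr,  .iov_len = sizeof(hdr) },
 *		{ .iov_base = body, .iov_len = bodylen },
 *	};
 *	ssize_t n = readv(fd, iov, 2);
 *
 * copyinuio() validates iovcnt and builds the kernel-side struct uio
 * that kern_readv() above consumes.
 */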
/*
 * Scatter positioned read system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct preadv_args {
	int	fd;
	struct	iovec *iovp;
	u_int	iovcnt;
	off_t	offset;
};
#endif
int
sys_preadv(struct thread *td, struct preadv_args *uap)
{
	struct uio *auio;
	int error;

	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
	if (error)
		return (error);
	error = kern_preadv(td, uap->fd, auio, uap->offset);
	free(auio, M_IOV);
	return (error);
}

int
kern_preadv(struct thread *td, int fd, struct uio *auio, off_t offset)
{
	struct file *fp;
	cap_rights_t rights;
	int error;

	error = fget_read(td, fd, cap_rights_init(&rights, CAP_PREAD), &fp);
	if (error)
		return (error);
	if (!(fp->f_ops->fo_flags & DFLAG_SEEKABLE))
		error = ESPIPE;
	else if (offset < 0 && fp->f_vnode->v_type != VCHR)
		error = EINVAL;
	else
		error = dofileread(td, fd, fp, auio, offset, FOF_OFFSET);
	fdrop(fp, td);
	return (error);
}

/*
 * Common code for readv and preadv that reads data in
 * from a file using the passed in uio, offset, and flags.
 */
static int
dofileread(struct thread *td, int fd, struct file *fp, struct uio *auio,
    off_t offset, int flags)
{
	ssize_t cnt;
	int error;
#ifdef KTRACE
	struct uio *ktruio = NULL;
#endif

	/* Finish zero length reads right here */
	if (auio->uio_resid == 0) {
		td->td_retval[0] = 0;
		return (0);
	}
	auio->uio_rw = UIO_READ;
	auio->uio_offset = offset;
	auio->uio_td = td;
#ifdef KTRACE
	if (KTRPOINT(td, KTR_GENIO))
		ktruio = cloneuio(auio);
#endif
	cnt = auio->uio_resid;
	if ((error = fo_read(fp, auio, td->td_ucred, flags, td))) {
		if (auio->uio_resid != cnt && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
	}
	cnt -= auio->uio_resid;
#ifdef KTRACE
	if (ktruio != NULL) {
		ktruio->uio_resid = cnt;
		ktrgenio(fd, UIO_READ, ktruio, error);
	}
#endif
	td->td_retval[0] = cnt;
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct write_args {
	int	fd;
	const void *buf;
	size_t	nbyte;
};
#endif
int
sys_write(struct thread *td, struct write_args *uap)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if (uap->nbyte > IOSIZE_MAX)
		return (EINVAL);
	aiov.iov_base = (void *)(uintptr_t)uap->buf;
	aiov.iov_len = uap->nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = uap->nbyte;
	auio.uio_segflg = UIO_USERSPACE;
	error = kern_writev(td, uap->fd, &auio);
	return (error);
}
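/*
 * Userland contrast between the plain and positioned forms, for
 * orientation (a sketch):
 *
 *	write(fd, buf, n);		// advances the file offset
 *	pwrite(fd, buf, n, off);	// leaves the file offset alone
 *
 * The FOF_OFFSET flag passed by kern_preadv() above and kern_pwritev()
 * below is what tells fo_read()/fo_write() to use the supplied offset
 * instead of f_offset.
 */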
/*
 * Positioned write system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct pwrite_args {
	int	fd;
	const void *buf;
	size_t	nbyte;
	int	pad;
	off_t	offset;
};
#endif
int
sys_pwrite(struct thread *td, struct pwrite_args *uap)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if (uap->nbyte > IOSIZE_MAX)
		return (EINVAL);
	aiov.iov_base = (void *)(uintptr_t)uap->buf;
	aiov.iov_len = uap->nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = uap->nbyte;
	auio.uio_segflg = UIO_USERSPACE;
	error = kern_pwritev(td, uap->fd, &auio, uap->offset);
	return (error);
}

int
freebsd6_pwrite(struct thread *td, struct freebsd6_pwrite_args *uap)
{
	struct pwrite_args oargs;

	oargs.fd = uap->fd;
	oargs.buf = uap->buf;
	oargs.nbyte = uap->nbyte;
	oargs.offset = uap->offset;
	return (sys_pwrite(td, &oargs));
}

/*
 * Gather write system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct writev_args {
	int	fd;
	struct	iovec *iovp;
	u_int	iovcnt;
};
#endif
int
sys_writev(struct thread *td, struct writev_args *uap)
{
	struct uio *auio;
	int error;

	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
	if (error)
		return (error);
	error = kern_writev(td, uap->fd, auio);
	free(auio, M_IOV);
	return (error);
}

int
kern_writev(struct thread *td, int fd, struct uio *auio)
{
	struct file *fp;
	cap_rights_t rights;
	int error;

	error = fget_write(td, fd, cap_rights_init(&rights, CAP_WRITE), &fp);
	if (error)
		return (error);
	error = dofilewrite(td, fd, fp, auio, (off_t)-1, 0);
	fdrop(fp, td);
	return (error);
}

/*
 * Gather positioned write system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct pwritev_args {
	int	fd;
	struct	iovec *iovp;
	u_int	iovcnt;
	off_t	offset;
};
#endif
int
sys_pwritev(struct thread *td, struct pwritev_args *uap)
{
	struct uio *auio;
	int error;

	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
	if (error)
		return (error);
	error = kern_pwritev(td, uap->fd, auio, uap->offset);
	free(auio, M_IOV);
	return (error);
}

int
kern_pwritev(struct thread *td, int fd, struct uio *auio, off_t offset)
{
	struct file *fp;
	cap_rights_t rights;
	int error;

	error = fget_write(td, fd, cap_rights_init(&rights, CAP_PWRITE), &fp);
	if (error)
		return (error);
	if (!(fp->f_ops->fo_flags & DFLAG_SEEKABLE))
		error = ESPIPE;
	else if (offset < 0 && fp->f_vnode->v_type != VCHR)
		error = EINVAL;
	else
		error = dofilewrite(td, fd, fp, auio, offset, FOF_OFFSET);
	fdrop(fp, td);
	return (error);
}
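/*
 * A convention dofilewrite() shares with dofileread() above: if the
 * transfer is interrupted (ERESTART, EINTR, EWOULDBLOCK) after some
 * bytes have already moved, the error is dropped and the short count is
 * returned instead, matching POSIX semantics for read(2) and write(2).
 */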
/*
 * Common code for writev and pwritev that writes data to
 * a file using the passed in uio, offset, and flags.
 */
static int
dofilewrite(struct thread *td, int fd, struct file *fp, struct uio *auio,
    off_t offset, int flags)
{
	ssize_t cnt;
	int error;
#ifdef KTRACE
	struct uio *ktruio = NULL;
#endif

	auio->uio_rw = UIO_WRITE;
	auio->uio_td = td;
	auio->uio_offset = offset;
#ifdef KTRACE
	if (KTRPOINT(td, KTR_GENIO))
		ktruio = cloneuio(auio);
#endif
	cnt = auio->uio_resid;
	if (fp->f_type == DTYPE_VNODE &&
	    (fp->f_vnread_flags & FDEVFS_VNODE) == 0)
		bwillwrite();
	if ((error = fo_write(fp, auio, td->td_ucred, flags, td))) {
		if (auio->uio_resid != cnt && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
		/* Socket layer is responsible for issuing SIGPIPE. */
		if (fp->f_type != DTYPE_SOCKET && error == EPIPE) {
			PROC_LOCK(td->td_proc);
			tdsignal(td, SIGPIPE);
			PROC_UNLOCK(td->td_proc);
		}
	}
	cnt -= auio->uio_resid;
#ifdef KTRACE
	if (ktruio != NULL) {
		ktruio->uio_resid = cnt;
		ktrgenio(fd, UIO_WRITE, ktruio, error);
	}
#endif
	td->td_retval[0] = cnt;
	return (error);
}

/*
 * Truncate a file given a file descriptor.
 *
 * Can't use fget_write() here, since we must return EINVAL and not
 * EBADF if the descriptor isn't writable.
 */
int
kern_ftruncate(struct thread *td, int fd, off_t length)
{
	struct file *fp;
	cap_rights_t rights;
	int error;

	AUDIT_ARG_FD(fd);
	if (length < 0)
		return (EINVAL);
	error = fget(td, fd, cap_rights_init(&rights, CAP_FTRUNCATE), &fp);
	if (error)
		return (error);
	AUDIT_ARG_FILE(td->td_proc, fp);
	if (!(fp->f_flag & FWRITE)) {
		fdrop(fp, td);
		return (EINVAL);
	}
	error = fo_truncate(fp, length, td->td_ucred, td);
	fdrop(fp, td);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ftruncate_args {
	int	fd;
	int	pad;
	off_t	length;
};
#endif
int
sys_ftruncate(struct thread *td, struct ftruncate_args *uap)
{

	return (kern_ftruncate(td, uap->fd, uap->length));
}

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct oftruncate_args {
	int	fd;
	long	length;
};
#endif
int
oftruncate(struct thread *td, struct oftruncate_args *uap)
{

	return (kern_ftruncate(td, uap->fd, uap->length));
}
#endif /* COMPAT_43 */

#ifndef _SYS_SYSPROTO_H_
struct ioctl_args {
	int	fd;
	u_long	com;
	caddr_t	data;
};
#endif
/* ARGSUSED */
int
sys_ioctl(struct thread *td, struct ioctl_args *uap)
{
	u_char smalldata[SYS_IOCTL_SMALL_SIZE] __aligned(SYS_IOCTL_SMALL_ALIGN);
	u_long com;
	int arg, error;
	u_int size;
	caddr_t data;

	if (uap->com > 0xffffffff) {
		printf(
		    "WARNING pid %d (%s): ioctl sign-extension ioctl %lx\n",
		    td->td_proc->p_pid, td->td_name, uap->com);
		uap->com &= 0xffffffff;
	}
	com = uap->com;
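	/*
	 * Example of the command-word layout decoded below (a sketch; the
	 * authoritative encoding lives in <sys/ioccom.h>):
	 *
	 *	FIONBIO == _IOW('f', 126, int)
	 *	    -> IOC_IN is set and IOCPARM_LEN(com) == sizeof(int),
	 *	       so an int is copied in from uap->data.
	 */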
	/*
	 * Interpret high order word to find amount of data to be
	 * copied to/from the user's address space.
	 */
	size = IOCPARM_LEN(com);
	if ((size > IOCPARM_MAX) ||
	    ((com & (IOC_VOID | IOC_IN | IOC_OUT)) == 0) ||
#if defined(COMPAT_FREEBSD5) || defined(COMPAT_FREEBSD4) || defined(COMPAT_43)
	    ((com & IOC_OUT) && size == 0) ||
#else
	    ((com & (IOC_IN | IOC_OUT)) && size == 0) ||
#endif
	    ((com & IOC_VOID) && size > 0 && size != sizeof(int)))
		return (ENOTTY);

	if (size > 0) {
		if (com & IOC_VOID) {
			/* Integer argument. */
			arg = (intptr_t)uap->data;
			data = (void *)&arg;
			size = 0;
		} else {
			if (size > SYS_IOCTL_SMALL_SIZE)
				data = malloc((u_long)size, M_IOCTLOPS,
				    M_WAITOK);
			else
				data = smalldata;
		}
	} else
		data = (void *)&uap->data;
	if (com & IOC_IN) {
		error = copyin(uap->data, data, (u_int)size);
		if (error != 0)
			goto out;
	} else if (com & IOC_OUT) {
		/*
		 * Zero the buffer so the user always
		 * gets back something deterministic.
		 */
		bzero(data, size);
	}

	error = kern_ioctl(td, uap->fd, com, data);

	if (error == 0 && (com & IOC_OUT))
		error = copyout(data, uap->data, (u_int)size);

out:
	if (size > SYS_IOCTL_SMALL_SIZE)
		free(data, M_IOCTLOPS);
	return (error);
}

int
kern_ioctl(struct thread *td, int fd, u_long com, caddr_t data)
{
	struct file *fp;
	struct filedesc *fdp;
#ifndef CAPABILITIES
	cap_rights_t rights;
#endif
	int error, tmp, locked;

	AUDIT_ARG_FD(fd);
	AUDIT_ARG_CMD(com);

	fdp = td->td_proc->p_fd;

	switch (com) {
	case FIONCLEX:
	case FIOCLEX:
		FILEDESC_XLOCK(fdp);
		locked = LA_XLOCKED;
		break;
	default:
#ifdef CAPABILITIES
		FILEDESC_SLOCK(fdp);
		locked = LA_SLOCKED;
#else
		locked = LA_UNLOCKED;
#endif
		break;
	}

#ifdef CAPABILITIES
	if ((fp = fget_locked(fdp, fd)) == NULL) {
		error = EBADF;
		goto out;
	}
	if ((error = cap_ioctl_check(fdp, fd, com)) != 0) {
		fp = NULL;	/* fhold() was not called yet */
		goto out;
	}
	fhold(fp);
	if (locked == LA_SLOCKED) {
		FILEDESC_SUNLOCK(fdp);
		locked = LA_UNLOCKED;
	}
#else
	error = fget(td, fd, cap_rights_init(&rights, CAP_IOCTL), &fp);
	if (error != 0) {
		fp = NULL;
		goto out;
	}
#endif
	if ((fp->f_flag & (FREAD | FWRITE)) == 0) {
		error = EBADF;
		goto out;
	}

	switch (com) {
	case FIONCLEX:
		fdp->fd_ofiles[fd].fde_flags &= ~UF_EXCLOSE;
		goto out;
	case FIOCLEX:
		fdp->fd_ofiles[fd].fde_flags |= UF_EXCLOSE;
		goto out;
	case FIONBIO:
		if ((tmp = *(int *)data))
			atomic_set_int(&fp->f_flag, FNONBLOCK);
		else
			atomic_clear_int(&fp->f_flag, FNONBLOCK);
		data = (void *)&tmp;
		break;
	case FIOASYNC:
		if ((tmp = *(int *)data))
			atomic_set_int(&fp->f_flag, FASYNC);
		else
			atomic_clear_int(&fp->f_flag, FASYNC);
		data = (void *)&tmp;
		break;
	}

	error = fo_ioctl(fp, com, data, td->td_ucred, td);
out:
	switch (locked) {
	case LA_XLOCKED:
		FILEDESC_XUNLOCK(fdp);
		break;
#ifdef CAPABILITIES
	case LA_SLOCKED:
		FILEDESC_SUNLOCK(fdp);
		break;
#endif
	default:
		FILEDESC_UNLOCK_ASSERT(fdp);
		break;
	}
	if (fp != NULL)
		fdrop(fp, td);
	return (error);
}
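/*
 * Typical userland use of the fast paths handled in kern_ioctl() above
 * (a sketch):
 *
 *	int on = 1;
 *	if (ioctl(fd, FIONBIO, &on) == -1)
 *		err(1, "ioctl");
 *
 * FIONCLEX/FIOCLEX are handled entirely above; FIONBIO/FIOASYNC fold
 * the flag into f_flag and are then still passed on to fo_ioctl() with
 * a private copy of the argument.
 */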
int
poll_no_poll(int events)
{
	/*
	 * Return true for read/write.  If the user asked for something
	 * special, return POLLNVAL, so that clients have a way of
	 * determining reliably whether or not the extended
	 * functionality is present without hard-coding knowledge
	 * of specific filesystem implementations.
	 */
	if (events & ~POLLSTANDARD)
		return (POLLNVAL);

	return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

int
sys_pselect(struct thread *td, struct pselect_args *uap)
{
	struct timespec ts;
	struct timeval tv, *tvp;
	sigset_t set, *uset;
	int error;

	if (uap->ts != NULL) {
		error = copyin(uap->ts, &ts, sizeof(ts));
		if (error != 0)
			return (error);
		TIMESPEC_TO_TIMEVAL(&tv, &ts);
		tvp = &tv;
	} else
		tvp = NULL;
	if (uap->sm != NULL) {
		error = copyin(uap->sm, &set, sizeof(set));
		if (error != 0)
			return (error);
		uset = &set;
	} else
		uset = NULL;
	return (kern_pselect(td, uap->nd, uap->in, uap->ou, uap->ex, tvp,
	    uset, NFDBITS));
}

int
kern_pselect(struct thread *td, int nd, fd_set *in, fd_set *ou, fd_set *ex,
    struct timeval *tvp, sigset_t *uset, int abi_nfdbits)
{
	int error;

	if (uset != NULL) {
		error = kern_sigprocmask(td, SIG_SETMASK, uset,
		    &td->td_oldsigmask, 0);
		if (error != 0)
			return (error);
		td->td_pflags |= TDP_OLDMASK;
		/*
		 * Make sure that ast() is called on return to
		 * usermode and TDP_OLDMASK is cleared, restoring old
		 * sigmask.
		 */
		thread_lock(td);
		td->td_flags |= TDF_ASTPENDING;
		thread_unlock(td);
	}
	error = kern_select(td, nd, in, ou, ex, tvp, abi_nfdbits);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct select_args {
	int	nd;
	fd_set	*in, *ou, *ex;
	struct	timeval *tv;
};
#endif
int
sys_select(struct thread *td, struct select_args *uap)
{
	struct timeval tv, *tvp;
	int error;

	if (uap->tv != NULL) {
		error = copyin(uap->tv, &tv, sizeof(tv));
		if (error)
			return (error);
		tvp = &tv;
	} else
		tvp = NULL;

	return (kern_select(td, uap->nd, uap->in, uap->ou, uap->ex, tvp,
	    NFDBITS));
}
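/*
 * Userland motivation for pselect(2), for orientation (a sketch): the
 * signal-mask swap and the wait in kern_pselect() above are atomic with
 * respect to signal delivery, closing the classic race with select(2):
 *
 *	sigset_t mask;		// signals to leave unblocked while waiting
 *	...
 *	pselect(nfds, &rfds, NULL, NULL, NULL, &mask);
 *
 * The old mask is restored on the way back to usermode via TDP_OLDMASK
 * and ast().
 */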
/*
 * In the unlikely case when the user specified n greater than the last
 * open file descriptor, check that no bits are set after the last
 * valid fd.  We must return EBADF if any is set.
 *
 * There are applications that rely on the behaviour.
 *
 * nd is fd_lastfile + 1.
 */
static int
select_check_badfd(fd_set *fd_in, int nd, int ndu, int abi_nfdbits)
{
	char *addr, *oaddr;
	int b, i, res;
	uint8_t bits;

	if (nd >= ndu || fd_in == NULL)
		return (0);

	oaddr = NULL;
	bits = 0;		/* silence gcc */
	for (i = nd; i < ndu; i++) {
		b = i / NBBY;
#if BYTE_ORDER == LITTLE_ENDIAN
		addr = (char *)fd_in + b;
#else
		addr = (char *)fd_in;
		if (abi_nfdbits == NFDBITS) {
			addr += rounddown(b, sizeof(fd_mask)) +
			    sizeof(fd_mask) - 1 - b % sizeof(fd_mask);
		} else {
			addr += rounddown(b, sizeof(uint32_t)) +
			    sizeof(uint32_t) - 1 - b % sizeof(uint32_t);
		}
#endif
		if (addr != oaddr) {
			res = fubyte(addr);
			if (res == -1)
				return (EFAULT);
			oaddr = addr;
			bits = res;
		}
		if ((bits & (1 << (i % NBBY))) != 0)
			return (EBADF);
	}
	return (0);
}

int
kern_select(struct thread *td, int nd, fd_set *fd_in, fd_set *fd_ou,
    fd_set *fd_ex, struct timeval *tvp, int abi_nfdbits)
{
	struct filedesc *fdp;
	/*
	 * The magic 2048 here is chosen to be just enough for FD_SETSIZE
	 * infds with the new FD_SETSIZE of 1024, and more than enough for
	 * FD_SETSIZE infds, outfds and exceptfds with the old FD_SETSIZE
	 * of 256.
	 */
	fd_mask s_selbits[howmany(2048, NFDBITS)];
	fd_mask *ibits[3], *obits[3], *selbits, *sbp;
	struct timeval rtv;
	sbintime_t asbt, precision, rsbt;
	u_int nbufbytes, ncpbytes, ncpubytes, nfdbits;
	int error, lf, ndu;

	if (nd < 0)
		return (EINVAL);
	fdp = td->td_proc->p_fd;
	ndu = nd;
	lf = fdp->fd_lastfile;
	if (nd > lf + 1)
		nd = lf + 1;

	error = select_check_badfd(fd_in, nd, ndu, abi_nfdbits);
	if (error != 0)
		return (error);
	error = select_check_badfd(fd_ou, nd, ndu, abi_nfdbits);
	if (error != 0)
		return (error);
	error = select_check_badfd(fd_ex, nd, ndu, abi_nfdbits);
	if (error != 0)
		return (error);

	/*
	 * Allocate just enough bits for the non-null fd_sets.  Use the
	 * preallocated auto buffer if possible.
	 */
	nfdbits = roundup(nd, NFDBITS);
	ncpbytes = nfdbits / NBBY;
	ncpubytes = roundup(nd, abi_nfdbits) / NBBY;
	nbufbytes = 0;
	if (fd_in != NULL)
		nbufbytes += 2 * ncpbytes;
	if (fd_ou != NULL)
		nbufbytes += 2 * ncpbytes;
	if (fd_ex != NULL)
		nbufbytes += 2 * ncpbytes;
	if (nbufbytes <= sizeof s_selbits)
		selbits = &s_selbits[0];
	else
		selbits = malloc(nbufbytes, M_SELECT, M_WAITOK);

	/*
	 * Assign pointers into the bit buffers and fetch the input bits.
	 * Put the output buffers together so that they can be bzeroed
	 * together.
	 */
	sbp = selbits;
#define	getbits(name, x) \
	do {								\
		if (name == NULL) {					\
			ibits[x] = NULL;				\
			obits[x] = NULL;				\
		} else {						\
			ibits[x] = sbp + nbufbytes / 2 / sizeof *sbp;	\
			obits[x] = sbp;					\
			sbp += ncpbytes / sizeof *sbp;			\
			error = copyin(name, ibits[x], ncpubytes);	\
			if (error != 0)					\
				goto done;				\
			bzero((char *)ibits[x] + ncpubytes,		\
			    ncpbytes - ncpubytes);			\
		}							\
	} while (0)
	getbits(fd_in, 0);
	getbits(fd_ou, 1);
	getbits(fd_ex, 2);
#undef getbits
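	/*
	 * Resulting layout of 'selbits' when all three sets are supplied
	 * (a sketch):
	 *
	 *	[ obits[0] obits[1] obits[2] | ibits[0] ibits[1] ibits[2] ]
	 *	  \_ first nbufbytes/2, bzeroed _/ \_ copied from user _/
	 */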
#if BYTE_ORDER == BIG_ENDIAN && defined(__LP64__)
	/*
	 * XXX: swizzle_fdset assumes that if abi_nfdbits != NFDBITS,
	 * we are running under 32-bit emulation. This should be more
	 * generic.
	 */
#define	swizzle_fdset(bits)						\
	if (abi_nfdbits != NFDBITS && bits != NULL) {			\
		int i;							\
		for (i = 0; i < ncpbytes / sizeof *sbp; i++)		\
			bits[i] = (bits[i] >> 32) | (bits[i] << 32);	\
	}
#else
#define	swizzle_fdset(bits)
#endif

	/* Make sure the bit order makes it through an ABI transition */
	swizzle_fdset(ibits[0]);
	swizzle_fdset(ibits[1]);
	swizzle_fdset(ibits[2]);

	if (nbufbytes != 0)
		bzero(selbits, nbufbytes / 2);

	precision = 0;
	if (tvp != NULL) {
		rtv = *tvp;
		if (rtv.tv_sec < 0 || rtv.tv_usec < 0 ||
		    rtv.tv_usec >= 1000000) {
			error = EINVAL;
			goto done;
		}
		if (!timevalisset(&rtv))
			asbt = 0;
		else if (rtv.tv_sec <= INT32_MAX) {
			rsbt = tvtosbt(rtv);
			precision = rsbt;
			precision >>= tc_precexp;
			if (TIMESEL(&asbt, rsbt))
				asbt += tc_tick_sbt;
			if (asbt <= SBT_MAX - rsbt)
				asbt += rsbt;
			else
				asbt = -1;
		} else
			asbt = -1;
	} else
		asbt = -1;
	seltdinit(td);
	/* Iterate until the timeout expires or descriptors become ready. */
	for (;;) {
		error = selscan(td, ibits, obits, nd);
		if (error || td->td_retval[0] != 0)
			break;
		error = seltdwait(td, asbt, precision);
		if (error)
			break;
		error = selrescan(td, ibits, obits);
		if (error || td->td_retval[0] != 0)
			break;
	}
	seltdclear(td);

done:
	/* select is not restarted after signals... */
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;

	/* swizzle bit order back, if necessary */
	swizzle_fdset(obits[0]);
	swizzle_fdset(obits[1]);
	swizzle_fdset(obits[2]);
#undef swizzle_fdset

#define	putbits(name, x) \
	if (name && (error2 = copyout(obits[x], name, ncpubytes))) \
		error = error2;
	if (error == 0) {
		int error2;

		putbits(fd_in, 0);
		putbits(fd_ou, 1);
		putbits(fd_ex, 2);
#undef putbits
	}
	if (selbits != &s_selbits[0])
		free(selbits, M_SELECT);

	return (error);
}
/*
 * Convert a select bit set to poll flags.
 *
 * The backend always returns POLLHUP/POLLERR if appropriate and we
 * return this as a set bit in any set.
 */
static int select_flags[3] = {
	POLLRDNORM | POLLHUP | POLLERR,
	POLLWRNORM | POLLHUP | POLLERR,
	POLLRDBAND | POLLERR
};

/*
 * Compute the fo_poll flags required for a fd given by the index and
 * bit position in the fd_mask array.
 */
static __inline int
selflags(fd_mask **ibits, int idx, fd_mask bit)
{
	int flags;
	int msk;

	flags = 0;
	for (msk = 0; msk < 3; msk++) {
		if (ibits[msk] == NULL)
			continue;
		if ((ibits[msk][idx] & bit) == 0)
			continue;
		flags |= select_flags[msk];
	}
	return (flags);
}
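/*
 * Worked example, assuming a 64-bit fd_mask (NFDBITS == 64): for fd 70,
 * idx == 1 and bit == (fd_mask)1 << 6; if fd 70 was set only in the
 * read set, selflags() returns POLLRDNORM | POLLHUP | POLLERR.
 */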
/*
 * Set the appropriate output bits given a mask of fired events and the
 * input bits originally requested.
 */
static __inline int
selsetbits(fd_mask **ibits, fd_mask **obits, int idx, fd_mask bit, int events)
{
	int msk;
	int n;

	n = 0;
	for (msk = 0; msk < 3; msk++) {
		if ((events & select_flags[msk]) == 0)
			continue;
		if (ibits[msk] == NULL)
			continue;
		if ((ibits[msk][idx] & bit) == 0)
			continue;
		/*
		 * XXX Check for a duplicate set.  This can occur because a
		 * socket calls selrecord() twice for each poll() call
		 * resulting in two selfds per real fd.  selrescan() will
		 * call selsetbits twice as a result.
		 */
		if ((obits[msk][idx] & bit) != 0)
			continue;
		obits[msk][idx] |= bit;
		n++;
	}

	return (n);
}

static __inline int
getselfd_cap(struct filedesc *fdp, int fd, struct file **fpp)
{
	cap_rights_t rights;

	cap_rights_init(&rights, CAP_EVENT);

	return (fget_unlocked(fdp, fd, &rights, fpp, NULL));
}

/*
 * Traverse the list of fds attached to this thread's seltd and check for
 * completion.
 */
static int
selrescan(struct thread *td, fd_mask **ibits, fd_mask **obits)
{
	struct filedesc *fdp;
	struct selinfo *si;
	struct seltd *stp;
	struct selfd *sfp;
	struct selfd *sfn;
	struct file *fp;
	fd_mask bit;
	int fd, ev, n, idx;
	int error;

	fdp = td->td_proc->p_fd;
	stp = td->td_sel;
	n = 0;
	STAILQ_FOREACH_SAFE(sfp, &stp->st_selq, sf_link, sfn) {
		fd = (int)(uintptr_t)sfp->sf_cookie;
		si = sfp->sf_si;
		selfdfree(stp, sfp);
		/* If the selinfo wasn't cleared the event didn't fire. */
		if (si != NULL)
			continue;
		error = getselfd_cap(fdp, fd, &fp);
		if (error)
			return (error);
		idx = fd / NFDBITS;
		bit = (fd_mask)1 << (fd % NFDBITS);
		ev = fo_poll(fp, selflags(ibits, idx, bit), td->td_ucred, td);
		fdrop(fp, td);
		if (ev != 0)
			n += selsetbits(ibits, obits, idx, bit, ev);
	}
	stp->st_flags = 0;
	td->td_retval[0] = n;
	return (0);
}
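/*
 * selscan() below walks every bit in the request sets and registers the
 * thread via selfdalloc()/selrecord(); selrescan() above only walks the
 * selfds that were actually recorded, so rescans after a wakeup cost
 * time proportional to the number of interesting fds rather than to nfd.
 */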
/*
 * Perform the initial filedescriptor scan and register ourselves with
 * each selinfo.
 */
static int
selscan(struct thread *td, fd_mask **ibits, fd_mask **obits, int nfd)
{
	struct filedesc *fdp;
	struct file *fp;
	fd_mask bit;
	int ev, flags, end, fd;
	int n, idx;
	int error;

	fdp = td->td_proc->p_fd;
	n = 0;
	for (idx = 0, fd = 0; fd < nfd; idx++) {
		end = imin(fd + NFDBITS, nfd);
		for (bit = 1; fd < end; bit <<= 1, fd++) {
			/* Compute the list of events we're interested in. */
			flags = selflags(ibits, idx, bit);
			if (flags == 0)
				continue;
			error = getselfd_cap(fdp, fd, &fp);
			if (error)
				return (error);
			selfdalloc(td, (void *)(uintptr_t)fd);
			ev = fo_poll(fp, flags, td->td_ucred, td);
			fdrop(fp, td);
			if (ev != 0)
				n += selsetbits(ibits, obits, idx, bit, ev);
		}
	}

	td->td_retval[0] = n;
	return (0);
}

int
sys_poll(struct thread *td, struct poll_args *uap)
{
	struct timespec ts, *tsp;

	if (uap->timeout != INFTIM) {
		if (uap->timeout < 0)
			return (EINVAL);
		ts.tv_sec = uap->timeout / 1000;
		ts.tv_nsec = (uap->timeout % 1000) * 1000000;
		tsp = &ts;
	} else
		tsp = NULL;

	return (kern_poll(td, uap->fds, uap->nfds, tsp, NULL));
}
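/*
 * Worked example of the millisecond conversion above: a poll(2) timeout
 * of 2500 becomes ts = { .tv_sec = 2, .tv_nsec = 500000000 }, while
 * INFTIM (-1) becomes a NULL tsp, which kern_poll() below treats as
 * "block forever".
 */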
int
kern_poll(struct thread *td, struct pollfd *fds, u_int nfds,
    struct timespec *tsp, sigset_t *uset)
{
	struct pollfd *bits;
	struct pollfd smallbits[32];
	sbintime_t sbt, precision, tmp;
	time_t over;
	struct timespec ts;
	int error;
	size_t ni;

	precision = 0;
	if (tsp != NULL) {
		if (tsp->tv_sec < 0)
			return (EINVAL);
		if (tsp->tv_nsec < 0 || tsp->tv_nsec >= 1000000000)
			return (EINVAL);
		if (tsp->tv_sec == 0 && tsp->tv_nsec == 0)
			sbt = 0;
		else {
			ts = *tsp;
			if (ts.tv_sec > INT32_MAX / 2) {
				over = ts.tv_sec - INT32_MAX / 2;
				ts.tv_sec -= over;
			} else
				over = 0;
			tmp = tstosbt(ts);
			precision = tmp;
			precision >>= tc_precexp;
			if (TIMESEL(&sbt, tmp))
				sbt += tc_tick_sbt;
			sbt += tmp;
		}
	} else
		sbt = -1;

	if (nfds > maxfilesperproc && nfds > FD_SETSIZE)
		return (EINVAL);
	ni = nfds * sizeof(struct pollfd);
	if (ni > sizeof(smallbits))
		bits = malloc(ni, M_TEMP, M_WAITOK);
	else
		bits = smallbits;
	error = copyin(fds, bits, ni);
	if (error)
		goto done;

	if (uset != NULL) {
		error = kern_sigprocmask(td, SIG_SETMASK, uset,
		    &td->td_oldsigmask, 0);
		if (error)
			goto done;
		td->td_pflags |= TDP_OLDMASK;
		/*
		 * Make sure that ast() is called on return to
		 * usermode and TDP_OLDMASK is cleared, restoring old
		 * sigmask.
		 */
		thread_lock(td);
		td->td_flags |= TDF_ASTPENDING;
		thread_unlock(td);
	}

	seltdinit(td);
	/* Iterate until the timeout expires or descriptors become ready. */
	for (;;) {
		error = pollscan(td, bits, nfds);
		if (error || td->td_retval[0] != 0)
			break;
		error = seltdwait(td, sbt, precision);
		if (error)
			break;
		error = pollrescan(td);
		if (error || td->td_retval[0] != 0)
			break;
	}
	seltdclear(td);

done:
	/* poll is not restarted after signals... */
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;
	if (error == 0) {
		error = pollout(td, bits, fds, nfds);
		if (error)
			goto out;
	}
out:
	if (ni > sizeof(smallbits))
		free(bits, M_TEMP);
	return (error);
}

int
sys_ppoll(struct thread *td, struct ppoll_args *uap)
{
	struct timespec ts, *tsp;
	sigset_t set, *ssp;
	int error;

	if (uap->ts != NULL) {
		error = copyin(uap->ts, &ts, sizeof(ts));
		if (error)
			return (error);
		tsp = &ts;
	} else
		tsp = NULL;
	if (uap->set != NULL) {
		error = copyin(uap->set, &set, sizeof(set));
		if (error)
			return (error);
		ssp = &set;
	} else
		ssp = NULL;
	/*
	 * fds is still a pointer to user space.  kern_poll() will
	 * take care of copying that array into kernel space.
	 */

	return (kern_poll(td, uap->fds, uap->nfds, tsp, ssp));
}
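/*
 * Both pollrescan() below and pollscan() report closed or
 * rights-restricted descriptors by setting POLLNVAL in revents rather
 * than failing the whole call, as poll(2) semantics require.
 */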
static int
pollrescan(struct thread *td)
{
	struct seltd *stp;
	struct selfd *sfp;
	struct selfd *sfn;
	struct selinfo *si;
	struct filedesc *fdp;
	struct file *fp;
	struct pollfd *fd;
#ifdef CAPABILITIES
	cap_rights_t rights;
#endif
	int n;

	n = 0;
	fdp = td->td_proc->p_fd;
	stp = td->td_sel;
	FILEDESC_SLOCK(fdp);
	STAILQ_FOREACH_SAFE(sfp, &stp->st_selq, sf_link, sfn) {
		fd = (struct pollfd *)sfp->sf_cookie;
		si = sfp->sf_si;
		selfdfree(stp, sfp);
		/* If the selinfo wasn't cleared the event didn't fire. */
		if (si != NULL)
			continue;
		fp = fdp->fd_ofiles[fd->fd].fde_file;
#ifdef CAPABILITIES
		if (fp == NULL ||
		    cap_check(cap_rights(fdp, fd->fd),
		    cap_rights_init(&rights, CAP_EVENT)) != 0)
#else
		if (fp == NULL)
#endif
		{
			fd->revents = POLLNVAL;
			n++;
			continue;
		}

		/*
		 * Note: backend also returns POLLHUP and
		 * POLLERR if appropriate.
		 */
		fd->revents = fo_poll(fp, fd->events, td->td_ucred, td);
		if (fd->revents != 0)
			n++;
	}
	FILEDESC_SUNLOCK(fdp);
	stp->st_flags = 0;
	td->td_retval[0] = n;
	return (0);
}

static int
pollout(struct thread *td, struct pollfd *fds, struct pollfd *ufds, u_int nfd)
{
	int error = 0;
	u_int i = 0;
	u_int n = 0;

	for (i = 0; i < nfd; i++) {
		error = copyout(&fds->revents, &ufds->revents,
		    sizeof(ufds->revents));
		if (error)
			return (error);
		if (fds->revents != 0)
			n++;
		fds++;
		ufds++;
	}
	td->td_retval[0] = n;
	return (0);
}

static int
pollscan(struct thread *td, struct pollfd *fds, u_int nfd)
{
	struct filedesc *fdp = td->td_proc->p_fd;
	struct file *fp;
#ifdef CAPABILITIES
	cap_rights_t rights;
#endif
	int i, n = 0;

	FILEDESC_SLOCK(fdp);
	for (i = 0; i < nfd; i++, fds++) {
		if (fds->fd > fdp->fd_lastfile) {
			fds->revents = POLLNVAL;
			n++;
		} else if (fds->fd < 0) {
			fds->revents = 0;
		} else {
			fp = fdp->fd_ofiles[fds->fd].fde_file;
#ifdef CAPABILITIES
			if (fp == NULL ||
			    cap_check(cap_rights(fdp, fds->fd),
			    cap_rights_init(&rights, CAP_EVENT)) != 0)
#else
			if (fp == NULL)
#endif
			{
				fds->revents = POLLNVAL;
				n++;
			} else {
				/*
				 * Note: backend also returns POLLHUP and
				 * POLLERR if appropriate.
				 */
				selfdalloc(td, fds);
				fds->revents = fo_poll(fp, fds->events,
				    td->td_ucred, td);
				/*
				 * POSIX requires that POLLOUT never be
				 * set simultaneously with POLLHUP.
				 */
				if ((fds->revents & POLLHUP) != 0)
					fds->revents &= ~POLLOUT;

				if (fds->revents != 0)
					n++;
			}
		}
	}
	FILEDESC_SUNLOCK(fdp);
	td->td_retval[0] = n;
	return (0);
}

/*
 * OpenBSD poll system call.
 *
 * XXX this isn't quite a true representation...  OpenBSD uses select ops.
 */
#ifndef _SYS_SYSPROTO_H_
struct openbsd_poll_args {
	struct	pollfd *fds;
	u_int	nfds;
	int	timeout;
};
#endif
int
sys_openbsd_poll(struct thread *td, struct openbsd_poll_args *uap)
{

	return (sys_poll(td, (struct poll_args *)uap));
}
/*
 * XXX This was created specifically to support netncp and netsmb.  This
 * allows the caller to specify a socket to wait for events on.  It returns
 * 0 if any events matched and an error otherwise.  There is no way to
 * determine which events fired.
 */
int
selsocket(struct socket *so, int events, struct timeval *tvp,
    struct thread *td)
{
	struct timeval rtv;
	sbintime_t asbt, precision, rsbt;
	int error;

	precision = 0;	/* silence gcc */
	if (tvp != NULL) {
		rtv = *tvp;
		if (rtv.tv_sec < 0 || rtv.tv_usec < 0 ||
		    rtv.tv_usec >= 1000000)
			return (EINVAL);
		if (!timevalisset(&rtv))
			asbt = 0;
		else if (rtv.tv_sec <= INT32_MAX) {
			rsbt = tvtosbt(rtv);
			precision = rsbt;
			precision >>= tc_precexp;
			if (TIMESEL(&asbt, rsbt))
				asbt += tc_tick_sbt;
			if (asbt <= SBT_MAX - rsbt)
				asbt += rsbt;
			else
				asbt = -1;
		} else
			asbt = -1;
	} else
		asbt = -1;
	seltdinit(td);
	/*
	 * Iterate until the timeout expires or the socket becomes ready.
	 */
	for (;;) {
		selfdalloc(td, NULL);
		error = sopoll(so, events, NULL, td);
		/* error here is actually the ready events. */
		if (error)
			return (0);
		error = seltdwait(td, asbt, precision);
		if (error)
			break;
	}
	seltdclear(td);
	/* XXX Duplicates ncp/smb behavior. */
	if (error == ERESTART)
		error = 0;
	return (error);
}

/*
 * Preallocate two selfds associated with 'cookie'.  Some fo_poll routines
 * have two select sets, one for read and another for write.
 */
static void
selfdalloc(struct thread *td, void *cookie)
{
	struct seltd *stp;

	stp = td->td_sel;
	if (stp->st_free1 == NULL)
		stp->st_free1 = uma_zalloc(selfd_zone, M_WAITOK|M_ZERO);
	stp->st_free1->sf_td = stp;
	stp->st_free1->sf_cookie = cookie;
	if (stp->st_free2 == NULL)
		stp->st_free2 = uma_zalloc(selfd_zone, M_WAITOK|M_ZERO);
	stp->st_free2->sf_td = stp;
	stp->st_free2->sf_cookie = cookie;
}

static void
selfdfree(struct seltd *stp, struct selfd *sfp)
{
	STAILQ_REMOVE(&stp->st_selq, sfp, selfd, sf_link);
	if (sfp->sf_si != NULL) {
		mtx_lock(sfp->sf_mtx);
		if (sfp->sf_si != NULL)
			TAILQ_REMOVE(&sfp->sf_si->si_tdlist, sfp, sf_threads);
		mtx_unlock(sfp->sf_mtx);
	}
	uma_zfree(selfd_zone, sfp);
}

/* Drain the waiters tied to all the selfd belonging to the specified selinfo. */
void
seldrain(struct selinfo *sip)
{

	/*
	 * This feature is already provided by doselwakeup(), so it is
	 * enough to call it.  Eventually the caller should take care to
	 * avoid races between threads calling select()/poll() and file
	 * descriptor detaching, but, again, those are the same races as
	 * for selwakeup().
	 */
	doselwakeup(sip, -1);
}

/*
 * Record a select request.
 */
void
selrecord(struct thread *selector, struct selinfo *sip)
{
	struct selfd *sfp;
	struct seltd *stp;
	struct mtx *mtxp;

	stp = selector->td_sel;
	/*
	 * Don't record when doing a rescan.
	 */
	if (stp->st_flags & SELTD_RESCAN)
		return;
	/*
	 * Grab one of the preallocated descriptors.
	 */
	sfp = NULL;
	if ((sfp = stp->st_free1) != NULL)
		stp->st_free1 = NULL;
	else if ((sfp = stp->st_free2) != NULL)
		stp->st_free2 = NULL;
	else
		panic("selrecord: No free selfd on selq");
	mtxp = sip->si_mtx;
	if (mtxp == NULL)
		mtxp = mtx_pool_find(mtxpool_select, sip);
	/*
	 * Initialize the sfp and queue it in the thread.
	 */
	sfp->sf_si = sip;
	sfp->sf_mtx = mtxp;
	STAILQ_INSERT_TAIL(&stp->st_selq, sfp, sf_link);
	/*
	 * Now that we've locked the sip, check for initialization.
	 */
	mtx_lock(mtxp);
	if (sip->si_mtx == NULL) {
		sip->si_mtx = mtxp;
		TAILQ_INIT(&sip->si_tdlist);
	}
	/*
	 * Add this thread to the list of selfds listening on this selinfo.
	 */
	TAILQ_INSERT_TAIL(&sip->si_tdlist, sfp, sf_threads);
	mtx_unlock(sip->si_mtx);
}

/* Wake up a selecting thread. */
void
selwakeup(struct selinfo *sip)
{

	doselwakeup(sip, -1);
}
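/*
 * The handshake between selrecord() and doselwakeup(), in brief:
 * selrecord() publishes the selfd on si_tdlist under the selinfo mutex,
 * and doselwakeup() unlinks it and clears sf_si under the same mutex,
 * so a NULL sf_si observed by selrescan()/pollrescan() means the event
 * fired.
 */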
/* Wake up a selecting thread, and set its priority. */
void
selwakeuppri(struct selinfo *sip, int pri)
{

	doselwakeup(sip, pri);
}

/*
 * Do a wakeup when a selectable event occurs.
 */
static void
doselwakeup(struct selinfo *sip, int pri)
{
	struct selfd *sfp;
	struct selfd *sfn;
	struct seltd *stp;

	/* If it's not initialized there can't be any waiters. */
	if (sip->si_mtx == NULL)
		return;
	/*
	 * Locking the selinfo locks all selfds associated with it.
	 */
	mtx_lock(sip->si_mtx);
	TAILQ_FOREACH_SAFE(sfp, &sip->si_tdlist, sf_threads, sfn) {
		/*
		 * Once we remove this sfp from the list and clear the
		 * sf_si field, seltdclear() will know to ignore this si.
		 */
		TAILQ_REMOVE(&sip->si_tdlist, sfp, sf_threads);
		sfp->sf_si = NULL;
		stp = sfp->sf_td;
		mtx_lock(&stp->st_mtx);
		stp->st_flags |= SELTD_PENDING;
		cv_broadcastpri(&stp->st_wait, pri);
		mtx_unlock(&stp->st_mtx);
	}
	mtx_unlock(sip->si_mtx);
}

static void
seltdinit(struct thread *td)
{
	struct seltd *stp;

	if ((stp = td->td_sel) != NULL)
		goto out;
	td->td_sel = stp = malloc(sizeof(*stp), M_SELECT, M_WAITOK|M_ZERO);
	mtx_init(&stp->st_mtx, "sellck", NULL, MTX_DEF);
	cv_init(&stp->st_wait, "select");
out:
	stp->st_flags = 0;
	STAILQ_INIT(&stp->st_selq);
}

static int
seltdwait(struct thread *td, sbintime_t sbt, sbintime_t precision)
{
	struct seltd *stp;
	int error;

	stp = td->td_sel;
	/*
	 * An event of interest may occur while we do not hold the seltd
	 * locked so check the pending flag before we sleep.
	 */
	mtx_lock(&stp->st_mtx);
	/*
	 * Any further calls to selrecord will be a rescan.
	 */
	stp->st_flags |= SELTD_RESCAN;
	if (stp->st_flags & SELTD_PENDING) {
		mtx_unlock(&stp->st_mtx);
		return (0);
	}
	if (sbt == 0)
		error = EWOULDBLOCK;
	else if (sbt != -1)
		error = cv_timedwait_sig_sbt(&stp->st_wait, &stp->st_mtx,
		    sbt, precision, C_ABSOLUTE);
	else
		error = cv_wait_sig(&stp->st_wait, &stp->st_mtx);
	mtx_unlock(&stp->st_mtx);

	return (error);
}

void
seltdfini(struct thread *td)
{
	struct seltd *stp;

	stp = td->td_sel;
	if (stp == NULL)
		return;
	if (stp->st_free1)
		uma_zfree(selfd_zone, stp->st_free1);
	if (stp->st_free2)
		uma_zfree(selfd_zone, stp->st_free2);
	td->td_sel = NULL;
	free(stp, M_SELECT);
}

/*
 * Remove the references to the thread from all of the objects we were
 * polling.
 */
static void
seltdclear(struct thread *td)
{
	struct seltd *stp;
	struct selfd *sfp;
	struct selfd *sfn;

	stp = td->td_sel;
	STAILQ_FOREACH_SAFE(sfp, &stp->st_selq, sf_link, sfn)
		selfdfree(stp, sfp);
	stp->st_flags = 0;
}

static void selectinit(void *);
SYSINIT(select, SI_SUB_SYSCALLS, SI_ORDER_ANY, selectinit, NULL);
static void
selectinit(void *dummy __unused)
{

	selfd_zone = uma_zcreate("selfd", sizeof(struct selfd), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
	mtxpool_select = mtx_pool_create("select mtxpool", 128, MTX_DEF);
}