/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)sys_generic.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_capsicum.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/capsicum.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/socketvar.h>
#include <sys/uio.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/resourcevar.h>
#include <sys/selinfo.h>
#include <sys/sleepqueue.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vnode.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/condvar.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <security/audit/audit.h>

/*
 * The following macro defines how many bytes will be allocated on the
 * stack, instead of with malloc(), when passing ioctl data structures
 * between userspace and the kernel.  Some ioctls with small data
 * structures are used very frequently, and this small on-stack buffer
 * gives a significant speedup for those requests.  The value of this
 * define should be greater than or equal to 64 bytes and should also
 * be a power of two.  The data structure is currently hard-aligned to
 * an 8-byte boundary on the stack, which should be sufficient for all
 * supported platforms.
 */
#define	SYS_IOCTL_SMALL_SIZE	128	/* bytes */
#define	SYS_IOCTL_SMALL_ALIGN	8	/* bytes */
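
/*
 * Illustrative example: a common request such as FIONREAD carries only
 * sizeof(int) bytes of argument data, far below SYS_IOCTL_SMALL_SIZE,
 * so sys_ioctl() below can serve it from the on-stack buffer without
 * ever calling malloc().
 */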

#ifdef __LP64__
static int iosize_max_clamp = 0;
SYSCTL_INT(_debug, OID_AUTO, iosize_max_clamp, CTLFLAG_RW,
    &iosize_max_clamp, 0, "Clamp max i/o size to INT_MAX");
static int devfs_iosize_max_clamp = 1;
SYSCTL_INT(_debug, OID_AUTO, devfs_iosize_max_clamp, CTLFLAG_RW,
    &devfs_iosize_max_clamp, 0, "Clamp max i/o size to INT_MAX for devices");
#endif

/*
 * Assert that the return value of read(2) and write(2) syscalls fits
 * into a register.  If not, an architecture will need to provide the
 * usermode wrappers to reconstruct the result.
 */
CTASSERT(sizeof(register_t) >= sizeof(size_t));

static MALLOC_DEFINE(M_IOCTLOPS, "ioctlops", "ioctl data buffer");
static MALLOC_DEFINE(M_SELECT, "select", "select() buffer");
MALLOC_DEFINE(M_IOV, "iov", "large iov's");

static int	pollout(struct thread *, struct pollfd *, struct pollfd *,
		    u_int);
static int	pollscan(struct thread *, struct pollfd *, u_int);
static int	pollrescan(struct thread *);
static int	selscan(struct thread *, fd_mask **, fd_mask **, int);
static int	selrescan(struct thread *, fd_mask **, fd_mask **);
static void	selfdalloc(struct thread *, void *);
static void	selfdfree(struct seltd *, struct selfd *);
static int	dofileread(struct thread *, int, struct file *, struct uio *,
		    off_t, int);
static int	dofilewrite(struct thread *, int, struct file *, struct uio *,
		    off_t, int);
static void	doselwakeup(struct selinfo *, int);
static void	seltdinit(struct thread *);
static int	seltdwait(struct thread *, sbintime_t, sbintime_t);
static void	seltdclear(struct thread *);

/*
 * One seltd per-thread allocated on demand as needed.
 *
 *	t - protected by st_mtx
 *	k - Only accessed by curthread or read-only
 */
struct seltd {
	STAILQ_HEAD(, selfd)	st_selq;	/* (k) List of selfds. */
	struct selfd		*st_free1;	/* (k) free fd for read set. */
	struct selfd		*st_free2;	/* (k) free fd for write set. */
	struct mtx		st_mtx;		/* Protects struct seltd */
	struct cv		st_wait;	/* (t) Wait channel. */
	int			st_flags;	/* (t) SELTD_ flags. */
};

#define	SELTD_PENDING	0x0001			/* We have pending events. */
#define	SELTD_RESCAN	0x0002			/* Doing a rescan. */

/*
 * One selfd allocated per-thread per-file-descriptor.
 *	f - protected by sf_mtx
 */
struct selfd {
	STAILQ_ENTRY(selfd)	sf_link;	/* (k) fds owned by this td. */
	TAILQ_ENTRY(selfd)	sf_threads;	/* (f) fds on this selinfo. */
	struct selinfo		*sf_si;		/* (f) selinfo when linked. */
	struct mtx		*sf_mtx;	/* Pointer to selinfo mtx. */
	struct seltd		*sf_td;		/* (k) owning seltd. */
	void			*sf_cookie;	/* (k) fd or pollfd. */
	u_int			sf_refs;
};

static uma_zone_t selfd_zone;
static struct mtx_pool *mtxpool_select;

#ifdef __LP64__
size_t
devfs_iosize_max(void)
{

	return (devfs_iosize_max_clamp || SV_CURPROC_FLAG(SV_ILP32) ?
	    INT_MAX : SSIZE_MAX);
}

size_t
iosize_max(void)
{

	return (iosize_max_clamp || SV_CURPROC_FLAG(SV_ILP32) ?
	    INT_MAX : SSIZE_MAX);
}
#endif
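
/*
 * Illustrative note: with the defaults above, a 64-bit process may
 * issue a single read(2) or write(2) of up to SSIZE_MAX bytes on a
 * regular file, while I/O on devfs nodes and all I/O by 32-bit
 * (SV_ILP32) processes remains clamped to INT_MAX.
 */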

#ifndef _SYS_SYSPROTO_H_
struct read_args {
	int	fd;
	void	*buf;
	size_t	nbyte;
};
#endif
int
sys_read(struct thread *td, struct read_args *uap)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if (uap->nbyte > IOSIZE_MAX)
		return (EINVAL);
	aiov.iov_base = uap->buf;
	aiov.iov_len = uap->nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = uap->nbyte;
	auio.uio_segflg = UIO_USERSPACE;
	error = kern_readv(td, uap->fd, &auio);
	return (error);
}

/*
 * Positioned read system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct pread_args {
	int	fd;
	void	*buf;
	size_t	nbyte;
	int	pad;
	off_t	offset;
};
#endif
int
sys_pread(struct thread *td, struct pread_args *uap)
{

	return (kern_pread(td, uap->fd, uap->buf, uap->nbyte, uap->offset));
}

int
kern_pread(struct thread *td, int fd, void *buf, size_t nbyte, off_t offset)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if (nbyte > IOSIZE_MAX)
		return (EINVAL);
	aiov.iov_base = buf;
	aiov.iov_len = nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = nbyte;
	auio.uio_segflg = UIO_USERSPACE;
	error = kern_preadv(td, fd, &auio, offset);
	return (error);
}

#if defined(COMPAT_FREEBSD6)
int
freebsd6_pread(struct thread *td, struct freebsd6_pread_args *uap)
{

	return (kern_pread(td, uap->fd, uap->buf, uap->nbyte, uap->offset));
}
#endif

/*
 * Scatter read system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct readv_args {
	int	fd;
	struct	iovec *iovp;
	u_int	iovcnt;
};
#endif
int
sys_readv(struct thread *td, struct readv_args *uap)
{
	struct uio *auio;
	int error;

	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
	if (error)
		return (error);
	error = kern_readv(td, uap->fd, auio);
	free(auio, M_IOV);
	return (error);
}

int
kern_readv(struct thread *td, int fd, struct uio *auio)
{
	struct file *fp;
	int error;

	error = fget_read(td, fd, &cap_read_rights, &fp);
	if (error)
		return (error);
	error = dofileread(td, fd, fp, auio, (off_t)-1, 0);
	fdrop(fp, td);
	return (error);
}
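
/*
 * Usage sketch (userspace, illustrative only): a readv() with a single
 * iovec behaves exactly like the corresponding read():
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = len };
 *	n = readv(fd, &iov, 1);		// same as read(fd, buf, len)
 */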

/*
 * Scatter positioned read system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct preadv_args {
	int	fd;
	struct	iovec *iovp;
	u_int	iovcnt;
	off_t	offset;
};
#endif
int
sys_preadv(struct thread *td, struct preadv_args *uap)
{
	struct uio *auio;
	int error;

	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
	if (error)
		return (error);
	error = kern_preadv(td, uap->fd, auio, uap->offset);
	free(auio, M_IOV);
	return (error);
}

int
kern_preadv(struct thread *td, int fd, struct uio *auio, off_t offset)
{
	struct file *fp;
	int error;

	error = fget_read(td, fd, &cap_pread_rights, &fp);
	if (error)
		return (error);
	if (!(fp->f_ops->fo_flags & DFLAG_SEEKABLE))
		error = ESPIPE;
	else if (offset < 0 &&
	    (fp->f_vnode == NULL || fp->f_vnode->v_type != VCHR))
		error = EINVAL;
	else
		error = dofileread(td, fd, fp, auio, offset, FOF_OFFSET);
	fdrop(fp, td);
	return (error);
}

/*
 * Common code for readv and preadv that reads data in
 * from a file using the passed in uio, offset, and flags.
 */
static int
dofileread(struct thread *td, int fd, struct file *fp, struct uio *auio,
    off_t offset, int flags)
{
	ssize_t cnt;
	int error;
#ifdef KTRACE
	struct uio *ktruio = NULL;
#endif

	AUDIT_ARG_FD(fd);

	/* Finish zero length reads right here */
	if (auio->uio_resid == 0) {
		td->td_retval[0] = 0;
		return (0);
	}
	auio->uio_rw = UIO_READ;
	auio->uio_offset = offset;
	auio->uio_td = td;
#ifdef KTRACE
	if (KTRPOINT(td, KTR_GENIO))
		ktruio = cloneuio(auio);
#endif
	cnt = auio->uio_resid;
	if ((error = fo_read(fp, auio, td->td_ucred, flags, td))) {
		if (auio->uio_resid != cnt && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
	}
	cnt -= auio->uio_resid;
#ifdef KTRACE
	if (ktruio != NULL) {
		ktruio->uio_resid = cnt;
		ktrgenio(fd, UIO_READ, ktruio, error);
	}
#endif
	td->td_retval[0] = cnt;
	return (error);
}
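
/*
 * Note on partial transfers: if a read is interrupted after some bytes
 * have already been copied, dofileread() discards the ERESTART, EINTR,
 * or EWOULDBLOCK error and returns the short count instead.  For
 * example, a 4096-byte read(2) that catches a signal after transferring
 * 1024 bytes returns 1024, not EINTR.
 */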

#ifndef _SYS_SYSPROTO_H_
struct write_args {
	int	fd;
	const void *buf;
	size_t	nbyte;
};
#endif
int
sys_write(struct thread *td, struct write_args *uap)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if (uap->nbyte > IOSIZE_MAX)
		return (EINVAL);
	aiov.iov_base = (void *)(uintptr_t)uap->buf;
	aiov.iov_len = uap->nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = uap->nbyte;
	auio.uio_segflg = UIO_USERSPACE;
	error = kern_writev(td, uap->fd, &auio);
	return (error);
}

/*
 * Positioned write system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct pwrite_args {
	int	fd;
	const void *buf;
	size_t	nbyte;
	int	pad;
	off_t	offset;
};
#endif
int
sys_pwrite(struct thread *td, struct pwrite_args *uap)
{

	return (kern_pwrite(td, uap->fd, uap->buf, uap->nbyte, uap->offset));
}

int
kern_pwrite(struct thread *td, int fd, const void *buf, size_t nbyte,
    off_t offset)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if (nbyte > IOSIZE_MAX)
		return (EINVAL);
	aiov.iov_base = (void *)(uintptr_t)buf;
	aiov.iov_len = nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = nbyte;
	auio.uio_segflg = UIO_USERSPACE;
	error = kern_pwritev(td, fd, &auio, offset);
	return (error);
}

#if defined(COMPAT_FREEBSD6)
int
freebsd6_pwrite(struct thread *td, struct freebsd6_pwrite_args *uap)
{

	return (kern_pwrite(td, uap->fd, uap->buf, uap->nbyte, uap->offset));
}
#endif

/*
 * Gather write system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct writev_args {
	int	fd;
	struct	iovec *iovp;
	u_int	iovcnt;
};
#endif
int
sys_writev(struct thread *td, struct writev_args *uap)
{
	struct uio *auio;
	int error;

	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
	if (error)
		return (error);
	error = kern_writev(td, uap->fd, auio);
	free(auio, M_IOV);
	return (error);
}

int
kern_writev(struct thread *td, int fd, struct uio *auio)
{
	struct file *fp;
	int error;

	error = fget_write(td, fd, &cap_write_rights, &fp);
	if (error)
		return (error);
	error = dofilewrite(td, fd, fp, auio, (off_t)-1, 0);
	fdrop(fp, td);
	return (error);
}

/*
 * Gather positioned write system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct pwritev_args {
	int	fd;
	struct	iovec *iovp;
	u_int	iovcnt;
	off_t	offset;
};
#endif
int
sys_pwritev(struct thread *td, struct pwritev_args *uap)
{
	struct uio *auio;
	int error;

	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
	if (error)
		return (error);
	error = kern_pwritev(td, uap->fd, auio, uap->offset);
	free(auio, M_IOV);
	return (error);
}

int
kern_pwritev(struct thread *td, int fd, struct uio *auio, off_t offset)
{
	struct file *fp;
	int error;

	error = fget_write(td, fd, &cap_pwrite_rights, &fp);
	if (error)
		return (error);
	if (!(fp->f_ops->fo_flags & DFLAG_SEEKABLE))
		error = ESPIPE;
	else if (offset < 0 &&
	    (fp->f_vnode == NULL || fp->f_vnode->v_type != VCHR))
		error = EINVAL;
	else
		error = dofilewrite(td, fd, fp, auio, offset, FOF_OFFSET);
	fdrop(fp, td);
	return (error);
}
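
/*
 * Usage sketch (userspace, illustrative only): pwrite() never moves the
 * file offset, so cooperating writers can target disjoint regions
 * without serializing on lseek():
 *
 *	pwrite(fd, hdr, sizeof(hdr), 0);
 *	pwrite(fd, body, bodylen, sizeof(hdr));
 */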

/*
 * Common code for writev and pwritev that writes data to
 * a file using the passed in uio, offset, and flags.
 */
static int
dofilewrite(struct thread *td, int fd, struct file *fp, struct uio *auio,
    off_t offset, int flags)
{
	ssize_t cnt;
	int error;
#ifdef KTRACE
	struct uio *ktruio = NULL;
#endif

	AUDIT_ARG_FD(fd);
	auio->uio_rw = UIO_WRITE;
	auio->uio_td = td;
	auio->uio_offset = offset;
#ifdef KTRACE
	if (KTRPOINT(td, KTR_GENIO))
		ktruio = cloneuio(auio);
#endif
	cnt = auio->uio_resid;
	if (fp->f_type == DTYPE_VNODE &&
	    (fp->f_vnread_flags & FDEVFS_VNODE) == 0)
		bwillwrite();
	if ((error = fo_write(fp, auio, td->td_ucred, flags, td))) {
		if (auio->uio_resid != cnt && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
		/* Socket layer is responsible for issuing SIGPIPE. */
		if (fp->f_type != DTYPE_SOCKET && error == EPIPE) {
			PROC_LOCK(td->td_proc);
			tdsignal(td, SIGPIPE);
			PROC_UNLOCK(td->td_proc);
		}
	}
	cnt -= auio->uio_resid;
#ifdef KTRACE
	if (ktruio != NULL) {
		ktruio->uio_resid = cnt;
		ktrgenio(fd, UIO_WRITE, ktruio, error);
	}
#endif
	td->td_retval[0] = cnt;
	return (error);
}

/*
 * Truncate a file given a file descriptor.
 *
 * Can't use fget_write() here, since must return EINVAL and not EBADF if the
 * descriptor isn't writable.
 */
int
kern_ftruncate(struct thread *td, int fd, off_t length)
{
	struct file *fp;
	int error;

	AUDIT_ARG_FD(fd);
	if (length < 0)
		return (EINVAL);
	error = fget(td, fd, &cap_ftruncate_rights, &fp);
	if (error)
		return (error);
	AUDIT_ARG_FILE(td->td_proc, fp);
	if (!(fp->f_flag & FWRITE)) {
		fdrop(fp, td);
		return (EINVAL);
	}
	error = fo_truncate(fp, length, td->td_ucred, td);
	fdrop(fp, td);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ftruncate_args {
	int	fd;
	int	pad;
	off_t	length;
};
#endif
int
sys_ftruncate(struct thread *td, struct ftruncate_args *uap)
{

	return (kern_ftruncate(td, uap->fd, uap->length));
}

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct oftruncate_args {
	int	fd;
	long	length;
};
#endif
int
oftruncate(struct thread *td, struct oftruncate_args *uap)
{

	return (kern_ftruncate(td, uap->fd, uap->length));
}
#endif /* COMPAT_43 */

#ifndef _SYS_SYSPROTO_H_
struct ioctl_args {
	int	fd;
	u_long	com;
	caddr_t	data;
};
#endif
/* ARGSUSED */
int
sys_ioctl(struct thread *td, struct ioctl_args *uap)
{
	u_char smalldata[SYS_IOCTL_SMALL_SIZE] __aligned(SYS_IOCTL_SMALL_ALIGN);
	u_long com;
	int arg, error;
	u_int size;
	caddr_t data;

	if (uap->com > 0xffffffff) {
		printf(
		    "WARNING pid %d (%s): ioctl sign-extension ioctl %lx\n",
		    td->td_proc->p_pid, td->td_name, uap->com);
		uap->com &= 0xffffffff;
	}
	com = uap->com;

	/*
	 * Interpret high order word to find amount of data to be
	 * copied to/from the user's address space.
	 */
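	/*
	 * (Illustrative, based on the <sys/ioccom.h> encoding: for
	 * example, FIONBIO is defined as _IOW('f', 126, int), so
	 * IOCPARM_LEN() yields sizeof(int) and IOC_IN is set,
	 * requesting a copyin of 4 bytes.)
	 */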
	size = IOCPARM_LEN(com);
	if ((size > IOCPARM_MAX) ||
	    ((com & (IOC_VOID | IOC_IN | IOC_OUT)) == 0) ||
#if defined(COMPAT_FREEBSD5) || defined(COMPAT_FREEBSD4) || defined(COMPAT_43)
	    ((com & IOC_OUT) && size == 0) ||
#else
	    ((com & (IOC_IN | IOC_OUT)) && size == 0) ||
#endif
	    ((com & IOC_VOID) && size > 0 && size != sizeof(int)))
		return (ENOTTY);

	if (size > 0) {
		if (com & IOC_VOID) {
			/* Integer argument. */
			arg = (intptr_t)uap->data;
			data = (void *)&arg;
			size = 0;
		} else {
			if (size > SYS_IOCTL_SMALL_SIZE)
				data = malloc((u_long)size, M_IOCTLOPS, M_WAITOK);
			else
				data = smalldata;
		}
	} else
		data = (void *)&uap->data;
	if (com & IOC_IN) {
		error = copyin(uap->data, data, (u_int)size);
		if (error != 0)
			goto out;
	} else if (com & IOC_OUT) {
		/*
		 * Zero the buffer so the user always
		 * gets back something deterministic.
		 */
		bzero(data, size);
	}

	error = kern_ioctl(td, uap->fd, com, data);

	if (error == 0 && (com & IOC_OUT))
		error = copyout(data, uap->data, (u_int)size);

out:
	if (size > SYS_IOCTL_SMALL_SIZE)
		free(data, M_IOCTLOPS);
	return (error);
}

int
kern_ioctl(struct thread *td, int fd, u_long com, caddr_t data)
{
	struct file *fp;
	struct filedesc *fdp;
	int error, tmp, locked;

	AUDIT_ARG_FD(fd);
	AUDIT_ARG_CMD(com);

	fdp = td->td_proc->p_fd;

	switch (com) {
	case FIONCLEX:
	case FIOCLEX:
		FILEDESC_XLOCK(fdp);
		locked = LA_XLOCKED;
		break;
	default:
#ifdef CAPABILITIES
		FILEDESC_SLOCK(fdp);
		locked = LA_SLOCKED;
#else
		locked = LA_UNLOCKED;
#endif
		break;
	}

#ifdef CAPABILITIES
	if ((fp = fget_locked(fdp, fd)) == NULL) {
		error = EBADF;
		goto out;
	}
	if ((error = cap_ioctl_check(fdp, fd, com)) != 0) {
		fp = NULL;	/* fhold() was not called yet */
		goto out;
	}
	fhold(fp);
	if (locked == LA_SLOCKED) {
		FILEDESC_SUNLOCK(fdp);
		locked = LA_UNLOCKED;
	}
#else
	error = fget(td, fd, &cap_ioctl_rights, &fp);
	if (error != 0) {
		fp = NULL;
		goto out;
	}
#endif
	if ((fp->f_flag & (FREAD | FWRITE)) == 0) {
		error = EBADF;
		goto out;
	}

	switch (com) {
	case FIONCLEX:
		fdp->fd_ofiles[fd].fde_flags &= ~UF_EXCLOSE;
		goto out;
	case FIOCLEX:
		fdp->fd_ofiles[fd].fde_flags |= UF_EXCLOSE;
		goto out;
	case FIONBIO:
		if ((tmp = *(int *)data))
			atomic_set_int(&fp->f_flag, FNONBLOCK);
		else
			atomic_clear_int(&fp->f_flag, FNONBLOCK);
		data = (void *)&tmp;
		break;
	case FIOASYNC:
		if ((tmp = *(int *)data))
			atomic_set_int(&fp->f_flag, FASYNC);
		else
			atomic_clear_int(&fp->f_flag, FASYNC);
		data = (void *)&tmp;
		break;
	}

	error = fo_ioctl(fp, com, data, td->td_ucred, td);
out:
	switch (locked) {
	case LA_XLOCKED:
		FILEDESC_XUNLOCK(fdp);
		break;
#ifdef CAPABILITIES
	case LA_SLOCKED:
		FILEDESC_SUNLOCK(fdp);
		break;
#endif
	default:
		FILEDESC_UNLOCK_ASSERT(fdp);
		break;
	}
	if (fp != NULL)
		fdrop(fp, td);
	return (error);
}

int
poll_no_poll(int events)
{
	/*
	 * Return true for read/write.  If the user asked for something
	 * special, return POLLNVAL, so that clients have a way of
	 * determining reliably whether or not the extended
	 * functionality is present without hard-coding knowledge
	 * of specific filesystem implementations.
	 */
	if (events & ~POLLSTANDARD)
		return (POLLNVAL);

	return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}
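
/*
 * Usage sketch (userspace, illustrative only): pselect() installs the
 * temporary signal mask and blocks atomically, closing the race in the
 * classic sigprocmask(); select(); sigprocmask(); sequence:
 *
 *	sigemptyset(&mask);
 *	pselect(nfds, &rfds, NULL, NULL, &ts, &mask);
 */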
int
sys_pselect(struct thread *td, struct pselect_args *uap)
{
	struct timespec ts;
	struct timeval tv, *tvp;
	sigset_t set, *uset;
	int error;

	if (uap->ts != NULL) {
		error = copyin(uap->ts, &ts, sizeof(ts));
		if (error != 0)
			return (error);
		TIMESPEC_TO_TIMEVAL(&tv, &ts);
		tvp = &tv;
	} else
		tvp = NULL;
	if (uap->sm != NULL) {
		error = copyin(uap->sm, &set, sizeof(set));
		if (error != 0)
			return (error);
		uset = &set;
	} else
		uset = NULL;
	return (kern_pselect(td, uap->nd, uap->in, uap->ou, uap->ex, tvp,
	    uset, NFDBITS));
}

int
kern_pselect(struct thread *td, int nd, fd_set *in, fd_set *ou, fd_set *ex,
    struct timeval *tvp, sigset_t *uset, int abi_nfdbits)
{
	int error;

	if (uset != NULL) {
		error = kern_sigprocmask(td, SIG_SETMASK, uset,
		    &td->td_oldsigmask, 0);
		if (error != 0)
			return (error);
		td->td_pflags |= TDP_OLDMASK;
		/*
		 * Make sure that ast() is called on return to
		 * usermode and TDP_OLDMASK is cleared, restoring old
		 * sigmask.
		 */
		thread_lock(td);
		td->td_flags |= TDF_ASTPENDING;
		thread_unlock(td);
	}
	error = kern_select(td, nd, in, ou, ex, tvp, abi_nfdbits);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct select_args {
	int	nd;
	fd_set	*in, *ou, *ex;
	struct	timeval *tv;
};
#endif
int
sys_select(struct thread *td, struct select_args *uap)
{
	struct timeval tv, *tvp;
	int error;

	if (uap->tv != NULL) {
		error = copyin(uap->tv, &tv, sizeof(tv));
		if (error)
			return (error);
		tvp = &tv;
	} else
		tvp = NULL;

	return (kern_select(td, uap->nd, uap->in, uap->ou, uap->ex, tvp,
	    NFDBITS));
}

/*
 * In the unlikely case when the user specified n greater than the last
 * open file descriptor, check that no bits are set after the last
 * valid fd.  We must return EBADF if any is set.
 *
 * There are applications that rely on this behaviour.
 *
 * nd is fd_lastfile + 1.
 */
static int
select_check_badfd(fd_set *fd_in, int nd, int ndu, int abi_nfdbits)
{
	char *addr, *oaddr;
	int b, i, res;
	uint8_t bits;

	if (nd >= ndu || fd_in == NULL)
		return (0);

	oaddr = NULL;
	bits = 0; /* silence gcc */
	for (i = nd; i < ndu; i++) {
		b = i / NBBY;
#if BYTE_ORDER == LITTLE_ENDIAN
		addr = (char *)fd_in + b;
#else
		addr = (char *)fd_in;
		if (abi_nfdbits == NFDBITS) {
			addr += rounddown(b, sizeof(fd_mask)) +
			    sizeof(fd_mask) - 1 - b % sizeof(fd_mask);
		} else {
			addr += rounddown(b, sizeof(uint32_t)) +
			    sizeof(uint32_t) - 1 - b % sizeof(uint32_t);
		}
#endif
		if (addr != oaddr) {
			res = fubyte(addr);
			if (res == -1)
				return (EFAULT);
			oaddr = addr;
			bits = res;
		}
		if ((bits & (1 << (i % NBBY))) != 0)
			return (EBADF);
	}
	return (0);
}
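
/*
 * Worked example (illustrative): with fd_lastfile == 4, a call such as
 * select(64, &set, ...) enters kern_select() with nd clamped to 5 and
 * ndu == 64; if the caller left bit 10 set in the input set, the scan
 * above finds it and the call fails with EBADF.
 */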
int
kern_select(struct thread *td, int nd, fd_set *fd_in, fd_set *fd_ou,
    fd_set *fd_ex, struct timeval *tvp, int abi_nfdbits)
{
	struct filedesc *fdp;
	/*
	 * The magic 2048 here is chosen to be just enough for FD_SETSIZE
	 * infds with the new FD_SETSIZE of 1024, and more than enough for
	 * FD_SETSIZE infds, outfds and exceptfds with the old FD_SETSIZE
	 * of 256.
	 */
	fd_mask s_selbits[howmany(2048, NFDBITS)];
	fd_mask *ibits[3], *obits[3], *selbits, *sbp;
	struct timeval rtv;
	sbintime_t asbt, precision, rsbt;
	u_int nbufbytes, ncpbytes, ncpubytes, nfdbits;
	int error, lf, ndu;

	if (nd < 0)
		return (EINVAL);
	fdp = td->td_proc->p_fd;
	ndu = nd;
	lf = fdp->fd_lastfile;
	if (nd > lf + 1)
		nd = lf + 1;

	error = select_check_badfd(fd_in, nd, ndu, abi_nfdbits);
	if (error != 0)
		return (error);
	error = select_check_badfd(fd_ou, nd, ndu, abi_nfdbits);
	if (error != 0)
		return (error);
	error = select_check_badfd(fd_ex, nd, ndu, abi_nfdbits);
	if (error != 0)
		return (error);

	/*
	 * Allocate just enough bits for the non-null fd_sets.  Use the
	 * preallocated auto buffer if possible.
	 */
	nfdbits = roundup(nd, NFDBITS);
	ncpbytes = nfdbits / NBBY;
	ncpubytes = roundup(nd, abi_nfdbits) / NBBY;
	nbufbytes = 0;
	if (fd_in != NULL)
		nbufbytes += 2 * ncpbytes;
	if (fd_ou != NULL)
		nbufbytes += 2 * ncpbytes;
	if (fd_ex != NULL)
		nbufbytes += 2 * ncpbytes;
	if (nbufbytes <= sizeof s_selbits)
		selbits = &s_selbits[0];
	else
		selbits = malloc(nbufbytes, M_SELECT, M_WAITOK);

	/*
	 * Assign pointers into the bit buffers and fetch the input bits.
	 * Put the output buffers together so that they can be bzeroed
	 * together.
	 */
	sbp = selbits;
#define	getbits(name, x)						\
	do {								\
		if (name == NULL) {					\
			ibits[x] = NULL;				\
			obits[x] = NULL;				\
		} else {						\
			ibits[x] = sbp + nbufbytes / 2 / sizeof *sbp;	\
			obits[x] = sbp;					\
			sbp += ncpbytes / sizeof *sbp;			\
			error = copyin(name, ibits[x], ncpubytes);	\
			if (error != 0)					\
				goto done;				\
			bzero((char *)ibits[x] + ncpubytes,		\
			    ncpbytes - ncpubytes);			\
		}							\
	} while (0)
	getbits(fd_in, 0);
	getbits(fd_ou, 1);
	getbits(fd_ex, 2);
#undef	getbits
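	/*
	 * Buffer layout note: the output sets occupy the first half of
	 * selbits and the input sets the second half, so getbits() set
	 * obits[x] to the low half and ibits[x] to the high half.  The
	 * contiguous output half is then cleared with a single bzero()
	 * below.
	 */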
#if BYTE_ORDER == BIG_ENDIAN && defined(__LP64__)
	/*
	 * XXX: swizzle_fdset assumes that if abi_nfdbits != NFDBITS,
	 * we are running under 32-bit emulation. This should be more
	 * generic.
	 */
#define	swizzle_fdset(bits)						\
	if (abi_nfdbits != NFDBITS && bits != NULL) {			\
		int i;							\
		for (i = 0; i < ncpbytes / sizeof *sbp; i++)		\
			bits[i] = (bits[i] >> 32) | (bits[i] << 32);	\
	}
#else
#define	swizzle_fdset(bits)
#endif

	/* Make sure the bit order makes it through an ABI transition */
	swizzle_fdset(ibits[0]);
	swizzle_fdset(ibits[1]);
	swizzle_fdset(ibits[2]);

	if (nbufbytes != 0)
		bzero(selbits, nbufbytes / 2);

	precision = 0;
	if (tvp != NULL) {
		rtv = *tvp;
		if (rtv.tv_sec < 0 || rtv.tv_usec < 0 ||
		    rtv.tv_usec >= 1000000) {
			error = EINVAL;
			goto done;
		}
		if (!timevalisset(&rtv))
			asbt = 0;
		else if (rtv.tv_sec <= INT32_MAX) {
			rsbt = tvtosbt(rtv);
			precision = rsbt;
			precision >>= tc_precexp;
			if (TIMESEL(&asbt, rsbt))
				asbt += tc_tick_sbt;
			if (asbt <= SBT_MAX - rsbt)
				asbt += rsbt;
			else
				asbt = -1;
		} else
			asbt = -1;
	} else
		asbt = -1;
	seltdinit(td);
	/* Iterate until the timeout expires or descriptors become ready. */
	for (;;) {
		error = selscan(td, ibits, obits, nd);
		if (error || td->td_retval[0] != 0)
			break;
		error = seltdwait(td, asbt, precision);
		if (error)
			break;
		error = selrescan(td, ibits, obits);
		if (error || td->td_retval[0] != 0)
			break;
	}
	seltdclear(td);

done:
	/* select is not restarted after signals... */
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;

	/* swizzle bit order back, if necessary */
	swizzle_fdset(obits[0]);
	swizzle_fdset(obits[1]);
	swizzle_fdset(obits[2]);
#undef swizzle_fdset

#define	putbits(name, x)						\
	if (name && (error2 = copyout(obits[x], name, ncpubytes)))	\
		error = error2;
	if (error == 0) {
		int error2;

		putbits(fd_in, 0);
		putbits(fd_ou, 1);
		putbits(fd_ex, 2);
#undef putbits
	}
	if (selbits != &s_selbits[0])
		free(selbits, M_SELECT);

	return (error);
}

/*
 * Convert a select bit set to poll flags.
 *
 * The backend always returns POLLHUP/POLLERR if appropriate and we
 * return this as a set bit in any set.
 */
static int select_flags[3] = {
    POLLRDNORM | POLLHUP | POLLERR,
    POLLWRNORM | POLLHUP | POLLERR,
    POLLRDBAND | POLLERR
};

/*
 * Compute the fo_poll flags required for a fd given by the index and
 * bit position in the fd_mask array.
 */
static __inline int
selflags(fd_mask **ibits, int idx, fd_mask bit)
{
	int flags;
	int msk;

	flags = 0;
	for (msk = 0; msk < 3; msk++) {
		if (ibits[msk] == NULL)
			continue;
		if ((ibits[msk][idx] & bit) == 0)
			continue;
		flags |= select_flags[msk];
	}
	return (flags);
}
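
/*
 * Example (illustrative): a descriptor present in both the read and
 * write input sets yields POLLRDNORM | POLLWRNORM | POLLHUP | POLLERR,
 * so a hangup or error reported by the backend is reflected in every
 * requested set.
 */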
/*
 * Set the appropriate output bits given a mask of fired events and the
 * input bits originally requested.
 */
static __inline int
selsetbits(fd_mask **ibits, fd_mask **obits, int idx, fd_mask bit, int events)
{
	int msk;
	int n;

	n = 0;
	for (msk = 0; msk < 3; msk++) {
		if ((events & select_flags[msk]) == 0)
			continue;
		if (ibits[msk] == NULL)
			continue;
		if ((ibits[msk][idx] & bit) == 0)
			continue;
		/*
		 * XXX Check for a duplicate set.  This can occur because a
		 * socket calls selrecord() twice for each poll() call
		 * resulting in two selfds per real fd.  selrescan() will
		 * call selsetbits twice as a result.
		 */
		if ((obits[msk][idx] & bit) != 0)
			continue;
		obits[msk][idx] |= bit;
		n++;
	}

	return (n);
}

static __inline int
getselfd_cap(struct filedesc *fdp, int fd, struct file **fpp)
{

	return (fget_unlocked(fdp, fd, &cap_event_rights, fpp, NULL));
}

/*
 * Traverse the list of fds attached to this thread's seltd and check for
 * completion.
 */
static int
selrescan(struct thread *td, fd_mask **ibits, fd_mask **obits)
{
	struct filedesc *fdp;
	struct selinfo *si;
	struct seltd *stp;
	struct selfd *sfp;
	struct selfd *sfn;
	struct file *fp;
	fd_mask bit;
	int fd, ev, n, idx;
	int error;

	fdp = td->td_proc->p_fd;
	stp = td->td_sel;
	n = 0;
	STAILQ_FOREACH_SAFE(sfp, &stp->st_selq, sf_link, sfn) {
		fd = (int)(uintptr_t)sfp->sf_cookie;
		si = sfp->sf_si;
		selfdfree(stp, sfp);
		/* If the selinfo wasn't cleared the event didn't fire. */
		if (si != NULL)
			continue;
		error = getselfd_cap(fdp, fd, &fp);
		if (error)
			return (error);
		idx = fd / NFDBITS;
		bit = (fd_mask)1 << (fd % NFDBITS);
		ev = fo_poll(fp, selflags(ibits, idx, bit), td->td_ucred, td);
		fdrop(fp, td);
		if (ev != 0)
			n += selsetbits(ibits, obits, idx, bit, ev);
	}
	stp->st_flags = 0;
	td->td_retval[0] = n;
	return (0);
}
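
/*
 * Cookie note: for select() the selfd cookie stores the descriptor
 * number itself, cast to a pointer (see selscan() below); for poll()
 * it stores a pointer into the kernel copy of the pollfd array (see
 * pollscan()).
 */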
/*
 * Perform the initial filedescriptor scan and register ourselves with
 * each selinfo.
 */
static int
selscan(struct thread *td, fd_mask **ibits, fd_mask **obits, int nfd)
{
	struct filedesc *fdp;
	struct file *fp;
	fd_mask bit;
	int ev, flags, end, fd;
	int n, idx;
	int error;

	fdp = td->td_proc->p_fd;
	n = 0;
	for (idx = 0, fd = 0; fd < nfd; idx++) {
		end = imin(fd + NFDBITS, nfd);
		for (bit = 1; fd < end; bit <<= 1, fd++) {
			/* Compute the list of events we're interested in. */
			flags = selflags(ibits, idx, bit);
			if (flags == 0)
				continue;
			error = getselfd_cap(fdp, fd, &fp);
			if (error)
				return (error);
			selfdalloc(td, (void *)(uintptr_t)fd);
			ev = fo_poll(fp, flags, td->td_ucred, td);
			fdrop(fp, td);
			if (ev != 0)
				n += selsetbits(ibits, obits, idx, bit, ev);
		}
	}

	td->td_retval[0] = n;
	return (0);
}

int
sys_poll(struct thread *td, struct poll_args *uap)
{
	struct timespec ts, *tsp;

	if (uap->timeout != INFTIM) {
		if (uap->timeout < 0)
			return (EINVAL);
		ts.tv_sec = uap->timeout / 1000;
		ts.tv_nsec = (uap->timeout % 1000) * 1000000;
		tsp = &ts;
	} else
		tsp = NULL;

	return (kern_poll(td, uap->fds, uap->nfds, tsp, NULL));
}
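
/*
 * Usage sketch (userspace, illustrative only):
 *
 *	struct pollfd pfd = { .fd = sock, .events = POLLIN };
 *	n = poll(&pfd, 1, 5000);	// block for at most 5 seconds
 *	if (n > 0 && (pfd.revents & POLLIN))
 *		... data is ready ...
 */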
int
kern_poll(struct thread *td, struct pollfd *fds, u_int nfds,
    struct timespec *tsp, sigset_t *uset)
{
	struct pollfd *bits;
	struct pollfd smallbits[32];
	sbintime_t sbt, precision, tmp;
	time_t over;
	struct timespec ts;
	int error;
	size_t ni;

	precision = 0;
	if (tsp != NULL) {
		if (tsp->tv_sec < 0)
			return (EINVAL);
		if (tsp->tv_nsec < 0 || tsp->tv_nsec >= 1000000000)
			return (EINVAL);
		if (tsp->tv_sec == 0 && tsp->tv_nsec == 0)
			sbt = 0;
		else {
			ts = *tsp;
			if (ts.tv_sec > INT32_MAX / 2) {
				over = ts.tv_sec - INT32_MAX / 2;
				ts.tv_sec -= over;
			} else
				over = 0;
			tmp = tstosbt(ts);
			precision = tmp;
			precision >>= tc_precexp;
			if (TIMESEL(&sbt, tmp))
				sbt += tc_tick_sbt;
			sbt += tmp;
		}
	} else
		sbt = -1;

	if (nfds > maxfilesperproc && nfds > FD_SETSIZE)
		return (EINVAL);
	ni = nfds * sizeof(struct pollfd);
	if (ni > sizeof(smallbits))
		bits = malloc(ni, M_TEMP, M_WAITOK);
	else
		bits = smallbits;
	error = copyin(fds, bits, ni);
	if (error)
		goto done;

	if (uset != NULL) {
		error = kern_sigprocmask(td, SIG_SETMASK, uset,
		    &td->td_oldsigmask, 0);
		if (error)
			goto done;
		td->td_pflags |= TDP_OLDMASK;
		/*
		 * Make sure that ast() is called on return to
		 * usermode and TDP_OLDMASK is cleared, restoring old
		 * sigmask.
		 */
		thread_lock(td);
		td->td_flags |= TDF_ASTPENDING;
		thread_unlock(td);
	}

	seltdinit(td);
	/* Iterate until the timeout expires or descriptors become ready. */
	for (;;) {
		error = pollscan(td, bits, nfds);
		if (error || td->td_retval[0] != 0)
			break;
		error = seltdwait(td, sbt, precision);
		if (error)
			break;
		error = pollrescan(td);
		if (error || td->td_retval[0] != 0)
			break;
	}
	seltdclear(td);

done:
	/* poll is not restarted after signals... */
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;
	if (error == 0) {
		error = pollout(td, bits, fds, nfds);
		if (error)
			goto out;
	}
out:
	if (ni > sizeof(smallbits))
		free(bits, M_TEMP);
	return (error);
}

int
sys_ppoll(struct thread *td, struct ppoll_args *uap)
{
	struct timespec ts, *tsp;
	sigset_t set, *ssp;
	int error;

	if (uap->ts != NULL) {
		error = copyin(uap->ts, &ts, sizeof(ts));
		if (error)
			return (error);
		tsp = &ts;
	} else
		tsp = NULL;
	if (uap->set != NULL) {
		error = copyin(uap->set, &set, sizeof(set));
		if (error)
			return (error);
		ssp = &set;
	} else
		ssp = NULL;
	/*
	 * fds is still a pointer to user space.  kern_poll() will
	 * take care of copying that array in to the kernel.
	 */

	return (kern_poll(td, uap->fds, uap->nfds, tsp, ssp));
}

static int
pollrescan(struct thread *td)
{
	struct seltd *stp;
	struct selfd *sfp;
	struct selfd *sfn;
	struct selinfo *si;
	struct filedesc *fdp;
	struct file *fp;
	struct pollfd *fd;
	int n;

	n = 0;
	fdp = td->td_proc->p_fd;
	stp = td->td_sel;
	FILEDESC_SLOCK(fdp);
	STAILQ_FOREACH_SAFE(sfp, &stp->st_selq, sf_link, sfn) {
		fd = (struct pollfd *)sfp->sf_cookie;
		si = sfp->sf_si;
		selfdfree(stp, sfp);
		/* If the selinfo wasn't cleared the event didn't fire. */
		if (si != NULL)
			continue;
		fp = fdp->fd_ofiles[fd->fd].fde_file;
#ifdef CAPABILITIES
		if (fp == NULL ||
		    cap_check(cap_rights(fdp, fd->fd), &cap_event_rights) != 0)
#else
		if (fp == NULL)
#endif
		{
			fd->revents = POLLNVAL;
			n++;
			continue;
		}

		/*
		 * Note: backend also returns POLLHUP and
		 * POLLERR if appropriate.
		 */
		fd->revents = fo_poll(fp, fd->events, td->td_ucred, td);
		if (fd->revents != 0)
			n++;
	}
	FILEDESC_SUNLOCK(fdp);
	stp->st_flags = 0;
	td->td_retval[0] = n;
	return (0);
}
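
/*
 * Copy the results back to the user's pollfd array.  Only the revents
 * fields are written; the fd and events fields in userspace are left
 * untouched.  The count of descriptors with events is returned via
 * td_retval[0].
 */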
static int
pollout(struct thread *td, struct pollfd *fds, struct pollfd *ufds, u_int nfd)
{
	int error = 0;
	u_int i = 0;
	u_int n = 0;

	for (i = 0; i < nfd; i++) {
		error = copyout(&fds->revents, &ufds->revents,
		    sizeof(ufds->revents));
		if (error)
			return (error);
		if (fds->revents != 0)
			n++;
		fds++;
		ufds++;
	}
	td->td_retval[0] = n;
	return (0);
}

static int
pollscan(struct thread *td, struct pollfd *fds, u_int nfd)
{
	struct filedesc *fdp = td->td_proc->p_fd;
	struct file *fp;
	int i, n = 0;

	FILEDESC_SLOCK(fdp);
	for (i = 0; i < nfd; i++, fds++) {
		if (fds->fd > fdp->fd_lastfile) {
			fds->revents = POLLNVAL;
			n++;
		} else if (fds->fd < 0) {
			fds->revents = 0;
		} else {
			fp = fdp->fd_ofiles[fds->fd].fde_file;
#ifdef CAPABILITIES
			if (fp == NULL ||
			    cap_check(cap_rights(fdp, fds->fd),
			    &cap_event_rights) != 0)
#else
			if (fp == NULL)
#endif
			{
				fds->revents = POLLNVAL;
				n++;
			} else {
				/*
				 * Note: backend also returns POLLHUP and
				 * POLLERR if appropriate.
				 */
				selfdalloc(td, fds);
				fds->revents = fo_poll(fp, fds->events,
				    td->td_ucred, td);
				/*
				 * POSIX requires POLLOUT to be never
				 * set simultaneously with POLLHUP.
				 */
				if ((fds->revents & POLLHUP) != 0)
					fds->revents &= ~POLLOUT;

				if (fds->revents != 0)
					n++;
			}
		}
	}
	FILEDESC_SUNLOCK(fdp);
	td->td_retval[0] = n;
	return (0);
}

/*
 * XXX This was created specifically to support netncp and netsmb.  This
 * allows the caller to specify a socket to wait for events on.  It returns
 * 0 if any events matched and an error otherwise.  There is no way to
 * determine which events fired.
 */
int
selsocket(struct socket *so, int events, struct timeval *tvp, struct thread *td)
{
	struct timeval rtv;
	sbintime_t asbt, precision, rsbt;
	int error;

	precision = 0;	/* stupid gcc! */
	if (tvp != NULL) {
		rtv = *tvp;
		if (rtv.tv_sec < 0 || rtv.tv_usec < 0 ||
		    rtv.tv_usec >= 1000000)
			return (EINVAL);
		if (!timevalisset(&rtv))
			asbt = 0;
		else if (rtv.tv_sec <= INT32_MAX) {
			rsbt = tvtosbt(rtv);
			precision = rsbt;
			precision >>= tc_precexp;
			if (TIMESEL(&asbt, rsbt))
				asbt += tc_tick_sbt;
			if (asbt <= SBT_MAX - rsbt)
				asbt += rsbt;
			else
				asbt = -1;
		} else
			asbt = -1;
	} else
		asbt = -1;
	seltdinit(td);
	/*
	 * Iterate until the timeout expires or the socket becomes ready.
	 */
	for (;;) {
		selfdalloc(td, NULL);
		error = sopoll(so, events, NULL, td);
		/* error here is actually the ready events. */
		if (error)
			return (0);
		error = seltdwait(td, asbt, precision);
		if (error)
			break;
	}
	seltdclear(td);
	/* XXX Duplicates ncp/smb behavior. */
	if (error == ERESTART)
		error = 0;
	return (error);
}

/*
 * Preallocate two selfds associated with 'cookie'.  Some fo_poll routines
 * have two select sets, one for read and another for write.
 */
static void
selfdalloc(struct thread *td, void *cookie)
{
	struct seltd *stp;

	stp = td->td_sel;
	if (stp->st_free1 == NULL)
		stp->st_free1 = uma_zalloc(selfd_zone, M_WAITOK|M_ZERO);
	stp->st_free1->sf_td = stp;
	stp->st_free1->sf_cookie = cookie;
	if (stp->st_free2 == NULL)
		stp->st_free2 = uma_zalloc(selfd_zone, M_WAITOK|M_ZERO);
	stp->st_free2->sf_td = stp;
	stp->st_free2->sf_cookie = cookie;
}

static void
selfdfree(struct seltd *stp, struct selfd *sfp)
{
	STAILQ_REMOVE(&stp->st_selq, sfp, selfd, sf_link);
	if (sfp->sf_si != NULL) {
		mtx_lock(sfp->sf_mtx);
		if (sfp->sf_si != NULL) {
			TAILQ_REMOVE(&sfp->sf_si->si_tdlist, sfp, sf_threads);
			refcount_release(&sfp->sf_refs);
		}
		mtx_unlock(sfp->sf_mtx);
	}
	if (refcount_release(&sfp->sf_refs))
		uma_zfree(selfd_zone, sfp);
}
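
/*
 * Lifecycle note: selrecord() initializes sf_refs to 2, one reference
 * for the thread's st_selq list and one for the selinfo's si_tdlist.
 * selfdfree() and doselwakeup() each drop the reference for the list
 * they unlink from; whichever release brings the count to zero returns
 * the selfd to the zone.
 */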
1696 */ 1697 doselwakeup(sip, -1); 1698 } 1699 1700 /* 1701 * Record a select request. 1702 */ 1703 void 1704 selrecord(selector, sip) 1705 struct thread *selector; 1706 struct selinfo *sip; 1707 { 1708 struct selfd *sfp; 1709 struct seltd *stp; 1710 struct mtx *mtxp; 1711 1712 stp = selector->td_sel; 1713 /* 1714 * Don't record when doing a rescan. 1715 */ 1716 if (stp->st_flags & SELTD_RESCAN) 1717 return; 1718 /* 1719 * Grab one of the preallocated descriptors. 1720 */ 1721 sfp = NULL; 1722 if ((sfp = stp->st_free1) != NULL) 1723 stp->st_free1 = NULL; 1724 else if ((sfp = stp->st_free2) != NULL) 1725 stp->st_free2 = NULL; 1726 else 1727 panic("selrecord: No free selfd on selq"); 1728 mtxp = sip->si_mtx; 1729 if (mtxp == NULL) 1730 mtxp = mtx_pool_find(mtxpool_select, sip); 1731 /* 1732 * Initialize the sfp and queue it in the thread. 1733 */ 1734 sfp->sf_si = sip; 1735 sfp->sf_mtx = mtxp; 1736 refcount_init(&sfp->sf_refs, 2); 1737 STAILQ_INSERT_TAIL(&stp->st_selq, sfp, sf_link); 1738 /* 1739 * Now that we've locked the sip, check for initialization. 1740 */ 1741 mtx_lock(mtxp); 1742 if (sip->si_mtx == NULL) { 1743 sip->si_mtx = mtxp; 1744 TAILQ_INIT(&sip->si_tdlist); 1745 } 1746 /* 1747 * Add this thread to the list of selfds listening on this selinfo. 1748 */ 1749 TAILQ_INSERT_TAIL(&sip->si_tdlist, sfp, sf_threads); 1750 mtx_unlock(sip->si_mtx); 1751 } 1752 1753 /* Wake up a selecting thread. */ 1754 void 1755 selwakeup(sip) 1756 struct selinfo *sip; 1757 { 1758 doselwakeup(sip, -1); 1759 } 1760 1761 /* Wake up a selecting thread, and set its priority. */ 1762 void 1763 selwakeuppri(sip, pri) 1764 struct selinfo *sip; 1765 int pri; 1766 { 1767 doselwakeup(sip, pri); 1768 } 1769 1770 /* 1771 * Do a wakeup when a selectable event occurs. 1772 */ 1773 static void 1774 doselwakeup(sip, pri) 1775 struct selinfo *sip; 1776 int pri; 1777 { 1778 struct selfd *sfp; 1779 struct selfd *sfn; 1780 struct seltd *stp; 1781 1782 /* If it's not initialized there can't be any waiters. */ 1783 if (sip->si_mtx == NULL) 1784 return; 1785 /* 1786 * Locking the selinfo locks all selfds associated with it. 1787 */ 1788 mtx_lock(sip->si_mtx); 1789 TAILQ_FOREACH_SAFE(sfp, &sip->si_tdlist, sf_threads, sfn) { 1790 /* 1791 * Once we remove this sfp from the list and clear the 1792 * sf_si seltdclear will know to ignore this si. 1793 */ 1794 TAILQ_REMOVE(&sip->si_tdlist, sfp, sf_threads); 1795 sfp->sf_si = NULL; 1796 stp = sfp->sf_td; 1797 mtx_lock(&stp->st_mtx); 1798 stp->st_flags |= SELTD_PENDING; 1799 cv_broadcastpri(&stp->st_wait, pri); 1800 mtx_unlock(&stp->st_mtx); 1801 if (refcount_release(&sfp->sf_refs)) 1802 uma_zfree(selfd_zone, sfp); 1803 } 1804 mtx_unlock(sip->si_mtx); 1805 } 1806 1807 static void 1808 seltdinit(struct thread *td) 1809 { 1810 struct seltd *stp; 1811 1812 if ((stp = td->td_sel) != NULL) 1813 goto out; 1814 td->td_sel = stp = malloc(sizeof(*stp), M_SELECT, M_WAITOK|M_ZERO); 1815 mtx_init(&stp->st_mtx, "sellck", NULL, MTX_DEF); 1816 cv_init(&stp->st_wait, "select"); 1817 out: 1818 stp->st_flags = 0; 1819 STAILQ_INIT(&stp->st_selq); 1820 } 1821 1822 static int 1823 seltdwait(struct thread *td, sbintime_t sbt, sbintime_t precision) 1824 { 1825 struct seltd *stp; 1826 int error; 1827 1828 stp = td->td_sel; 1829 /* 1830 * An event of interest may occur while we do not hold the seltd 1831 * locked so check the pending flag before we sleep. 1832 */ 1833 mtx_lock(&stp->st_mtx); 1834 /* 1835 * Any further calls to selrecord will be a rescan. 
static int
seltdwait(struct thread *td, sbintime_t sbt, sbintime_t precision)
{
	struct seltd *stp;
	int error;

	stp = td->td_sel;
	/*
	 * An event of interest may occur while we do not hold the seltd
	 * locked so check the pending flag before we sleep.
	 */
	mtx_lock(&stp->st_mtx);
	/*
	 * Any further calls to selrecord will be a rescan.
	 */
	stp->st_flags |= SELTD_RESCAN;
	if (stp->st_flags & SELTD_PENDING) {
		mtx_unlock(&stp->st_mtx);
		return (0);
	}
	if (sbt == 0)
		error = EWOULDBLOCK;
	else if (sbt != -1)
		error = cv_timedwait_sig_sbt(&stp->st_wait, &stp->st_mtx,
		    sbt, precision, C_ABSOLUTE);
	else
		error = cv_wait_sig(&stp->st_wait, &stp->st_mtx);
	mtx_unlock(&stp->st_mtx);

	return (error);
}

void
seltdfini(struct thread *td)
{
	struct seltd *stp;

	stp = td->td_sel;
	if (stp == NULL)
		return;
	if (stp->st_free1)
		uma_zfree(selfd_zone, stp->st_free1);
	if (stp->st_free2)
		uma_zfree(selfd_zone, stp->st_free2);
	td->td_sel = NULL;
	cv_destroy(&stp->st_wait);
	mtx_destroy(&stp->st_mtx);
	free(stp, M_SELECT);
}

/*
 * Remove the references to the thread from all of the objects we were
 * polling.
 */
static void
seltdclear(struct thread *td)
{
	struct seltd *stp;
	struct selfd *sfp;
	struct selfd *sfn;

	stp = td->td_sel;
	STAILQ_FOREACH_SAFE(sfp, &stp->st_selq, sf_link, sfn)
		selfdfree(stp, sfp);
	stp->st_flags = 0;
}

static void selectinit(void *);
SYSINIT(select, SI_SUB_SYSCALLS, SI_ORDER_ANY, selectinit, NULL);
static void
selectinit(void *dummy __unused)
{

	selfd_zone = uma_zcreate("selfd", sizeof(struct selfd), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
	mtxpool_select = mtx_pool_create("select mtxpool", 128, MTX_DEF);
}

/*
 * Set up a syscall return value that follows the convention specified for
 * posix_* functions.
 */
int
kern_posix_error(struct thread *td, int error)
{

	if (error <= 0)
		return (error);
	td->td_errno = error;
	td->td_pflags |= TDP_NERRNO;
	td->td_retval[0] = error;
	return (0);
}
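
/*
 * Example (illustrative): posix_fallocate(2) follows this convention,
 * returning the error number directly on failure instead of -1 with
 * errno; kern_posix_error() arranges that by stashing the error in
 * td_retval[0] and returning success from the syscall itself.
 */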