/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)sys_generic.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_capsicum.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/capsicum.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/socketvar.h>
#include <sys/uio.h>
#include <sys/eventfd.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/resourcevar.h>
#include <sys/selinfo.h>
#include <sys/sleepqueue.h>
#include <sys/specialfd.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vnode.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/condvar.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <security/audit/audit.h>

/*
 * The following macro defines how many bytes will be allocated from
 * the stack instead of dynamically allocated memory when passing the
 * IOCTL data structures between userspace and the kernel.  Some IOCTLs
 * with small data structures are used very frequently, and this small
 * buffer on the stack gives a significant speedup for those requests.
 * The value of this define should be greater than or equal to 64
 * bytes and should also be a power of two.  The data structure is
 * currently hard-aligned to an 8-byte boundary on the stack.  This
 * should currently be sufficient for all supported platforms.
 */
#define	SYS_IOCTL_SMALL_SIZE	128	/* bytes */
#define	SYS_IOCTL_SMALL_ALIGN	8	/* bytes */
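
/*
 * Illustrative sketch of the fast path this buys: a request such as
 * FIONREAD, defined as _IOR('f', 127, int) in <sys/filio.h>, carries
 * only sizeof(int) bytes of argument data, so sys_ioctl() below can
 * serve it entirely from the aligned on-stack buffer:
 *
 *	int nread;
 *
 *	ioctl(fd, FIONREAD, &nread);	// arg staged in smalldata[], no malloc
 *
 * Only requests whose IOCPARM_LEN() exceeds SYS_IOCTL_SMALL_SIZE should
 * fall back to malloc(..., M_IOCTLOPS, M_WAITOK).
 */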

#ifdef __LP64__
static int iosize_max_clamp = 0;
SYSCTL_INT(_debug, OID_AUTO, iosize_max_clamp, CTLFLAG_RW,
    &iosize_max_clamp, 0, "Clamp max i/o size to INT_MAX");
static int devfs_iosize_max_clamp = 1;
SYSCTL_INT(_debug, OID_AUTO, devfs_iosize_max_clamp, CTLFLAG_RW,
    &devfs_iosize_max_clamp, 0, "Clamp max i/o size to INT_MAX for devices");
#endif

/*
 * Assert that the return value of read(2) and write(2) syscalls fits
 * into a register.  If not, an architecture will need to provide the
 * usermode wrappers to reconstruct the result.
 */
CTASSERT(sizeof(register_t) >= sizeof(size_t));

static MALLOC_DEFINE(M_IOCTLOPS, "ioctlops", "ioctl data buffer");
static MALLOC_DEFINE(M_SELECT, "select", "select() buffer");
MALLOC_DEFINE(M_IOV, "iov", "large iov's");

static int	pollout(struct thread *, struct pollfd *, struct pollfd *,
		    u_int);
static int	pollscan(struct thread *, struct pollfd *, u_int);
static int	pollrescan(struct thread *);
static int	selscan(struct thread *, fd_mask **, fd_mask **, int);
static int	selrescan(struct thread *, fd_mask **, fd_mask **);
static void	selfdalloc(struct thread *, void *);
static void	selfdfree(struct seltd *, struct selfd *);
static int	dofileread(struct thread *, int, struct file *, struct uio *,
		    off_t, int);
static int	dofilewrite(struct thread *, int, struct file *, struct uio *,
		    off_t, int);
static void	doselwakeup(struct selinfo *, int);
static void	seltdinit(struct thread *);
static int	seltdwait(struct thread *, sbintime_t, sbintime_t);
static void	seltdclear(struct thread *);

/*
 * One seltd per-thread allocated on demand as needed.
 *
 *	t - protected by st_mtx
 *	k - Only accessed by curthread or read-only
 */
struct seltd {
	STAILQ_HEAD(, selfd)	st_selq;	/* (k) List of selfds. */
	struct selfd		*st_free1;	/* (k) free fd for read set. */
	struct selfd		*st_free2;	/* (k) free fd for write set. */
	struct mtx		st_mtx;		/* Protects struct seltd */
	struct cv		st_wait;	/* (t) Wait channel. */
	int			st_flags;	/* (t) SELTD_ flags. */
};

#define	SELTD_PENDING	0x0001			/* We have pending events. */
#define	SELTD_RESCAN	0x0002			/* Doing a rescan. */

/*
 * One selfd allocated per-thread per-file-descriptor.
 *	f - protected by sf_mtx
 */
struct selfd {
	STAILQ_ENTRY(selfd)	sf_link;	/* (k) fds owned by this td. */
	TAILQ_ENTRY(selfd)	sf_threads;	/* (f) fds on this selinfo. */
	struct selinfo		*sf_si;		/* (f) selinfo when linked. */
	struct mtx		*sf_mtx;	/* Pointer to selinfo mtx. */
	struct seltd		*sf_td;		/* (k) owning seltd. */
	void			*sf_cookie;	/* (k) fd or pollfd. */
};

MALLOC_DEFINE(M_SELFD, "selfd", "selfd");
static struct mtx_pool *mtxpool_select;

#ifdef __LP64__
size_t
devfs_iosize_max(void)
{

	return (devfs_iosize_max_clamp || SV_CURPROC_FLAG(SV_ILP32) ?
	    INT_MAX : SSIZE_MAX);
}

size_t
iosize_max(void)
{

	return (iosize_max_clamp || SV_CURPROC_FLAG(SV_ILP32) ?
	    INT_MAX : SSIZE_MAX);
}
#endif
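
/*
 * Illustrative note: on LP64 platforms IOSIZE_MAX resolves to
 * iosize_max() above, so with the default debug.iosize_max_clamp=0 a
 * 64-bit process may issue a single read(2) or write(2) of up to
 * SSIZE_MAX bytes, while setting the sysctl to 1 makes any request
 * larger than INT_MAX fail with EINVAL (see the nbyte checks below).
 */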

#ifndef _SYS_SYSPROTO_H_
struct read_args {
	int	fd;
	void	*buf;
	size_t	nbyte;
};
#endif
int
sys_read(struct thread *td, struct read_args *uap)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if (uap->nbyte > IOSIZE_MAX)
		return (EINVAL);
	aiov.iov_base = uap->buf;
	aiov.iov_len = uap->nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = uap->nbyte;
	auio.uio_segflg = UIO_USERSPACE;
	error = kern_readv(td, uap->fd, &auio);
	return (error);
}

/*
 * Positioned read system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct pread_args {
	int	fd;
	void	*buf;
	size_t	nbyte;
	int	pad;
	off_t	offset;
};
#endif
int
sys_pread(struct thread *td, struct pread_args *uap)
{

	return (kern_pread(td, uap->fd, uap->buf, uap->nbyte, uap->offset));
}

int
kern_pread(struct thread *td, int fd, void *buf, size_t nbyte, off_t offset)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if (nbyte > IOSIZE_MAX)
		return (EINVAL);
	aiov.iov_base = buf;
	aiov.iov_len = nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = nbyte;
	auio.uio_segflg = UIO_USERSPACE;
	error = kern_preadv(td, fd, &auio, offset);
	return (error);
}

#if defined(COMPAT_FREEBSD6)
int
freebsd6_pread(struct thread *td, struct freebsd6_pread_args *uap)
{

	return (kern_pread(td, uap->fd, uap->buf, uap->nbyte, uap->offset));
}
#endif

/*
 * Scatter read system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct readv_args {
	int	fd;
	struct iovec *iovp;
	u_int	iovcnt;
};
#endif
int
sys_readv(struct thread *td, struct readv_args *uap)
{
	struct uio *auio;
	int error;

	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
	if (error)
		return (error);
	error = kern_readv(td, uap->fd, auio);
	free(auio, M_IOV);
	return (error);
}

int
kern_readv(struct thread *td, int fd, struct uio *auio)
{
	struct file *fp;
	int error;

	error = fget_read(td, fd, &cap_read_rights, &fp);
	if (error)
		return (error);
	error = dofileread(td, fd, fp, auio, (off_t)-1, 0);
	fdrop(fp, td);
	return (error);
}

/*
 * Scatter positioned read system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct preadv_args {
	int	fd;
	struct iovec *iovp;
	u_int	iovcnt;
	off_t	offset;
};
#endif
int
sys_preadv(struct thread *td, struct preadv_args *uap)
{
	struct uio *auio;
	int error;

	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
	if (error)
		return (error);
	error = kern_preadv(td, uap->fd, auio, uap->offset);
	free(auio, M_IOV);
	return (error);
}

int
kern_preadv(struct thread *td, int fd, struct uio *auio, off_t offset)
{
	struct file *fp;
	int error;

	error = fget_read(td, fd, &cap_pread_rights, &fp);
	if (error)
		return (error);
	if (!(fp->f_ops->fo_flags & DFLAG_SEEKABLE))
		error = ESPIPE;
	else if (offset < 0 &&
	    (fp->f_vnode == NULL || fp->f_vnode->v_type != VCHR))
		error = EINVAL;
	else
		error = dofileread(td, fd, fp, auio, offset, FOF_OFFSET);
	fdrop(fp, td);
	return (error);
}
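
/*
 * Illustrative sketch of the DFLAG_SEEKABLE check above: positioned
 * reads fail on non-seekable descriptors such as pipes and sockets,
 * e.g.
 *
 *	int fds[2];
 *	char c;
 *
 *	pipe(fds);
 *	pread(fds[0], &c, 1, 0);	// fails with ESPIPE
 *
 * while a plain read(2) or readv(2) on the same descriptor behaves as
 * usual.
 */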

/*
 * Common code for readv and preadv that reads data in
 * from a file using the passed in uio, offset, and flags.
 */
static int
dofileread(struct thread *td, int fd, struct file *fp, struct uio *auio,
    off_t offset, int flags)
{
	ssize_t cnt;
	int error;
#ifdef KTRACE
	struct uio *ktruio = NULL;
#endif

	AUDIT_ARG_FD(fd);

	/* Finish zero length reads right here */
	if (auio->uio_resid == 0) {
		td->td_retval[0] = 0;
		return (0);
	}
	auio->uio_rw = UIO_READ;
	auio->uio_offset = offset;
	auio->uio_td = td;
#ifdef KTRACE
	if (KTRPOINT(td, KTR_GENIO))
		ktruio = cloneuio(auio);
#endif
	cnt = auio->uio_resid;
	if ((error = fo_read(fp, auio, td->td_ucred, flags, td))) {
		if (auio->uio_resid != cnt && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
	}
	cnt -= auio->uio_resid;
#ifdef KTRACE
	if (ktruio != NULL) {
		ktruio->uio_resid = cnt;
		ktrgenio(fd, UIO_READ, ktruio, error);
	}
#endif
	td->td_retval[0] = cnt;
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct write_args {
	int	fd;
	const void *buf;
	size_t	nbyte;
};
#endif
int
sys_write(struct thread *td, struct write_args *uap)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if (uap->nbyte > IOSIZE_MAX)
		return (EINVAL);
	aiov.iov_base = (void *)(uintptr_t)uap->buf;
	aiov.iov_len = uap->nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = uap->nbyte;
	auio.uio_segflg = UIO_USERSPACE;
	error = kern_writev(td, uap->fd, &auio);
	return (error);
}

/*
 * Positioned write system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct pwrite_args {
	int	fd;
	const void *buf;
	size_t	nbyte;
	int	pad;
	off_t	offset;
};
#endif
int
sys_pwrite(struct thread *td, struct pwrite_args *uap)
{

	return (kern_pwrite(td, uap->fd, uap->buf, uap->nbyte, uap->offset));
}

int
kern_pwrite(struct thread *td, int fd, const void *buf, size_t nbyte,
    off_t offset)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if (nbyte > IOSIZE_MAX)
		return (EINVAL);
	aiov.iov_base = (void *)(uintptr_t)buf;
	aiov.iov_len = nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = nbyte;
	auio.uio_segflg = UIO_USERSPACE;
	error = kern_pwritev(td, fd, &auio, offset);
	return (error);
}

#if defined(COMPAT_FREEBSD6)
int
freebsd6_pwrite(struct thread *td, struct freebsd6_pwrite_args *uap)
{

	return (kern_pwrite(td, uap->fd, uap->buf, uap->nbyte, uap->offset));
}
#endif

/*
 * Gather write system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct writev_args {
	int	fd;
	struct iovec *iovp;
	u_int	iovcnt;
};
#endif
int
sys_writev(struct thread *td, struct writev_args *uap)
{
	struct uio *auio;
	int error;

	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
	if (error)
		return (error);
	error = kern_writev(td, uap->fd, auio);
	free(auio, M_IOV);
	return (error);
}

int
kern_writev(struct thread *td, int fd, struct uio *auio)
{
	struct file *fp;
	int error;

	error = fget_write(td, fd, &cap_write_rights, &fp);
	if (error)
		return (error);
	error = dofilewrite(td, fd, fp, auio, (off_t)-1, 0);
	fdrop(fp, td);
	return (error);
}
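
/*
 * Illustrative sketch of a gather write as handled by sys_writev()
 * above: copyinuio() assembles the user's iovec array into a single
 * uio, so both chunks go out through one fo_write() call:
 *
 *	struct iovec iov[2] = {
 *		{ .iov_base = "hello, ", .iov_len = 7 },
 *		{ .iov_base = "world\n", .iov_len = 6 },
 *	};
 *
 *	writev(fd, iov, 2);	// writes "hello, world\n"
 */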

/*
 * Gather positioned write system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct pwritev_args {
	int	fd;
	struct iovec *iovp;
	u_int	iovcnt;
	off_t	offset;
};
#endif
int
sys_pwritev(struct thread *td, struct pwritev_args *uap)
{
	struct uio *auio;
	int error;

	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
	if (error)
		return (error);
	error = kern_pwritev(td, uap->fd, auio, uap->offset);
	free(auio, M_IOV);
	return (error);
}

int
kern_pwritev(struct thread *td, int fd, struct uio *auio, off_t offset)
{
	struct file *fp;
	int error;

	error = fget_write(td, fd, &cap_pwrite_rights, &fp);
	if (error)
		return (error);
	if (!(fp->f_ops->fo_flags & DFLAG_SEEKABLE))
		error = ESPIPE;
	else if (offset < 0 &&
	    (fp->f_vnode == NULL || fp->f_vnode->v_type != VCHR))
		error = EINVAL;
	else
		error = dofilewrite(td, fd, fp, auio, offset, FOF_OFFSET);
	fdrop(fp, td);
	return (error);
}

/*
 * Common code for writev and pwritev that writes data to
 * a file using the passed in uio, offset, and flags.
 */
static int
dofilewrite(struct thread *td, int fd, struct file *fp, struct uio *auio,
    off_t offset, int flags)
{
	ssize_t cnt;
	int error;
#ifdef KTRACE
	struct uio *ktruio = NULL;
#endif

	AUDIT_ARG_FD(fd);
	auio->uio_rw = UIO_WRITE;
	auio->uio_td = td;
	auio->uio_offset = offset;
#ifdef KTRACE
	if (KTRPOINT(td, KTR_GENIO))
		ktruio = cloneuio(auio);
#endif
	cnt = auio->uio_resid;
	error = fo_write(fp, auio, td->td_ucred, flags, td);
	/*
	 * Socket layer is responsible for special error handling,
	 * see sousrsend().
	 */
	if (error != 0 && fp->f_type != DTYPE_SOCKET) {
		if (auio->uio_resid != cnt && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
		if (error == EPIPE) {
			PROC_LOCK(td->td_proc);
			tdsignal(td, SIGPIPE);
			PROC_UNLOCK(td->td_proc);
		}
	}
	cnt -= auio->uio_resid;
#ifdef KTRACE
	if (ktruio != NULL) {
		ktruio->uio_resid = cnt;
		ktrgenio(fd, UIO_WRITE, ktruio, error);
	}
#endif
	td->td_retval[0] = cnt;
	return (error);
}
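
/*
 * Illustrative sketch of the EPIPE handling above: a write to a pipe
 * whose read end has been closed both fails with EPIPE and delivers
 * SIGPIPE to the writing thread (for non-socket files), e.g.
 *
 *	int fds[2];
 *
 *	pipe(fds);
 *	close(fds[0]);
 *	signal(SIGPIPE, SIG_IGN);	// otherwise the default action kills us
 *	write(fds[1], "x", 1);		// returns -1 with errno == EPIPE
 */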

/*
 * Truncate a file given a file descriptor.
 *
 * Can't use fget_write() here, since we must return EINVAL and not EBADF
 * if the descriptor isn't writable.
 */
int
kern_ftruncate(struct thread *td, int fd, off_t length)
{
	struct file *fp;
	int error;

	AUDIT_ARG_FD(fd);
	if (length < 0)
		return (EINVAL);
	error = fget(td, fd, &cap_ftruncate_rights, &fp);
	if (error)
		return (error);
	AUDIT_ARG_FILE(td->td_proc, fp);
	if (!(fp->f_flag & FWRITE)) {
		fdrop(fp, td);
		return (EINVAL);
	}
	error = fo_truncate(fp, length, td->td_ucred, td);
	fdrop(fp, td);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ftruncate_args {
	int	fd;
	int	pad;
	off_t	length;
};
#endif
int
sys_ftruncate(struct thread *td, struct ftruncate_args *uap)
{

	return (kern_ftruncate(td, uap->fd, uap->length));
}

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct oftruncate_args {
	int	fd;
	long	length;
};
#endif
int
oftruncate(struct thread *td, struct oftruncate_args *uap)
{

	return (kern_ftruncate(td, uap->fd, uap->length));
}
#endif /* COMPAT_43 */
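
/*
 * Illustrative example of the EINVAL-over-EBADF rule above:
 *
 *	int fd = open("/etc/passwd", O_RDONLY);
 *
 *	ftruncate(fd, 0);	// fails with EINVAL, not EBADF,
 *				// because fd is valid but not writable
 */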

#ifndef _SYS_SYSPROTO_H_
struct ioctl_args {
	int	fd;
	u_long	com;
	caddr_t	data;
};
#endif
/* ARGSUSED */
int
sys_ioctl(struct thread *td, struct ioctl_args *uap)
{
	u_char smalldata[SYS_IOCTL_SMALL_SIZE] __aligned(SYS_IOCTL_SMALL_ALIGN);
	uint32_t com;
	int arg, error;
	u_int size;
	caddr_t data;

#ifdef INVARIANTS
	if (uap->com > 0xffffffff) {
		printf(
		    "WARNING pid %d (%s): ioctl sign-extension ioctl %lx\n",
		    td->td_proc->p_pid, td->td_name, uap->com);
	}
#endif
	com = (uint32_t)uap->com;

	/*
	 * Interpret the high-order word to find the amount of data to be
	 * copied to/from the user's address space.
	 */
	size = IOCPARM_LEN(com);
	if ((size > IOCPARM_MAX) ||
	    ((com & (IOC_VOID | IOC_IN | IOC_OUT)) == 0) ||
#if defined(COMPAT_FREEBSD5) || defined(COMPAT_FREEBSD4) || defined(COMPAT_43)
	    ((com & IOC_OUT) && size == 0) ||
#else
	    ((com & (IOC_IN | IOC_OUT)) && size == 0) ||
#endif
	    ((com & IOC_VOID) && size > 0 && size != sizeof(int)))
		return (ENOTTY);

	if (size > 0) {
		if (com & IOC_VOID) {
			/* Integer argument. */
			arg = (intptr_t)uap->data;
			data = (void *)&arg;
			size = 0;
		} else {
			if (size > SYS_IOCTL_SMALL_SIZE)
				data = malloc((u_long)size, M_IOCTLOPS, M_WAITOK);
			else
				data = smalldata;
		}
	} else
		data = (void *)&uap->data;
	if (com & IOC_IN) {
		error = copyin(uap->data, data, (u_int)size);
		if (error != 0)
			goto out;
	} else if (com & IOC_OUT) {
		/*
		 * Zero the buffer so the user always
		 * gets back something deterministic.
		 */
		bzero(data, size);
	}

	error = kern_ioctl(td, uap->fd, com, data);

	if (error == 0 && (com & IOC_OUT))
		error = copyout(data, uap->data, (u_int)size);

out:
	if (size > SYS_IOCTL_SMALL_SIZE)
		free(data, M_IOCTLOPS);
	return (error);
}

int
kern_ioctl(struct thread *td, int fd, u_long com, caddr_t data)
{
	struct file *fp;
	struct filedesc *fdp;
	int error, tmp, locked;

	AUDIT_ARG_FD(fd);
	AUDIT_ARG_CMD(com);

	fdp = td->td_proc->p_fd;

	switch (com) {
	case FIONCLEX:
	case FIOCLEX:
		FILEDESC_XLOCK(fdp);
		locked = LA_XLOCKED;
		break;
	default:
#ifdef CAPABILITIES
		FILEDESC_SLOCK(fdp);
		locked = LA_SLOCKED;
#else
		locked = LA_UNLOCKED;
#endif
		break;
	}

#ifdef CAPABILITIES
	if ((fp = fget_noref(fdp, fd)) == NULL) {
		error = EBADF;
		goto out;
	}
	if ((error = cap_ioctl_check(fdp, fd, com)) != 0) {
		fp = NULL;	/* fhold() was not called yet */
		goto out;
	}
	if (!fhold(fp)) {
		error = EBADF;
		fp = NULL;
		goto out;
	}
	if (locked == LA_SLOCKED) {
		FILEDESC_SUNLOCK(fdp);
		locked = LA_UNLOCKED;
	}
#else
	error = fget(td, fd, &cap_ioctl_rights, &fp);
	if (error != 0) {
		fp = NULL;
		goto out;
	}
#endif
	if ((fp->f_flag & (FREAD | FWRITE)) == 0) {
		error = EBADF;
		goto out;
	}

	switch (com) {
	case FIONCLEX:
		fdp->fd_ofiles[fd].fde_flags &= ~UF_EXCLOSE;
		goto out;
	case FIOCLEX:
		fdp->fd_ofiles[fd].fde_flags |= UF_EXCLOSE;
		goto out;
	case FIONBIO:
		if ((tmp = *(int *)data))
			atomic_set_int(&fp->f_flag, FNONBLOCK);
		else
			atomic_clear_int(&fp->f_flag, FNONBLOCK);
		data = (void *)&tmp;
		break;
	case FIOASYNC:
		if ((tmp = *(int *)data))
			atomic_set_int(&fp->f_flag, FASYNC);
		else
			atomic_clear_int(&fp->f_flag, FASYNC);
		data = (void *)&tmp;
		break;
	}

	error = fo_ioctl(fp, com, data, td->td_ucred, td);
out:
	switch (locked) {
	case LA_XLOCKED:
		FILEDESC_XUNLOCK(fdp);
		break;
#ifdef CAPABILITIES
	case LA_SLOCKED:
		FILEDESC_SUNLOCK(fdp);
		break;
#endif
	default:
		FILEDESC_UNLOCK_ASSERT(fdp);
		break;
	}
	if (fp != NULL)
		fdrop(fp, td);
	return (error);
}
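
/*
 * Illustrative sketch: FIONBIO and FIOASYNC are handled generically
 * above, toggling f_flag bits before the normalized argument is passed
 * on to fo_ioctl().  The usual userspace pattern is
 *
 *	int on = 1;
 *
 *	ioctl(fd, FIONBIO, &on);	// equivalent to setting O_NONBLOCK
 *
 * FIONBIO is _IOW('f', 126, int), so sys_ioctl() copies in exactly
 * sizeof(int) bytes for it, as encoded by IOCPARM_LEN().
 */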

int
sys_posix_fallocate(struct thread *td, struct posix_fallocate_args *uap)
{
	int error;

	error = kern_posix_fallocate(td, uap->fd, uap->offset, uap->len);
	return (kern_posix_error(td, error));
}

int
kern_posix_fallocate(struct thread *td, int fd, off_t offset, off_t len)
{
	struct file *fp;
	int error;

	AUDIT_ARG_FD(fd);
	if (offset < 0 || len <= 0)
		return (EINVAL);
	/* Check for wrap. */
	if (offset > OFF_MAX - len)
		return (EFBIG);
	AUDIT_ARG_FD(fd);
	error = fget(td, fd, &cap_pwrite_rights, &fp);
	if (error != 0)
		return (error);
	AUDIT_ARG_FILE(td->td_proc, fp);
	if ((fp->f_ops->fo_flags & DFLAG_SEEKABLE) == 0) {
		error = ESPIPE;
		goto out;
	}
	if ((fp->f_flag & FWRITE) == 0) {
		error = EBADF;
		goto out;
	}

	error = fo_fallocate(fp, offset, len, td);
out:
	fdrop(fp, td);
	return (error);
}

int
sys_fspacectl(struct thread *td, struct fspacectl_args *uap)
{
	struct spacectl_range rqsr, rmsr;
	int error, cerror;

	error = copyin(uap->rqsr, &rqsr, sizeof(rqsr));
	if (error != 0)
		return (error);

	error = kern_fspacectl(td, uap->fd, uap->cmd, &rqsr, uap->flags,
	    &rmsr);
	if (uap->rmsr != NULL) {
		cerror = copyout(&rmsr, uap->rmsr, sizeof(rmsr));
		if (error == 0)
			error = cerror;
	}
	return (error);
}

int
kern_fspacectl(struct thread *td, int fd, int cmd,
    const struct spacectl_range *rqsr, int flags, struct spacectl_range *rmsrp)
{
	struct file *fp;
	struct spacectl_range rmsr;
	int error;

	AUDIT_ARG_FD(fd);
	AUDIT_ARG_CMD(cmd);
	AUDIT_ARG_FFLAGS(flags);

	if (rqsr == NULL)
		return (EINVAL);
	rmsr = *rqsr;
	if (rmsrp != NULL)
		*rmsrp = rmsr;

	if (cmd != SPACECTL_DEALLOC ||
	    rqsr->r_offset < 0 || rqsr->r_len <= 0 ||
	    rqsr->r_offset > OFF_MAX - rqsr->r_len ||
	    (flags & ~SPACECTL_F_SUPPORTED) != 0)
		return (EINVAL);

	error = fget_write(td, fd, &cap_pwrite_rights, &fp);
	if (error != 0)
		return (error);
	AUDIT_ARG_FILE(td->td_proc, fp);
	if ((fp->f_ops->fo_flags & DFLAG_SEEKABLE) == 0) {
		error = ESPIPE;
		goto out;
	}
	if ((fp->f_flag & FWRITE) == 0) {
		error = EBADF;
		goto out;
	}

	error = fo_fspacectl(fp, cmd, &rmsr.r_offset, &rmsr.r_len, flags,
	    td->td_ucred, td);
	/* fspacectl is not restarted after signals if the file is modified. */
	if (rmsr.r_len != rqsr->r_len && (error == ERESTART ||
	    error == EINTR || error == EWOULDBLOCK))
		error = 0;
	if (rmsrp != NULL)
		*rmsrp = rmsr;
out:
	fdrop(fp, td);
	return (error);
}
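
/*
 * Illustrative sketch of an fspacectl(2) call that deallocates (hole
 * punches) a byte range; on partial progress the remaining range is
 * written back through rmsr, matching the no-restart rule above:
 *
 *	struct spacectl_range rqsr = { .r_offset = 0, .r_len = len };
 *	struct spacectl_range rmsr;
 *
 *	fspacectl(fd, SPACECTL_DEALLOC, &rqsr, 0, &rmsr);
 *	// rmsr.r_len should reach 0 on full completion
 */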

int
kern_specialfd(struct thread *td, int type, void *arg)
{
	struct file *fp;
	struct specialfd_eventfd *ae;
	int error, fd, fflags;

	fflags = 0;
	error = falloc_noinstall(td, &fp);
	if (error != 0)
		return (error);

	switch (type) {
	case SPECIALFD_EVENTFD:
		ae = arg;
		if ((ae->flags & EFD_CLOEXEC) != 0)
			fflags |= O_CLOEXEC;
		error = eventfd_create_file(td, fp, ae->initval, ae->flags);
		break;
	default:
		error = EINVAL;
		break;
	}

	if (error == 0)
		error = finstall(td, fp, &fd, fflags, NULL);
	fdrop(fp, td);
	if (error == 0)
		td->td_retval[0] = fd;
	return (error);
}

int
sys___specialfd(struct thread *td, struct __specialfd_args *args)
{
	struct specialfd_eventfd ae;
	int error;

	switch (args->type) {
	case SPECIALFD_EVENTFD:
		if (args->len != sizeof(struct specialfd_eventfd)) {
			error = EINVAL;
			break;
		}
		error = copyin(args->req, &ae, sizeof(ae));
		if (error != 0)
			break;
		if ((ae.flags & ~(EFD_CLOEXEC | EFD_NONBLOCK |
		    EFD_SEMAPHORE)) != 0) {
			error = EINVAL;
			break;
		}
		error = kern_specialfd(td, args->type, &ae);
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

int
poll_no_poll(int events)
{
	/*
	 * Return true for read/write.  If the user asked for something
	 * special, return POLLNVAL, so that clients have a way of
	 * determining reliably whether or not the extended
	 * functionality is present without hard-coding knowledge
	 * of specific filesystem implementations.
	 */
	if (events & ~POLLSTANDARD)
		return (POLLNVAL);

	return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

int
sys_pselect(struct thread *td, struct pselect_args *uap)
{
	struct timespec ts;
	struct timeval tv, *tvp;
	sigset_t set, *uset;
	int error;

	if (uap->ts != NULL) {
		error = copyin(uap->ts, &ts, sizeof(ts));
		if (error != 0)
			return (error);
		TIMESPEC_TO_TIMEVAL(&tv, &ts);
		tvp = &tv;
	} else
		tvp = NULL;
	if (uap->sm != NULL) {
		error = copyin(uap->sm, &set, sizeof(set));
		if (error != 0)
			return (error);
		uset = &set;
	} else
		uset = NULL;
	return (kern_pselect(td, uap->nd, uap->in, uap->ou, uap->ex, tvp,
	    uset, NFDBITS));
}

int
kern_pselect(struct thread *td, int nd, fd_set *in, fd_set *ou, fd_set *ex,
    struct timeval *tvp, sigset_t *uset, int abi_nfdbits)
{
	int error;

	if (uset != NULL) {
		error = kern_sigprocmask(td, SIG_SETMASK, uset,
		    &td->td_oldsigmask, 0);
		if (error != 0)
			return (error);
		td->td_pflags |= TDP_OLDMASK;
		/*
		 * Make sure that ast() is called on return to
		 * usermode and TDP_OLDMASK is cleared, restoring old
		 * sigmask.
		 */
		ast_sched(td, TDA_SIGSUSPEND);
	}
	error = kern_select(td, nd, in, ou, ex, tvp, abi_nfdbits);
	return (error);
}
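
/*
 * Illustrative sketch of why the temporary sigmask matters: the classic
 * pselect(2) pattern unblocks signals only for the duration of the
 * wait, closing the race between a pending-signal check and the sleep:
 *
 *	sigset_t waitmask;
 *
 *	sigemptyset(&waitmask);
 *	// signals are unblocked only while pselect() sleeps
 *	pselect(nfds, &rfds, NULL, NULL, NULL, &waitmask);
 *
 * The old mask is restored on return to usermode via the AST scheduled
 * above.
 */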

#ifndef _SYS_SYSPROTO_H_
struct select_args {
	int	nd;
	fd_set	*in, *ou, *ex;
	struct	timeval *tv;
};
#endif
int
sys_select(struct thread *td, struct select_args *uap)
{
	struct timeval tv, *tvp;
	int error;

	if (uap->tv != NULL) {
		error = copyin(uap->tv, &tv, sizeof(tv));
		if (error)
			return (error);
		tvp = &tv;
	} else
		tvp = NULL;

	return (kern_select(td, uap->nd, uap->in, uap->ou, uap->ex, tvp,
	    NFDBITS));
}

/*
 * In the unlikely case when the user specified an n greater than the
 * last open file descriptor, check that no bits are set after the last
 * valid fd.  We must return EBADF if any is set.
 *
 * There are applications that rely on this behaviour.
 *
 * nd is fd_nfiles.
 */
static int
select_check_badfd(fd_set *fd_in, int nd, int ndu, int abi_nfdbits)
{
	char *addr, *oaddr;
	int b, i, res;
	uint8_t bits;

	if (nd >= ndu || fd_in == NULL)
		return (0);

	oaddr = NULL;
	bits = 0; /* silence gcc */
	for (i = nd; i < ndu; i++) {
		b = i / NBBY;
#if BYTE_ORDER == LITTLE_ENDIAN
		addr = (char *)fd_in + b;
#else
		addr = (char *)fd_in;
		if (abi_nfdbits == NFDBITS) {
			addr += rounddown(b, sizeof(fd_mask)) +
			    sizeof(fd_mask) - 1 - b % sizeof(fd_mask);
		} else {
			addr += rounddown(b, sizeof(uint32_t)) +
			    sizeof(uint32_t) - 1 - b % sizeof(uint32_t);
		}
#endif
		if (addr != oaddr) {
			res = fubyte(addr);
			if (res == -1)
				return (EFAULT);
			oaddr = addr;
			bits = res;
		}
		if ((bits & (1 << (i % NBBY))) != 0)
			return (EBADF);
	}
	return (0);
}
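
/*
 * Illustrative example: if a process's descriptor table ends well below
 * fd 200 and it calls
 *
 *	FD_ZERO(&rfds);
 *	FD_SET(200, &rfds);
 *	select(201, &rfds, NULL, NULL, NULL);
 *
 * the bit at 200 lies past the last valid descriptor, so the check
 * above makes select(2) fail with EBADF rather than silently ignoring
 * the stray bit.
 */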

int
kern_select(struct thread *td, int nd, fd_set *fd_in, fd_set *fd_ou,
    fd_set *fd_ex, struct timeval *tvp, int abi_nfdbits)
{
	struct filedesc *fdp;
	/*
	 * The magic 2048 here is chosen to be just enough for FD_SETSIZE
	 * infds with the new FD_SETSIZE of 1024, and more than enough for
	 * FD_SETSIZE infds, outfds and exceptfds with the old FD_SETSIZE
	 * of 256.
	 */
	fd_mask s_selbits[howmany(2048, NFDBITS)];
	fd_mask *ibits[3], *obits[3], *selbits, *sbp;
	struct timeval rtv;
	sbintime_t asbt, precision, rsbt;
	u_int nbufbytes, ncpbytes, ncpubytes, nfdbits;
	int error, lf, ndu;

	if (nd < 0)
		return (EINVAL);
	fdp = td->td_proc->p_fd;
	ndu = nd;
	lf = fdp->fd_nfiles;
	if (nd > lf)
		nd = lf;

	error = select_check_badfd(fd_in, nd, ndu, abi_nfdbits);
	if (error != 0)
		return (error);
	error = select_check_badfd(fd_ou, nd, ndu, abi_nfdbits);
	if (error != 0)
		return (error);
	error = select_check_badfd(fd_ex, nd, ndu, abi_nfdbits);
	if (error != 0)
		return (error);

	/*
	 * Allocate just enough bits for the non-null fd_sets.  Use the
	 * preallocated auto buffer if possible.
	 */
	nfdbits = roundup(nd, NFDBITS);
	ncpbytes = nfdbits / NBBY;
	ncpubytes = roundup(nd, abi_nfdbits) / NBBY;
	nbufbytes = 0;
	if (fd_in != NULL)
		nbufbytes += 2 * ncpbytes;
	if (fd_ou != NULL)
		nbufbytes += 2 * ncpbytes;
	if (fd_ex != NULL)
		nbufbytes += 2 * ncpbytes;
	if (nbufbytes <= sizeof s_selbits)
		selbits = &s_selbits[0];
	else
		selbits = malloc(nbufbytes, M_SELECT, M_WAITOK);

	/*
	 * Assign pointers into the bit buffers and fetch the input bits.
	 * Put the output buffers together so that they can be bzeroed
	 * together.
	 */
	sbp = selbits;
#define	getbits(name, x) \
	do {								\
		if (name == NULL) {					\
			ibits[x] = NULL;				\
			obits[x] = NULL;				\
		} else {						\
			ibits[x] = sbp + nbufbytes / 2 / sizeof *sbp;	\
			obits[x] = sbp;					\
			sbp += ncpbytes / sizeof *sbp;			\
			error = copyin(name, ibits[x], ncpubytes);	\
			if (error != 0)					\
				goto done;				\
			if (ncpbytes != ncpubytes)			\
				bzero((char *)ibits[x] + ncpubytes,	\
				    ncpbytes - ncpubytes);		\
		}							\
	} while (0)
	getbits(fd_in, 0);
	getbits(fd_ou, 1);
	getbits(fd_ex, 2);
#undef	getbits

#if BYTE_ORDER == BIG_ENDIAN && defined(__LP64__)
	/*
	 * XXX: swizzle_fdset assumes that if abi_nfdbits != NFDBITS,
	 * we are running under 32-bit emulation. This should be more
	 * generic.
	 */
#define	swizzle_fdset(bits)						\
	if (abi_nfdbits != NFDBITS && bits != NULL) {			\
		int i;							\
		for (i = 0; i < ncpbytes / sizeof *sbp; i++)		\
			bits[i] = (bits[i] >> 32) | (bits[i] << 32);	\
	}
#else
#define	swizzle_fdset(bits)
#endif

	/* Make sure the bit order makes it through an ABI transition */
	swizzle_fdset(ibits[0]);
	swizzle_fdset(ibits[1]);
	swizzle_fdset(ibits[2]);

	if (nbufbytes != 0)
		bzero(selbits, nbufbytes / 2);

	precision = 0;
	if (tvp != NULL) {
		rtv = *tvp;
		if (rtv.tv_sec < 0 || rtv.tv_usec < 0 ||
		    rtv.tv_usec >= 1000000) {
			error = EINVAL;
			goto done;
		}
		if (!timevalisset(&rtv))
			asbt = 0;
		else if (rtv.tv_sec <= INT32_MAX) {
			rsbt = tvtosbt(rtv);
			precision = rsbt;
			precision >>= tc_precexp;
			if (TIMESEL(&asbt, rsbt))
				asbt += tc_tick_sbt;
			if (asbt <= SBT_MAX - rsbt)
				asbt += rsbt;
			else
				asbt = -1;
		} else
			asbt = -1;
	} else
		asbt = -1;
	seltdinit(td);
	/* Iterate until the timeout expires or descriptors become ready. */
	for (;;) {
		error = selscan(td, ibits, obits, nd);
		if (error || td->td_retval[0] != 0)
			break;
		error = seltdwait(td, asbt, precision);
		if (error)
			break;
		error = selrescan(td, ibits, obits);
		if (error || td->td_retval[0] != 0)
			break;
	}
	seltdclear(td);

done:
	/* select is not restarted after signals... */
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;

	/* swizzle bit order back, if necessary */
	swizzle_fdset(obits[0]);
	swizzle_fdset(obits[1]);
	swizzle_fdset(obits[2]);
#undef swizzle_fdset

#define	putbits(name, x) \
	if (name && (error2 = copyout(obits[x], name, ncpubytes))) \
		error = error2;
	if (error == 0) {
		int error2;

		putbits(fd_in, 0);
		putbits(fd_ou, 1);
		putbits(fd_ex, 2);
#undef putbits
	}
	if (selbits != &s_selbits[0])
		free(selbits, M_SELECT);

	return (error);
}
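
/*
 * Illustrative reminder of the in/out semantics implemented by the
 * getbits/putbits pair above: the descriptor sets are rewritten with
 * the ready subset on return, so a caller must reload them before each
 * call:
 *
 *	fd_set rfds;
 *
 *	for (;;) {
 *		FD_ZERO(&rfds);
 *		FD_SET(sock, &rfds);
 *		if (select(sock + 1, &rfds, NULL, NULL, NULL) > 0 &&
 *		    FD_ISSET(sock, &rfds))
 *			handle_input();	// hypothetical handler
 *	}
 */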

/*
 * Convert a select bit set to poll flags.
 *
 * The backend always returns POLLHUP/POLLERR if appropriate and we
 * return this as a set bit in any set.
 */
static const int select_flags[3] = {
    POLLRDNORM | POLLHUP | POLLERR,
    POLLWRNORM | POLLHUP | POLLERR,
    POLLRDBAND | POLLERR
};

/*
 * Compute the fo_poll flags required for a fd given by the index and
 * bit position in the fd_mask array.
 */
static __inline int
selflags(fd_mask **ibits, int idx, fd_mask bit)
{
	int flags;
	int msk;

	flags = 0;
	for (msk = 0; msk < 3; msk++) {
		if (ibits[msk] == NULL)
			continue;
		if ((ibits[msk][idx] & bit) == 0)
			continue;
		flags |= select_flags[msk];
	}
	return (flags);
}

/*
 * Set the appropriate output bits given a mask of fired events and the
 * input bits originally requested.
 */
static __inline int
selsetbits(fd_mask **ibits, fd_mask **obits, int idx, fd_mask bit, int events)
{
	int msk;
	int n;

	n = 0;
	for (msk = 0; msk < 3; msk++) {
		if ((events & select_flags[msk]) == 0)
			continue;
		if (ibits[msk] == NULL)
			continue;
		if ((ibits[msk][idx] & bit) == 0)
			continue;
		/*
		 * XXX Check for a duplicate set.  This can occur because a
		 * socket calls selrecord() twice for each poll() call
		 * resulting in two selfds per real fd.  selrescan() will
		 * call selsetbits twice as a result.
		 */
		if ((obits[msk][idx] & bit) != 0)
			continue;
		obits[msk][idx] |= bit;
		n++;
	}

	return (n);
}
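
/*
 * Illustrative example of the mapping above: an fd present in both the
 * read set (msk 0) and the except set (msk 2) yields fo_poll flags
 * (POLLRDNORM | POLLHUP | POLLERR) | (POLLRDBAND | POLLERR).  If the
 * backend then reports POLLHUP, selsetbits() marks the fd only in the
 * read set, since POLLHUP is part of select_flags[0] but not of
 * select_flags[2].
 */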

/*
 * Traverse the list of fds attached to this thread's seltd and check for
 * completion.
 */
static int
selrescan(struct thread *td, fd_mask **ibits, fd_mask **obits)
{
	struct filedesc *fdp;
	struct selinfo *si;
	struct seltd *stp;
	struct selfd *sfp;
	struct selfd *sfn;
	struct file *fp;
	fd_mask bit;
	int fd, ev, n, idx;
	int error;
	bool only_user;

	fdp = td->td_proc->p_fd;
	stp = td->td_sel;
	n = 0;
	only_user = FILEDESC_IS_ONLY_USER(fdp);
	STAILQ_FOREACH_SAFE(sfp, &stp->st_selq, sf_link, sfn) {
		fd = (int)(uintptr_t)sfp->sf_cookie;
		si = sfp->sf_si;
		selfdfree(stp, sfp);
		/* If the selinfo wasn't cleared the event didn't fire. */
		if (si != NULL)
			continue;
		if (only_user)
			error = fget_only_user(fdp, fd, &cap_event_rights, &fp);
		else
			error = fget_unlocked(td, fd, &cap_event_rights, &fp);
		if (__predict_false(error != 0))
			return (error);
		idx = fd / NFDBITS;
		bit = (fd_mask)1 << (fd % NFDBITS);
		ev = fo_poll(fp, selflags(ibits, idx, bit), td->td_ucred, td);
		if (only_user)
			fput_only_user(fdp, fp);
		else
			fdrop(fp, td);
		if (ev != 0)
			n += selsetbits(ibits, obits, idx, bit, ev);
	}
	stp->st_flags = 0;
	td->td_retval[0] = n;
	return (0);
}

/*
 * Perform the initial file descriptor scan and register ourselves with
 * each selinfo.
 */
static int
selscan(struct thread *td, fd_mask **ibits, fd_mask **obits, int nfd)
{
	struct filedesc *fdp;
	struct file *fp;
	fd_mask bit;
	int ev, flags, end, fd;
	int n, idx;
	int error;
	bool only_user;

	fdp = td->td_proc->p_fd;
	n = 0;
	only_user = FILEDESC_IS_ONLY_USER(fdp);
	for (idx = 0, fd = 0; fd < nfd; idx++) {
		end = imin(fd + NFDBITS, nfd);
		for (bit = 1; fd < end; bit <<= 1, fd++) {
			/* Compute the list of events we're interested in. */
			flags = selflags(ibits, idx, bit);
			if (flags == 0)
				continue;
			if (only_user)
				error = fget_only_user(fdp, fd, &cap_event_rights, &fp);
			else
				error = fget_unlocked(td, fd, &cap_event_rights, &fp);
			if (__predict_false(error != 0))
				return (error);
			selfdalloc(td, (void *)(uintptr_t)fd);
			ev = fo_poll(fp, flags, td->td_ucred, td);
			if (only_user)
				fput_only_user(fdp, fp);
			else
				fdrop(fp, td);
			if (ev != 0)
				n += selsetbits(ibits, obits, idx, bit, ev);
		}
	}

	td->td_retval[0] = n;
	return (0);
}

int
sys_poll(struct thread *td, struct poll_args *uap)
{
	struct timespec ts, *tsp;

	if (uap->timeout != INFTIM) {
		if (uap->timeout < 0)
			return (EINVAL);
		ts.tv_sec = uap->timeout / 1000;
		ts.tv_nsec = (uap->timeout % 1000) * 1000000;
		tsp = &ts;
	} else
		tsp = NULL;

	return (kern_poll(td, uap->fds, uap->nfds, tsp, NULL));
}
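
/*
 * Illustrative arithmetic for the millisecond conversion above: a
 * timeout of 1500 ms becomes ts = { .tv_sec = 1, .tv_nsec = 500000000 },
 * while INFTIM (-1) passes tsp == NULL and sleeps without a deadline.
 */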

/*
 * kfds points to an array in the kernel.
 */
int
kern_poll_kfds(struct thread *td, struct pollfd *kfds, u_int nfds,
    struct timespec *tsp, sigset_t *uset)
{
	sbintime_t sbt, precision, tmp;
	time_t over;
	struct timespec ts;
	int error;

	precision = 0;
	if (tsp != NULL) {
		if (!timespecvalid_interval(tsp))
			return (EINVAL);
		if (tsp->tv_sec == 0 && tsp->tv_nsec == 0)
			sbt = 0;
		else {
			ts = *tsp;
			if (ts.tv_sec > INT32_MAX / 2) {
				over = ts.tv_sec - INT32_MAX / 2;
				ts.tv_sec -= over;
			} else
				over = 0;
			tmp = tstosbt(ts);
			precision = tmp;
			precision >>= tc_precexp;
			if (TIMESEL(&sbt, tmp))
				sbt += tc_tick_sbt;
			sbt += tmp;
		}
	} else
		sbt = -1;

	if (uset != NULL) {
		error = kern_sigprocmask(td, SIG_SETMASK, uset,
		    &td->td_oldsigmask, 0);
		if (error)
			return (error);
		td->td_pflags |= TDP_OLDMASK;
		/*
		 * Make sure that ast() is called on return to
		 * usermode and TDP_OLDMASK is cleared, restoring old
		 * sigmask.
		 */
		ast_sched(td, TDA_SIGSUSPEND);
	}

	seltdinit(td);
	/* Iterate until the timeout expires or descriptors become ready. */
	for (;;) {
		error = pollscan(td, kfds, nfds);
		if (error || td->td_retval[0] != 0)
			break;
		error = seltdwait(td, sbt, precision);
		if (error)
			break;
		error = pollrescan(td);
		if (error || td->td_retval[0] != 0)
			break;
	}
	seltdclear(td);

	/* poll is not restarted after signals... */
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;
	return (error);
}

int
sys_ppoll(struct thread *td, struct ppoll_args *uap)
{
	struct timespec ts, *tsp;
	sigset_t set, *ssp;
	int error;

	if (uap->ts != NULL) {
		error = copyin(uap->ts, &ts, sizeof(ts));
		if (error)
			return (error);
		tsp = &ts;
	} else
		tsp = NULL;
	if (uap->set != NULL) {
		error = copyin(uap->set, &set, sizeof(set));
		if (error)
			return (error);
		ssp = &set;
	} else
		ssp = NULL;
	return (kern_poll(td, uap->fds, uap->nfds, tsp, ssp));
}

/*
 * ufds points to an array in user space.
 */
int
kern_poll(struct thread *td, struct pollfd *ufds, u_int nfds,
    struct timespec *tsp, sigset_t *set)
{
	struct pollfd *kfds;
	struct pollfd stackfds[32];
	int error;

	if (kern_poll_maxfds(nfds))
		return (EINVAL);
	if (nfds > nitems(stackfds))
		kfds = mallocarray(nfds, sizeof(*kfds), M_TEMP, M_WAITOK);
	else
		kfds = stackfds;
	error = copyin(ufds, kfds, nfds * sizeof(*kfds));
	if (error != 0)
		goto out;

	error = kern_poll_kfds(td, kfds, nfds, tsp, set);
	if (error == 0)
		error = pollout(td, kfds, ufds, nfds);

out:
	if (nfds > nitems(stackfds))
		free(kfds, M_TEMP);
	return (error);
}

bool
kern_poll_maxfds(u_int nfds)
{

	/*
	 * This is kinda bogus.  We have fd limits, but that is not
	 * really related to the size of the pollfd array.  Make sure
	 * we let the process use at least FD_SETSIZE entries and at
	 * least enough for the system-wide limits.  We want to be reasonably
	 * safe, but not overly restrictive.
	 */
	return (nfds > maxfilesperproc && nfds > FD_SETSIZE);
}
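
/*
 * Illustrative example of the limit above: nfds is rejected only when
 * it exceeds both bounds.  E.g., if maxfilesperproc were 1024 (and
 * FD_SETSIZE is 1024 by default), a poll(2) of 1024 entries would be
 * accepted and 1025 rejected, while nfds <= FD_SETSIZE is always
 * accepted regardless of the per-process file limit.
 */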

static int
pollrescan(struct thread *td)
{
	struct seltd *stp;
	struct selfd *sfp;
	struct selfd *sfn;
	struct selinfo *si;
	struct filedesc *fdp;
	struct file *fp;
	struct pollfd *fd;
	int n, error;
	bool only_user;

	n = 0;
	fdp = td->td_proc->p_fd;
	stp = td->td_sel;
	only_user = FILEDESC_IS_ONLY_USER(fdp);
	STAILQ_FOREACH_SAFE(sfp, &stp->st_selq, sf_link, sfn) {
		fd = (struct pollfd *)sfp->sf_cookie;
		si = sfp->sf_si;
		selfdfree(stp, sfp);
		/* If the selinfo wasn't cleared the event didn't fire. */
		if (si != NULL)
			continue;
		if (only_user)
			error = fget_only_user(fdp, fd->fd, &cap_event_rights, &fp);
		else
			error = fget_unlocked(td, fd->fd, &cap_event_rights, &fp);
		if (__predict_false(error != 0)) {
			fd->revents = POLLNVAL;
			n++;
			continue;
		}
		/*
		 * Note: backend also returns POLLHUP and
		 * POLLERR if appropriate.
		 */
		fd->revents = fo_poll(fp, fd->events, td->td_ucred, td);
		if (only_user)
			fput_only_user(fdp, fp);
		else
			fdrop(fp, td);
		if (fd->revents != 0)
			n++;
	}
	stp->st_flags = 0;
	td->td_retval[0] = n;
	return (0);
}

static int
pollout(struct thread *td, struct pollfd *fds, struct pollfd *ufds, u_int nfd)
{
	int error = 0;
	u_int i = 0;
	u_int n = 0;

	for (i = 0; i < nfd; i++) {
		error = copyout(&fds->revents, &ufds->revents,
		    sizeof(ufds->revents));
		if (error)
			return (error);
		if (fds->revents != 0)
			n++;
		fds++;
		ufds++;
	}
	td->td_retval[0] = n;
	return (0);
}

static int
pollscan(struct thread *td, struct pollfd *fds, u_int nfd)
{
	struct filedesc *fdp;
	struct file *fp;
	int i, n, error;
	bool only_user;

	n = 0;
	fdp = td->td_proc->p_fd;
	only_user = FILEDESC_IS_ONLY_USER(fdp);
	for (i = 0; i < nfd; i++, fds++) {
		if (fds->fd < 0) {
			fds->revents = 0;
			continue;
		}
		if (only_user)
			error = fget_only_user(fdp, fds->fd, &cap_event_rights, &fp);
		else
			error = fget_unlocked(td, fds->fd, &cap_event_rights, &fp);
		if (__predict_false(error != 0)) {
			fds->revents = POLLNVAL;
			n++;
			continue;
		}
		/*
		 * Note: backend also returns POLLHUP and
		 * POLLERR if appropriate.
		 */
		selfdalloc(td, fds);
		fds->revents = fo_poll(fp, fds->events,
		    td->td_ucred, td);
		if (only_user)
			fput_only_user(fdp, fp);
		else
			fdrop(fp, td);
		/*
		 * POSIX requires POLLOUT to be never
		 * set simultaneously with POLLHUP.
		 */
		if ((fds->revents & POLLHUP) != 0)
			fds->revents &= ~POLLOUT;

		if (fds->revents != 0)
			n++;
	}
	td->td_retval[0] = n;
	return (0);
}

/*
 * XXX This was created specifically to support netncp and netsmb.  This
 * allows the caller to specify a socket to wait for events on.  It returns
 * 0 if any events matched and an error otherwise.  There is no way to
 * determine which events fired.
 */
int
selsocket(struct socket *so, int events, struct timeval *tvp, struct thread *td)
{
	struct timeval rtv;
	sbintime_t asbt, precision, rsbt;
	int error;

	precision = 0;	/* stupid gcc! */
	if (tvp != NULL) {
		rtv = *tvp;
		if (rtv.tv_sec < 0 || rtv.tv_usec < 0 ||
		    rtv.tv_usec >= 1000000)
			return (EINVAL);
		if (!timevalisset(&rtv))
			asbt = 0;
		else if (rtv.tv_sec <= INT32_MAX) {
			rsbt = tvtosbt(rtv);
			precision = rsbt;
			precision >>= tc_precexp;
			if (TIMESEL(&asbt, rsbt))
				asbt += tc_tick_sbt;
			if (asbt <= SBT_MAX - rsbt)
				asbt += rsbt;
			else
				asbt = -1;
		} else
			asbt = -1;
	} else
		asbt = -1;
	seltdinit(td);
	/*
	 * Iterate until the timeout expires or the socket becomes ready.
	 */
	for (;;) {
		selfdalloc(td, NULL);
		if (sopoll(so, events, NULL, td) != 0) {
			error = 0;
			break;
		}
		error = seltdwait(td, asbt, precision);
		if (error)
			break;
	}
	seltdclear(td);
	/* XXX Duplicates ncp/smb behavior. */
	if (error == ERESTART)
		error = 0;
	return (error);
}
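
/*
 * Illustrative in-kernel usage of selsocket(), under the caveats in the
 * comment above (the timeout and event set here are hypothetical):
 *
 *	struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };
 *	int error;
 *
 *	error = selsocket(so, POLLIN | POLLRDNORM, &tv, curthread);
 *	// error == 0: some requested event fired; EWOULDBLOCK: timed out
 */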

/*
 * Preallocate two selfds associated with 'cookie'.  Some fo_poll routines
 * have two select sets, one for read and another for write.
 */
static void
selfdalloc(struct thread *td, void *cookie)
{
	struct seltd *stp;

	stp = td->td_sel;
	if (stp->st_free1 == NULL)
		stp->st_free1 = malloc(sizeof(*stp->st_free1), M_SELFD, M_WAITOK|M_ZERO);
	stp->st_free1->sf_td = stp;
	stp->st_free1->sf_cookie = cookie;
	if (stp->st_free2 == NULL)
		stp->st_free2 = malloc(sizeof(*stp->st_free2), M_SELFD, M_WAITOK|M_ZERO);
	stp->st_free2->sf_td = stp;
	stp->st_free2->sf_cookie = cookie;
}

static void
selfdfree(struct seltd *stp, struct selfd *sfp)
{
	STAILQ_REMOVE(&stp->st_selq, sfp, selfd, sf_link);
	/*
	 * Paired with doselwakeup.
	 */
	if (atomic_load_acq_ptr((uintptr_t *)&sfp->sf_si) != (uintptr_t)NULL) {
		mtx_lock(sfp->sf_mtx);
		if (sfp->sf_si != NULL) {
			TAILQ_REMOVE(&sfp->sf_si->si_tdlist, sfp, sf_threads);
		}
		mtx_unlock(sfp->sf_mtx);
	}
	free(sfp, M_SELFD);
}

/* Drain the waiters tied to all the selfd belonging to the specified selinfo. */
void
seldrain(struct selinfo *sip)
{

	/*
	 * This functionality is already provided by doselwakeup(), so it
	 * is enough to call it here.  Eventually, the caller should take
	 * care to avoid races between a thread calling select()/poll()
	 * and file descriptor detach, but, again, those races are just
	 * the same as for selwakeup().
	 */
	doselwakeup(sip, -1);
}

/*
 * Record a select request.
 */
void
selrecord(struct thread *selector, struct selinfo *sip)
{
	struct selfd *sfp;
	struct seltd *stp;
	struct mtx *mtxp;

	stp = selector->td_sel;
	/*
	 * Don't record when doing a rescan.
	 */
	if (stp->st_flags & SELTD_RESCAN)
		return;
	/*
	 * Grab one of the preallocated descriptors.
	 */
	sfp = NULL;
	if ((sfp = stp->st_free1) != NULL)
		stp->st_free1 = NULL;
	else if ((sfp = stp->st_free2) != NULL)
		stp->st_free2 = NULL;
	else
		panic("selrecord: No free selfd on selq");
	mtxp = sip->si_mtx;
	if (mtxp == NULL)
		mtxp = mtx_pool_find(mtxpool_select, sip);
	/*
	 * Initialize the sfp and queue it in the thread.
	 */
	sfp->sf_si = sip;
	sfp->sf_mtx = mtxp;
	STAILQ_INSERT_TAIL(&stp->st_selq, sfp, sf_link);
	/*
	 * Now that we've locked the sip, check for initialization.
	 */
	mtx_lock(mtxp);
	if (sip->si_mtx == NULL) {
		sip->si_mtx = mtxp;
		TAILQ_INIT(&sip->si_tdlist);
	}
	/*
	 * Add this thread to the list of selfds listening on this selinfo.
	 */
	TAILQ_INSERT_TAIL(&sip->si_tdlist, sfp, sf_threads);
	mtx_unlock(sip->si_mtx);
}
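
/*
 * Illustrative sketch of the canonical selrecord()/selwakeup() pairing
 * in a driver's poll method (the names here are hypothetical):
 *
 *	static int
 *	foo_poll(struct cdev *dev, int events, struct thread *td)
 *	{
 *		struct foo_softc *sc = dev->si_drv1;
 *		int revents = 0;
 *
 *		if (sc->sc_ready)
 *			revents = events & (POLLIN | POLLRDNORM);
 *		else
 *			selrecord(td, &sc->sc_rsel);	// sleep on this selinfo
 *		return (revents);
 *	}
 *
 * The interrupt path then calls selwakeup(&sc->sc_rsel) when data
 * arrives, waking the threads recorded above.
 */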

/* Wake up a selecting thread. */
void
selwakeup(struct selinfo *sip)
{
	doselwakeup(sip, -1);
}

/* Wake up a selecting thread, and set its priority. */
void
selwakeuppri(struct selinfo *sip, int pri)
{
	doselwakeup(sip, pri);
}

/*
 * Do a wakeup when a selectable event occurs.
 */
static void
doselwakeup(struct selinfo *sip, int pri)
{
	struct selfd *sfp;
	struct selfd *sfn;
	struct seltd *stp;

	/* If it's not initialized there can't be any waiters. */
	if (sip->si_mtx == NULL)
		return;
	/*
	 * Locking the selinfo locks all selfds associated with it.
	 */
	mtx_lock(sip->si_mtx);
	TAILQ_FOREACH_SAFE(sfp, &sip->si_tdlist, sf_threads, sfn) {
		/*
		 * Once we remove this sfp from the list and clear the
		 * sf_si, seltdclear will know to ignore this si.
		 */
		TAILQ_REMOVE(&sip->si_tdlist, sfp, sf_threads);
		stp = sfp->sf_td;
		mtx_lock(&stp->st_mtx);
		stp->st_flags |= SELTD_PENDING;
		cv_broadcastpri(&stp->st_wait, pri);
		mtx_unlock(&stp->st_mtx);
		/*
		 * Paired with selfdfree.
		 *
		 * Storing this only after the wakeup provides an invariant
		 * that stp is not used after selfdfree returns.
		 */
		atomic_store_rel_ptr((uintptr_t *)&sfp->sf_si, (uintptr_t)NULL);
	}
	mtx_unlock(sip->si_mtx);
}

static void
seltdinit(struct thread *td)
{
	struct seltd *stp;

	stp = td->td_sel;
	if (stp != NULL) {
		MPASS(stp->st_flags == 0);
		MPASS(STAILQ_EMPTY(&stp->st_selq));
		return;
	}
	stp = malloc(sizeof(*stp), M_SELECT, M_WAITOK|M_ZERO);
	mtx_init(&stp->st_mtx, "sellck", NULL, MTX_DEF);
	cv_init(&stp->st_wait, "select");
	stp->st_flags = 0;
	STAILQ_INIT(&stp->st_selq);
	td->td_sel = stp;
}

static int
seltdwait(struct thread *td, sbintime_t sbt, sbintime_t precision)
{
	struct seltd *stp;
	int error;

	stp = td->td_sel;
	/*
	 * An event of interest may occur while we do not hold the seltd
	 * lock, so check the pending flag before we sleep.
	 */
	mtx_lock(&stp->st_mtx);
	/*
	 * Any further calls to selrecord will be a rescan.
	 */
	stp->st_flags |= SELTD_RESCAN;
	if (stp->st_flags & SELTD_PENDING) {
		mtx_unlock(&stp->st_mtx);
		return (0);
	}
	if (sbt == 0)
		error = EWOULDBLOCK;
	else if (sbt != -1)
		error = cv_timedwait_sig_sbt(&stp->st_wait, &stp->st_mtx,
		    sbt, precision, C_ABSOLUTE);
	else
		error = cv_wait_sig(&stp->st_wait, &stp->st_mtx);
	mtx_unlock(&stp->st_mtx);

	return (error);
}

void
seltdfini(struct thread *td)
{
	struct seltd *stp;

	stp = td->td_sel;
	if (stp == NULL)
		return;
	MPASS(stp->st_flags == 0);
	MPASS(STAILQ_EMPTY(&stp->st_selq));
	if (stp->st_free1)
		free(stp->st_free1, M_SELFD);
	if (stp->st_free2)
		free(stp->st_free2, M_SELFD);
	td->td_sel = NULL;
	cv_destroy(&stp->st_wait);
	mtx_destroy(&stp->st_mtx);
	free(stp, M_SELECT);
}

/*
 * Remove the references to the thread from all of the objects we were
 * polling.
 */
static void
seltdclear(struct thread *td)
{
	struct seltd *stp;
	struct selfd *sfp;
	struct selfd *sfn;

	stp = td->td_sel;
	STAILQ_FOREACH_SAFE(sfp, &stp->st_selq, sf_link, sfn)
		selfdfree(stp, sfp);
	stp->st_flags = 0;
}

static void selectinit(void *);
SYSINIT(select, SI_SUB_SYSCALLS, SI_ORDER_ANY, selectinit, NULL);
static void
selectinit(void *dummy __unused)
{

	mtxpool_select = mtx_pool_create("select mtxpool", 128, MTX_DEF);
}

/*
 * Set up a syscall return value that follows the convention specified for
 * posix_* functions.
 */
int
kern_posix_error(struct thread *td, int error)
{

	if (error <= 0)
		return (error);
	td->td_errno = error;
	td->td_pflags |= TDP_NERRNO;
	td->td_retval[0] = error;
	return (0);
}
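
/*
 * Illustrative example of the posix_* convention implemented above:
 * posix_fallocate(2) reports failure through its return value rather
 * than through errno, so callers check it directly:
 *
 *	int rc = posix_fallocate(fd, 0, 1 << 20);
 *
 *	if (rc != 0)
 *		warnx("posix_fallocate: %s", strerror(rc));
 */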