/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *      The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_capsicum.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/capsicum.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/socketvar.h>
#include <sys/uio.h>
#include <sys/eventfd.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/resourcevar.h>
#include <sys/selinfo.h>
#include <sys/sleepqueue.h>
#include <sys/specialfd.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vnode.h>
#include <sys/unistd.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/condvar.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <security/audit/audit.h>

/*
 * The following macro defines how many bytes will be allocated from
 * the stack instead of heap memory when passing the ioctl data
 * structures between userspace and the kernel.  Some ioctls with
 * small data structures are used very frequently, and this small
 * on-stack buffer gives a significant speedup for those requests.
 * The value of this define should be greater than or equal to 64
 * bytes and should also be a power of two.  The data structure is
 * currently hard-aligned to an 8-byte boundary on the stack.  This
 * should currently be sufficient for all supported platforms.
 */
#define SYS_IOCTL_SMALL_SIZE    128     /* bytes */
#define SYS_IOCTL_SMALL_ALIGN   8       /* bytes */
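/*
 * Editorial example (not in the original source): the ioctl command
 * word encodes the argument size, so whether the small buffer applies
 * is known up front.  FIONREAD, defined as _IOR('f', 127, int), has
 * IOCPARM_LEN(FIONREAD) == sizeof(int) and easily fits on the stack,
 * while a request encoding more than SYS_IOCTL_SMALL_SIZE bytes falls
 * back to malloc(9).
 */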

#ifdef __LP64__
static int iosize_max_clamp = 0;
SYSCTL_INT(_debug, OID_AUTO, iosize_max_clamp, CTLFLAG_RW,
    &iosize_max_clamp, 0, "Clamp max i/o size to INT_MAX");
static int devfs_iosize_max_clamp = 1;
SYSCTL_INT(_debug, OID_AUTO, devfs_iosize_max_clamp, CTLFLAG_RW,
    &devfs_iosize_max_clamp, 0, "Clamp max i/o size to INT_MAX for devices");
#endif

/*
 * Assert that the return value of read(2) and write(2) syscalls fits
 * into a register.  If not, an architecture will need to provide the
 * usermode wrappers to reconstruct the result.
 */
CTASSERT(sizeof(register_t) >= sizeof(size_t));

static MALLOC_DEFINE(M_IOCTLOPS, "ioctlops", "ioctl data buffer");
static MALLOC_DEFINE(M_SELECT, "select", "select() buffer");
MALLOC_DEFINE(M_IOV, "iov", "large iov's");

static int  pollout(struct thread *, struct pollfd *, struct pollfd *,
                u_int);
static int  pollscan(struct thread *, struct pollfd *, u_int);
static int  pollrescan(struct thread *);
static int  selscan(struct thread *, fd_mask **, fd_mask **, int);
static int  selrescan(struct thread *, fd_mask **, fd_mask **);
static void selfdalloc(struct thread *, void *);
static void selfdfree(struct seltd *, struct selfd *);
static int  dofileread(struct thread *, int, struct file *, struct uio *,
                off_t, int);
static int  dofilewrite(struct thread *, int, struct file *, struct uio *,
                off_t, int);
static void doselwakeup(struct selinfo *, int);
static void seltdinit(struct thread *);
static int  seltdwait(struct thread *, sbintime_t, sbintime_t);
static void seltdclear(struct thread *);

/*
 * One seltd per thread, allocated on demand as needed.
 *
 *      t - protected by st_mtx
 *      k - Only accessed by curthread or read-only
 */
struct seltd {
        STAILQ_HEAD(, selfd)    st_selq;        /* (k) List of selfds. */
        struct selfd            *st_free1;      /* (k) free fd for read set. */
        struct selfd            *st_free2;      /* (k) free fd for write set. */
        struct mtx              st_mtx;         /* Protects struct seltd */
        struct cv               st_wait;        /* (t) Wait channel. */
        int                     st_flags;       /* (t) SELTD_ flags. */
};

#define SELTD_PENDING   0x0001  /* We have pending events. */
#define SELTD_RESCAN    0x0002  /* Doing a rescan. */

/*
 * One selfd allocated per-thread per-file-descriptor.
 *      f - protected by sf_mtx
 */
struct selfd {
        STAILQ_ENTRY(selfd)     sf_link;        /* (k) fds owned by this td. */
        TAILQ_ENTRY(selfd)      sf_threads;     /* (f) fds on this selinfo. */
        struct selinfo          *sf_si;         /* (f) selinfo when linked. */
        struct mtx              *sf_mtx;        /* Pointer to selinfo mtx. */
        struct seltd            *sf_td;         /* (k) owning seltd. */
        void                    *sf_cookie;     /* (k) fd or pollfd. */
};

MALLOC_DEFINE(M_SELFD, "selfd", "selfd");
static struct mtx_pool *mtxpool_select;
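/*
 * Editorial sketch (not in the original source) of how the structures
 * above cooperate: selscan()/pollscan() preallocate selfds via
 * selfdalloc(), fo_poll() backends link them onto a selinfo with
 * selrecord(), doselwakeup() reports an event by clearing sf_si and
 * waking the owning seltd, and selfdfree()/seltdclear() unlink and
 * free them again.
 */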

#ifdef __LP64__
size_t
devfs_iosize_max(void)
{

        return (devfs_iosize_max_clamp || SV_CURPROC_FLAG(SV_ILP32) ?
            INT_MAX : SSIZE_MAX);
}

size_t
iosize_max(void)
{

        return (iosize_max_clamp || SV_CURPROC_FLAG(SV_ILP32) ?
            INT_MAX : SSIZE_MAX);
}
#endif

#ifndef _SYS_SYSPROTO_H_
struct read_args {
        int fd;
        void *buf;
        size_t nbyte;
};
#endif
int
sys_read(struct thread *td, struct read_args *uap)
{
        struct uio auio;
        struct iovec aiov;
        int error;

        if (uap->nbyte > IOSIZE_MAX)
                return (EINVAL);
        aiov.iov_base = uap->buf;
        aiov.iov_len = uap->nbyte;
        auio.uio_iov = &aiov;
        auio.uio_iovcnt = 1;
        auio.uio_resid = uap->nbyte;
        auio.uio_segflg = UIO_USERSPACE;
        error = kern_readv(td, uap->fd, &auio);
        return (error);
}

/*
 * Positioned read system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct pread_args {
        int fd;
        void *buf;
        size_t nbyte;
        int pad;
        off_t offset;
};
#endif
int
sys_pread(struct thread *td, struct pread_args *uap)
{

        return (kern_pread(td, uap->fd, uap->buf, uap->nbyte, uap->offset));
}

int
kern_pread(struct thread *td, int fd, void *buf, size_t nbyte, off_t offset)
{
        struct uio auio;
        struct iovec aiov;
        int error;

        if (nbyte > IOSIZE_MAX)
                return (EINVAL);
        aiov.iov_base = buf;
        aiov.iov_len = nbyte;
        auio.uio_iov = &aiov;
        auio.uio_iovcnt = 1;
        auio.uio_resid = nbyte;
        auio.uio_segflg = UIO_USERSPACE;
        error = kern_preadv(td, fd, &auio, offset);
        return (error);
}

#if defined(COMPAT_FREEBSD6)
int
freebsd6_pread(struct thread *td, struct freebsd6_pread_args *uap)
{

        return (kern_pread(td, uap->fd, uap->buf, uap->nbyte, uap->offset));
}
#endif

/*
 * Scatter read system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct readv_args {
        int fd;
        struct iovec *iovp;
        u_int iovcnt;
};
#endif
int
sys_readv(struct thread *td, struct readv_args *uap)
{
        struct uio *auio;
        int error;

        error = copyinuio(uap->iovp, uap->iovcnt, &auio);
        if (error)
                return (error);
        error = kern_readv(td, uap->fd, auio);
        freeuio(auio);
        return (error);
}

int
kern_readv(struct thread *td, int fd, struct uio *auio)
{
        struct file *fp;
        int error;

        error = fget_read(td, fd, &cap_read_rights, &fp);
        if (error)
                return (error);
        error = dofileread(td, fd, fp, auio, (off_t)-1, 0);
        fdrop(fp, td);
        return (error);
}

/*
 * Scatter positioned read system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct preadv_args {
        int fd;
        struct iovec *iovp;
        u_int iovcnt;
        off_t offset;
};
#endif
int
sys_preadv(struct thread *td, struct preadv_args *uap)
{
        struct uio *auio;
        int error;

        error = copyinuio(uap->iovp, uap->iovcnt, &auio);
        if (error)
                return (error);
        error = kern_preadv(td, uap->fd, auio, uap->offset);
        freeuio(auio);
        return (error);
}

int
kern_preadv(struct thread *td, int fd, struct uio *auio, off_t offset)
{
        struct file *fp;
        int error;

        error = fget_read(td, fd, &cap_pread_rights, &fp);
        if (error)
                return (error);
        if (!(fp->f_ops->fo_flags & DFLAG_SEEKABLE))
                error = ESPIPE;
        else if (offset < 0 &&
            (fp->f_vnode == NULL || fp->f_vnode->v_type != VCHR))
                error = EINVAL;
        else
                error = dofileread(td, fd, fp, auio, offset, FOF_OFFSET);
        fdrop(fp, td);
        return (error);
}

/*
 * Common code for readv and preadv that reads data in
 * from a file using the passed-in uio, offset, and flags.
 */
static int
dofileread(struct thread *td, int fd, struct file *fp, struct uio *auio,
    off_t offset, int flags)
{
        ssize_t cnt;
        int error;
#ifdef KTRACE
        struct uio *ktruio = NULL;
#endif

        AUDIT_ARG_FD(fd);

        /* Finish zero-length reads right here */
        if (auio->uio_resid == 0) {
                td->td_retval[0] = 0;
                return (0);
        }
        auio->uio_rw = UIO_READ;
        auio->uio_offset = offset;
        auio->uio_td = td;
#ifdef KTRACE
        if (KTRPOINT(td, KTR_GENIO))
                ktruio = cloneuio(auio);
#endif
        cnt = auio->uio_resid;
        if ((error = fo_read(fp, auio, td->td_ucred, flags, td))) {
                if (auio->uio_resid != cnt && (error == ERESTART ||
                    error == EINTR || error == EWOULDBLOCK))
                        error = 0;
        }
        cnt -= auio->uio_resid;
#ifdef KTRACE
        if (ktruio != NULL) {
                ktruio->uio_resid = cnt;
                ktrgenio(fd, UIO_READ, ktruio, error);
        }
#endif
        td->td_retval[0] = cnt;
        return (error);
}
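/*
 * Editorial example of dofileread()'s error handling: if a read(2)
 * into a 64 KB buffer is interrupted by a signal after 4096 bytes
 * have already been transferred, ERESTART/EINTR is discarded and the
 * syscall returns the short count of 4096; the error is reported to
 * userspace only when no bytes moved at all.
 */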

#ifndef _SYS_SYSPROTO_H_
struct write_args {
        int fd;
        const void *buf;
        size_t nbyte;
};
#endif
int
sys_write(struct thread *td, struct write_args *uap)
{
        struct uio auio;
        struct iovec aiov;
        int error;

        if (uap->nbyte > IOSIZE_MAX)
                return (EINVAL);
        aiov.iov_base = (void *)(uintptr_t)uap->buf;
        aiov.iov_len = uap->nbyte;
        auio.uio_iov = &aiov;
        auio.uio_iovcnt = 1;
        auio.uio_resid = uap->nbyte;
        auio.uio_segflg = UIO_USERSPACE;
        error = kern_writev(td, uap->fd, &auio);
        return (error);
}

/*
 * Positioned write system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct pwrite_args {
        int fd;
        const void *buf;
        size_t nbyte;
        int pad;
        off_t offset;
};
#endif
int
sys_pwrite(struct thread *td, struct pwrite_args *uap)
{

        return (kern_pwrite(td, uap->fd, uap->buf, uap->nbyte, uap->offset));
}

int
kern_pwrite(struct thread *td, int fd, const void *buf, size_t nbyte,
    off_t offset)
{
        struct uio auio;
        struct iovec aiov;
        int error;

        if (nbyte > IOSIZE_MAX)
                return (EINVAL);
        aiov.iov_base = (void *)(uintptr_t)buf;
        aiov.iov_len = nbyte;
        auio.uio_iov = &aiov;
        auio.uio_iovcnt = 1;
        auio.uio_resid = nbyte;
        auio.uio_segflg = UIO_USERSPACE;
        error = kern_pwritev(td, fd, &auio, offset);
        return (error);
}

#if defined(COMPAT_FREEBSD6)
int
freebsd6_pwrite(struct thread *td, struct freebsd6_pwrite_args *uap)
{

        return (kern_pwrite(td, uap->fd, uap->buf, uap->nbyte, uap->offset));
}
#endif

/*
 * Gather write system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct writev_args {
        int fd;
        struct iovec *iovp;
        u_int iovcnt;
};
#endif
int
sys_writev(struct thread *td, struct writev_args *uap)
{
        struct uio *auio;
        int error;

        error = copyinuio(uap->iovp, uap->iovcnt, &auio);
        if (error)
                return (error);
        error = kern_writev(td, uap->fd, auio);
        freeuio(auio);
        return (error);
}

int
kern_writev(struct thread *td, int fd, struct uio *auio)
{
        struct file *fp;
        int error;

        error = fget_write(td, fd, &cap_write_rights, &fp);
        if (error)
                return (error);
        error = dofilewrite(td, fd, fp, auio, (off_t)-1, 0);
        fdrop(fp, td);
        return (error);
}

/*
 * Gather positioned write system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct pwritev_args {
        int fd;
        struct iovec *iovp;
        u_int iovcnt;
        off_t offset;
};
#endif
int
sys_pwritev(struct thread *td, struct pwritev_args *uap)
{
        struct uio *auio;
        int error;

        error = copyinuio(uap->iovp, uap->iovcnt, &auio);
        if (error)
                return (error);
        error = kern_pwritev(td, uap->fd, auio, uap->offset);
        freeuio(auio);
        return (error);
}

int
kern_pwritev(struct thread *td, int fd, struct uio *auio, off_t offset)
{
        struct file *fp;
        int error;

        error = fget_write(td, fd, &cap_pwrite_rights, &fp);
        if (error)
                return (error);
        if (!(fp->f_ops->fo_flags & DFLAG_SEEKABLE))
                error = ESPIPE;
        else if (offset < 0 &&
            (fp->f_vnode == NULL || fp->f_vnode->v_type != VCHR))
                error = EINVAL;
        else
                error = dofilewrite(td, fd, fp, auio, offset, FOF_OFFSET);
        fdrop(fp, td);
        return (error);
}

/*
 * Common code for writev and pwritev that writes data to
 * a file using the passed-in uio, offset, and flags.
 */
static int
dofilewrite(struct thread *td, int fd, struct file *fp, struct uio *auio,
    off_t offset, int flags)
{
        ssize_t cnt;
        int error;
#ifdef KTRACE
        struct uio *ktruio = NULL;
#endif

        AUDIT_ARG_FD(fd);
        auio->uio_rw = UIO_WRITE;
        auio->uio_td = td;
        auio->uio_offset = offset;
#ifdef KTRACE
        if (KTRPOINT(td, KTR_GENIO))
                ktruio = cloneuio(auio);
#endif
        cnt = auio->uio_resid;
        error = fo_write(fp, auio, td->td_ucred, flags, td);
        /*
         * Socket layer is responsible for special error handling,
         * see sousrsend().
         */
        if (error != 0 && fp->f_type != DTYPE_SOCKET) {
                if (auio->uio_resid != cnt && (error == ERESTART ||
                    error == EINTR || error == EWOULDBLOCK))
                        error = 0;
                if (error == EPIPE) {
                        PROC_LOCK(td->td_proc);
                        tdsignal(td, SIGPIPE);
                        PROC_UNLOCK(td->td_proc);
                }
        }
        cnt -= auio->uio_resid;
#ifdef KTRACE
        if (ktruio != NULL) {
                if (error == 0)
                        ktruio->uio_resid = cnt;
                ktrgenio(fd, UIO_WRITE, ktruio, error);
        }
#endif
        td->td_retval[0] = cnt;
        return (error);
}
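/*
 * Editorial example of dofilewrite()'s EPIPE handling: a write(2) to
 * a pipe whose read end is closed both fails with EPIPE and delivers
 * SIGPIPE to the writing thread, as POSIX requires; sockets are
 * skipped here because sousrsend() implements the equivalent logic in
 * the socket layer.
 */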

/*
 * Truncate a file given a file descriptor.
 *
 * Can't use fget_write() here, since we must return EINVAL and not
 * EBADF if the descriptor isn't writable.
 */
int
kern_ftruncate(struct thread *td, int fd, off_t length)
{
        struct file *fp;
        int error;

        AUDIT_ARG_FD(fd);
        if (length < 0)
                return (EINVAL);
        error = fget(td, fd, &cap_ftruncate_rights, &fp);
        if (error)
                return (error);
        AUDIT_ARG_FILE(td->td_proc, fp);
        if (!(fp->f_flag & FWRITE)) {
                fdrop(fp, td);
                return (EINVAL);
        }
        error = fo_truncate(fp, length, td->td_ucred, td);
        fdrop(fp, td);
        return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ftruncate_args {
        int fd;
        int pad;
        off_t length;
};
#endif
int
sys_ftruncate(struct thread *td, struct ftruncate_args *uap)
{

        return (kern_ftruncate(td, uap->fd, uap->length));
}

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct oftruncate_args {
        int fd;
        long length;
};
#endif
int
oftruncate(struct thread *td, struct oftruncate_args *uap)
{

        return (kern_ftruncate(td, uap->fd, uap->length));
}
#endif /* COMPAT_43 */

#ifndef _SYS_SYSPROTO_H_
struct ioctl_args {
        int fd;
        u_long com;
        caddr_t data;
};
#endif
/* ARGSUSED */
int
sys_ioctl(struct thread *td, struct ioctl_args *uap)
{
        u_char smalldata[SYS_IOCTL_SMALL_SIZE] __aligned(SYS_IOCTL_SMALL_ALIGN);
        uint32_t com;
        int arg, error;
        u_int size;
        caddr_t data;

#ifdef INVARIANTS
        if (uap->com > 0xffffffff) {
                printf(
                    "WARNING pid %d (%s): ioctl sign-extension ioctl %lx\n",
                    td->td_proc->p_pid, td->td_name, uap->com);
        }
#endif
        com = (uint32_t)uap->com;

        /*
         * Interpret high order word to find amount of data to be
         * copied to/from the user's address space.
         */
        size = IOCPARM_LEN(com);
        if ((size > IOCPARM_MAX) ||
            ((com & (IOC_VOID | IOC_IN | IOC_OUT)) == 0) ||
#if defined(COMPAT_FREEBSD5) || defined(COMPAT_FREEBSD4) || defined(COMPAT_43)
            ((com & IOC_OUT) && size == 0) ||
#else
            ((com & (IOC_IN | IOC_OUT)) && size == 0) ||
#endif
            ((com & IOC_VOID) && size > 0 && size != sizeof(int)))
                return (ENOTTY);

        if (size > 0) {
                if (com & IOC_VOID) {
                        /* Integer argument. */
                        arg = (intptr_t)uap->data;
                        data = (void *)&arg;
                        size = 0;
                } else {
                        if (size > SYS_IOCTL_SMALL_SIZE)
                                data = malloc((u_long)size, M_IOCTLOPS, M_WAITOK);
                        else
                                data = smalldata;
                }
        } else
                data = (void *)&uap->data;
        if (com & IOC_IN) {
                error = copyin(uap->data, data, (u_int)size);
                if (error != 0)
                        goto out;
        } else if (com & IOC_OUT) {
                /*
                 * Zero the buffer so the user always
                 * gets back something deterministic.
                 */
                bzero(data, size);
        }

        error = kern_ioctl(td, uap->fd, com, data);

        if (error == 0 && (com & IOC_OUT))
                error = copyout(data, uap->data, (u_int)size);

out:
        if (size > SYS_IOCTL_SMALL_SIZE)
                free(data, M_IOCTLOPS);
        return (error);
}

int
kern_ioctl(struct thread *td, int fd, u_long com, caddr_t data)
{
        struct file *fp;
        struct filedesc *fdp;
        int error, tmp, locked;

        AUDIT_ARG_FD(fd);
        AUDIT_ARG_CMD(com);

        fdp = td->td_proc->p_fd;

        switch (com) {
        case FIONCLEX:
        case FIOCLEX:
                FILEDESC_XLOCK(fdp);
                locked = LA_XLOCKED;
                break;
        default:
#ifdef CAPABILITIES
                FILEDESC_SLOCK(fdp);
                locked = LA_SLOCKED;
#else
                locked = LA_UNLOCKED;
#endif
                break;
        }

#ifdef CAPABILITIES
        if ((fp = fget_noref(fdp, fd)) == NULL) {
                error = EBADF;
                goto out;
        }
        if ((error = cap_ioctl_check(fdp, fd, com)) != 0) {
                fp = NULL;      /* fhold() was not called yet */
                goto out;
        }
        if (!fhold(fp)) {
                error = EBADF;
                fp = NULL;
                goto out;
        }
        if (locked == LA_SLOCKED) {
                FILEDESC_SUNLOCK(fdp);
                locked = LA_UNLOCKED;
        }
#else
        error = fget(td, fd, &cap_ioctl_rights, &fp);
        if (error != 0) {
                fp = NULL;
                goto out;
        }
#endif
        if ((fp->f_flag & (FREAD | FWRITE)) == 0) {
                error = EBADF;
                goto out;
        }

        switch (com) {
        case FIONCLEX:
                fdp->fd_ofiles[fd].fde_flags &= ~UF_EXCLOSE;
                goto out;
        case FIOCLEX:
                fdp->fd_ofiles[fd].fde_flags |= UF_EXCLOSE;
                goto out;
        case FIONBIO:
                if ((tmp = *(int *)data))
                        atomic_set_int(&fp->f_flag, FNONBLOCK);
                else
                        atomic_clear_int(&fp->f_flag, FNONBLOCK);
                data = (void *)&tmp;
                break;
        case FIOASYNC:
                if ((tmp = *(int *)data))
                        atomic_set_int(&fp->f_flag, FASYNC);
                else
                        atomic_clear_int(&fp->f_flag, FASYNC);
                data = (void *)&tmp;
                break;
        }

        error = fo_ioctl(fp, com, data, td->td_ucred, td);
out:
        switch (locked) {
        case LA_XLOCKED:
                FILEDESC_XUNLOCK(fdp);
                break;
#ifdef CAPABILITIES
        case LA_SLOCKED:
                FILEDESC_SUNLOCK(fdp);
                break;
#endif
        default:
                FILEDESC_UNLOCK_ASSERT(fdp);
                break;
        }
        if (fp != NULL)
                fdrop(fp, td);
        return (error);
}
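/*
 * Editorial note on the FIONBIO/FIOASYNC cases above: the generic
 * layer updates f_flag itself and then forwards the request, pointing
 * data at its local copy in tmp.  For example, "int on = 1;
 * ioctl(fd, FIONBIO, &on);" sets FNONBLOCK on the struct file before
 * the backend's fo_ioctl() ever sees the command.
 */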

int
sys_posix_fallocate(struct thread *td, struct posix_fallocate_args *uap)
{
        int error;

        error = kern_posix_fallocate(td, uap->fd, uap->offset, uap->len);
        return (kern_posix_error(td, error));
}

int
kern_posix_fallocate(struct thread *td, int fd, off_t offset, off_t len)
{
        struct file *fp;
        int error;

        AUDIT_ARG_FD(fd);
        if (offset < 0 || len <= 0)
                return (EINVAL);
        /* Check for wrap. */
        if (offset > OFF_MAX - len)
                return (EFBIG);
        AUDIT_ARG_FD(fd);
        error = fget(td, fd, &cap_pwrite_rights, &fp);
        if (error != 0)
                return (error);
        AUDIT_ARG_FILE(td->td_proc, fp);
        if ((fp->f_ops->fo_flags & DFLAG_SEEKABLE) == 0) {
                error = ESPIPE;
                goto out;
        }
        if ((fp->f_flag & FWRITE) == 0) {
                error = EBADF;
                goto out;
        }

        error = fo_fallocate(fp, offset, len, td);
out:
        fdrop(fp, td);
        return (error);
}

int
sys_fspacectl(struct thread *td, struct fspacectl_args *uap)
{
        struct spacectl_range rqsr, rmsr;
        int error, cerror;

        error = copyin(uap->rqsr, &rqsr, sizeof(rqsr));
        if (error != 0)
                return (error);

        error = kern_fspacectl(td, uap->fd, uap->cmd, &rqsr, uap->flags,
            &rmsr);
        if (uap->rmsr != NULL) {
                cerror = copyout(&rmsr, uap->rmsr, sizeof(rmsr));
                if (error == 0)
                        error = cerror;
        }
        return (error);
}

int
kern_fspacectl(struct thread *td, int fd, int cmd,
    const struct spacectl_range *rqsr, int flags, struct spacectl_range *rmsrp)
{
        struct file *fp;
        struct spacectl_range rmsr;
        int error;

        AUDIT_ARG_FD(fd);
        AUDIT_ARG_CMD(cmd);
        AUDIT_ARG_FFLAGS(flags);

        if (rqsr == NULL)
                return (EINVAL);
        rmsr = *rqsr;
        if (rmsrp != NULL)
                *rmsrp = rmsr;

        if (cmd != SPACECTL_DEALLOC ||
            rqsr->r_offset < 0 || rqsr->r_len <= 0 ||
            rqsr->r_offset > OFF_MAX - rqsr->r_len ||
            (flags & ~SPACECTL_F_SUPPORTED) != 0)
                return (EINVAL);

        error = fget_write(td, fd, &cap_pwrite_rights, &fp);
        if (error != 0)
                return (error);
        AUDIT_ARG_FILE(td->td_proc, fp);
        if ((fp->f_ops->fo_flags & DFLAG_SEEKABLE) == 0) {
                error = ESPIPE;
                goto out;
        }
        if ((fp->f_flag & FWRITE) == 0) {
                error = EBADF;
                goto out;
        }

        error = fo_fspacectl(fp, cmd, &rmsr.r_offset, &rmsr.r_len, flags,
            td->td_ucred, td);
        /* fspacectl is not restarted after signals if the file is modified. */
        if (rmsr.r_len != rqsr->r_len && (error == ERESTART ||
            error == EINTR || error == EWOULDBLOCK))
                error = 0;
        if (rmsrp != NULL)
                *rmsrp = rmsr;
out:
        fdrop(fp, td);
        return (error);
}

int
kern_specialfd(struct thread *td, int type, void *arg)
{
        struct file *fp;
        struct specialfd_eventfd *ae;
        int error, fd, fflags;

        fflags = 0;
        error = falloc_noinstall(td, &fp);
        if (error != 0)
                return (error);

        switch (type) {
        case SPECIALFD_EVENTFD:
                ae = arg;
                if ((ae->flags & EFD_CLOEXEC) != 0)
                        fflags |= O_CLOEXEC;
                error = eventfd_create_file(td, fp, ae->initval, ae->flags);
                break;
        default:
                error = EINVAL;
                break;
        }

        if (error == 0)
                error = finstall(td, fp, &fd, fflags, NULL);
        fdrop(fp, td);
        if (error == 0)
                td->td_retval[0] = fd;
        return (error);
}

int
sys___specialfd(struct thread *td, struct __specialfd_args *args)
{
        struct specialfd_eventfd ae;
        int error;

        switch (args->type) {
        case SPECIALFD_EVENTFD:
                if (args->len != sizeof(struct specialfd_eventfd)) {
                        error = EINVAL;
                        break;
                }
                error = copyin(args->req, &ae, sizeof(ae));
                if (error != 0)
                        break;
                if ((ae.flags & ~(EFD_CLOEXEC | EFD_NONBLOCK |
                    EFD_SEMAPHORE)) != 0) {
                        error = EINVAL;
                        break;
                }
                error = kern_specialfd(td, args->type, &ae);
                break;
        default:
                error = EINVAL;
                break;
        }
        return (error);
}

int
poll_no_poll(int events)
{
        /*
         * Return true for read/write.  If the user asked for something
         * special, return POLLNVAL, so that clients have a way of
         * determining reliably whether or not the extended
         * functionality is present without hard-coding knowledge
         * of specific filesystem implementations.
         */
        if (events & ~POLLSTANDARD)
                return (POLLNVAL);

        return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}
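/*
 * Editorial example for poll_no_poll(): a descriptor whose backend
 * uses this helper reports plain POLLIN/POLLOUT queries as always
 * ready, while a request carrying a bit outside POLLSTANDARD (e.g.
 * POLLINIGNEOF) gets POLLNVAL back, letting callers probe for the
 * extended functionality.
 */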

int
sys_pselect(struct thread *td, struct pselect_args *uap)
{
        struct timespec ts;
        struct timeval tv, *tvp;
        sigset_t set, *uset;
        int error;

        if (uap->ts != NULL) {
                error = copyin(uap->ts, &ts, sizeof(ts));
                if (error != 0)
                        return (error);
                TIMESPEC_TO_TIMEVAL(&tv, &ts);
                tvp = &tv;
        } else
                tvp = NULL;
        if (uap->sm != NULL) {
                error = copyin(uap->sm, &set, sizeof(set));
                if (error != 0)
                        return (error);
                uset = &set;
        } else
                uset = NULL;
        return (kern_pselect(td, uap->nd, uap->in, uap->ou, uap->ex, tvp,
            uset, NFDBITS));
}

int
kern_pselect(struct thread *td, int nd, fd_set *in, fd_set *ou, fd_set *ex,
    struct timeval *tvp, sigset_t *uset, int abi_nfdbits)
{
        int error;

        if (uset != NULL) {
                error = kern_sigprocmask(td, SIG_SETMASK, uset,
                    &td->td_oldsigmask, 0);
                if (error != 0)
                        return (error);
                td->td_pflags |= TDP_OLDMASK;
        }
        error = kern_select(td, nd, in, ou, ex, tvp, abi_nfdbits);
        if (uset != NULL) {
                /*
                 * Make sure that ast() is called on return to
                 * usermode and TDP_OLDMASK is cleared, restoring the old
                 * sigmask.  If we didn't get interrupted, then the caller is
                 * likely not expecting a signal to hit that should normally be
                 * blocked by its signal mask, so we restore the mask before
                 * any signals could be delivered.
                 */
                if (error == EINTR) {
                        ast_sched(td, TDA_SIGSUSPEND);
                } else {
                        /* *select(2) should never restart. */
                        MPASS(error != ERESTART);
                        ast_sched(td, TDA_PSELECT);
                }
        }

        return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct select_args {
        int nd;
        fd_set *in, *ou, *ex;
        struct timeval *tv;
};
#endif
int
sys_select(struct thread *td, struct select_args *uap)
{
        struct timeval tv, *tvp;
        int error;

        if (uap->tv != NULL) {
                error = copyin(uap->tv, &tv, sizeof(tv));
                if (error)
                        return (error);
                tvp = &tv;
        } else
                tvp = NULL;

        return (kern_select(td, uap->nd, uap->in, uap->ou, uap->ex, tvp,
            NFDBITS));
}

/*
 * In the unlikely case when the user specified n greater than the last
 * open file descriptor, check that no bits are set after the last
 * valid fd.  We must return EBADF if any are set.
 *
 * There are applications that rely on this behaviour.
 *
 * nd is fd_nfiles.
 */
static int
select_check_badfd(fd_set *fd_in, int nd, int ndu, int abi_nfdbits)
{
        char *addr, *oaddr;
        int b, i, res;
        uint8_t bits;

        if (nd >= ndu || fd_in == NULL)
                return (0);

        oaddr = NULL;
        bits = 0; /* silence gcc */
        for (i = nd; i < ndu; i++) {
                b = i / NBBY;
#if BYTE_ORDER == LITTLE_ENDIAN
                addr = (char *)fd_in + b;
#else
                addr = (char *)fd_in;
                if (abi_nfdbits == NFDBITS) {
                        addr += rounddown(b, sizeof(fd_mask)) +
                            sizeof(fd_mask) - 1 - b % sizeof(fd_mask);
                } else {
                        addr += rounddown(b, sizeof(uint32_t)) +
                            sizeof(uint32_t) - 1 - b % sizeof(uint32_t);
                }
#endif
                if (addr != oaddr) {
                        res = fubyte(addr);
                        if (res == -1)
                                return (EFAULT);
                        oaddr = addr;
                        bits = res;
                }
                if ((bits & (1 << (i % NBBY))) != 0)
                        return (EBADF);
        }
        return (0);
}
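/*
 * Editorial example: if a process's file table holds fd_nfiles == 20
 * slots and the caller passes n == 64 to select(2), any bit set for
 * descriptors 20..63 in one of the input sets makes the call fail
 * with EBADF, which is the historical behaviour the comment above
 * refers to.
 */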

int
kern_select(struct thread *td, int nd, fd_set *fd_in, fd_set *fd_ou,
    fd_set *fd_ex, struct timeval *tvp, int abi_nfdbits)
{
        struct filedesc *fdp;
        /*
         * The magic 2048 here is chosen to be just enough for FD_SETSIZE
         * infds with the new FD_SETSIZE of 1024, and more than enough for
         * FD_SETSIZE infds, outfds and exceptfds with the old FD_SETSIZE
         * of 256.
         */
        fd_mask s_selbits[howmany(2048, NFDBITS)];
        fd_mask *ibits[3], *obits[3], *selbits, *sbp;
        struct timeval rtv;
        sbintime_t asbt, precision, rsbt;
        u_int nbufbytes, ncpbytes, ncpubytes, nfdbits;
        int error, lf, ndu;

        if (nd < 0)
                return (EINVAL);
        fdp = td->td_proc->p_fd;
        ndu = nd;
        lf = fdp->fd_nfiles;
        if (nd > lf)
                nd = lf;

        error = select_check_badfd(fd_in, nd, ndu, abi_nfdbits);
        if (error != 0)
                return (error);
        error = select_check_badfd(fd_ou, nd, ndu, abi_nfdbits);
        if (error != 0)
                return (error);
        error = select_check_badfd(fd_ex, nd, ndu, abi_nfdbits);
        if (error != 0)
                return (error);

        /*
         * Allocate just enough bits for the non-null fd_sets.  Use the
         * preallocated auto buffer if possible.
         */
        nfdbits = roundup(nd, NFDBITS);
        ncpbytes = nfdbits / NBBY;
        ncpubytes = roundup(nd, abi_nfdbits) / NBBY;
        nbufbytes = 0;
        if (fd_in != NULL)
                nbufbytes += 2 * ncpbytes;
        if (fd_ou != NULL)
                nbufbytes += 2 * ncpbytes;
        if (fd_ex != NULL)
                nbufbytes += 2 * ncpbytes;
        if (nbufbytes <= sizeof s_selbits)
                selbits = &s_selbits[0];
        else
                selbits = malloc(nbufbytes, M_SELECT, M_WAITOK);

        /*
         * Assign pointers into the bit buffers and fetch the input bits.
         * Put the output buffers together so that they can be bzeroed
         * together.
         */
        sbp = selbits;
#define getbits(name, x) \
        do { \
                if (name == NULL) { \
                        ibits[x] = NULL; \
                        obits[x] = NULL; \
                } else { \
                        ibits[x] = sbp + nbufbytes / 2 / sizeof *sbp; \
                        obits[x] = sbp; \
                        sbp += ncpbytes / sizeof *sbp; \
                        error = copyin(name, ibits[x], ncpubytes); \
                        if (error != 0) \
                                goto done; \
                        if (ncpbytes != ncpubytes) \
                                bzero((char *)ibits[x] + ncpubytes, \
                                    ncpbytes - ncpubytes); \
                } \
        } while (0)
        getbits(fd_in, 0);
        getbits(fd_ou, 1);
        getbits(fd_ex, 2);
#undef getbits

#if BYTE_ORDER == BIG_ENDIAN && defined(__LP64__)
        /*
         * XXX: swizzle_fdset assumes that if abi_nfdbits != NFDBITS,
         * we are running under 32-bit emulation.  This should be more
         * generic.
         */
#define swizzle_fdset(bits) \
        if (abi_nfdbits != NFDBITS && bits != NULL) { \
                int i; \
                for (i = 0; i < ncpbytes / sizeof *sbp; i++) \
                        bits[i] = (bits[i] >> 32) | (bits[i] << 32); \
        }
#else
#define swizzle_fdset(bits)
#endif

        /* Make sure the bit order makes it through an ABI transition */
        swizzle_fdset(ibits[0]);
        swizzle_fdset(ibits[1]);
        swizzle_fdset(ibits[2]);

        if (nbufbytes != 0)
                bzero(selbits, nbufbytes / 2);

        precision = 0;
        if (tvp != NULL) {
                rtv = *tvp;
                if (rtv.tv_sec < 0 || rtv.tv_usec < 0 ||
                    rtv.tv_usec >= 1000000) {
                        error = EINVAL;
                        goto done;
                }
                if (!timevalisset(&rtv))
                        asbt = 0;
                else if (rtv.tv_sec <= INT32_MAX) {
                        rsbt = tvtosbt(rtv);
                        precision = rsbt;
                        precision >>= tc_precexp;
                        if (TIMESEL(&asbt, rsbt))
                                asbt += tc_tick_sbt;
                        if (asbt <= SBT_MAX - rsbt)
                                asbt += rsbt;
                        else
                                asbt = -1;
                } else
                        asbt = -1;
        } else
                asbt = -1;
        seltdinit(td);
        /* Iterate until the timeout expires or descriptors become ready. */
        for (;;) {
                error = selscan(td, ibits, obits, nd);
                if (error || td->td_retval[0] != 0)
                        break;
                error = seltdwait(td, asbt, precision);
                if (error)
                        break;
                error = selrescan(td, ibits, obits);
                if (error || td->td_retval[0] != 0)
                        break;
        }
        seltdclear(td);

done:
        /* select is not restarted after signals... */
        if (error == ERESTART)
                error = EINTR;
        if (error == EWOULDBLOCK)
                error = 0;

        /* swizzle bit order back, if necessary */
        swizzle_fdset(obits[0]);
        swizzle_fdset(obits[1]);
        swizzle_fdset(obits[2]);
#undef swizzle_fdset

#define putbits(name, x) \
        if (name && (error2 = copyout(obits[x], name, ncpubytes))) \
                error = error2;
        if (error == 0) {
                int error2;

                putbits(fd_in, 0);
                putbits(fd_ou, 1);
                putbits(fd_ex, 2);
#undef putbits
        }
        if (selbits != &s_selbits[0])
                free(selbits, M_SELECT);

        return (error);
}
/*
 * Convert a select bit set to poll flags.
 *
 * The backend always returns POLLHUP/POLLERR if appropriate and we
 * return this as a set bit in any set.
 */
static const int select_flags[3] = {
        POLLRDNORM | POLLHUP | POLLERR,
        POLLWRNORM | POLLHUP | POLLERR,
        POLLRDBAND | POLLERR
};

/*
 * Compute the fo_poll flags required for a fd given by the index and
 * bit position in the fd_mask array.
 */
static __inline int
selflags(fd_mask **ibits, int idx, fd_mask bit)
{
        int flags;
        int msk;

        flags = 0;
        for (msk = 0; msk < 3; msk++) {
                if (ibits[msk] == NULL)
                        continue;
                if ((ibits[msk][idx] & bit) == 0)
                        continue;
                flags |= select_flags[msk];
        }
        return (flags);
}

/*
 * Set the appropriate output bits given a mask of fired events and the
 * input bits originally requested.
 */
static __inline int
selsetbits(fd_mask **ibits, fd_mask **obits, int idx, fd_mask bit, int events)
{
        int msk;
        int n;

        n = 0;
        for (msk = 0; msk < 3; msk++) {
                if ((events & select_flags[msk]) == 0)
                        continue;
                if (ibits[msk] == NULL)
                        continue;
                if ((ibits[msk][idx] & bit) == 0)
                        continue;
                /*
                 * XXX Check for a duplicate set.  This can occur because a
                 * socket calls selrecord() twice for each poll() call
                 * resulting in two selfds per real fd.  selrescan() will
                 * call selsetbits twice as a result.
                 */
                if ((obits[msk][idx] & bit) != 0)
                        continue;
                obits[msk][idx] |= bit;
                n++;
        }

        return (n);
}
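/*
 * Editorial worked example of the idx/bit addressing used by
 * selflags() and selsetbits(): with 64-bit fd_mask (NFDBITS == 64),
 * descriptor 67 maps to idx = 67 / 64 = 1 and
 * bit = (fd_mask)1 << (67 % 64) = 0x8, so ibits[msk][1] & 0x8 tests
 * whether fd 67 belongs to set msk.
 */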

/*
 * Traverse the list of fds attached to this thread's seltd and check for
 * completion.
 */
static int
selrescan(struct thread *td, fd_mask **ibits, fd_mask **obits)
{
        struct filedesc *fdp;
        struct selinfo *si;
        struct seltd *stp;
        struct selfd *sfp;
        struct selfd *sfn;
        struct file *fp;
        fd_mask bit;
        int fd, ev, n, idx;
        int error;
        bool only_user;

        fdp = td->td_proc->p_fd;
        stp = td->td_sel;
        n = 0;
        only_user = FILEDESC_IS_ONLY_USER(fdp);
        STAILQ_FOREACH_SAFE(sfp, &stp->st_selq, sf_link, sfn) {
                fd = (int)(uintptr_t)sfp->sf_cookie;
                si = sfp->sf_si;
                selfdfree(stp, sfp);
                /* If the selinfo wasn't cleared the event didn't fire. */
                if (si != NULL)
                        continue;
                if (only_user)
                        error = fget_only_user(fdp, fd, &cap_event_rights, &fp);
                else
                        error = fget_unlocked(td, fd, &cap_event_rights, &fp);
                if (__predict_false(error != 0))
                        return (error);
                idx = fd / NFDBITS;
                bit = (fd_mask)1 << (fd % NFDBITS);
                ev = fo_poll(fp, selflags(ibits, idx, bit), td->td_ucred, td);
                if (only_user)
                        fput_only_user(fdp, fp);
                else
                        fdrop(fp, td);
                if (ev != 0)
                        n += selsetbits(ibits, obits, idx, bit, ev);
        }
        stp->st_flags = 0;
        td->td_retval[0] = n;
        return (0);
}

/*
 * Perform the initial filedescriptor scan and register ourselves with
 * each selinfo.
 */
static int
selscan(struct thread *td, fd_mask **ibits, fd_mask **obits, int nfd)
{
        struct filedesc *fdp;
        struct file *fp;
        fd_mask bit;
        int ev, flags, end, fd;
        int n, idx;
        int error;
        bool only_user;

        fdp = td->td_proc->p_fd;
        n = 0;
        only_user = FILEDESC_IS_ONLY_USER(fdp);
        for (idx = 0, fd = 0; fd < nfd; idx++) {
                end = imin(fd + NFDBITS, nfd);
                for (bit = 1; fd < end; bit <<= 1, fd++) {
                        /* Compute the list of events we're interested in. */
                        flags = selflags(ibits, idx, bit);
                        if (flags == 0)
                                continue;
                        if (only_user)
                                error = fget_only_user(fdp, fd, &cap_event_rights, &fp);
                        else
                                error = fget_unlocked(td, fd, &cap_event_rights, &fp);
                        if (__predict_false(error != 0))
                                return (error);
                        selfdalloc(td, (void *)(uintptr_t)fd);
                        ev = fo_poll(fp, flags, td->td_ucred, td);
                        if (only_user)
                                fput_only_user(fdp, fp);
                        else
                                fdrop(fp, td);
                        if (ev != 0)
                                n += selsetbits(ibits, obits, idx, bit, ev);
                }
        }

        td->td_retval[0] = n;
        return (0);
}

int
sys_poll(struct thread *td, struct poll_args *uap)
{
        struct timespec ts, *tsp;

        if (uap->timeout != INFTIM) {
                if (uap->timeout < 0)
                        return (EINVAL);
                ts.tv_sec = uap->timeout / 1000;
                ts.tv_nsec = (uap->timeout % 1000) * 1000000;
                tsp = &ts;
        } else
                tsp = NULL;

        return (kern_poll(td, uap->fds, uap->nfds, tsp, NULL));
}

/*
 * kfds points to an array in the kernel.
 */
int
kern_poll_kfds(struct thread *td, struct pollfd *kfds, u_int nfds,
    struct timespec *tsp, sigset_t *uset)
{
        sbintime_t sbt, precision, tmp;
        time_t over;
        struct timespec ts;
        int error;

        precision = 0;
        if (tsp != NULL) {
                if (!timespecvalid_interval(tsp))
                        return (EINVAL);
                if (tsp->tv_sec == 0 && tsp->tv_nsec == 0)
                        sbt = 0;
                else {
                        ts = *tsp;
                        if (ts.tv_sec > INT32_MAX / 2) {
                                over = ts.tv_sec - INT32_MAX / 2;
                                ts.tv_sec -= over;
                        } else
                                over = 0;
                        tmp = tstosbt(ts);
                        precision = tmp;
                        precision >>= tc_precexp;
                        if (TIMESEL(&sbt, tmp))
                                sbt += tc_tick_sbt;
                        sbt += tmp;
                }
        } else
                sbt = -1;

        if (uset != NULL) {
                error = kern_sigprocmask(td, SIG_SETMASK, uset,
                    &td->td_oldsigmask, 0);
                if (error)
                        return (error);
                td->td_pflags |= TDP_OLDMASK;
        }

        seltdinit(td);
        /* Iterate until the timeout expires or descriptors become ready. */
        for (;;) {
                error = pollscan(td, kfds, nfds);
                if (error || td->td_retval[0] != 0)
                        break;
                error = seltdwait(td, sbt, precision);
                if (error)
                        break;
                error = pollrescan(td);
                if (error || td->td_retval[0] != 0)
                        break;
        }
        seltdclear(td);

        /* poll is not restarted after signals... */
        if (error == ERESTART)
                error = EINTR;
        if (error == EWOULDBLOCK)
                error = 0;

        if (uset != NULL) {
                /*
                 * Make sure that ast() is called on return to
                 * usermode and TDP_OLDMASK is cleared, restoring the old
                 * sigmask.  If we didn't get interrupted, then the caller is
                 * likely not expecting a signal to hit that should normally be
                 * blocked by its signal mask, so we restore the mask before
                 * any signals could be delivered.
                 */
                if (error == EINTR)
                        ast_sched(td, TDA_SIGSUSPEND);
                else
                        ast_sched(td, TDA_PSELECT);
        }

        return (error);
}

int
sys_ppoll(struct thread *td, struct ppoll_args *uap)
{
        struct timespec ts, *tsp;
        sigset_t set, *ssp;
        int error;

        if (uap->ts != NULL) {
                error = copyin(uap->ts, &ts, sizeof(ts));
                if (error)
                        return (error);
                tsp = &ts;
        } else
                tsp = NULL;
        if (uap->set != NULL) {
                error = copyin(uap->set, &set, sizeof(set));
                if (error)
                        return (error);
                ssp = &set;
        } else
                ssp = NULL;
        return (kern_poll(td, uap->fds, uap->nfds, tsp, ssp));
}

/*
 * ufds points to an array in user space.
 */
int
kern_poll(struct thread *td, struct pollfd *ufds, u_int nfds,
    struct timespec *tsp, sigset_t *set)
{
        struct pollfd *kfds;
        struct pollfd stackfds[32];
        int error;

        if (kern_poll_maxfds(nfds))
                return (EINVAL);
        if (nfds > nitems(stackfds))
                kfds = mallocarray(nfds, sizeof(*kfds), M_TEMP, M_WAITOK);
        else
                kfds = stackfds;
        error = copyin(ufds, kfds, nfds * sizeof(*kfds));
        if (error != 0)
                goto out;

        error = kern_poll_kfds(td, kfds, nfds, tsp, set);
        if (error == 0)
                error = pollout(td, kfds, ufds, nfds);
#ifdef KTRACE
        if (error == 0 && KTRPOINT(td, KTR_STRUCT_ARRAY))
                ktrstructarray("pollfd", UIO_USERSPACE, ufds, nfds,
                    sizeof(*ufds));
#endif

out:
        if (nfds > nitems(stackfds))
                free(kfds, M_TEMP);
        return (error);
}

bool
kern_poll_maxfds(u_int nfds)
{

        /*
         * This is kinda bogus.  We have fd limits, but that is not
         * really related to the size of the pollfd array.  Make sure
         * we let the process use at least FD_SETSIZE entries and at
         * least enough for the system-wide limits.  We want to be reasonably
         * safe, but not overly restrictive.
         */
        return (nfds > maxfilesperproc && nfds > FD_SETSIZE);
}
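/*
 * Editorial example: with FD_SETSIZE (1024 by default), a poll(2) of
 * up to 1024 entries is always admitted by kern_poll_maxfds(); larger
 * arrays are rejected with EINVAL only once nfds also exceeds the
 * maxfilesperproc limit.
 */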

static int
pollrescan(struct thread *td)
{
        struct seltd *stp;
        struct selfd *sfp;
        struct selfd *sfn;
        struct selinfo *si;
        struct filedesc *fdp;
        struct file *fp;
        struct pollfd *fd;
        int n, error;
        bool only_user;

        n = 0;
        fdp = td->td_proc->p_fd;
        stp = td->td_sel;
        only_user = FILEDESC_IS_ONLY_USER(fdp);
        STAILQ_FOREACH_SAFE(sfp, &stp->st_selq, sf_link, sfn) {
                fd = (struct pollfd *)sfp->sf_cookie;
                si = sfp->sf_si;
                selfdfree(stp, sfp);
                /* If the selinfo wasn't cleared the event didn't fire. */
                if (si != NULL)
                        continue;
                if (only_user)
                        error = fget_only_user(fdp, fd->fd, &cap_event_rights, &fp);
                else
                        error = fget_unlocked(td, fd->fd, &cap_event_rights, &fp);
                if (__predict_false(error != 0)) {
                        fd->revents = POLLNVAL;
                        n++;
                        continue;
                }
                /*
                 * Note: backend also returns POLLHUP and
                 * POLLERR if appropriate.
                 */
                fd->revents = fo_poll(fp, fd->events, td->td_ucred, td);
                if (only_user)
                        fput_only_user(fdp, fp);
                else
                        fdrop(fp, td);
                if (fd->revents != 0)
                        n++;
        }
        stp->st_flags = 0;
        td->td_retval[0] = n;
        return (0);
}

static int
pollout(struct thread *td, struct pollfd *fds, struct pollfd *ufds, u_int nfd)
{
        int error = 0;
        u_int i = 0;
        u_int n = 0;

        for (i = 0; i < nfd; i++) {
                error = copyout(&fds->revents, &ufds->revents,
                    sizeof(ufds->revents));
                if (error)
                        return (error);
                if (fds->revents != 0)
                        n++;
                fds++;
                ufds++;
        }
        td->td_retval[0] = n;
        return (0);
}

static int
pollscan(struct thread *td, struct pollfd *fds, u_int nfd)
{
        struct filedesc *fdp;
        struct file *fp;
        int i, n, error;
        bool only_user;

        n = 0;
        fdp = td->td_proc->p_fd;
        only_user = FILEDESC_IS_ONLY_USER(fdp);
        for (i = 0; i < nfd; i++, fds++) {
                if (fds->fd < 0) {
                        fds->revents = 0;
                        continue;
                }
                if (only_user)
                        error = fget_only_user(fdp, fds->fd, &cap_event_rights, &fp);
                else
                        error = fget_unlocked(td, fds->fd, &cap_event_rights, &fp);
                if (__predict_false(error != 0)) {
                        fds->revents = POLLNVAL;
                        n++;
                        continue;
                }
                /*
                 * Note: backend also returns POLLHUP and
                 * POLLERR if appropriate.
                 */
                selfdalloc(td, fds);
                fds->revents = fo_poll(fp, fds->events,
                    td->td_ucred, td);
                if (only_user)
                        fput_only_user(fdp, fp);
                else
                        fdrop(fp, td);
                /*
                 * POSIX requires that POLLOUT never be set
                 * simultaneously with POLLHUP.
                 */
                if ((fds->revents & POLLHUP) != 0)
                        fds->revents &= ~POLLOUT;

                if (fds->revents != 0)
                        n++;
        }
        td->td_retval[0] = n;
        return (0);
}

/*
 * XXX This was created specifically to support netncp and netsmb.  This
 * allows the caller to specify a socket to wait for events on.  It returns
 * 0 if any events matched and an error otherwise.  There is no way to
 * determine which events fired.
 */
int
selsocket(struct socket *so, int events, struct timeval *tvp, struct thread *td)
{
        struct timeval rtv;
        sbintime_t asbt, precision, rsbt;
        int error;

        precision = 0;  /* stupid gcc! */
        if (tvp != NULL) {
                rtv = *tvp;
                if (rtv.tv_sec < 0 || rtv.tv_usec < 0 ||
                    rtv.tv_usec >= 1000000)
                        return (EINVAL);
                if (!timevalisset(&rtv))
                        asbt = 0;
                else if (rtv.tv_sec <= INT32_MAX) {
                        rsbt = tvtosbt(rtv);
                        precision = rsbt;
                        precision >>= tc_precexp;
                        if (TIMESEL(&asbt, rsbt))
                                asbt += tc_tick_sbt;
                        if (asbt <= SBT_MAX - rsbt)
                                asbt += rsbt;
                        else
                                asbt = -1;
                } else
                        asbt = -1;
        } else
                asbt = -1;
        seltdinit(td);
        /*
         * Iterate until the timeout expires or the socket becomes ready.
         */
        for (;;) {
                selfdalloc(td, NULL);
                if (sopoll(so, events, NULL, td) != 0) {
                        error = 0;
                        break;
                }
                error = seltdwait(td, asbt, precision);
                if (error)
                        break;
        }
        seltdclear(td);
        /* XXX Duplicates ncp/smb behavior. */
        if (error == ERESTART)
                error = 0;
        return (error);
}

/*
 * Preallocate two selfds associated with 'cookie'.  Some fo_poll routines
 * have two select sets, one for read and another for write.
 */
static void
selfdalloc(struct thread *td, void *cookie)
{
        struct seltd *stp;

        stp = td->td_sel;
        if (stp->st_free1 == NULL)
                stp->st_free1 = malloc(sizeof(*stp->st_free1), M_SELFD, M_WAITOK|M_ZERO);
        stp->st_free1->sf_td = stp;
        stp->st_free1->sf_cookie = cookie;
        if (stp->st_free2 == NULL)
                stp->st_free2 = malloc(sizeof(*stp->st_free2), M_SELFD, M_WAITOK|M_ZERO);
        stp->st_free2->sf_td = stp;
        stp->st_free2->sf_cookie = cookie;
}

static void
selfdfree(struct seltd *stp, struct selfd *sfp)
{
        STAILQ_REMOVE(&stp->st_selq, sfp, selfd, sf_link);
        /*
         * Paired with doselwakeup.
         */
        if (atomic_load_acq_ptr((uintptr_t *)&sfp->sf_si) != (uintptr_t)NULL) {
                mtx_lock(sfp->sf_mtx);
                if (sfp->sf_si != NULL) {
                        TAILQ_REMOVE(&sfp->sf_si->si_tdlist, sfp, sf_threads);
                }
                mtx_unlock(sfp->sf_mtx);
        }
        free(sfp, M_SELFD);
}
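/*
 * Editorial note on the ordering above: the atomic_load_acq_ptr() of
 * sf_si in selfdfree() pairs with the atomic_store_rel_ptr() in
 * doselwakeup(), so once a waker has cleared sf_si it is guaranteed
 * not to touch the seltd again and the selfd can be freed without
 * taking the selinfo lock on that path.
 */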

/* Drain the waiters tied to all the selfd belonging to the specified selinfo. */
void
seldrain(struct selinfo *sip)
{

        /*
         * This feature is already provided by doselwakeup(), thus it is
         * enough to go for it.
         * Eventually, the context should take care to avoid races
         * between threads calling select()/poll() and file descriptor
         * detaching, but, again, the races are just the same as for
         * selwakeup().
         */
        doselwakeup(sip, -1);
}

/*
 * Record a select request.
 */
void
selrecord(struct thread *selector, struct selinfo *sip)
{
        struct selfd *sfp;
        struct seltd *stp;
        struct mtx *mtxp;

        stp = selector->td_sel;
        /*
         * Don't record when doing a rescan.
         */
        if (stp->st_flags & SELTD_RESCAN)
                return;
        /*
         * Grab one of the preallocated descriptors.
         */
        sfp = NULL;
        if ((sfp = stp->st_free1) != NULL)
                stp->st_free1 = NULL;
        else if ((sfp = stp->st_free2) != NULL)
                stp->st_free2 = NULL;
        else
                panic("selrecord: No free selfd on selq");
        mtxp = sip->si_mtx;
        if (mtxp == NULL)
                mtxp = mtx_pool_find(mtxpool_select, sip);
        /*
         * Initialize the sfp and queue it in the thread.
         */
        sfp->sf_si = sip;
        sfp->sf_mtx = mtxp;
        STAILQ_INSERT_TAIL(&stp->st_selq, sfp, sf_link);
        /*
         * Now that we've locked the sip, check for initialization.
         */
        mtx_lock(mtxp);
        if (sip->si_mtx == NULL) {
                sip->si_mtx = mtxp;
                TAILQ_INIT(&sip->si_tdlist);
        }
        /*
         * Add this thread to the list of selfds listening on this selinfo.
         */
        TAILQ_INSERT_TAIL(&sip->si_tdlist, sfp, sf_threads);
        mtx_unlock(sip->si_mtx);
}

/* Wake up a selecting thread. */
void
selwakeup(struct selinfo *sip)
{
        doselwakeup(sip, -1);
}

/* Wake up a selecting thread, and set its priority. */
void
selwakeuppri(struct selinfo *sip, int pri)
{
        doselwakeup(sip, pri);
}

/*
 * Do a wakeup when a selectable event occurs.
 */
static void
doselwakeup(struct selinfo *sip, int pri)
{
        struct selfd *sfp;
        struct selfd *sfn;
        struct seltd *stp;

        /* If it's not initialized there can't be any waiters. */
        if (sip->si_mtx == NULL)
                return;
        /*
         * Locking the selinfo locks all selfds associated with it.
         */
        mtx_lock(sip->si_mtx);
        TAILQ_FOREACH_SAFE(sfp, &sip->si_tdlist, sf_threads, sfn) {
                /*
                 * Once we remove this sfp from the list and clear the
                 * sf_si, seltdclear will know to ignore this si.
                 */
                TAILQ_REMOVE(&sip->si_tdlist, sfp, sf_threads);
                stp = sfp->sf_td;
                mtx_lock(&stp->st_mtx);
                stp->st_flags |= SELTD_PENDING;
                cv_broadcastpri(&stp->st_wait, pri);
                mtx_unlock(&stp->st_mtx);
                /*
                 * Paired with selfdfree.
                 *
                 * Storing this only after the wakeup provides an invariant that
                 * stp is not used after selfdfree returns.
                 */
                atomic_store_rel_ptr((uintptr_t *)&sfp->sf_si, (uintptr_t)NULL);
        }
        mtx_unlock(sip->si_mtx);
}

static void
seltdinit(struct thread *td)
{
        struct seltd *stp;

        stp = td->td_sel;
        if (stp != NULL) {
                MPASS(stp->st_flags == 0);
                MPASS(STAILQ_EMPTY(&stp->st_selq));
                return;
        }
        stp = malloc(sizeof(*stp), M_SELECT, M_WAITOK|M_ZERO);
        mtx_init(&stp->st_mtx, "sellck", NULL, MTX_DEF);
        cv_init(&stp->st_wait, "select");
        stp->st_flags = 0;
        STAILQ_INIT(&stp->st_selq);
        td->td_sel = stp;
}

static int
seltdwait(struct thread *td, sbintime_t sbt, sbintime_t precision)
{
        struct seltd *stp;
        int error;

        stp = td->td_sel;
        /*
         * An event of interest may occur while we do not hold the seltd
         * locked so check the pending flag before we sleep.
         */
        mtx_lock(&stp->st_mtx);
        /*
         * Any further calls to selrecord will be a rescan.
         */
        stp->st_flags |= SELTD_RESCAN;
        if (stp->st_flags & SELTD_PENDING) {
                mtx_unlock(&stp->st_mtx);
                return (0);
        }
        if (sbt == 0)
                error = EWOULDBLOCK;
        else if (sbt != -1)
                error = cv_timedwait_sig_sbt(&stp->st_wait, &stp->st_mtx,
                    sbt, precision, C_ABSOLUTE);
        else
                error = cv_wait_sig(&stp->st_wait, &stp->st_mtx);
        mtx_unlock(&stp->st_mtx);

        return (error);
}
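/*
 * Editorial sketch of the wait protocol above: doselwakeup() sets
 * SELTD_PENDING under st_mtx, so the pending check in seltdwait()
 * cannot miss an event that fired between the scan and the sleep,
 * while SELTD_RESCAN tells later selrecord() calls not to enqueue new
 * selfds while selrescan()/pollrescan() re-poll the already
 * registered set.
 */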

void
seltdfini(struct thread *td)
{
        struct seltd *stp;

        stp = td->td_sel;
        if (stp == NULL)
                return;
        MPASS(stp->st_flags == 0);
        MPASS(STAILQ_EMPTY(&stp->st_selq));
        if (stp->st_free1)
                free(stp->st_free1, M_SELFD);
        if (stp->st_free2)
                free(stp->st_free2, M_SELFD);
        td->td_sel = NULL;
        cv_destroy(&stp->st_wait);
        mtx_destroy(&stp->st_mtx);
        free(stp, M_SELECT);
}

/*
 * Remove the references to the thread from all of the objects we were
 * polling.
 */
static void
seltdclear(struct thread *td)
{
        struct seltd *stp;
        struct selfd *sfp;
        struct selfd *sfn;

        stp = td->td_sel;
        STAILQ_FOREACH_SAFE(sfp, &stp->st_selq, sf_link, sfn)
                selfdfree(stp, sfp);
        stp->st_flags = 0;
}

static void selectinit(void *);
SYSINIT(select, SI_SUB_SYSCALLS, SI_ORDER_ANY, selectinit, NULL);
static void
selectinit(void *dummy __unused)
{

        mtxpool_select = mtx_pool_create("select mtxpool", 128, MTX_DEF);
}

/*
 * Set up a syscall return value that follows the convention specified for
 * posix_* functions.
 */
int
kern_posix_error(struct thread *td, int error)
{

        if (error <= 0)
                return (error);
        td->td_errno = error;
        td->td_pflags |= TDP_NERRNO;
        td->td_retval[0] = error;
        return (0);
}

int
kcmp_cmp(uintptr_t a, uintptr_t b)
{
        if (a == b)
                return (0);
        else if (a < b)
                return (1);
        return (2);
}

static int
kcmp_pget(struct thread *td, pid_t pid, struct proc **pp)
{
        int error;

        if (pid == td->td_proc->p_pid) {
                *pp = td->td_proc;
                return (0);
        }
        error = pget(pid, PGET_NOTID | PGET_CANDEBUG | PGET_NOTWEXIT |
            PGET_HOLD, pp);
        MPASS(*pp != td->td_proc);
        return (error);
}

int
kern_kcmp(struct thread *td, pid_t pid1, pid_t pid2, int type,
    uintptr_t idx1, uintptr_t idx2)
{
        struct proc *p1, *p2;
        struct file *fp1, *fp2;
        int error, res;

        res = -1;
        p1 = p2 = NULL;
        error = kcmp_pget(td, pid1, &p1);
        if (error == 0)
                error = kcmp_pget(td, pid2, &p2);
        if (error != 0)
                goto out;

        switch (type) {
        case KCMP_FILE:
        case KCMP_FILEOBJ:
                error = fget_remote(td, p1, idx1, &fp1);
                if (error == 0) {
                        error = fget_remote(td, p2, idx2, &fp2);
                        if (error == 0) {
                                if (type == KCMP_FILEOBJ)
                                        res = fo_cmp(fp1, fp2, td);
                                else
                                        res = kcmp_cmp((uintptr_t)fp1,
                                            (uintptr_t)fp2);
                                fdrop(fp2, td);
                        }
                        fdrop(fp1, td);
                }
                break;
        case KCMP_FILES:
                res = kcmp_cmp((uintptr_t)p1->p_fd, (uintptr_t)p2->p_fd);
                break;
        case KCMP_SIGHAND:
                res = kcmp_cmp((uintptr_t)p1->p_sigacts,
                    (uintptr_t)p2->p_sigacts);
                break;
        case KCMP_VM:
                res = kcmp_cmp((uintptr_t)p1->p_vmspace,
                    (uintptr_t)p2->p_vmspace);
                break;
        default:
                error = EINVAL;
                break;
        }

out:
        if (p1 != NULL && p1 != td->td_proc)
                PRELE(p1);
        if (p2 != NULL && p2 != td->td_proc)
                PRELE(p2);

        td->td_retval[0] = res;
        return (error);
}

int
sys_kcmp(struct thread *td, struct kcmp_args *uap)
{
        return (kern_kcmp(td, uap->pid1, uap->pid2, uap->type,
            uap->idx1, uap->idx2));
}

int
file_kcmp_generic(struct file *fp1, struct file *fp2, struct thread *td)
{
        if (fp1->f_type != fp2->f_type)
                return (3);
        return (kcmp_cmp((uintptr_t)fp1->f_data, (uintptr_t)fp2->f_data));
}