/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_descrip.c	8.6 (Berkeley) 4/19/94
 * $FreeBSD$
 */

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysproto.h>
#include <sys/conf.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/resourcevar.h>
#include <sys/event.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

static MALLOC_DEFINE(M_FILEDESC, "file desc", "Open file descriptor table");
MALLOC_DEFINE(M_FILE, "file", "Open file structure");
static MALLOC_DEFINE(M_SIGIO, "sigio", "sigio structures");

static d_open_t	fdopen;
#define NUMFDESC 64

#define CDEV_MAJOR 22
static struct cdevsw fildesc_cdevsw = {
	/* open */	fdopen,
	/* close */	noclose,
	/* read */	noread,
	/* write */	nowrite,
	/* ioctl */	noioctl,
	/* poll */	nopoll,
	/* mmap */	nommap,
	/* strategy */	nostrategy,
	/* name */	"FD",
	/* maj */	CDEV_MAJOR,
	/* dump */	nodump,
	/* psize */	nopsize,
	/* flags */	0,
};

static int do_dup __P((struct filedesc *fdp, int old, int new, register_t *retval, struct thread *td));
static int badfo_readwrite __P((struct file *fp, struct uio *uio,
    struct ucred *cred, int flags, struct thread *td));
static int badfo_ioctl __P((struct file *fp, u_long com, caddr_t data,
    struct thread *td));
static int badfo_poll __P((struct file *fp, int events,
    struct ucred *cred, struct thread *td));
static int badfo_kqfilter __P((struct file *fp, struct knote *kn));
static int badfo_stat __P((struct file *fp, struct stat *sb, struct thread *td));
static int badfo_close __P((struct file *fp, struct thread *td));

/*
 * Descriptor management.
 */
struct filelist filehead;	/* head of list of open files */
int nfiles;			/* actual number of open files */
extern int cmask;

/*
 * System calls on descriptors.
 */
#ifndef _SYS_SYSPROTO_H_
struct getdtablesize_args {
	int	dummy;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
getdtablesize(td, uap)
	struct thread *td;
	struct getdtablesize_args *uap;
{
	struct proc *p = td->td_proc;

	mtx_lock(&Giant);
	td->td_retval[0] =
	    min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfilesperproc);
	mtx_unlock(&Giant);
	return (0);
}

/*
 * Duplicate a file descriptor to a particular value.
 *
 * note: keep in mind that a potential race condition exists when closing
 * descriptors from a shared descriptor table (via rfork).
 */
#ifndef _SYS_SYSPROTO_H_
struct dup2_args {
	u_int	from;
	u_int	to;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
dup2(td, uap)
	struct thread *td;
	struct dup2_args *uap;
{
	struct proc *p = td->td_proc;
	register struct filedesc *fdp = td->td_proc->p_fd;
	register u_int old = uap->from, new = uap->to;
	int i, error;

	mtx_lock(&Giant);
retry:
	if (old >= fdp->fd_nfiles ||
	    fdp->fd_ofiles[old] == NULL ||
	    new >= p->p_rlimit[RLIMIT_NOFILE].rlim_cur ||
	    new >= maxfilesperproc) {
		error = EBADF;
		goto done2;
	}
	if (old == new) {
		td->td_retval[0] = new;
		error = 0;
		goto done2;
	}
	if (new >= fdp->fd_nfiles) {
		if ((error = fdalloc(td, new, &i)))
			goto done2;
		if (new != i)
			panic("dup2: fdalloc");
		/*
		 * fdalloc() may block, retest everything.
		 */
		goto retry;
	}
	error = do_dup(fdp, (int)old, (int)new, td->td_retval, td);
done2:
	mtx_unlock(&Giant);
	return(error);
}

/*
 * Duplicate a file descriptor.
 */
#ifndef _SYS_SYSPROTO_H_
struct dup_args {
	u_int	fd;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
dup(td, uap)
	struct thread *td;
	struct dup_args *uap;
{
	register struct filedesc *fdp;
	u_int old;
	int new, error;

	mtx_lock(&Giant);
	old = uap->fd;
	fdp = td->td_proc->p_fd;
	if (old >= fdp->fd_nfiles || fdp->fd_ofiles[old] == NULL) {
		error = EBADF;
		goto done2;
	}
	if ((error = fdalloc(td, 0, &new)))
		goto done2;
	error = do_dup(fdp, (int)old, new, td->td_retval, td);
done2:
	mtx_unlock(&Giant);
	return (error);
}

/*
 * The file control system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct fcntl_args {
	int	fd;
	int	cmd;
	long	arg;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
fcntl(td, uap)
	struct thread *td;
	register struct fcntl_args *uap;
{
	register struct proc *p = td->td_proc;
	register struct filedesc *fdp;
	register struct file *fp;
	register char *pop;
	struct vnode *vp;
	int i, tmp, error = 0, flg = F_POSIX;
	struct flock fl;
	u_int newmin;

	mtx_lock(&Giant);

	fdp = p->p_fd;
	if ((unsigned)uap->fd >= fdp->fd_nfiles ||
	    (fp = fdp->fd_ofiles[uap->fd]) == NULL) {
		error = EBADF;
		goto done2;
	}
	pop = &fdp->fd_ofileflags[uap->fd];

	switch (uap->cmd) {
	case F_DUPFD:
		newmin = uap->arg;
		if (newmin >= p->p_rlimit[RLIMIT_NOFILE].rlim_cur ||
		    newmin >= maxfilesperproc) {
			error = EINVAL;
			break;
		}
		if ((error = fdalloc(td, newmin, &i)))
			break;
		error = do_dup(fdp, uap->fd, i, td->td_retval, td);
		break;

	case F_GETFD:
		td->td_retval[0] = *pop & 1;
		break;

	case F_SETFD:
		*pop = (*pop &~ 1) | (uap->arg & 1);
		break;

	case F_GETFL:
		td->td_retval[0] = OFLAGS(fp->f_flag);
		break;

	case F_SETFL:
		fhold(fp);
		fp->f_flag &= ~FCNTLFLAGS;
		fp->f_flag |= FFLAGS(uap->arg & ~O_ACCMODE) & FCNTLFLAGS;
		tmp = fp->f_flag & FNONBLOCK;
		error = fo_ioctl(fp, FIONBIO, (caddr_t)&tmp, td);
		if (error) {
			fdrop(fp, td);
			break;
		}
		tmp = fp->f_flag & FASYNC;
		error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp, td);
		if (!error) {
			fdrop(fp, td);
			break;
		}
		fp->f_flag &= ~FNONBLOCK;
		tmp = 0;
		(void)fo_ioctl(fp, FIONBIO, (caddr_t)&tmp, td);
		fdrop(fp, td);
		break;

	case F_GETOWN:
		fhold(fp);
		error = fo_ioctl(fp, FIOGETOWN, (caddr_t)td->td_retval, td);
		fdrop(fp, td);
		break;

	case F_SETOWN:
		fhold(fp);
		error = fo_ioctl(fp, FIOSETOWN, (caddr_t)&uap->arg, td);
		fdrop(fp, td);
		break;

	case F_SETLKW:
		flg |= F_WAIT;
		/* Fall into F_SETLK */

	case F_SETLK:
		if (fp->f_type != DTYPE_VNODE) {
			error = EBADF;
			break;
		}
		vp = (struct vnode *)fp->f_data;

		/*
		 * copyin/lockop may block
		 */
		fhold(fp);
		/* Copy in the lock structure */
		error = copyin((caddr_t)(intptr_t)uap->arg, (caddr_t)&fl,
		    sizeof(fl));
		if (error) {
			fdrop(fp, td);
			break;
		}
		if (fl.l_whence == SEEK_CUR) {
			if (fp->f_offset < 0 ||
			    (fl.l_start > 0 &&
			     fp->f_offset > OFF_MAX - fl.l_start)) {
				fdrop(fp, td);
				error = EOVERFLOW;
				break;
			}
			fl.l_start += fp->f_offset;
		}

		switch (fl.l_type) {
		case F_RDLCK:
			if ((fp->f_flag & FREAD) == 0) {
				error = EBADF;
				break;
			}
			p->p_flag |= P_ADVLOCK;
			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
			    &fl, flg);
			break;
		case F_WRLCK:
			if ((fp->f_flag & FWRITE) == 0) {
				error = EBADF;
				break;
			}
			p->p_flag |= P_ADVLOCK;
			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
			    &fl, flg);
			break;
		case F_UNLCK:
			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK,
			    &fl, F_POSIX);
			break;
		default:
			error = EINVAL;
			break;
		}
		fdrop(fp, td);
		break;

	case F_GETLK:
		if (fp->f_type != DTYPE_VNODE) {
			error = EBADF;
			break;
		}
		vp = (struct vnode *)fp->f_data;
		/*
		 * copyin/lockop may block
		 */
		fhold(fp);
		/* Copy in the lock structure */
		error = copyin((caddr_t)(intptr_t)uap->arg, (caddr_t)&fl,
		    sizeof(fl));
		if (error) {
			fdrop(fp, td);
			break;
		}
		if (fl.l_type != F_RDLCK && fl.l_type != F_WRLCK &&
		    fl.l_type != F_UNLCK) {
			fdrop(fp, td);
			error = EINVAL;
			break;
		}
		if (fl.l_whence == SEEK_CUR) {
			if ((fl.l_start > 0 &&
			     fp->f_offset > OFF_MAX - fl.l_start) ||
			    (fl.l_start < 0 &&
			     fp->f_offset < OFF_MIN - fl.l_start)) {
				fdrop(fp, td);
				error = EOVERFLOW;
				break;
			}
			fl.l_start += fp->f_offset;
		}
		error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_GETLK,
		    &fl, F_POSIX);
		fdrop(fp, td);
		if (error == 0) {
			error = copyout((caddr_t)&fl,
			    (caddr_t)(intptr_t)uap->arg, sizeof(fl));
		}
		break;
	default:
		error = EINVAL;
		break;
	}
done2:
	mtx_unlock(&Giant);
	return (error);
}

/*
 * Common code for dup, dup2, and fcntl(F_DUPFD).
 */
static int
do_dup(fdp, old, new, retval, td)
	register struct filedesc *fdp;
	register int old, new;
	register_t *retval;
	struct thread *td;
{
	struct file *fp;
	struct file *delfp;

	/*
	 * Save info on the descriptor being overwritten. We have
	 * to do the unmap now, but we cannot close it without
	 * introducing an ownership race for the slot.
	 */
	delfp = fdp->fd_ofiles[new];
#if 0
	if (delfp && (fdp->fd_ofileflags[new] & UF_MAPPED))
		(void) munmapfd(td, new);
#endif

	/*
	 * Duplicate the source descriptor, update lastfile
	 */
	fp = fdp->fd_ofiles[old];
	fdp->fd_ofiles[new] = fp;
	fdp->fd_ofileflags[new] = fdp->fd_ofileflags[old] &~ UF_EXCLOSE;
	fhold(fp);
	if (new > fdp->fd_lastfile)
		fdp->fd_lastfile = new;
	*retval = new;

	/*
	 * If we dup'd over a valid file, we now own the reference to it
	 * and must dispose of it using closef() semantics (as if a
	 * close() were performed on it).
	 */
	if (delfp)
		(void) closef(delfp, td);
	return (0);
}

/*
 * If sigio is on the list associated with a process or process group,
 * disable signalling from the device, remove sigio from the list and
 * free sigio.
 */
void
funsetown(sigio)
	struct sigio *sigio;
{
	int s;

	if (sigio == NULL)
		return;
	s = splhigh();
	*(sigio->sio_myref) = NULL;
	splx(s);
	if (sigio->sio_pgid < 0) {
		SLIST_REMOVE(&sigio->sio_pgrp->pg_sigiolst, sigio,
		    sigio, sio_pgsigio);
	} else /* if ((*sigiop)->sio_pgid > 0) */ {
		SLIST_REMOVE(&sigio->sio_proc->p_sigiolst, sigio,
		    sigio, sio_pgsigio);
	}
	crfree(sigio->sio_ucred);
	FREE(sigio, M_SIGIO);
}

/* Free a list of sigio structures. */
void
funsetownlst(sigiolst)
	struct sigiolst *sigiolst;
{
	struct sigio *sigio;

	while ((sigio = SLIST_FIRST(sigiolst)) != NULL)
		funsetown(sigio);
}

/*
 * This is common code for FIOSETOWN ioctl called by fcntl(fd, F_SETOWN, arg).
 *
 * After permission checking, add a sigio structure to the sigio list for
 * the process or process group.
 */
int
fsetown(pgid, sigiop)
	pid_t pgid;
	struct sigio **sigiop;
{
	struct proc *proc;
	struct pgrp *pgrp;
	struct sigio *sigio;
	int s;

	if (pgid == 0) {
		funsetown(*sigiop);
		return (0);
	}
	if (pgid > 0) {
		proc = pfind(pgid);
		if (proc == NULL)
			return (ESRCH);

		/*
		 * Policy - Don't allow a process to FSETOWN a process
		 * in another session.
		 *
		 * Remove this test to allow maximum flexibility or
		 * restrict FSETOWN to the current process or process
		 * group for maximum safety.
		 */
		if (proc->p_session != curthread->td_proc->p_session) {
			PROC_UNLOCK(proc);
			return (EPERM);
		}
		PROC_UNLOCK(proc);

		pgrp = NULL;
	} else /* if (pgid < 0) */ {
		pgrp = pgfind(-pgid);
		if (pgrp == NULL)
			return (ESRCH);

		/*
		 * Policy - Don't allow a process to FSETOWN a process
		 * in another session.
		 *
		 * Remove this test to allow maximum flexibility or
		 * restrict FSETOWN to the current process or process
		 * group for maximum safety.
		 */
		if (pgrp->pg_session != curthread->td_proc->p_session)
			return (EPERM);

		proc = NULL;
	}
	funsetown(*sigiop);
	MALLOC(sigio, struct sigio *, sizeof(struct sigio), M_SIGIO, M_WAITOK);
	if (pgid > 0) {
		SLIST_INSERT_HEAD(&proc->p_sigiolst, sigio, sio_pgsigio);
		sigio->sio_proc = proc;
	} else {
		SLIST_INSERT_HEAD(&pgrp->pg_sigiolst, sigio, sio_pgsigio);
		sigio->sio_pgrp = pgrp;
	}
	sigio->sio_pgid = pgid;
	crhold(curthread->td_proc->p_ucred);
	sigio->sio_ucred = curthread->td_proc->p_ucred;
	sigio->sio_myref = sigiop;
	s = splhigh();
	*sigiop = sigio;
	splx(s);
	return (0);
}

/*
 * This is common code for FIOGETOWN ioctl called by fcntl(fd, F_GETOWN, arg).
 */
pid_t
fgetown(sigio)
	struct sigio *sigio;
{
	return (sigio != NULL ? sigio->sio_pgid : 0);
}

/*
 * Close a file descriptor.
 */
#ifndef _SYS_SYSPROTO_H_
struct close_args {
	int	fd;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
close(td, uap)
	struct thread *td;
	struct close_args *uap;
{
	register struct filedesc *fdp;
	register struct file *fp;
	register int fd = uap->fd;
	int error = 0;

	mtx_lock(&Giant);
	fdp = td->td_proc->p_fd;
	if ((unsigned)fd >= fdp->fd_nfiles ||
	    (fp = fdp->fd_ofiles[fd]) == NULL) {
		error = EBADF;
		goto done2;
	}
#if 0
	if (fdp->fd_ofileflags[fd] & UF_MAPPED)
		(void) munmapfd(td, fd);
#endif
	fdp->fd_ofiles[fd] = NULL;
	fdp->fd_ofileflags[fd] = 0;

	/*
	 * we now hold the fp reference that used to be owned by the descriptor
	 * array.
	 */
	while (fdp->fd_lastfile > 0 && fdp->fd_ofiles[fdp->fd_lastfile] == NULL)
		fdp->fd_lastfile--;
	if (fd < fdp->fd_freefile)
		fdp->fd_freefile = fd;
	if (fd < fdp->fd_knlistsize)
		knote_fdclose(td, fd);
	error = closef(fp, td);
done2:
	mtx_unlock(&Giant);
	return(error);
}

#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
/*
 * Return status information about a file descriptor.
 */
#ifndef _SYS_SYSPROTO_H_
struct ofstat_args {
	int	fd;
	struct ostat *sb;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
ofstat(td, uap)
	struct thread *td;
	register struct ofstat_args *uap;
{
	register struct filedesc *fdp = td->td_proc->p_fd;
	register struct file *fp;
	struct stat ub;
	struct ostat oub;
	int error;

	mtx_lock(&Giant);

	if ((unsigned)uap->fd >= fdp->fd_nfiles ||
	    (fp = fdp->fd_ofiles[uap->fd]) == NULL) {
		error = EBADF;
		goto done2;
	}
	fhold(fp);
	error = fo_stat(fp, &ub, td);
	if (error == 0) {
		cvtstat(&ub, &oub);
		error = copyout((caddr_t)&oub, (caddr_t)uap->sb, sizeof (oub));
	}
	fdrop(fp, td);
done2:
	mtx_unlock(&Giant);
	return (error);
}
#endif /* COMPAT_43 || COMPAT_SUNOS */

/*
 * Return status information about a file descriptor.
 */
#ifndef _SYS_SYSPROTO_H_
struct fstat_args {
	int	fd;
	struct stat *sb;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
fstat(td, uap)
	struct thread *td;
	register struct fstat_args *uap;
{
	register struct filedesc *fdp;
	register struct file *fp;
	struct stat ub;
	int error;

	mtx_lock(&Giant);
	fdp = td->td_proc->p_fd;

	if ((unsigned)uap->fd >= fdp->fd_nfiles ||
	    (fp = fdp->fd_ofiles[uap->fd]) == NULL) {
		error = EBADF;
		goto done2;
	}
	fhold(fp);
	error = fo_stat(fp, &ub, td);
	if (error == 0)
		error = copyout((caddr_t)&ub, (caddr_t)uap->sb, sizeof (ub));
	fdrop(fp, td);
done2:
	mtx_unlock(&Giant);
	return (error);
}

/*
 * Return status information about a file descriptor.
 */
#ifndef _SYS_SYSPROTO_H_
struct nfstat_args {
	int	fd;
	struct nstat *sb;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
nfstat(td, uap)
	struct thread *td;
	register struct nfstat_args *uap;
{
	register struct filedesc *fdp;
	register struct file *fp;
	struct stat ub;
	struct nstat nub;
	int error;

	mtx_lock(&Giant);

	fdp = td->td_proc->p_fd;
	if ((unsigned)uap->fd >= fdp->fd_nfiles ||
	    (fp = fdp->fd_ofiles[uap->fd]) == NULL) {
		error = EBADF;
		goto done2;
	}
	fhold(fp);
	error = fo_stat(fp, &ub, td);
	if (error == 0) {
		cvtnstat(&ub, &nub);
		error = copyout((caddr_t)&nub, (caddr_t)uap->sb, sizeof (nub));
	}
	fdrop(fp, td);
done2:
	mtx_unlock(&Giant);
	return (error);
}

/*
 * Return pathconf information about a file descriptor.
 */
#ifndef _SYS_SYSPROTO_H_
struct fpathconf_args {
	int	fd;
	int	name;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
fpathconf(td, uap)
	struct thread *td;
	register struct fpathconf_args *uap;
{
	struct filedesc *fdp;
	struct file *fp;
	struct vnode *vp;
	int error = 0;

	mtx_lock(&Giant);
	fdp = td->td_proc->p_fd;

	if ((unsigned)uap->fd >= fdp->fd_nfiles ||
	    (fp = fdp->fd_ofiles[uap->fd]) == NULL) {
		error = EBADF;
		goto done2;
	}

	fhold(fp);

	switch (fp->f_type) {
	case DTYPE_PIPE:
	case DTYPE_SOCKET:
		if (uap->name != _PC_PIPE_BUF) {
			error = EINVAL;
			break;
		}
		td->td_retval[0] = PIPE_BUF;
		error = 0;
		break;
	case DTYPE_FIFO:
	case DTYPE_VNODE:
		vp = (struct vnode *)fp->f_data;
		error = VOP_PATHCONF(vp, uap->name, td->td_retval);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	fdrop(fp, td);
done2:
	mtx_unlock(&Giant);
	return(error);
}

/*
 * Allocate a file descriptor for the process.
 */
static int fdexpand;
SYSCTL_INT(_debug, OID_AUTO, fdexpand, CTLFLAG_RD, &fdexpand, 0, "");

int
fdalloc(td, want, result)
	struct thread *td;
	int want;
	int *result;
{
	struct proc *p = td->td_proc;
	register struct filedesc *fdp = td->td_proc->p_fd;
	register int i;
	int lim, last, nfiles;
	struct file **newofile;
	char *newofileflags;

	/*
	 * Search for a free descriptor starting at the higher
	 * of want or fd_freefile.  If that fails, consider
	 * expanding the ofile array.
	 */
	lim = min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfilesperproc);
	for (;;) {
		last = min(fdp->fd_nfiles, lim);
		if ((i = want) < fdp->fd_freefile)
			i = fdp->fd_freefile;
		for (; i < last; i++) {
			if (fdp->fd_ofiles[i] == NULL) {
				fdp->fd_ofileflags[i] = 0;
				if (i > fdp->fd_lastfile)
					fdp->fd_lastfile = i;
				if (want <= fdp->fd_freefile)
					fdp->fd_freefile = i;
				*result = i;
				return (0);
			}
		}

		/*
		 * No space in current array.  Expand?
		 */
		if (fdp->fd_nfiles >= lim)
			return (EMFILE);
		if (fdp->fd_nfiles < NDEXTENT)
			nfiles = NDEXTENT;
		else
			nfiles = 2 * fdp->fd_nfiles;
		MALLOC(newofile, struct file **, nfiles * OFILESIZE,
		    M_FILEDESC, M_WAITOK);

		/*
		 * deal with file-table extend race that might have occurred
		 * when malloc was blocked.
		 */
		if (fdp->fd_nfiles >= nfiles) {
			FREE(newofile, M_FILEDESC);
			continue;
		}
		newofileflags = (char *) &newofile[nfiles];
		/*
		 * Copy the existing ofile and ofileflags arrays
		 * and zero the new portion of each array.
		 */
		bcopy(fdp->fd_ofiles, newofile,
		    (i = sizeof(struct file *) * fdp->fd_nfiles));
		bzero((char *)newofile + i, nfiles * sizeof(struct file *) - i);
		bcopy(fdp->fd_ofileflags, newofileflags,
		    (i = sizeof(char) * fdp->fd_nfiles));
		bzero(newofileflags + i, nfiles * sizeof(char) - i);
		if (fdp->fd_nfiles > NDFILE)
			FREE(fdp->fd_ofiles, M_FILEDESC);
		fdp->fd_ofiles = newofile;
		fdp->fd_ofileflags = newofileflags;
		fdp->fd_nfiles = nfiles;
		fdexpand++;
	}
	return (0);
}

/*
 * Check to see whether n user file descriptors
 * are available to the process p.
 */
int
fdavail(td, n)
	struct thread *td;
	register int n;
{
	struct proc *p = td->td_proc;
	register struct filedesc *fdp = td->td_proc->p_fd;
	register struct file **fpp;
	register int i, lim, last;

	lim = min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfilesperproc);
	if ((i = lim - fdp->fd_nfiles) > 0 && (n -= i) <= 0)
		return (1);

	last = min(fdp->fd_nfiles, lim);
	fpp = &fdp->fd_ofiles[fdp->fd_freefile];
	for (i = last - fdp->fd_freefile; --i >= 0; fpp++) {
		if (*fpp == NULL && --n <= 0)
			return (1);
	}
	return (0);
}

/*
 * Create a new open file structure and allocate
 * a file descriptor for the process that refers to it.
 */
int
falloc(td, resultfp, resultfd)
	register struct thread *td;
	struct file **resultfp;
	int *resultfd;
{
	struct proc *p = td->td_proc;
	register struct file *fp, *fq;
	int error, i;

	if (nfiles >= maxfiles) {
		tablefull("file");
		return (ENFILE);
	}
	/*
	 * Allocate a new file descriptor.
	 * If the process has file descriptor zero open, add to the list
	 * of open files at that point, otherwise put it at the front of
	 * the list of open files.
	 */
	nfiles++;
	MALLOC(fp, struct file *, sizeof(struct file), M_FILE, M_WAITOK | M_ZERO);

	/*
	 * wait until after malloc (which may have blocked) returns before
	 * allocating the slot, else a race might have shrunk it if we had
	 * allocated it before the malloc.
	 */
	if ((error = fdalloc(td, 0, &i))) {
		nfiles--;
		FREE(fp, M_FILE);
		return (error);
	}
	fp->f_count = 1;
	fp->f_cred = p->p_ucred;
	fp->f_ops = &badfileops;
	fp->f_seqcount = 1;
	crhold(fp->f_cred);
	if ((fq = p->p_fd->fd_ofiles[0])) {
		LIST_INSERT_AFTER(fq, fp, f_list);
	} else {
		LIST_INSERT_HEAD(&filehead, fp, f_list);
	}
	p->p_fd->fd_ofiles[i] = fp;
	if (resultfp)
		*resultfp = fp;
	if (resultfd)
		*resultfd = i;
	return (0);
}

/*
 * Free a file descriptor.
 */
void
ffree(fp)
	register struct file *fp;
{
	KASSERT((fp->f_count == 0), ("ffree: fp_fcount not 0!"));
	LIST_REMOVE(fp, f_list);
	crfree(fp->f_cred);
	nfiles--;
	FREE(fp, M_FILE);
}

/*
 * Build a new filedesc structure.
 */
struct filedesc *
fdinit(td)
	struct thread *td;
{
	register struct filedesc0 *newfdp;
	register struct filedesc *fdp = td->td_proc->p_fd;

	MALLOC(newfdp, struct filedesc0 *, sizeof(struct filedesc0),
	    M_FILEDESC, M_WAITOK | M_ZERO);
	newfdp->fd_fd.fd_cdir = fdp->fd_cdir;
	if (newfdp->fd_fd.fd_cdir)
		VREF(newfdp->fd_fd.fd_cdir);
	newfdp->fd_fd.fd_rdir = fdp->fd_rdir;
	if (newfdp->fd_fd.fd_rdir)
		VREF(newfdp->fd_fd.fd_rdir);
	newfdp->fd_fd.fd_jdir = fdp->fd_jdir;
	if (newfdp->fd_fd.fd_jdir)
		VREF(newfdp->fd_fd.fd_jdir);

	/* Create the file descriptor table. */
	newfdp->fd_fd.fd_refcnt = 1;
	newfdp->fd_fd.fd_cmask = cmask;
	newfdp->fd_fd.fd_ofiles = newfdp->fd_dfiles;
	newfdp->fd_fd.fd_ofileflags = newfdp->fd_dfileflags;
	newfdp->fd_fd.fd_nfiles = NDFILE;
	newfdp->fd_fd.fd_knlistsize = -1;

	return (&newfdp->fd_fd);
}

/*
 * Share a filedesc structure.
 */
struct filedesc *
fdshare(p)
	struct proc *p;
{
	p->p_fd->fd_refcnt++;
	return (p->p_fd);
}

/*
 * Copy a filedesc structure.
 */
struct filedesc *
fdcopy(td)
	struct thread *td;
{
	register struct filedesc *newfdp, *fdp = td->td_proc->p_fd;
	register struct file **fpp;
	register int i;

	/* Certain daemons might not have file descriptors. */
	if (fdp == NULL)
		return (NULL);

	MALLOC(newfdp, struct filedesc *, sizeof(struct filedesc0),
	    M_FILEDESC, M_WAITOK);
	bcopy(fdp, newfdp, sizeof(struct filedesc));
	if (newfdp->fd_cdir)
		VREF(newfdp->fd_cdir);
	if (newfdp->fd_rdir)
		VREF(newfdp->fd_rdir);
	if (newfdp->fd_jdir)
		VREF(newfdp->fd_jdir);
	newfdp->fd_refcnt = 1;

	/*
	 * If the number of open files fits in the internal arrays
	 * of the open file structure, use them, otherwise allocate
	 * additional memory for the number of descriptors currently
	 * in use.
	 */
	if (newfdp->fd_lastfile < NDFILE) {
		newfdp->fd_ofiles = ((struct filedesc0 *) newfdp)->fd_dfiles;
		newfdp->fd_ofileflags =
		    ((struct filedesc0 *) newfdp)->fd_dfileflags;
		i = NDFILE;
	} else {
		/*
		 * Compute the smallest multiple of NDEXTENT needed
		 * for the file descriptors currently in use,
		 * allowing the table to shrink.
		 */
		i = newfdp->fd_nfiles;
		while (i > 2 * NDEXTENT && i > newfdp->fd_lastfile * 2)
			i /= 2;
		MALLOC(newfdp->fd_ofiles, struct file **, i * OFILESIZE,
		    M_FILEDESC, M_WAITOK);
		newfdp->fd_ofileflags = (char *) &newfdp->fd_ofiles[i];
	}
	newfdp->fd_nfiles = i;
	bcopy(fdp->fd_ofiles, newfdp->fd_ofiles, i * sizeof(struct file **));
	bcopy(fdp->fd_ofileflags, newfdp->fd_ofileflags, i * sizeof(char));

	/*
	 * kq descriptors cannot be copied.
	 */
	if (newfdp->fd_knlistsize != -1) {
		fpp = newfdp->fd_ofiles;
		for (i = newfdp->fd_lastfile; i-- >= 0; fpp++) {
			if (*fpp != NULL && (*fpp)->f_type == DTYPE_KQUEUE)
				*fpp = NULL;
		}
		newfdp->fd_knlist = NULL;
		newfdp->fd_knlistsize = -1;
		newfdp->fd_knhash = NULL;
		newfdp->fd_knhashmask = 0;
	}

	fpp = newfdp->fd_ofiles;
	for (i = newfdp->fd_lastfile; i-- >= 0; fpp++) {
		if (*fpp != NULL)
			fhold(*fpp);
	}
	return (newfdp);
}

/*
 * Release a filedesc structure.
 */
void
fdfree(td)
	struct thread *td;
{
	register struct filedesc *fdp = td->td_proc->p_fd;
	struct file **fpp;
	register int i;

	/* Certain daemons might not have file descriptors. */
	if (fdp == NULL)
		return;

	if (--fdp->fd_refcnt > 0)
		return;
	/*
	 * we are the last reference to the structure, we can
	 * safely assume it will not change out from under us.
	 */
	fpp = fdp->fd_ofiles;
	for (i = fdp->fd_lastfile; i-- >= 0; fpp++) {
		if (*fpp)
			(void) closef(*fpp, td);
	}
	if (fdp->fd_nfiles > NDFILE)
		FREE(fdp->fd_ofiles, M_FILEDESC);
	if (fdp->fd_cdir)
		vrele(fdp->fd_cdir);
	if (fdp->fd_rdir)
		vrele(fdp->fd_rdir);
	if (fdp->fd_jdir)
		vrele(fdp->fd_jdir);
	if (fdp->fd_knlist)
		FREE(fdp->fd_knlist, M_TEMP);
	if (fdp->fd_knhash)
		FREE(fdp->fd_knhash, M_TEMP);
	FREE(fdp, M_FILEDESC);
}

/*
 * For setugid programs, we don't want people to use that setugidness
 * to generate error messages which write to a file which would
 * otherwise be off-limits to the process.
 *
 * This is a gross hack to plug the hole.  A better solution would involve
 * a special vop or other form of generalized access control mechanism.  We
 * go ahead and just reject all procfs file systems accesses as dangerous.
 *
 * Since setugidsafety calls this only for fd 0, 1 and 2, this check is
 * sufficient.  We also don't check for setugidness since we know we are.
 */
static int
is_unsafe(struct file *fp)
{
	if (fp->f_type == DTYPE_VNODE &&
	    ((struct vnode *)(fp->f_data))->v_tag == VT_PROCFS)
		return (1);
	return (0);
}

/*
 * Make this setugid thing safe, if at all possible.
 */
void
setugidsafety(td)
	struct thread *td;
{
	struct filedesc *fdp = td->td_proc->p_fd;
	register int i;

	/* Certain daemons might not have file descriptors. */
	if (fdp == NULL)
		return;

	/*
	 * note: fdp->fd_ofiles may be reallocated out from under us while
	 * we are blocked in a close.  Be careful!
	 */
	for (i = 0; i <= fdp->fd_lastfile; i++) {
		if (i > 2)
			break;
		if (fdp->fd_ofiles[i] && is_unsafe(fdp->fd_ofiles[i])) {
			struct file *fp;

#if 0
			if ((fdp->fd_ofileflags[i] & UF_MAPPED) != 0)
				(void) munmapfd(td, i);
#endif
			if (i < fdp->fd_knlistsize)
				knote_fdclose(td, i);
			/*
			 * NULL-out descriptor prior to close to avoid
			 * a race while close blocks.
			 */
			fp = fdp->fd_ofiles[i];
			fdp->fd_ofiles[i] = NULL;
			fdp->fd_ofileflags[i] = 0;
			if (i < fdp->fd_freefile)
				fdp->fd_freefile = i;
			(void) closef(fp, td);
		}
	}
	while (fdp->fd_lastfile > 0 && fdp->fd_ofiles[fdp->fd_lastfile] == NULL)
		fdp->fd_lastfile--;
}

/*
 * Close any files on exec?
 */
void
fdcloseexec(td)
	struct thread *td;
{
	struct filedesc *fdp = td->td_proc->p_fd;
	register int i;

	/* Certain daemons might not have file descriptors. */
	if (fdp == NULL)
		return;

	/*
	 * We cannot cache fd_ofiles or fd_ofileflags since operations
	 * may block and rip them out from under us.
	 */
	for (i = 0; i <= fdp->fd_lastfile; i++) {
		if (fdp->fd_ofiles[i] != NULL &&
		    (fdp->fd_ofileflags[i] & UF_EXCLOSE)) {
			struct file *fp;

#if 0
			if (fdp->fd_ofileflags[i] & UF_MAPPED)
				(void) munmapfd(td, i);
#endif
			if (i < fdp->fd_knlistsize)
				knote_fdclose(td, i);
			/*
			 * NULL-out descriptor prior to close to avoid
			 * a race while close blocks.
			 */
			fp = fdp->fd_ofiles[i];
			fdp->fd_ofiles[i] = NULL;
			fdp->fd_ofileflags[i] = 0;
			if (i < fdp->fd_freefile)
				fdp->fd_freefile = i;
			(void) closef(fp, td);
		}
	}
	while (fdp->fd_lastfile > 0 && fdp->fd_ofiles[fdp->fd_lastfile] == NULL)
		fdp->fd_lastfile--;
}

/*
 * Internal form of close.
 * Decrement reference count on file structure.
 * Note: td may be NULL when closing a file
 * that was being passed in a message.
 */
int
closef(fp, td)
	register struct file *fp;
	register struct thread *td;
{
	struct vnode *vp;
	struct flock lf;

	if (fp == NULL)
		return (0);
	/*
	 * POSIX record locking dictates that any close releases ALL
	 * locks owned by this process.  This is handled by setting
	 * a flag in the unlock to free ONLY locks obeying POSIX
	 * semantics, and not to free BSD-style file locks.
	 * If the descriptor was in a message, POSIX-style locks
	 * aren't passed with the descriptor.
	 */
	if (td && (td->td_proc->p_flag & P_ADVLOCK) &&
	    fp->f_type == DTYPE_VNODE) {
		lf.l_whence = SEEK_SET;
		lf.l_start = 0;
		lf.l_len = 0;
		lf.l_type = F_UNLCK;
		vp = (struct vnode *)fp->f_data;
		(void) VOP_ADVLOCK(vp, (caddr_t)td->td_proc->p_leader,
		    F_UNLCK, &lf, F_POSIX);
	}
	return (fdrop(fp, td));
}

int
fdrop(fp, td)
	struct file *fp;
	struct thread *td;
{
	struct flock lf;
	struct vnode *vp;
	int error;

	if (--fp->f_count > 0)
		return (0);
	if (fp->f_count < 0)
		panic("fdrop: count < 0");
	if ((fp->f_flag & FHASLOCK) && fp->f_type == DTYPE_VNODE) {
		lf.l_whence = SEEK_SET;
		lf.l_start = 0;
		lf.l_len = 0;
		lf.l_type = F_UNLCK;
		vp = (struct vnode *)fp->f_data;
		(void) VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, F_FLOCK);
	}
	if (fp->f_ops != &badfileops)
		error = fo_close(fp, td);
	else
		error = 0;
	ffree(fp);
	return (error);
}

/*
 * Apply an advisory lock on a file descriptor.
 *
 * Just attempt to get a record lock of the requested type on
 * the entire file (l_whence = SEEK_SET, l_start = 0, l_len = 0).
 */
#ifndef _SYS_SYSPROTO_H_
struct flock_args {
	int	fd;
	int	how;
};
#endif
/*
 * MPSAFE
 */
/* ARGSUSED */
int
flock(td, uap)
	struct thread *td;
	register struct flock_args *uap;
{
	register struct filedesc *fdp = td->td_proc->p_fd;
	register struct file *fp;
	struct vnode *vp;
	struct flock lf;
	int error;

	mtx_lock(&Giant);

	if ((unsigned)uap->fd >= fdp->fd_nfiles ||
	    (fp = fdp->fd_ofiles[uap->fd]) == NULL) {
		error = EBADF;
		goto done2;
	}
	if (fp->f_type != DTYPE_VNODE) {
		error = EOPNOTSUPP;
		goto done2;
	}
	vp = (struct vnode *)fp->f_data;
	lf.l_whence = SEEK_SET;
	lf.l_start = 0;
	lf.l_len = 0;
	if (uap->how & LOCK_UN) {
		lf.l_type = F_UNLCK;
		fp->f_flag &= ~FHASLOCK;
		error = VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, F_FLOCK);
		goto done2;
	}
	if (uap->how & LOCK_EX)
		lf.l_type = F_WRLCK;
	else if (uap->how & LOCK_SH)
		lf.l_type = F_RDLCK;
	else {
		error = EBADF;
		goto done2;
	}
	fp->f_flag |= FHASLOCK;
	if (uap->how & LOCK_NB)
		error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, F_FLOCK);
	else
		error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, F_FLOCK|F_WAIT);
done2:
	mtx_unlock(&Giant);
	return (error);
}

/*
 * File Descriptor pseudo-device driver (/dev/fd/).
 *
 * Opening minor device N dup()s the file (if any) connected to file
 * descriptor N belonging to the calling process.  Note that this driver
 * consists of only the ``open()'' routine, because all subsequent
 * references to this file will be direct to the other driver.
 */
/* ARGSUSED */
static int
fdopen(dev, mode, type, td)
	dev_t dev;
	int mode, type;
	struct thread *td;
{

	/*
	 * XXX Kludge: set curthread->td_dupfd to contain the value of
	 * the file descriptor being sought for duplication. The error
	 * return ensures that the vnode for this device will be released
	 * by vn_open. Open will detect this special error and take the
	 * actions in dupfdopen below. Other callers of vn_open or VOP_OPEN
	 * will simply report the error.
	 */
	td->td_dupfd = dev2unit(dev);
	return (ENODEV);
}

/*
 * Duplicate the specified descriptor to a free descriptor.
 */
int
dupfdopen(td, fdp, indx, dfd, mode, error)
	struct thread *td;
	struct filedesc *fdp;
	int indx, dfd;
	int mode;
	int error;
{
	register struct file *wfp;
	struct file *fp;

	/*
	 * If the to-be-dup'd fd number is greater than the allowed number
	 * of file descriptors, or the fd to be dup'd has already been
	 * closed, then reject.
	 */
	if ((u_int)dfd >= fdp->fd_nfiles ||
	    (wfp = fdp->fd_ofiles[dfd]) == NULL) {
		return (EBADF);
	}

	/*
	 * There are two cases of interest here.
	 *
	 * For ENODEV simply dup (dfd) to file descriptor
	 * (indx) and return.
	 *
	 * For ENXIO steal away the file structure from (dfd) and
	 * store it in (indx).  (dfd) is effectively closed by
	 * this operation.
	 *
	 * Any other error code is just returned.
	 */
	switch (error) {
	case ENODEV:
		/*
		 * Check that the mode the file is being opened for is a
		 * subset of the mode of the existing descriptor.
		 */
		if (((mode & (FREAD|FWRITE)) | wfp->f_flag) != wfp->f_flag)
			return (EACCES);
		fp = fdp->fd_ofiles[indx];
#if 0
		if (fp && fdp->fd_ofileflags[indx] & UF_MAPPED)
			(void) munmapfd(td, indx);
#endif
		fdp->fd_ofiles[indx] = wfp;
		fdp->fd_ofileflags[indx] = fdp->fd_ofileflags[dfd];
		fhold(wfp);
		if (indx > fdp->fd_lastfile)
			fdp->fd_lastfile = indx;
		/*
		 * we now own the reference to fp that the ofiles[] array
		 * used to own.  Release it.
		 */
		if (fp)
			fdrop(fp, td);
		return (0);

	case ENXIO:
		/*
		 * Steal away the file pointer from dfd, and stuff it into indx.
		 */
		fp = fdp->fd_ofiles[indx];
#if 0
		if (fp && fdp->fd_ofileflags[indx] & UF_MAPPED)
			(void) munmapfd(td, indx);
#endif
		fdp->fd_ofiles[indx] = fdp->fd_ofiles[dfd];
		fdp->fd_ofiles[dfd] = NULL;
		fdp->fd_ofileflags[indx] = fdp->fd_ofileflags[dfd];
		fdp->fd_ofileflags[dfd] = 0;

		/*
		 * we now own the reference to fp that the ofiles[] array
		 * used to own.  Release it.
		 */
		if (fp)
			fdrop(fp, td);
		/*
		 * Complete the clean up of the filedesc structure by
		 * recomputing the various hints.
		 */
		if (indx > fdp->fd_lastfile) {
			fdp->fd_lastfile = indx;
		} else {
			while (fdp->fd_lastfile > 0 &&
			    fdp->fd_ofiles[fdp->fd_lastfile] == NULL) {
				fdp->fd_lastfile--;
			}
			if (dfd < fdp->fd_freefile)
				fdp->fd_freefile = dfd;
		}
		return (0);

	default:
		return (error);
	}
	/* NOTREACHED */
}

/*
 * Get file structures.
 */
static int
sysctl_kern_file(SYSCTL_HANDLER_ARGS)
{
	int error;
	struct file *fp;

	if (!req->oldptr) {
		/*
		 * overestimate by 10 files
		 */
		return (SYSCTL_OUT(req, 0, sizeof(filehead) +
		    (nfiles + 10) * sizeof(struct file)));
	}

	error = SYSCTL_OUT(req, (caddr_t)&filehead, sizeof(filehead));
	if (error)
		return (error);

	/*
	 * followed by an array of file structures
	 */
	LIST_FOREACH(fp, &filehead, f_list) {
		error = SYSCTL_OUT(req, (caddr_t)fp, sizeof (struct file));
		if (error)
			return (error);
	}
	return (0);
}

SYSCTL_PROC(_kern, KERN_FILE, file, CTLTYPE_OPAQUE|CTLFLAG_RD,
    0, 0, sysctl_kern_file, "S,file", "Entire file table");

SYSCTL_INT(_kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW,
    &maxfilesperproc, 0, "Maximum files allowed open per process");

SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles, CTLFLAG_RW,
    &maxfiles, 0, "Maximum number of files");

SYSCTL_INT(_kern, OID_AUTO, openfiles, CTLFLAG_RD,
    &nfiles, 0, "System-wide number of open files");

static void
fildesc_drvinit(void *unused)
{
	dev_t dev;

	dev = make_dev(&fildesc_cdevsw, 0, UID_BIN, GID_BIN, 0666, "fd/0");
	make_dev_alias(dev, "stdin");
	dev = make_dev(&fildesc_cdevsw, 1, UID_BIN, GID_BIN, 0666, "fd/1");
	make_dev_alias(dev, "stdout");
	dev = make_dev(&fildesc_cdevsw, 2, UID_BIN, GID_BIN, 0666, "fd/2");
	make_dev_alias(dev, "stderr");
	if (!devfs_present) {
		int fd;

		for (fd = 3; fd < NUMFDESC; fd++)
			make_dev(&fildesc_cdevsw, fd, UID_BIN, GID_BIN, 0666,
			    "fd/%d", fd);
	}
}

struct fileops badfileops = {
	badfo_readwrite,
	badfo_readwrite,
	badfo_ioctl,
	badfo_poll,
	badfo_kqfilter,
	badfo_stat,
	badfo_close
};

static int
badfo_readwrite(fp, uio, cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *cred;
	struct thread *td;
	int flags;
{

	return (EBADF);
}

static int
badfo_ioctl(fp, com, data, td)
	struct file *fp;
	u_long com;
	caddr_t data;
	struct thread *td;
{

	return (EBADF);
}

static int
badfo_poll(fp, events, cred, td)
	struct file *fp;
	int events;
	struct ucred *cred;
	struct thread *td;
{

	return (0);
}

static int
badfo_kqfilter(fp, kn)
	struct file *fp;
	struct knote *kn;
{

	return (0);
}

static int
badfo_stat(fp, sb, td)
	struct file *fp;
	struct stat *sb;
	struct thread *td;
{

	return (EBADF);
}

static int
badfo_close(fp, td)
	struct file *fp;
	struct thread *td;
{

	return (EBADF);
}

SYSINIT(fildescdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,
	fildesc_drvinit,NULL)
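
/*
 * Illustrative userland sketch of the descriptor semantics implemented
 * above (the example path and descriptor numbers are arbitrary choices,
 * not anything mandated by this file):
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		int fd, copy;
 *
 *		fd = open("/etc/motd", O_RDONLY);	// arbitrary example file
 *		if (fd == -1)
 *			return (1);
 *
 *		// dup2(fd, 10) goes through dup2() -> do_dup(); note that
 *		// do_dup() clears UF_EXCLOSE on the target slot.
 *		if (dup2(fd, 10) == -1)
 *			return (1);
 *
 *		// fcntl(F_DUPFD, 12) asks fdalloc() for the lowest free
 *		// descriptor numbered 12 or above.
 *		copy = fcntl(fd, F_DUPFD, 12);
 *		if (copy == -1)
 *			return (1);
 *		printf("copies at 10 and %d\n", copy);
 *
 *		// F_SETFD records FD_CLOEXEC in fd_ofileflags[]; the flag
 *		// is honored by fdcloseexec() at exec time.
 *		(void)fcntl(fd, F_SETFD, FD_CLOEXEC);
 *		return (0);
 *	}
 */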