1 /* 2 * Copyright (c) 1982, 1986, 1989, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * (c) UNIX System Laboratories, Inc. 5 * All or some portions of this file are derived from material licensed 6 * to the University of California by American Telephone and Telegraph 7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 8 * the permission of UNIX System Laboratories, Inc. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 4. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 * 34 * @(#)kern_descrip.c 8.6 (Berkeley) 4/19/94 35 */ 36 37 #include <sys/cdefs.h> 38 __FBSDID("$FreeBSD$"); 39 40 #include "opt_compat.h" 41 42 #include <sys/param.h> 43 #include <sys/limits.h> 44 #include <sys/systm.h> 45 #include <sys/syscallsubr.h> 46 #include <sys/sysproto.h> 47 #include <sys/conf.h> 48 #include <sys/filedesc.h> 49 #include <sys/lock.h> 50 #include <sys/kernel.h> 51 #include <sys/limits.h> 52 #include <sys/malloc.h> 53 #include <sys/mutex.h> 54 #include <sys/sysctl.h> 55 #include <sys/vnode.h> 56 #include <sys/mount.h> 57 #include <sys/proc.h> 58 #include <sys/namei.h> 59 #include <sys/file.h> 60 #include <sys/stat.h> 61 #include <sys/filio.h> 62 #include <sys/fcntl.h> 63 #include <sys/unistd.h> 64 #include <sys/resourcevar.h> 65 #include <sys/event.h> 66 #include <sys/sx.h> 67 #include <sys/socketvar.h> 68 #include <sys/signalvar.h> 69 70 #include <vm/vm.h> 71 #include <vm/vm_extern.h> 72 #include <vm/uma.h> 73 74 static MALLOC_DEFINE(M_FILEDESC, "file desc", "Open file descriptor table"); 75 static MALLOC_DEFINE(M_FILEDESC_TO_LEADER, "file desc to leader", 76 "file desc to leader structures"); 77 static MALLOC_DEFINE(M_SIGIO, "sigio", "sigio structures"); 78 79 static uma_zone_t file_zone; 80 81 static d_open_t fdopen; 82 #define NUMFDESC 64 83 84 #define CDEV_MAJOR 22 85 static struct cdevsw fildesc_cdevsw = { 86 .d_version = D_VERSION, 87 .d_flags = D_NEEDGIANT, 88 .d_open = fdopen, 89 .d_name = "FD", 90 .d_maj = CDEV_MAJOR, 91 }; 92 93 /* How to treat 'new' parameter when allocating a fd for do_dup(). 
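 * DUP_VARIABLE allocates the lowest free descriptor at or above 'new'
 * (dup(2), fcntl(F_DUPFD)); DUP_FIXED uses exactly 'new', closing whatever
 * that slot previously referenced (dup2(2)).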
*/ 94 enum dup_type { DUP_VARIABLE, DUP_FIXED }; 95 96 static int do_dup(struct thread *td, enum dup_type type, int old, int new, 97 register_t *retval); 98 static int fd_first_free(struct filedesc *, int, int); 99 static int fd_last_used(struct filedesc *, int, int); 100 static void fdgrowtable(struct filedesc *, int); 101 102 /* 103 * Descriptor management. 104 */ 105 struct filelist filehead; /* head of list of open files */ 106 int nfiles; /* actual number of open files */ 107 struct sx filelist_lock; /* sx to protect filelist */ 108 struct mtx sigio_lock; /* mtx to protect pointers to sigio */ 109 110 /* 111 * Find the first zero bit in the given bitmap, starting at low and not 112 * exceeding size - 1. 113 */ 114 static int 115 fd_first_free(struct filedesc *fdp, int low, int size) 116 { 117 NDSLOTTYPE *map = fdp->fd_map; 118 NDSLOTTYPE mask; 119 int off, maxoff; 120 121 if (low >= size) 122 return (low); 123 124 off = NDSLOT(low); 125 if (low % NDENTRIES) { 126 mask = ~(~(NDSLOTTYPE)0 >> (NDENTRIES - (low % NDENTRIES))); 127 if ((mask &= ~map[off]) != 0UL) 128 return (off * NDENTRIES + ffsl(mask) - 1); 129 ++off; 130 } 131 for (maxoff = NDSLOTS(size); off < maxoff; ++off) 132 if (map[off] != ~0UL) 133 return (off * NDENTRIES + ffsl(~map[off]) - 1); 134 return (size); 135 } 136 137 /* 138 * Find the highest non-zero bit in the given bitmap, starting at low and 139 * not exceeding size - 1. 140 */ 141 static int 142 fd_last_used(struct filedesc *fdp, int low, int size) 143 { 144 NDSLOTTYPE *map = fdp->fd_map; 145 NDSLOTTYPE mask; 146 int off, minoff; 147 148 if (low >= size) 149 return (-1); 150 151 off = NDSLOT(size); 152 if (size % NDENTRIES) { 153 mask = ~(~(NDSLOTTYPE)0 << (size % NDENTRIES)); 154 if ((mask &= map[off]) != 0) 155 return (off * NDENTRIES + flsl(mask) - 1); 156 --off; 157 } 158 for (minoff = NDSLOT(low); off >= minoff; --off) 159 if (map[off] != 0) 160 return (off * NDENTRIES + flsl(map[off]) - 1); 161 return (size - 1); 162 } 163 164 static int 165 fdisused(struct filedesc *fdp, int fd) 166 { 167 KASSERT(fd >= 0 && fd < fdp->fd_nfiles, 168 ("file descriptor %d out of range (0, %d)", fd, fdp->fd_nfiles)); 169 return ((fdp->fd_map[NDSLOT(fd)] & NDBIT(fd)) != 0); 170 } 171 172 /* 173 * Mark a file descriptor as used. 174 */ 175 void 176 fdused(struct filedesc *fdp, int fd) 177 { 178 FILEDESC_LOCK_ASSERT(fdp, MA_OWNED); 179 KASSERT(!fdisused(fdp, fd), 180 ("fd already used")); 181 fdp->fd_map[NDSLOT(fd)] |= NDBIT(fd); 182 if (fd > fdp->fd_lastfile) 183 fdp->fd_lastfile = fd; 184 if (fd == fdp->fd_freefile) 185 fdp->fd_freefile = fd_first_free(fdp, fd, fdp->fd_nfiles); 186 } 187 188 /* 189 * Mark a file descriptor as unused. 190 */ 191 void 192 fdunused(struct filedesc *fdp, int fd) 193 { 194 FILEDESC_LOCK_ASSERT(fdp, MA_OWNED); 195 KASSERT(fdisused(fdp, fd), 196 ("fd is already unused")); 197 KASSERT(fdp->fd_ofiles[fd] == NULL, 198 ("fd is still in use")); 199 fdp->fd_map[NDSLOT(fd)] &= ~NDBIT(fd); 200 if (fd < fdp->fd_freefile) 201 fdp->fd_freefile = fd; 202 if (fd == fdp->fd_lastfile) 203 fdp->fd_lastfile = fd_last_used(fdp, 0, fd); 204 } 205 206 /* 207 * System calls on descriptors. 
208 */ 209 #ifndef _SYS_SYSPROTO_H_ 210 struct getdtablesize_args { 211 int dummy; 212 }; 213 #endif 214 /* 215 * MPSAFE 216 */ 217 /* ARGSUSED */ 218 int 219 getdtablesize(td, uap) 220 struct thread *td; 221 struct getdtablesize_args *uap; 222 { 223 struct proc *p = td->td_proc; 224 225 PROC_LOCK(p); 226 td->td_retval[0] = 227 min((int)lim_cur(p, RLIMIT_NOFILE), maxfilesperproc); 228 PROC_UNLOCK(p); 229 return (0); 230 } 231 232 /* 233 * Duplicate a file descriptor to a particular value. 234 * 235 * note: keep in mind that a potential race condition exists when closing 236 * descriptors from a shared descriptor table (via rfork). 237 */ 238 #ifndef _SYS_SYSPROTO_H_ 239 struct dup2_args { 240 u_int from; 241 u_int to; 242 }; 243 #endif 244 /* 245 * MPSAFE 246 */ 247 /* ARGSUSED */ 248 int 249 dup2(td, uap) 250 struct thread *td; 251 struct dup2_args *uap; 252 { 253 254 return (do_dup(td, DUP_FIXED, (int)uap->from, (int)uap->to, 255 td->td_retval)); 256 } 257 258 /* 259 * Duplicate a file descriptor. 260 */ 261 #ifndef _SYS_SYSPROTO_H_ 262 struct dup_args { 263 u_int fd; 264 }; 265 #endif 266 /* 267 * MPSAFE 268 */ 269 /* ARGSUSED */ 270 int 271 dup(td, uap) 272 struct thread *td; 273 struct dup_args *uap; 274 { 275 276 return (do_dup(td, DUP_VARIABLE, (int)uap->fd, 0, td->td_retval)); 277 } 278 279 /* 280 * The file control system call. 281 */ 282 #ifndef _SYS_SYSPROTO_H_ 283 struct fcntl_args { 284 int fd; 285 int cmd; 286 long arg; 287 }; 288 #endif 289 /* 290 * MPSAFE 291 */ 292 /* ARGSUSED */ 293 int 294 fcntl(td, uap) 295 struct thread *td; 296 struct fcntl_args *uap; 297 { 298 struct flock fl; 299 intptr_t arg; 300 int error; 301 302 error = 0; 303 switch (uap->cmd) { 304 case F_GETLK: 305 case F_SETLK: 306 case F_SETLKW: 307 error = copyin((void *)(intptr_t)uap->arg, &fl, sizeof(fl)); 308 arg = (intptr_t)&fl; 309 break; 310 default: 311 arg = uap->arg; 312 break; 313 } 314 if (error) 315 return (error); 316 error = kern_fcntl(td, uap->fd, uap->cmd, arg); 317 if (error) 318 return (error); 319 if (uap->cmd == F_GETLK) 320 error = copyout(&fl, (void *)(intptr_t)uap->arg, sizeof(fl)); 321 return (error); 322 } 323 324 int 325 kern_fcntl(struct thread *td, int fd, int cmd, intptr_t arg) 326 { 327 struct filedesc *fdp; 328 struct flock *flp; 329 struct file *fp; 330 struct proc *p; 331 char *pop; 332 struct vnode *vp; 333 u_int newmin; 334 int error, flg, tmp; 335 336 error = 0; 337 flg = F_POSIX; 338 p = td->td_proc; 339 fdp = p->p_fd; 340 mtx_lock(&Giant); 341 FILEDESC_LOCK(fdp); 342 if ((unsigned)fd >= fdp->fd_nfiles || 343 (fp = fdp->fd_ofiles[fd]) == NULL) { 344 FILEDESC_UNLOCK(fdp); 345 error = EBADF; 346 goto done2; 347 } 348 pop = &fdp->fd_ofileflags[fd]; 349 350 switch (cmd) { 351 case F_DUPFD: 352 FILEDESC_UNLOCK(fdp); 353 newmin = arg; 354 PROC_LOCK(p); 355 if (newmin >= lim_cur(p, RLIMIT_NOFILE) || 356 newmin >= maxfilesperproc) { 357 PROC_UNLOCK(p); 358 error = EINVAL; 359 break; 360 } 361 PROC_UNLOCK(p); 362 error = do_dup(td, DUP_VARIABLE, fd, newmin, td->td_retval); 363 break; 364 365 case F_GETFD: 366 td->td_retval[0] = (*pop & UF_EXCLOSE) ? FD_CLOEXEC : 0; 367 FILEDESC_UNLOCK(fdp); 368 break; 369 370 case F_SETFD: 371 *pop = (*pop &~ UF_EXCLOSE) | 372 (arg & FD_CLOEXEC ? 
UF_EXCLOSE : 0); 373 FILEDESC_UNLOCK(fdp); 374 break; 375 376 case F_GETFL: 377 FILE_LOCK(fp); 378 FILEDESC_UNLOCK(fdp); 379 td->td_retval[0] = OFLAGS(fp->f_flag); 380 FILE_UNLOCK(fp); 381 break; 382 383 case F_SETFL: 384 FILE_LOCK(fp); 385 FILEDESC_UNLOCK(fdp); 386 fhold_locked(fp); 387 fp->f_flag &= ~FCNTLFLAGS; 388 fp->f_flag |= FFLAGS(arg & ~O_ACCMODE) & FCNTLFLAGS; 389 FILE_UNLOCK(fp); 390 tmp = fp->f_flag & FNONBLOCK; 391 error = fo_ioctl(fp, FIONBIO, &tmp, td->td_ucred, td); 392 if (error) { 393 fdrop(fp, td); 394 break; 395 } 396 tmp = fp->f_flag & FASYNC; 397 error = fo_ioctl(fp, FIOASYNC, &tmp, td->td_ucred, td); 398 if (error == 0) { 399 fdrop(fp, td); 400 break; 401 } 402 FILE_LOCK(fp); 403 fp->f_flag &= ~FNONBLOCK; 404 FILE_UNLOCK(fp); 405 tmp = 0; 406 (void)fo_ioctl(fp, FIONBIO, &tmp, td->td_ucred, td); 407 fdrop(fp, td); 408 break; 409 410 case F_GETOWN: 411 fhold(fp); 412 FILEDESC_UNLOCK(fdp); 413 error = fo_ioctl(fp, FIOGETOWN, &tmp, td->td_ucred, td); 414 if (error == 0) 415 td->td_retval[0] = tmp; 416 fdrop(fp, td); 417 break; 418 419 case F_SETOWN: 420 fhold(fp); 421 FILEDESC_UNLOCK(fdp); 422 tmp = arg; 423 error = fo_ioctl(fp, FIOSETOWN, &tmp, td->td_ucred, td); 424 fdrop(fp, td); 425 break; 426 427 case F_SETLKW: 428 flg |= F_WAIT; 429 /* FALLTHROUGH F_SETLK */ 430 431 case F_SETLK: 432 if (fp->f_type != DTYPE_VNODE) { 433 FILEDESC_UNLOCK(fdp); 434 error = EBADF; 435 break; 436 } 437 438 flp = (struct flock *)arg; 439 if (flp->l_whence == SEEK_CUR) { 440 if (fp->f_offset < 0 || 441 (flp->l_start > 0 && 442 fp->f_offset > OFF_MAX - flp->l_start)) { 443 FILEDESC_UNLOCK(fdp); 444 error = EOVERFLOW; 445 break; 446 } 447 flp->l_start += fp->f_offset; 448 } 449 450 /* 451 * VOP_ADVLOCK() may block. 452 */ 453 fhold(fp); 454 FILEDESC_UNLOCK(fdp); 455 vp = fp->f_vnode; 456 457 switch (flp->l_type) { 458 case F_RDLCK: 459 if ((fp->f_flag & FREAD) == 0) { 460 error = EBADF; 461 break; 462 } 463 PROC_LOCK(p->p_leader); 464 p->p_leader->p_flag |= P_ADVLOCK; 465 PROC_UNLOCK(p->p_leader); 466 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK, 467 flp, flg); 468 break; 469 case F_WRLCK: 470 if ((fp->f_flag & FWRITE) == 0) { 471 error = EBADF; 472 break; 473 } 474 PROC_LOCK(p->p_leader); 475 p->p_leader->p_flag |= P_ADVLOCK; 476 PROC_UNLOCK(p->p_leader); 477 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK, 478 flp, flg); 479 break; 480 case F_UNLCK: 481 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK, 482 flp, F_POSIX); 483 break; 484 default: 485 error = EINVAL; 486 break; 487 } 488 /* Check for race with close */ 489 FILEDESC_LOCK(fdp); 490 if ((unsigned) fd >= fdp->fd_nfiles || 491 fp != fdp->fd_ofiles[fd]) { 492 FILEDESC_UNLOCK(fdp); 493 flp->l_whence = SEEK_SET; 494 flp->l_start = 0; 495 flp->l_len = 0; 496 flp->l_type = F_UNLCK; 497 (void) VOP_ADVLOCK(vp, (caddr_t)p->p_leader, 498 F_UNLCK, flp, F_POSIX); 499 } else 500 FILEDESC_UNLOCK(fdp); 501 fdrop(fp, td); 502 break; 503 504 case F_GETLK: 505 if (fp->f_type != DTYPE_VNODE) { 506 FILEDESC_UNLOCK(fdp); 507 error = EBADF; 508 break; 509 } 510 flp = (struct flock *)arg; 511 if (flp->l_type != F_RDLCK && flp->l_type != F_WRLCK && 512 flp->l_type != F_UNLCK) { 513 FILEDESC_UNLOCK(fdp); 514 error = EINVAL; 515 break; 516 } 517 if (flp->l_whence == SEEK_CUR) { 518 if ((flp->l_start > 0 && 519 fp->f_offset > OFF_MAX - flp->l_start) || 520 (flp->l_start < 0 && 521 fp->f_offset < OFF_MIN - flp->l_start)) { 522 FILEDESC_UNLOCK(fdp); 523 error = EOVERFLOW; 524 break; 525 } 526 flp->l_start += fp->f_offset; 527 } 528 /* 
529 * VOP_ADVLOCK() may block. 530 */ 531 fhold(fp); 532 FILEDESC_UNLOCK(fdp); 533 vp = fp->f_vnode; 534 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_GETLK, flp, 535 F_POSIX); 536 fdrop(fp, td); 537 break; 538 default: 539 FILEDESC_UNLOCK(fdp); 540 error = EINVAL; 541 break; 542 } 543 done2: 544 mtx_unlock(&Giant); 545 return (error); 546 } 547 548 /* 549 * Common code for dup, dup2, and fcntl(F_DUPFD). 550 */ 551 static int 552 do_dup(td, type, old, new, retval) 553 enum dup_type type; 554 int old, new; 555 register_t *retval; 556 struct thread *td; 557 { 558 struct filedesc *fdp; 559 struct proc *p; 560 struct file *fp; 561 struct file *delfp; 562 int error, holdleaders, maxfd; 563 564 KASSERT((type == DUP_VARIABLE || type == DUP_FIXED), 565 ("invalid dup type %d", type)); 566 567 p = td->td_proc; 568 fdp = p->p_fd; 569 570 /* 571 * Verify we have a valid descriptor to dup from and possibly to 572 * dup to. 573 */ 574 if (old < 0 || new < 0) 575 return (EBADF); 576 PROC_LOCK(p); 577 maxfd = min((int)lim_cur(p, RLIMIT_NOFILE), maxfilesperproc); 578 PROC_UNLOCK(p); 579 if (new >= maxfd) 580 return (EMFILE); 581 582 FILEDESC_LOCK(fdp); 583 if (old >= fdp->fd_nfiles || fdp->fd_ofiles[old] == NULL) { 584 FILEDESC_UNLOCK(fdp); 585 return (EBADF); 586 } 587 if (type == DUP_FIXED && old == new) { 588 *retval = new; 589 FILEDESC_UNLOCK(fdp); 590 return (0); 591 } 592 fp = fdp->fd_ofiles[old]; 593 fhold(fp); 594 595 /* 596 * If the caller specified a file descriptor, make sure the file 597 * table is large enough to hold it, and grab it. Otherwise, just 598 * allocate a new descriptor the usual way. Since the filedesc 599 * lock may be temporarily dropped in the process, we have to look 600 * out for a race. 601 */ 602 if (type == DUP_FIXED) { 603 if (new >= fdp->fd_nfiles) 604 fdgrowtable(fdp, new + 1); 605 if (fdp->fd_ofiles[new] == NULL) 606 fdused(fdp, new); 607 } else { 608 if ((error = fdalloc(td, new, &new)) != 0) { 609 FILEDESC_UNLOCK(fdp); 610 fdrop(fp, td); 611 return (error); 612 } 613 } 614 615 /* 616 * If the old file changed out from under us then treat it as a 617 * bad file descriptor. Userland should do its own locking to 618 * avoid this case. 619 */ 620 if (fdp->fd_ofiles[old] != fp) { 621 /* we've allocated a descriptor which we won't use */ 622 if (fdp->fd_ofiles[new] == NULL) 623 fdunused(fdp, new); 624 FILEDESC_UNLOCK(fdp); 625 fdrop(fp, td); 626 return (EBADF); 627 } 628 KASSERT(old != new, 629 ("new fd is same as old")); 630 631 /* 632 * Save info on the descriptor being overwritten. We cannot close 633 * it without introducing an ownership race for the slot, since we 634 * need to drop the filedesc lock to call closef(). 635 * 636 * XXX this duplicates parts of close(). 637 */ 638 delfp = fdp->fd_ofiles[new]; 639 holdleaders = 0; 640 if (delfp != NULL) { 641 if (td->td_proc->p_fdtol != NULL) { 642 /* 643 * Ask fdfree() to sleep to ensure that all relevant 644 * process leaders can be traversed in closef(). 645 */ 646 fdp->fd_holdleaderscount++; 647 holdleaders = 1; 648 } 649 } 650 651 /* 652 * Duplicate the source descriptor 653 */ 654 fdp->fd_ofiles[new] = fp; 655 fdp->fd_ofileflags[new] = fdp->fd_ofileflags[old] &~ UF_EXCLOSE; 656 if (new > fdp->fd_lastfile) 657 fdp->fd_lastfile = new; 658 FILEDESC_UNLOCK(fdp); 659 *retval = new; 660 661 /* 662 * If we dup'd over a valid file, we now own the reference to it 663 * and must dispose of it using closef() semantics (as if a 664 * close() were performed on it). 665 * 666 * XXX this duplicates parts of close(). 
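 * closef() drops the reference that the descriptor slot used to own and,
 * for vnodes, releases any POSIX locks belonging to this process.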
667 */ 668 if (delfp != NULL) { 669 /* XXX need to call knote_fdclose() */ 670 mtx_lock(&Giant); 671 (void) closef(delfp, td); 672 mtx_unlock(&Giant); 673 if (holdleaders) { 674 FILEDESC_LOCK(fdp); 675 fdp->fd_holdleaderscount--; 676 if (fdp->fd_holdleaderscount == 0 && 677 fdp->fd_holdleaderswakeup != 0) { 678 fdp->fd_holdleaderswakeup = 0; 679 wakeup(&fdp->fd_holdleaderscount); 680 } 681 FILEDESC_UNLOCK(fdp); 682 } 683 } 684 return (0); 685 } 686 687 /* 688 * If sigio is on the list associated with a process or process group, 689 * disable signalling from the device, remove sigio from the list and 690 * free sigio. 691 */ 692 void 693 funsetown(sigiop) 694 struct sigio **sigiop; 695 { 696 struct sigio *sigio; 697 698 SIGIO_LOCK(); 699 sigio = *sigiop; 700 if (sigio == NULL) { 701 SIGIO_UNLOCK(); 702 return; 703 } 704 *(sigio->sio_myref) = NULL; 705 if ((sigio)->sio_pgid < 0) { 706 struct pgrp *pg = (sigio)->sio_pgrp; 707 PGRP_LOCK(pg); 708 SLIST_REMOVE(&sigio->sio_pgrp->pg_sigiolst, sigio, 709 sigio, sio_pgsigio); 710 PGRP_UNLOCK(pg); 711 } else { 712 struct proc *p = (sigio)->sio_proc; 713 PROC_LOCK(p); 714 SLIST_REMOVE(&sigio->sio_proc->p_sigiolst, sigio, 715 sigio, sio_pgsigio); 716 PROC_UNLOCK(p); 717 } 718 SIGIO_UNLOCK(); 719 crfree(sigio->sio_ucred); 720 FREE(sigio, M_SIGIO); 721 } 722 723 /* 724 * Free a list of sigio structures. 725 * We only need to lock the SIGIO_LOCK because we have made ourselves 726 * inaccessable to callers of fsetown and therefore do not need to lock 727 * the proc or pgrp struct for the list manipulation. 728 */ 729 void 730 funsetownlst(sigiolst) 731 struct sigiolst *sigiolst; 732 { 733 struct proc *p; 734 struct pgrp *pg; 735 struct sigio *sigio; 736 737 sigio = SLIST_FIRST(sigiolst); 738 if (sigio == NULL) 739 return; 740 p = NULL; 741 pg = NULL; 742 743 /* 744 * Every entry of the list should belong 745 * to a single proc or pgrp. 746 */ 747 if (sigio->sio_pgid < 0) { 748 pg = sigio->sio_pgrp; 749 PGRP_LOCK_ASSERT(pg, MA_NOTOWNED); 750 } else /* if (sigio->sio_pgid > 0) */ { 751 p = sigio->sio_proc; 752 PROC_LOCK_ASSERT(p, MA_NOTOWNED); 753 } 754 755 SIGIO_LOCK(); 756 while ((sigio = SLIST_FIRST(sigiolst)) != NULL) { 757 *(sigio->sio_myref) = NULL; 758 if (pg != NULL) { 759 KASSERT(sigio->sio_pgid < 0, 760 ("Proc sigio in pgrp sigio list")); 761 KASSERT(sigio->sio_pgrp == pg, 762 ("Bogus pgrp in sigio list")); 763 PGRP_LOCK(pg); 764 SLIST_REMOVE(&pg->pg_sigiolst, sigio, sigio, 765 sio_pgsigio); 766 PGRP_UNLOCK(pg); 767 } else /* if (p != NULL) */ { 768 KASSERT(sigio->sio_pgid > 0, 769 ("Pgrp sigio in proc sigio list")); 770 KASSERT(sigio->sio_proc == p, 771 ("Bogus proc in sigio list")); 772 PROC_LOCK(p); 773 SLIST_REMOVE(&p->p_sigiolst, sigio, sigio, 774 sio_pgsigio); 775 PROC_UNLOCK(p); 776 } 777 SIGIO_UNLOCK(); 778 crfree(sigio->sio_ucred); 779 FREE(sigio, M_SIGIO); 780 SIGIO_LOCK(); 781 } 782 SIGIO_UNLOCK(); 783 } 784 785 /* 786 * This is common code for FIOSETOWN ioctl called by fcntl(fd, F_SETOWN, arg). 787 * 788 * After permission checking, add a sigio structure to the sigio list for 789 * the process or process group. 790 */ 791 int 792 fsetown(pgid, sigiop) 793 pid_t pgid; 794 struct sigio **sigiop; 795 { 796 struct proc *proc; 797 struct pgrp *pgrp; 798 struct sigio *sigio; 799 int ret; 800 801 if (pgid == 0) { 802 funsetown(sigiop); 803 return (0); 804 } 805 806 ret = 0; 807 808 /* Allocate and fill in the new sigio out of locks. 
*/ 809 MALLOC(sigio, struct sigio *, sizeof(struct sigio), M_SIGIO, M_WAITOK); 810 sigio->sio_pgid = pgid; 811 sigio->sio_ucred = crhold(curthread->td_ucred); 812 sigio->sio_myref = sigiop; 813 814 sx_slock(&proctree_lock); 815 if (pgid > 0) { 816 proc = pfind(pgid); 817 if (proc == NULL) { 818 ret = ESRCH; 819 goto fail; 820 } 821 822 /* 823 * Policy - Don't allow a process to FSETOWN a process 824 * in another session. 825 * 826 * Remove this test to allow maximum flexibility or 827 * restrict FSETOWN to the current process or process 828 * group for maximum safety. 829 */ 830 PROC_UNLOCK(proc); 831 if (proc->p_session != curthread->td_proc->p_session) { 832 ret = EPERM; 833 goto fail; 834 } 835 836 pgrp = NULL; 837 } else /* if (pgid < 0) */ { 838 pgrp = pgfind(-pgid); 839 if (pgrp == NULL) { 840 ret = ESRCH; 841 goto fail; 842 } 843 PGRP_UNLOCK(pgrp); 844 845 /* 846 * Policy - Don't allow a process to FSETOWN a process 847 * in another session. 848 * 849 * Remove this test to allow maximum flexibility or 850 * restrict FSETOWN to the current process or process 851 * group for maximum safety. 852 */ 853 if (pgrp->pg_session != curthread->td_proc->p_session) { 854 ret = EPERM; 855 goto fail; 856 } 857 858 proc = NULL; 859 } 860 funsetown(sigiop); 861 if (pgid > 0) { 862 PROC_LOCK(proc); 863 /* 864 * Since funsetownlst() is called without the proctree 865 * locked, we need to check for P_WEXIT. 866 * XXX: is ESRCH correct? 867 */ 868 if ((proc->p_flag & P_WEXIT) != 0) { 869 PROC_UNLOCK(proc); 870 ret = ESRCH; 871 goto fail; 872 } 873 SLIST_INSERT_HEAD(&proc->p_sigiolst, sigio, sio_pgsigio); 874 sigio->sio_proc = proc; 875 PROC_UNLOCK(proc); 876 } else { 877 PGRP_LOCK(pgrp); 878 SLIST_INSERT_HEAD(&pgrp->pg_sigiolst, sigio, sio_pgsigio); 879 sigio->sio_pgrp = pgrp; 880 PGRP_UNLOCK(pgrp); 881 } 882 sx_sunlock(&proctree_lock); 883 SIGIO_LOCK(); 884 *sigiop = sigio; 885 SIGIO_UNLOCK(); 886 return (0); 887 888 fail: 889 sx_sunlock(&proctree_lock); 890 crfree(sigio->sio_ucred); 891 FREE(sigio, M_SIGIO); 892 return (ret); 893 } 894 895 /* 896 * This is common code for FIOGETOWN ioctl called by fcntl(fd, F_GETOWN, arg). 897 */ 898 pid_t 899 fgetown(sigiop) 900 struct sigio **sigiop; 901 { 902 pid_t pgid; 903 904 SIGIO_LOCK(); 905 pgid = (*sigiop != NULL) ? (*sigiop)->sio_pgid : 0; 906 SIGIO_UNLOCK(); 907 return (pgid); 908 } 909 910 /* 911 * Close a file descriptor. 912 */ 913 #ifndef _SYS_SYSPROTO_H_ 914 struct close_args { 915 int fd; 916 }; 917 #endif 918 /* 919 * MPSAFE 920 */ 921 /* ARGSUSED */ 922 int 923 close(td, uap) 924 struct thread *td; 925 struct close_args *uap; 926 { 927 struct filedesc *fdp; 928 struct file *fp; 929 int fd, error; 930 int holdleaders; 931 932 fd = uap->fd; 933 error = 0; 934 holdleaders = 0; 935 fdp = td->td_proc->p_fd; 936 mtx_lock(&Giant); 937 FILEDESC_LOCK(fdp); 938 if ((unsigned)fd >= fdp->fd_nfiles || 939 (fp = fdp->fd_ofiles[fd]) == NULL) { 940 FILEDESC_UNLOCK(fdp); 941 mtx_unlock(&Giant); 942 return (EBADF); 943 } 944 fdp->fd_ofiles[fd] = NULL; 945 fdp->fd_ofileflags[fd] = 0; 946 fdunused(fdp, fd); 947 if (td->td_proc->p_fdtol != NULL) { 948 /* 949 * Ask fdfree() to sleep to ensure that all relevant 950 * process leaders can be traversed in closef(). 951 */ 952 fdp->fd_holdleaderscount++; 953 holdleaders = 1; 954 } 955 956 /* 957 * we now hold the fp reference that used to be owned by the descriptor 958 * array. 
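 * If any knotes are attached to this descriptor, detach them before the
 * final closef().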
959 */ 960 if (fd < fdp->fd_knlistsize) { 961 FILEDESC_UNLOCK(fdp); 962 knote_fdclose(td, fd); 963 } else 964 FILEDESC_UNLOCK(fdp); 965 966 error = closef(fp, td); 967 mtx_unlock(&Giant); 968 if (holdleaders) { 969 FILEDESC_LOCK(fdp); 970 fdp->fd_holdleaderscount--; 971 if (fdp->fd_holdleaderscount == 0 && 972 fdp->fd_holdleaderswakeup != 0) { 973 fdp->fd_holdleaderswakeup = 0; 974 wakeup(&fdp->fd_holdleaderscount); 975 } 976 FILEDESC_UNLOCK(fdp); 977 } 978 return (error); 979 } 980 981 #if defined(COMPAT_43) 982 /* 983 * Return status information about a file descriptor. 984 */ 985 #ifndef _SYS_SYSPROTO_H_ 986 struct ofstat_args { 987 int fd; 988 struct ostat *sb; 989 }; 990 #endif 991 /* 992 * MPSAFE 993 */ 994 /* ARGSUSED */ 995 int 996 ofstat(td, uap) 997 struct thread *td; 998 struct ofstat_args *uap; 999 { 1000 struct file *fp; 1001 struct stat ub; 1002 struct ostat oub; 1003 int error; 1004 1005 if ((error = fget(td, uap->fd, &fp)) != 0) 1006 goto done2; 1007 mtx_lock(&Giant); 1008 error = fo_stat(fp, &ub, td->td_ucred, td); 1009 mtx_unlock(&Giant); 1010 if (error == 0) { 1011 cvtstat(&ub, &oub); 1012 error = copyout(&oub, uap->sb, sizeof(oub)); 1013 } 1014 fdrop(fp, td); 1015 done2: 1016 return (error); 1017 } 1018 #endif /* COMPAT_43 */ 1019 1020 /* 1021 * Return status information about a file descriptor. 1022 */ 1023 #ifndef _SYS_SYSPROTO_H_ 1024 struct fstat_args { 1025 int fd; 1026 struct stat *sb; 1027 }; 1028 #endif 1029 /* 1030 * MPSAFE 1031 */ 1032 /* ARGSUSED */ 1033 int 1034 fstat(td, uap) 1035 struct thread *td; 1036 struct fstat_args *uap; 1037 { 1038 struct file *fp; 1039 struct stat ub; 1040 int error; 1041 1042 if ((error = fget(td, uap->fd, &fp)) != 0) 1043 goto done2; 1044 mtx_lock(&Giant); 1045 error = fo_stat(fp, &ub, td->td_ucred, td); 1046 mtx_unlock(&Giant); 1047 if (error == 0) 1048 error = copyout(&ub, uap->sb, sizeof(ub)); 1049 fdrop(fp, td); 1050 done2: 1051 return (error); 1052 } 1053 1054 /* 1055 * Return status information about a file descriptor. 1056 */ 1057 #ifndef _SYS_SYSPROTO_H_ 1058 struct nfstat_args { 1059 int fd; 1060 struct nstat *sb; 1061 }; 1062 #endif 1063 /* 1064 * MPSAFE 1065 */ 1066 /* ARGSUSED */ 1067 int 1068 nfstat(td, uap) 1069 struct thread *td; 1070 struct nfstat_args *uap; 1071 { 1072 struct file *fp; 1073 struct stat ub; 1074 struct nstat nub; 1075 int error; 1076 1077 if ((error = fget(td, uap->fd, &fp)) != 0) 1078 goto done2; 1079 mtx_lock(&Giant); 1080 error = fo_stat(fp, &ub, td->td_ucred, td); 1081 mtx_unlock(&Giant); 1082 if (error == 0) { 1083 cvtnstat(&ub, &nub); 1084 error = copyout(&nub, uap->sb, sizeof(nub)); 1085 } 1086 fdrop(fp, td); 1087 done2: 1088 return (error); 1089 } 1090 1091 /* 1092 * Return pathconf information about a file descriptor. 1093 */ 1094 #ifndef _SYS_SYSPROTO_H_ 1095 struct fpathconf_args { 1096 int fd; 1097 int name; 1098 }; 1099 #endif 1100 /* 1101 * MPSAFE 1102 */ 1103 /* ARGSUSED */ 1104 int 1105 fpathconf(td, uap) 1106 struct thread *td; 1107 struct fpathconf_args *uap; 1108 { 1109 struct file *fp; 1110 struct vnode *vp; 1111 int error; 1112 1113 if ((error = fget(td, uap->fd, &fp)) != 0) 1114 return (error); 1115 1116 /* If asynchronous I/O is available, it works for all descriptors. 
*/
1117 	if (uap->name == _PC_ASYNC_IO) {
1118 		td->td_retval[0] = async_io_version;
1119 		goto out;
1120 	}
1121 	vp = fp->f_vnode;
1122 	if (vp != NULL) {
1123 		mtx_lock(&Giant);
1124 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
1125 		error = VOP_PATHCONF(vp, uap->name, td->td_retval);
1126 		VOP_UNLOCK(vp, 0, td);
1127 		mtx_unlock(&Giant);
1128 	} else if (fp->f_type == DTYPE_PIPE || fp->f_type == DTYPE_SOCKET) {
1129 		if (uap->name != _PC_PIPE_BUF) {
1130 			error = EINVAL;
1131 		} else {
1132 			td->td_retval[0] = PIPE_BUF;
1133 			error = 0;
1134 		}
1135 	} else {
1136 		error = EOPNOTSUPP;
1137 	}
1138 out:
1139 	fdrop(fp, td);
1140 	return (error);
1141 }
1142 
1143 /*
1144 * Grow the file table to accommodate (at least) nfd descriptors. This may
1145 * block and drop the filedesc lock, but it will reacquire it before
1146 * returning.
1147 */
1148 static void
1149 fdgrowtable(struct filedesc *fdp, int nfd)
1150 {
1151 	struct file **ntable;
1152 	char *nfileflags;
1153 	int nnfiles, onfiles;
1154 	NDSLOTTYPE *nmap;
1155 
1156 	FILEDESC_LOCK_ASSERT(fdp, MA_OWNED);
1157 
1158 	KASSERT(fdp->fd_nfiles > 0,
1159 	    ("zero-length file table"));
1160 
1161 	/* compute the size of the new table */
1162 	onfiles = fdp->fd_nfiles;
1163 	nnfiles = NDSLOTS(nfd) * NDENTRIES; /* round up */
1164 	if (nnfiles <= onfiles)
1165 		/* the table is already large enough */
1166 		return;
1167 
1168 	/* allocate a new table and (if required) new bitmaps */
1169 	FILEDESC_UNLOCK(fdp);
1170 	MALLOC(ntable, struct file **, nnfiles * OFILESIZE,
1171 	    M_FILEDESC, M_ZERO | M_WAITOK);
1172 	nfileflags = (char *)&ntable[nnfiles];
1173 	if (NDSLOTS(nnfiles) > NDSLOTS(onfiles))
1174 		MALLOC(nmap, NDSLOTTYPE *, NDSLOTS(nnfiles) * NDSLOTSIZE,
1175 		    M_FILEDESC, M_ZERO | M_WAITOK);
1176 	else
1177 		nmap = NULL;
1178 	FILEDESC_LOCK(fdp);
1179 
1180 	/*
1181 	 * We now have new tables ready to go. Since we dropped the
1182 	 * filedesc lock to call malloc(), watch out for a race.
1183 	 */
1184 	onfiles = fdp->fd_nfiles;
1185 	if (onfiles >= nnfiles) {
1186 		/* we lost the race, but that's OK */
1187 		free(ntable, M_FILEDESC);
1188 		if (nmap != NULL)
1189 			free(nmap, M_FILEDESC);
1190 		return;
1191 	}
1192 	bcopy(fdp->fd_ofiles, ntable, onfiles * sizeof(*ntable));
1193 	bcopy(fdp->fd_ofileflags, nfileflags, onfiles);
1194 	if (onfiles > NDFILE)
1195 		free(fdp->fd_ofiles, M_FILEDESC);
1196 	fdp->fd_ofiles = ntable;
1197 	fdp->fd_ofileflags = nfileflags;
1198 	if (NDSLOTS(nnfiles) > NDSLOTS(onfiles)) {
1199 		bcopy(fdp->fd_map, nmap, NDSLOTS(onfiles) * sizeof(*nmap));
1200 		if (NDSLOTS(onfiles) > NDSLOTS(NDFILE))
1201 			free(fdp->fd_map, M_FILEDESC);
1202 		fdp->fd_map = nmap;
1203 	}
1204 	fdp->fd_nfiles = nnfiles;
1205 }
1206 
1207 /*
1208 * Allocate a file descriptor for the process.
1209 */
1210 int
1211 fdalloc(struct thread *td, int minfd, int *result)
1212 {
1213 	struct proc *p = td->td_proc;
1214 	struct filedesc *fdp = p->p_fd;
1215 	int fd = -1, maxfd;
1216 
1217 	FILEDESC_LOCK_ASSERT(fdp, MA_OWNED);
1218 
1219 	PROC_LOCK(p);
1220 	maxfd = min((int)lim_cur(p, RLIMIT_NOFILE), maxfilesperproc);
1221 	PROC_UNLOCK(p);
1222 
1223 	/*
1224 	 * Search the bitmap for a free descriptor. If none is found, try
1225 	 * to grow the file table. Keep at it until we either get a file
1226 	 * descriptor or run into process or system limits; fdgrowtable()
1227 	 * may drop the filedesc lock, so we're in a race.
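	 * Each pass therefore re-reads fd_nfiles after calling fdgrowtable().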
1228 	 */
1229 	for (;;) {
1230 		fd = fd_first_free(fdp, minfd, fdp->fd_nfiles);
1231 		if (fd >= maxfd)
1232 			return (EMFILE);
1233 		if (fd < fdp->fd_nfiles)
1234 			break;
1235 		fdgrowtable(fdp, min(fdp->fd_nfiles * 2, maxfd));
1236 	}
1237 
1238 	/*
1239 	 * Perform some sanity checks, then mark the file descriptor as
1240 	 * used and return it to the caller.
1241 	 */
1242 	KASSERT(!fdisused(fdp, fd),
1243 	    ("fd_first_free() returned non-free descriptor"));
1244 	KASSERT(fdp->fd_ofiles[fd] == NULL,
1245 	    ("free descriptor isn't"));
1246 	fdp->fd_ofileflags[fd] = 0; /* XXX needed? */
1247 	fdused(fdp, fd);
1248 	fdp->fd_freefile = fd_first_free(fdp, fd, fdp->fd_nfiles);
1249 	*result = fd;
1250 	return (0);
1251 }
1252 
1253 /*
1254 * Check to see whether n user file descriptors
1255 * are available to the process p.
1256 */
1257 int
1258 fdavail(td, n)
1259 	struct thread *td;
1260 	int n;
1261 {
1262 	struct proc *p = td->td_proc;
1263 	struct filedesc *fdp = td->td_proc->p_fd;
1264 	struct file **fpp;
1265 	int i, lim, last;
1266 
1267 	FILEDESC_LOCK_ASSERT(fdp, MA_OWNED);
1268 
1269 	PROC_LOCK(p);
1270 	lim = min((int)lim_cur(p, RLIMIT_NOFILE), maxfilesperproc);
1271 	PROC_UNLOCK(p);
1272 	if ((i = lim - fdp->fd_nfiles) > 0 && (n -= i) <= 0)
1273 		return (1);
1274 	last = min(fdp->fd_nfiles, lim);
1275 	fpp = &fdp->fd_ofiles[fdp->fd_freefile];
1276 	for (i = last - fdp->fd_freefile; --i >= 0; fpp++) {
1277 		if (*fpp == NULL && --n <= 0)
1278 			return (1);
1279 	}
1280 	return (0);
1281 }
1282 
1283 /*
1284 * Create a new open file structure and allocate
1285 * a file descriptor for the process that refers to it.
1286 * We add one reference to the file for the descriptor table
1287 * and one reference for resultfp. This is to prevent us being
1288 * preempted and the entry in the descriptor table closed after
1289 * we release the FILEDESC lock.
1290 */
1291 int
1292 falloc(td, resultfp, resultfd)
1293 	struct thread *td;
1294 	struct file **resultfp;
1295 	int *resultfd;
1296 {
1297 	struct proc *p = td->td_proc;
1298 	struct file *fp, *fq;
1299 	int error, i;
1300 	int maxuserfiles = maxfiles - (maxfiles / 20);
1301 	static struct timeval lastfail;
1302 	static int curfail;
1303 
1304 	fp = uma_zalloc(file_zone, M_WAITOK | M_ZERO);
1305 	sx_xlock(&filelist_lock);
1306 	if ((nfiles >= maxuserfiles && td->td_ucred->cr_ruid != 0)
1307 	   || nfiles >= maxfiles) {
1308 		if (ppsratecheck(&lastfail, &curfail, 1)) {
1309 			printf("kern.maxfiles limit exceeded by uid %i, please see tuning(7).\n",
1310 				td->td_ucred->cr_ruid);
1311 		}
1312 		sx_xunlock(&filelist_lock);
1313 		uma_zfree(file_zone, fp);
1314 		return (ENFILE);
1315 	}
1316 	nfiles++;
1317 
1318 	/*
1319 	 * If the process has file descriptor zero open, add the new file
1320 	 * descriptor to the list of open files at that point, otherwise
1321 	 * put it at the front of the list of open files.
1322 */ 1323 fp->f_mtxp = mtx_pool_alloc(mtxpool_sleep); 1324 fp->f_count = 1; 1325 if (resultfp) 1326 fp->f_count++; 1327 fp->f_cred = crhold(td->td_ucred); 1328 fp->f_ops = &badfileops; 1329 fp->f_data = NULL; 1330 fp->f_vnode = NULL; 1331 FILEDESC_LOCK(p->p_fd); 1332 if ((fq = p->p_fd->fd_ofiles[0])) { 1333 LIST_INSERT_AFTER(fq, fp, f_list); 1334 } else { 1335 LIST_INSERT_HEAD(&filehead, fp, f_list); 1336 } 1337 sx_xunlock(&filelist_lock); 1338 if ((error = fdalloc(td, 0, &i))) { 1339 FILEDESC_UNLOCK(p->p_fd); 1340 fdrop(fp, td); 1341 if (resultfp) 1342 fdrop(fp, td); 1343 return (error); 1344 } 1345 p->p_fd->fd_ofiles[i] = fp; 1346 FILEDESC_UNLOCK(p->p_fd); 1347 if (resultfp) 1348 *resultfp = fp; 1349 if (resultfd) 1350 *resultfd = i; 1351 return (0); 1352 } 1353 1354 /* 1355 * Free a file descriptor. 1356 */ 1357 void 1358 ffree(fp) 1359 struct file *fp; 1360 { 1361 1362 KASSERT(fp->f_count == 0, ("ffree: fp_fcount not 0!")); 1363 sx_xlock(&filelist_lock); 1364 LIST_REMOVE(fp, f_list); 1365 nfiles--; 1366 sx_xunlock(&filelist_lock); 1367 crfree(fp->f_cred); 1368 uma_zfree(file_zone, fp); 1369 } 1370 1371 /* 1372 * Build a new filedesc structure from another. 1373 * Copy the current, root, and jail root vnode references. 1374 */ 1375 struct filedesc * 1376 fdinit(fdp) 1377 struct filedesc *fdp; 1378 { 1379 struct filedesc0 *newfdp; 1380 1381 FILEDESC_LOCK_ASSERT(fdp, MA_OWNED); 1382 1383 FILEDESC_UNLOCK(fdp); 1384 MALLOC(newfdp, struct filedesc0 *, sizeof(struct filedesc0), 1385 M_FILEDESC, M_WAITOK | M_ZERO); 1386 FILEDESC_LOCK(fdp); 1387 mtx_init(&newfdp->fd_fd.fd_mtx, FILEDESC_LOCK_DESC, NULL, MTX_DEF); 1388 newfdp->fd_fd.fd_cdir = fdp->fd_cdir; 1389 if (newfdp->fd_fd.fd_cdir) 1390 VREF(newfdp->fd_fd.fd_cdir); 1391 newfdp->fd_fd.fd_rdir = fdp->fd_rdir; 1392 if (newfdp->fd_fd.fd_rdir) 1393 VREF(newfdp->fd_fd.fd_rdir); 1394 newfdp->fd_fd.fd_jdir = fdp->fd_jdir; 1395 if (newfdp->fd_fd.fd_jdir) 1396 VREF(newfdp->fd_fd.fd_jdir); 1397 1398 /* Create the file descriptor table. */ 1399 newfdp->fd_fd.fd_refcnt = 1; 1400 newfdp->fd_fd.fd_cmask = CMASK; 1401 newfdp->fd_fd.fd_ofiles = newfdp->fd_dfiles; 1402 newfdp->fd_fd.fd_ofileflags = newfdp->fd_dfileflags; 1403 newfdp->fd_fd.fd_nfiles = NDFILE; 1404 newfdp->fd_fd.fd_knlistsize = -1; 1405 newfdp->fd_fd.fd_map = newfdp->fd_dmap; 1406 return (&newfdp->fd_fd); 1407 } 1408 1409 /* 1410 * Share a filedesc structure. 1411 */ 1412 struct filedesc * 1413 fdshare(fdp) 1414 struct filedesc *fdp; 1415 { 1416 FILEDESC_LOCK(fdp); 1417 fdp->fd_refcnt++; 1418 FILEDESC_UNLOCK(fdp); 1419 return (fdp); 1420 } 1421 1422 /* 1423 * Copy a filedesc structure. 1424 * A NULL pointer in returns a NULL reference, this is to ease callers, 1425 * not catch errors. 1426 */ 1427 struct filedesc * 1428 fdcopy(fdp) 1429 struct filedesc *fdp; 1430 { 1431 struct filedesc *newfdp; 1432 int i; 1433 1434 /* Certain daemons might not have file descriptors. 
*/ 1435 if (fdp == NULL) 1436 return (NULL); 1437 1438 FILEDESC_LOCK_ASSERT(fdp, MA_OWNED); 1439 newfdp = fdinit(fdp); 1440 while (fdp->fd_lastfile >= newfdp->fd_nfiles) { 1441 FILEDESC_UNLOCK(fdp); 1442 FILEDESC_LOCK(newfdp); 1443 fdgrowtable(newfdp, fdp->fd_lastfile + 1); 1444 FILEDESC_UNLOCK(newfdp); 1445 FILEDESC_LOCK(fdp); 1446 } 1447 /* copy everything except kqueue descriptors */ 1448 newfdp->fd_freefile = -1; 1449 for (i = 0; i <= fdp->fd_lastfile; ++i) { 1450 if (fdisused(fdp, i) && 1451 fdp->fd_ofiles[i]->f_type != DTYPE_KQUEUE) { 1452 newfdp->fd_ofiles[i] = fdp->fd_ofiles[i]; 1453 newfdp->fd_ofileflags[i] = fdp->fd_ofileflags[i]; 1454 fhold(newfdp->fd_ofiles[i]); 1455 newfdp->fd_lastfile = i; 1456 } else { 1457 if (newfdp->fd_freefile == -1) 1458 newfdp->fd_freefile = i; 1459 } 1460 } 1461 FILEDESC_UNLOCK(fdp); 1462 FILEDESC_LOCK(newfdp); 1463 for (i = 0; i <= newfdp->fd_lastfile; ++i) 1464 if (newfdp->fd_ofiles[i] != NULL) 1465 fdused(newfdp, i); 1466 FILEDESC_UNLOCK(newfdp); 1467 FILEDESC_LOCK(fdp); 1468 if (newfdp->fd_freefile == -1) 1469 newfdp->fd_freefile = i; 1470 newfdp->fd_cmask = fdp->fd_cmask; 1471 return (newfdp); 1472 } 1473 1474 /* A mutex to protect the association between a proc and filedesc. */ 1475 struct mtx fdesc_mtx; 1476 MTX_SYSINIT(fdesc, &fdesc_mtx, "fdesc", MTX_DEF); 1477 1478 /* 1479 * Release a filedesc structure. 1480 */ 1481 void 1482 fdfree(td) 1483 struct thread *td; 1484 { 1485 struct filedesc *fdp; 1486 struct file **fpp; 1487 int i; 1488 struct filedesc_to_leader *fdtol; 1489 struct file *fp; 1490 struct vnode *vp; 1491 struct flock lf; 1492 1493 /* Certain daemons might not have file descriptors. */ 1494 fdp = td->td_proc->p_fd; 1495 if (fdp == NULL) 1496 return; 1497 1498 /* Check for special need to clear POSIX style locks */ 1499 fdtol = td->td_proc->p_fdtol; 1500 if (fdtol != NULL) { 1501 FILEDESC_LOCK(fdp); 1502 KASSERT(fdtol->fdl_refcount > 0, 1503 ("filedesc_to_refcount botch: fdl_refcount=%d", 1504 fdtol->fdl_refcount)); 1505 if (fdtol->fdl_refcount == 1 && 1506 (td->td_proc->p_leader->p_flag & P_ADVLOCK) != 0) { 1507 i = 0; 1508 fpp = fdp->fd_ofiles; 1509 for (i = 0, fpp = fdp->fd_ofiles; 1510 i <= fdp->fd_lastfile; 1511 i++, fpp++) { 1512 if (*fpp == NULL || 1513 (*fpp)->f_type != DTYPE_VNODE) 1514 continue; 1515 fp = *fpp; 1516 fhold(fp); 1517 FILEDESC_UNLOCK(fdp); 1518 lf.l_whence = SEEK_SET; 1519 lf.l_start = 0; 1520 lf.l_len = 0; 1521 lf.l_type = F_UNLCK; 1522 vp = fp->f_vnode; 1523 (void) VOP_ADVLOCK(vp, 1524 (caddr_t)td->td_proc-> 1525 p_leader, 1526 F_UNLCK, 1527 &lf, 1528 F_POSIX); 1529 FILEDESC_LOCK(fdp); 1530 fdrop(fp, td); 1531 fpp = fdp->fd_ofiles + i; 1532 } 1533 } 1534 retry: 1535 if (fdtol->fdl_refcount == 1) { 1536 if (fdp->fd_holdleaderscount > 0 && 1537 (td->td_proc->p_leader->p_flag & P_ADVLOCK) != 0) { 1538 /* 1539 * close() or do_dup() has cleared a reference 1540 * in a shared file descriptor table. 1541 */ 1542 fdp->fd_holdleaderswakeup = 1; 1543 msleep(&fdp->fd_holdleaderscount, &fdp->fd_mtx, 1544 PLOCK, "fdlhold", 0); 1545 goto retry; 1546 } 1547 if (fdtol->fdl_holdcount > 0) { 1548 /* 1549 * Ensure that fdtol->fdl_leader 1550 * remains valid in closef(). 
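				 * closef() wakes us through fdl_wakeup once
				 * fdl_holdcount drops to zero.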
1551 */ 1552 fdtol->fdl_wakeup = 1; 1553 msleep(fdtol, &fdp->fd_mtx, 1554 PLOCK, "fdlhold", 0); 1555 goto retry; 1556 } 1557 } 1558 fdtol->fdl_refcount--; 1559 if (fdtol->fdl_refcount == 0 && 1560 fdtol->fdl_holdcount == 0) { 1561 fdtol->fdl_next->fdl_prev = fdtol->fdl_prev; 1562 fdtol->fdl_prev->fdl_next = fdtol->fdl_next; 1563 } else 1564 fdtol = NULL; 1565 td->td_proc->p_fdtol = NULL; 1566 FILEDESC_UNLOCK(fdp); 1567 if (fdtol != NULL) 1568 FREE(fdtol, M_FILEDESC_TO_LEADER); 1569 } 1570 FILEDESC_LOCK(fdp); 1571 if (--fdp->fd_refcnt > 0) { 1572 FILEDESC_UNLOCK(fdp); 1573 return; 1574 } 1575 1576 /* 1577 * We are the last reference to the structure, so we can 1578 * safely assume it will not change out from under us. 1579 */ 1580 FILEDESC_UNLOCK(fdp); 1581 fpp = fdp->fd_ofiles; 1582 for (i = fdp->fd_lastfile; i-- >= 0; fpp++) { 1583 if (*fpp) 1584 (void) closef(*fpp, td); 1585 } 1586 1587 /* XXX This should happen earlier. */ 1588 mtx_lock(&fdesc_mtx); 1589 td->td_proc->p_fd = NULL; 1590 mtx_unlock(&fdesc_mtx); 1591 1592 if (fdp->fd_nfiles > NDFILE) 1593 FREE(fdp->fd_ofiles, M_FILEDESC); 1594 if (NDSLOTS(fdp->fd_nfiles) > NDSLOTS(NDFILE)) 1595 FREE(fdp->fd_map, M_FILEDESC); 1596 if (fdp->fd_cdir) 1597 vrele(fdp->fd_cdir); 1598 if (fdp->fd_rdir) 1599 vrele(fdp->fd_rdir); 1600 if (fdp->fd_jdir) 1601 vrele(fdp->fd_jdir); 1602 if (fdp->fd_knlist) 1603 FREE(fdp->fd_knlist, M_KQUEUE); 1604 if (fdp->fd_knhash) 1605 FREE(fdp->fd_knhash, M_KQUEUE); 1606 mtx_destroy(&fdp->fd_mtx); 1607 FREE(fdp, M_FILEDESC); 1608 } 1609 1610 /* 1611 * For setugid programs, we don't want to people to use that setugidness 1612 * to generate error messages which write to a file which otherwise would 1613 * otherwise be off-limits to the process. We check for filesystems where 1614 * the vnode can change out from under us after execve (like [lin]procfs). 1615 * 1616 * Since setugidsafety calls this only for fd 0, 1 and 2, this check is 1617 * sufficient. We also don't for check setugidness since we know we are. 1618 */ 1619 static int 1620 is_unsafe(struct file *fp) 1621 { 1622 if (fp->f_type == DTYPE_VNODE) { 1623 struct vnode *vp = fp->f_vnode; 1624 1625 if ((vp->v_vflag & VV_PROCDEP) != 0) 1626 return (1); 1627 } 1628 return (0); 1629 } 1630 1631 /* 1632 * Make this setguid thing safe, if at all possible. 1633 */ 1634 void 1635 setugidsafety(td) 1636 struct thread *td; 1637 { 1638 struct filedesc *fdp; 1639 int i; 1640 1641 /* Certain daemons might not have file descriptors. */ 1642 fdp = td->td_proc->p_fd; 1643 if (fdp == NULL) 1644 return; 1645 1646 /* 1647 * Note: fdp->fd_ofiles may be reallocated out from under us while 1648 * we are blocked in a close. Be careful! 1649 */ 1650 FILEDESC_LOCK(fdp); 1651 for (i = 0; i <= fdp->fd_lastfile; i++) { 1652 if (i > 2) 1653 break; 1654 if (fdp->fd_ofiles[i] && is_unsafe(fdp->fd_ofiles[i])) { 1655 struct file *fp; 1656 1657 if (i < fdp->fd_knlistsize) { 1658 FILEDESC_UNLOCK(fdp); 1659 knote_fdclose(td, i); 1660 FILEDESC_LOCK(fdp); 1661 } 1662 /* 1663 * NULL-out descriptor prior to close to avoid 1664 * a race while close blocks. 1665 */ 1666 fp = fdp->fd_ofiles[i]; 1667 fdp->fd_ofiles[i] = NULL; 1668 fdp->fd_ofileflags[i] = 0; 1669 fdunused(fdp, i); 1670 FILEDESC_UNLOCK(fdp); 1671 (void) closef(fp, td); 1672 FILEDESC_LOCK(fdp); 1673 } 1674 } 1675 FILEDESC_UNLOCK(fdp); 1676 } 1677 1678 /* 1679 * Close any files on exec? 
1680 */ 1681 void 1682 fdcloseexec(td) 1683 struct thread *td; 1684 { 1685 struct filedesc *fdp; 1686 int i; 1687 1688 /* Certain daemons might not have file descriptors. */ 1689 fdp = td->td_proc->p_fd; 1690 if (fdp == NULL) 1691 return; 1692 1693 FILEDESC_LOCK(fdp); 1694 1695 /* 1696 * We cannot cache fd_ofiles or fd_ofileflags since operations 1697 * may block and rip them out from under us. 1698 */ 1699 for (i = 0; i <= fdp->fd_lastfile; i++) { 1700 if (fdp->fd_ofiles[i] != NULL && 1701 (fdp->fd_ofileflags[i] & UF_EXCLOSE)) { 1702 struct file *fp; 1703 1704 if (i < fdp->fd_knlistsize) { 1705 FILEDESC_UNLOCK(fdp); 1706 knote_fdclose(td, i); 1707 FILEDESC_LOCK(fdp); 1708 } 1709 /* 1710 * NULL-out descriptor prior to close to avoid 1711 * a race while close blocks. 1712 */ 1713 fp = fdp->fd_ofiles[i]; 1714 fdp->fd_ofiles[i] = NULL; 1715 fdp->fd_ofileflags[i] = 0; 1716 fdunused(fdp, i); 1717 FILEDESC_UNLOCK(fdp); 1718 (void) closef(fp, td); 1719 FILEDESC_LOCK(fdp); 1720 } 1721 } 1722 FILEDESC_UNLOCK(fdp); 1723 } 1724 1725 /* 1726 * It is unsafe for set[ug]id processes to be started with file 1727 * descriptors 0..2 closed, as these descriptors are given implicit 1728 * significance in the Standard C library. fdcheckstd() will create a 1729 * descriptor referencing /dev/null for each of stdin, stdout, and 1730 * stderr that is not already open. 1731 */ 1732 int 1733 fdcheckstd(td) 1734 struct thread *td; 1735 { 1736 struct nameidata nd; 1737 struct filedesc *fdp; 1738 struct file *fp; 1739 register_t retval; 1740 int fd, i, error, flags, devnull; 1741 1742 fdp = td->td_proc->p_fd; 1743 if (fdp == NULL) 1744 return (0); 1745 KASSERT(fdp->fd_refcnt == 1, ("the fdtable should not be shared")); 1746 devnull = -1; 1747 error = 0; 1748 for (i = 0; i < 3; i++) { 1749 if (fdp->fd_ofiles[i] != NULL) 1750 continue; 1751 if (devnull < 0) { 1752 error = falloc(td, &fp, &fd); 1753 if (error != 0) 1754 break; 1755 /* Note extra ref on `fp' held for us by falloc(). */ 1756 KASSERT(fd == i, ("oof, we didn't get our fd")); 1757 NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, "/dev/null", 1758 td); 1759 flags = FREAD | FWRITE; 1760 error = vn_open(&nd, &flags, 0, -1); 1761 if (error != 0) { 1762 /* 1763 * Someone may have closed the entry in the 1764 * file descriptor table, so check it hasn't 1765 * changed before dropping the reference count. 1766 */ 1767 FILEDESC_LOCK(fdp); 1768 KASSERT(fdp->fd_ofiles[fd] == fp, 1769 ("table not shared, how did it change?")); 1770 fdp->fd_ofiles[fd] = NULL; 1771 fdunused(fdp, fd); 1772 FILEDESC_UNLOCK(fdp); 1773 fdrop(fp, td); 1774 fdrop(fp, td); 1775 break; 1776 } 1777 NDFREE(&nd, NDF_ONLY_PNBUF); 1778 fp->f_vnode = nd.ni_vp; 1779 fp->f_data = nd.ni_vp; 1780 fp->f_flag = flags; 1781 fp->f_ops = &vnops; 1782 fp->f_type = DTYPE_VNODE; 1783 VOP_UNLOCK(nd.ni_vp, 0, td); 1784 devnull = fd; 1785 fdrop(fp, td); 1786 } else { 1787 error = do_dup(td, DUP_FIXED, devnull, i, &retval); 1788 if (error != 0) 1789 break; 1790 } 1791 } 1792 return (error); 1793 } 1794 1795 /* 1796 * Internal form of close. 1797 * Decrement reference count on file structure. 1798 * Note: td may be NULL when closing a file 1799 * that was being passed in a message. 1800 */ 1801 int 1802 closef(fp, td) 1803 struct file *fp; 1804 struct thread *td; 1805 { 1806 struct vnode *vp; 1807 struct flock lf; 1808 struct filedesc_to_leader *fdtol; 1809 struct filedesc *fdp; 1810 1811 if (fp == NULL) 1812 return (0); 1813 /* 1814 * POSIX record locking dictates that any close releases ALL 1815 * locks owned by this process. 
This is handled by setting
1816 	 * a flag in the unlock to free ONLY locks obeying POSIX
1817 	 * semantics, and not to free BSD-style file locks.
1818 	 * If the descriptor was in a message, POSIX-style locks
1819 	 * aren't passed with the descriptor.
1820 	 */
1821 	if (td != NULL &&
1822 	    fp->f_type == DTYPE_VNODE) {
1823 		if ((td->td_proc->p_leader->p_flag & P_ADVLOCK) != 0) {
1824 			lf.l_whence = SEEK_SET;
1825 			lf.l_start = 0;
1826 			lf.l_len = 0;
1827 			lf.l_type = F_UNLCK;
1828 			vp = fp->f_vnode;
1829 			(void) VOP_ADVLOCK(vp, (caddr_t)td->td_proc->p_leader,
1830 					   F_UNLCK, &lf, F_POSIX);
1831 		}
1832 		fdtol = td->td_proc->p_fdtol;
1833 		if (fdtol != NULL) {
1834 			/*
1835 			 * Handle special case where file descriptor table
1836 			 * is shared between multiple process leaders.
1837 			 */
1838 			fdp = td->td_proc->p_fd;
1839 			FILEDESC_LOCK(fdp);
1840 			for (fdtol = fdtol->fdl_next;
1841 			     fdtol != td->td_proc->p_fdtol;
1842 			     fdtol = fdtol->fdl_next) {
1843 				if ((fdtol->fdl_leader->p_flag &
1844 				     P_ADVLOCK) == 0)
1845 					continue;
1846 				fdtol->fdl_holdcount++;
1847 				FILEDESC_UNLOCK(fdp);
1848 				lf.l_whence = SEEK_SET;
1849 				lf.l_start = 0;
1850 				lf.l_len = 0;
1851 				lf.l_type = F_UNLCK;
1852 				vp = fp->f_vnode;
1853 				(void) VOP_ADVLOCK(vp,
1854 						   (caddr_t)fdtol->fdl_leader,
1855 						   F_UNLCK, &lf, F_POSIX);
1856 				FILEDESC_LOCK(fdp);
1857 				fdtol->fdl_holdcount--;
1858 				if (fdtol->fdl_holdcount == 0 &&
1859 				    fdtol->fdl_wakeup != 0) {
1860 					fdtol->fdl_wakeup = 0;
1861 					wakeup(fdtol);
1862 				}
1863 			}
1864 			FILEDESC_UNLOCK(fdp);
1865 		}
1866 	}
1867 	return (fdrop(fp, td));
1868 }
1869 
1870 /*
1871 * Drop reference on struct file passed in, may call closef if the
1872 * reference hits zero.
1873 */
1874 int
1875 fdrop(fp, td)
1876 	struct file *fp;
1877 	struct thread *td;
1878 {
1879 
1880 	FILE_LOCK(fp);
1881 	return (fdrop_locked(fp, td));
1882 }
1883 
1884 /*
1885 * Extract the file pointer associated with the specified descriptor for
1886 * the current user process.
1887 *
1888 * If the descriptor doesn't exist, EBADF is returned.
1889 *
1890 * If the descriptor exists but doesn't match 'flags' then
1891 * return EBADF for read attempts and EINVAL for write attempts.
1892 *
1893 * If 'hold' is set (non-zero) the file's refcount will be bumped on return.
1894 * It should be dropped with fdrop().
1895 * If it is not set, then the refcount will not be bumped; however, the
1896 * thread's filedesc struct will be returned locked (for fgetsock).
1897 *
1898 * If an error occurred the non-zero error is returned and *fpp is set to NULL.
1899 * Otherwise *fpp is set and zero is returned.
1900 */
1901 static __inline int
1902 _fget(struct thread *td, int fd, struct file **fpp, int flags, int hold)
1903 {
1904 	struct filedesc *fdp;
1905 	struct file *fp;
1906 
1907 	*fpp = NULL;
1908 	if (td == NULL || (fdp = td->td_proc->p_fd) == NULL)
1909 		return (EBADF);
1910 	FILEDESC_LOCK(fdp);
1911 	if ((fp = fget_locked(fdp, fd)) == NULL || fp->f_ops == &badfileops) {
1912 		FILEDESC_UNLOCK(fdp);
1913 		return (EBADF);
1914 	}
1915 
1916 	/*
1917 	 * Note: FREAD failures return EBADF to maintain backwards
1918 	 * compatibility with what routines returned before.
1919 	 *
1920 	 * Only one flag, or 0, may be specified.
1921 */ 1922 if (flags == FREAD && (fp->f_flag & FREAD) == 0) { 1923 FILEDESC_UNLOCK(fdp); 1924 return (EBADF); 1925 } 1926 if (flags == FWRITE && (fp->f_flag & FWRITE) == 0) { 1927 FILEDESC_UNLOCK(fdp); 1928 return (EINVAL); 1929 } 1930 if (hold) { 1931 fhold(fp); 1932 FILEDESC_UNLOCK(fdp); 1933 } 1934 *fpp = fp; 1935 return (0); 1936 } 1937 1938 int 1939 fget(struct thread *td, int fd, struct file **fpp) 1940 { 1941 1942 return(_fget(td, fd, fpp, 0, 1)); 1943 } 1944 1945 int 1946 fget_read(struct thread *td, int fd, struct file **fpp) 1947 { 1948 1949 return(_fget(td, fd, fpp, FREAD, 1)); 1950 } 1951 1952 int 1953 fget_write(struct thread *td, int fd, struct file **fpp) 1954 { 1955 1956 return(_fget(td, fd, fpp, FWRITE, 1)); 1957 } 1958 1959 /* 1960 * Like fget() but loads the underlying vnode, or returns an error if 1961 * the descriptor does not represent a vnode. Note that pipes use vnodes 1962 * but never have VM objects (so VOP_GETVOBJECT() calls will return an 1963 * error). The returned vnode will be vref()d. 1964 */ 1965 static __inline int 1966 _fgetvp(struct thread *td, int fd, struct vnode **vpp, int flags) 1967 { 1968 struct file *fp; 1969 int error; 1970 1971 *vpp = NULL; 1972 if ((error = _fget(td, fd, &fp, 0, 0)) != 0) 1973 return (error); 1974 if (fp->f_vnode == NULL) { 1975 error = EINVAL; 1976 } else { 1977 *vpp = fp->f_vnode; 1978 vref(*vpp); 1979 } 1980 FILEDESC_UNLOCK(td->td_proc->p_fd); 1981 return (error); 1982 } 1983 1984 int 1985 fgetvp(struct thread *td, int fd, struct vnode **vpp) 1986 { 1987 1988 return (_fgetvp(td, fd, vpp, 0)); 1989 } 1990 1991 int 1992 fgetvp_read(struct thread *td, int fd, struct vnode **vpp) 1993 { 1994 1995 return (_fgetvp(td, fd, vpp, FREAD)); 1996 } 1997 1998 int 1999 fgetvp_write(struct thread *td, int fd, struct vnode **vpp) 2000 { 2001 2002 return (_fgetvp(td, fd, vpp, FWRITE)); 2003 } 2004 2005 /* 2006 * Like fget() but loads the underlying socket, or returns an error if 2007 * the descriptor does not represent a socket. 2008 * 2009 * We bump the ref count on the returned socket. XXX Also obtain the SX 2010 * lock in the future. 2011 */ 2012 int 2013 fgetsock(struct thread *td, int fd, struct socket **spp, u_int *fflagp) 2014 { 2015 struct file *fp; 2016 int error; 2017 2018 *spp = NULL; 2019 if (fflagp != NULL) 2020 *fflagp = 0; 2021 if ((error = _fget(td, fd, &fp, 0, 0)) != 0) 2022 return (error); 2023 if (fp->f_type != DTYPE_SOCKET) { 2024 error = ENOTSOCK; 2025 } else { 2026 *spp = fp->f_data; 2027 if (fflagp) 2028 *fflagp = fp->f_flag; 2029 SOCK_LOCK(*spp); 2030 soref(*spp); 2031 SOCK_UNLOCK(*spp); 2032 } 2033 FILEDESC_UNLOCK(td->td_proc->p_fd); 2034 return (error); 2035 } 2036 2037 /* 2038 * Drop the reference count on the the socket and XXX release the SX lock in 2039 * the future. The last reference closes the socket. 2040 */ 2041 void 2042 fputsock(struct socket *so) 2043 { 2044 2045 NET_ASSERT_GIANT(); 2046 SOCK_LOCK(so); 2047 sorele(so); 2048 } 2049 2050 /* 2051 * Drop reference on struct file passed in, may call closef if the 2052 * reference hits zero. 2053 * Expects struct file locked, and will unlock it. 2054 */ 2055 int 2056 fdrop_locked(fp, td) 2057 struct file *fp; 2058 struct thread *td; 2059 { 2060 int error; 2061 2062 FILE_LOCK_ASSERT(fp, MA_OWNED); 2063 2064 if (--fp->f_count > 0) { 2065 FILE_UNLOCK(fp); 2066 return (0); 2067 } 2068 /* We have the last ref so we can proceed without the file lock. 
*/ 2069 FILE_UNLOCK(fp); 2070 if (fp->f_count < 0) 2071 panic("fdrop: count < 0"); 2072 mtx_lock(&Giant); 2073 if (fp->f_ops != &badfileops) 2074 error = fo_close(fp, td); 2075 else 2076 error = 0; 2077 ffree(fp); 2078 mtx_unlock(&Giant); 2079 return (error); 2080 } 2081 2082 /* 2083 * Apply an advisory lock on a file descriptor. 2084 * 2085 * Just attempt to get a record lock of the requested type on 2086 * the entire file (l_whence = SEEK_SET, l_start = 0, l_len = 0). 2087 */ 2088 #ifndef _SYS_SYSPROTO_H_ 2089 struct flock_args { 2090 int fd; 2091 int how; 2092 }; 2093 #endif 2094 /* 2095 * MPSAFE 2096 */ 2097 /* ARGSUSED */ 2098 int 2099 flock(td, uap) 2100 struct thread *td; 2101 struct flock_args *uap; 2102 { 2103 struct file *fp; 2104 struct vnode *vp; 2105 struct flock lf; 2106 int error; 2107 2108 if ((error = fget(td, uap->fd, &fp)) != 0) 2109 return (error); 2110 if (fp->f_type != DTYPE_VNODE) { 2111 fdrop(fp, td); 2112 return (EOPNOTSUPP); 2113 } 2114 2115 mtx_lock(&Giant); 2116 vp = fp->f_vnode; 2117 lf.l_whence = SEEK_SET; 2118 lf.l_start = 0; 2119 lf.l_len = 0; 2120 if (uap->how & LOCK_UN) { 2121 lf.l_type = F_UNLCK; 2122 FILE_LOCK(fp); 2123 fp->f_flag &= ~FHASLOCK; 2124 FILE_UNLOCK(fp); 2125 error = VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, F_FLOCK); 2126 goto done2; 2127 } 2128 if (uap->how & LOCK_EX) 2129 lf.l_type = F_WRLCK; 2130 else if (uap->how & LOCK_SH) 2131 lf.l_type = F_RDLCK; 2132 else { 2133 error = EBADF; 2134 goto done2; 2135 } 2136 FILE_LOCK(fp); 2137 fp->f_flag |= FHASLOCK; 2138 FILE_UNLOCK(fp); 2139 error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, 2140 (uap->how & LOCK_NB) ? F_FLOCK : F_FLOCK | F_WAIT); 2141 done2: 2142 fdrop(fp, td); 2143 mtx_unlock(&Giant); 2144 return (error); 2145 } 2146 2147 /* 2148 * File Descriptor pseudo-device driver (/dev/fd/). 2149 * 2150 * Opening minor device N dup()s the file (if any) connected to file 2151 * descriptor N belonging to the calling process. Note that this driver 2152 * consists of only the ``open()'' routine, because all subsequent 2153 * references to this file will be direct to the other driver. 2154 */ 2155 /* ARGSUSED */ 2156 static int 2157 fdopen(dev, mode, type, td) 2158 struct cdev *dev; 2159 int mode, type; 2160 struct thread *td; 2161 { 2162 2163 /* 2164 * XXX Kludge: set curthread->td_dupfd to contain the value of the 2165 * the file descriptor being sought for duplication. The error 2166 * return ensures that the vnode for this device will be released 2167 * by vn_open. Open will detect this special error and take the 2168 * actions in dupfdopen below. Other callers of vn_open or VOP_OPEN 2169 * will simply report the error. 2170 */ 2171 td->td_dupfd = dev2unit(dev); 2172 return (ENODEV); 2173 } 2174 2175 /* 2176 * Duplicate the specified descriptor to a free descriptor. 2177 */ 2178 int 2179 dupfdopen(td, fdp, indx, dfd, mode, error) 2180 struct thread *td; 2181 struct filedesc *fdp; 2182 int indx, dfd; 2183 int mode; 2184 int error; 2185 { 2186 struct file *wfp; 2187 struct file *fp; 2188 2189 /* 2190 * If the to-be-dup'd fd number is greater than the allowed number 2191 * of file descriptors, or the fd to be dup'd has already been 2192 * closed, then reject. 2193 */ 2194 FILEDESC_LOCK(fdp); 2195 if (dfd < 0 || dfd >= fdp->fd_nfiles || 2196 (wfp = fdp->fd_ofiles[dfd]) == NULL) { 2197 FILEDESC_UNLOCK(fdp); 2198 return (EBADF); 2199 } 2200 2201 /* 2202 * There are two cases of interest here. 2203 * 2204 * For ENODEV simply dup (dfd) to file descriptor 2205 * (indx) and return. 
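 * (ENODEV is the error fdopen() above returns for /dev/fd opens.)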
2206 * 2207 * For ENXIO steal away the file structure from (dfd) and 2208 * store it in (indx). (dfd) is effectively closed by 2209 * this operation. 2210 * 2211 * Any other error code is just returned. 2212 */ 2213 switch (error) { 2214 case ENODEV: 2215 /* 2216 * Check that the mode the file is being opened for is a 2217 * subset of the mode of the existing descriptor. 2218 */ 2219 FILE_LOCK(wfp); 2220 if (((mode & (FREAD|FWRITE)) | wfp->f_flag) != wfp->f_flag) { 2221 FILE_UNLOCK(wfp); 2222 FILEDESC_UNLOCK(fdp); 2223 return (EACCES); 2224 } 2225 fp = fdp->fd_ofiles[indx]; 2226 fdp->fd_ofiles[indx] = wfp; 2227 fdp->fd_ofileflags[indx] = fdp->fd_ofileflags[dfd]; 2228 if (fp == NULL) 2229 fdused(fdp, indx); 2230 fhold_locked(wfp); 2231 FILE_UNLOCK(wfp); 2232 if (fp != NULL) 2233 FILE_LOCK(fp); 2234 FILEDESC_UNLOCK(fdp); 2235 /* 2236 * We now own the reference to fp that the ofiles[] array 2237 * used to own. Release it. 2238 */ 2239 if (fp != NULL) 2240 fdrop_locked(fp, td); 2241 return (0); 2242 2243 case ENXIO: 2244 /* 2245 * Steal away the file pointer from dfd and stuff it into indx. 2246 */ 2247 fp = fdp->fd_ofiles[indx]; 2248 fdp->fd_ofiles[indx] = fdp->fd_ofiles[dfd]; 2249 fdp->fd_ofiles[dfd] = NULL; 2250 fdp->fd_ofileflags[indx] = fdp->fd_ofileflags[dfd]; 2251 fdp->fd_ofileflags[dfd] = 0; 2252 fdunused(fdp, dfd); 2253 if (fp == NULL) 2254 fdused(fdp, indx); 2255 if (fp != NULL) 2256 FILE_LOCK(fp); 2257 FILEDESC_UNLOCK(fdp); 2258 2259 /* 2260 * we now own the reference to fp that the ofiles[] array 2261 * used to own. Release it. 2262 */ 2263 if (fp != NULL) 2264 fdrop_locked(fp, td); 2265 return (0); 2266 2267 default: 2268 FILEDESC_UNLOCK(fdp); 2269 return (error); 2270 } 2271 /* NOTREACHED */ 2272 } 2273 2274 struct filedesc_to_leader * 2275 filedesc_to_leader_alloc(struct filedesc_to_leader *old, 2276 struct filedesc *fdp, 2277 struct proc *leader) 2278 { 2279 struct filedesc_to_leader *fdtol; 2280 2281 MALLOC(fdtol, struct filedesc_to_leader *, 2282 sizeof(struct filedesc_to_leader), 2283 M_FILEDESC_TO_LEADER, 2284 M_WAITOK); 2285 fdtol->fdl_refcount = 1; 2286 fdtol->fdl_holdcount = 0; 2287 fdtol->fdl_wakeup = 0; 2288 fdtol->fdl_leader = leader; 2289 if (old != NULL) { 2290 FILEDESC_LOCK(fdp); 2291 fdtol->fdl_next = old->fdl_next; 2292 fdtol->fdl_prev = old; 2293 old->fdl_next = fdtol; 2294 fdtol->fdl_next->fdl_prev = fdtol; 2295 FILEDESC_UNLOCK(fdp); 2296 } else { 2297 fdtol->fdl_next = fdtol; 2298 fdtol->fdl_prev = fdtol; 2299 } 2300 return (fdtol); 2301 } 2302 2303 /* 2304 * Get file structures. 2305 */ 2306 static int 2307 sysctl_kern_file(SYSCTL_HANDLER_ARGS) 2308 { 2309 struct xfile xf; 2310 struct filedesc *fdp; 2311 struct file *fp; 2312 struct proc *p; 2313 int error, n; 2314 2315 /* 2316 * Note: because the number of file descriptors is calculated 2317 * in different ways for sizing vs returning the data, 2318 * there is information leakage from the first loop. However, 2319 * it is of a similar order of magnitude to the leakage from 2320 * global system statistics such as kern.openfiles. 2321 */ 2322 error = sysctl_wire_old_buffer(req, 0); 2323 if (error != 0) 2324 return (error); 2325 if (req->oldptr == NULL) { 2326 n = 16; /* A slight overestimate. */ 2327 sx_slock(&filelist_lock); 2328 LIST_FOREACH(fp, &filehead, f_list) { 2329 /* 2330 * We should grab the lock, but this is an 2331 * estimate, so does it really matter? 
2332 */ 2333 /* mtx_lock(fp->f_mtxp); */ 2334 n += fp->f_count; 2335 /* mtx_unlock(f->f_mtxp); */ 2336 } 2337 sx_sunlock(&filelist_lock); 2338 return (SYSCTL_OUT(req, 0, n * sizeof(xf))); 2339 } 2340 error = 0; 2341 bzero(&xf, sizeof(xf)); 2342 xf.xf_size = sizeof(xf); 2343 sx_slock(&allproc_lock); 2344 LIST_FOREACH(p, &allproc, p_list) { 2345 PROC_LOCK(p); 2346 if (p_cansee(req->td, p) != 0) { 2347 PROC_UNLOCK(p); 2348 continue; 2349 } 2350 xf.xf_pid = p->p_pid; 2351 xf.xf_uid = p->p_ucred->cr_uid; 2352 PROC_UNLOCK(p); 2353 mtx_lock(&fdesc_mtx); 2354 if ((fdp = p->p_fd) == NULL) { 2355 mtx_unlock(&fdesc_mtx); 2356 continue; 2357 } 2358 FILEDESC_LOCK(fdp); 2359 for (n = 0; n < fdp->fd_nfiles; ++n) { 2360 if ((fp = fdp->fd_ofiles[n]) == NULL) 2361 continue; 2362 xf.xf_fd = n; 2363 xf.xf_file = fp; 2364 xf.xf_data = fp->f_data; 2365 xf.xf_vnode = fp->f_vnode; 2366 xf.xf_type = fp->f_type; 2367 xf.xf_count = fp->f_count; 2368 xf.xf_msgcount = fp->f_msgcount; 2369 xf.xf_offset = fp->f_offset; 2370 xf.xf_flag = fp->f_flag; 2371 error = SYSCTL_OUT(req, &xf, sizeof(xf)); 2372 if (error) 2373 break; 2374 } 2375 FILEDESC_UNLOCK(fdp); 2376 mtx_unlock(&fdesc_mtx); 2377 if (error) 2378 break; 2379 } 2380 sx_sunlock(&allproc_lock); 2381 return (error); 2382 } 2383 2384 SYSCTL_PROC(_kern, KERN_FILE, file, CTLTYPE_OPAQUE|CTLFLAG_RD, 2385 0, 0, sysctl_kern_file, "S,xfile", "Entire file table"); 2386 2387 SYSCTL_INT(_kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW, 2388 &maxfilesperproc, 0, "Maximum files allowed open per process"); 2389 2390 SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles, CTLFLAG_RW, 2391 &maxfiles, 0, "Maximum number of files"); 2392 2393 SYSCTL_INT(_kern, OID_AUTO, openfiles, CTLFLAG_RD, 2394 &nfiles, 0, "System-wide number of open files"); 2395 2396 static void 2397 fildesc_drvinit(void *unused) 2398 { 2399 struct cdev *dev; 2400 2401 dev = make_dev(&fildesc_cdevsw, 0, UID_ROOT, GID_WHEEL, 0666, "fd/0"); 2402 make_dev_alias(dev, "stdin"); 2403 dev = make_dev(&fildesc_cdevsw, 1, UID_ROOT, GID_WHEEL, 0666, "fd/1"); 2404 make_dev_alias(dev, "stdout"); 2405 dev = make_dev(&fildesc_cdevsw, 2, UID_ROOT, GID_WHEEL, 0666, "fd/2"); 2406 make_dev_alias(dev, "stderr"); 2407 } 2408 2409 static fo_rdwr_t badfo_readwrite; 2410 static fo_ioctl_t badfo_ioctl; 2411 static fo_poll_t badfo_poll; 2412 static fo_kqfilter_t badfo_kqfilter; 2413 static fo_stat_t badfo_stat; 2414 static fo_close_t badfo_close; 2415 2416 struct fileops badfileops = { 2417 .fo_read = badfo_readwrite, 2418 .fo_write = badfo_readwrite, 2419 .fo_ioctl = badfo_ioctl, 2420 .fo_poll = badfo_poll, 2421 .fo_kqfilter = badfo_kqfilter, 2422 .fo_stat = badfo_stat, 2423 .fo_close = badfo_close, 2424 }; 2425 2426 static int 2427 badfo_readwrite(fp, uio, active_cred, flags, td) 2428 struct file *fp; 2429 struct uio *uio; 2430 struct ucred *active_cred; 2431 struct thread *td; 2432 int flags; 2433 { 2434 2435 return (EBADF); 2436 } 2437 2438 static int 2439 badfo_ioctl(fp, com, data, active_cred, td) 2440 struct file *fp; 2441 u_long com; 2442 void *data; 2443 struct ucred *active_cred; 2444 struct thread *td; 2445 { 2446 2447 return (EBADF); 2448 } 2449 2450 static int 2451 badfo_poll(fp, events, active_cred, td) 2452 struct file *fp; 2453 int events; 2454 struct ucred *active_cred; 2455 struct thread *td; 2456 { 2457 2458 return (0); 2459 } 2460 2461 static int 2462 badfo_kqfilter(fp, kn) 2463 struct file *fp; 2464 struct knote *kn; 2465 { 2466 2467 return (0); 2468 } 2469 2470 static int 2471 badfo_stat(fp, sb, active_cred, td) 2472 
static void
fildesc_drvinit(void *unused)
{
	struct cdev *dev;

	dev = make_dev(&fildesc_cdevsw, 0, UID_ROOT, GID_WHEEL, 0666, "fd/0");
	make_dev_alias(dev, "stdin");
	dev = make_dev(&fildesc_cdevsw, 1, UID_ROOT, GID_WHEEL, 0666, "fd/1");
	make_dev_alias(dev, "stdout");
	dev = make_dev(&fildesc_cdevsw, 2, UID_ROOT, GID_WHEEL, 0666, "fd/2");
	make_dev_alias(dev, "stderr");
}

static fo_rdwr_t	badfo_readwrite;
static fo_ioctl_t	badfo_ioctl;
static fo_poll_t	badfo_poll;
static fo_kqfilter_t	badfo_kqfilter;
static fo_stat_t	badfo_stat;
static fo_close_t	badfo_close;

struct fileops badfileops = {
	.fo_read = badfo_readwrite,
	.fo_write = badfo_readwrite,
	.fo_ioctl = badfo_ioctl,
	.fo_poll = badfo_poll,
	.fo_kqfilter = badfo_kqfilter,
	.fo_stat = badfo_stat,
	.fo_close = badfo_close,
};

static int
badfo_readwrite(fp, uio, active_cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *active_cred;
	struct thread *td;
	int flags;
{

	return (EBADF);
}

static int
badfo_ioctl(fp, com, data, active_cred, td)
	struct file *fp;
	u_long com;
	void *data;
	struct ucred *active_cred;
	struct thread *td;
{

	return (EBADF);
}

static int
badfo_poll(fp, events, active_cred, td)
	struct file *fp;
	int events;
	struct ucred *active_cred;
	struct thread *td;
{

	return (0);
}

static int
badfo_kqfilter(fp, kn)
	struct file *fp;
	struct knote *kn;
{

	return (0);
}

static int
badfo_stat(fp, sb, active_cred, td)
	struct file *fp;
	struct stat *sb;
	struct ucred *active_cred;
	struct thread *td;
{

	return (EBADF);
}

static int
badfo_close(fp, td)
	struct file *fp;
	struct thread *td;
{

	return (EBADF);
}

SYSINIT(fildescdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE + CDEV_MAJOR,
    fildesc_drvinit, NULL)

static void filelistinit(void *);
SYSINIT(select, SI_SUB_LOCK, SI_ORDER_FIRST, filelistinit, NULL)

/* ARGSUSED*/
static void
filelistinit(dummy)
	void *dummy;
{

	file_zone = uma_zcreate("Files", sizeof(struct file), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
	sx_init(&filelist_lock, "filelist lock");
	mtx_init(&sigio_lock, "sigio lock", NULL, MTX_DEF);
}
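
/*
 * The badfileops table above gives a not-yet-initialized struct file a
 * complete set of handlers that simply fail, which is why fdrop above only
 * calls fo_close() when f_ops != &badfileops.  A standalone sketch of the
 * same "failing defaults" pattern in plain C follows; all names here are
 * illustrative and are not kernel API.
 *
 *	#include <errno.h>
 *	#include <stdio.h>
 *
 *	struct demo_ops {
 *		int (*op_read)(void *obj);
 *		int (*op_close)(void *obj);
 *	};
 *
 *	static int
 *	demo_bad(void *obj)
 *	{
 *		return (EBADF);
 *	}
 *
 *	static struct demo_ops demo_badops = {
 *		.op_read = demo_bad,
 *		.op_close = demo_bad,
 *	};
 *
 *	int
 *	main(void)
 *	{
 *		struct demo_ops *ops = &demo_badops;
 *
 *		printf("read on uninitialized object: %d\n",
 *		    ops->op_read(NULL));
 *		return (0);
 *	}
 */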